diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/DESCRIPTION.rst deleted file mode 100644 index dc9292c..0000000 --- a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,17 +0,0 @@ - -A Pure-Python library built as a PDF toolkit. It is capable of: - -- extracting document information (title, author, ...) -- splitting documents page by page -- merging documents page by page -- cropping pages -- merging multiple pages into a single page -- encrypting and decrypting PDF files -- and more! - -By being Pure-Python, it should run on any Python platform without any -dependencies on external libraries. It can also work entirely on StringIO -objects rather than file streams, allowing for PDF manipulation in memory. -It is therefore a useful tool for websites that manage or manipulate PDFs. - - diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/METADATA b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/METADATA deleted file mode 100644 index 34688a7..0000000 --- a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/METADATA +++ /dev/null @@ -1,34 +0,0 @@ -Metadata-Version: 2.0 -Name: PyPDF2 -Version: 1.25.1 -Summary: PDF toolkit -Home-page: http://mstamy2.github.com/PyPDF2 -Author: Phaseit, Inc. -Author-email: PyPDF2@phaseit.net -License: UNKNOWN -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 3 -Classifier: Operating System :: OS Independent -Classifier: Topic :: Software Development :: Libraries :: Python Modules - - -A Pure-Python library built as a PDF toolkit. It is capable of: - -- extracting document information (title, author, ...) 
-- splitting documents page by page -- merging documents page by page -- cropping pages -- merging multiple pages into a single page -- encrypting and decrypting PDF files -- and more! - -By being Pure-Python, it should run on any Python platform without any -dependencies on external libraries. It can also work entirely on StringIO -objects rather than file streams, allowing for PDF manipulation in memory. -It is therefore a useful tool for websites that manage or manipulate PDFs. - - diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/RECORD b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/RECORD deleted file mode 100644 index 8c29a0e..0000000 --- a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/RECORD +++ /dev/null @@ -1,25 +0,0 @@ -PyPDF2/__init__.py,sha256=ugkP-3fEFZZ2-54PmYpjJ5CISEPD5W8TikZlloOJZ5M,210 -PyPDF2/_version.py,sha256=ufPT1c1QzU2MdIAGUZ89UoQfl6t3IJdOjhMyLVhsDmQ,23 -PyPDF2/filters.py,sha256=U4KQ7fJX129ePxoff-6-009e9kCWlj8_d2ipnm5QDG4,13167 -PyPDF2/generic.py,sha256=bJ3e3PpqJCvTHrQ3IH3VEXMh1RWVqiCh9T1IcmkBuAo,45129 -PyPDF2/merger.py,sha256=2Cz4QaB8R-Zm3V5P2rI-QYdqMZlN4geaAtNfrPbcTM4,21387 -PyPDF2/pagerange.py,sha256=AEMerbVjzXE55sJ2EYZzBgH1Xt4NiUsHaiycoNaW8Ys,5534 -PyPDF2/pdf.py,sha256=ceuZWSZIupSbzEzw6QrbNmN9D8PrdM6dh8zHSB9Rg2o,124907 -PyPDF2/utils.py,sha256=-ZQky5qa4gsO0zprA8V_E5sTNRBSa_ungvxvxjdHr64,7833 -PyPDF2/xmp.py,sha256=vdjDUAMCqb7-AhkuNaqCanviPHMpuJ-5adY8Kxe5jUc,13639 -PyPDF2-1.25.1.dist-info/DESCRIPTION.rst,sha256=mCiWyCHYtsbQ22O_f2FbbD8CjW1GMfwvbn67J_THZ5M,600 -PyPDF2-1.25.1.dist-info/METADATA,sha256=lGFpbQOrG5_oOYPi4GlzoQT4Lyj3eCvNEHIomSf4JsU,1174 -PyPDF2-1.25.1.dist-info/RECORD,, -PyPDF2-1.25.1.dist-info/WHEEL,sha256=lCqt3ViRAf9c8mCs6o7ffkwROUdYSy8_YHn5f_rulB4,93 -PyPDF2-1.25.1.dist-info/metadata.json,sha256=gaK0QZPmK8xsUqrxEf3uGaJQXIevQ5Z5ZfV-NfIhVc4,692 -PyPDF2-1.25.1.dist-info/top_level.txt,sha256=BERWrwqdvKXaVKhpnMbtO6b11qPA-mBt2r9a0VPF-Ow,7 
-PyPDF2-1.25.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -PyPDF2/__pycache__/pagerange.cpython-34.pyc,, -PyPDF2/__pycache__/xmp.cpython-34.pyc,, -PyPDF2/__pycache__/_version.cpython-34.pyc,, -PyPDF2/__pycache__/__init__.cpython-34.pyc,, -PyPDF2/__pycache__/utils.cpython-34.pyc,, -PyPDF2/__pycache__/pdf.cpython-34.pyc,, -PyPDF2/__pycache__/generic.cpython-34.pyc,, -PyPDF2/__pycache__/filters.cpython-34.pyc,, -PyPDF2/__pycache__/merger.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/WHEEL deleted file mode 100644 index 6d9801a..0000000 --- a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: cp34-none-any - diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/metadata.json deleted file mode 100644 index 5c2e993..0000000 --- a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"contacts": [{"email": "PyPDF2@phaseit.net", "name": "Phaseit, Inc.", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://mstamy2.github.com/PyPDF2"}}}, "generator": "bdist_wheel (0.29.0)", "metadata_version": "2.0", "name": "PyPDF2", "summary": "PDF toolkit", "version": "1.25.1"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/PKG-INFO 
b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/PKG-INFO new file mode 100644 index 0000000..13192d5 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/PKG-INFO @@ -0,0 +1,34 @@ +Metadata-Version: 1.2 +Name: PyPDF2 +Version: 1.25.1 +Summary: PDF toolkit +Home-page: http://mstamy2.github.com/PyPDF2 +Author: Mathieu Fenniak +Author-email: biziqe@mathieu.fenniak.net +Maintainer: Phaseit, Inc. +Maintainer-email: PyPDF2@phaseit.net +License: UNKNOWN +Description: + A Pure-Python library built as a PDF toolkit. It is capable of: + + - extracting document information (title, author, ...) + - splitting documents page by page + - merging documents page by page + - cropping pages + - merging multiple pages into a single page + - encrypting and decrypting PDF files + - and more! + + By being Pure-Python, it should run on any Python platform without any + dependencies on external libraries. It can also work entirely on StringIO + objects rather than file streams, allowing for PDF manipulation in memory. + It is therefore a useful tool for websites that manage or manipulate PDFs. 
+ +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/SOURCES.txt new file mode 100644 index 0000000..095b8e1 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/SOURCES.txt @@ -0,0 +1,16 @@ +CHANGELOG +MANIFEST.in +README.md +PyPDF2/__init__.py +PyPDF2/_version.py +PyPDF2/filters.py +PyPDF2/generic.py +PyPDF2/merger.py +PyPDF2/pagerange.py +PyPDF2/pdf.py +PyPDF2/utils.py +PyPDF2/xmp.py +PyPDF2.egg-info/PKG-INFO +PyPDF2.egg-info/SOURCES.txt +PyPDF2.egg-info/dependency_links.txt +PyPDF2.egg-info/top_level.txt \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/dependency_links.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/dependency_links.txt rename to Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/dependency_links.txt diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/installed-files.txt new file mode 100644 index 0000000..33aec90 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/installed-files.txt @@ -0,0 +1,22 @@ +../PyPDF2/__init__.py +../PyPDF2/__pycache__/__init__.cpython-37.pyc +../PyPDF2/__pycache__/_version.cpython-37.pyc +../PyPDF2/__pycache__/filters.cpython-37.pyc +../PyPDF2/__pycache__/generic.cpython-37.pyc 
+../PyPDF2/__pycache__/merger.cpython-37.pyc +../PyPDF2/__pycache__/pagerange.cpython-37.pyc +../PyPDF2/__pycache__/pdf.cpython-37.pyc +../PyPDF2/__pycache__/utils.cpython-37.pyc +../PyPDF2/__pycache__/xmp.cpython-37.pyc +../PyPDF2/_version.py +../PyPDF2/filters.py +../PyPDF2/generic.py +../PyPDF2/merger.py +../PyPDF2/pagerange.py +../PyPDF2/pdf.py +../PyPDF2/utils.py +../PyPDF2/xmp.py +PKG-INFO +SOURCES.txt +dependency_links.txt +top_level.txt diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.egg-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/DESCRIPTION.rst deleted file mode 100644 index e118723..0000000 --- a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,3 +0,0 @@ -UNKNOWN - - diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/RECORD b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/RECORD deleted file mode 100644 index 214cd97..0000000 --- a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/RECORD +++ /dev/null @@ -1,11 +0,0 @@ -socks.py,sha256=0pa1zGmoXBtXOf0uCgb7a2RaVyYXJGy2szcqaH-4J54,28004 -sockshandler.py,sha256=ENwUUO3vt84_1yRHAq1A147TL_TAuFux2t_1QvNV2Vo,2913 -PySocks-1.5.6.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 -PySocks-1.5.6.dist-info/METADATA,sha256=FMkow33TD954netuDo9XoNwJvnYfhG3xb29-SqYfD3s,312 -PySocks-1.5.6.dist-info/RECORD,, -PySocks-1.5.6.dist-info/WHEEL,sha256=lCqt3ViRAf9c8mCs6o7ffkwROUdYSy8_YHn5f_rulB4,93 -PySocks-1.5.6.dist-info/metadata.json,sha256=1YIXTsL7gr8l01o_J6_5NIv29fVrNchGX8t-w1bmPKA,498 
-PySocks-1.5.6.dist-info/top_level.txt,sha256=TKSOIfCFBoK9EY8FBYbYqC3PWd3--G15ph9n8-QHPDk,19 -PySocks-1.5.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -__pycache__/sockshandler.cpython-34.pyc,, -__pycache__/socks.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/WHEEL deleted file mode 100644 index 6d9801a..0000000 --- a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: cp34-none-any - diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/metadata.json deleted file mode 100644 index 1d169e2..0000000 --- a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"extensions": {"python.details": {"contacts": [{"email": "anorov.vorona@gmail.com", "name": "Anorov", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/Anorov/PySocks"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["socks", "proxy"], "license": "BSD", "metadata_version": "2.0", "name": "PySocks", "summary": "A Python SOCKS client module. 
See https://github.com/Anorov/PySocks for more information.", "version": "1.5.6"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/METADATA b/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/PKG-INFO similarity index 56% rename from Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/METADATA rename to Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/PKG-INFO index 316f318..44214b7 100644 --- a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/METADATA +++ b/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/PKG-INFO @@ -1,14 +1,14 @@ -Metadata-Version: 2.0 +Metadata-Version: 1.1 Name: PySocks -Version: 1.5.6 +Version: 1.6.8 Summary: A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information. Home-page: https://github.com/Anorov/PySocks Author: Anorov Author-email: anorov.vorona@gmail.com License: BSD +Description: UNKNOWN Keywords: socks,proxy Platform: UNKNOWN - -UNKNOWN - - +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/SOURCES.txt new file mode 100644 index 0000000..2834f45 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/SOURCES.txt @@ -0,0 +1,16 @@ +LICENSE +MANIFEST.in +README.md +setup.cfg +setup.py +socks.py +sockshandler.py +PySocks.egg-info/PKG-INFO +PySocks.egg-info/SOURCES.txt +PySocks.egg-info/dependency_links.txt +PySocks.egg-info/top_level.txt +test/__init__.py +test/test_pysocks.py +test/util.py +test/bin/3proxy +test/bin/3proxy.license \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/zip-safe b/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/dependency_links.txt similarity index 100% rename 
from Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/zip-safe rename to Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/dependency_links.txt diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/installed-files.txt new file mode 100644 index 0000000..fe9dd45 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/installed-files.txt @@ -0,0 +1,8 @@ +../__pycache__/socks.cpython-37.pyc +../__pycache__/sockshandler.cpython-37.pyc +../socks.py +../sockshandler.py +PKG-INFO +SOURCES.txt +dependency_links.txt +top_level.txt diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/PySocks-1.6.8.egg-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/DESCRIPTION.rst deleted file mode 100644 index c6b6a1c..0000000 --- a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,238 +0,0 @@ -=============================== -Installing and Using Setuptools -=============================== - -.. contents:: **Table of Contents** - - -`Change History `_. - -------------------------- -Installation Instructions -------------------------- - -The recommended way to bootstrap setuptools on any system is to download -`ez_setup.py`_ and run it using the target Python environment. Different -operating systems have different recommended techniques to accomplish this -basic routine, so below are some examples to get you started. - -Setuptools requires Python 2.6 or later. 
To install setuptools -on Python 2.4 or Python 2.5, use the `bootstrap script for Setuptools 1.x -`_. - -The link provided to ez_setup.py is a bookmark to bootstrap script for the -latest known stable release. - -.. _ez_setup.py: https://bootstrap.pypa.io/ez_setup.py - -Windows (Powershell 3 or later) -=============================== - -For best results, uninstall previous versions FIRST (see `Uninstalling`_). - -Using Windows 8 (which includes PowerShell 3) or earlier versions of Windows -with PowerShell 3 installed, it's possible to install with one simple -Powershell command. Start up Powershell and paste this command:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | python - - -You must start the Powershell with Administrative privileges or you may choose -to install a user-local installation:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | python - --user - -If you have Python 3.3 or later, you can use the ``py`` command to install to -different Python versions. For example, to install to Python 3.3 if you have -Python 2.7 installed:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | py -3 - - -The recommended way to install setuptools on Windows is to download -`ez_setup.py`_ and run it. The script will download the appropriate -distribution file and install it for you. - -Once installation is complete, you will find an ``easy_install`` program in -your Python ``Scripts`` subdirectory. For simple invocation and best results, -add this directory to your ``PATH`` environment variable, if it is not already -present. If you did a user-local install, the ``Scripts`` subdirectory is -``$env:APPDATA\Python\Scripts``. - - -Windows (simplified) -==================== - -For Windows without PowerShell 3 or for installation without a command-line, -download `ez_setup.py`_ using your preferred web browser or other technique -and "run" that file. 
- - -Unix (wget) -=========== - -Most Linux distributions come with wget. - -Download `ez_setup.py`_ and run it using the target Python version. The script -will download the appropriate version and install it for you:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | python - -Note that you will may need to invoke the command with superuser privileges to -install to the system Python:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | sudo python - -Alternatively, Setuptools may be installed to a user-local path:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | python - --user - -Note that on some older systems (noted on Debian 6 and CentOS 5 installations), -`wget` may refuse to download `ez_setup.py`, complaining that the certificate common name `*.c.ssl.fastly.net` -does not match the host name `bootstrap.pypa.io`. In addition, the `ez_setup.py` script may then encounter similar problems using -`wget` internally to download `setuptools-x.y.zip`, complaining that the certificate common name of `www.python.org` does not match the -host name `pypi.python.org`. Those are known issues, related to a bug in the older versions of `wget` -(see `Issue 59 `_). If you happen to encounter them, -install Setuptools as follows:: - - > wget --no-check-certificate https://bootstrap.pypa.io/ez_setup.py - > python ez_setup.py --insecure - - -Unix including Mac OS X (curl) -============================== - -If your system has curl installed, follow the ``wget`` instructions but -replace ``wget`` with ``curl`` and ``-O`` with ``-o``. For example:: - - > curl https://bootstrap.pypa.io/ez_setup.py -o - | python - - -Advanced Installation -===================== - -For more advanced installation options, such as installing to custom -locations or prefixes, download and extract the source -tarball from `Setuptools on PyPI `_ -and run setup.py with any supported distutils and Setuptools options. 
-For example:: - - setuptools-x.x$ python setup.py install --prefix=/opt/setuptools - -Use ``--help`` to get a full options list, but we recommend consulting -the `EasyInstall manual`_ for detailed instructions, especially `the section -on custom installation locations`_. - -.. _EasyInstall manual: https://pythonhosted.org/setuptools/EasyInstall -.. _the section on custom installation locations: https://pythonhosted.org/setuptools/EasyInstall#custom-installation-locations - - -Downloads -========= - -All setuptools downloads can be found at `the project's home page in the Python -Package Index`_. Scroll to the very bottom of the page to find the links. - -.. _the project's home page in the Python Package Index: https://pypi.python.org/pypi/setuptools - -In addition to the PyPI downloads, the development version of ``setuptools`` -is available from the `Bitbucket repo`_, and in-development versions of the -`0.6 branch`_ are available as well. - -.. _Bitbucket repo: https://bitbucket.org/pypa/setuptools/get/default.tar.gz#egg=setuptools-dev -.. _0.6 branch: http://svn.python.org/projects/sandbox/branches/setuptools-0.6/#egg=setuptools-dev06 - -Uninstalling -============ - -On Windows, if Setuptools was installed using an ``.exe`` or ``.msi`` -installer, simply use the uninstall feature of "Add/Remove Programs" in the -Control Panel. - -Otherwise, to uninstall Setuptools or Distribute, regardless of the Python -version, delete all ``setuptools*`` and ``distribute*`` files and -directories from your system's ``site-packages`` directory -(and any other ``sys.path`` directories) FIRST. - -If you are upgrading or otherwise plan to re-install Setuptools or Distribute, -nothing further needs to be done. If you want to completely remove Setuptools, -you may also want to remove the 'easy_install' and 'easy_install-x.x' scripts -and associated executables installed to the Python scripts directory. 
- --------------------------------- -Using Setuptools and EasyInstall --------------------------------- - -Here are some of the available manuals, tutorials, and other resources for -learning about Setuptools, Python Eggs, and EasyInstall: - -* `The EasyInstall user's guide and reference manual`_ -* `The setuptools Developer's Guide`_ -* `The pkg_resources API reference`_ -* `The Internal Structure of Python Eggs`_ - -Questions, comments, and bug reports should be directed to the `distutils-sig -mailing list`_. If you have written (or know of) any tutorials, documentation, -plug-ins, or other resources for setuptools users, please let us know about -them there, so this reference list can be updated. If you have working, -*tested* patches to correct problems or add features, you may submit them to -the `setuptools bug tracker`_. - -.. _setuptools bug tracker: https://bitbucket.org/pypa/setuptools/issues -.. _The Internal Structure of Python Eggs: https://pythonhosted.org/setuptools/formats.html -.. _The setuptools Developer's Guide: https://pythonhosted.org/setuptools/setuptools.html -.. _The pkg_resources API reference: https://pythonhosted.org/setuptools/pkg_resources.html -.. _The EasyInstall user's guide and reference manual: https://pythonhosted.org/setuptools/easy_install.html -.. _distutils-sig mailing list: http://mail.python.org/pipermail/distutils-sig/ - - -------- -Credits -------- - -* The original design for the ``.egg`` format and the ``pkg_resources`` API was - co-created by Phillip Eby and Bob Ippolito. Bob also implemented the first - version of ``pkg_resources``, and supplied the OS X operating system version - compatibility algorithm. - -* Ian Bicking implemented many early "creature comfort" features of - easy_install, including support for downloading via Sourceforge and - Subversion repositories. 
Ian's comments on the Web-SIG about WSGI - application deployment also inspired the concept of "entry points" in eggs, - and he has given talks at PyCon and elsewhere to inform and educate the - community about eggs and setuptools. - -* Jim Fulton contributed time and effort to build automated tests of various - aspects of ``easy_install``, and supplied the doctests for the command-line - ``.exe`` wrappers on Windows. - -* Phillip J. Eby is the seminal author of setuptools, and - first proposed the idea of an importable binary distribution format for - Python application plug-ins. - -* Significant parts of the implementation of setuptools were funded by the Open - Source Applications Foundation, to provide a plug-in infrastructure for the - Chandler PIM application. In addition, many OSAF staffers (such as Mike - "Code Bear" Taylor) contributed their time and stress as guinea pigs for the - use of eggs and setuptools, even before eggs were "cool". (Thanks, guys!) - -* Tarek Ziadé is the principal author of the Distribute fork, which - re-invigorated the community on the project, encouraged renewed innovation, - and addressed many defects. - -* Since the merge with Distribute, Jason R. Coombs is the - maintainer of setuptools. The project is maintained in coordination with - the Python Packaging Authority (PyPA) and the larger Python community. - -.. _files: - - ---------------- -Code of Conduct ---------------- - -Everyone interacting in the setuptools project's codebases, issue trackers, -chat rooms, and mailing lists is expected to follow the -`PyPA Code of Conduct`_. - -.. 
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - - diff --git a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/METADATA deleted file mode 100644 index 2a37585..0000000 --- a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/METADATA +++ /dev/null @@ -1,263 +0,0 @@ -Metadata-Version: 2.0 -Name: setuptools -Version: 20.1.1 -Summary: Easily download, build, install, upgrade, and uninstall Python packages -Home-page: https://bitbucket.org/pypa/setuptools -Author: Python Packaging Authority -Author-email: distutils-sig@python.org -License: UNKNOWN -Keywords: CPAN PyPI distutils eggs package management -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: System :: Archiving :: Packaging -Classifier: Topic :: System :: Systems Administration -Classifier: Topic :: Utilities - -=============================== -Installing and Using Setuptools -=============================== - -.. contents:: **Table of Contents** - - -`Change History `_. - -------------------------- -Installation Instructions -------------------------- - -The recommended way to bootstrap setuptools on any system is to download -`ez_setup.py`_ and run it using the target Python environment. 
Different -operating systems have different recommended techniques to accomplish this -basic routine, so below are some examples to get you started. - -Setuptools requires Python 2.6 or later. To install setuptools -on Python 2.4 or Python 2.5, use the `bootstrap script for Setuptools 1.x -`_. - -The link provided to ez_setup.py is a bookmark to bootstrap script for the -latest known stable release. - -.. _ez_setup.py: https://bootstrap.pypa.io/ez_setup.py - -Windows (Powershell 3 or later) -=============================== - -For best results, uninstall previous versions FIRST (see `Uninstalling`_). - -Using Windows 8 (which includes PowerShell 3) or earlier versions of Windows -with PowerShell 3 installed, it's possible to install with one simple -Powershell command. Start up Powershell and paste this command:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | python - - -You must start the Powershell with Administrative privileges or you may choose -to install a user-local installation:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | python - --user - -If you have Python 3.3 or later, you can use the ``py`` command to install to -different Python versions. For example, to install to Python 3.3 if you have -Python 2.7 installed:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | py -3 - - -The recommended way to install setuptools on Windows is to download -`ez_setup.py`_ and run it. The script will download the appropriate -distribution file and install it for you. - -Once installation is complete, you will find an ``easy_install`` program in -your Python ``Scripts`` subdirectory. For simple invocation and best results, -add this directory to your ``PATH`` environment variable, if it is not already -present. If you did a user-local install, the ``Scripts`` subdirectory is -``$env:APPDATA\Python\Scripts``. 
- - -Windows (simplified) -==================== - -For Windows without PowerShell 3 or for installation without a command-line, -download `ez_setup.py`_ using your preferred web browser or other technique -and "run" that file. - - -Unix (wget) -=========== - -Most Linux distributions come with wget. - -Download `ez_setup.py`_ and run it using the target Python version. The script -will download the appropriate version and install it for you:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | python - -Note that you will may need to invoke the command with superuser privileges to -install to the system Python:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | sudo python - -Alternatively, Setuptools may be installed to a user-local path:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | python - --user - -Note that on some older systems (noted on Debian 6 and CentOS 5 installations), -`wget` may refuse to download `ez_setup.py`, complaining that the certificate common name `*.c.ssl.fastly.net` -does not match the host name `bootstrap.pypa.io`. In addition, the `ez_setup.py` script may then encounter similar problems using -`wget` internally to download `setuptools-x.y.zip`, complaining that the certificate common name of `www.python.org` does not match the -host name `pypi.python.org`. Those are known issues, related to a bug in the older versions of `wget` -(see `Issue 59 `_). If you happen to encounter them, -install Setuptools as follows:: - - > wget --no-check-certificate https://bootstrap.pypa.io/ez_setup.py - > python ez_setup.py --insecure - - -Unix including Mac OS X (curl) -============================== - -If your system has curl installed, follow the ``wget`` instructions but -replace ``wget`` with ``curl`` and ``-O`` with ``-o``. 
For example:: - - > curl https://bootstrap.pypa.io/ez_setup.py -o - | python - - -Advanced Installation -===================== - -For more advanced installation options, such as installing to custom -locations or prefixes, download and extract the source -tarball from `Setuptools on PyPI `_ -and run setup.py with any supported distutils and Setuptools options. -For example:: - - setuptools-x.x$ python setup.py install --prefix=/opt/setuptools - -Use ``--help`` to get a full options list, but we recommend consulting -the `EasyInstall manual`_ for detailed instructions, especially `the section -on custom installation locations`_. - -.. _EasyInstall manual: https://pythonhosted.org/setuptools/EasyInstall -.. _the section on custom installation locations: https://pythonhosted.org/setuptools/EasyInstall#custom-installation-locations - - -Downloads -========= - -All setuptools downloads can be found at `the project's home page in the Python -Package Index`_. Scroll to the very bottom of the page to find the links. - -.. _the project's home page in the Python Package Index: https://pypi.python.org/pypi/setuptools - -In addition to the PyPI downloads, the development version of ``setuptools`` -is available from the `Bitbucket repo`_, and in-development versions of the -`0.6 branch`_ are available as well. - -.. _Bitbucket repo: https://bitbucket.org/pypa/setuptools/get/default.tar.gz#egg=setuptools-dev -.. _0.6 branch: http://svn.python.org/projects/sandbox/branches/setuptools-0.6/#egg=setuptools-dev06 - -Uninstalling -============ - -On Windows, if Setuptools was installed using an ``.exe`` or ``.msi`` -installer, simply use the uninstall feature of "Add/Remove Programs" in the -Control Panel. - -Otherwise, to uninstall Setuptools or Distribute, regardless of the Python -version, delete all ``setuptools*`` and ``distribute*`` files and -directories from your system's ``site-packages`` directory -(and any other ``sys.path`` directories) FIRST. 
- -If you are upgrading or otherwise plan to re-install Setuptools or Distribute, -nothing further needs to be done. If you want to completely remove Setuptools, -you may also want to remove the 'easy_install' and 'easy_install-x.x' scripts -and associated executables installed to the Python scripts directory. - --------------------------------- -Using Setuptools and EasyInstall --------------------------------- - -Here are some of the available manuals, tutorials, and other resources for -learning about Setuptools, Python Eggs, and EasyInstall: - -* `The EasyInstall user's guide and reference manual`_ -* `The setuptools Developer's Guide`_ -* `The pkg_resources API reference`_ -* `The Internal Structure of Python Eggs`_ - -Questions, comments, and bug reports should be directed to the `distutils-sig -mailing list`_. If you have written (or know of) any tutorials, documentation, -plug-ins, or other resources for setuptools users, please let us know about -them there, so this reference list can be updated. If you have working, -*tested* patches to correct problems or add features, you may submit them to -the `setuptools bug tracker`_. - -.. _setuptools bug tracker: https://bitbucket.org/pypa/setuptools/issues -.. _The Internal Structure of Python Eggs: https://pythonhosted.org/setuptools/formats.html -.. _The setuptools Developer's Guide: https://pythonhosted.org/setuptools/setuptools.html -.. _The pkg_resources API reference: https://pythonhosted.org/setuptools/pkg_resources.html -.. _The EasyInstall user's guide and reference manual: https://pythonhosted.org/setuptools/easy_install.html -.. _distutils-sig mailing list: http://mail.python.org/pipermail/distutils-sig/ - - -------- -Credits -------- - -* The original design for the ``.egg`` format and the ``pkg_resources`` API was - co-created by Phillip Eby and Bob Ippolito. Bob also implemented the first - version of ``pkg_resources``, and supplied the OS X operating system version - compatibility algorithm. 
- -* Ian Bicking implemented many early "creature comfort" features of - easy_install, including support for downloading via Sourceforge and - Subversion repositories. Ian's comments on the Web-SIG about WSGI - application deployment also inspired the concept of "entry points" in eggs, - and he has given talks at PyCon and elsewhere to inform and educate the - community about eggs and setuptools. - -* Jim Fulton contributed time and effort to build automated tests of various - aspects of ``easy_install``, and supplied the doctests for the command-line - ``.exe`` wrappers on Windows. - -* Phillip J. Eby is the seminal author of setuptools, and - first proposed the idea of an importable binary distribution format for - Python application plug-ins. - -* Significant parts of the implementation of setuptools were funded by the Open - Source Applications Foundation, to provide a plug-in infrastructure for the - Chandler PIM application. In addition, many OSAF staffers (such as Mike - "Code Bear" Taylor) contributed their time and stress as guinea pigs for the - use of eggs and setuptools, even before eggs were "cool". (Thanks, guys!) - -* Tarek Ziadé is the principal author of the Distribute fork, which - re-invigorated the community on the project, encouraged renewed innovation, - and addressed many defects. - -* Since the merge with Distribute, Jason R. Coombs is the - maintainer of setuptools. The project is maintained in coordination with - the Python Packaging Authority (PyPA) and the larger Python community. - -.. _files: - - ---------------- -Code of Conduct ---------------- - -Everyone interacting in the setuptools project's codebases, issue trackers, -chat rooms, and mailing lists is expected to follow the -`PyPA Code of Conduct`_. - -.. 
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - - diff --git a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/RECORD deleted file mode 100644 index 43ba25f..0000000 --- a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/RECORD +++ /dev/null @@ -1,104 +0,0 @@ -easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126 -_markerlib/__init__.py,sha256=GSmhZqvAitLJHhSgtqqusfq2nJ_ClP3oy3Lm0uZLIsU,552 -_markerlib/markers.py,sha256=YuFp0-osufFIoqnzG3L0Z2fDCx4Vln3VUDeXJ2DA_1I,3979 -_markerlib-0.0.0.dist-info/DESCRIPTION.rst,sha256=MDsJej8DPV2OKpAKpu74g-2xksRd-uGTeZn4W7D1dnI,9940 -_markerlib-0.0.0.dist-info/METADATA,sha256=l8LCWR8HLdKmOz1QMU2JQREbM9o4dCsMPkBdBSi_Jgo,10997 -_markerlib-0.0.0.dist-info/RECORD,, -_markerlib-0.0.0.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 -_markerlib-0.0.0.dist-info/dependency_links.txt,sha256=oUNXJEArClXFiSSvfFwUKY8TYjeIXhuFfCpXn5K0DCE,226 -_markerlib-0.0.0.dist-info/entry_points.txt,sha256=S6yRfyEABPIKq4cNMNO_7LHXzFVZW-exLSrKSI6kgNU,2779 -_markerlib-0.0.0.dist-info/metadata.json,sha256=OwUAZgU-PBMGwfXh2QKg7ec1Kh9aGVfWnOB5mrc48HA,4242 -_markerlib-0.0.0.dist-info/top_level.txt,sha256=7780fzudMJkykiTcIrAQ8m8Lll6kot3EEePye3VJgEE,49 -_markerlib-0.0.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 -setuptools/__init__.py,sha256=WEGb6BRGN2dz3eJTbNRUfInUAhb6_OZJyYAndPGJm6w,5440 -setuptools/archive_util.py,sha256=N30WE5ZQjkytzhAodAXw4FkK-9J5AP1ChrClHnZthOA,6609 -setuptools/depends.py,sha256=WyJIhjIX7D5-JpGSnMAPHEoDcVPQxaO0405keTQT6jM,6418 -setuptools/dist.py,sha256=txOleyyt2xCSTkUjCGW4MYZB8a1xsbC8MulDhSnoivQ,35701 -setuptools/extension.py,sha256=YvsyGHWVWzhNOXMHU239FR14wxw2WwdMLLzWsRP6_IY,1694 -setuptools/launch.py,sha256=hP3qZxDNu5Hf9C-VAkEP4IC_YYfR1XfxMTj6EguxxCg,730 -setuptools/lib2to3_ex.py,sha256=6jPF9sJuHiz0cyg4cwIBLl2VMAxcl3GYSZwWAOuJplU,1998 
-setuptools/msvc9_support.py,sha256=fo2vjb-dna1SEuHezQCTuelCo6XFBv5cqaI56ABJ1vw,2187 -setuptools/package_index.py,sha256=T6tZGPHApup6Gl3kz1sCLtY7kmMUXLBKweSAORYS2Qc,39490 -setuptools/py26compat.py,sha256=1Vvuf-hj5bTM3OAXv6vgJQImulne12ann053caOgikU,481 -setuptools/py27compat.py,sha256=CGj-jZcFgHUkrEdLvArkxHj96tAaMbG2-yJtUVU7QVI,306 -setuptools/py31compat.py,sha256=cqYSVBd2pxvKl75185z40htfEr6EKC29KvSBiSoqHOA,1636 -setuptools/sandbox.py,sha256=tuMRu_8R0_w6Qer9VqDiOTqKy1qr_GjHi-2QAg7TMz0,14210 -setuptools/script (dev).tmpl,sha256=f7MR17dTkzaqkCMSVseyOCMVrPVSMdmTQsaB8cZzfuI,201 -setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138 -setuptools/site-patch.py,sha256=K-0-cAx36mX_PG-qPZwosG9ZLCliRjquKQ4nHiJvvzg,2389 -setuptools/ssl_support.py,sha256=tAFeeyFPVle_GgarPkNrdfnCJgP9PyN_QYGXTgypoyc,8119 -setuptools/unicode_utils.py,sha256=8zVyrL_MFc6P5AmErs21rr7z-3N1pZ_NkOcDC7BPElU,995 -setuptools/utils.py,sha256=08Z7mt-9mvrx-XvmS5EyKoRn2lxNTlgFsUwBU3Eq9JQ,293 -setuptools/version.py,sha256=E3F8rAlTgCNpmTTY2YGy4T_1iQn3gKsePB7TVIcObu0,23 -setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714 -setuptools/command/__init__.py,sha256=1AM3hv_zCixE7kTXA-onWfK_2KF8GC8fUw3WSxzi5Fg,564 -setuptools/command/alias.py,sha256=KjpE0sz_SDIHv3fpZcIQK-sCkJz-SrC6Gmug6b9Nkc8,2426 -setuptools/command/bdist_egg.py,sha256=Km4CsGbevhvej6kKEfvTYxfkPoQijUyXmImNifrO4Tg,17184 -setuptools/command/bdist_rpm.py,sha256=B7l0TnzCGb-0nLlm6rS00jWLkojASwVmdhW2w5Qz_Ak,1508 -setuptools/command/bdist_wininst.py,sha256=_6dz3lpB1tY200LxKPLM7qgwTCceOMgaWFF-jW2-pm0,637 -setuptools/command/build_ext.py,sha256=pkQ8xp3YPVGGLkGv-SvfxC_GqFpboph1AFEoMFOgQMo,11964 -setuptools/command/build_py.py,sha256=HvJ88JuougDccaowYlfMV12kYtd0GLahg2DR2vQRqL4,7983 -setuptools/command/develop.py,sha256=VxSYbpM2jQqtRBn5klIjPVBo3sWKNZMlSbHHiRLUlZo,7383 -setuptools/command/easy_install.py,sha256=WDidYAhIEWCT-63bVvoazy8HcITEWDn4Xzgrj3YZgz0,88492 
-setuptools/command/egg_info.py,sha256=0_8eI8hgLAlGt8Xk5kiodY_d9lxG6_RSescJISKBJgA,16890 -setuptools/command/install.py,sha256=QwaFiZRU3ytIHoPh8uJ9EqV3Fu9C4ca4B7UGAo95tws,4685 -setuptools/command/install_egg_info.py,sha256=fEqU1EplTs_vUjAzwiEB7LrtdZBQ3BefwuUZLZBDEQ0,5027 -setuptools/command/install_lib.py,sha256=5IZM251t4DzOdZAXCezdROr3X0SeeE41eyV059RNgZ4,5011 -setuptools/command/install_scripts.py,sha256=vX2JC6v7l090N7CrTfihWBklNbPvfNKAY2LRtukM9XE,2231 -setuptools/command/register.py,sha256=bHlMm1qmBbSdahTOT8w6UhA-EgeQIz7p6cD-qOauaiI,270 -setuptools/command/rotate.py,sha256=QGZS2t4CmBl7t79KQijNCjRMU50lu3nRhu4FXWB5LIE,2038 -setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658 -setuptools/command/sdist.py,sha256=kQetnPMw6ao3nurWGJZgS4HkOH4AknzMOSvqbVA6jGA,7050 -setuptools/command/setopt.py,sha256=cygJaJWJmiVhR0e_Uh_0_fWyCxMJIqK-Bu6K0LyYUtU,5086 -setuptools/command/test.py,sha256=N2f5RwxkjwU3YQzFYHtzHr636-pdX9XJDuPg5Y92kSo,6888 -setuptools/command/upload.py,sha256=OjAryq4ZoARZiaTN_MpuG1X8Pu9CJNCKmmbMg-gab5I,649 -setuptools/command/upload_docs.py,sha256=htXpASci5gKP0RIrGZRRmbll7RnTRuwvKWZkYsBlDMM,6815 -setuptools/extern/__init__.py,sha256=mTrrj4yLMdFeEwwnqKnSuvZM5RM-HPZ1iXLgaYDlB9o,132 -../../../bin/easy_install,sha256=4bXVXBoSo_A1XK3Ga5UMkOREdCSnh8FZIYqtJVSWCa4,298 -../../../bin/easy_install-3.4,sha256=4bXVXBoSo_A1XK3Ga5UMkOREdCSnh8FZIYqtJVSWCa4,298 -_markerlib-0.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -setuptools/__pycache__/py27compat.cpython-34.pyc,, -setuptools/__pycache__/sandbox.cpython-34.pyc,, -setuptools/__pycache__/version.cpython-34.pyc,, -setuptools/__pycache__/launch.cpython-34.pyc,, -setuptools/command/__pycache__/install_egg_info.cpython-34.pyc,, -setuptools/__pycache__/py26compat.cpython-34.pyc,, -setuptools/command/__pycache__/rotate.cpython-34.pyc,, -setuptools/command/__pycache__/install_scripts.cpython-34.pyc,, -setuptools/command/__pycache__/setopt.cpython-34.pyc,, 
-setuptools/command/__pycache__/alias.cpython-34.pyc,, -setuptools/__pycache__/unicode_utils.cpython-34.pyc,, -_markerlib/__pycache__/markers.cpython-34.pyc,, -setuptools/__pycache__/package_index.cpython-34.pyc,, -setuptools/__pycache__/depends.cpython-34.pyc,, -setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,, -__pycache__/easy_install.cpython-34.pyc,, -setuptools/__pycache__/__init__.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,, -setuptools/command/__pycache__/upload_docs.cpython-34.pyc,, -setuptools/command/__pycache__/register.cpython-34.pyc,, -setuptools/__pycache__/dist.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,, -setuptools/__pycache__/archive_util.cpython-34.pyc,, -setuptools/__pycache__/extension.cpython-34.pyc,, -setuptools/command/__pycache__/test.cpython-34.pyc,, -setuptools/command/__pycache__/__init__.cpython-34.pyc,, -setuptools/__pycache__/utils.cpython-34.pyc,, -setuptools/command/__pycache__/develop.cpython-34.pyc,, -setuptools/extern/__pycache__/__init__.cpython-34.pyc,, -setuptools/__pycache__/windows_support.cpython-34.pyc,, -setuptools/command/__pycache__/build_ext.cpython-34.pyc,, -setuptools/command/__pycache__/sdist.cpython-34.pyc,, -setuptools/command/__pycache__/saveopts.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,, -setuptools/__pycache__/site-patch.cpython-34.pyc,, -setuptools/command/__pycache__/install.cpython-34.pyc,, -_markerlib/__pycache__/__init__.cpython-34.pyc,, -setuptools/command/__pycache__/egg_info.cpython-34.pyc,, -setuptools/command/__pycache__/easy_install.cpython-34.pyc,, -setuptools/__pycache__/msvc9_support.cpython-34.pyc,, -setuptools/__pycache__/ssl_support.cpython-34.pyc,, -setuptools/command/__pycache__/build_py.cpython-34.pyc,, -setuptools/command/__pycache__/install_lib.cpython-34.pyc,, -setuptools/__pycache__/py31compat.cpython-34.pyc,, -setuptools/command/__pycache__/upload.cpython-34.pyc,, diff --git 
a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/dependency_links.txt deleted file mode 100644 index 47d1e81..0000000 --- a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/dependency_links.txt +++ /dev/null @@ -1,2 +0,0 @@ -https://pypi.python.org/packages/source/c/certifi/certifi-2015.11.20.tar.gz#md5=25134646672c695c1ff1593c2dd75d08 -https://pypi.python.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2 diff --git a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/metadata.json deleted file mode 100644 index 885b5f3..0000000 --- a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Archiving :: Packaging", "Topic :: System :: Systems Administration", "Topic :: Utilities"], "extensions": {"python.commands": {"wrap_console": {"easy_install": "setuptools.command.easy_install:main"}}, "python.details": {"contacts": [{"email": "distutils-sig@python.org", "name": "Python Packaging Authority", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://bitbucket.org/pypa/setuptools"}}, "python.exports": {"console_scripts": {"easy_install": "setuptools.command.easy_install:main"}, "distutils.commands": {"alias": 
"setuptools.command.alias:alias", "bdist_egg": "setuptools.command.bdist_egg:bdist_egg", "bdist_rpm": "setuptools.command.bdist_rpm:bdist_rpm", "bdist_wininst": "setuptools.command.bdist_wininst:bdist_wininst", "build_ext": "setuptools.command.build_ext:build_ext", "build_py": "setuptools.command.build_py:build_py", "develop": "setuptools.command.develop:develop", "easy_install": "setuptools.command.easy_install:easy_install", "egg_info": "setuptools.command.egg_info:egg_info", "install": "setuptools.command.install:install", "install_egg_info": "setuptools.command.install_egg_info:install_egg_info", "install_lib": "setuptools.command.install_lib:install_lib", "install_scripts": "setuptools.command.install_scripts:install_scripts", "register": "setuptools.command.register:register", "rotate": "setuptools.command.rotate:rotate", "saveopts": "setuptools.command.saveopts:saveopts", "sdist": "setuptools.command.sdist:sdist", "setopt": "setuptools.command.setopt:setopt", "test": "setuptools.command.test:test", "upload": "setuptools.command.upload:upload", "upload_docs": "setuptools.command.upload_docs:upload_docs"}, "distutils.setup_keywords": {"convert_2to3_doctests": "setuptools.dist:assert_string_list", "dependency_links": "setuptools.dist:assert_string_list", "eager_resources": "setuptools.dist:assert_string_list", "entry_points": "setuptools.dist:check_entry_points", "exclude_package_data": "setuptools.dist:check_package_data", "extras_require": "setuptools.dist:check_extras", "include_package_data": "setuptools.dist:assert_bool", "install_requires": "setuptools.dist:check_requirements", "namespace_packages": "setuptools.dist:check_nsp", "package_data": "setuptools.dist:check_package_data", "packages": "setuptools.dist:check_packages", "setup_requires": "setuptools.dist:check_requirements", "test_loader": "setuptools.dist:check_importable", "test_runner": "setuptools.dist:check_importable", "test_suite": "setuptools.dist:check_test_suite", "tests_require": 
"setuptools.dist:check_requirements", "use_2to3": "setuptools.dist:assert_bool", "use_2to3_exclude_fixers": "setuptools.dist:assert_string_list", "use_2to3_fixers": "setuptools.dist:assert_string_list", "zip_safe": "setuptools.dist:assert_bool"}, "egg_info.writers": {"PKG-INFO": "setuptools.command.egg_info:write_pkg_info", "dependency_links.txt": "setuptools.command.egg_info:overwrite_arg", "depends.txt": "setuptools.command.egg_info:warn_depends_obsolete", "eager_resources.txt": "setuptools.command.egg_info:overwrite_arg", "entry_points.txt": "setuptools.command.egg_info:write_entries", "namespace_packages.txt": "setuptools.command.egg_info:overwrite_arg", "requires.txt": "setuptools.command.egg_info:write_requirements", "top_level.txt": "setuptools.command.egg_info:write_toplevel_names"}, "setuptools.installation": {"eggsecutable": "setuptools.command.easy_install:bootstrap"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["CPAN", "PyPI", "distutils", "eggs", "package", "management"], "metadata_version": "2.0", "name": "setuptools", "summary": "Easily download, build, install, upgrade, and uninstall Python packages", "version": "20.1.1"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/top_level.txt deleted file mode 100644 index 5fe9a7e..0000000 --- a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/top_level.txt +++ /dev/null @@ -1,4 +0,0 @@ -_markerlib -easy_install -pkg_resources -setuptools diff --git a/Shared/lib/python3.4/site-packages/_markerlib/__init__.py b/Shared/lib/python3.4/site-packages/_markerlib/__init__.py deleted file mode 100644 index e2b237b..0000000 --- a/Shared/lib/python3.4/site-packages/_markerlib/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -try: - import ast - from _markerlib.markers import default_environment, compile, interpret -except ImportError: - if 'ast' in globals(): - raise - def 
default_environment(): - return {} - def compile(marker): - def marker_fn(environment=None, override=None): - # 'empty markers are True' heuristic won't install extra deps. - return not marker.strip() - marker_fn.__doc__ = marker - return marker_fn - def interpret(marker, environment=None, override=None): - return compile(marker)() diff --git a/Shared/lib/python3.4/site-packages/_markerlib/markers.py b/Shared/lib/python3.4/site-packages/_markerlib/markers.py deleted file mode 100644 index fa83706..0000000 --- a/Shared/lib/python3.4/site-packages/_markerlib/markers.py +++ /dev/null @@ -1,119 +0,0 @@ -# -*- coding: utf-8 -*- -"""Interpret PEP 345 environment markers. - -EXPR [in|==|!=|not in] EXPR [or|and] ... - -where EXPR belongs to any of those: - - python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1]) - python_full_version = sys.version.split()[0] - os.name = os.name - sys.platform = sys.platform - platform.version = platform.version() - platform.machine = platform.machine() - platform.python_implementation = platform.python_implementation() - a free string, like '2.6', or 'win32' -""" - -__all__ = ['default_environment', 'compile', 'interpret'] - -import ast -import os -import platform -import sys -import weakref - -_builtin_compile = compile - -try: - from platform import python_implementation -except ImportError: - if os.name == "java": - # Jython 2.5 has ast module, but not platform.python_implementation() function. - def python_implementation(): - return "Jython" - else: - raise - - -# restricted set of variables -_VARS = {'sys.platform': sys.platform, - 'python_version': '%s.%s' % sys.version_info[:2], - # FIXME parsing sys.platform is not reliable, but there is no other - # way to get e.g. 
2.7.2+, and the PEP is defined with sys.version - 'python_full_version': sys.version.split(' ', 1)[0], - 'os.name': os.name, - 'platform.version': platform.version(), - 'platform.machine': platform.machine(), - 'platform.python_implementation': python_implementation(), - 'extra': None # wheel extension - } - -for var in list(_VARS.keys()): - if '.' in var: - _VARS[var.replace('.', '_')] = _VARS[var] - -def default_environment(): - """Return copy of default PEP 385 globals dictionary.""" - return dict(_VARS) - -class ASTWhitelist(ast.NodeTransformer): - def __init__(self, statement): - self.statement = statement # for error messages - - ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str) - # Bool operations - ALLOWED += (ast.And, ast.Or) - # Comparison operations - ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn) - - def visit(self, node): - """Ensure statement only contains allowed nodes.""" - if not isinstance(node, self.ALLOWED): - raise SyntaxError('Not allowed in environment markers.\n%s\n%s' % - (self.statement, - (' ' * node.col_offset) + '^')) - return ast.NodeTransformer.visit(self, node) - - def visit_Attribute(self, node): - """Flatten one level of attribute access.""" - new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx) - return ast.copy_location(new_node, node) - -def parse_marker(marker): - tree = ast.parse(marker, mode='eval') - new_tree = ASTWhitelist(marker).generic_visit(tree) - return new_tree - -def compile_marker(parsed_marker): - return _builtin_compile(parsed_marker, '', 'eval', - dont_inherit=True) - -_cache = weakref.WeakValueDictionary() - -def compile(marker): - """Return compiled marker as a function accepting an environment dict.""" - try: - return _cache[marker] - except KeyError: - pass - if not marker.strip(): - def marker_fn(environment=None, override=None): - """""" - return True - else: - compiled_marker = 
compile_marker(parse_marker(marker)) - def marker_fn(environment=None, override=None): - """override updates environment""" - if override is None: - override = {} - if environment is None: - environment = default_environment() - environment.update(override) - return eval(compiled_marker, environment) - marker_fn.__doc__ = marker - _cache[marker] = marker_fn - return _cache[marker] - -def interpret(marker, environment=None): - return compile(marker)(environment) diff --git a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/DESCRIPTION.rst deleted file mode 100644 index 2687f1e..0000000 --- a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,84 +0,0 @@ -============= -ABC-Backports -============= - -Usage: - -.. code-block:: python - - try: - # ABCs live in "collections.abc" in Python >= 3.3 - from collections.abc import Coroutine, Generator - except ImportError: - # fall back to import from "backports_abc" - from backports_abc import Coroutine, Generator - -You can also install the ABCs into the stdlib by calling the ``patch()`` -function: - -.. code-block:: python - - import backports_abc - backports_abc.patch() - - try: - # ABCs live in "collections.abc" in Python >= 3.3 - from collections.abc import Coroutine, Generator - except ImportError: - # fall back to import from "collections" in Python <= 3.2 - from backports_abc import Coroutine, Generator - -Currently, ``patch()`` provides the following names if missing: - -* ``collections.abc.Generator`` -* ``collections.abc.Awaitable`` -* ``collections.abc.Coroutine`` -* ``inspect.isawaitable(obj)`` - -All of them are also available directly from the ``backports_abc`` -module namespace. - -In Python 2.x and Python 3.2, it patches the ``collections`` module -instead of the ``collections.abc`` module. 
Any names that are already -available when importing this module will not be overwritten. - -The names that were previously patched by ``patch()`` can be queried -through the mapping in ``backports_abc.PATCHED``. - -Changelog -========= - -0.4 (2015-09-14) ----------------- - -* direct wheel building support - -* make all names available at the module level instead of requiring patching - - -0.3 (2015-07-03) ----------------- - -* removed patching of ``inspect.iscoroutine()`` as it is not ABC based - - -0.2 (2015-07-03) ----------------- - -* require explicit ``backports_abc.patch()`` call to do the patching - (avoids side-effects on import and allows future configuration) - -* provide access to patched names through global ``PATCHED`` dict - -* add ABC based implementations of inspect.iscoroutine() and - inspect.isawaitable() - - -0.1 (2015-06-24) ----------------- - -* initial public release - -* provided ABCs: Generator, Coroutine, Awaitable - - diff --git a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/METADATA b/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/METADATA deleted file mode 100644 index 73728cc..0000000 --- a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/METADATA +++ /dev/null @@ -1,101 +0,0 @@ -Metadata-Version: 2.0 -Name: backports-abc -Version: 0.4 -Summary: A backport of recent additions to the 'collections.abc' module. -Home-page: https://github.com/cython/backports_abc -Author: Stefan Behnel et al. -Author-email: cython-devel@python.org -License: UNKNOWN -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: Python Software Foundation License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 3 - -============= -ABC-Backports -============= - -Usage: - -.. 
code-block:: python - - try: - # ABCs live in "collections.abc" in Python >= 3.3 - from collections.abc import Coroutine, Generator - except ImportError: - # fall back to import from "backports_abc" - from backports_abc import Coroutine, Generator - -You can also install the ABCs into the stdlib by calling the ``patch()`` -function: - -.. code-block:: python - - import backports_abc - backports_abc.patch() - - try: - # ABCs live in "collections.abc" in Python >= 3.3 - from collections.abc import Coroutine, Generator - except ImportError: - # fall back to import from "collections" in Python <= 3.2 - from backports_abc import Coroutine, Generator - -Currently, ``patch()`` provides the following names if missing: - -* ``collections.abc.Generator`` -* ``collections.abc.Awaitable`` -* ``collections.abc.Coroutine`` -* ``inspect.isawaitable(obj)`` - -All of them are also available directly from the ``backports_abc`` -module namespace. - -In Python 2.x and Python 3.2, it patches the ``collections`` module -instead of the ``collections.abc`` module. Any names that are already -available when importing this module will not be overwritten. - -The names that were previously patched by ``patch()`` can be queried -through the mapping in ``backports_abc.PATCHED``. 
- -Changelog -========= - -0.4 (2015-09-14) ----------------- - -* direct wheel building support - -* make all names available at the module level instead of requiring patching - - -0.3 (2015-07-03) ----------------- - -* removed patching of ``inspect.iscoroutine()`` as it is not ABC based - - -0.2 (2015-07-03) ----------------- - -* require explicit ``backports_abc.patch()`` call to do the patching - (avoids side-effects on import and allows future configuration) - -* provide access to patched names through global ``PATCHED`` dict - -* add ABC based implementations of inspect.iscoroutine() and - inspect.isawaitable() - - -0.1 (2015-06-24) ----------------- - -* initial public release - -* provided ABCs: Generator, Coroutine, Awaitable - - diff --git a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/RECORD b/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/RECORD deleted file mode 100644 index a1f42ef..0000000 --- a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/RECORD +++ /dev/null @@ -1,9 +0,0 @@ -backports_abc.py,sha256=mbCAmZnNay6Xudwu7KSetUPDbbImIfROXSSve8izAEI,5277 -backports_abc-0.4.dist-info/DESCRIPTION.rst,sha256=LhLJKmQBL5GC4g-6f-JhfIY7qNmsd1ROa3n2BNb1syU,2074 -backports_abc-0.4.dist-info/METADATA,sha256=4yoBi66MYiwltr2oDC2O9r__fFUWf8SDlVxZUPbt8ZU,2716 -backports_abc-0.4.dist-info/top_level.txt,sha256=VrQEXq17Ce9X1jfsstW6gYD3gyq3Kjio-8wFYildkzo,14 -backports_abc-0.4.dist-info/RECORD,, -backports_abc-0.4.dist-info/metadata.json,sha256=AIEgf2dZKeoBWp4UUzRRrUljLI9o_lXFgb09KXQXG3w,760 -backports_abc-0.4.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 -backports_abc-0.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -__pycache__/backports_abc.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/metadata.json deleted file mode 100644 index 
0a046b2..0000000 --- a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Python Software Foundation License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3"], "metadata_version": "2.0", "extensions": {"python.details": {"contacts": [{"email": "cython-devel@python.org", "name": "Stefan Behnel et al.", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/cython/backports_abc"}}}, "summary": "A backport of recent additions to the 'collections.abc' module.", "version": "0.4", "name": "backports-abc", "generator": "bdist_wheel (0.24.0)"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/top_level.txt deleted file mode 100644 index 2fff0c3..0000000 --- a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -backports_abc diff --git a/Shared/lib/python3.4/site-packages/backports_abc.py b/Shared/lib/python3.4/site-packages/backports_abc.py deleted file mode 100644 index c48b7b0..0000000 --- a/Shared/lib/python3.4/site-packages/backports_abc.py +++ /dev/null @@ -1,202 +0,0 @@ -""" -Patch recently added ABCs into the standard lib module -``collections.abc`` (Py3) or ``collections`` (Py2). 
- -Usage:: - - import backports_abc - backports_abc.patch() - -or:: - - try: - from collections.abc import Generator - except ImportError: - from backports_abc import Generator -""" - -try: - import collections.abc as _collections_abc -except ImportError: - import collections as _collections_abc - - -def mk_gen(): - from abc import abstractmethod - - required_methods = ( - '__iter__', '__next__' if hasattr(iter(()), '__next__') else 'next', - 'send', 'throw', 'close') - - class Generator(_collections_abc.Iterator): - __slots__ = () - - if '__next__' in required_methods: - def __next__(self): - return self.send(None) - else: - def next(self): - return self.send(None) - - @abstractmethod - def send(self, value): - raise StopIteration - - @abstractmethod - def throw(self, typ, val=None, tb=None): - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - def close(self): - try: - self.throw(GeneratorExit) - except (GeneratorExit, StopIteration): - pass - else: - raise RuntimeError('generator ignored GeneratorExit') - - @classmethod - def __subclasshook__(cls, C): - if cls is Generator: - mro = C.__mro__ - for method in required_methods: - for base in mro: - if method in base.__dict__: - break - else: - return NotImplemented - return True - return NotImplemented - - generator = type((lambda: (yield))()) - Generator.register(generator) - return Generator - - -def mk_awaitable(): - from abc import abstractmethod, ABCMeta - - @abstractmethod - def __await__(self): - yield - - @classmethod - def __subclasshook__(cls, C): - if cls is Awaitable: - for B in C.__mro__: - if '__await__' in B.__dict__: - if B.__dict__['__await__']: - return True - break - return NotImplemented - - # calling metaclass directly as syntax differs in Py2/Py3 - Awaitable = ABCMeta('Awaitable', (), { - '__slots__': (), - '__await__': __await__, - '__subclasshook__': __subclasshook__, - }) - - return Awaitable - - -def 
mk_coroutine(): - from abc import abstractmethod - - class Coroutine(Awaitable): - __slots__ = () - - @abstractmethod - def send(self, value): - """Send a value into the coroutine. - Return next yielded value or raise StopIteration. - """ - raise StopIteration - - @abstractmethod - def throw(self, typ, val=None, tb=None): - """Raise an exception in the coroutine. - Return next yielded value or raise StopIteration. - """ - if val is None: - if tb is None: - raise typ - val = typ() - if tb is not None: - val = val.with_traceback(tb) - raise val - - def close(self): - """Raise GeneratorExit inside coroutine. - """ - try: - self.throw(GeneratorExit) - except (GeneratorExit, StopIteration): - pass - else: - raise RuntimeError('coroutine ignored GeneratorExit') - - @classmethod - def __subclasshook__(cls, C): - if cls is Coroutine: - mro = C.__mro__ - for method in ('__await__', 'send', 'throw', 'close'): - for base in mro: - if method in base.__dict__: - break - else: - return NotImplemented - return True - return NotImplemented - - return Coroutine - - -### -# make all ABCs available in this module - -try: - Generator = _collections_abc.Generator -except AttributeError: - Generator = mk_gen() - -try: - Awaitable = _collections_abc.Awaitable -except AttributeError: - Awaitable = mk_awaitable() - -try: - Coroutine = _collections_abc.Coroutine -except AttributeError: - Coroutine = mk_coroutine() - -try: - from inspect import isawaitable -except ImportError: - def isawaitable(obj): - return isinstance(obj, Awaitable) - - -### -# allow patching the stdlib - -PATCHED = {} - - -def patch(patch_inspect=True): - """ - Main entry point for patching the ``collections.abc`` and ``inspect`` - standard library modules. 
- """ - PATCHED['collections.abc.Generator'] = _collections_abc.Generator = Generator - PATCHED['collections.abc.Coroutine'] = _collections_abc.Coroutine = Coroutine - PATCHED['collections.abc.Awaitable'] = _collections_abc.Awaitable = Awaitable - - if patch_inspect: - import inspect - PATCHED['inspect.isawaitable'] = inspect.isawaitable = isawaitable diff --git a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/RECORD deleted file mode 100644 index 6b7b70b..0000000 --- a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/RECORD +++ /dev/null @@ -1,85 +0,0 @@ -chardet/__init__.py,sha256=XuTKCYOR7JwsoHxqZTYH86LVyMDbDI3s1s0W_qoGEBM,1295 -chardet/big5freq.py,sha256=D8oTdz-GM7Jg8TsaWJDm65vM_OLHC3xub6qUJ3rOgsQ,82594 -chardet/big5prober.py,sha256=XX96C--6WKYW36mL-z7pJSAtc169Z8ZImByCP4pEN9A,1684 -chardet/chardetect.py,sha256=f4299UZG6uWd3i3r_N0OdrFj2sA9JFI54PAmDLAFmWA,2504 -chardet/chardistribution.py,sha256=cUARQFr1oTLXeJCDQrDRkUP778AvSMzhSCnG8VLCV58,9226 -chardet/charsetgroupprober.py,sha256=0lKk7VE516fgMw119tNefFqLOxKfIE9WfdkpIT69OKU,3791 -chardet/charsetprober.py,sha256=Z48o2KiOj23FNqYH8FqzhH5m1qdm3rI8DcTm2Yqtklg,1902 -chardet/codingstatemachine.py,sha256=E85rYhHVMw9xDEJVgiQhp0OnLGr6i2r8_7QOWMKTH08,2318 -chardet/compat.py,sha256=5mm6yrHwef1JEG5OxkPJlSq5lkjLVpEGh3iPgFBkpkM,1157 -chardet/constants.py,sha256=-UnY8U7EP7z9fTyd09yq35BEkSFEAUAiv9ohd1DW1s4,1335 -chardet/cp949prober.py,sha256=FMvdLyB7fejPXRsTbca7LK1P3RUvvssmjUNyaEfz8zY,1782 -chardet/escprober.py,sha256=q5TcQKeVq31WxrW7Sv8yjpZkjEoaHO8S92EJZ9hodys,3187 -chardet/escsm.py,sha256=7iljEKN8lXTh8JFXPUSwlibMno6R6ksq4evLxbkzfro,7839 -chardet/eucjpprober.py,sha256=5IpfSEjAb7h3hcGMd6dkU80O900C2N6xku28rdYFKuc,3678 -chardet/euckrfreq.py,sha256=T5saK5mImySG5ygQPtsp6o2uKulouCwYm2ElOyFkJqU,45978 -chardet/euckrprober.py,sha256=Wo7dnZ5Erw_nB4H-m5alMiOxOuJUmGHlwCSaGqExDZA,1675 
-chardet/euctwfreq.py,sha256=G_I0BW9i1w0ONeeUwIYqV7_U09buIHdqh-wNHVaql7I,34872 -chardet/euctwprober.py,sha256=upS2P6GuT5ujOxXYw-RJLcT7A4PTuo27KGUKU4UZpIQ,1676 -chardet/gb2312freq.py,sha256=M2gFdo_qQ_BslStEchrPW5CrPEZEacC0uyDLw4ok-kY,36011 -chardet/gb2312prober.py,sha256=VWnjoRa83Y6V6oczMaxyUr0uy48iCnC2nzk9zfEIRHc,1681 -chardet/hebrewprober.py,sha256=8pdoUfsVXf_L4BnJde_BewS6H2yInV5688eu0nFhLHY,13359 -chardet/jisfreq.py,sha256=ZcL4R5ekHHbP2KCYGakVMBsiKqZZZAABzhwi-uRkOps,47315 -chardet/jpcntx.py,sha256=yftmp0QaF6RJO5SJs8I7LU5AF4rwP23ebeCQL4BM1OY,19348 -chardet/langbulgarianmodel.py,sha256=ZyPsA796MSVhYdfWhMCgKWckupAKAnKqWcE3Cl3ej6o,12784 -chardet/langcyrillicmodel.py,sha256=fkcd5OvogUp-GrNDWAZPgkYsSRCD2omotAEvqjlmLKE,17725 -chardet/langgreekmodel.py,sha256=QHMy31CH_ot67UCtmurCEKqKx2WwoaKrw2YCYYBK2Lw,12628 -chardet/langhebrewmodel.py,sha256=4ASl5vzKJPng4H278VHKtRYC03TpQpenlHTcsmZH1rE,11318 -chardet/langhungarianmodel.py,sha256=SXwuUzh49_cBeMXhshRHdrhlkz0T8_pZWV_pdqBKNFk,12536 -chardet/langthaimodel.py,sha256=-k7djh3dGKngAGnt3WfuoJN7acDcWcmHAPojhaUd7q4,11275 -chardet/latin1prober.py,sha256=238JHOxH8aRudJY2NmeSv5s7i0Qe3GuklIU3HlYybvg,5232 -chardet/mbcharsetprober.py,sha256=9rOCjDVsmSMp6e7q2syqak22j7lrbUZhJhMee2gbVL0,3268 -chardet/mbcsgroupprober.py,sha256=SHRzNPLpDXfMJLA8phCHVU0WgqbgDCNxDQMolGX_7yk,1967 -chardet/mbcssm.py,sha256=IKwJXyxu34n6NojmxVxC60MLFtJKm-hIfxaFEnb3uBA,19590 -chardet/sbcharsetprober.py,sha256=Xq0lODqJnDgxglBiQI4BqTFiPbn63-0a5XNA5-hVu7U,4793 -chardet/sbcsgroupprober.py,sha256=8hLyH8RAG-aohBo7o_KciWVgRo42ZE_zEtuNG1JMRYI,3291 -chardet/sjisprober.py,sha256=UYOmiMDzttYIkSDoOB08UEagivJpUXz4tuWiWzTiOr8,3764 -chardet/universaldetector.py,sha256=h-E2x6XSCzlNjycYWG0Fe4Cf1SGdaIzUNu2HCphpMZA,6840 -chardet/utf8prober.py,sha256=7tdNZGrJY7jZUBD483GGMkiP0Tx8Fp-cGvWHoAsilHg,2652 -chardet-2.3.0.dist-info/DESCRIPTION.rst,sha256=7VG_hrAJBS_-fy0UoMy32Nb22Ty98PZZrnB5ZWXXHfA,1465 -chardet-2.3.0.dist-info/METADATA,sha256=weRVfg_3PuhyNaqOTnSN6WxcIfjXb87WQXb-QTLs57E,2429 
-chardet-2.3.0.dist-info/RECORD,, -chardet-2.3.0.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 -chardet-2.3.0.dist-info/entry_points.txt,sha256=2T00JXwbiQBZQFSKyCFxud4LEQ3_8TKuOwUsSXT-kUI,56 -chardet-2.3.0.dist-info/metadata.json,sha256=ptG9BSXYY-lmHsq-i6SjRfvRXYfuvmZz8rZfxrO4Pjs,1225 -chardet-2.3.0.dist-info/top_level.txt,sha256=AowzBbZy4x8EirABDdJSLJZMkJ_53iIag8xfKR6D7kI,8 -../../../bin/chardetect,sha256=GQPhfifvGgQPjka4kP_i-SYM5X_TEW0Hu3SV937PX2o,285 -chardet-2.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -chardet/__pycache__/universaldetector.cpython-34.pyc,, -chardet/__pycache__/euckrprober.cpython-34.pyc,, -chardet/__pycache__/sjisprober.cpython-34.pyc,, -chardet/__pycache__/mbcsgroupprober.cpython-34.pyc,, -chardet/__pycache__/compat.cpython-34.pyc,, -chardet/__pycache__/chardistribution.cpython-34.pyc,, -chardet/__pycache__/cp949prober.cpython-34.pyc,, -chardet/__pycache__/big5prober.cpython-34.pyc,, -chardet/__pycache__/gb2312prober.cpython-34.pyc,, -chardet/__pycache__/mbcharsetprober.cpython-34.pyc,, -chardet/__pycache__/escprober.cpython-34.pyc,, -chardet/__pycache__/mbcssm.cpython-34.pyc,, -chardet/__pycache__/langbulgarianmodel.cpython-34.pyc,, -chardet/__pycache__/big5freq.cpython-34.pyc,, -chardet/__pycache__/latin1prober.cpython-34.pyc,, -chardet/__pycache__/escsm.cpython-34.pyc,, -chardet/__pycache__/sbcsgroupprober.cpython-34.pyc,, -chardet/__pycache__/langhungarianmodel.cpython-34.pyc,, -chardet/__pycache__/euctwprober.cpython-34.pyc,, -chardet/__pycache__/hebrewprober.cpython-34.pyc,, -chardet/__pycache__/langthaimodel.cpython-34.pyc,, -chardet/__pycache__/__init__.cpython-34.pyc,, -chardet/__pycache__/charsetprober.cpython-34.pyc,, -chardet/__pycache__/chardetect.cpython-34.pyc,, -chardet/__pycache__/euctwfreq.cpython-34.pyc,, -chardet/__pycache__/langhebrewmodel.cpython-34.pyc,, -chardet/__pycache__/charsetgroupprober.cpython-34.pyc,, -chardet/__pycache__/constants.cpython-34.pyc,, 
-chardet/__pycache__/euckrfreq.cpython-34.pyc,, -chardet/__pycache__/langgreekmodel.cpython-34.pyc,, -chardet/__pycache__/utf8prober.cpython-34.pyc,, -chardet/__pycache__/codingstatemachine.cpython-34.pyc,, -chardet/__pycache__/jpcntx.cpython-34.pyc,, -chardet/__pycache__/langcyrillicmodel.cpython-34.pyc,, -chardet/__pycache__/sbcharsetprober.cpython-34.pyc,, -chardet/__pycache__/gb2312freq.cpython-34.pyc,, -chardet/__pycache__/eucjpprober.cpython-34.pyc,, -chardet/__pycache__/jisfreq.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/entry_points.txt b/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/entry_points.txt deleted file mode 100644 index 443c583..0000000 --- a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[console_scripts] -chardetect = chardet.chardetect:main - diff --git a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/metadata.json deleted file mode 100644 index 79223ca..0000000 --- a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Linguistic"], "extensions": {"python.commands": {"wrap_console": {"chardetect": "chardet.chardetect:main"}}, "python.details": {"contacts": [{"email": "graffatcolmingov@gmail.com", "name": "Ian Cordasco", "role": 
"author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/chardet/chardet"}}, "python.exports": {"console_scripts": {"chardetect": "chardet.chardetect:main"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["encoding", "i18n", "xml"], "license": "LGPL", "metadata_version": "2.0", "name": "chardet", "summary": "Universal encoding detector for Python 2 and 3", "version": "2.3.0"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/DESCRIPTION.rst similarity index 63% rename from Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/DESCRIPTION.rst rename to Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/DESCRIPTION.rst index f980838..c0f044d 100644 --- a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/DESCRIPTION.rst +++ b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/DESCRIPTION.rst @@ -1,20 +1,38 @@ Chardet: The Universal Character Encoding Detector -------------------------------------------------- +.. image:: https://img.shields.io/travis/chardet/chardet/stable.svg + :alt: Build status + :target: https://travis-ci.org/chardet/chardet + +.. image:: https://img.shields.io/coveralls/chardet/chardet/stable.svg + :target: https://coveralls.io/r/chardet/chardet + +.. image:: https://img.shields.io/pypi/v/chardet.svg + :target: https://warehouse.python.org/project/chardet/ + :alt: Latest version on PyPI + +.. 
image:: https://img.shields.io/pypi/l/chardet.svg + :alt: License + + Detects - ASCII, UTF-8, UTF-16 (2 variants), UTF-32 (4 variants) - Big5, GB2312, EUC-TW, HZ-GB-2312, ISO-2022-CN (Traditional and Simplified Chinese) - EUC-JP, SHIFT_JIS, CP932, ISO-2022-JP (Japanese) - EUC-KR, ISO-2022-KR (Korean) - KOI8-R, MacCyrillic, IBM855, IBM866, ISO-8859-5, windows-1251 (Cyrillic) - - ISO-8859-2, windows-1250 (Hungarian) - ISO-8859-5, windows-1251 (Bulgarian) - - windows-1252 (English) + - ISO-8859-1, windows-1252 (Western European languages) - ISO-8859-7, windows-1253 (Greek) - ISO-8859-8, windows-1255 (Visual and Logical Hebrew) - TIS-620 (Thai) -Requires Python 2.6 or later +.. note:: + Our ISO-8859-2 and windows-1250 (Hungarian) probers have been temporarily + disabled until we can retrain the models. + +Requires Python 2.6, 2.7, or 3.3+. Installation ------------ @@ -23,6 +41,10 @@ Install from `PyPI `_:: pip install chardet +Documentation +------------- + +For users, docs are now available at https://chardet.readthedocs.io/. 
Command-line Tool ----------------- diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/INSTALLER similarity index 100% rename from Shared/lib/python3.4/site-packages/PyPDF2-1.25.1.dist-info/INSTALLER rename to Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/INSTALLER diff --git a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/METADATA similarity index 68% rename from Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/METADATA rename to Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/METADATA index ea4fdad..1427867 100644 --- a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/METADATA +++ b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/METADATA @@ -1,10 +1,10 @@ Metadata-Version: 2.0 Name: chardet -Version: 2.3.0 +Version: 3.0.4 Summary: Universal encoding detector for Python 2 and 3 Home-page: https://github.com/chardet/chardet -Author: Ian Cordasco -Author-email: graffatcolmingov@gmail.com +Author: Daniel Blanchard +Author-email: dan.blanchard@gmail.com License: LGPL Keywords: encoding,i18n,xml Platform: UNKNOWN @@ -17,28 +17,48 @@ Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.2 Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Text Processing :: Linguistic Chardet: The Universal Character Encoding Detector -------------------------------------------------- +.. 
image:: https://img.shields.io/travis/chardet/chardet/stable.svg + :alt: Build status + :target: https://travis-ci.org/chardet/chardet + +.. image:: https://img.shields.io/coveralls/chardet/chardet/stable.svg + :target: https://coveralls.io/r/chardet/chardet + +.. image:: https://img.shields.io/pypi/v/chardet.svg + :target: https://warehouse.python.org/project/chardet/ + :alt: Latest version on PyPI + +.. image:: https://img.shields.io/pypi/l/chardet.svg + :alt: License + + Detects - ASCII, UTF-8, UTF-16 (2 variants), UTF-32 (4 variants) - Big5, GB2312, EUC-TW, HZ-GB-2312, ISO-2022-CN (Traditional and Simplified Chinese) - EUC-JP, SHIFT_JIS, CP932, ISO-2022-JP (Japanese) - EUC-KR, ISO-2022-KR (Korean) - KOI8-R, MacCyrillic, IBM855, IBM866, ISO-8859-5, windows-1251 (Cyrillic) - - ISO-8859-2, windows-1250 (Hungarian) - ISO-8859-5, windows-1251 (Bulgarian) - - windows-1252 (English) + - ISO-8859-1, windows-1252 (Western European languages) - ISO-8859-7, windows-1253 (Greek) - ISO-8859-8, windows-1255 (Visual and Logical Hebrew) - TIS-620 (Thai) -Requires Python 2.6 or later +.. note:: + Our ISO-8859-2 and windows-1250 (Hungarian) probers have been temporarily + disabled until we can retrain the models. + +Requires Python 2.6, 2.7, or 3.3+. Installation ------------ @@ -47,6 +67,10 @@ Install from `PyPI `_:: pip install chardet +Documentation +------------- + +For users, docs are now available at https://chardet.readthedocs.io/. 
Command-line Tool ----------------- diff --git a/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/RECORD b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/RECORD new file mode 100644 index 0000000..fb56682 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/RECORD @@ -0,0 +1,91 @@ +../../../bin/chardetect,sha256=vFEFbblv8wsETOzxl2JkCwjDz-VbH_4BePgWIT13Xlc,289 +chardet-3.0.4.dist-info/DESCRIPTION.rst,sha256=PQ4sBsMyKFZkjC6QpmbpLn0UtCNyeb-ZqvCGEgyZMGk,2174 +chardet-3.0.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +chardet-3.0.4.dist-info/METADATA,sha256=RV_2I4B1Z586DL8oVO5Kp7X5bUdQ5EuKAvNoAEF8wSw,3239 +chardet-3.0.4.dist-info/RECORD,, +chardet-3.0.4.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +chardet-3.0.4.dist-info/entry_points.txt,sha256=fAMmhu5eJ-zAJ-smfqQwRClQ3-nozOCmvJ6-E8lgGJo,60 +chardet-3.0.4.dist-info/metadata.json,sha256=0htbRM18ujyGZDdfowgAqj6Hq2eQtwzwyhaEveKntgo,1375 +chardet-3.0.4.dist-info/top_level.txt,sha256=AowzBbZy4x8EirABDdJSLJZMkJ_53iIag8xfKR6D7kI,8 +chardet/__init__.py,sha256=YsP5wQlsHJ2auF1RZJfypiSrCA7_bQiRm3ES_NI76-Y,1559 +chardet/__pycache__/__init__.cpython-37.pyc,, +chardet/__pycache__/big5freq.cpython-37.pyc,, +chardet/__pycache__/big5prober.cpython-37.pyc,, +chardet/__pycache__/chardistribution.cpython-37.pyc,, +chardet/__pycache__/charsetgroupprober.cpython-37.pyc,, +chardet/__pycache__/charsetprober.cpython-37.pyc,, +chardet/__pycache__/codingstatemachine.cpython-37.pyc,, +chardet/__pycache__/compat.cpython-37.pyc,, +chardet/__pycache__/cp949prober.cpython-37.pyc,, +chardet/__pycache__/enums.cpython-37.pyc,, +chardet/__pycache__/escprober.cpython-37.pyc,, +chardet/__pycache__/escsm.cpython-37.pyc,, +chardet/__pycache__/eucjpprober.cpython-37.pyc,, +chardet/__pycache__/euckrfreq.cpython-37.pyc,, +chardet/__pycache__/euckrprober.cpython-37.pyc,, +chardet/__pycache__/euctwfreq.cpython-37.pyc,, 
+chardet/__pycache__/euctwprober.cpython-37.pyc,, +chardet/__pycache__/gb2312freq.cpython-37.pyc,, +chardet/__pycache__/gb2312prober.cpython-37.pyc,, +chardet/__pycache__/hebrewprober.cpython-37.pyc,, +chardet/__pycache__/jisfreq.cpython-37.pyc,, +chardet/__pycache__/jpcntx.cpython-37.pyc,, +chardet/__pycache__/langbulgarianmodel.cpython-37.pyc,, +chardet/__pycache__/langcyrillicmodel.cpython-37.pyc,, +chardet/__pycache__/langgreekmodel.cpython-37.pyc,, +chardet/__pycache__/langhebrewmodel.cpython-37.pyc,, +chardet/__pycache__/langhungarianmodel.cpython-37.pyc,, +chardet/__pycache__/langthaimodel.cpython-37.pyc,, +chardet/__pycache__/langturkishmodel.cpython-37.pyc,, +chardet/__pycache__/latin1prober.cpython-37.pyc,, +chardet/__pycache__/mbcharsetprober.cpython-37.pyc,, +chardet/__pycache__/mbcsgroupprober.cpython-37.pyc,, +chardet/__pycache__/mbcssm.cpython-37.pyc,, +chardet/__pycache__/sbcharsetprober.cpython-37.pyc,, +chardet/__pycache__/sbcsgroupprober.cpython-37.pyc,, +chardet/__pycache__/sjisprober.cpython-37.pyc,, +chardet/__pycache__/universaldetector.cpython-37.pyc,, +chardet/__pycache__/utf8prober.cpython-37.pyc,, +chardet/__pycache__/version.cpython-37.pyc,, +chardet/big5freq.py,sha256=D_zK5GyzoVsRes0HkLJziltFQX0bKCLOrFe9_xDvO_8,31254 +chardet/big5prober.py,sha256=kBxHbdetBpPe7xrlb-e990iot64g_eGSLd32lB7_h3M,1757 +chardet/chardistribution.py,sha256=3woWS62KrGooKyqz4zQSnjFbJpa6V7g02daAibTwcl8,9411 +chardet/charsetgroupprober.py,sha256=6bDu8YIiRuScX4ca9Igb0U69TA2PGXXDej6Cc4_9kO4,3787 +chardet/charsetprober.py,sha256=KSmwJErjypyj0bRZmC5F5eM7c8YQgLYIjZXintZNstg,5110 +chardet/cli/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +chardet/cli/__pycache__/__init__.cpython-37.pyc,, +chardet/cli/__pycache__/chardetect.cpython-37.pyc,, +chardet/cli/chardetect.py,sha256=YBO8L4mXo0WR6_-Fjh_8QxPBoEBNqB9oNxNrdc54AQs,2738 +chardet/codingstatemachine.py,sha256=VYp_6cyyki5sHgXDSZnXW4q1oelHc3cu9AyQTX7uug8,3590 
+chardet/compat.py,sha256=PKTzHkSbtbHDqS9PyujMbX74q1a8mMpeQTDVsQhZMRw,1134 +chardet/cp949prober.py,sha256=TZ434QX8zzBsnUvL_8wm4AQVTZ2ZkqEEQL_lNw9f9ow,1855 +chardet/enums.py,sha256=Aimwdb9as1dJKZaFNUH2OhWIVBVd6ZkJJ_WK5sNY8cU,1661 +chardet/escprober.py,sha256=kkyqVg1Yw3DIOAMJ2bdlyQgUFQhuHAW8dUGskToNWSc,3950 +chardet/escsm.py,sha256=RuXlgNvTIDarndvllNCk5WZBIpdCxQ0kcd9EAuxUh84,10510 +chardet/eucjpprober.py,sha256=iD8Jdp0ISRjgjiVN7f0e8xGeQJ5GM2oeZ1dA8nbSeUw,3749 +chardet/euckrfreq.py,sha256=-7GdmvgWez4-eO4SuXpa7tBiDi5vRXQ8WvdFAzVaSfo,13546 +chardet/euckrprober.py,sha256=MqFMTQXxW4HbzIpZ9lKDHB3GN8SP4yiHenTmf8g_PxY,1748 +chardet/euctwfreq.py,sha256=No1WyduFOgB5VITUA7PLyC5oJRNzRyMbBxaKI1l16MA,31621 +chardet/euctwprober.py,sha256=13p6EP4yRaxqnP4iHtxHOJ6R2zxHq1_m8hTRjzVZ95c,1747 +chardet/gb2312freq.py,sha256=JX8lsweKLmnCwmk8UHEQsLgkr_rP_kEbvivC4qPOrlc,20715 +chardet/gb2312prober.py,sha256=gGvIWi9WhDjE-xQXHvNIyrnLvEbMAYgyUSZ65HUfylw,1754 +chardet/hebrewprober.py,sha256=c3SZ-K7hvyzGY6JRAZxJgwJ_sUS9k0WYkvMY00YBYFo,13838 +chardet/jisfreq.py,sha256=vpmJv2Bu0J8gnMVRPHMFefTRvo_ha1mryLig8CBwgOg,25777 +chardet/jpcntx.py,sha256=PYlNqRUQT8LM3cT5FmHGP0iiscFlTWED92MALvBungo,19643 +chardet/langbulgarianmodel.py,sha256=1HqQS9Pbtnj1xQgxitJMvw8X6kKr5OockNCZWfEQrPE,12839 +chardet/langcyrillicmodel.py,sha256=LODajvsetH87yYDDQKA2CULXUH87tI223dhfjh9Zx9c,17948 +chardet/langgreekmodel.py,sha256=8YAW7bU8YwSJap0kIJSbPMw1BEqzGjWzqcqf0WgUKAA,12688 +chardet/langhebrewmodel.py,sha256=JSnqmE5E62tDLTPTvLpQsg5gOMO4PbdWRvV7Avkc0HA,11345 +chardet/langhungarianmodel.py,sha256=RhapYSG5l0ZaO-VV4Fan5sW0WRGQqhwBM61yx3yxyOA,12592 +chardet/langthaimodel.py,sha256=8l0173Gu_W6G8mxmQOTEF4ls2YdE7FxWf3QkSxEGXJQ,11290 +chardet/langturkishmodel.py,sha256=W22eRNJsqI6uWAfwXSKVWWnCerYqrI8dZQTm_M0lRFk,11102 +chardet/latin1prober.py,sha256=S2IoORhFk39FEFOlSFWtgVybRiP6h7BlLldHVclNkU8,5370 +chardet/mbcharsetprober.py,sha256=AR95eFH9vuqSfvLQZN-L5ijea25NOBCoXqw8s5O9xLQ,3413 
+chardet/mbcsgroupprober.py,sha256=h6TRnnYq2OxG1WdD5JOyxcdVpn7dG0q-vB8nWr5mbh4,2012 +chardet/mbcssm.py,sha256=SY32wVIF3HzcjY3BaEspy9metbNSKxIIB0RKPn7tjpI,25481 +chardet/sbcharsetprober.py,sha256=LDSpCldDCFlYwUkGkwD2oFxLlPWIWXT09akH_2PiY74,5657 +chardet/sbcsgroupprober.py,sha256=1IprcCB_k1qfmnxGC6MBbxELlKqD3scW6S8YIwdeyXA,3546 +chardet/sjisprober.py,sha256=IIt-lZj0WJqK4rmUZzKZP4GJlE8KUEtFYVuY96ek5MQ,3774 +chardet/universaldetector.py,sha256=qL0174lSZE442eB21nnktT9_VcAye07laFWUeUrjttY,12485 +chardet/utf8prober.py,sha256=IdD8v3zWOsB8OLiyPi-y_fqwipRFxV9Nc1eKBLSuIEw,2766 +chardet/version.py,sha256=sp3B08mrDXB-pf3K9fqJ_zeDHOCLC8RrngQyDFap_7g,242 diff --git a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/WHEEL similarity index 100% rename from Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/WHEEL rename to Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/WHEEL diff --git a/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/entry_points.txt b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/entry_points.txt new file mode 100644 index 0000000..a884269 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +chardetect = chardet.cli.chardetect:main + diff --git a/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/metadata.json new file mode 100644 index 0000000..8cdf025 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", 
"Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Linguistic"], "extensions": {"python.commands": {"wrap_console": {"chardetect": "chardet.cli.chardetect:main"}}, "python.details": {"contacts": [{"email": "dan.blanchard@gmail.com", "name": "Daniel Blanchard", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/chardet/chardet"}}, "python.exports": {"console_scripts": {"chardetect": "chardet.cli.chardetect:main"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["encoding", "i18n", "xml"], "license": "LGPL", "metadata_version": "2.0", "name": "chardet", "summary": "Universal encoding detector for Python 2 and 3", "test_requires": [{"requires": ["hypothesis", "pytest"]}], "version": "3.0.4"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/chardet-3.0.4.dist-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/chardet/__init__.py b/Shared/lib/python3.4/site-packages/chardet/__init__.py index 82c2a48..0f9f820 100644 --- a/Shared/lib/python3.4/site-packages/chardet/__init__.py +++ b/Shared/lib/python3.4/site-packages/chardet/__init__.py @@ -15,18 +15,25 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -__version__ = "2.3.0" -from sys import version_info + +from .compat import PY2, PY3 +from .universaldetector import UniversalDetector +from .version import __version__, VERSION 
-def detect(aBuf): - if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or - (version_info >= (3, 0) and not isinstance(aBuf, bytes))): - raise ValueError('Expected a bytes object, not a unicode object') +def detect(byte_str): + """ + Detect the encoding of the given byte string. - from . import universaldetector - u = universaldetector.UniversalDetector() - u.reset() - u.feed(aBuf) - u.close() - return u.result + :param byte_str: The byte sequence to examine. + :type byte_str: ``bytes`` or ``bytearray`` + """ + if not isinstance(byte_str, bytearray): + if not isinstance(byte_str, bytes): + raise TypeError('Expected object of type bytes or bytearray, got: ' + '{0}'.format(type(byte_str))) + else: + byte_str = bytearray(byte_str) + detector = UniversalDetector() + detector.feed(byte_str) + return detector.close() diff --git a/Shared/lib/python3.4/site-packages/chardet/big5freq.py b/Shared/lib/python3.4/site-packages/chardet/big5freq.py index 65bffc0..38f3251 100644 --- a/Shared/lib/python3.4/site-packages/chardet/big5freq.py +++ b/Shared/lib/python3.4/site-packages/chardet/big5freq.py @@ -45,7 +45,7 @@ BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 #Char to FreqOrder table BIG5_TABLE_SIZE = 5376 -Big5CharToFreqOrder = ( +BIG5_CHAR_TO_FREQ_ORDER = ( 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48 @@ -381,545 +381,6 @@ Big5CharToFreqOrder = ( 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328 3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360 -2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512 -#Everything below is of no interest for detection purpose 
-2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392 -2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408 -5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424 -5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440 -5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456 -5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472 -5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488 -5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504 -5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520 -5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536 -5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552 -5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568 -5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584 -5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600 -6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616 -6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632 -6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648 -6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664 -6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680 -6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696 -6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712 -6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728 
-6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744 -6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760 -6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776 -6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792 -6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808 -6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824 -6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840 -6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856 -6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872 -6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888 -6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904 -6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920 -6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936 -6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952 -6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968 -6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984 -6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000 -6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016 -6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032 -6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048 -6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064 -6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080 
-6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096 -6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112 -6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128 -6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144 -6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160 -6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176 -6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192 -6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208 -6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224 -6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240 -6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256 -3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272 -6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288 -6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304 -3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320 -6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336 -6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352 -6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368 -6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384 -6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400 -6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416 -6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432 
-4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448 -6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464 -6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480 -3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496 -6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512 -6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528 -6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544 -6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560 -6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576 -6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592 -6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608 -6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624 -6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640 -6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656 -6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672 -7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688 -7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704 -7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720 -7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736 -7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752 -7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768 -7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784 
-7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800 -7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816 -7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832 -7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848 -7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864 -7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880 -7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896 -7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912 -7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928 -7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944 -7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960 -7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976 -7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992 -7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008 -7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024 -7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040 -7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056 -7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072 -7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088 -7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104 -7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120 -7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136 
-7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152 -7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168 -7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184 -7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200 -7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216 -7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232 -7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248 -7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264 -7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280 -7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296 -7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312 -7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328 -7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344 -7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360 -7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376 -7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392 -7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408 -7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424 -7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440 -3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456 -7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472 -7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488 
-7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504 -7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520 -4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536 -7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552 -7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568 -7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584 -7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600 -7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616 -7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632 -7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648 -7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664 -7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680 -7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696 -7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712 -8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728 -8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744 -8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760 -8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776 -8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792 -8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808 -8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824 -8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840 
-8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856 -8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872 -8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888 -8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904 -8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920 -8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936 -8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952 -8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968 -8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984 -8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000 -8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016 -8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032 -8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048 -8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064 -8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080 -8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096 -8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112 -8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128 -8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144 -8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160 -8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176 -8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192 
-8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208 -8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224 -8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240 -8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256 -8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272 -8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288 -8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304 -8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320 -8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336 -8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352 -8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368 -8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384 -8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400 -8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416 -8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432 -8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448 -8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464 -8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480 -8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496 -8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512 -8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528 -8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544 
-8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560 -8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576 -8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592 -8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608 -8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624 -8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640 -8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656 -8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672 -8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688 -4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704 -8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720 -8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736 -8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752 -8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768 -9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784 -9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800 -9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816 -9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832 -9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848 -9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864 -9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880 -9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896 
-9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912 -9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928 -9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944 -9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960 -9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976 -9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992 -9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008 -9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024 -9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040 -9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056 -9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072 -9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088 -9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104 -9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120 -9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136 -9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152 -9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168 -9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184 -9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200 -9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216 -9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232 -9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248 
-9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264 -9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280 -9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296 -9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312 -9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328 -9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344 -9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360 -9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376 -3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392 -9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408 -9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424 -9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440 -4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456 -9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472 -9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488 -9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504 -9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520 -9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536 -9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552 -9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568 -9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584 -9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600 
-9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616 -9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632 -9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648 -9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664 -9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680 -9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696 -9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712 -9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728 -9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744 -9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760 -9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776 -9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792 -9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808 -9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824 -10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840 -10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856 -10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872 -10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888 -10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904 -10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920 -10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936 
-10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952 -10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968 -4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984 -10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000 -10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016 -10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032 -10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048 -10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064 -10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080 -10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096 -10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112 -4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128 -10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144 -10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160 -10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176 -10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192 -10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208 -10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224 -10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240 
-10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256 -10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272 -10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288 -10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304 -10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320 -10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336 -10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352 -10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368 -10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384 -10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400 -4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416 -10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432 -10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448 -10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464 -10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480 -10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496 -10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512 -10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528 -10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544 
-10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560 -10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576 -10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592 -10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608 -10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624 -10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640 -10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656 -10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672 -10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688 -10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704 -10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720 -10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736 -10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752 -10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768 -10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784 -10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800 -10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816 -10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832 -10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848 
-10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864 -10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880 -10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896 -11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912 -11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928 -11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944 -4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960 -11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976 -11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992 -11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008 -11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024 -11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040 -11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056 -11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072 -11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088 -11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104 -11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120 -11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136 -11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152 
-11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168 -11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184 -11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200 -11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216 -11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232 -11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248 -11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264 -11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280 -11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296 -11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312 -11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328 -11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344 -11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360 -11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376 -11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392 -11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408 -11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424 -11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440 -11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456 
-11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472 -4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488 -11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504 -11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520 -11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536 -11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552 -11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568 -11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584 -11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600 -11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616 -11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632 -11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648 -11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664 -11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680 -11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696 -11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712 -11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728 -11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744 -11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760 
-11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776 -11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792 -11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808 -11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824 -11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840 -11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856 -11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872 -11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888 -11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904 -11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920 -11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936 -12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952 -12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968 -12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984 -12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000 -12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016 -12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032 -12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048 -12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064 
-12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080 -12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096 -12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112 -12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128 -12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144 -12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160 -12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176 -4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192 -4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208 -4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224 -12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240 -12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256 -12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272 -12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288 -12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304 -12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320 -12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336 -12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352 -12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368 
-12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384 -12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400 -12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416 -12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432 -12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448 -12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464 -12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480 -12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496 -12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512 -12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528 -12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544 -12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560 -12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576 -12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592 -12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608 -12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624 -12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640 -12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656 -12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672 
-12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688 -12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704 -12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720 -12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736 -12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752 -12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768 -12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784 -12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800 -12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816 -12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832 -12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848 -12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864 -12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880 -12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896 -12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912 -12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928 -12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944 -12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960 -12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976 
-4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992 -13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008 -13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024 -13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040 -13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056 -13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072 -13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088 -13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104 -4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120 -13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136 -13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152 -13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168 -13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184 -13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200 -13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216 -13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232 -13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248 -13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264 -13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280 
-13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296 -13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312 -13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328 -13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344 -13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360 -5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376 -13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392 -13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408 -13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424 -13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440 -13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456 -13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472 -13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488 -13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504 -13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520 -13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536 -13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552 -13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568 -13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584 
-13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600 -13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616 -13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632 -13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648 -13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664 -13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680 -13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696 -13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712 -13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728 -13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744 -13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760 -13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776 -13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792 -13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808 -13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824 -13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840 -13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856 -13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872 -13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888 
-13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904 -13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920 -13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936 -13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952 -13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968 -13968,13969,13970,13971,13972) #13973 +2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 +) -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/big5prober.py b/Shared/lib/python3.4/site-packages/chardet/big5prober.py index becce81..98f9970 100644 --- a/Shared/lib/python3.4/site-packages/chardet/big5prober.py +++ b/Shared/lib/python3.4/site-packages/chardet/big5prober.py @@ -28,15 +28,20 @@ from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import Big5DistributionAnalysis -from .mbcssm import Big5SMModel +from .mbcssm import BIG5_SM_MODEL class Big5Prober(MultiByteCharSetProber): def __init__(self): - MultiByteCharSetProber.__init__(self) - self._mCodingSM = CodingStateMachine(Big5SMModel) - self._mDistributionAnalyzer = Big5DistributionAnalysis() + super(Big5Prober, self).__init__() + self.coding_sm = CodingStateMachine(BIG5_SM_MODEL) + self.distribution_analyzer = Big5DistributionAnalysis() self.reset() - def get_charset_name(self): + @property + def charset_name(self): return "Big5" + + @property + def language(self): + return "Chinese" diff --git a/Shared/lib/python3.4/site-packages/chardet/chardistribution.py b/Shared/lib/python3.4/site-packages/chardet/chardistribution.py index 4e64a00..c0395f4 100644 --- a/Shared/lib/python3.4/site-packages/chardet/chardistribution.py +++ 
b/Shared/lib/python3.4/site-packages/chardet/chardistribution.py @@ -25,82 +25,84 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE, +from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE, EUCTW_TYPICAL_DISTRIBUTION_RATIO) -from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE, +from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE, EUCKR_TYPICAL_DISTRIBUTION_RATIO) -from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE, +from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE, GB2312_TYPICAL_DISTRIBUTION_RATIO) -from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE, +from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE, BIG5_TYPICAL_DISTRIBUTION_RATIO) -from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE, +from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE, JIS_TYPICAL_DISTRIBUTION_RATIO) -from .compat import wrap_ord - -ENOUGH_DATA_THRESHOLD = 1024 -SURE_YES = 0.99 -SURE_NO = 0.01 -MINIMUM_DATA_THRESHOLD = 3 -class CharDistributionAnalysis: +class CharDistributionAnalysis(object): + ENOUGH_DATA_THRESHOLD = 1024 + SURE_YES = 0.99 + SURE_NO = 0.01 + MINIMUM_DATA_THRESHOLD = 3 + def __init__(self): # Mapping table to get frequency order from char order (get from # GetOrder()) - self._mCharToFreqOrder = None - self._mTableSize = None # Size of above table + self._char_to_freq_order = None + self._table_size = None # Size of above table # This is a constant value which varies from language to language, # used in calculating confidence. See # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html # for further detail. 
- self._mTypicalDistributionRatio = None + self.typical_distribution_ratio = None + self._done = None + self._total_chars = None + self._freq_chars = None self.reset() def reset(self): """reset analyser, clear any state""" # If this flag is set to True, detection is done and conclusion has # been made - self._mDone = False - self._mTotalChars = 0 # Total characters encountered + self._done = False + self._total_chars = 0 # Total characters encountered # The number of characters whose frequency order is less than 512 - self._mFreqChars = 0 + self._freq_chars = 0 - def feed(self, aBuf, aCharLen): + def feed(self, char, char_len): """feed a character with known length""" - if aCharLen == 2: + if char_len == 2: # we only care about 2-bytes character in our distribution analysis - order = self.get_order(aBuf) + order = self.get_order(char) else: order = -1 if order >= 0: - self._mTotalChars += 1 + self._total_chars += 1 # order is valid - if order < self._mTableSize: - if 512 > self._mCharToFreqOrder[order]: - self._mFreqChars += 1 + if order < self._table_size: + if 512 > self._char_to_freq_order[order]: + self._freq_chars += 1 def get_confidence(self): """return confidence based on existing data""" # if we didn't receive any character in our consideration range, # return negative answer - if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD: - return SURE_NO + if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD: + return self.SURE_NO - if self._mTotalChars != self._mFreqChars: - r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars) - * self._mTypicalDistributionRatio)) - if r < SURE_YES: + if self._total_chars != self._freq_chars: + r = (self._freq_chars / ((self._total_chars - self._freq_chars) + * self.typical_distribution_ratio)) + if r < self.SURE_YES: return r # normalize confidence (we don't want to be 100% sure) - return SURE_YES + return self.SURE_YES def got_enough_data(self): # It is not necessary to 
receive all data to draw conclusion. # For charset detection, certain amount of data is enough - return self._mTotalChars > ENOUGH_DATA_THRESHOLD + return self._total_chars > self.ENOUGH_DATA_THRESHOLD - def get_order(self, aBuf): + def get_order(self, byte_str): # We do not handle characters based on the original encoding string, # but convert this encoding string to a number, here called order. # This allows multiple encodings of a language to share one frequency @@ -110,55 +112,55 @@ class CharDistributionAnalysis: class EUCTWDistributionAnalysis(CharDistributionAnalysis): def __init__(self): - CharDistributionAnalysis.__init__(self) - self._mCharToFreqOrder = EUCTWCharToFreqOrder - self._mTableSize = EUCTW_TABLE_SIZE - self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO + super(EUCTWDistributionAnalysis, self).__init__() + self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER + self._table_size = EUCTW_TABLE_SIZE + self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aBuf): + def get_order(self, byte_str): # for euc-TW encoding, we are interested # first byte range: 0xc4 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. 
State machine has done that - first_char = wrap_ord(aBuf[0]) + first_char = byte_str[0] if first_char >= 0xC4: - return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1 + return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1 else: return -1 class EUCKRDistributionAnalysis(CharDistributionAnalysis): def __init__(self): - CharDistributionAnalysis.__init__(self) - self._mCharToFreqOrder = EUCKRCharToFreqOrder - self._mTableSize = EUCKR_TABLE_SIZE - self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO + super(EUCKRDistributionAnalysis, self).__init__() + self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER + self._table_size = EUCKR_TABLE_SIZE + self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aBuf): + def get_order(self, byte_str): # for euc-KR encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that - first_char = wrap_ord(aBuf[0]) + first_char = byte_str[0] if first_char >= 0xB0: - return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1 + return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1 else: return -1 class GB2312DistributionAnalysis(CharDistributionAnalysis): def __init__(self): - CharDistributionAnalysis.__init__(self) - self._mCharToFreqOrder = GB2312CharToFreqOrder - self._mTableSize = GB2312_TABLE_SIZE - self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO + super(GB2312DistributionAnalysis, self).__init__() + self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER + self._table_size = GB2312_TABLE_SIZE + self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aBuf): + def get_order(self, byte_str): # for GB2312 encoding, we are interested # first byte range: 0xb0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. 
State machine has done that - first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) + first_char, second_char = byte_str[0], byte_str[1] if (first_char >= 0xB0) and (second_char >= 0xA1): return 94 * (first_char - 0xB0) + second_char - 0xA1 else: @@ -167,17 +169,17 @@ class GB2312DistributionAnalysis(CharDistributionAnalysis): class Big5DistributionAnalysis(CharDistributionAnalysis): def __init__(self): - CharDistributionAnalysis.__init__(self) - self._mCharToFreqOrder = Big5CharToFreqOrder - self._mTableSize = BIG5_TABLE_SIZE - self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO + super(Big5DistributionAnalysis, self).__init__() + self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER + self._table_size = BIG5_TABLE_SIZE + self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aBuf): + def get_order(self, byte_str): # for big5 encoding, we are interested # first byte range: 0xa4 -- 0xfe # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe # no validation needed here. 
State machine has done that - first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) + first_char, second_char = byte_str[0], byte_str[1] if first_char >= 0xA4: if second_char >= 0xA1: return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63 @@ -189,17 +191,17 @@ class Big5DistributionAnalysis(CharDistributionAnalysis): class SJISDistributionAnalysis(CharDistributionAnalysis): def __init__(self): - CharDistributionAnalysis.__init__(self) - self._mCharToFreqOrder = JISCharToFreqOrder - self._mTableSize = JIS_TABLE_SIZE - self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO + super(SJISDistributionAnalysis, self).__init__() + self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER + self._table_size = JIS_TABLE_SIZE + self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aBuf): + def get_order(self, byte_str): # for sjis encoding, we are interested # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe # second byte range: 0x40 -- 0x7e, 0x81 -- oxfe # no validation needed here. 
State machine has done that - first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1]) + first_char, second_char = byte_str[0], byte_str[1] if (first_char >= 0x81) and (first_char <= 0x9F): order = 188 * (first_char - 0x81) elif (first_char >= 0xE0) and (first_char <= 0xEF): @@ -214,18 +216,18 @@ class SJISDistributionAnalysis(CharDistributionAnalysis): class EUCJPDistributionAnalysis(CharDistributionAnalysis): def __init__(self): - CharDistributionAnalysis.__init__(self) - self._mCharToFreqOrder = JISCharToFreqOrder - self._mTableSize = JIS_TABLE_SIZE - self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO + super(EUCJPDistributionAnalysis, self).__init__() + self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER + self._table_size = JIS_TABLE_SIZE + self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO - def get_order(self, aBuf): + def get_order(self, byte_str): # for euc-JP encoding, we are interested # first byte range: 0xa0 -- 0xfe # second byte range: 0xa1 -- 0xfe # no validation needed here. State machine has done that - char = wrap_ord(aBuf[0]) + char = byte_str[0] if char >= 0xA0: - return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1 + return 94 * (char - 0xA1) + byte_str[1] - 0xa1 else: return -1 diff --git a/Shared/lib/python3.4/site-packages/chardet/charsetgroupprober.py b/Shared/lib/python3.4/site-packages/chardet/charsetgroupprober.py index 85e7a1c..8b3738e 100644 --- a/Shared/lib/python3.4/site-packages/chardet/charsetgroupprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/charsetgroupprober.py @@ -1,11 +1,11 @@ ######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. -# +# # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. 
-# +# # Contributor(s): # Mark Pilgrim - port to Python # @@ -13,94 +13,94 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from . import constants -import sys +from .enums import ProbingState from .charsetprober import CharSetProber class CharSetGroupProber(CharSetProber): - def __init__(self): - CharSetProber.__init__(self) - self._mActiveNum = 0 - self._mProbers = [] - self._mBestGuessProber = None + def __init__(self, lang_filter=None): + super(CharSetGroupProber, self).__init__(lang_filter=lang_filter) + self._active_num = 0 + self.probers = [] + self._best_guess_prober = None def reset(self): - CharSetProber.reset(self) - self._mActiveNum = 0 - for prober in self._mProbers: + super(CharSetGroupProber, self).reset() + self._active_num = 0 + for prober in self.probers: if prober: prober.reset() prober.active = True - self._mActiveNum += 1 - self._mBestGuessProber = None + self._active_num += 1 + self._best_guess_prober = None - def get_charset_name(self): - if not self._mBestGuessProber: + @property + def charset_name(self): + if not self._best_guess_prober: self.get_confidence() - if not self._mBestGuessProber: + if not self._best_guess_prober: return None -# self._mBestGuessProber = self._mProbers[0] - return self._mBestGuessProber.get_charset_name() + return 
self._best_guess_prober.charset_name - def feed(self, aBuf): - for prober in self._mProbers: + @property + def language(self): + if not self._best_guess_prober: + self.get_confidence() + if not self._best_guess_prober: + return None + return self._best_guess_prober.language + + def feed(self, byte_str): + for prober in self.probers: if not prober: continue if not prober.active: continue - st = prober.feed(aBuf) - if not st: + state = prober.feed(byte_str) + if not state: continue - if st == constants.eFoundIt: - self._mBestGuessProber = prober - return self.get_state() - elif st == constants.eNotMe: + if state == ProbingState.FOUND_IT: + self._best_guess_prober = prober + return self.state + elif state == ProbingState.NOT_ME: prober.active = False - self._mActiveNum -= 1 - if self._mActiveNum <= 0: - self._mState = constants.eNotMe - return self.get_state() - return self.get_state() + self._active_num -= 1 + if self._active_num <= 0: + self._state = ProbingState.NOT_ME + return self.state + return self.state def get_confidence(self): - st = self.get_state() - if st == constants.eFoundIt: + state = self.state + if state == ProbingState.FOUND_IT: return 0.99 - elif st == constants.eNotMe: + elif state == ProbingState.NOT_ME: return 0.01 - bestConf = 0.0 - self._mBestGuessProber = None - for prober in self._mProbers: + best_conf = 0.0 + self._best_guess_prober = None + for prober in self.probers: if not prober: continue if not prober.active: - if constants._debug: - sys.stderr.write(prober.get_charset_name() - + ' not active\n') + self.logger.debug('%s not active', prober.charset_name) continue - cf = prober.get_confidence() - if constants._debug: - sys.stderr.write('%s confidence = %s\n' % - (prober.get_charset_name(), cf)) - if bestConf < cf: - bestConf = cf - self._mBestGuessProber = prober - if not self._mBestGuessProber: + conf = prober.get_confidence() + self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf) + if best_conf < conf: 
+ best_conf = conf + self._best_guess_prober = prober + if not self._best_guess_prober: return 0.0 - return bestConf -# else: -# self._mBestGuessProber = self._mProbers[0] -# return self._mBestGuessProber.get_confidence() + return best_conf diff --git a/Shared/lib/python3.4/site-packages/chardet/charsetprober.py b/Shared/lib/python3.4/site-packages/chardet/charsetprober.py index 9758171..eac4e59 100644 --- a/Shared/lib/python3.4/site-packages/chardet/charsetprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/charsetprober.py @@ -26,37 +26,120 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from . import constants +import logging import re +from .enums import ProbingState -class CharSetProber: - def __init__(self): - pass + +class CharSetProber(object): + + SHORTCUT_THRESHOLD = 0.95 + + def __init__(self, lang_filter=None): + self._state = None + self.lang_filter = lang_filter + self.logger = logging.getLogger(__name__) def reset(self): - self._mState = constants.eDetecting + self._state = ProbingState.DETECTING - def get_charset_name(self): + @property + def charset_name(self): return None - def feed(self, aBuf): + def feed(self, buf): pass - def get_state(self): - return self._mState + @property + def state(self): + return self._state def get_confidence(self): return 0.0 - def filter_high_bit_only(self, aBuf): - aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf) - return aBuf + @staticmethod + def filter_high_byte_only(buf): + buf = re.sub(b'([\x00-\x7F])+', b' ', buf) + return buf - def filter_without_english_letters(self, aBuf): - aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf) - return aBuf + @staticmethod + def filter_international_words(buf): + """ + We define three types of bytes: + alphabet: english alphabets [a-zA-Z] + international: international characters [\x80-\xFF] + marker: everything else [^a-zA-Z\x80-\xFF] - def filter_with_english_letters(self, aBuf): - # TODO - return aBuf + The input buffer can be thought to 
contain a series of words delimited + by markers. This function works to filter all words that contain at + least one international character. All contiguous sequences of markers + are replaced by a single space ascii character. + + This filter applies to all scripts which do not use English characters. + """ + filtered = bytearray() + + # This regex expression filters out only words that have at-least one + # international character. The word may include one marker character at + # the end. + words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?', + buf) + + for word in words: + filtered.extend(word[:-1]) + + # If the last character in the word is a marker, replace it with a + # space as markers shouldn't affect our analysis (they are used + # similarly across all languages and may thus have similar + # frequencies). + last_char = word[-1:] + if not last_char.isalpha() and last_char < b'\x80': + last_char = b' ' + filtered.extend(last_char) + + return filtered + + @staticmethod + def filter_with_english_letters(buf): + """ + Returns a copy of ``buf`` that retains only the sequences of English + alphabet and high byte characters that are not between <> characters. + Also retains English alphabet and high byte characters immediately + before occurrences of >. + + This filter can be applied to all scripts which contain both English + characters and extended ASCII characters, but is currently only used by + ``Latin1Prober``. + """ + filtered = bytearray() + in_tag = False + prev = 0 + + for curr in range(len(buf)): + # Slice here to get bytes instead of an int with Python 3 + buf_char = buf[curr:curr + 1] + # Check if we're coming out of or entering an HTML tag + if buf_char == b'>': + in_tag = False + elif buf_char == b'<': + in_tag = True + + # If current character is not extended-ASCII and not alphabetic... 
+ if buf_char < b'\x80' and not buf_char.isalpha(): + # ...and we're not in a tag + if curr > prev and not in_tag: + # Keep everything after last non-extended-ASCII, + # non-alphabetic character + filtered.extend(buf[prev:curr]) + # Output a space to delimit stretch we kept + filtered.extend(b' ') + prev = curr + 1 + + # If we're not in a tag... + if not in_tag: + # Keep everything after last non-extended-ASCII, non-alphabetic + # character + filtered.extend(buf[prev:]) + + return filtered diff --git a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/chardet/cli/__init__.py similarity index 100% rename from Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/chardet/cli/__init__.py diff --git a/Shared/lib/python3.4/site-packages/chardet/chardetect.py b/Shared/lib/python3.4/site-packages/chardet/cli/chardetect.py similarity index 83% rename from Shared/lib/python3.4/site-packages/chardet/chardetect.py rename to Shared/lib/python3.4/site-packages/chardet/cli/chardetect.py index ffe892f..f0a4cc5 100644 --- a/Shared/lib/python3.4/site-packages/chardet/chardetect.py +++ b/Shared/lib/python3.4/site-packages/chardet/cli/chardetect.py @@ -17,9 +17,9 @@ from __future__ import absolute_import, print_function, unicode_literals import argparse import sys -from io import open from chardet import __version__ +from chardet.compat import PY2 from chardet.universaldetector import UniversalDetector @@ -35,9 +35,15 @@ def description_of(lines, name='stdin'): """ u = UniversalDetector() for line in lines: + line = bytearray(line) u.feed(line) + # shortcut out of the loop to save reading further - particularly useful if we read a BOM. 
+ if u.done: + break u.close() result = u.result + if PY2: + name = name.decode(sys.getfilesystemencoding(), 'ignore') if result['encoding']: return '{0}: {1} with confidence {2}'.format(name, result['encoding'], result['confidence']) @@ -46,23 +52,22 @@ def description_of(lines, name='stdin'): def main(argv=None): - ''' + """ Handles command line arguments and gets things started. :param argv: List of arguments, as if specified on the command-line. If None, ``sys.argv[1:]`` is used instead. :type argv: list of str - ''' + """ # Get command line arguments parser = argparse.ArgumentParser( description="Takes one or more file paths and reports their detected \ - encodings", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - conflict_handler='resolve') + encodings") parser.add_argument('input', - help='File whose encoding we would like to determine.', + help='File whose encoding we would like to determine. \ + (default: stdin)', type=argparse.FileType('rb'), nargs='*', - default=[sys.stdin]) + default=[sys.stdin if PY2 else sys.stdin.buffer]) parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__)) args = parser.parse_args(argv) diff --git a/Shared/lib/python3.4/site-packages/chardet/codingstatemachine.py b/Shared/lib/python3.4/site-packages/chardet/codingstatemachine.py index 8dd8c91..68fba44 100644 --- a/Shared/lib/python3.4/site-packages/chardet/codingstatemachine.py +++ b/Shared/lib/python3.4/site-packages/chardet/codingstatemachine.py @@ -25,37 +25,64 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .constants import eStart -from .compat import wrap_ord +import logging + +from .enums import MachineState -class CodingStateMachine: +class CodingStateMachine(object): + """ + A state machine to verify a byte sequence for a particular encoding. For + each byte the detector receives, it will feed that byte to every active + state machine available, one byte at a time. 
The state machine changes its + state based on its previous state and the byte it receives. There are 3 + states in a state machine that are of interest to an auto-detector: + + START state: This is the state to start with, or a legal byte sequence + (i.e. a valid code point) for character has been identified. + + ME state: This indicates that the state machine identified a byte sequence + that is specific to the charset it is designed for and that + there is no other possible encoding which can contain this byte + sequence. This will to lead to an immediate positive answer for + the detector. + + ERROR state: This indicates the state machine identified an illegal byte + sequence for that encoding. This will lead to an immediate + negative answer for this encoding. Detector will exclude this + encoding from consideration from here on. + """ def __init__(self, sm): - self._mModel = sm - self._mCurrentBytePos = 0 - self._mCurrentCharLen = 0 + self._model = sm + self._curr_byte_pos = 0 + self._curr_char_len = 0 + self._curr_state = None + self.logger = logging.getLogger(__name__) self.reset() def reset(self): - self._mCurrentState = eStart + self._curr_state = MachineState.START def next_state(self, c): # for each byte we get its class # if it is first byte, we also get byte length - # PY3K: aBuf is a byte stream, so c is an int, not a byte - byteCls = self._mModel['classTable'][wrap_ord(c)] - if self._mCurrentState == eStart: - self._mCurrentBytePos = 0 - self._mCurrentCharLen = self._mModel['charLenTable'][byteCls] - # from byte's class and stateTable, we get its next state - curr_state = (self._mCurrentState * self._mModel['classFactor'] - + byteCls) - self._mCurrentState = self._mModel['stateTable'][curr_state] - self._mCurrentBytePos += 1 - return self._mCurrentState + byte_class = self._model['class_table'][c] + if self._curr_state == MachineState.START: + self._curr_byte_pos = 0 + self._curr_char_len = self._model['char_len_table'][byte_class] + # from byte's 
class and state_table, we get its next state + curr_state = (self._curr_state * self._model['class_factor'] + + byte_class) + self._curr_state = self._model['state_table'][curr_state] + self._curr_byte_pos += 1 + return self._curr_state def get_current_charlen(self): - return self._mCurrentCharLen + return self._curr_char_len def get_coding_state_machine(self): - return self._mModel['name'] + return self._model['name'] + + @property + def language(self): + return self._model['language'] diff --git a/Shared/lib/python3.4/site-packages/chardet/compat.py b/Shared/lib/python3.4/site-packages/chardet/compat.py index d9e30ad..ddd7468 100644 --- a/Shared/lib/python3.4/site-packages/chardet/compat.py +++ b/Shared/lib/python3.4/site-packages/chardet/compat.py @@ -1,6 +1,7 @@ ######################## BEGIN LICENSE BLOCK ######################## # Contributor(s): -# Ian Cordasco - port to Python +# Dan Blanchard +# Ian Cordasco # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public @@ -22,13 +23,12 @@ import sys if sys.version_info < (3, 0): + PY2 = True + PY3 = False base_str = (str, unicode) + text_type = unicode else: + PY2 = False + PY3 = True base_str = (bytes, str) - - -def wrap_ord(a): - if sys.version_info < (3, 0) and isinstance(a, base_str): - return ord(a) - else: - return a + text_type = str diff --git a/Shared/lib/python3.4/site-packages/chardet/constants.py b/Shared/lib/python3.4/site-packages/chardet/constants.py deleted file mode 100644 index e4d148b..0000000 --- a/Shared/lib/python3.4/site-packages/chardet/constants.py +++ /dev/null @@ -1,39 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. 
-# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -_debug = 0 - -eDetecting = 0 -eFoundIt = 1 -eNotMe = 2 - -eStart = 0 -eError = 1 -eItsMe = 2 - -SHORTCUT_THRESHOLD = 0.95 diff --git a/Shared/lib/python3.4/site-packages/chardet/cp949prober.py b/Shared/lib/python3.4/site-packages/chardet/cp949prober.py index ff4272f..efd793a 100644 --- a/Shared/lib/python3.4/site-packages/chardet/cp949prober.py +++ b/Shared/lib/python3.4/site-packages/chardet/cp949prober.py @@ -25,20 +25,25 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine from .chardistribution import EUCKRDistributionAnalysis -from .mbcssm import CP949SMModel +from .codingstatemachine import CodingStateMachine +from .mbcharsetprober import MultiByteCharSetProber +from .mbcssm import CP949_SM_MODEL class CP949Prober(MultiByteCharSetProber): def __init__(self): - MultiByteCharSetProber.__init__(self) - self._mCodingSM = CodingStateMachine(CP949SMModel) + super(CP949Prober, self).__init__() + self.coding_sm = 
CodingStateMachine(CP949_SM_MODEL) # NOTE: CP949 is a superset of EUC-KR, so the distribution should be # not different. - self._mDistributionAnalyzer = EUCKRDistributionAnalysis() + self.distribution_analyzer = EUCKRDistributionAnalysis() self.reset() - def get_charset_name(self): + @property + def charset_name(self): return "CP949" + + @property + def language(self): + return "Korean" diff --git a/Shared/lib/python3.4/site-packages/chardet/enums.py b/Shared/lib/python3.4/site-packages/chardet/enums.py new file mode 100644 index 0000000..0451207 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/chardet/enums.py @@ -0,0 +1,76 @@ +""" +All of the Enums that are used throughout the chardet package. + +:author: Dan Blanchard (dan.blanchard@gmail.com) +""" + + +class InputState(object): + """ + This enum represents the different states a universal detector can be in. + """ + PURE_ASCII = 0 + ESC_ASCII = 1 + HIGH_BYTE = 2 + + +class LanguageFilter(object): + """ + This enum represents the different language filters we can apply to a + ``UniversalDetector``. + """ + CHINESE_SIMPLIFIED = 0x01 + CHINESE_TRADITIONAL = 0x02 + JAPANESE = 0x04 + KOREAN = 0x08 + NON_CJK = 0x10 + ALL = 0x1F + CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL + CJK = CHINESE | JAPANESE | KOREAN + + +class ProbingState(object): + """ + This enum represents the different states a prober can be in. + """ + DETECTING = 0 + FOUND_IT = 1 + NOT_ME = 2 + + +class MachineState(object): + """ + This enum represents the different states a state machine can be in. + """ + START = 0 + ERROR = 1 + ITS_ME = 2 + + +class SequenceLikelihood(object): + """ + This enum represents the likelihood of a character following the previous one. 
+ """ + NEGATIVE = 0 + UNLIKELY = 1 + LIKELY = 2 + POSITIVE = 3 + + @classmethod + def get_num_categories(cls): + """:returns: The number of likelihood categories in the enum.""" + return 4 + + +class CharacterCategory(object): + """ + This enum represents the different categories language models for + ``SingleByteCharsetProber`` put characters into. + + Anything less than CONTROL is considered a letter. + """ + UNDEFINED = 255 + LINE_BREAK = 254 + SYMBOL = 253 + DIGIT = 252 + CONTROL = 251 diff --git a/Shared/lib/python3.4/site-packages/chardet/escprober.py b/Shared/lib/python3.4/site-packages/chardet/escprober.py index 80a844f..c70493f 100644 --- a/Shared/lib/python3.4/site-packages/chardet/escprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/escprober.py @@ -25,62 +25,77 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from . import constants -from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel, - ISO2022KRSMModel) from .charsetprober import CharSetProber from .codingstatemachine import CodingStateMachine -from .compat import wrap_ord +from .enums import LanguageFilter, ProbingState, MachineState +from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL, + ISO2022KR_SM_MODEL) class EscCharSetProber(CharSetProber): - def __init__(self): - CharSetProber.__init__(self) - self._mCodingSM = [ - CodingStateMachine(HZSMModel), - CodingStateMachine(ISO2022CNSMModel), - CodingStateMachine(ISO2022JPSMModel), - CodingStateMachine(ISO2022KRSMModel) - ] + """ + This CharSetProber uses a "code scheme" approach for detecting encodings, + whereby easily recognizable escape or shift sequences are relied on to + identify these encodings. 
+ """ + + def __init__(self, lang_filter=None): + super(EscCharSetProber, self).__init__(lang_filter=lang_filter) + self.coding_sm = [] + if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED: + self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL)) + self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL)) + if self.lang_filter & LanguageFilter.JAPANESE: + self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL)) + if self.lang_filter & LanguageFilter.KOREAN: + self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL)) + self.active_sm_count = None + self._detected_charset = None + self._detected_language = None + self._state = None self.reset() def reset(self): - CharSetProber.reset(self) - for codingSM in self._mCodingSM: - if not codingSM: + super(EscCharSetProber, self).reset() + for coding_sm in self.coding_sm: + if not coding_sm: continue - codingSM.active = True - codingSM.reset() - self._mActiveSM = len(self._mCodingSM) - self._mDetectedCharset = None + coding_sm.active = True + coding_sm.reset() + self.active_sm_count = len(self.coding_sm) + self._detected_charset = None + self._detected_language = None - def get_charset_name(self): - return self._mDetectedCharset + @property + def charset_name(self): + return self._detected_charset + + @property + def language(self): + return self._detected_language def get_confidence(self): - if self._mDetectedCharset: + if self._detected_charset: return 0.99 else: return 0.00 - def feed(self, aBuf): - for c in aBuf: - # PY3K: aBuf is a byte array, so c is an int, not a byte - for codingSM in self._mCodingSM: - if not codingSM: + def feed(self, byte_str): + for c in byte_str: + for coding_sm in self.coding_sm: + if not coding_sm or not coding_sm.active: continue - if not codingSM.active: - continue - codingState = codingSM.next_state(wrap_ord(c)) - if codingState == constants.eError: - codingSM.active = False - self._mActiveSM -= 1 - if self._mActiveSM <= 0: - self._mState = constants.eNotMe - return 
self.get_state() - elif codingState == constants.eItsMe: - self._mState = constants.eFoundIt - self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8 - return self.get_state() + coding_state = coding_sm.next_state(c) + if coding_state == MachineState.ERROR: + coding_sm.active = False + self.active_sm_count -= 1 + if self.active_sm_count <= 0: + self._state = ProbingState.NOT_ME + return self.state + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT + self._detected_charset = coding_sm.get_coding_state_machine() + self._detected_language = coding_sm.language + return self.state - return self.get_state() + return self.state diff --git a/Shared/lib/python3.4/site-packages/chardet/escsm.py b/Shared/lib/python3.4/site-packages/chardet/escsm.py index bd302b4..0069523 100644 --- a/Shared/lib/python3.4/site-packages/chardet/escsm.py +++ b/Shared/lib/python3.4/site-packages/chardet/escsm.py @@ -25,9 +25,9 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .constants import eStart, eError, eItsMe +from .enums import MachineState -HZ_cls = ( +HZ_CLS = ( 1,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 @@ -62,24 +62,25 @@ HZ_cls = ( 1,1,1,1,1,1,1,1, # f8 - ff ) -HZ_st = ( -eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07 -eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f -eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17 - 5,eError, 6,eError, 5, 5, 4,eError,# 18-1f - 4,eError, 4, 4, 4,eError, 4,eError,# 20-27 - 4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f +HZ_ST = ( +MachineState.START,MachineState.ERROR, 3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f 
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START, 4,MachineState.ERROR,# 10-17 + 5,MachineState.ERROR, 6,MachineState.ERROR, 5, 5, 4,MachineState.ERROR,# 18-1f + 4,MachineState.ERROR, 4, 4, 4,MachineState.ERROR, 4,MachineState.ERROR,# 20-27 + 4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f ) -HZCharLenTable = (0, 0, 0, 0, 0, 0) +HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0) -HZSMModel = {'classTable': HZ_cls, - 'classFactor': 6, - 'stateTable': HZ_st, - 'charLenTable': HZCharLenTable, - 'name': "HZ-GB-2312"} +HZ_SM_MODEL = {'class_table': HZ_CLS, + 'class_factor': 6, + 'state_table': HZ_ST, + 'char_len_table': HZ_CHAR_LEN_TABLE, + 'name': "HZ-GB-2312", + 'language': 'Chinese'} -ISO2022CN_cls = ( +ISO2022CN_CLS = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 @@ -114,26 +115,27 @@ ISO2022CN_cls = ( 2,2,2,2,2,2,2,2, # f8 - ff ) -ISO2022CN_st = ( -eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 -eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f -eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 -eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f -eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27 - 5, 6,eError,eError,eError,eError,eError,eError,# 28-2f -eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37 -eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f +ISO2022CN_ST = ( +MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07 +MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f 
+MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17 +MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,# 18-1f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27 + 5, 6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f ) -ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0) +ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0) -ISO2022CNSMModel = {'classTable': ISO2022CN_cls, - 'classFactor': 9, - 'stateTable': ISO2022CN_st, - 'charLenTable': ISO2022CNCharLenTable, - 'name': "ISO-2022-CN"} +ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS, + 'class_factor': 9, + 'state_table': ISO2022CN_ST, + 'char_len_table': ISO2022CN_CHAR_LEN_TABLE, + 'name': "ISO-2022-CN", + 'language': 'Chinese'} -ISO2022JP_cls = ( +ISO2022JP_CLS = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,2,2, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 @@ -168,27 +170,28 @@ ISO2022JP_cls = ( 2,2,2,2,2,2,2,2, # f8 - ff ) -ISO2022JP_st = ( -eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07 -eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f -eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17 -eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f -eError, 5,eError,eError,eError, 4,eError,eError,# 20-27 -eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f 
-eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37 -eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f -eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47 +ISO2022JP_ST = ( +MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07 +MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17 +MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f +MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 20-27 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47 ) -ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0) +ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0) -ISO2022JPSMModel = {'classTable': ISO2022JP_cls, - 'classFactor': 10, - 'stateTable': ISO2022JP_st, - 'charLenTable': ISO2022JPCharLenTable, - 'name': "ISO-2022-JP"} +ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS, + 'class_factor': 10, + 'state_table': 
ISO2022JP_ST, + 'char_len_table': ISO2022JP_CHAR_LEN_TABLE, + 'name': "ISO-2022-JP", + 'language': 'Japanese'} -ISO2022KR_cls = ( +ISO2022KR_CLS = ( 2,0,0,0,0,0,0,0, # 00 - 07 0,0,0,0,0,0,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 @@ -223,20 +226,21 @@ ISO2022KR_cls = ( 2,2,2,2,2,2,2,2, # f8 - ff ) -ISO2022KR_st = ( -eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07 -eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f -eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17 -eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f -eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27 +ISO2022KR_ST = ( +MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f +MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 10-17 +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f +MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27 ) -ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0) +ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0) + +ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS, + 'class_factor': 6, + 'state_table': ISO2022KR_ST, + 'char_len_table': ISO2022KR_CHAR_LEN_TABLE, + 'name': "ISO-2022-KR", + 'language': 'Korean'} -ISO2022KRSMModel = {'classTable': ISO2022KR_cls, - 'classFactor': 6, - 'stateTable': ISO2022KR_st, - 'charLenTable': ISO2022KRCharLenTable, - 'name': "ISO-2022-KR"} -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/eucjpprober.py 
b/Shared/lib/python3.4/site-packages/chardet/eucjpprober.py index 8e64fdc..20ce8f7 100644 --- a/Shared/lib/python3.4/site-packages/chardet/eucjpprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/eucjpprober.py @@ -25,66 +25,68 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import sys -from . import constants +from .enums import ProbingState, MachineState from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCJPDistributionAnalysis from .jpcntx import EUCJPContextAnalysis -from .mbcssm import EUCJPSMModel +from .mbcssm import EUCJP_SM_MODEL class EUCJPProber(MultiByteCharSetProber): def __init__(self): - MultiByteCharSetProber.__init__(self) - self._mCodingSM = CodingStateMachine(EUCJPSMModel) - self._mDistributionAnalyzer = EUCJPDistributionAnalysis() - self._mContextAnalyzer = EUCJPContextAnalysis() + super(EUCJPProber, self).__init__() + self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL) + self.distribution_analyzer = EUCJPDistributionAnalysis() + self.context_analyzer = EUCJPContextAnalysis() self.reset() def reset(self): - MultiByteCharSetProber.reset(self) - self._mContextAnalyzer.reset() + super(EUCJPProber, self).reset() + self.context_analyzer.reset() - def get_charset_name(self): + @property + def charset_name(self): return "EUC-JP" - def feed(self, aBuf): - aLen = len(aBuf) - for i in range(0, aLen): - # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte - codingState = self._mCodingSM.next_state(aBuf[i]) - if codingState == constants.eError: - if constants._debug: - sys.stderr.write(self.get_charset_name() - + ' prober hit error at byte ' + str(i) - + '\n') - self._mState = constants.eNotMe + @property + def language(self): + return "Japanese" + + def feed(self, byte_str): + for i in range(len(byte_str)): + # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte + coding_state = 
self.coding_sm.next_state(byte_str[i]) + if coding_state == MachineState.ERROR: + self.logger.debug('%s %s prober hit error at byte %s', + self.charset_name, self.language, i) + self._state = ProbingState.NOT_ME break - elif codingState == constants.eItsMe: - self._mState = constants.eFoundIt + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT break - elif codingState == constants.eStart: - charLen = self._mCodingSM.get_current_charlen() + elif coding_state == MachineState.START: + char_len = self.coding_sm.get_current_charlen() if i == 0: - self._mLastChar[1] = aBuf[0] - self._mContextAnalyzer.feed(self._mLastChar, charLen) - self._mDistributionAnalyzer.feed(self._mLastChar, charLen) + self._last_char[1] = byte_str[0] + self.context_analyzer.feed(self._last_char, char_len) + self.distribution_analyzer.feed(self._last_char, char_len) else: - self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen) - self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], - charLen) + self.context_analyzer.feed(byte_str[i - 1:i + 1], + char_len) + self.distribution_analyzer.feed(byte_str[i - 1:i + 1], + char_len) - self._mLastChar[0] = aBuf[aLen - 1] + self._last_char[0] = byte_str[-1] - if self.get_state() == constants.eDetecting: - if (self._mContextAnalyzer.got_enough_data() and - (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): - self._mState = constants.eFoundIt + if self.state == ProbingState.DETECTING: + if (self.context_analyzer.got_enough_data() and + (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + self._state = ProbingState.FOUND_IT - return self.get_state() + return self.state def get_confidence(self): - contxtCf = self._mContextAnalyzer.get_confidence() - distribCf = self._mDistributionAnalyzer.get_confidence() - return max(contxtCf, distribCf) + context_conf = self.context_analyzer.get_confidence() + distrib_conf = self.distribution_analyzer.get_confidence() + return max(context_conf, distrib_conf) diff --git 
a/Shared/lib/python3.4/site-packages/chardet/euckrfreq.py b/Shared/lib/python3.4/site-packages/chardet/euckrfreq.py index a179e4c..b68078c 100644 --- a/Shared/lib/python3.4/site-packages/chardet/euckrfreq.py +++ b/Shared/lib/python3.4/site-packages/chardet/euckrfreq.py @@ -13,12 +13,12 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA @@ -35,15 +35,15 @@ # # Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24 # Random Distribution Ration = 512 / (2350-512) = 0.279. 
-# -# Typical Distribution Ratio +# +# Typical Distribution Ratio EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0 EUCKR_TABLE_SIZE = 2352 -# Char to FreqOrder table , -EUCKRCharToFreqOrder = ( \ +# Char to FreqOrder table , +EUCKR_CHAR_TO_FREQ_ORDER = ( 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87, 1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398, 1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734, @@ -191,406 +191,5 @@ EUCKRCharToFreqOrder = ( \ 1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628, 2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042, 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256 -#Everything below is of no interest for detection purpose -2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658, -2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674, -2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690, -2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704, -2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720, -2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734, -2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750, -2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765, -2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779, -2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793, -2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809, -2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824, -2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840, -2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856, 
-1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869, -2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883, -2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899, -2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915, -2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331, -2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945, -2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961, -2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976, -2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992, -2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008, -3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021, -3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037, -3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052, -3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066, -3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080, -3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095, -3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110, -3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124, -3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140, -3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156, -3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172, -3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187, -3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201, -3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217, 
-3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233, -3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248, -3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264, -3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279, -3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295, -3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311, -3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327, -3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343, -3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359, -3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374, -3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389, -3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405, -3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338, -3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432, -3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446, -3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191, -3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471, -3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486, -1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499, -1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513, -3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525, -3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541, -3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557, -3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573, 
-3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587, -3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603, -3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618, -3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632, -3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648, -3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663, -3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679, -3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695, -3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583, -1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722, -3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738, -3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753, -3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767, -3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782, -3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796, -3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810, -3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591, -1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836, -3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851, -3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866, -3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880, -3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895, -1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905, -3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921, 
-3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934, -3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603, -3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964, -3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978, -3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993, -3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009, -4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024, -4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040, -1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055, -4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069, -4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083, -4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098, -4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113, -4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610, -4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142, -4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157, -4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173, -4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189, -4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205, -4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220, -4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234, -4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249, -4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265, -4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279, 
-4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294, -4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310, -4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326, -4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341, -4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357, -4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371, -4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387, -4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403, -4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418, -4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432, -4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446, -4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461, -4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476, -4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491, -4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507, -4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623, -4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536, -4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551, -4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567, -4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581, -4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627, -4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611, -4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626, -4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642, 
-4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657, -4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672, -4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687, -1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700, -4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715, -4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731, -4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633, -4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758, -4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773, -4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788, -4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803, -4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817, -4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832, -4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847, -4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863, -4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879, -4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893, -4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909, -4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923, -4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938, -4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954, -4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970, -4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645, -4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999, 
-5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078, -5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028, -1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042, -5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056, -5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072, -5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087, -5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103, -5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118, -1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132, -5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148, -5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161, -5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177, -5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192, -5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206, -1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218, -5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234, -5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249, -5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262, -5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278, -5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293, -5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308, -5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323, -5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338, -5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353, 
-5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369, -5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385, -5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400, -5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415, -5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430, -5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445, -5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461, -5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477, -5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491, -5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507, -5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523, -5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539, -5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554, -5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570, -1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585, -5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600, -5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615, -5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631, -5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646, -5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660, -1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673, -5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688, -5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703, -5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716, 
-5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729, -5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744, -1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758, -5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773, -1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786, -5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801, -5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815, -5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831, -5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847, -5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862, -5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876, -5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889, -5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905, -5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, -5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687, -5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951, -5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963, -5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979, -5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993, -5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009, -6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025, -6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039, -6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055, -6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071, 
-6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086, -6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102, -6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118, -6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133, -6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147, -6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163, -6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179, -6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194, -6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210, -6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225, -6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241, -6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256, -6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024 -6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287, -6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699, -6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317, -6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333, -6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347, -6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363, -6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379, -6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395, -6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411, -6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425, -6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440, 
-6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456, -6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472, -6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488, -6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266, -6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519, -6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535, -6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551, -1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565, -6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581, -6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597, -6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613, -6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629, -6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644, -1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659, -6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674, -1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689, -6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705, -6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721, -6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736, -1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748, -6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763, -6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779, -6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794, -6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711, 
-6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825, -6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840, -6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856, -6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872, -6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888, -6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903, -6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918, -6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934, -6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950, -6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966, -6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981, -6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996, -6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011, -7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027, -7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042, -7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058, -7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074, -7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090, -7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106, -7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122, -7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138, -7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154, -7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170, -7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186, 
-7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202, -7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216, -7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232, -7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248, -7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264, -7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280, -7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296, -7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312, -7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327, -7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343, -7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359, -7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375, -7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391, -7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407, -7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423, -7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439, -7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455, -7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471, -7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487, -7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503, -7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519, -7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535, -7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551, -7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, 
-7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583, -7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599, -7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615, -7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631, -7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647, -7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663, -7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679, -7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695, -7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711, -7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727, -7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743, -7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759, -7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775, -7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791, -7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807, -7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823, -7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839, -7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855, -7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871, -7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887, -7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903, -7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919, -7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, -7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, 
-7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, -7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, -7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, -8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, -8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, -8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, -8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, -8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, -8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, -8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, -8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, -8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, -8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, -8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, -8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, -8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, -8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, -8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, -8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, -8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271, -8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287, -8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303, -8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319, -8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335, 
-8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351, -8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367, -8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383, -8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399, -8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415, -8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431, -8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447, -8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463, -8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479, -8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495, -8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511, -8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527, -8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543, -8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559, -8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575, -8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591, -8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607, -8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623, -8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639, -8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655, -8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671, -8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687, -8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, -8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719, 
-8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735, -8736,8737,8738,8739,8740,8741) +) -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/euckrprober.py b/Shared/lib/python3.4/site-packages/chardet/euckrprober.py index 5982a46..345a060 100644 --- a/Shared/lib/python3.4/site-packages/chardet/euckrprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/euckrprober.py @@ -28,15 +28,20 @@ from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCKRDistributionAnalysis -from .mbcssm import EUCKRSMModel +from .mbcssm import EUCKR_SM_MODEL class EUCKRProber(MultiByteCharSetProber): def __init__(self): - MultiByteCharSetProber.__init__(self) - self._mCodingSM = CodingStateMachine(EUCKRSMModel) - self._mDistributionAnalyzer = EUCKRDistributionAnalysis() + super(EUCKRProber, self).__init__() + self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL) + self.distribution_analyzer = EUCKRDistributionAnalysis() self.reset() - def get_charset_name(self): + @property + def charset_name(self): return "EUC-KR" + + @property + def language(self): + return "Korean" diff --git a/Shared/lib/python3.4/site-packages/chardet/euctwfreq.py b/Shared/lib/python3.4/site-packages/chardet/euctwfreq.py index 576e750..ed7a995 100644 --- a/Shared/lib/python3.4/site-packages/chardet/euctwfreq.py +++ b/Shared/lib/python3.4/site-packages/chardet/euctwfreq.py @@ -44,385 +44,344 @@ EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 # Char to FreqOrder table , -EUCTW_TABLE_SIZE = 8102 +EUCTW_TABLE_SIZE = 5376 -EUCTWCharToFreqOrder = ( - 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 -3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 -1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 - 63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 -3616, 3, 10,3870,1471, 
29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 -4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 -7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 - 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 - 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 - 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 -2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 -1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 -3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 - 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 -1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 -3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 -2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 - 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 -3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 -1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 -7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 - 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078 -7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 -1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 - 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 - 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 -3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158 
-3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 - 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 -2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 -2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 - 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 - 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 -3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 -1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 -1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 -1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 -2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 - 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 -4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 -1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 -7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 -2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 - 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 - 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 - 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 - 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 -7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 - 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 -1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 
930,3250,3744,7371, 59,7372, # 3526 - 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 - 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 -7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574 -1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 - 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 -3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622 -4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 -3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 - 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 - 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 -1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 -4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718 -3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 -3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 -2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 -7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 -3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 -7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 -1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 -2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 -1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 - 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878 
-1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 -4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 -3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 - 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 - 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 - 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 -2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 -7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 -1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 -2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 -1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 -1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 -7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 -7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 -7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 -3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 -4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 -1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 -7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 -2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 -7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 -3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 
-3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 -7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 -2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 -7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 - 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 -4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 -2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342 -7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 -3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 -2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 -2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 - 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 -2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 -1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 -1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 -2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 -1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 -7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 -7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 -2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 -4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 -1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 -7511,2026,4386,3534,7512, 501,7513,4123, 
594,3431,2165,1821,3535,3432,3536,3192, # 4598 - 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 -4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 - 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646 -2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 - 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 -1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694 -1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 - 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 -3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 -3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 -1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 -3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 -7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 -7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 -1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 -2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 -1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 -3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 -2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 -3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 -2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 -4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 
-4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 -3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 - 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 -3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014 - 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 -3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 -3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 -3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 -1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 -7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 - 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 -7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 -1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 - 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 -4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 -3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 - 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 -2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 -2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 -3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 -1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 -4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 -2600, 161,1178,4156,1982, 987,4423,1101,4157, 
631,3943,1157,3198,2420,1343,1241, # 5318 -1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 -1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 -2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366 -3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 -1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 -7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414 -1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 -4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 -1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 - 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 -1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 -3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 -3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 -2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 -1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 -4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 - 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 -7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 -2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 -3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 -4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 - 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 
-7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 -7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 -1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718 -4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 -3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 -2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 -3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 -3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 -2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 -1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 -4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 -3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 -3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 -2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 -4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 -7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 -3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 -2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 -3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 -1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 -2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 -3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 -4466,3563, 579, 327, 440, 162,2240, 
269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 -2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 -2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 -7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086 -1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 -2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 -1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 -3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 -4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 -2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 -3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 -3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 -2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 -4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 -2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 -3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 -4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 -7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310 -3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 - 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 -1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 -4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 -1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, 
# 6390 -4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406 -7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 - 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438 -7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 -2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 -1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486 -1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 -3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 - 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 - 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 - 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 -3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 -2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 - 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 -7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 -1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 -3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 -7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 -1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 -7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 -4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 -1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 
-2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 -2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 -4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 - 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 - 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 -3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 -3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 -1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 -2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 -7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 -1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918 -1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 -3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 - 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 -1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 -4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 -7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 -2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 -3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 - 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062 -1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 -2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 -2639,4016,4233,4521,2290, 895, 
924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 -7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 -7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 -7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158 -2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 -2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 -1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 -4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 -3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 -3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 -4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 -4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 -2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 -2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 -7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 -4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 -7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 -2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 -1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 -3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 -4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 -2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 - 
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 -2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 -1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 -2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510 -2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 -4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 -7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558 -1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 -3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 -7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 -1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 -8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 -2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 -8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 -2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 -2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 -8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 -8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 -8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 - 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 -8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 -4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 -3858,1973,4281,8038,4282,3167, 
823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 -8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 -1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 -8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 - 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 -1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 - 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 -4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 -1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 -4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 -1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 - 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 -3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 -4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 -8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 - 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 -3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 - 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 -2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 -#Everything below is of no interest for detection purpose -2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118 -2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134 -8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150 
-8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166 -8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182 -8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198 -8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214 -8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230 -8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246 -8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262 -8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278 -8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294 -8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310 -8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326 -8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342 -8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358 -8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374 -8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390 -8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406 -8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422 -8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438 -8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454 -8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470 -8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486 -8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502 
-8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518 -8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534 -8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550 -8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566 -8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582 -8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598 -8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614 -8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630 -8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646 -8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662 -8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678 -8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694 -8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710 -8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726 -8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742 +EUCTW_CHAR_TO_FREQ_ORDER = ( + 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742 +3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758 +1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774 + 63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790 +3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806 +4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822 +7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838 + 630, 238,3174,1509, 263, 
939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854 + 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870 + 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886 +2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902 +1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918 +3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934 + 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950 +1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966 +3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982 +2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998 + 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014 +3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030 +1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046 +7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062 + 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078 +7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094 +1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110 + 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126 + 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142 +3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158 +3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174 + 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190 +2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206 +2079, 
462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222 + 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238 + 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254 +3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270 +1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286 +1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302 +1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318 +2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334 + 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350 +4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366 +1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382 +7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398 +2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414 + 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430 + 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446 + 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462 + 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478 +7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494 + 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510 +1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526 + 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542 + 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558 +7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 
966,1758,2815,1008,1782, 686, # 3574 +1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590 + 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606 +3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622 +4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638 +3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654 + 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670 + 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686 +1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702 +4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718 +3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734 +3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750 +2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766 +7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782 +3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798 +7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814 +1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830 +2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846 +1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862 + 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878 +1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894 +4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910 +3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926 + 534, 
323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942 + 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958 + 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974 +2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990 +7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006 +1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022 +2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038 +1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054 +1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070 +7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086 +7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102 +7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118 +3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134 +4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150 +1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166 +7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182 +2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198 +7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214 +3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230 +3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246 +7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262 +2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278 +7479,2983,1498,7480,2654,2620, 
755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294 + 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310 +4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326 +2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342 +7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358 +3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374 +2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390 +2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406 + 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422 +2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438 +1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454 +1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470 +2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486 +1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502 +7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518 +7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534 +2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550 +4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566 +1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582 +7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598 + 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614 +4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630 + 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 
138,3031,2445,4391,7523, # 4646 +2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662 + 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678 +1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694 +1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710 + 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726 +3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742 +3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758 +1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774 +3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790 +7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806 +7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822 +1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838 +2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854 +1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870 +3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886 +2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902 +3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918 +2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934 +4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950 +4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966 +3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982 + 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998 
+3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014 + 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030 +3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046 +3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062 +3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078 +1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094 +7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110 + 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126 +7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142 +1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158 + 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174 +4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190 +3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206 + 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222 +2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238 +2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254 +3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270 +1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286 +4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302 +2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318 +1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334 +1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350 +2721,1201,2070,2401,1996,2475,7613,4424, 
528,1922,2188,1503,1873,1570,2364,3342, # 5366 +3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382 +1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398 +7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414 +1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430 +4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446 +1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462 + 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478 +1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494 +3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510 +3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526 +2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542 +1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558 +4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574 + 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590 +7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606 +2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622 +3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638 +4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654 + 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670 +7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686 +7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702 +1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 
5718 +4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734 +3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750 +2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766 +3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782 +3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798 +2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814 +1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830 +4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846 +3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862 +3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878 +2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894 +4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910 +7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926 +3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942 +2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958 +3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974 +1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990 +2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006 +3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022 +4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038 +2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054 +2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070 +7722,1076, 49,7723, 951,3211,3289,3290, 
450,2837, 920,7724,1811,2792,2366,4184, # 6086 +1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102 +2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118 +1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134 +3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150 +4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166 +2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182 +3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198 +3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214 +2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230 +4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246 +2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262 +3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278 +4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294 +7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310 +3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326 + 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342 +1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358 +4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374 +1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390 +4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406 +7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422 + 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 
6438 +7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454 +2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470 +1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486 +1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502 +3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518 + 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534 + 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550 + 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566 +3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582 +2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598 + 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614 +7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630 +1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646 +3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662 +7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678 +1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694 +7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710 +4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726 +1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742 +2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758 +2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774 +4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790 + 
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806 + 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822 +3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838 +3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854 +1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870 +2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886 +7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902 +1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918 +1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934 +3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950 + 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966 +1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982 +4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998 +7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014 +2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030 +3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046 + 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062 +1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078 +2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094 +2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110 +7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126 +7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142 +7902,2801,2031,1014,4018, 213,2700,3376, 
665, 621,4236,7903,3711,2925,2430,7904, # 7158 +2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174 +2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190 +1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206 +4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222 +3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238 +3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254 +4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270 +4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286 +2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302 +2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318 +7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334 +4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350 +7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366 +2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382 +1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398 +3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414 +4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430 +2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446 + 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462 +2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478 +1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494 +2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 
7510 +2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526 +4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542 +7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558 +1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574 +3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590 +7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606 +1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622 +8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638 +2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654 +8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670 +2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686 +2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702 +8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718 +8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734 +8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750 + 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766 +8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782 +4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798 +3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814 +8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830 +1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846 +8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862 + 
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878 +1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894 + 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910 +4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926 +1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942 +4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958 +1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974 + 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990 +3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006 +4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022 +8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038 + 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054 +3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070 + 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086 +2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102 +) -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/euctwprober.py b/Shared/lib/python3.4/site-packages/chardet/euctwprober.py index fe652fe..35669cc 100644 --- a/Shared/lib/python3.4/site-packages/chardet/euctwprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/euctwprober.py @@ -13,12 +13,12 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. 
-# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA @@ -28,14 +28,19 @@ from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCTWDistributionAnalysis -from .mbcssm import EUCTWSMModel +from .mbcssm import EUCTW_SM_MODEL class EUCTWProber(MultiByteCharSetProber): def __init__(self): - MultiByteCharSetProber.__init__(self) - self._mCodingSM = CodingStateMachine(EUCTWSMModel) - self._mDistributionAnalyzer = EUCTWDistributionAnalysis() + super(EUCTWProber, self).__init__() + self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL) + self.distribution_analyzer = EUCTWDistributionAnalysis() self.reset() - def get_charset_name(self): + @property + def charset_name(self): return "EUC-TW" + + @property + def language(self): + return "Taiwan" diff --git a/Shared/lib/python3.4/site-packages/chardet/gb2312freq.py b/Shared/lib/python3.4/site-packages/chardet/gb2312freq.py index 1238f51..697837b 100644 --- a/Shared/lib/python3.4/site-packages/chardet/gb2312freq.py +++ b/Shared/lib/python3.4/site-packages/chardet/gb2312freq.py @@ -43,7 +43,7 @@ GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9 GB2312_TABLE_SIZE = 3760 -GB2312CharToFreqOrder = ( +GB2312_CHAR_TO_FREQ_ORDER = ( 1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205, 2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842, 2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409, @@ -278,195 +278,6 @@ GB2312CharToFreqOrder = ( 1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 
56,1908,1640,2387,2232, 1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624, 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189, - 852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512 -#Everything below is of no interest for detection purpose -5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636, -5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874, -5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278, -3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806, -4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827, -5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512, -5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578, -4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828, -4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105, -4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189, -4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561, -3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226, -6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778, -4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039, -6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404, -4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213, -4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739, -4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328, -5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592, -3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424, 
-4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270, -3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232, -4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456, -4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121, -6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971, -6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409, -5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519, -4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367, -6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834, -4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460, -5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464, -5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709, -5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906, -6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530, -3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262, -6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920, -4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190, -5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318, -6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538, -6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697, -4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544, -5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016, -4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638, -5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006, 
-5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071, -4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552, -4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556, -5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432, -4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632, -4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885, -5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336, -4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729, -4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854, -4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332, -5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004, -5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419, -4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293, -3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580, -4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339, -6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341, -5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493, -5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046, -4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904, -6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728, -5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350, -6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233, -4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944, -5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413, 
-5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700, -3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999, -5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694, -6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571, -4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359, -6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178, -4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421, -4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330, -6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855, -3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587, -6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803, -4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791, -3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304, -3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445, -3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506, -4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856, -2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057, -5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777, -4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369, -5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028, -5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914, -5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175, -4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681, -5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534, 
-4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912, -5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054, -1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336, -3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666, -4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375, -4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113, -6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614, -4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173, -5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197, -3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271, -5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423, -5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529, -5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921, -3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837, -5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922, -5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187, -3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382, -5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628, -5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683, -5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053, -6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928, -4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662, -6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663, -4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554, 
-3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191, -4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013, -5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932, -5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055, -5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829, -3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096, -3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660, -6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199, -6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748, -5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402, -6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957, -6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668, -6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763, -6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407, -6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051, -5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429, -6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791, -6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028, -3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305, -3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159, -4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683, -4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372, -3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514, -5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544, 
-5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472, -5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716, -5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905, -5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327, -4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030, -5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281, -6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224, -5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327, -4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062, -4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354, -6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065, -3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953, -4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681, -4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708, -5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442, -6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387, -6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237, -4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713, -6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547, -5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957, -5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337, -5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074, -5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685, -5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455, 
-4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722, -5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615, -5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093, -5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989, -5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094, -6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212, -4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967, -5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733, -4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260, -4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864, -6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353, -4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095, -6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287, -3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504, -5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539, -6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750, -6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864, -6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213, -5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573, -6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252, -6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970, -3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703, -5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978, -4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767) + 852,1221,1400,1486, 
882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512 +) -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/gb2312prober.py b/Shared/lib/python3.4/site-packages/chardet/gb2312prober.py index 0325a2d..8446d2d 100644 --- a/Shared/lib/python3.4/site-packages/chardet/gb2312prober.py +++ b/Shared/lib/python3.4/site-packages/chardet/gb2312prober.py @@ -13,12 +13,12 @@ # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. -# +# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. -# +# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA @@ -28,14 +28,19 @@ from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import GB2312DistributionAnalysis -from .mbcssm import GB2312SMModel +from .mbcssm import GB2312_SM_MODEL class GB2312Prober(MultiByteCharSetProber): def __init__(self): - MultiByteCharSetProber.__init__(self) - self._mCodingSM = CodingStateMachine(GB2312SMModel) - self._mDistributionAnalyzer = GB2312DistributionAnalysis() + super(GB2312Prober, self).__init__() + self.coding_sm = CodingStateMachine(GB2312_SM_MODEL) + self.distribution_analyzer = GB2312DistributionAnalysis() self.reset() - def get_charset_name(self): + @property + def charset_name(self): return "GB2312" + + @property + def language(self): + return "Chinese" diff --git a/Shared/lib/python3.4/site-packages/chardet/hebrewprober.py b/Shared/lib/python3.4/site-packages/chardet/hebrewprober.py index ba225c5..b0e1bf4 100644 --- 
a/Shared/lib/python3.4/site-packages/chardet/hebrewprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/hebrewprober.py @@ -26,8 +26,7 @@ ######################### END LICENSE BLOCK ######################### from .charsetprober import CharSetProber -from .constants import eNotMe, eDetecting -from .compat import wrap_ord +from .enums import ProbingState # This prober doesn't actually recognize a language or a charset. # It is a helper prober for the use of the Hebrew model probers @@ -126,56 +125,59 @@ from .compat import wrap_ord # model probers scores. The answer is returned in the form of the name of the # charset identified, either "windows-1255" or "ISO-8859-8". -# windows-1255 / ISO-8859-8 code points of interest -FINAL_KAF = 0xea -NORMAL_KAF = 0xeb -FINAL_MEM = 0xed -NORMAL_MEM = 0xee -FINAL_NUN = 0xef -NORMAL_NUN = 0xf0 -FINAL_PE = 0xf3 -NORMAL_PE = 0xf4 -FINAL_TSADI = 0xf5 -NORMAL_TSADI = 0xf6 - -# Minimum Visual vs Logical final letter score difference. -# If the difference is below this, don't rely solely on the final letter score -# distance. -MIN_FINAL_CHAR_DISTANCE = 5 - -# Minimum Visual vs Logical model score difference. -# If the difference is below this, don't rely at all on the model score -# distance. -MIN_MODEL_DISTANCE = 0.01 - -VISUAL_HEBREW_NAME = "ISO-8859-8" -LOGICAL_HEBREW_NAME = "windows-1255" - - class HebrewProber(CharSetProber): + # windows-1255 / ISO-8859-8 code points of interest + FINAL_KAF = 0xea + NORMAL_KAF = 0xeb + FINAL_MEM = 0xed + NORMAL_MEM = 0xee + FINAL_NUN = 0xef + NORMAL_NUN = 0xf0 + FINAL_PE = 0xf3 + NORMAL_PE = 0xf4 + FINAL_TSADI = 0xf5 + NORMAL_TSADI = 0xf6 + + # Minimum Visual vs Logical final letter score difference. + # If the difference is below this, don't rely solely on the final letter score + # distance. + MIN_FINAL_CHAR_DISTANCE = 5 + + # Minimum Visual vs Logical model score difference. + # If the difference is below this, don't rely at all on the model score + # distance. 
+ MIN_MODEL_DISTANCE = 0.01 + + VISUAL_HEBREW_NAME = "ISO-8859-8" + LOGICAL_HEBREW_NAME = "windows-1255" + def __init__(self): - CharSetProber.__init__(self) - self._mLogicalProber = None - self._mVisualProber = None + super(HebrewProber, self).__init__() + self._final_char_logical_score = None + self._final_char_visual_score = None + self._prev = None + self._before_prev = None + self._logical_prober = None + self._visual_prober = None self.reset() def reset(self): - self._mFinalCharLogicalScore = 0 - self._mFinalCharVisualScore = 0 + self._final_char_logical_score = 0 + self._final_char_visual_score = 0 # The two last characters seen in the previous buffer, # mPrev and mBeforePrev are initialized to space in order to simulate # a word delimiter at the beginning of the data - self._mPrev = ' ' - self._mBeforePrev = ' ' + self._prev = ' ' + self._before_prev = ' ' # These probers are owned by the group prober. def set_model_probers(self, logicalProber, visualProber): - self._mLogicalProber = logicalProber - self._mVisualProber = visualProber + self._logical_prober = logicalProber + self._visual_prober = visualProber def is_final(self, c): - return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE, - FINAL_TSADI] + return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN, + self.FINAL_PE, self.FINAL_TSADI] def is_non_final(self, c): # The normal Tsadi is not a good Non-Final letter due to words like @@ -188,9 +190,10 @@ class HebrewProber(CharSetProber): # for example legally end with a Non-Final Pe or Kaf. However, the # benefit of these letters as Non-Final letters outweighs the damage # since these words are quite rare. - return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE] + return c in [self.NORMAL_KAF, self.NORMAL_MEM, + self.NORMAL_NUN, self.NORMAL_PE] - def feed(self, aBuf): + def feed(self, byte_str): # Final letter analysis for logical-visual decision. 
# Look for evidence that the received buffer is either logical Hebrew # or visual Hebrew. @@ -217,67 +220,73 @@ class HebrewProber(CharSetProber): # We automatically filter out all 7-bit characters (replace them with # spaces) so the word boundary detection works properly. [MAP] - if self.get_state() == eNotMe: + if self.state == ProbingState.NOT_ME: # Both model probers say it's not them. No reason to continue. - return eNotMe + return ProbingState.NOT_ME - aBuf = self.filter_high_bit_only(aBuf) + byte_str = self.filter_high_byte_only(byte_str) - for cur in aBuf: + for cur in byte_str: if cur == ' ': # We stand on a space - a word just ended - if self._mBeforePrev != ' ': - # next-to-last char was not a space so self._mPrev is not a + if self._before_prev != ' ': + # next-to-last char was not a space so self._prev is not a # 1 letter word - if self.is_final(self._mPrev): + if self.is_final(self._prev): # case (1) [-2:not space][-1:final letter][cur:space] - self._mFinalCharLogicalScore += 1 - elif self.is_non_final(self._mPrev): + self._final_char_logical_score += 1 + elif self.is_non_final(self._prev): # case (2) [-2:not space][-1:Non-Final letter][ # cur:space] - self._mFinalCharVisualScore += 1 + self._final_char_visual_score += 1 else: # Not standing on a space - if ((self._mBeforePrev == ' ') and - (self.is_final(self._mPrev)) and (cur != ' ')): + if ((self._before_prev == ' ') and + (self.is_final(self._prev)) and (cur != ' ')): # case (3) [-2:space][-1:final letter][cur:not space] - self._mFinalCharVisualScore += 1 - self._mBeforePrev = self._mPrev - self._mPrev = cur + self._final_char_visual_score += 1 + self._before_prev = self._prev + self._prev = cur # Forever detecting, till the end or until both model probers return - # eNotMe (handled above) - return eDetecting + # ProbingState.NOT_ME (handled above) + return ProbingState.DETECTING - def get_charset_name(self): + @property + def charset_name(self): # Make the decision: is it Logical or Visual? 
# If the final letter score distance is dominant enough, rely on it. - finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore - if finalsub >= MIN_FINAL_CHAR_DISTANCE: - return LOGICAL_HEBREW_NAME - if finalsub <= -MIN_FINAL_CHAR_DISTANCE: - return VISUAL_HEBREW_NAME + finalsub = self._final_char_logical_score - self._final_char_visual_score + if finalsub >= self.MIN_FINAL_CHAR_DISTANCE: + return self.LOGICAL_HEBREW_NAME + if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE: + return self.VISUAL_HEBREW_NAME # It's not dominant enough, try to rely on the model scores instead. - modelsub = (self._mLogicalProber.get_confidence() - - self._mVisualProber.get_confidence()) - if modelsub > MIN_MODEL_DISTANCE: - return LOGICAL_HEBREW_NAME - if modelsub < -MIN_MODEL_DISTANCE: - return VISUAL_HEBREW_NAME + modelsub = (self._logical_prober.get_confidence() + - self._visual_prober.get_confidence()) + if modelsub > self.MIN_MODEL_DISTANCE: + return self.LOGICAL_HEBREW_NAME + if modelsub < -self.MIN_MODEL_DISTANCE: + return self.VISUAL_HEBREW_NAME # Still no good, back to final letter distance, maybe it'll save the # day. if finalsub < 0.0: - return VISUAL_HEBREW_NAME + return self.VISUAL_HEBREW_NAME # (finalsub > 0 - Logical) or (don't know what to do) default to # Logical. - return LOGICAL_HEBREW_NAME + return self.LOGICAL_HEBREW_NAME - def get_state(self): + @property + def language(self): + return 'Hebrew' + + @property + def state(self): # Remain active as long as any of the model probers are active. 
- if (self._mLogicalProber.get_state() == eNotMe) and \ - (self._mVisualProber.get_state() == eNotMe): - return eNotMe - return eDetecting + if (self._logical_prober.state == ProbingState.NOT_ME) and \ + (self._visual_prober.state == ProbingState.NOT_ME): + return ProbingState.NOT_ME + return ProbingState.DETECTING diff --git a/Shared/lib/python3.4/site-packages/chardet/jisfreq.py b/Shared/lib/python3.4/site-packages/chardet/jisfreq.py index 064345b..83fc082 100644 --- a/Shared/lib/python3.4/site-packages/chardet/jisfreq.py +++ b/Shared/lib/python3.4/site-packages/chardet/jisfreq.py @@ -46,7 +46,7 @@ JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0 # Char to FreqOrder table , JIS_TABLE_SIZE = 4368 -JISCharToFreqOrder = ( +JIS_CHAR_TO_FREQ_ORDER = ( 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16 3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32 1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48 @@ -320,250 +320,6 @@ JISCharToFreqOrder = ( 2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336 1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352 2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512 -#Everything below is of no interest for detection purpose -2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384 -6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400 -6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416 -6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432 -6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448 -4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464 -4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480 
-3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496 -3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512 -4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528 -3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544 -6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560 -4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576 -6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592 -6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608 -6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624 -6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640 -6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656 -6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672 -3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688 -3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704 -6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720 -2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736 -4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752 -4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768 -4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784 -6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800 -3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816 -4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832 
-4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848 -6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864 -4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880 -6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896 -3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912 -2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928 -4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944 -2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960 -6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976 -4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992 -6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008 -6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024 -6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040 -4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056 -6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072 -2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088 -6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104 -4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120 -6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136 -4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152 -4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168 -6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184 
-6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200 -6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216 -3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232 -1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248 -3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264 -3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280 -4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296 -6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312 -3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328 -6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344 -3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360 -3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376 -2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392 -6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408 -6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424 -3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440 -6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456 -3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472 -6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488 -6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504 -6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520 -4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536 
-6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552 -4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568 -3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584 -3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600 -6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616 -6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632 -4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648 -6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664 -6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680 -6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696 -6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712 -6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728 -6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744 -4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760 -4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776 -3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792 -6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808 -4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824 -2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840 -6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856 -6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872 -4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888 
-2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904 -4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920 -2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936 -4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952 -4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968 -4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984 -6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000 -3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016 -6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032 -3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048 -6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064 -2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080 -3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096 -7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112 -2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128 -3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144 -3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160 -3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176 -3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192 -7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208 -7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224 -7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240 
-7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256 -7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272 -4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288 -3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304 -3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320 -4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336 -3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352 -3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368 -7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384 -4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400 -7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416 -7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432 -7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448 -7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464 -7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480 -4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496 -4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512 -7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528 -3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544 -4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560 -7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576 -7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592 
-4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608 -3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624 -3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640 -7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656 -4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672 -4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688 -4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704 -4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720 -4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736 -4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752 -7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768 -7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784 -7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800 -7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816 -7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832 -2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848 -3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864 -7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880 -7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896 -3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912 -4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928 -3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944 
-3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960 -2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976 -7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992 -7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008 -4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024 -3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040 -3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056 -7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072 -7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088 -7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104 -4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120 -7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136 -2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152 -3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168 -4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184 -7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200 -4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216 -4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232 -7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248 -7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264 -5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280 -7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296 
-7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312 -7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328 -7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344 -7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360 -5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376 -5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392 -7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408 -3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424 -7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440 -7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456 -3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472 -7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488 -7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504 -1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520 -3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536 -4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552 -2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568 -3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584 -2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600 -5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616 -4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632 -4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648 
-5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664 -7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680 -7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696 -7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712 -7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728 -3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744 -7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760 -3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776 -7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792 -4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808 -7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824 -7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840 -7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856 -7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872 -7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888 -7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904 -7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920 -7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936 -7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952 -7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968 -7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984 -7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000 
-8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016 -8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032 -8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048 -8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064 -8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080 -8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096 -8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112 -8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128 -8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144 -8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160 -8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176 -8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192 -8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208 -8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224 -8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240 -8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256 -8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272 +) + -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/jpcntx.py b/Shared/lib/python3.4/site-packages/chardet/jpcntx.py index 59aeb6a..20044e4 100644 --- a/Shared/lib/python3.4/site-packages/chardet/jpcntx.py +++ b/Shared/lib/python3.4/site-packages/chardet/jpcntx.py @@ -25,13 +25,6 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from .compat import wrap_ord - -NUM_OF_CATEGORY = 6 -DONT_KNOW = -1 -ENOUGH_REL_THRESHOLD 
= 100 -MAX_REL_THRESHOLD = 1000 -MINIMUM_DATA_THRESHOLD = 4 # This is hiragana 2-char sequence table, the number in each cell represents its frequency category jp2CharContext = ( @@ -120,24 +113,35 @@ jp2CharContext = ( (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) -class JapaneseContextAnalysis: +class JapaneseContextAnalysis(object): + NUM_OF_CATEGORY = 6 + DONT_KNOW = -1 + ENOUGH_REL_THRESHOLD = 100 + MAX_REL_THRESHOLD = 1000 + MINIMUM_DATA_THRESHOLD = 4 + def __init__(self): + self._total_rel = None + self._rel_sample = None + self._need_to_skip_char_num = None + self._last_char_order = None + self._done = None self.reset() def reset(self): - self._mTotalRel = 0 # total sequence received - # category counters, each interger counts sequence in its category - self._mRelSample = [0] * NUM_OF_CATEGORY + self._total_rel = 0 # total sequence received + # category counters, each integer counts sequence in its category + self._rel_sample = [0] * self.NUM_OF_CATEGORY # if last byte in current buffer is not the last byte of a character, # we need to know how many bytes to skip in next buffer - self._mNeedToSkipCharNum = 0 - self._mLastCharOrder = -1 # The order of previous char + self._need_to_skip_char_num = 0 + self._last_char_order = -1 # The order of previous char # If this flag is set to True, detection is done and conclusion has # been made - self._mDone = False + self._done = False - def feed(self, aBuf, aLen): - if self._mDone: + def feed(self, byte_str, num_bytes): + if self._done: return # The buffer we got is byte oriented, and a character may span in more than one @@ -147,81 +151,83 @@ class JapaneseContextAnalysis: # well and analyse the character once it is complete, but since a # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. 
- i = self._mNeedToSkipCharNum - while i < aLen: - order, charLen = self.get_order(aBuf[i:i + 2]) - i += charLen - if i > aLen: - self._mNeedToSkipCharNum = i - aLen - self._mLastCharOrder = -1 + i = self._need_to_skip_char_num + while i < num_bytes: + order, char_len = self.get_order(byte_str[i:i + 2]) + i += char_len + if i > num_bytes: + self._need_to_skip_char_num = i - num_bytes + self._last_char_order = -1 else: - if (order != -1) and (self._mLastCharOrder != -1): - self._mTotalRel += 1 - if self._mTotalRel > MAX_REL_THRESHOLD: - self._mDone = True + if (order != -1) and (self._last_char_order != -1): + self._total_rel += 1 + if self._total_rel > self.MAX_REL_THRESHOLD: + self._done = True break - self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1 - self._mLastCharOrder = order + self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1 + self._last_char_order = order def got_enough_data(self): - return self._mTotalRel > ENOUGH_REL_THRESHOLD + return self._total_rel > self.ENOUGH_REL_THRESHOLD def get_confidence(self): # This is just one way to calculate confidence. It works well for me. 
- if self._mTotalRel > MINIMUM_DATA_THRESHOLD: - return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel + if self._total_rel > self.MINIMUM_DATA_THRESHOLD: + return (self._total_rel - self._rel_sample[0]) / self._total_rel else: - return DONT_KNOW + return self.DONT_KNOW - def get_order(self, aBuf): + def get_order(self, byte_str): return -1, 1 class SJISContextAnalysis(JapaneseContextAnalysis): def __init__(self): - self.charset_name = "SHIFT_JIS" + super(SJISContextAnalysis, self).__init__() + self._charset_name = "SHIFT_JIS" - def get_charset_name(self): - return self.charset_name + @property + def charset_name(self): + return self._charset_name - def get_order(self, aBuf): - if not aBuf: + def get_order(self, byte_str): + if not byte_str: return -1, 1 # find out current char's byte length - first_char = wrap_ord(aBuf[0]) - if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)): - charLen = 2 + first_char = byte_str[0] + if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC): + char_len = 2 if (first_char == 0x87) or (0xFA <= first_char <= 0xFC): - self.charset_name = "CP932" + self._charset_name = "CP932" else: - charLen = 1 + char_len = 1 # return its order if it is hiragana - if len(aBuf) > 1: - second_char = wrap_ord(aBuf[1]) + if len(byte_str) > 1: + second_char = byte_str[1] if (first_char == 202) and (0x9F <= second_char <= 0xF1): - return second_char - 0x9F, charLen + return second_char - 0x9F, char_len - return -1, charLen + return -1, char_len class EUCJPContextAnalysis(JapaneseContextAnalysis): - def get_order(self, aBuf): - if not aBuf: + def get_order(self, byte_str): + if not byte_str: return -1, 1 # find out current char's byte length - first_char = wrap_ord(aBuf[0]) + first_char = byte_str[0] if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): - charLen = 2 + char_len = 2 elif first_char == 0x8F: - charLen = 3 + char_len = 3 else: - charLen = 1 + char_len = 1 # return its order if it is hiragana - if len(aBuf) > 
1: - second_char = wrap_ord(aBuf[1]) + if len(byte_str) > 1: + second_char = byte_str[1] if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): - return second_char - 0xA1, charLen + return second_char - 0xA1, char_len + + return -1, char_len - return -1, charLen -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/langbulgarianmodel.py b/Shared/lib/python3.4/site-packages/chardet/langbulgarianmodel.py index e5788fc..2aa4fb2 100644 --- a/Shared/lib/python3.4/site-packages/chardet/langbulgarianmodel.py +++ b/Shared/lib/python3.4/site-packages/chardet/langbulgarianmodel.py @@ -210,20 +210,19 @@ BulgarianLangModel = ( ) Latin5BulgarianModel = { - 'charToOrderMap': Latin5_BulgarianCharToOrderMap, - 'precedenceMatrix': BulgarianLangModel, - 'mTypicalPositiveRatio': 0.969392, - 'keepEnglishLetter': False, - 'charsetName': "ISO-8859-5" + 'char_to_order_map': Latin5_BulgarianCharToOrderMap, + 'precedence_matrix': BulgarianLangModel, + 'typical_positive_ratio': 0.969392, + 'keep_english_letter': False, + 'charset_name': "ISO-8859-5", + 'language': 'Bulgairan', } Win1251BulgarianModel = { - 'charToOrderMap': win1251BulgarianCharToOrderMap, - 'precedenceMatrix': BulgarianLangModel, - 'mTypicalPositiveRatio': 0.969392, - 'keepEnglishLetter': False, - 'charsetName': "windows-1251" + 'char_to_order_map': win1251BulgarianCharToOrderMap, + 'precedence_matrix': BulgarianLangModel, + 'typical_positive_ratio': 0.969392, + 'keep_english_letter': False, + 'charset_name': "windows-1251", + 'language': 'Bulgarian', } - - -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/langcyrillicmodel.py b/Shared/lib/python3.4/site-packages/chardet/langcyrillicmodel.py index a86f54b..e5f9a1f 100644 --- a/Shared/lib/python3.4/site-packages/chardet/langcyrillicmodel.py +++ b/Shared/lib/python3.4/site-packages/chardet/langcyrillicmodel.py @@ -27,7 +27,7 @@ # KOI8-R language model # Character Mapping Table: -KOI8R_CharToOrderMap = ( +KOI8R_char_to_order_map = ( 
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -46,7 +46,7 @@ KOI8R_CharToOrderMap = ( 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0 ) -win1251_CharToOrderMap = ( +win1251_char_to_order_map = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -65,7 +65,7 @@ win1251_CharToOrderMap = ( 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, ) -latin5_CharToOrderMap = ( +latin5_char_to_order_map = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -84,7 +84,7 @@ latin5_CharToOrderMap = ( 239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, ) -macCyrillic_CharToOrderMap = ( +macCyrillic_char_to_order_map = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -103,7 +103,7 @@ macCyrillic_CharToOrderMap = ( 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255, ) -IBM855_CharToOrderMap = ( +IBM855_char_to_order_map = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -122,7 +122,7 @@ IBM855_CharToOrderMap = ( 250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255, ) -IBM866_CharToOrderMap = ( +IBM866_char_to_order_map = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -279,51 +279,55 @@ RussianLangModel = ( ) Koi8rModel = { - 'charToOrderMap': KOI8R_CharToOrderMap, - 'precedenceMatrix': RussianLangModel, - 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': False, - 'charsetName': "KOI8-R" + 'char_to_order_map': KOI8R_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "KOI8-R", + 'language': 'Russian', } Win1251CyrillicModel = { - 'charToOrderMap': win1251_CharToOrderMap, - 'precedenceMatrix': RussianLangModel, - 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': False, - 'charsetName': "windows-1251" + 'char_to_order_map': win1251_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "windows-1251", + 'language': 'Russian', } Latin5CyrillicModel = { - 'charToOrderMap': latin5_CharToOrderMap, - 'precedenceMatrix': RussianLangModel, - 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': False, - 'charsetName': "ISO-8859-5" + 'char_to_order_map': latin5_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "ISO-8859-5", + 'language': 'Russian', } MacCyrillicModel = { - 'charToOrderMap': macCyrillic_CharToOrderMap, - 'precedenceMatrix': RussianLangModel, - 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': False, - 'charsetName': "MacCyrillic" -}; + 'char_to_order_map': macCyrillic_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "MacCyrillic", + 'language': 'Russian', +} Ibm866Model = { - 'charToOrderMap': IBM866_CharToOrderMap, - 'precedenceMatrix': RussianLangModel, - 
'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': False, - 'charsetName': "IBM866" + 'char_to_order_map': IBM866_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "IBM866", + 'language': 'Russian', } Ibm855Model = { - 'charToOrderMap': IBM855_CharToOrderMap, - 'precedenceMatrix': RussianLangModel, - 'mTypicalPositiveRatio': 0.976601, - 'keepEnglishLetter': False, - 'charsetName': "IBM855" + 'char_to_order_map': IBM855_char_to_order_map, + 'precedence_matrix': RussianLangModel, + 'typical_positive_ratio': 0.976601, + 'keep_english_letter': False, + 'charset_name': "IBM855", + 'language': 'Russian', } - -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/langgreekmodel.py b/Shared/lib/python3.4/site-packages/chardet/langgreekmodel.py index ddb5837..5332221 100644 --- a/Shared/lib/python3.4/site-packages/chardet/langgreekmodel.py +++ b/Shared/lib/python3.4/site-packages/chardet/langgreekmodel.py @@ -31,7 +31,7 @@ # 252: 0 - 9 # Character Mapping Table: -Latin7_CharToOrderMap = ( +Latin7_char_to_order_map = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -50,7 +50,7 @@ Latin7_CharToOrderMap = ( 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) -win1253_CharToOrderMap = ( +win1253_char_to_order_map = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -207,19 +207,19 @@ GreekLangModel = ( ) Latin7GreekModel = { - 'charToOrderMap': Latin7_CharToOrderMap, - 'precedenceMatrix': GreekLangModel, - 'mTypicalPositiveRatio': 0.982851, - 'keepEnglishLetter': False, - 'charsetName': "ISO-8859-7" + 
'char_to_order_map': Latin7_char_to_order_map, + 'precedence_matrix': GreekLangModel, + 'typical_positive_ratio': 0.982851, + 'keep_english_letter': False, + 'charset_name': "ISO-8859-7", + 'language': 'Greek', } Win1253GreekModel = { - 'charToOrderMap': win1253_CharToOrderMap, - 'precedenceMatrix': GreekLangModel, - 'mTypicalPositiveRatio': 0.982851, - 'keepEnglishLetter': False, - 'charsetName': "windows-1253" + 'char_to_order_map': win1253_char_to_order_map, + 'precedence_matrix': GreekLangModel, + 'typical_positive_ratio': 0.982851, + 'keep_english_letter': False, + 'charset_name': "windows-1253", + 'language': 'Greek', } - -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/langhebrewmodel.py b/Shared/lib/python3.4/site-packages/chardet/langhebrewmodel.py index 75f2bc7..58f4c87 100644 --- a/Shared/lib/python3.4/site-packages/chardet/langhebrewmodel.py +++ b/Shared/lib/python3.4/site-packages/chardet/langhebrewmodel.py @@ -34,7 +34,7 @@ # Windows-1255 language model # Character Mapping Table: -win1255_CharToOrderMap = ( +WIN1255_CHAR_TO_ORDER_MAP = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 @@ -59,7 +59,7 @@ win1255_CharToOrderMap = ( # first 1024 sequences: 1.5981% # rest sequences: 0.087% # negative sequences: 0.0015% -HebrewLangModel = ( +HEBREW_LANG_MODEL = ( 0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0, 3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2, @@ -191,11 +191,10 @@ HebrewLangModel = ( ) Win1255HebrewModel = { - 'charToOrderMap': win1255_CharToOrderMap, - 'precedenceMatrix': HebrewLangModel, - 'mTypicalPositiveRatio': 0.984004, - 'keepEnglishLetter': False, - 'charsetName': "windows-1255" + 'char_to_order_map': WIN1255_CHAR_TO_ORDER_MAP, + 'precedence_matrix': 
HEBREW_LANG_MODEL, + 'typical_positive_ratio': 0.984004, + 'keep_english_letter': False, + 'charset_name': "windows-1255", + 'language': 'Hebrew', } - -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/langhungarianmodel.py b/Shared/lib/python3.4/site-packages/chardet/langhungarianmodel.py index 49d2f0f..bb7c095 100644 --- a/Shared/lib/python3.4/site-packages/chardet/langhungarianmodel.py +++ b/Shared/lib/python3.4/site-packages/chardet/langhungarianmodel.py @@ -207,19 +207,19 @@ HungarianLangModel = ( ) Latin2HungarianModel = { - 'charToOrderMap': Latin2_HungarianCharToOrderMap, - 'precedenceMatrix': HungarianLangModel, - 'mTypicalPositiveRatio': 0.947368, - 'keepEnglishLetter': True, - 'charsetName': "ISO-8859-2" + 'char_to_order_map': Latin2_HungarianCharToOrderMap, + 'precedence_matrix': HungarianLangModel, + 'typical_positive_ratio': 0.947368, + 'keep_english_letter': True, + 'charset_name': "ISO-8859-2", + 'language': 'Hungarian', } Win1250HungarianModel = { - 'charToOrderMap': win1250HungarianCharToOrderMap, - 'precedenceMatrix': HungarianLangModel, - 'mTypicalPositiveRatio': 0.947368, - 'keepEnglishLetter': True, - 'charsetName': "windows-1250" + 'char_to_order_map': win1250HungarianCharToOrderMap, + 'precedence_matrix': HungarianLangModel, + 'typical_positive_ratio': 0.947368, + 'keep_english_letter': True, + 'charset_name': "windows-1250", + 'language': 'Hungarian', } - -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/langthaimodel.py b/Shared/lib/python3.4/site-packages/chardet/langthaimodel.py index 0508b1b..15f94c2 100644 --- a/Shared/lib/python3.4/site-packages/chardet/langthaimodel.py +++ b/Shared/lib/python3.4/site-packages/chardet/langthaimodel.py @@ -190,11 +190,10 @@ ThaiLangModel = ( ) TIS620ThaiModel = { - 'charToOrderMap': TIS620CharToOrderMap, - 'precedenceMatrix': ThaiLangModel, - 'mTypicalPositiveRatio': 0.926386, - 'keepEnglishLetter': False, - 'charsetName': "TIS-620" + 'char_to_order_map': 
TIS620CharToOrderMap, + 'precedence_matrix': ThaiLangModel, + 'typical_positive_ratio': 0.926386, + 'keep_english_letter': False, + 'charset_name': "TIS-620", + 'language': 'Thai', } - -# flake8: noqa diff --git a/Shared/lib/python3.4/site-packages/chardet/langturkishmodel.py b/Shared/lib/python3.4/site-packages/chardet/langturkishmodel.py new file mode 100644 index 0000000..a427a45 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/chardet/langturkishmodel.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Communicator client code. +# +# The Initial Developer of the Original Code is +# Netscape Communications Corporation. +# Portions created by the Initial Developer are Copyright (C) 1998 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# Özgür Baskın - Turkish Language Model +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# 255: Control characters that usually does not exist in any text +# 254: Carriage/Return +# 253: symbol (punctuation) that does not belong to word +# 252: 0 - 9 + +# Character Mapping Table: +Latin5_TurkishCharToOrderMap = ( +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, +255, 23, 37, 47, 39, 29, 52, 36, 45, 53, 60, 16, 49, 20, 46, 42, + 48, 69, 44, 35, 31, 51, 38, 62, 65, 43, 56,255,255,255,255,255, +255, 1, 21, 28, 12, 2, 18, 27, 25, 3, 24, 10, 5, 13, 4, 15, + 26, 64, 7, 8, 9, 14, 32, 57, 58, 11, 22,255,255,255,255,255, +180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165, +164,163,162,161,160,159,101,158,157,156,155,154,153,152,151,106, +150,149,148,147,146,145,144,100,143,142,141,140,139,138,137,136, + 94, 80, 93,135,105,134,133, 63,132,131,130,129,128,127,126,125, +124,104, 73, 99, 79, 85,123, 54,122, 98, 92,121,120, 91,103,119, + 68,118,117, 97,116,115, 50, 90,114,113,112,111, 55, 41, 40, 86, + 89, 70, 59, 78, 71, 82, 88, 33, 77, 66, 84, 83,110, 75, 61, 96, + 30, 67,109, 74, 87,102, 34, 95, 81,108, 76, 72, 17, 6, 19,107, +) + +TurkishLangModel = ( +3,2,3,3,3,1,3,3,3,3,3,3,3,3,2,1,1,3,3,1,3,3,0,3,3,3,3,3,0,3,1,3, +3,2,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1, +3,2,2,3,3,0,3,3,3,3,3,3,3,2,3,1,0,3,3,1,3,3,0,3,3,3,3,3,0,3,0,3, +3,1,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,0,1,0,1, +3,3,2,3,3,0,3,3,3,3,3,3,3,2,3,1,1,3,3,0,3,3,1,2,3,3,3,3,0,3,0,3, +3,1,1,0,0,0,1,0,0,0,0,1,1,0,1,2,1,0,0,0,1,0,0,0,0,2,0,0,0,0,0,1, 
+3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,1,3,3,2,0,3,2,1,2,2,1,3,3,0,0,0,2, +2,2,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,1,0,0,1, +3,3,3,2,3,3,1,2,3,3,3,3,3,3,3,1,3,2,1,0,3,2,0,1,2,3,3,2,1,0,0,2, +2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,0, +1,0,1,3,3,1,3,3,3,3,3,3,3,1,2,0,0,2,3,0,2,3,0,0,2,2,2,3,0,3,0,1, +2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,0,3,2,0,2,3,2,3,3,1,0,0,2, +3,2,0,0,1,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,1,1,1,0,2,0,0,1, +3,3,3,2,3,3,2,3,3,3,3,2,3,3,3,0,3,3,0,0,2,1,0,0,2,3,2,2,0,0,0,2, +2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,2,0,0,1, +3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,0,1,3,2,1,1,3,2,3,2,1,0,0,2, +2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, +3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,2,0,2,3,0,0,2,2,2,2,0,0,0,2, +3,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0, +3,3,3,3,3,3,3,2,2,2,2,3,2,3,3,0,3,3,1,1,2,2,0,0,2,2,3,2,0,0,1,3, +0,3,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1, +3,3,3,2,3,3,3,2,1,2,2,3,2,3,3,0,3,2,0,0,1,1,0,1,1,2,1,2,0,0,0,1, +0,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0, +3,3,3,2,3,3,2,3,2,2,2,3,3,3,3,1,3,1,1,0,3,2,1,1,3,3,2,3,1,0,0,1, +1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1, +3,2,2,3,3,0,3,3,3,3,3,3,3,2,2,1,0,3,3,1,3,3,0,1,3,3,2,3,0,3,0,3, +2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, +2,2,2,3,3,0,3,3,3,3,3,3,3,3,3,0,0,3,2,0,3,3,0,3,2,3,3,3,0,3,1,3, +2,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1, +3,3,3,1,2,3,3,1,0,0,1,0,0,3,3,2,3,0,0,2,0,0,2,0,2,0,0,0,2,0,2,0, +0,3,1,0,1,0,0,0,2,2,1,0,1,1,2,1,2,2,2,0,2,1,1,0,0,0,2,0,0,0,0,0, +1,2,1,3,3,0,3,3,3,3,3,2,3,0,0,0,0,2,3,0,2,3,1,0,2,3,1,3,0,3,0,2, +3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,1,3,3,2,2,3,2,2,0,1,2,3,0,1,2,1,0,1,0,0,0,1,0,2,2,0,0,0,1, +1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0, 
+3,3,3,1,3,3,1,1,3,3,1,1,3,3,1,0,2,1,2,0,2,1,0,0,1,1,2,1,0,0,0,2, +2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,1,0,2,1,3,0,0,2,0,0,3,3,0,3,0,0,1,0,1,2,0,0,1,1,2,2,0,1,0, +0,1,2,1,1,0,1,0,1,1,1,1,1,0,1,1,1,2,2,1,2,0,1,0,0,0,0,0,0,1,0,0, +3,3,3,2,3,2,3,3,0,2,2,2,3,3,3,0,3,0,0,0,2,2,0,1,2,1,1,1,0,0,0,1, +0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, +3,3,3,3,3,3,2,1,2,2,3,3,3,3,2,0,2,0,0,0,2,2,0,0,2,1,3,3,0,0,1,1, +1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0, +1,1,2,3,3,0,3,3,3,3,3,3,2,2,0,2,0,2,3,2,3,2,2,2,2,2,2,2,1,3,2,3, +2,0,2,1,2,2,2,2,1,1,2,2,1,2,2,1,2,0,0,2,1,1,0,2,1,0,0,1,0,0,0,1, +2,3,3,1,1,1,0,1,1,1,2,3,2,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0, +0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,2,2,2,3,2,3,2,2,1,3,3,3,0,2,1,2,0,2,1,0,0,1,1,1,1,1,0,0,1, +2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0, +3,3,3,2,3,3,3,3,3,2,3,1,2,3,3,1,2,0,0,0,0,0,0,0,3,2,1,1,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, +3,3,3,2,2,3,3,2,1,1,1,1,1,3,3,0,3,1,0,0,1,1,0,0,3,1,2,1,0,0,0,0, +0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0, +3,3,3,2,2,3,2,2,2,3,2,1,1,3,3,0,3,0,0,0,0,1,0,0,3,1,1,2,0,0,0,1, +1,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, +1,1,1,3,3,0,3,3,3,3,3,2,2,2,1,2,0,2,1,2,2,1,1,0,1,2,2,2,2,2,2,2, +0,0,2,1,2,1,2,1,0,1,1,3,1,2,1,1,2,0,0,2,0,1,0,1,0,1,0,0,0,1,0,1, +3,3,3,1,3,3,3,0,1,1,0,2,2,3,1,0,3,0,0,0,1,0,0,0,1,0,0,1,0,1,0,0, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,2,0,0,2,2,1,0,0,1,0,0,3,3,1,3,0,0,1,1,0,2,0,3,0,0,0,2,0,1,1, +0,1,2,0,1,2,2,0,2,2,2,2,1,0,2,1,1,0,2,0,2,1,2,0,0,0,0,0,0,0,0,0, +3,3,3,1,3,2,3,2,0,2,2,2,1,3,2,0,2,1,2,0,1,2,0,0,1,0,2,2,0,0,0,2, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0, +3,3,3,0,3,3,1,1,2,3,1,0,3,2,3,0,3,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0, +1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
+0,0,0,3,3,0,3,3,2,3,3,2,2,0,0,0,0,1,2,0,1,3,0,0,0,3,1,1,0,3,0,2, +2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,1,2,2,1,0,3,1,1,1,1,3,3,2,3,0,0,1,0,1,2,0,2,2,0,2,2,0,2,1, +0,2,2,1,1,1,1,0,2,1,1,0,1,1,1,1,2,1,2,1,2,0,1,0,1,0,0,0,0,0,0,0, +3,3,3,0,1,1,3,0,0,1,1,0,0,2,2,0,3,0,0,1,1,0,1,0,0,0,0,0,2,0,0,0, +0,3,1,0,1,0,1,0,2,0,0,1,0,1,0,1,1,1,2,1,1,0,2,0,0,0,0,0,0,0,0,0, +3,3,3,0,2,0,2,0,1,1,1,0,0,3,3,0,2,0,0,1,0,0,2,1,1,0,1,0,1,0,1,0, +0,2,0,1,2,0,2,0,2,1,1,0,1,0,2,1,1,0,2,1,1,0,1,0,0,0,1,1,0,0,0,0, +3,2,3,0,1,0,0,0,0,0,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,0,2,0,0,0, +0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,2,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,0,0,2,3,0,0,1,0,1,0,2,3,2,3,0,0,1,3,0,2,1,0,0,0,0,2,0,1,0, +0,2,1,0,0,1,1,0,2,1,0,0,1,0,0,1,1,0,1,1,2,0,1,0,0,0,0,1,0,0,0,0, +3,2,2,0,0,1,1,0,0,0,0,0,0,3,1,1,1,0,0,0,0,0,1,0,0,0,0,0,2,0,1,0, +0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,3,3,0,2,3,2,2,1,2,2,1,1,2,0,1,3,2,2,2,0,0,2,2,0,0,0,1,2,1, +3,0,2,1,1,0,1,1,1,0,1,2,2,2,1,1,2,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0, +0,1,1,2,3,0,3,3,3,2,2,2,2,1,0,1,0,1,0,1,2,2,0,0,2,2,1,3,1,1,2,1, +0,0,1,1,2,0,1,1,0,0,1,2,0,2,1,1,2,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0, +3,3,2,0,0,3,1,0,0,0,0,0,0,3,2,1,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0, +0,2,1,1,0,0,1,0,1,2,0,0,1,1,0,0,2,1,1,1,1,0,2,0,0,0,0,0,0,0,0,0, +3,3,2,0,0,1,0,0,0,0,1,0,0,3,3,2,2,0,0,1,0,0,2,0,1,0,0,0,2,0,1,0, +0,0,1,1,0,0,2,0,2,1,0,0,1,1,2,1,2,0,2,1,2,1,1,1,0,0,1,1,0,0,0,0, +3,3,2,0,0,2,2,0,0,0,1,1,0,2,2,1,3,1,0,1,0,1,2,0,0,0,0,0,1,0,1,0, +0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,2,0,0,0,1,0,0,1,0,0,2,3,1,2,0,0,1,0,0,2,0,0,0,1,0,2,0,2,0, +0,1,1,2,2,1,2,0,2,1,1,0,0,1,1,0,1,1,1,1,2,1,1,0,0,0,0,0,0,0,0,0, +3,3,3,0,2,1,2,1,0,0,1,1,0,3,3,1,2,0,0,1,0,0,2,0,2,0,1,1,2,0,0,0, +0,0,1,1,1,1,2,0,1,1,0,1,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0, +3,3,3,0,2,2,3,2,0,0,1,0,0,2,3,1,0,0,0,0,0,0,2,0,2,0,0,0,2,0,0,0, +0,1,1,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0, 
+3,2,3,0,0,0,0,0,0,0,1,0,0,2,2,2,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0, +0,0,2,1,1,0,1,0,2,1,1,0,0,1,1,2,1,0,2,0,2,0,1,0,0,0,2,0,0,0,0,0, +0,0,0,2,2,0,2,1,1,1,1,2,2,0,0,1,0,1,0,0,1,3,0,0,0,0,1,0,0,2,1,0, +0,0,1,0,1,0,0,0,0,0,2,1,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, +2,0,0,2,3,0,2,3,1,2,2,0,2,0,0,2,0,2,1,1,1,2,1,0,0,1,2,1,1,2,1,0, +1,0,2,0,1,0,1,1,0,0,2,2,1,2,1,1,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,0,2,1,2,0,0,0,1,0,0,3,2,0,1,0,0,1,0,0,2,0,0,0,1,2,1,0,1,0, +0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,2,2,0,2,2,1,1,0,1,1,1,1,1,0,0,1,2,1,1,1,0,1,0,0,0,1,1,1,1, +0,0,2,1,0,1,1,1,0,1,1,2,1,2,1,1,2,0,1,1,2,1,0,2,0,0,0,0,0,0,0,0, +3,2,2,0,0,2,0,0,0,0,0,0,0,2,2,0,2,0,0,1,0,0,2,0,0,0,0,0,2,0,0,0, +0,2,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,3,2,0,2,2,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0, +2,0,1,0,1,0,1,1,0,0,1,2,0,1,0,1,1,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0, +2,2,2,0,1,1,0,0,0,1,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,1,2,0,1,0, +0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,2,1,0,1,1,1,0,0,0,0,1,2,0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0, +1,1,2,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,1, +0,0,1,2,2,0,2,1,2,1,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,0,0,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0, +2,2,2,0,0,0,1,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, +0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
+0,0,1,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +) + +Latin5TurkishModel = { + 'char_to_order_map': Latin5_TurkishCharToOrderMap, + 'precedence_matrix': TurkishLangModel, + 'typical_positive_ratio': 0.970290, + 'keep_english_letter': True, + 'charset_name': "ISO-8859-9", + 'language': 'Turkish', +} diff --git a/Shared/lib/python3.4/site-packages/chardet/latin1prober.py b/Shared/lib/python3.4/site-packages/chardet/latin1prober.py index eef3573..7d1e8c2 100644 --- a/Shared/lib/python3.4/site-packages/chardet/latin1prober.py +++ b/Shared/lib/python3.4/site-packages/chardet/latin1prober.py @@ -27,8 +27,7 @@ ######################### END LICENSE BLOCK ######################### from .charsetprober import CharSetProber -from .constants import eNotMe -from .compat import wrap_ord +from .enums import ProbingState FREQ_CAT_NUM = 4 @@ -82,7 +81,7 @@ Latin1_CharToClass = ( # 2 : normal # 3 : very likely Latin1ClassModel = ( - # UDF OTH ASC ASS ACV ACO ASV ASO +# UDF OTH ASC ASS ACV ACO ASV ASO 0, 0, 0, 0, 0, 0, 0, 0, # UDF 0, 3, 3, 3, 3, 3, 3, 3, # OTH 0, 3, 3, 3, 3, 3, 3, 3, # ASC @@ -96,40 +95,47 @@ Latin1ClassModel = ( class Latin1Prober(CharSetProber): def __init__(self): - CharSetProber.__init__(self) + super(Latin1Prober, self).__init__() + self._last_char_class = None + self._freq_counter = None self.reset() def reset(self): - self._mLastCharClass = OTH - self._mFreqCounter = [0] * FREQ_CAT_NUM + self._last_char_class = OTH + self._freq_counter = [0] * FREQ_CAT_NUM CharSetProber.reset(self) - def get_charset_name(self): - return "windows-1252" + @property + def charset_name(self): + return "ISO-8859-1" - def feed(self, aBuf): - aBuf = self.filter_with_english_letters(aBuf) - for c in aBuf: - charClass = Latin1_CharToClass[wrap_ord(c)] - freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM) - + charClass] + @property + def language(self): + return "" + + def feed(self, byte_str): + 
byte_str = self.filter_with_english_letters(byte_str) + for c in byte_str: + char_class = Latin1_CharToClass[c] + freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + + char_class] if freq == 0: - self._mState = eNotMe + self._state = ProbingState.NOT_ME break - self._mFreqCounter[freq] += 1 - self._mLastCharClass = charClass + self._freq_counter[freq] += 1 + self._last_char_class = char_class - return self.get_state() + return self.state def get_confidence(self): - if self.get_state() == eNotMe: + if self.state == ProbingState.NOT_ME: return 0.01 - total = sum(self._mFreqCounter) + total = sum(self._freq_counter) if total < 0.01: confidence = 0.0 else: - confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0) + confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0) / total) if confidence < 0.0: confidence = 0.0 diff --git a/Shared/lib/python3.4/site-packages/chardet/mbcharsetprober.py b/Shared/lib/python3.4/site-packages/chardet/mbcharsetprober.py index bb42f2f..6256ecf 100644 --- a/Shared/lib/python3.4/site-packages/chardet/mbcharsetprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/mbcharsetprober.py @@ -27,60 +27,65 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import sys -from . 
import constants from .charsetprober import CharSetProber +from .enums import ProbingState, MachineState class MultiByteCharSetProber(CharSetProber): - def __init__(self): - CharSetProber.__init__(self) - self._mDistributionAnalyzer = None - self._mCodingSM = None - self._mLastChar = [0, 0] + """ + MultiByteCharSetProber + """ + + def __init__(self, lang_filter=None): + super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter) + self.distribution_analyzer = None + self.coding_sm = None + self._last_char = [0, 0] def reset(self): - CharSetProber.reset(self) - if self._mCodingSM: - self._mCodingSM.reset() - if self._mDistributionAnalyzer: - self._mDistributionAnalyzer.reset() - self._mLastChar = [0, 0] + super(MultiByteCharSetProber, self).reset() + if self.coding_sm: + self.coding_sm.reset() + if self.distribution_analyzer: + self.distribution_analyzer.reset() + self._last_char = [0, 0] - def get_charset_name(self): - pass + @property + def charset_name(self): + raise NotImplementedError - def feed(self, aBuf): - aLen = len(aBuf) - for i in range(0, aLen): - codingState = self._mCodingSM.next_state(aBuf[i]) - if codingState == constants.eError: - if constants._debug: - sys.stderr.write(self.get_charset_name() - + ' prober hit error at byte ' + str(i) - + '\n') - self._mState = constants.eNotMe + @property + def language(self): + raise NotImplementedError + + def feed(self, byte_str): + for i in range(len(byte_str)): + coding_state = self.coding_sm.next_state(byte_str[i]) + if coding_state == MachineState.ERROR: + self.logger.debug('%s %s prober hit error at byte %s', + self.charset_name, self.language, i) + self._state = ProbingState.NOT_ME break - elif codingState == constants.eItsMe: - self._mState = constants.eFoundIt + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT break - elif codingState == constants.eStart: - charLen = self._mCodingSM.get_current_charlen() + elif coding_state == MachineState.START: + char_len = 
self.coding_sm.get_current_charlen() if i == 0: - self._mLastChar[1] = aBuf[0] - self._mDistributionAnalyzer.feed(self._mLastChar, charLen) + self._last_char[1] = byte_str[0] + self.distribution_analyzer.feed(self._last_char, char_len) else: - self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], - charLen) + self.distribution_analyzer.feed(byte_str[i - 1:i + 1], + char_len) - self._mLastChar[0] = aBuf[aLen - 1] + self._last_char[0] = byte_str[-1] - if self.get_state() == constants.eDetecting: - if (self._mDistributionAnalyzer.got_enough_data() and - (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): - self._mState = constants.eFoundIt + if self.state == ProbingState.DETECTING: + if (self.distribution_analyzer.got_enough_data() and + (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + self._state = ProbingState.FOUND_IT - return self.get_state() + return self.state def get_confidence(self): - return self._mDistributionAnalyzer.get_confidence() + return self.distribution_analyzer.get_confidence() diff --git a/Shared/lib/python3.4/site-packages/chardet/mbcsgroupprober.py b/Shared/lib/python3.4/site-packages/chardet/mbcsgroupprober.py index 03c9dcf..530abe7 100644 --- a/Shared/lib/python3.4/site-packages/chardet/mbcsgroupprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/mbcsgroupprober.py @@ -39,9 +39,9 @@ from .euctwprober import EUCTWProber class MBCSGroupProber(CharSetGroupProber): - def __init__(self): - CharSetGroupProber.__init__(self) - self._mProbers = [ + def __init__(self, lang_filter=None): + super(MBCSGroupProber, self).__init__(lang_filter=lang_filter) + self.probers = [ UTF8Prober(), SJISProber(), EUCJPProber(), diff --git a/Shared/lib/python3.4/site-packages/chardet/mbcssm.py b/Shared/lib/python3.4/site-packages/chardet/mbcssm.py index efe678c..8360d0f 100644 --- a/Shared/lib/python3.4/site-packages/chardet/mbcssm.py +++ b/Shared/lib/python3.4/site-packages/chardet/mbcssm.py @@ -25,11 +25,11 @@ # 02110-1301 USA ######################### 
END LICENSE BLOCK ######################### -from .constants import eStart, eError, eItsMe +from .enums import MachineState # BIG5 -BIG5_cls = ( +BIG5_CLS = ( 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 @@ -64,23 +64,23 @@ BIG5_cls = ( 3,3,3,3,3,3,3,0 # f8 - ff ) -BIG5_st = ( - eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07 - eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f - eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17 +BIG5_ST = ( + MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f + MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17 ) -Big5CharLenTable = (0, 1, 1, 2, 0) +BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0) -Big5SMModel = {'classTable': BIG5_cls, - 'classFactor': 5, - 'stateTable': BIG5_st, - 'charLenTable': Big5CharLenTable, - 'name': 'Big5'} +BIG5_SM_MODEL = {'class_table': BIG5_CLS, + 'class_factor': 5, + 'state_table': BIG5_ST, + 'char_len_table': BIG5_CHAR_LEN_TABLE, + 'name': 'Big5'} # CP949 -CP949_cls = ( +CP949_CLS = ( 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f 1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f @@ -99,28 +99,28 @@ CP949_cls = ( 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff ) -CP949_st = ( +CP949_ST = ( #cls= 0 1 2 3 4 5 6 7 8 9 # previous state = - eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart - eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError - eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe - 
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3 - eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4 - eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5 - eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6 + MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3 + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4 + MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5 + MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6 ) -CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2) +CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2) -CP949SMModel = {'classTable': CP949_cls, - 'classFactor': 10, - 'stateTable': CP949_st, - 'charLenTable': CP949CharLenTable, - 'name': 'CP949'} +CP949_SM_MODEL = {'class_table': CP949_CLS, + 'class_factor': 
10, + 'state_table': CP949_ST, + 'char_len_table': CP949_CHAR_LEN_TABLE, + 'name': 'CP949'} # EUC-JP -EUCJP_cls = ( +EUCJP_CLS = ( 4,4,4,4,4,4,4,4, # 00 - 07 4,4,4,4,4,4,5,5, # 08 - 0f 4,4,4,4,4,4,4,4, # 10 - 17 @@ -155,25 +155,25 @@ EUCJP_cls = ( 0,0,0,0,0,0,0,5 # f8 - ff ) -EUCJP_st = ( - 3, 4, 3, 5,eStart,eError,eError,eError,#00-07 - eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17 - eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f - 3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27 +EUCJP_ST = ( + 3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17 + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f + 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27 ) -EUCJPCharLenTable = (2, 2, 2, 3, 1, 0) +EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0) -EUCJPSMModel = {'classTable': EUCJP_cls, - 'classFactor': 6, - 'stateTable': EUCJP_st, - 'charLenTable': EUCJPCharLenTable, - 'name': 'EUC-JP'} +EUCJP_SM_MODEL = {'class_table': EUCJP_CLS, + 'class_factor': 6, + 'state_table': EUCJP_ST, + 'char_len_table': EUCJP_CHAR_LEN_TABLE, + 'name': 'EUC-JP'} # EUC-KR -EUCKR_cls = ( +EUCKR_CLS = ( 1,1,1,1,1,1,1,1, # 00 - 07 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 @@ -208,22 +208,22 @@ EUCKR_cls = ( 2,2,2,2,2,2,2,0 # f8 - ff ) -EUCKR_st = ( - eError,eStart, 3,eError,eError,eError,eError,eError,#00-07 - 
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f +EUCKR_ST = ( + MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f ) -EUCKRCharLenTable = (0, 1, 2, 0) +EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0) -EUCKRSMModel = {'classTable': EUCKR_cls, - 'classFactor': 4, - 'stateTable': EUCKR_st, - 'charLenTable': EUCKRCharLenTable, +EUCKR_SM_MODEL = {'class_table': EUCKR_CLS, + 'class_factor': 4, + 'state_table': EUCKR_ST, + 'char_len_table': EUCKR_CHAR_LEN_TABLE, 'name': 'EUC-KR'} # EUC-TW -EUCTW_cls = ( +EUCTW_CLS = ( 2,2,2,2,2,2,2,2, # 00 - 07 2,2,2,2,2,2,0,0, # 08 - 0f 2,2,2,2,2,2,2,2, # 10 - 17 @@ -258,26 +258,26 @@ EUCTW_cls = ( 3,3,3,3,3,3,3,0 # f8 - ff ) -EUCTW_st = ( - eError,eError,eStart, 3, 3, 3, 4,eError,#00-07 - eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17 - eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f - 5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27 - eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f +EUCTW_ST = ( + MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17 + MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f + 
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27 + MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f ) -EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3) +EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3) -EUCTWSMModel = {'classTable': EUCTW_cls, - 'classFactor': 7, - 'stateTable': EUCTW_st, - 'charLenTable': EUCTWCharLenTable, +EUCTW_SM_MODEL = {'class_table': EUCTW_CLS, + 'class_factor': 7, + 'state_table': EUCTW_ST, + 'char_len_table': EUCTW_CHAR_LEN_TABLE, 'name': 'x-euc-tw'} # GB2312 -GB2312_cls = ( +GB2312_CLS = ( 1,1,1,1,1,1,1,1, # 00 - 07 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 @@ -312,31 +312,31 @@ GB2312_cls = ( 6,6,6,6,6,6,6,0 # f8 - ff ) -GB2312_st = ( - eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07 - eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17 - 4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f - eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27 - eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f +GB2312_ST = ( + MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17 + 4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f + MachineState.ERROR,MachineState.ERROR, 
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27 + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f ) # To be accurate, the length of class 6 can be either 2 or 4. # But it is not necessary to discriminate between the two since -# it is used for frequency analysis only, and we are validing +# it is used for frequency analysis only, and we are validating # each code range there as well. So it is safe to set it to be # 2 here. -GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2) +GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2) -GB2312SMModel = {'classTable': GB2312_cls, - 'classFactor': 7, - 'stateTable': GB2312_st, - 'charLenTable': GB2312CharLenTable, - 'name': 'GB2312'} +GB2312_SM_MODEL = {'class_table': GB2312_CLS, + 'class_factor': 7, + 'state_table': GB2312_ST, + 'char_len_table': GB2312_CHAR_LEN_TABLE, + 'name': 'GB2312'} # Shift_JIS -SJIS_cls = ( +SJIS_CLS = ( 1,1,1,1,1,1,1,1, # 00 - 07 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 @@ -373,23 +373,23 @@ SJIS_cls = ( 3,3,3,3,3,0,0,0) # f8 - ff -SJIS_st = ( - eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07 - eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17 +SJIS_ST = ( + MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17 ) -SJISCharLenTable = (0, 1, 1, 2, 0, 0) +SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0) -SJISSMModel = {'classTable': SJIS_cls, - 
'classFactor': 6, - 'stateTable': SJIS_st, - 'charLenTable': SJISCharLenTable, +SJIS_SM_MODEL = {'class_table': SJIS_CLS, + 'class_factor': 6, + 'state_table': SJIS_ST, + 'char_len_table': SJIS_CHAR_LEN_TABLE, 'name': 'Shift_JIS'} # UCS2-BE -UCS2BE_cls = ( +UCS2BE_CLS = ( 0,0,0,0,0,0,0,0, # 00 - 07 0,0,1,0,0,2,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 @@ -424,27 +424,27 @@ UCS2BE_cls = ( 0,0,0,0,0,0,4,5 # f8 - ff ) -UCS2BE_st = ( - 5, 7, 7,eError, 4, 3,eError,eError,#00-07 - eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17 - 6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f - 6, 6, 6, 6, 5, 7, 7,eError,#20-27 - 5, 8, 6, 6,eError, 6, 6, 6,#28-2f - 6, 6, 6, 6,eError,eError,eStart,eStart #30-37 +UCS2BE_ST = ( + 5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17 + 6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f + 6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27 + 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f + 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37 ) -UCS2BECharLenTable = (2, 2, 2, 0, 2, 2) +UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2) -UCS2BESMModel = {'classTable': UCS2BE_cls, - 'classFactor': 6, - 'stateTable': UCS2BE_st, - 'charLenTable': UCS2BECharLenTable, - 'name': 'UTF-16BE'} +UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS, + 'class_factor': 6, + 'state_table': UCS2BE_ST, + 'char_len_table': UCS2BE_CHAR_LEN_TABLE, + 'name': 'UTF-16BE'} # UCS2-LE -UCS2LE_cls = ( +UCS2LE_CLS = ( 0,0,0,0,0,0,0,0, # 00 - 07 0,0,1,0,0,2,0,0, # 08 - 0f 0,0,0,0,0,0,0,0, # 10 - 17 @@ -479,27 +479,27 @@ UCS2LE_cls = ( 0,0,0,0,0,0,4,5 # f8 - ff ) -UCS2LE_st = ( - 6, 6, 7, 6, 4, 3,eError,eError,#00-07 - 
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f - eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17 - 5, 5, 5,eError, 5,eError, 6, 6,#18-1f - 7, 6, 8, 8, 5, 5, 5,eError,#20-27 - 5, 5, 5,eError,eError,eError, 5, 5,#28-2f - 5, 5, 5,eError, 5,eError,eStart,eStart #30-37 +UCS2LE_ST = ( + 6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f + MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17 + 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f + 7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27 + 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f + 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37 ) -UCS2LECharLenTable = (2, 2, 2, 2, 2, 2) +UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2) -UCS2LESMModel = {'classTable': UCS2LE_cls, - 'classFactor': 6, - 'stateTable': UCS2LE_st, - 'charLenTable': UCS2LECharLenTable, +UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS, + 'class_factor': 6, + 'state_table': UCS2LE_ST, + 'char_len_table': UCS2LE_CHAR_LEN_TABLE, 'name': 'UTF-16LE'} # UTF-8 -UTF8_cls = ( +UTF8_CLS = ( 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value 1,1,1,1,1,1,0,0, # 08 - 0f 1,1,1,1,1,1,1,1, # 10 - 17 @@ -534,39 +534,39 @@ UTF8_cls = ( 12,13,13,13,14,15,0,0 # f8 - ff ) -UTF8_st = ( - eError,eStart,eError,eError,eError,eError, 12, 10,#00-07 +UTF8_ST = ( + MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07 9, 11, 8, 7, 6, 5, 4, 3,#08-0f - eError,eError,eError,eError,eError,eError,eError,eError,#10-17 - eError,eError,eError,eError,eError,eError,eError,eError,#18-1f - eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27 - 
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f - eError,eError, 5, 5, 5, 5,eError,eError,#30-37 - eError,eError,eError,eError,eError,eError,eError,eError,#38-3f - eError,eError,eError, 5, 5, 5,eError,eError,#40-47 - eError,eError,eError,eError,eError,eError,eError,eError,#48-4f - eError,eError, 7, 7, 7, 7,eError,eError,#50-57 - eError,eError,eError,eError,eError,eError,eError,eError,#58-5f - eError,eError,eError,eError, 7, 7,eError,eError,#60-67 - eError,eError,eError,eError,eError,eError,eError,eError,#68-6f - eError,eError, 9, 9, 9, 9,eError,eError,#70-77 - eError,eError,eError,eError,eError,eError,eError,eError,#78-7f - eError,eError,eError,eError,eError, 9,eError,eError,#80-87 - eError,eError,eError,eError,eError,eError,eError,eError,#88-8f - eError,eError, 12, 12, 12, 12,eError,eError,#90-97 - eError,eError,eError,eError,eError,eError,eError,eError,#98-9f - eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7 - eError,eError,eError,eError,eError,eError,eError,eError,#a8-af - eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7 - eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf - eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7 - eError,eError,eError,eError,eError,eError,eError,eError #c8-cf + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27 + MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f + MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 
5,MachineState.ERROR,MachineState.ERROR,#30-37 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f + MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f + MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f + MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 
12,MachineState.ERROR,MachineState.ERROR,#a0-a7 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af + MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf + MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7 + MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf ) -UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) +UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) -UTF8SMModel = {'classTable': UTF8_cls, - 'classFactor': 16, - 'stateTable': UTF8_st, - 'charLenTable': UTF8CharLenTable, - 'name': 'UTF-8'} +UTF8_SM_MODEL = {'class_table': UTF8_CLS, + 'class_factor': 16, + 'state_table': UTF8_ST, + 'char_len_table': UTF8_CHAR_LEN_TABLE, + 'name': 'UTF-8'} diff --git a/Shared/lib/python3.4/site-packages/chardet/sbcharsetprober.py b/Shared/lib/python3.4/site-packages/chardet/sbcharsetprober.py index 37291bd..0adb51d 100644 --- a/Shared/lib/python3.4/site-packages/chardet/sbcharsetprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/sbcharsetprober.py @@ -26,95 +26,107 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import sys -from . 
import constants from .charsetprober import CharSetProber -from .compat import wrap_ord - -SAMPLE_SIZE = 64 -SB_ENOUGH_REL_THRESHOLD = 1024 -POSITIVE_SHORTCUT_THRESHOLD = 0.95 -NEGATIVE_SHORTCUT_THRESHOLD = 0.05 -SYMBOL_CAT_ORDER = 250 -NUMBER_OF_SEQ_CAT = 4 -POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1 -#NEGATIVE_CAT = 0 +from .enums import CharacterCategory, ProbingState, SequenceLikelihood class SingleByteCharSetProber(CharSetProber): - def __init__(self, model, reversed=False, nameProber=None): - CharSetProber.__init__(self) - self._mModel = model + SAMPLE_SIZE = 64 + SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2 + POSITIVE_SHORTCUT_THRESHOLD = 0.95 + NEGATIVE_SHORTCUT_THRESHOLD = 0.05 + + def __init__(self, model, reversed=False, name_prober=None): + super(SingleByteCharSetProber, self).__init__() + self._model = model # TRUE if we need to reverse every pair in the model lookup - self._mReversed = reversed + self._reversed = reversed # Optional auxiliary prober for name decision - self._mNameProber = nameProber + self._name_prober = name_prober + self._last_order = None + self._seq_counters = None + self._total_seqs = None + self._total_char = None + self._freq_char = None self.reset() def reset(self): - CharSetProber.reset(self) + super(SingleByteCharSetProber, self).reset() # char order of last character - self._mLastOrder = 255 - self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT - self._mTotalSeqs = 0 - self._mTotalChar = 0 + self._last_order = 255 + self._seq_counters = [0] * SequenceLikelihood.get_num_categories() + self._total_seqs = 0 + self._total_char = 0 # characters that fall in our sampling range - self._mFreqChar = 0 + self._freq_char = 0 - def get_charset_name(self): - if self._mNameProber: - return self._mNameProber.get_charset_name() + @property + def charset_name(self): + if self._name_prober: + return self._name_prober.charset_name else: - return self._mModel['charsetName'] + return self._model['charset_name'] - def feed(self, aBuf): - if not 
self._mModel['keepEnglishLetter']: - aBuf = self.filter_without_english_letters(aBuf) - aLen = len(aBuf) - if not aLen: - return self.get_state() - for c in aBuf: - order = self._mModel['charToOrderMap'][wrap_ord(c)] - if order < SYMBOL_CAT_ORDER: - self._mTotalChar += 1 - if order < SAMPLE_SIZE: - self._mFreqChar += 1 - if self._mLastOrder < SAMPLE_SIZE: - self._mTotalSeqs += 1 - if not self._mReversed: - i = (self._mLastOrder * SAMPLE_SIZE) + order - model = self._mModel['precedenceMatrix'][i] + @property + def language(self): + if self._name_prober: + return self._name_prober.language + else: + return self._model.get('language') + + def feed(self, byte_str): + if not self._model['keep_english_letter']: + byte_str = self.filter_international_words(byte_str) + if not byte_str: + return self.state + char_to_order_map = self._model['char_to_order_map'] + for i, c in enumerate(byte_str): + # XXX: Order is in range 1-64, so one would think we want 0-63 here, + # but that leads to 27 more test failures than before. + order = char_to_order_map[c] + # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but + # CharacterCategory.SYMBOL is actually 253, so we use CONTROL + # to make it closer to the original intent. The only difference + # is whether or not we count digits and control characters for + # _total_char purposes. 
+ if order < CharacterCategory.CONTROL: + self._total_char += 1 + if order < self.SAMPLE_SIZE: + self._freq_char += 1 + if self._last_order < self.SAMPLE_SIZE: + self._total_seqs += 1 + if not self._reversed: + i = (self._last_order * self.SAMPLE_SIZE) + order + model = self._model['precedence_matrix'][i] else: # reverse the order of the letters in the lookup - i = (order * SAMPLE_SIZE) + self._mLastOrder - model = self._mModel['precedenceMatrix'][i] - self._mSeqCounters[model] += 1 - self._mLastOrder = order + i = (order * self.SAMPLE_SIZE) + self._last_order + model = self._model['precedence_matrix'][i] + self._seq_counters[model] += 1 + self._last_order = order - if self.get_state() == constants.eDetecting: - if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD: - cf = self.get_confidence() - if cf > POSITIVE_SHORTCUT_THRESHOLD: - if constants._debug: - sys.stderr.write('%s confidence = %s, we have a' - 'winner\n' % - (self._mModel['charsetName'], cf)) - self._mState = constants.eFoundIt - elif cf < NEGATIVE_SHORTCUT_THRESHOLD: - if constants._debug: - sys.stderr.write('%s confidence = %s, below negative' - 'shortcut threshhold %s\n' % - (self._mModel['charsetName'], cf, - NEGATIVE_SHORTCUT_THRESHOLD)) - self._mState = constants.eNotMe + charset_name = self._model['charset_name'] + if self.state == ProbingState.DETECTING: + if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD: + confidence = self.get_confidence() + if confidence > self.POSITIVE_SHORTCUT_THRESHOLD: + self.logger.debug('%s confidence = %s, we have a winner', + charset_name, confidence) + self._state = ProbingState.FOUND_IT + elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD: + self.logger.debug('%s confidence = %s, below negative ' + 'shortcut threshhold %s', charset_name, + confidence, + self.NEGATIVE_SHORTCUT_THRESHOLD) + self._state = ProbingState.NOT_ME - return self.get_state() + return self.state def get_confidence(self): r = 0.01 - if self._mTotalSeqs > 0: - r = ((1.0 * 
self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs - / self._mModel['mTypicalPositiveRatio']) - r = r * self._mFreqChar / self._mTotalChar + if self._total_seqs > 0: + r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) / + self._total_seqs / self._model['typical_positive_ratio']) + r = r * self._freq_char / self._total_char if r >= 1.0: r = 0.99 return r diff --git a/Shared/lib/python3.4/site-packages/chardet/sbcsgroupprober.py b/Shared/lib/python3.4/site-packages/chardet/sbcsgroupprober.py index 1b6196c..98e95dc 100644 --- a/Shared/lib/python3.4/site-packages/chardet/sbcsgroupprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/sbcsgroupprober.py @@ -33,16 +33,17 @@ from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel, Ibm866Model, Ibm855Model) from .langgreekmodel import Latin7GreekModel, Win1253GreekModel from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel -from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel +# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel from .langthaimodel import TIS620ThaiModel from .langhebrewmodel import Win1255HebrewModel from .hebrewprober import HebrewProber +from .langturkishmodel import Latin5TurkishModel class SBCSGroupProber(CharSetGroupProber): def __init__(self): - CharSetGroupProber.__init__(self) - self._mProbers = [ + super(SBCSGroupProber, self).__init__() + self.probers = [ SingleByteCharSetProber(Win1251CyrillicModel), SingleByteCharSetProber(Koi8rModel), SingleByteCharSetProber(Latin5CyrillicModel), @@ -53,17 +54,20 @@ class SBCSGroupProber(CharSetGroupProber): SingleByteCharSetProber(Win1253GreekModel), SingleByteCharSetProber(Latin5BulgarianModel), SingleByteCharSetProber(Win1251BulgarianModel), - SingleByteCharSetProber(Latin2HungarianModel), - SingleByteCharSetProber(Win1250HungarianModel), + # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250) + # after we retrain model. 
+ # SingleByteCharSetProber(Latin2HungarianModel), + # SingleByteCharSetProber(Win1250HungarianModel), SingleByteCharSetProber(TIS620ThaiModel), + SingleByteCharSetProber(Latin5TurkishModel), ] - hebrewProber = HebrewProber() - logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, - False, hebrewProber) - visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True, - hebrewProber) - hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber) - self._mProbers.extend([hebrewProber, logicalHebrewProber, - visualHebrewProber]) + hebrew_prober = HebrewProber() + logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, + False, hebrew_prober) + visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True, + hebrew_prober) + hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober) + self.probers.extend([hebrew_prober, logical_hebrew_prober, + visual_hebrew_prober]) self.reset() diff --git a/Shared/lib/python3.4/site-packages/chardet/sjisprober.py b/Shared/lib/python3.4/site-packages/chardet/sjisprober.py index cd0e9e7..9e29623 100644 --- a/Shared/lib/python3.4/site-packages/chardet/sjisprober.py +++ b/Shared/lib/python3.4/site-packages/chardet/sjisprober.py @@ -25,67 +25,68 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -import sys from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import SJISDistributionAnalysis from .jpcntx import SJISContextAnalysis -from .mbcssm import SJISSMModel -from . 
import constants +from .mbcssm import SJIS_SM_MODEL +from .enums import ProbingState, MachineState class SJISProber(MultiByteCharSetProber): def __init__(self): - MultiByteCharSetProber.__init__(self) - self._mCodingSM = CodingStateMachine(SJISSMModel) - self._mDistributionAnalyzer = SJISDistributionAnalysis() - self._mContextAnalyzer = SJISContextAnalysis() + super(SJISProber, self).__init__() + self.coding_sm = CodingStateMachine(SJIS_SM_MODEL) + self.distribution_analyzer = SJISDistributionAnalysis() + self.context_analyzer = SJISContextAnalysis() self.reset() def reset(self): - MultiByteCharSetProber.reset(self) - self._mContextAnalyzer.reset() + super(SJISProber, self).reset() + self.context_analyzer.reset() - def get_charset_name(self): - return self._mContextAnalyzer.get_charset_name() + @property + def charset_name(self): + return self.context_analyzer.charset_name - def feed(self, aBuf): - aLen = len(aBuf) - for i in range(0, aLen): - codingState = self._mCodingSM.next_state(aBuf[i]) - if codingState == constants.eError: - if constants._debug: - sys.stderr.write(self.get_charset_name() - + ' prober hit error at byte ' + str(i) - + '\n') - self._mState = constants.eNotMe + @property + def language(self): + return "Japanese" + + def feed(self, byte_str): + for i in range(len(byte_str)): + coding_state = self.coding_sm.next_state(byte_str[i]) + if coding_state == MachineState.ERROR: + self.logger.debug('%s %s prober hit error at byte %s', + self.charset_name, self.language, i) + self._state = ProbingState.NOT_ME break - elif codingState == constants.eItsMe: - self._mState = constants.eFoundIt + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT break - elif codingState == constants.eStart: - charLen = self._mCodingSM.get_current_charlen() + elif coding_state == MachineState.START: + char_len = self.coding_sm.get_current_charlen() if i == 0: - self._mLastChar[1] = aBuf[0] - self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:], 
- charLen) - self._mDistributionAnalyzer.feed(self._mLastChar, charLen) + self._last_char[1] = byte_str[0] + self.context_analyzer.feed(self._last_char[2 - char_len:], + char_len) + self.distribution_analyzer.feed(self._last_char, char_len) else: - self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3 - - charLen], charLen) - self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], - charLen) + self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3 + - char_len], char_len) + self.distribution_analyzer.feed(byte_str[i - 1:i + 1], + char_len) - self._mLastChar[0] = aBuf[aLen - 1] + self._last_char[0] = byte_str[-1] - if self.get_state() == constants.eDetecting: - if (self._mContextAnalyzer.got_enough_data() and - (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): - self._mState = constants.eFoundIt + if self.state == ProbingState.DETECTING: + if (self.context_analyzer.got_enough_data() and + (self.get_confidence() > self.SHORTCUT_THRESHOLD)): + self._state = ProbingState.FOUND_IT - return self.get_state() + return self.state def get_confidence(self): - contxtCf = self._mContextAnalyzer.get_confidence() - distribCf = self._mDistributionAnalyzer.get_confidence() - return max(contxtCf, distribCf) + context_conf = self.context_analyzer.get_confidence() + distrib_conf = self.distribution_analyzer.get_confidence() + return max(context_conf, distrib_conf) diff --git a/Shared/lib/python3.4/site-packages/chardet/universaldetector.py b/Shared/lib/python3.4/site-packages/chardet/universaldetector.py index 476522b..7b4e92d 100644 --- a/Shared/lib/python3.4/site-packages/chardet/universaldetector.py +++ b/Shared/lib/python3.4/site-packages/chardet/universaldetector.py @@ -25,146 +25,262 @@ # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### +""" +Module containing the UniversalDetector detector class, which is the primary +class a user of ``chardet`` should use. 
+ +:author: Mark Pilgrim (initial port to Python) +:author: Shy Shalom (original C code) +:author: Dan Blanchard (major refactoring for 3.0) +:author: Ian Cordasco +""" + -from . import constants -import sys import codecs -from .latin1prober import Latin1Prober # windows-1252 -from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets -from .sbcsgroupprober import SBCSGroupProber # single-byte character sets -from .escprober import EscCharSetProber # ISO-2122, etc. +import logging import re -MINIMUM_THRESHOLD = 0.20 -ePureAscii = 0 -eEscAscii = 1 -eHighbyte = 2 +from .charsetgroupprober import CharSetGroupProber +from .enums import InputState, LanguageFilter, ProbingState +from .escprober import EscCharSetProber +from .latin1prober import Latin1Prober +from .mbcsgroupprober import MBCSGroupProber +from .sbcsgroupprober import SBCSGroupProber -class UniversalDetector: - def __init__(self): - self._highBitDetector = re.compile(b'[\x80-\xFF]') - self._escDetector = re.compile(b'(\033|~{)') - self._mEscCharSetProber = None - self._mCharSetProbers = [] +class UniversalDetector(object): + """ + The ``UniversalDetector`` class underlies the ``chardet.detect`` function + and coordinates all of the different charset probers. + + To get a ``dict`` containing an encoding and its confidence, you can simply + run: + + .. 
code:: + + u = UniversalDetector() + u.feed(some_bytes) + u.close() + detected = u.result + + """ + + MINIMUM_THRESHOLD = 0.20 + HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]') + ESC_DETECTOR = re.compile(b'(\033|~{)') + WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]') + ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252', + 'iso-8859-2': 'Windows-1250', + 'iso-8859-5': 'Windows-1251', + 'iso-8859-6': 'Windows-1256', + 'iso-8859-7': 'Windows-1253', + 'iso-8859-8': 'Windows-1255', + 'iso-8859-9': 'Windows-1254', + 'iso-8859-13': 'Windows-1257'} + + def __init__(self, lang_filter=LanguageFilter.ALL): + self._esc_charset_prober = None + self._charset_probers = [] + self.result = None + self.done = None + self._got_data = None + self._input_state = None + self._last_char = None + self.lang_filter = lang_filter + self.logger = logging.getLogger(__name__) + self._has_win_bytes = None self.reset() def reset(self): - self.result = {'encoding': None, 'confidence': 0.0} + """ + Reset the UniversalDetector and all of its probers back to their + initial states. This is called by ``__init__``, so you only need to + call this directly in between analyses of different documents. + """ + self.result = {'encoding': None, 'confidence': 0.0, 'language': None} self.done = False - self._mStart = True - self._mGotData = False - self._mInputState = ePureAscii - self._mLastChar = b'' - if self._mEscCharSetProber: - self._mEscCharSetProber.reset() - for prober in self._mCharSetProbers: + self._got_data = False + self._has_win_bytes = False + self._input_state = InputState.PURE_ASCII + self._last_char = b'' + if self._esc_charset_prober: + self._esc_charset_prober.reset() + for prober in self._charset_probers: prober.reset() - def feed(self, aBuf): + def feed(self, byte_str): + """ + Takes a chunk of a document and feeds it through all of the relevant + charset probers. 
+ + After calling ``feed``, you can check the value of the ``done`` + attribute to see if you need to continue feeding the + ``UniversalDetector`` more data, or if it has made a prediction + (in the ``result`` attribute). + + .. note:: + You should always call ``close`` when you're done feeding in your + document if ``done`` is not already ``True``. + """ if self.done: return - aLen = len(aBuf) - if not aLen: + if not len(byte_str): return - if not self._mGotData: + if not isinstance(byte_str, bytearray): + byte_str = bytearray(byte_str) + + # First check for known BOMs, since these are guaranteed to be correct + if not self._got_data: # If the data starts with BOM, we know it is UTF - if aBuf[:3] == codecs.BOM_UTF8: + if byte_str.startswith(codecs.BOM_UTF8): # EF BB BF UTF-8 with BOM - self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0} - elif aBuf[:4] == codecs.BOM_UTF32_LE: + self.result = {'encoding': "UTF-8-SIG", + 'confidence': 1.0, + 'language': ''} + elif byte_str.startswith((codecs.BOM_UTF32_LE, + codecs.BOM_UTF32_BE)): # FF FE 00 00 UTF-32, little-endian BOM - self.result = {'encoding': "UTF-32LE", 'confidence': 1.0} - elif aBuf[:4] == codecs.BOM_UTF32_BE: # 00 00 FE FF UTF-32, big-endian BOM - self.result = {'encoding': "UTF-32BE", 'confidence': 1.0} - elif aBuf[:4] == b'\xFE\xFF\x00\x00': + self.result = {'encoding': "UTF-32", + 'confidence': 1.0, + 'language': ''} + elif byte_str.startswith(b'\xFE\xFF\x00\x00'): # FE FF 00 00 UCS-4, unusual octet order BOM (3412) - self.result = { - 'encoding': "X-ISO-10646-UCS-4-3412", - 'confidence': 1.0 - } - elif aBuf[:4] == b'\x00\x00\xFF\xFE': + self.result = {'encoding': "X-ISO-10646-UCS-4-3412", + 'confidence': 1.0, + 'language': ''} + elif byte_str.startswith(b'\x00\x00\xFF\xFE'): # 00 00 FF FE UCS-4, unusual octet order BOM (2143) - self.result = { - 'encoding': "X-ISO-10646-UCS-4-2143", - 'confidence': 1.0 - } - elif aBuf[:2] == codecs.BOM_LE: + self.result = {'encoding': "X-ISO-10646-UCS-4-2143", + 
'confidence': 1.0, + 'language': ''} + elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)): # FF FE UTF-16, little endian BOM - self.result = {'encoding': "UTF-16LE", 'confidence': 1.0} - elif aBuf[:2] == codecs.BOM_BE: # FE FF UTF-16, big endian BOM - self.result = {'encoding': "UTF-16BE", 'confidence': 1.0} + self.result = {'encoding': "UTF-16", + 'confidence': 1.0, + 'language': ''} - self._mGotData = True - if self.result['encoding'] and (self.result['confidence'] > 0.0): - self.done = True - return - - if self._mInputState == ePureAscii: - if self._highBitDetector.search(aBuf): - self._mInputState = eHighbyte - elif ((self._mInputState == ePureAscii) and - self._escDetector.search(self._mLastChar + aBuf)): - self._mInputState = eEscAscii - - self._mLastChar = aBuf[-1:] - - if self._mInputState == eEscAscii: - if not self._mEscCharSetProber: - self._mEscCharSetProber = EscCharSetProber() - if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt: - self.result = {'encoding': self._mEscCharSetProber.get_charset_name(), - 'confidence': self._mEscCharSetProber.get_confidence()} + self._got_data = True + if self.result['encoding'] is not None: self.done = True - elif self._mInputState == eHighbyte: - if not self._mCharSetProbers: - self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(), - Latin1Prober()] - for prober in self._mCharSetProbers: - if prober.feed(aBuf) == constants.eFoundIt: - self.result = {'encoding': prober.get_charset_name(), - 'confidence': prober.get_confidence()} + return + + # If none of those matched and we've only see ASCII so far, check + # for high bytes and escape sequences + if self._input_state == InputState.PURE_ASCII: + if self.HIGH_BYTE_DETECTOR.search(byte_str): + self._input_state = InputState.HIGH_BYTE + elif self._input_state == InputState.PURE_ASCII and \ + self.ESC_DETECTOR.search(self._last_char + byte_str): + self._input_state = InputState.ESC_ASCII + + self._last_char = byte_str[-1:] + + # If we've seen escape 
sequences, use the EscCharSetProber, which + # uses a simple state machine to check for known escape sequences in + # HZ and ISO-2022 encodings, since those are the only encodings that + # use such sequences. + if self._input_state == InputState.ESC_ASCII: + if not self._esc_charset_prober: + self._esc_charset_prober = EscCharSetProber(self.lang_filter) + if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT: + self.result = {'encoding': + self._esc_charset_prober.charset_name, + 'confidence': + self._esc_charset_prober.get_confidence(), + 'language': + self._esc_charset_prober.language} + self.done = True + # If we've seen high bytes (i.e., those with values greater than 127), + # we need to do more complicated checks using all our multi-byte and + # single-byte probers that are left. The single-byte probers + # use character bigram distributions to determine the encoding, whereas + # the multi-byte probers use a combination of character unigram and + # bigram distributions. + elif self._input_state == InputState.HIGH_BYTE: + if not self._charset_probers: + self._charset_probers = [MBCSGroupProber(self.lang_filter)] + # If we're checking non-CJK encodings, use single-byte prober + if self.lang_filter & LanguageFilter.NON_CJK: + self._charset_probers.append(SBCSGroupProber()) + self._charset_probers.append(Latin1Prober()) + for prober in self._charset_probers: + if prober.feed(byte_str) == ProbingState.FOUND_IT: + self.result = {'encoding': prober.charset_name, + 'confidence': prober.get_confidence(), + 'language': prober.language} self.done = True break + if self.WIN_BYTE_DETECTOR.search(byte_str): + self._has_win_bytes = True def close(self): + """ + Stop analyzing the current document and come up with a final + prediction. + + :returns: The ``result`` attribute, a ``dict`` with the keys + `encoding`, `confidence`, and `language`. 
+ """ + # Don't bother with checks if we're already done if self.done: - return - if not self._mGotData: - if constants._debug: - sys.stderr.write('no data received!\n') - return + return self.result self.done = True - if self._mInputState == ePureAscii: - self.result = {'encoding': 'ascii', 'confidence': 1.0} - return self.result + if not self._got_data: + self.logger.debug('no data received!') - if self._mInputState == eHighbyte: - proberConfidence = None - maxProberConfidence = 0.0 - maxProber = None - for prober in self._mCharSetProbers: + # Default to ASCII if it is all we've seen so far + elif self._input_state == InputState.PURE_ASCII: + self.result = {'encoding': 'ascii', + 'confidence': 1.0, + 'language': ''} + + # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD + elif self._input_state == InputState.HIGH_BYTE: + prober_confidence = None + max_prober_confidence = 0.0 + max_prober = None + for prober in self._charset_probers: if not prober: continue - proberConfidence = prober.get_confidence() - if proberConfidence > maxProberConfidence: - maxProberConfidence = proberConfidence - maxProber = prober - if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD): - self.result = {'encoding': maxProber.get_charset_name(), - 'confidence': maxProber.get_confidence()} - return self.result + prober_confidence = prober.get_confidence() + if prober_confidence > max_prober_confidence: + max_prober_confidence = prober_confidence + max_prober = prober + if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD): + charset_name = max_prober.charset_name + lower_charset_name = max_prober.charset_name.lower() + confidence = max_prober.get_confidence() + # Use Windows encoding name instead of ISO-8859 if we saw any + # extra Windows-specific bytes + if lower_charset_name.startswith('iso-8859'): + if self._has_win_bytes: + charset_name = self.ISO_WIN_MAP.get(lower_charset_name, + charset_name) + self.result = {'encoding': charset_name, + 
'confidence': confidence, + 'language': max_prober.language} - if constants._debug: - sys.stderr.write('no probers hit minimum threshhold\n') - for prober in self._mCharSetProbers[0].mProbers: - if not prober: - continue - sys.stderr.write('%s confidence = %s\n' % - (prober.get_charset_name(), - prober.get_confidence())) + # Log all prober confidences if none met MINIMUM_THRESHOLD + if self.logger.getEffectiveLevel() == logging.DEBUG: + if self.result['encoding'] is None: + self.logger.debug('no probers hit minimum threshold') + for group_prober in self._charset_probers: + if not group_prober: + continue + if isinstance(group_prober, CharSetGroupProber): + for prober in group_prober.probers: + self.logger.debug('%s %s confidence = %s', + prober.charset_name, + prober.language, + prober.get_confidence()) + else: + self.logger.debug('%s %s confidence = %s', + prober.charset_name, + prober.language, + prober.get_confidence()) + return self.result diff --git a/Shared/lib/python3.4/site-packages/chardet/utf8prober.py b/Shared/lib/python3.4/site-packages/chardet/utf8prober.py index 1c0bb5d..6c3196c 100644 --- a/Shared/lib/python3.4/site-packages/chardet/utf8prober.py +++ b/Shared/lib/python3.4/site-packages/chardet/utf8prober.py @@ -25,52 +25,58 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -from . 
import constants from .charsetprober import CharSetProber +from .enums import ProbingState, MachineState from .codingstatemachine import CodingStateMachine -from .mbcssm import UTF8SMModel +from .mbcssm import UTF8_SM_MODEL -ONE_CHAR_PROB = 0.5 class UTF8Prober(CharSetProber): + ONE_CHAR_PROB = 0.5 + def __init__(self): - CharSetProber.__init__(self) - self._mCodingSM = CodingStateMachine(UTF8SMModel) + super(UTF8Prober, self).__init__() + self.coding_sm = CodingStateMachine(UTF8_SM_MODEL) + self._num_mb_chars = None self.reset() def reset(self): - CharSetProber.reset(self) - self._mCodingSM.reset() - self._mNumOfMBChar = 0 + super(UTF8Prober, self).reset() + self.coding_sm.reset() + self._num_mb_chars = 0 - def get_charset_name(self): + @property + def charset_name(self): return "utf-8" - def feed(self, aBuf): - for c in aBuf: - codingState = self._mCodingSM.next_state(c) - if codingState == constants.eError: - self._mState = constants.eNotMe - break - elif codingState == constants.eItsMe: - self._mState = constants.eFoundIt - break - elif codingState == constants.eStart: - if self._mCodingSM.get_current_charlen() >= 2: - self._mNumOfMBChar += 1 + @property + def language(self): + return "" - if self.get_state() == constants.eDetecting: - if self.get_confidence() > constants.SHORTCUT_THRESHOLD: - self._mState = constants.eFoundIt + def feed(self, byte_str): + for c in byte_str: + coding_state = self.coding_sm.next_state(c) + if coding_state == MachineState.ERROR: + self._state = ProbingState.NOT_ME + break + elif coding_state == MachineState.ITS_ME: + self._state = ProbingState.FOUND_IT + break + elif coding_state == MachineState.START: + if self.coding_sm.get_current_charlen() >= 2: + self._num_mb_chars += 1 - return self.get_state() + if self.state == ProbingState.DETECTING: + if self.get_confidence() > self.SHORTCUT_THRESHOLD: + self._state = ProbingState.FOUND_IT + + return self.state def get_confidence(self): unlike = 0.99 - if self._mNumOfMBChar < 6: - for i 
in range(0, self._mNumOfMBChar): - unlike = unlike * ONE_CHAR_PROB + if self._num_mb_chars < 6: + unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars return 1.0 - unlike else: return unlike diff --git a/Shared/lib/python3.4/site-packages/chardet/version.py b/Shared/lib/python3.4/site-packages/chardet/version.py new file mode 100644 index 0000000..bb2a34a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/chardet/version.py @@ -0,0 +1,9 @@ +""" +This module exists only to simplify retrieving the version number of chardet +from within setup.py and from chardet subpackages. + +:author: Dan Blanchard (dan.blanchard@gmail.com) +""" + +__version__ = "3.0.4" +VERSION = __version__.split('.') diff --git a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/DESCRIPTION.rst deleted file mode 100644 index aa749cf..0000000 --- a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,9 +0,0 @@ - -enum-compat -=========== - -This is a virtual package, its whole purpose is to install enum34 on -Python older than 3.4. On Python 3.4+ it's a no-op. 
- - - diff --git a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/METADATA b/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/METADATA deleted file mode 100644 index ff8aca4..0000000 --- a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/METADATA +++ /dev/null @@ -1,30 +0,0 @@ -Metadata-Version: 2.0 -Name: enum-compat -Version: 0.0.2 -Summary: enum/enum34 compatibility package -Home-page: https://github.com/jstasiak/enum-compat -Author: Jakub Stasiak -Author-email: jakub@stasiak.at -License: MIT -Keywords: enum,compatibility,enum34 -Platform: UNKNOWN -Classifier: Intended Audience :: Developers -Classifier: Topic :: Software Development :: Libraries -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 - - -enum-compat -=========== - -This is a virtual package, its whole purpose is to install enum34 on -Python older than 3.4. On Python 3.4+ it's a no-op. 
- - - diff --git a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/RECORD b/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/RECORD deleted file mode 100644 index 3ded787..0000000 --- a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/RECORD +++ /dev/null @@ -1,7 +0,0 @@ -enum_compat-0.0.2.dist-info/DESCRIPTION.rst,sha256=ZUxgOYtR8j28PbCHss1PpZ8wDHJxnfb_LZB7HO4RciE,150 -enum_compat-0.0.2.dist-info/METADATA,sha256=tuKXeC1xCg2NlzhUoupS_zEYriQ-0Vg8nBI1alGFVqA,908 -enum_compat-0.0.2.dist-info/RECORD,, -enum_compat-0.0.2.dist-info/WHEEL,sha256=lCqt3ViRAf9c8mCs6o7ffkwROUdYSy8_YHn5f_rulB4,93 -enum_compat-0.0.2.dist-info/metadata.json,sha256=_55lhugqrWFsIqTavHtPEOhjQApHbMVyvYnCuJgE514,884 -enum_compat-0.0.2.dist-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 -enum_compat-0.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 diff --git a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/WHEEL deleted file mode 100644 index 6d9801a..0000000 --- a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: cp34-none-any - diff --git a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/metadata.json deleted file mode 100644 index ea0ddf2..0000000 --- a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Intended Audience :: Developers", "Topic :: Software Development :: Libraries", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming 
Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.details": {"contacts": [{"email": "jakub@stasiak.at", "name": "Jakub Stasiak", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/jstasiak/enum-compat"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["enum", "compatibility", "enum34"], "license": "MIT", "metadata_version": "2.0", "name": "enum-compat", "summary": "enum/enum34 compatibility package", "version": "0.0.2"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/DESCRIPTION.rst deleted file mode 100644 index e118723..0000000 --- a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,3 +0,0 @@ -UNKNOWN - - diff --git a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/METADATA b/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/METADATA deleted file mode 100644 index a3ad760..0000000 --- a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/METADATA +++ /dev/null @@ -1,34 +0,0 @@ -Metadata-Version: 2.0 -Name: feedparser -Version: 5.2.1 -Summary: Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds -Home-page: https://github.com/kurtmckee/feedparser -Author: Kurt McKee -Author-email: contactme@kurtmckee.org -License: UNKNOWN -Download-URL: https://pypi.python.org/pypi/feedparser -Keywords: atom,cdf,feed,parser,rdf,rss -Platform: POSIX -Platform: Windows -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.4 -Classifier: 
Programming Language :: Python :: 2.5 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.0 -Classifier: Programming Language :: Python :: 3.1 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: XML - -UNKNOWN - - diff --git a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/RECORD b/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/RECORD deleted file mode 100644 index b3a7a1d..0000000 --- a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/RECORD +++ /dev/null @@ -1,9 +0,0 @@ -feedparser.py,sha256=GFiRC3lJVRu0qHzZrJ1kETY7lt-nBkuGzWzuxQhLmP8,159865 -feedparser-5.2.1.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 -feedparser-5.2.1.dist-info/METADATA,sha256=C5CirzvRirHEViO3C-i2-S1dUQJcJOgZKKLho_Z20Ak,1318 -feedparser-5.2.1.dist-info/RECORD,, -feedparser-5.2.1.dist-info/WHEEL,sha256=lCqt3ViRAf9c8mCs6o7ffkwROUdYSy8_YHn5f_rulB4,93 -feedparser-5.2.1.dist-info/metadata.json,sha256=RMB2LpClHHZBGHSDdUKjdJEieOWn-T0t7Bh6BLaqNrA,1359 -feedparser-5.2.1.dist-info/top_level.txt,sha256=V8OMyOFfOWI46em2E-SGpIywuPZEYtMPOcPua_gzvUk,11 -feedparser-5.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -__pycache__/feedparser.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/WHEEL deleted file mode 100644 index 6d9801a..0000000 --- a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: 
cp34-none-any - diff --git a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/metadata.json deleted file mode 100644 index 4c0f31b..0000000 --- a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.4", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: XML"], "download_url": "https://pypi.python.org/pypi/feedparser", "extensions": {"python.details": {"contacts": [{"email": "contactme@kurtmckee.org", "name": "Kurt McKee", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/kurtmckee/feedparser"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["atom", "cdf", "feed", "parser", "rdf", "rss"], "metadata_version": "2.0", "name": "feedparser", "platform": "POSIX", "summary": "Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds", "version": "5.2.1"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/top_level.txt deleted file mode 100755 index 1b25361..0000000 --- 
a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -feedparser diff --git a/Shared/lib/python3.4/site-packages/feedparser.py b/Shared/lib/python3.4/site-packages/feedparser.py deleted file mode 100644 index 321e323..0000000 --- a/Shared/lib/python3.4/site-packages/feedparser.py +++ /dev/null @@ -1,4007 +0,0 @@ -"""Universal feed parser - -Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds - -Visit https://code.google.com/p/feedparser/ for the latest version -Visit http://packages.python.org/feedparser/ for the latest documentation - -Required: Python 2.4 or later -Recommended: iconv_codec -""" - -__version__ = "5.2.1" -__license__ = """ -Copyright 2010-2015 Kurt McKee -Copyright 2002-2008 Mark Pilgrim -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE.""" -__author__ = "Mark Pilgrim " -__contributors__ = ["Jason Diamond ", - "John Beimler ", - "Fazal Majid ", - "Aaron Swartz ", - "Kevin Marks ", - "Sam Ruby ", - "Ade Oshineye ", - "Martin Pool ", - "Kurt McKee ", - "Bernd Schlapsi ",] - -# HTTP "User-Agent" header to send to servers when downloading feeds. -# If you are embedding feedparser in a larger application, you should -# change this to your application name and URL. -USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__ - -# HTTP "Accept" header to send to servers when downloading feeds. If you don't -# want to send an Accept header, set this to None. -ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" - -# List of preferred XML parsers, by SAX driver name. These will be tried first, -# but if they're not installed, Python will keep searching through its own list -# of pre-installed parsers until it finds one that supports everything we need. -PREFERRED_XML_PARSERS = ["drv_libxml2"] - -# If you want feedparser to automatically resolve all relative URIs, set this -# to 1. -RESOLVE_RELATIVE_URIS = 1 - -# If you want feedparser to automatically sanitize all potentially unsafe -# HTML content, set this to 1. 
-SANITIZE_HTML = 1 - -# ---------- Python 3 modules (make it work if possible) ---------- -try: - import rfc822 -except ImportError: - from email import _parseaddr as rfc822 - -try: - # Python 3.1 introduces bytes.maketrans and simultaneously - # deprecates string.maketrans; use bytes.maketrans if possible - _maketrans = bytes.maketrans -except (NameError, AttributeError): - import string - _maketrans = string.maketrans - -# base64 support for Atom feeds that contain embedded binary data -try: - import base64, binascii -except ImportError: - base64 = binascii = None -else: - # Python 3.1 deprecates decodestring in favor of decodebytes - _base64decode = getattr(base64, 'decodebytes', base64.decodestring) - -# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3 -# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3 -try: - if bytes is str: - # In Python 2.5 and below, bytes doesn't exist (NameError) - # In Python 2.6 and above, bytes and str are the same type - raise NameError -except NameError: - # Python 2 - def _s2bytes(s): - return s - def _l2bytes(l): - return ''.join(map(chr, l)) -else: - # Python 3 - def _s2bytes(s): - return bytes(s, 'utf8') - def _l2bytes(l): - return bytes(l) - -# If you want feedparser to allow all URL schemes, set this to () -# List culled from Python's urlparse documentation at: -# http://docs.python.org/library/urlparse.html -# as well as from "URI scheme" at Wikipedia: -# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme -# Many more will likely need to be added! 
-ACCEPTABLE_URI_SCHEMES = ( - 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet', - 'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', - 'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', - 'wais', - # Additional common-but-unofficial schemes - 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs', - 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg', -) -#ACCEPTABLE_URI_SCHEMES = () - -# ---------- required modules (should come with any Python distribution) ---------- -import cgi -import codecs -import copy -import datetime -import itertools -import re -import struct -import time -import types -import urllib.request, urllib.parse, urllib.error -import urllib.request, urllib.error, urllib.parse -import urllib.parse -import warnings - -from html.entities import name2codepoint, codepoint2name, entitydefs -import collections - -try: - from io import BytesIO as _StringIO -except ImportError: - try: - from io import StringIO as _StringIO - except ImportError: - from io import StringIO as _StringIO - -# ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- - -# gzip is included with most Python distributions, but may not be available if you compiled your own -try: - import gzip -except ImportError: - gzip = None -try: - import zlib -except ImportError: - zlib = None - -# If a real XML parser is available, feedparser will attempt to use it. feedparser has -# been tested with the built-in SAX parser and libxml2. On platforms where the -# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some -# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. 
-try: - import xml.sax - from xml.sax.saxutils import escape as _xmlescape -except ImportError: - _XML_AVAILABLE = 0 - def _xmlescape(data,entities={}): - data = data.replace('&', '&') - data = data.replace('>', '>') - data = data.replace('<', '<') - for char, entity in entities: - data = data.replace(char, entity) - return data -else: - try: - xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers - except xml.sax.SAXReaderNotAvailable: - _XML_AVAILABLE = 0 - else: - _XML_AVAILABLE = 1 - -# sgmllib is not available by default in Python 3; if the end user doesn't have -# it available then we'll lose illformed XML parsing and content santizing -try: - import sgmllib -except ImportError: - # This is probably Python 3, which doesn't include sgmllib anymore - _SGML_AVAILABLE = 0 - - # Mock sgmllib enough to allow subclassing later on - class sgmllib(object): - class SGMLParser(object): - def goahead(self, i): - pass - def parse_starttag(self, i): - pass -else: - _SGML_AVAILABLE = 1 - - # sgmllib defines a number of module-level regular expressions that are - # insufficient for the XML parsing feedparser needs. Rather than modify - # the variables directly in sgmllib, they're defined here using the same - # names, and the compiled code objects of several sgmllib.SGMLParser - # methods are copied into _BaseHTMLProcessor so that they execute in - # feedparser's scope instead of sgmllib's scope. - charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);') - tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') - attrfind = re.compile( - r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*' - r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?' 
- ) - - # Unfortunately, these must be copied over to prevent NameError exceptions - entityref = sgmllib.entityref - incomplete = sgmllib.incomplete - interesting = sgmllib.interesting - shorttag = sgmllib.shorttag - shorttagopen = sgmllib.shorttagopen - starttagopen = sgmllib.starttagopen - - class _EndBracketRegEx: - def __init__(self): - # Overriding the built-in sgmllib.endbracket regex allows the - # parser to find angle brackets embedded in element attributes. - self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''') - def search(self, target, index=0): - match = self.endbracket.match(target, index) - if match is not None: - # Returning a new object in the calling thread's context - # resolves a thread-safety. - return EndBracketMatch(match) - return None - class EndBracketMatch: - def __init__(self, match): - self.match = match - def start(self, n): - return self.match.end(n) - endbracket = _EndBracketRegEx() - - -# iconv_codec provides support for more character encodings. 
-# It's available from http://cjkpython.i18n.org/ -try: - import iconv_codec -except ImportError: - pass - -# chardet library auto-detects character encodings -# Download from http://chardet.feedparser.org/ -try: - import chardet -except ImportError: - chardet = None - -# ---------- don't touch these ---------- -class ThingsNobodyCaresAboutButMe(Exception): pass -class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass -class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass -class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass -class UndeclaredNamespace(Exception): pass - -SUPPORTED_VERSIONS = {'': 'unknown', - 'rss090': 'RSS 0.90', - 'rss091n': 'RSS 0.91 (Netscape)', - 'rss091u': 'RSS 0.91 (Userland)', - 'rss092': 'RSS 0.92', - 'rss093': 'RSS 0.93', - 'rss094': 'RSS 0.94', - 'rss20': 'RSS 2.0', - 'rss10': 'RSS 1.0', - 'rss': 'RSS (unknown version)', - 'atom01': 'Atom 0.1', - 'atom02': 'Atom 0.2', - 'atom03': 'Atom 0.3', - 'atom10': 'Atom 1.0', - 'atom': 'Atom (unknown version)', - 'cdf': 'CDF', - } - -class FeedParserDict(dict): - keymap = {'channel': 'feed', - 'items': 'entries', - 'guid': 'id', - 'date': 'updated', - 'date_parsed': 'updated_parsed', - 'description': ['summary', 'subtitle'], - 'description_detail': ['summary_detail', 'subtitle_detail'], - 'url': ['href'], - 'modified': 'updated', - 'modified_parsed': 'updated_parsed', - 'issued': 'published', - 'issued_parsed': 'published_parsed', - 'copyright': 'rights', - 'copyright_detail': 'rights_detail', - 'tagline': 'subtitle', - 'tagline_detail': 'subtitle_detail'} - def __getitem__(self, key): - ''' - :return: A :class:`FeedParserDict`. 
- ''' - if key == 'category': - try: - return dict.__getitem__(self, 'tags')[0]['term'] - except IndexError: - raise KeyError("object doesn't have key 'category'") - elif key == 'enclosures': - norel = lambda link: FeedParserDict([(name,value) for (name,value) in list(link.items()) if name!='rel']) - return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']=='enclosure'] - elif key == 'license': - for link in dict.__getitem__(self, 'links'): - if link['rel']=='license' and 'href' in link: - return link['href'] - elif key == 'updated': - # Temporarily help developers out by keeping the old - # broken behavior that was reported in issue 310. - # This fix was proposed in issue 328. - if not dict.__contains__(self, 'updated') and \ - dict.__contains__(self, 'published'): - warnings.warn("To avoid breaking existing software while " - "fixing issue 310, a temporary mapping has been created " - "from `updated` to `published` if `updated` doesn't " - "exist. This fallback will be removed in a future version " - "of feedparser.", DeprecationWarning) - return dict.__getitem__(self, 'published') - return dict.__getitem__(self, 'updated') - elif key == 'updated_parsed': - if not dict.__contains__(self, 'updated_parsed') and \ - dict.__contains__(self, 'published_parsed'): - warnings.warn("To avoid breaking existing software while " - "fixing issue 310, a temporary mapping has been created " - "from `updated_parsed` to `published_parsed` if " - "`updated_parsed` doesn't exist. 
This fallback will be " - "removed in a future version of feedparser.", - DeprecationWarning) - return dict.__getitem__(self, 'published_parsed') - return dict.__getitem__(self, 'updated_parsed') - else: - realkey = self.keymap.get(key, key) - if isinstance(realkey, list): - for k in realkey: - if dict.__contains__(self, k): - return dict.__getitem__(self, k) - elif dict.__contains__(self, realkey): - return dict.__getitem__(self, realkey) - return dict.__getitem__(self, key) - - def __contains__(self, key): - if key in ('updated', 'updated_parsed'): - # Temporarily help developers out by keeping the old - # broken behavior that was reported in issue 310. - # This fix was proposed in issue 328. - return dict.__contains__(self, key) - try: - self.__getitem__(key) - except KeyError: - return False - else: - return True - - has_key = __contains__ - - def get(self, key, default=None): - ''' - :return: A :class:`FeedParserDict`. - ''' - try: - return self.__getitem__(key) - except KeyError: - return default - - def __setitem__(self, key, value): - key = self.keymap.get(key, key) - if isinstance(key, list): - key = key[0] - return dict.__setitem__(self, key, value) - - def setdefault(self, key, value): - if key not in self: - self[key] = value - return value - return self[key] - - def __getattr__(self, key): - # __getattribute__() is called first; this will be called - # only if an attribute was not already found - try: - return self.__getitem__(key) - except KeyError: - raise AttributeError("object has no attribute '%s'" % key) - - def __hash__(self): - return id(self) - -_cp1252 = { - 128: chr(8364), # euro sign - 130: chr(8218), # single low-9 quotation mark - 131: chr( 402), # latin small letter f with hook - 132: chr(8222), # double low-9 quotation mark - 133: chr(8230), # horizontal ellipsis - 134: chr(8224), # dagger - 135: chr(8225), # double dagger - 136: chr( 710), # modifier letter circumflex accent - 137: chr(8240), # per mille sign - 138: chr( 352), # latin 
capital letter s with caron - 139: chr(8249), # single left-pointing angle quotation mark - 140: chr( 338), # latin capital ligature oe - 142: chr( 381), # latin capital letter z with caron - 145: chr(8216), # left single quotation mark - 146: chr(8217), # right single quotation mark - 147: chr(8220), # left double quotation mark - 148: chr(8221), # right double quotation mark - 149: chr(8226), # bullet - 150: chr(8211), # en dash - 151: chr(8212), # em dash - 152: chr( 732), # small tilde - 153: chr(8482), # trade mark sign - 154: chr( 353), # latin small letter s with caron - 155: chr(8250), # single right-pointing angle quotation mark - 156: chr( 339), # latin small ligature oe - 158: chr( 382), # latin small letter z with caron - 159: chr( 376), # latin capital letter y with diaeresis -} - -_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') -def _urljoin(base, uri): - uri = _urifixer.sub(r'\1\3', uri) - if not isinstance(uri, str): - uri = uri.decode('utf-8', 'ignore') - try: - uri = urllib.parse.urljoin(base, uri) - except ValueError: - uri = '' - if not isinstance(uri, str): - return uri.decode('utf-8', 'ignore') - return uri - -class _FeedParserMixin: - namespaces = { - '': '', - 'http://backend.userland.com/rss': '', - 'http://blogs.law.harvard.edu/tech/rss': '', - 'http://purl.org/rss/1.0/': '', - 'http://my.netscape.com/rdf/simple/0.9/': '', - 'http://example.com/newformat#': '', - 'http://example.com/necho': '', - 'http://purl.org/echo/': '', - 'uri/of/echo/namespace#': '', - 'http://purl.org/pie/': '', - 'http://purl.org/atom/ns#': '', - 'http://www.w3.org/2005/Atom': '', - 'http://purl.org/rss/1.0/modules/rss091#': '', - - 'http://webns.net/mvcb/': 'admin', - 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', - 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', - 'http://media.tangent.org/rss/1.0/': 'audio', - 'http://backend.userland.com/blogChannelModule': 'blogChannel', - 'http://web.resource.org/cc/': 'cc', - 
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', - 'http://purl.org/rss/1.0/modules/company': 'co', - 'http://purl.org/rss/1.0/modules/content/': 'content', - 'http://my.theinfo.org/changed/1.0/rss/': 'cp', - 'http://purl.org/dc/elements/1.1/': 'dc', - 'http://purl.org/dc/terms/': 'dcterms', - 'http://purl.org/rss/1.0/modules/email/': 'email', - 'http://purl.org/rss/1.0/modules/event/': 'ev', - 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', - 'http://freshmeat.net/rss/fm/': 'fm', - 'http://xmlns.com/foaf/0.1/': 'foaf', - 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', - 'http://www.georss.org/georss': 'georss', - 'http://www.opengis.net/gml': 'gml', - 'http://postneo.com/icbm/': 'icbm', - 'http://purl.org/rss/1.0/modules/image/': 'image', - 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', - 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', - 'http://purl.org/rss/1.0/modules/link/': 'l', - 'http://search.yahoo.com/mrss': 'media', - # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace - 'http://search.yahoo.com/mrss/': 'media', - 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', - 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', - 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', - 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', - 'http://purl.org/rss/1.0/modules/reference/': 'ref', - 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', - 'http://purl.org/rss/1.0/modules/search/': 'search', - 'http://purl.org/rss/1.0/modules/slash/': 'slash', - 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', - 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', - 'http://hacks.benhammersley.com/rss/streaming/': 'str', - 'http://purl.org/rss/1.0/modules/subscription/': 'sub', - 'http://purl.org/rss/1.0/modules/syndication/': 'sy', - 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf', - 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', - 
'http://purl.org/rss/1.0/modules/threading/': 'thr', - 'http://purl.org/rss/1.0/modules/textinput/': 'ti', - 'http://madskills.com/public/xml/rss/module/trackback/': 'trackback', - 'http://wellformedweb.org/commentAPI/': 'wfw', - 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', - 'http://www.w3.org/1999/xhtml': 'xhtml', - 'http://www.w3.org/1999/xlink': 'xlink', - 'http://www.w3.org/XML/1998/namespace': 'xml', - 'http://podlove.org/simple-chapters': 'psc', - } - _matchnamespaces = {} - - can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']) - can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']) - can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']) - html_types = ['text/html', 'application/xhtml+xml'] - - def __init__(self, baseuri=None, baselang=None, encoding='utf-8'): - if not self._matchnamespaces: - for k, v in list(self.namespaces.items()): - self._matchnamespaces[k.lower()] = v - self.feeddata = FeedParserDict() # feed-level data - self.encoding = encoding # character encoding - self.entries = [] # list of entry-level data - self.version = '' # feed type/version, see SUPPORTED_VERSIONS - self.namespacesInUse = {} # dictionary of namespaces defined by the feed - - # the following are used internally to track state; - # this is really out of control and should be refactored - self.infeed = 0 - self.inentry = 0 - self.incontent = 0 - self.intextinput = 0 - self.inimage = 0 - self.inauthor = 0 - self.incontributor = 0 - self.inpublisher = 0 - self.insource = 0 - - # georss - self.ingeometry = 0 - - self.sourcedata = FeedParserDict() - self.contentparams = FeedParserDict() - self._summaryKey = None - self.namespacemap = {} - self.elementstack = [] - self.basestack = [] - self.langstack = [] - self.baseuri = baseuri or '' - 
self.lang = baselang or None - self.svgOK = 0 - self.title_depth = -1 - self.depth = 0 - # psc_chapters_flag prevents multiple psc_chapters from being - # captured in a single entry or item. The transition states are - # None -> True -> False. psc_chapter elements will only be - # captured while it is True. - self.psc_chapters_flag = None - if baselang: - self.feeddata['language'] = baselang.replace('_','-') - - # A map of the following form: - # { - # object_that_value_is_set_on: { - # property_name: depth_of_node_property_was_extracted_from, - # other_property: depth_of_node_property_was_extracted_from, - # }, - # } - self.property_depth_map = {} - - def _normalize_attributes(self, kv): - k = kv[0].lower() - v = k in ('rel', 'type') and kv[1].lower() or kv[1] - # the sgml parser doesn't handle entities in attributes, nor - # does it pass the attribute values through as unicode, while - # strict xml parsers do -- account for this difference - if isinstance(self, _LooseFeedParser): - v = v.replace('&', '&') - if not isinstance(v, str): - v = v.decode('utf-8') - return (k, v) - - def unknown_starttag(self, tag, attrs): - # increment depth counter - self.depth += 1 - - # normalize attrs - attrs = list(map(self._normalize_attributes, attrs)) - - # track xml:base and xml:lang - attrsD = dict(attrs) - baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri - if not isinstance(baseuri, str): - baseuri = baseuri.decode(self.encoding, 'ignore') - # ensure that self.baseuri is always an absolute URI that - # uses a whitelisted URI scheme (e.g. 
not `javscript:`) - if self.baseuri: - self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri - else: - self.baseuri = _urljoin(self.baseuri, baseuri) - lang = attrsD.get('xml:lang', attrsD.get('lang')) - if lang == '': - # xml:lang could be explicitly set to '', we need to capture that - lang = None - elif lang is None: - # if no xml:lang is specified, use parent lang - lang = self.lang - if lang: - if tag in ('feed', 'rss', 'rdf:RDF'): - self.feeddata['language'] = lang.replace('_','-') - self.lang = lang - self.basestack.append(self.baseuri) - self.langstack.append(lang) - - # track namespaces - for prefix, uri in attrs: - if prefix.startswith('xmlns:'): - self.trackNamespace(prefix[6:], uri) - elif prefix == 'xmlns': - self.trackNamespace(None, uri) - - # track inline content - if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'): - if tag in ('xhtml:div', 'div'): - return # typepad does this 10/2007 - # element declared itself as escaped markup, but it isn't really - self.contentparams['type'] = 'application/xhtml+xml' - if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': - if tag.find(':') != -1: - prefix, tag = tag.split(':', 1) - namespace = self.namespacesInUse.get(prefix, '') - if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML': - attrs.append(('xmlns',namespace)) - if tag=='svg' and namespace=='http://www.w3.org/2000/svg': - attrs.append(('xmlns',namespace)) - if tag == 'svg': - self.svgOK += 1 - return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0) - - # match namespaces - if tag.find(':') != -1: - prefix, suffix = tag.split(':', 1) - else: - prefix, suffix = '', tag - prefix = self.namespacemap.get(prefix, prefix) - if prefix: - prefix = prefix + '_' - - # special hack for better tracking of empty textinput/image elements in illformed feeds - if (not prefix) and tag not in ('title', 'link', 'description', 'name'): - self.intextinput = 0 - if 
(not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): - self.inimage = 0 - - # call special handler (if defined) or default handler - methodname = '_start_' + prefix + suffix - try: - method = getattr(self, methodname) - return method(attrsD) - except AttributeError: - # Since there's no handler or something has gone wrong we explicitly add the element and its attributes - unknown_tag = prefix + suffix - if len(attrsD) == 0: - # No attributes so merge it into the encosing dictionary - return self.push(unknown_tag, 1) - else: - # Has attributes so create it in its own dictionary - context = self._getContext() - context[unknown_tag] = attrsD - - def unknown_endtag(self, tag): - # match namespaces - if tag.find(':') != -1: - prefix, suffix = tag.split(':', 1) - else: - prefix, suffix = '', tag - prefix = self.namespacemap.get(prefix, prefix) - if prefix: - prefix = prefix + '_' - if suffix == 'svg' and self.svgOK: - self.svgOK -= 1 - - # call special handler (if defined) or default handler - methodname = '_end_' + prefix + suffix - try: - if self.svgOK: - raise AttributeError() - method = getattr(self, methodname) - method() - except AttributeError: - self.pop(prefix + suffix) - - # track inline content - if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'): - # element declared itself as escaped markup, but it isn't really - if tag in ('xhtml:div', 'div'): - return # typepad does this 10/2007 - self.contentparams['type'] = 'application/xhtml+xml' - if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': - tag = tag.split(':')[-1] - self.handle_data('' % tag, escape=0) - - # track xml:base and xml:lang going out of scope - if self.basestack: - self.basestack.pop() - if self.basestack and self.basestack[-1]: - self.baseuri = self.basestack[-1] - if self.langstack: - self.langstack.pop() - if self.langstack: # and (self.langstack[-1] is not None): - self.lang = 
self.langstack[-1] - - self.depth -= 1 - - def handle_charref(self, ref): - # called for each character reference, e.g. for ' ', ref will be '160' - if not self.elementstack: - return - ref = ref.lower() - if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): - text = '&#%s;' % ref - else: - if ref[0] == 'x': - c = int(ref[1:], 16) - else: - c = int(ref) - text = chr(c).encode('utf-8') - self.elementstack[-1][2].append(text) - - def handle_entityref(self, ref): - # called for each entity reference, e.g. for '©', ref will be 'copy' - if not self.elementstack: - return - if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): - text = '&%s;' % ref - elif ref in self.entities: - text = self.entities[ref] - if text.startswith('&#') and text.endswith(';'): - return self.handle_entityref(text) - else: - try: - name2codepoint[ref] - except KeyError: - text = '&%s;' % ref - else: - text = chr(name2codepoint[ref]).encode('utf-8') - self.elementstack[-1][2].append(text) - - def handle_data(self, text, escape=1): - # called for each block of plain text, i.e. outside of any tag and - # not containing any character or entity references - if not self.elementstack: - return - if escape and self.contentparams.get('type') == 'application/xhtml+xml': - text = _xmlescape(text) - self.elementstack[-1][2].append(text) - - def handle_comment(self, text): - # called for each comment, e.g. - pass - - def handle_pi(self, text): - # called for each processing instruction, e.g. - pass - - def handle_decl(self, text): - pass - - def parse_declaration(self, i): - # override internal declaration handler to handle CDATA blocks - if self.rawdata[i:i+9] == '', i) - if k == -1: - # CDATA block began but didn't finish - k = len(self.rawdata) - return k - self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) - return k+3 - else: - k = self.rawdata.find('>', i) - if k >= 0: - return k+1 - else: - # We have an incomplete CDATA block. 
- return k - - def mapContentType(self, contentType): - contentType = contentType.lower() - if contentType == 'text' or contentType == 'plain': - contentType = 'text/plain' - elif contentType == 'html': - contentType = 'text/html' - elif contentType == 'xhtml': - contentType = 'application/xhtml+xml' - return contentType - - def trackNamespace(self, prefix, uri): - loweruri = uri.lower() - if not self.version: - if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'): - self.version = 'rss090' - elif loweruri == 'http://purl.org/rss/1.0/': - self.version = 'rss10' - elif loweruri == 'http://www.w3.org/2005/atom': - self.version = 'atom10' - if loweruri.find('backend.userland.com/rss') != -1: - # match any backend.userland.com namespace - uri = 'http://backend.userland.com/rss' - loweruri = uri - if loweruri in self._matchnamespaces: - self.namespacemap[prefix] = self._matchnamespaces[loweruri] - self.namespacesInUse[self._matchnamespaces[loweruri]] = uri - else: - self.namespacesInUse[prefix or ''] = uri - - def resolveURI(self, uri): - return _urljoin(self.baseuri or '', uri) - - def decodeEntities(self, element, data): - return data - - def strattrs(self, attrs): - return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs]) - - def push(self, element, expectingText): - self.elementstack.append([element, expectingText, []]) - - def pop(self, element, stripWhitespace=1): - if not self.elementstack: - return - if self.elementstack[-1][0] != element: - return - - element, expectingText, pieces = self.elementstack.pop() - - if self.version == 'atom10' and self.contentparams.get('type', 'text') == 'application/xhtml+xml': - # remove enclosing child element, but only if it is a
and - # only if all the remaining content is nested underneath it. - # This means that the divs would be retained in the following: - #
foo
bar
- while pieces and len(pieces)>1 and not pieces[-1].strip(): - del pieces[-1] - while pieces and len(pieces)>1 and not pieces[0].strip(): - del pieces[0] - if pieces and (pieces[0] == '
' or pieces[0].startswith('
': - depth = 0 - for piece in pieces[:-1]: - if piece.startswith(''): - depth += 1 - else: - pieces = pieces[1:-1] - - # Ensure each piece is a str for Python 3 - for (i, v) in enumerate(pieces): - if not isinstance(v, str): - pieces[i] = v.decode('utf-8') - - output = ''.join(pieces) - if stripWhitespace: - output = output.strip() - if not expectingText: - return output - - # decode base64 content - if base64 and self.contentparams.get('base64', 0): - try: - output = _base64decode(output) - except binascii.Error: - pass - except binascii.Incomplete: - pass - except TypeError: - # In Python 3, base64 takes and outputs bytes, not str - # This may not be the most correct way to accomplish this - output = _base64decode(output.encode('utf-8')).decode('utf-8') - - # resolve relative URIs - if (element in self.can_be_relative_uri) and output: - # do not resolve guid elements with isPermalink="false" - if not element == 'id' or self.guidislink: - output = self.resolveURI(output) - - # decode entities within embedded markup - if not self.contentparams.get('base64', 0): - output = self.decodeEntities(element, output) - - # some feed formats require consumers to guess - # whether the content is html or plain text - if not self.version.startswith('atom') and self.contentparams.get('type') == 'text/plain': - if self.lookslikehtml(output): - self.contentparams['type'] = 'text/html' - - # remove temporary cruft from contentparams - try: - del self.contentparams['mode'] - except KeyError: - pass - try: - del self.contentparams['base64'] - except KeyError: - pass - - is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types - # resolve relative URIs within embedded markup - if is_htmlish and RESOLVE_RELATIVE_URIS: - if element in self.can_contain_relative_uris: - output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html')) - - # sanitize embedded markup - if is_htmlish and SANITIZE_HTML: - if 
element in self.can_contain_dangerous_markup: - output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html')) - - if self.encoding and not isinstance(output, str): - output = output.decode(self.encoding, 'ignore') - - # address common error where people take data that is already - # utf-8, presume that it is iso-8859-1, and re-encode it. - if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and isinstance(output, str): - try: - output = output.encode('iso-8859-1').decode('utf-8') - except (UnicodeEncodeError, UnicodeDecodeError): - pass - - # map win-1252 extensions to the proper code points - if isinstance(output, str): - output = output.translate(_cp1252) - - # categories/tags/keywords/whatever are handled in _end_category or _end_tags or _end_itunes_keywords - if element in ('category', 'tags', 'itunes_keywords'): - return output - - if element == 'title' and -1 < self.title_depth <= self.depth: - return output - - # store output in appropriate place(s) - if self.inentry and not self.insource: - if element == 'content': - self.entries[-1].setdefault(element, []) - contentparams = copy.deepcopy(self.contentparams) - contentparams['value'] = output - self.entries[-1][element].append(contentparams) - elif element == 'link': - if not self.inimage: - # query variables in urls in link elements are improperly - # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're - # unhandled character references. fix this special case. 
- output = output.replace('&', '&') - output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output) - self.entries[-1][element] = output - if output: - self.entries[-1]['links'][-1]['href'] = output - else: - if element == 'description': - element = 'summary' - old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element) - if old_value_depth is None or self.depth <= old_value_depth: - self.property_depth_map[self.entries[-1]][element] = self.depth - self.entries[-1][element] = output - if self.incontent: - contentparams = copy.deepcopy(self.contentparams) - contentparams['value'] = output - self.entries[-1][element + '_detail'] = contentparams - elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage): - context = self._getContext() - if element == 'description': - element = 'subtitle' - context[element] = output - if element == 'link': - # fix query variables; see above for the explanation - output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output) - context[element] = output - context['links'][-1]['href'] = output - elif self.incontent: - contentparams = copy.deepcopy(self.contentparams) - contentparams['value'] = output - context[element + '_detail'] = contentparams - return output - - def pushContent(self, tag, attrsD, defaultContentType, expectingText): - self.incontent += 1 - if self.lang: - self.lang=self.lang.replace('_','-') - self.contentparams = FeedParserDict({ - 'type': self.mapContentType(attrsD.get('type', defaultContentType)), - 'language': self.lang, - 'base': self.baseuri}) - self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams) - self.push(tag, expectingText) - - def popContent(self, tag): - value = self.pop(tag) - self.incontent -= 1 - self.contentparams.clear() - return value - - # a number of elements in a number of RSS variants are nominally plain - # text, but this is routinely ignored. This is an attempt to detect - # the most common cases. 
As false positives often result in silent - # data loss, this function errs on the conservative side. - @staticmethod - def lookslikehtml(s): - # must have a close tag or an entity reference to qualify - if not (re.search(r'',s) or re.search("&#?\w+;",s)): - return - - # all tags must be in a restricted subset of valid HTML tags - if [t for t in re.findall(r'', '') - author = author.replace('<>', '') - author = author.strip() - if author and (author[0] == '('): - author = author[1:] - if author and (author[-1] == ')'): - author = author[:-1] - author = author.strip() - if author or email: - context.setdefault('%s_detail' % key, detail) - if author: - detail['name'] = author - if email: - detail['email'] = email - - def _start_subtitle(self, attrsD): - self.pushContent('subtitle', attrsD, 'text/plain', 1) - _start_tagline = _start_subtitle - _start_itunes_subtitle = _start_subtitle - - def _end_subtitle(self): - self.popContent('subtitle') - _end_tagline = _end_subtitle - _end_itunes_subtitle = _end_subtitle - - def _start_rights(self, attrsD): - self.pushContent('rights', attrsD, 'text/plain', 1) - _start_dc_rights = _start_rights - _start_copyright = _start_rights - - def _end_rights(self): - self.popContent('rights') - _end_dc_rights = _end_rights - _end_copyright = _end_rights - - def _start_item(self, attrsD): - self.entries.append(FeedParserDict()) - self.push('item', 0) - self.inentry = 1 - self.guidislink = 0 - self.title_depth = -1 - self.psc_chapters_flag = None - id = self._getAttribute(attrsD, 'rdf:about') - if id: - context = self._getContext() - context['id'] = id - self._cdf_common(attrsD) - _start_entry = _start_item - - def _end_item(self): - self.pop('item') - self.inentry = 0 - _end_entry = _end_item - - def _start_dc_language(self, attrsD): - self.push('language', 1) - _start_language = _start_dc_language - - def _end_dc_language(self): - self.lang = self.pop('language') - _end_language = _end_dc_language - - def _start_dc_publisher(self, 
attrsD): - self.push('publisher', 1) - _start_webmaster = _start_dc_publisher - - def _end_dc_publisher(self): - self.pop('publisher') - self._sync_author_detail('publisher') - _end_webmaster = _end_dc_publisher - - def _start_dcterms_valid(self, attrsD): - self.push('validity', 1) - - def _end_dcterms_valid(self): - for validity_detail in self.pop('validity').split(';'): - if '=' in validity_detail: - key, value = validity_detail.split('=', 1) - if key == 'start': - self._save('validity_start', value, overwrite=True) - self._save('validity_start_parsed', _parse_date(value), overwrite=True) - elif key == 'end': - self._save('validity_end', value, overwrite=True) - self._save('validity_end_parsed', _parse_date(value), overwrite=True) - - def _start_published(self, attrsD): - self.push('published', 1) - _start_dcterms_issued = _start_published - _start_issued = _start_published - _start_pubdate = _start_published - - def _end_published(self): - value = self.pop('published') - self._save('published_parsed', _parse_date(value), overwrite=True) - _end_dcterms_issued = _end_published - _end_issued = _end_published - _end_pubdate = _end_published - - def _start_updated(self, attrsD): - self.push('updated', 1) - _start_modified = _start_updated - _start_dcterms_modified = _start_updated - _start_dc_date = _start_updated - _start_lastbuilddate = _start_updated - - def _end_updated(self): - value = self.pop('updated') - parsed_value = _parse_date(value) - self._save('updated_parsed', parsed_value, overwrite=True) - _end_modified = _end_updated - _end_dcterms_modified = _end_updated - _end_dc_date = _end_updated - _end_lastbuilddate = _end_updated - - def _start_created(self, attrsD): - self.push('created', 1) - _start_dcterms_created = _start_created - - def _end_created(self): - value = self.pop('created') - self._save('created_parsed', _parse_date(value), overwrite=True) - _end_dcterms_created = _end_created - - def _start_expirationdate(self, attrsD): - 
self.push('expired', 1) - - def _end_expirationdate(self): - self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True) - - # geospatial location, or "where", from georss.org - - def _start_georssgeom(self, attrsD): - self.push('geometry', 0) - context = self._getContext() - context['where'] = FeedParserDict() - - _start_georss_point = _start_georssgeom - _start_georss_line = _start_georssgeom - _start_georss_polygon = _start_georssgeom - _start_georss_box = _start_georssgeom - - def _save_where(self, geometry): - context = self._getContext() - context['where'].update(geometry) - - def _end_georss_point(self): - geometry = _parse_georss_point(self.pop('geometry')) - if geometry: - self._save_where(geometry) - - def _end_georss_line(self): - geometry = _parse_georss_line(self.pop('geometry')) - if geometry: - self._save_where(geometry) - - def _end_georss_polygon(self): - this = self.pop('geometry') - geometry = _parse_georss_polygon(this) - if geometry: - self._save_where(geometry) - - def _end_georss_box(self): - geometry = _parse_georss_box(self.pop('geometry')) - if geometry: - self._save_where(geometry) - - def _start_where(self, attrsD): - self.push('where', 0) - context = self._getContext() - context['where'] = FeedParserDict() - _start_georss_where = _start_where - - def _parse_srs_attrs(self, attrsD): - srsName = attrsD.get('srsname') - try: - srsDimension = int(attrsD.get('srsdimension', '2')) - except ValueError: - srsDimension = 2 - context = self._getContext() - context['where']['srsName'] = srsName - context['where']['srsDimension'] = srsDimension - - def _start_gml_point(self, attrsD): - self._parse_srs_attrs(attrsD) - self.ingeometry = 1 - self.push('geometry', 0) - - def _start_gml_linestring(self, attrsD): - self._parse_srs_attrs(attrsD) - self.ingeometry = 'linestring' - self.push('geometry', 0) - - def _start_gml_polygon(self, attrsD): - self._parse_srs_attrs(attrsD) - self.push('geometry', 0) - - def _start_gml_exterior(self, 
attrsD): - self.push('geometry', 0) - - def _start_gml_linearring(self, attrsD): - self.ingeometry = 'polygon' - self.push('geometry', 0) - - def _start_gml_pos(self, attrsD): - self.push('pos', 0) - - def _end_gml_pos(self): - this = self.pop('pos') - context = self._getContext() - srsName = context['where'].get('srsName') - srsDimension = context['where'].get('srsDimension', 2) - swap = True - if srsName and "EPSG" in srsName: - epsg = int(srsName.split(":")[-1]) - swap = bool(epsg in _geogCS) - geometry = _parse_georss_point(this, swap=swap, dims=srsDimension) - if geometry: - self._save_where(geometry) - - def _start_gml_poslist(self, attrsD): - self.push('pos', 0) - - def _end_gml_poslist(self): - this = self.pop('pos') - context = self._getContext() - srsName = context['where'].get('srsName') - srsDimension = context['where'].get('srsDimension', 2) - swap = True - if srsName and "EPSG" in srsName: - epsg = int(srsName.split(":")[-1]) - swap = bool(epsg in _geogCS) - geometry = _parse_poslist( - this, self.ingeometry, swap=swap, dims=srsDimension) - if geometry: - self._save_where(geometry) - - def _end_geom(self): - self.ingeometry = 0 - self.pop('geometry') - _end_gml_point = _end_geom - _end_gml_linestring = _end_geom - _end_gml_linearring = _end_geom - _end_gml_exterior = _end_geom - _end_gml_polygon = _end_geom - - def _end_where(self): - self.pop('where') - _end_georss_where = _end_where - - # end geospatial - - def _start_cc_license(self, attrsD): - context = self._getContext() - value = self._getAttribute(attrsD, 'rdf:resource') - attrsD = FeedParserDict() - attrsD['rel'] = 'license' - if value: - attrsD['href']=value - context.setdefault('links', []).append(attrsD) - - def _start_creativecommons_license(self, attrsD): - self.push('license', 1) - _start_creativeCommons_license = _start_creativecommons_license - - def _end_creativecommons_license(self): - value = self.pop('license') - context = self._getContext() - attrsD = FeedParserDict() - 
attrsD['rel'] = 'license' - if value: - attrsD['href'] = value - context.setdefault('links', []).append(attrsD) - del context['license'] - _end_creativeCommons_license = _end_creativecommons_license - - def _addTag(self, term, scheme, label): - context = self._getContext() - tags = context.setdefault('tags', []) - if (not term) and (not scheme) and (not label): - return - value = FeedParserDict(term=term, scheme=scheme, label=label) - if value not in tags: - tags.append(value) - - def _start_tags(self, attrsD): - # This is a completely-made up element. Its semantics are determined - # only by a single feed that precipitated bug report 392 on Google Code. - # In short, this is junk code. - self.push('tags', 1) - - def _end_tags(self): - for term in self.pop('tags').split(','): - self._addTag(term.strip(), None, None) - - def _start_category(self, attrsD): - term = attrsD.get('term') - scheme = attrsD.get('scheme', attrsD.get('domain')) - label = attrsD.get('label') - self._addTag(term, scheme, label) - self.push('category', 1) - _start_dc_subject = _start_category - _start_keywords = _start_category - - def _start_media_category(self, attrsD): - attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema') - self._start_category(attrsD) - - def _end_itunes_keywords(self): - for term in self.pop('itunes_keywords').split(','): - if term.strip(): - self._addTag(term.strip(), 'http://www.itunes.com/', None) - - def _end_media_keywords(self): - for term in self.pop('media_keywords').split(','): - if term.strip(): - self._addTag(term.strip(), None, None) - - def _start_itunes_category(self, attrsD): - self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None) - self.push('category', 1) - - def _end_category(self): - value = self.pop('category') - if not value: - return - context = self._getContext() - tags = context['tags'] - if value and len(tags) and not tags[-1]['term']: - tags[-1]['term'] = value - else: - self._addTag(value, None, None) - 
_end_dc_subject = _end_category - _end_keywords = _end_category - _end_itunes_category = _end_category - _end_media_category = _end_category - - def _start_cloud(self, attrsD): - self._getContext()['cloud'] = FeedParserDict(attrsD) - - def _start_link(self, attrsD): - attrsD.setdefault('rel', 'alternate') - if attrsD['rel'] == 'self': - attrsD.setdefault('type', 'application/atom+xml') - else: - attrsD.setdefault('type', 'text/html') - context = self._getContext() - attrsD = self._itsAnHrefDamnIt(attrsD) - if 'href' in attrsD: - attrsD['href'] = self.resolveURI(attrsD['href']) - expectingText = self.infeed or self.inentry or self.insource - context.setdefault('links', []) - if not (self.inentry and self.inimage): - context['links'].append(FeedParserDict(attrsD)) - if 'href' in attrsD: - expectingText = 0 - if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): - context['link'] = attrsD['href'] - else: - self.push('link', expectingText) - - def _end_link(self): - value = self.pop('link') - - def _start_guid(self, attrsD): - self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') - self.push('id', 1) - _start_id = _start_guid - - def _end_guid(self): - value = self.pop('id') - self._save('guidislink', self.guidislink and 'link' not in self._getContext()) - if self.guidislink: - # guid acts as link, but only if 'ispermalink' is not present or is 'true', - # and only if the item doesn't already have a link element - self._save('link', value) - _end_id = _end_guid - - def _start_title(self, attrsD): - if self.svgOK: - return self.unknown_starttag('title', list(attrsD.items())) - self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) - _start_dc_title = _start_title - _start_media_title = _start_title - - def _end_title(self): - if self.svgOK: - return - value = self.popContent('title') - if not value: - return - self.title_depth = self.depth - _end_dc_title = _end_title - - 
def _end_media_title(self): - title_depth = self.title_depth - self._end_title() - self.title_depth = title_depth - - def _start_description(self, attrsD): - context = self._getContext() - if 'summary' in context: - self._summaryKey = 'content' - self._start_content(attrsD) - else: - self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource) - _start_dc_description = _start_description - _start_media_description = _start_description - - def _start_abstract(self, attrsD): - self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource) - - def _end_description(self): - if self._summaryKey == 'content': - self._end_content() - else: - value = self.popContent('description') - self._summaryKey = None - _end_abstract = _end_description - _end_dc_description = _end_description - _end_media_description = _end_description - - def _start_info(self, attrsD): - self.pushContent('info', attrsD, 'text/plain', 1) - _start_feedburner_browserfriendly = _start_info - - def _end_info(self): - self.popContent('info') - _end_feedburner_browserfriendly = _end_info - - def _start_generator(self, attrsD): - if attrsD: - attrsD = self._itsAnHrefDamnIt(attrsD) - if 'href' in attrsD: - attrsD['href'] = self.resolveURI(attrsD['href']) - self._getContext()['generator_detail'] = FeedParserDict(attrsD) - self.push('generator', 1) - - def _end_generator(self): - value = self.pop('generator') - context = self._getContext() - if 'generator_detail' in context: - context['generator_detail']['name'] = value - - def _start_admin_generatoragent(self, attrsD): - self.push('generator', 1) - value = self._getAttribute(attrsD, 'rdf:resource') - if value: - self.elementstack[-1][2].append(value) - self.pop('generator') - self._getContext()['generator_detail'] = FeedParserDict({'href': value}) - - def _start_admin_errorreportsto(self, attrsD): - self.push('errorreportsto', 1) - value = self._getAttribute(attrsD, 'rdf:resource') - if 
value: - self.elementstack[-1][2].append(value) - self.pop('errorreportsto') - - def _start_summary(self, attrsD): - context = self._getContext() - if 'summary' in context: - self._summaryKey = 'content' - self._start_content(attrsD) - else: - self._summaryKey = 'summary' - self.pushContent(self._summaryKey, attrsD, 'text/plain', 1) - _start_itunes_summary = _start_summary - - def _end_summary(self): - if self._summaryKey == 'content': - self._end_content() - else: - self.popContent(self._summaryKey or 'summary') - self._summaryKey = None - _end_itunes_summary = _end_summary - - def _start_enclosure(self, attrsD): - attrsD = self._itsAnHrefDamnIt(attrsD) - context = self._getContext() - attrsD['rel'] = 'enclosure' - context.setdefault('links', []).append(FeedParserDict(attrsD)) - - def _start_source(self, attrsD): - if 'url' in attrsD: - # This means that we're processing a source element from an RSS 2.0 feed - self.sourcedata['href'] = attrsD['url'] - self.push('source', 1) - self.insource = 1 - self.title_depth = -1 - - def _end_source(self): - self.insource = 0 - value = self.pop('source') - if value: - self.sourcedata['title'] = value - self._getContext()['source'] = copy.deepcopy(self.sourcedata) - self.sourcedata.clear() - - def _start_content(self, attrsD): - self.pushContent('content', attrsD, 'text/plain', 1) - src = attrsD.get('src') - if src: - self.contentparams['src'] = src - self.push('content', 1) - - def _start_body(self, attrsD): - self.pushContent('content', attrsD, 'application/xhtml+xml', 1) - _start_xhtml_body = _start_body - - def _start_content_encoded(self, attrsD): - self.pushContent('content', attrsD, 'text/html', 1) - _start_fullitem = _start_content_encoded - - def _end_content(self): - copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types) - value = self.popContent('content') - if copyToSummary: - self._save('summary', value) - - _end_body = _end_content - _end_xhtml_body = _end_content 
- _end_content_encoded = _end_content - _end_fullitem = _end_content - - def _start_itunes_image(self, attrsD): - self.push('itunes_image', 0) - if attrsD.get('href'): - self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) - elif attrsD.get('url'): - self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')}) - _start_itunes_link = _start_itunes_image - - def _end_itunes_block(self): - value = self.pop('itunes_block', 0) - self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 - - def _end_itunes_explicit(self): - value = self.pop('itunes_explicit', 0) - # Convert 'yes' -> True, 'clean' to False, and any other value to None - # False and None both evaluate as False, so the difference can be ignored - # by applications that only need to know if the content is explicit. - self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0] - - def _start_media_group(self, attrsD): - # don't do anything, but don't break the enclosed tags either - pass - - def _start_media_rating(self, attrsD): - context = self._getContext() - context.setdefault('media_rating', attrsD) - self.push('rating', 1) - - def _end_media_rating(self): - rating = self.pop('rating') - if rating is not None and rating.strip(): - context = self._getContext() - context['media_rating']['content'] = rating - - def _start_media_credit(self, attrsD): - context = self._getContext() - context.setdefault('media_credit', []) - context['media_credit'].append(attrsD) - self.push('credit', 1) - - def _end_media_credit(self): - credit = self.pop('credit') - if credit != None and len(credit.strip()) != 0: - context = self._getContext() - context['media_credit'][-1]['content'] = credit - - def _start_media_restriction(self, attrsD): - context = self._getContext() - context.setdefault('media_restriction', attrsD) - self.push('restriction', 1) - - def _end_media_restriction(self): - restriction = self.pop('restriction') - if 
restriction != None and len(restriction.strip()) != 0: - context = self._getContext() - context['media_restriction']['content'] = [cc.strip().lower() for cc in restriction.split(' ')] - - def _start_media_license(self, attrsD): - context = self._getContext() - context.setdefault('media_license', attrsD) - self.push('license', 1) - - def _end_media_license(self): - license = self.pop('license') - if license != None and len(license.strip()) != 0: - context = self._getContext() - context['media_license']['content'] = license - - def _start_media_content(self, attrsD): - context = self._getContext() - context.setdefault('media_content', []) - context['media_content'].append(attrsD) - - def _start_media_thumbnail(self, attrsD): - context = self._getContext() - context.setdefault('media_thumbnail', []) - self.push('url', 1) # new - context['media_thumbnail'].append(attrsD) - - def _end_media_thumbnail(self): - url = self.pop('url') - context = self._getContext() - if url != None and len(url.strip()) != 0: - if 'url' not in context['media_thumbnail'][-1]: - context['media_thumbnail'][-1]['url'] = url - - def _start_media_player(self, attrsD): - self.push('media_player', 0) - self._getContext()['media_player'] = FeedParserDict(attrsD) - - def _end_media_player(self): - value = self.pop('media_player') - context = self._getContext() - context['media_player']['content'] = value - - def _start_newlocation(self, attrsD): - self.push('newlocation', 1) - - def _end_newlocation(self): - url = self.pop('newlocation') - context = self._getContext() - # don't set newlocation if the context isn't right - if context is not self.feeddata: - return - context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip()) - - def _start_psc_chapters(self, attrsD): - if self.psc_chapters_flag is None: - # Transition from None -> True - self.psc_chapters_flag = True - attrsD['chapters'] = [] - self._getContext()['psc_chapters'] = FeedParserDict(attrsD) - - def _end_psc_chapters(self): - # 
Transition from True -> False - self.psc_chapters_flag = False - - def _start_psc_chapter(self, attrsD): - if self.psc_chapters_flag: - start = self._getAttribute(attrsD, 'start') - attrsD['start_parsed'] = _parse_psc_chapter_start(start) - - context = self._getContext()['psc_chapters'] - context['chapters'].append(FeedParserDict(attrsD)) - - -if _XML_AVAILABLE: - class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): - def __init__(self, baseuri, baselang, encoding): - xml.sax.handler.ContentHandler.__init__(self) - _FeedParserMixin.__init__(self, baseuri, baselang, encoding) - self.bozo = 0 - self.exc = None - self.decls = {} - - def startPrefixMapping(self, prefix, uri): - if not uri: - return - # Jython uses '' instead of None; standardize on None - prefix = prefix or None - self.trackNamespace(prefix, uri) - if prefix and uri == 'http://www.w3.org/1999/xlink': - self.decls['xmlns:' + prefix] = uri - - def startElementNS(self, name, qname, attrs): - namespace, localname = name - lowernamespace = str(namespace or '').lower() - if lowernamespace.find('backend.userland.com/rss') != -1: - # match any backend.userland.com namespace - namespace = 'http://backend.userland.com/rss' - lowernamespace = namespace - if qname and qname.find(':') > 0: - givenprefix = qname.split(':')[0] - else: - givenprefix = None - prefix = self._matchnamespaces.get(lowernamespace, givenprefix) - if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse: - raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix) - localname = str(localname).lower() - - # qname implementation is horribly broken in Python 2.1 (it - # doesn't report any), and slightly broken in Python 2.2 (it - # doesn't report the xml: namespace). So we match up namespaces - # with a known list first, and then possibly override them with - # the qnames the SAX parser gives us (if indeed it gives us any - # at all). 
Thanks to MatejC for helping me test this and - # tirelessly telling me that it didn't work yet. - attrsD, self.decls = self.decls, {} - if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML': - attrsD['xmlns']=namespace - if localname=='svg' and namespace=='http://www.w3.org/2000/svg': - attrsD['xmlns']=namespace - - if prefix: - localname = prefix.lower() + ':' + localname - elif namespace and not qname: #Expat - for name,value in list(self.namespacesInUse.items()): - if name and value == namespace: - localname = name + ':' + localname - break - - for (namespace, attrlocalname), attrvalue in list(attrs.items()): - lowernamespace = (namespace or '').lower() - prefix = self._matchnamespaces.get(lowernamespace, '') - if prefix: - attrlocalname = prefix + ':' + attrlocalname - attrsD[str(attrlocalname).lower()] = attrvalue - for qname in attrs.getQNames(): - attrsD[str(qname).lower()] = attrs.getValueByQName(qname) - localname = str(localname).lower() - self.unknown_starttag(localname, list(attrsD.items())) - - def characters(self, text): - self.handle_data(text) - - def endElementNS(self, name, qname): - namespace, localname = name - lowernamespace = str(namespace or '').lower() - if qname and qname.find(':') > 0: - givenprefix = qname.split(':')[0] - else: - givenprefix = '' - prefix = self._matchnamespaces.get(lowernamespace, givenprefix) - if prefix: - localname = prefix + ':' + localname - elif namespace and not qname: #Expat - for name,value in list(self.namespacesInUse.items()): - if name and value == namespace: - localname = name + ':' + localname - break - localname = str(localname).lower() - self.unknown_endtag(localname) - - def error(self, exc): - self.bozo = 1 - self.exc = exc - - # drv_libxml2 calls warning() in some cases - warning = error - - def fatalError(self, exc): - self.error(exc) - raise exc - -class _BaseHTMLProcessor(sgmllib.SGMLParser): - special = re.compile('''[<>'"]''') - bare_ampersand = 
re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)") - elements_no_end_tag = set([ - 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', - 'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param', - 'source', 'track', 'wbr' - ]) - - def __init__(self, encoding, _type): - self.encoding = encoding - self._type = _type - sgmllib.SGMLParser.__init__(self) - - def reset(self): - self.pieces = [] - sgmllib.SGMLParser.reset(self) - - def _shorttag_replace(self, match): - tag = match.group(1) - if tag in self.elements_no_end_tag: - return '<' + tag + ' />' - else: - return '<' + tag + '>' - - # By declaring these methods and overriding their compiled code - # with the code from sgmllib, the original code will execute in - # feedparser's scope instead of sgmllib's. This means that the - # `tagfind` and `charref` regular expressions will be found as - # they're declared above, not as they're declared in sgmllib. - def goahead(self, i): - pass - goahead.__code__ = sgmllib.SGMLParser.goahead.__code__ - - def __parse_starttag(self, i): - pass - __parse_starttag.__code__ = sgmllib.SGMLParser.parse_starttag.__code__ - - def parse_starttag(self,i): - j = self.__parse_starttag(i) - if self._type == 'application/xhtml+xml': - if j>2 and self.rawdata[j-2:j]=='/>': - self.unknown_endtag(self.lasttag) - return j - - def feed(self, data): - data = re.compile(r'\s]+?)\s*/>', self._shorttag_replace, data) - data = data.replace(''', "'") - data = data.replace('"', '"') - try: - bytes - if bytes is str: - raise NameError - self.encoding = self.encoding + '_INVALID_PYTHON_3' - except NameError: - if self.encoding and isinstance(data, str): - data = data.encode(self.encoding) - sgmllib.SGMLParser.feed(self, data) - sgmllib.SGMLParser.close(self) - - def normalize_attrs(self, attrs): - if not attrs: - return attrs - # utility method to be called by descendants - attrs = list(dict([(k.lower(), v) for k, v in attrs]).items()) - attrs = [(k, k in ('rel', 'type') and v.lower() 
or v) for k, v in attrs] - attrs.sort() - return attrs - - def unknown_starttag(self, tag, attrs): - # called for each start tag - # attrs is a list of (attr, value) tuples - # e.g. for
, tag='pre', attrs=[('class', 'screen')]
-        uattrs = []
-        strattrs=''
-        if attrs:
-            for key, value in attrs:
-                value=value.replace('>','>').replace('<','<').replace('"','"')
-                value = self.bare_ampersand.sub("&", value)
-                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
-                if not isinstance(value, str):
-                    value = value.decode(self.encoding, 'ignore')
-                try:
-                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
-                    uattrs.append((str(key, self.encoding), value))
-                except TypeError:
-                    uattrs.append((key, value))
-            strattrs = ''.join([' %s="%s"' % (key, value) for key, value in uattrs])
-            if self.encoding:
-                try:
-                    strattrs = strattrs.encode(self.encoding)
-                except (UnicodeEncodeError, LookupError):
-                    pass
-        if tag in self.elements_no_end_tag:
-            self.pieces.append('<%s%s />' % (tag, strattrs))
-        else:
-            self.pieces.append('<%s%s>' % (tag, strattrs))
-
-    def unknown_endtag(self, tag):
-        # called for each end tag, e.g. for 
, tag will be 'pre' - # Reconstruct the original end tag. - if tag not in self.elements_no_end_tag: - self.pieces.append("" % tag) - - def handle_charref(self, ref): - # called for each character reference, e.g. for ' ', ref will be '160' - # Reconstruct the original character reference. - ref = ref.lower() - if ref.startswith('x'): - value = int(ref[1:], 16) - else: - value = int(ref) - - if value in _cp1252: - self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:]) - else: - self.pieces.append('&#%s;' % ref) - - def handle_entityref(self, ref): - # called for each entity reference, e.g. for '©', ref will be 'copy' - # Reconstruct the original entity reference. - if ref in name2codepoint or ref == 'apos': - self.pieces.append('&%s;' % ref) - else: - self.pieces.append('&%s' % ref) - - def handle_data(self, text): - # called for each block of plain text, i.e. outside of any tag and - # not containing any character or entity references - # Store the original text verbatim. - self.pieces.append(text) - - def handle_comment(self, text): - # called for each HTML comment, e.g. - # Reconstruct the original comment. - self.pieces.append('' % text) - - def handle_pi(self, text): - # called for each processing instruction, e.g. - # Reconstruct original processing instruction. - self.pieces.append('' % text) - - def handle_decl(self, text): - # called for the DOCTYPE, if present, e.g. 
- # - # Reconstruct original DOCTYPE - self.pieces.append('' % text) - - _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match - def _scan_name(self, i, declstartpos): - rawdata = self.rawdata - n = len(rawdata) - if i == n: - return None, -1 - m = self._new_declname_match(rawdata, i) - if m: - s = m.group() - name = s.strip() - if (i + len(s)) == n: - return None, -1 # end of buffer - return name.lower(), m.end() - else: - self.handle_data(rawdata) -# self.updatepos(declstartpos, i) - return None, -1 - - def convert_charref(self, name): - return '&#%s;' % name - - def convert_entityref(self, name): - return '&%s;' % name - - def output(self): - '''Return processed HTML as a single string''' - return ''.join([str(p) for p in self.pieces]) - - def parse_declaration(self, i): - try: - return sgmllib.SGMLParser.parse_declaration(self, i) - except sgmllib.SGMLParseError: - # escape the doctype declaration and continue parsing - self.handle_data('<') - return i+1 - -class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): - def __init__(self, baseuri, baselang, encoding, entities): - sgmllib.SGMLParser.__init__(self) - _FeedParserMixin.__init__(self, baseuri, baselang, encoding) - _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml') - self.entities=entities - - def decodeEntities(self, element, data): - data = data.replace('<', '<') - data = data.replace('<', '<') - data = data.replace('<', '<') - data = data.replace('>', '>') - data = data.replace('>', '>') - data = data.replace('>', '>') - data = data.replace('&', '&') - data = data.replace('&', '&') - data = data.replace('"', '"') - data = data.replace('"', '"') - data = data.replace(''', ''') - data = data.replace(''', ''') - if not self.contentparams.get('type', 'xml').endswith('xml'): - data = data.replace('<', '<') - data = data.replace('>', '>') - data = data.replace('&', '&') - data = data.replace('"', '"') - data = data.replace(''', "'") - data = data.replace('/', '/') - 
data = data.replace('/', '/') - return data - - def strattrs(self, attrs): - return ''.join([' %s="%s"' % (n,v.replace('"','"')) for n,v in attrs]) - -class _RelativeURIResolver(_BaseHTMLProcessor): - relative_uris = set([('a', 'href'), - ('applet', 'codebase'), - ('area', 'href'), - ('audio', 'src'), - ('blockquote', 'cite'), - ('body', 'background'), - ('del', 'cite'), - ('form', 'action'), - ('frame', 'longdesc'), - ('frame', 'src'), - ('iframe', 'longdesc'), - ('iframe', 'src'), - ('head', 'profile'), - ('img', 'longdesc'), - ('img', 'src'), - ('img', 'usemap'), - ('input', 'src'), - ('input', 'usemap'), - ('ins', 'cite'), - ('link', 'href'), - ('object', 'classid'), - ('object', 'codebase'), - ('object', 'data'), - ('object', 'usemap'), - ('q', 'cite'), - ('script', 'src'), - ('source', 'src'), - ('video', 'poster'), - ('video', 'src')]) - - def __init__(self, baseuri, encoding, _type): - _BaseHTMLProcessor.__init__(self, encoding, _type) - self.baseuri = baseuri - - def resolveURI(self, uri): - return _makeSafeAbsoluteURI(self.baseuri, uri.strip()) - - def unknown_starttag(self, tag, attrs): - attrs = self.normalize_attrs(attrs) - attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] - _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) - -def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type): - if not _SGML_AVAILABLE: - return htmlSource - - p = _RelativeURIResolver(baseURI, encoding, _type) - p.feed(htmlSource) - return p.output() - -def _makeSafeAbsoluteURI(base, rel=None): - # bail if ACCEPTABLE_URI_SCHEMES is empty - if not ACCEPTABLE_URI_SCHEMES: - return _urljoin(base, rel or '') - if not base: - return rel or '' - if not rel: - try: - scheme = urllib.parse.urlparse(base)[0] - except ValueError: - return '' - if not scheme or scheme in ACCEPTABLE_URI_SCHEMES: - return base - return '' - uri = _urljoin(base, rel) - if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES: - return 
'' - return uri - -class _HTMLSanitizer(_BaseHTMLProcessor): - acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area', - 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', - 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', - 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', - 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', - 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1', - 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', - 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', - 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', - 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', - 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', - 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', - 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']) - - acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey', - 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis', - 'background', 'balance', 'bgcolor', 'bgproperties', 'border', - 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', - 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', - 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols', - 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data', - 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay', - 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', - 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', - 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode', - 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', - 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', - 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', - 'noshade', 
'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', - 'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel', - 'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', - 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', - 'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', - 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap', - 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml', - 'width', 'wrap', 'xml:lang']) - - unacceptable_elements_with_end_tag = set(['script', 'applet', 'style']) - - acceptable_css_properties = set(['azimuth', 'background-color', - 'border-bottom-color', 'border-collapse', 'border-color', - 'border-left-color', 'border-right-color', 'border-top-color', 'clear', - 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', - 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', - 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', - 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', - 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', - 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', - 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', - 'white-space', 'width']) - - # survey of common keywords found in feeds - acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue', - 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', - 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', - 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', - 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', - 'transparent', 'underline', 'white', 'yellow']) - - valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' + - '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$') - - mathml_elements = set([ - 'annotation', - 'annotation-xml', - 'maction', - 
'maligngroup', - 'malignmark', - 'math', - 'menclose', - 'merror', - 'mfenced', - 'mfrac', - 'mglyph', - 'mi', - 'mlabeledtr', - 'mlongdiv', - 'mmultiscripts', - 'mn', - 'mo', - 'mover', - 'mpadded', - 'mphantom', - 'mprescripts', - 'mroot', - 'mrow', - 'ms', - 'mscarries', - 'mscarry', - 'msgroup', - 'msline', - 'mspace', - 'msqrt', - 'msrow', - 'mstack', - 'mstyle', - 'msub', - 'msubsup', - 'msup', - 'mtable', - 'mtd', - 'mtext', - 'mtr', - 'munder', - 'munderover', - 'none', - 'semantics', - ]) - - mathml_attributes = set([ - 'accent', - 'accentunder', - 'actiontype', - 'align', - 'alignmentscope', - 'altimg', - 'altimg-height', - 'altimg-valign', - 'altimg-width', - 'alttext', - 'bevelled', - 'charalign', - 'close', - 'columnalign', - 'columnlines', - 'columnspacing', - 'columnspan', - 'columnwidth', - 'crossout', - 'decimalpoint', - 'denomalign', - 'depth', - 'dir', - 'display', - 'displaystyle', - 'edge', - 'encoding', - 'equalcolumns', - 'equalrows', - 'fence', - 'fontstyle', - 'fontweight', - 'form', - 'frame', - 'framespacing', - 'groupalign', - 'height', - 'href', - 'id', - 'indentalign', - 'indentalignfirst', - 'indentalignlast', - 'indentshift', - 'indentshiftfirst', - 'indentshiftlast', - 'indenttarget', - 'infixlinebreakstyle', - 'largeop', - 'length', - 'linebreak', - 'linebreakmultchar', - 'linebreakstyle', - 'lineleading', - 'linethickness', - 'location', - 'longdivstyle', - 'lquote', - 'lspace', - 'mathbackground', - 'mathcolor', - 'mathsize', - 'mathvariant', - 'maxsize', - 'minlabelspacing', - 'minsize', - 'movablelimits', - 'notation', - 'numalign', - 'open', - 'other', - 'overflow', - 'position', - 'rowalign', - 'rowlines', - 'rowspacing', - 'rowspan', - 'rquote', - 'rspace', - 'scriptlevel', - 'scriptminsize', - 'scriptsizemultiplier', - 'selection', - 'separator', - 'separators', - 'shift', - 'side', - 'src', - 'stackalign', - 'stretchy', - 'subscriptshift', - 'superscriptshift', - 'symmetric', - 'voffset', - 'width', - 'xlink:href', - 
'xlink:show', - 'xlink:type', - 'xmlns', - 'xmlns:xlink', - ]) - - # svgtiny - foreignObject + linearGradient + radialGradient + stop - svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion', - 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject', - 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', - 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', - 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', - 'svg', 'switch', 'text', 'title', 'tspan', 'use']) - - # svgtiny + class + opacity + offset + xmlns + xmlns:xlink - svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic', - 'arabic-form', 'ascent', 'attributeName', 'attributeType', - 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', - 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', - 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', - 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style', - 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', - 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', - 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', - 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid', - 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max', - 'min', 'name', 'offset', 'opacity', 'orient', 'origin', - 'overline-position', 'overline-thickness', 'panose-1', 'path', - 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', - 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', - 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', - 'stop-color', 'stop-opacity', 'strikethrough-position', - 'strikethrough-thickness', 'stroke', 'stroke-dasharray', - 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', - 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', - 'target', 'text-anchor', 'to', 
'transform', 'type', 'u1', 'u2', - 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', - 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', - 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', - 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', - 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', - 'y2', 'zoomAndPan']) - - svg_attr_map = None - svg_elem_map = None - - acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule', - 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', - 'stroke-opacity']) - - def reset(self): - _BaseHTMLProcessor.reset(self) - self.unacceptablestack = 0 - self.mathmlOK = 0 - self.svgOK = 0 - - def unknown_starttag(self, tag, attrs): - acceptable_attributes = self.acceptable_attributes - keymap = {} - if not tag in self.acceptable_elements or self.svgOK: - if tag in self.unacceptable_elements_with_end_tag: - self.unacceptablestack += 1 - - # add implicit namespaces to html5 inline svg/mathml - if self._type.endswith('html'): - if not dict(attrs).get('xmlns'): - if tag=='svg': - attrs.append( ('xmlns','http://www.w3.org/2000/svg') ) - if tag=='math': - attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') ) - - # not otherwise acceptable, perhaps it is MathML or SVG? - if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs: - self.mathmlOK += 1 - if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs: - self.svgOK += 1 - - # chose acceptable attributes based on tag class, else bail - if self.mathmlOK and tag in self.mathml_elements: - acceptable_attributes = self.mathml_attributes - elif self.svgOK and tag in self.svg_elements: - # for most vocabularies, lowercasing is a good idea. 
Many - # svg elements, however, are camel case - if not self.svg_attr_map: - lower=[attr.lower() for attr in self.svg_attributes] - mix=[a for a in self.svg_attributes if a not in lower] - self.svg_attributes = lower - self.svg_attr_map = dict([(a.lower(),a) for a in mix]) - - lower=[attr.lower() for attr in self.svg_elements] - mix=[a for a in self.svg_elements if a not in lower] - self.svg_elements = lower - self.svg_elem_map = dict([(a.lower(),a) for a in mix]) - acceptable_attributes = self.svg_attributes - tag = self.svg_elem_map.get(tag,tag) - keymap = self.svg_attr_map - elif not tag in self.acceptable_elements: - return - - # declare xlink namespace, if needed - if self.mathmlOK or self.svgOK: - if [n_v for n_v in attrs if n_v[0].startswith('xlink:')]: - if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs: - attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink')) - - clean_attrs = [] - for key, value in self.normalize_attrs(attrs): - if key in acceptable_attributes: - key=keymap.get(key,key) - # make sure the uri uses an acceptable uri scheme - if key == 'href': - value = _makeSafeAbsoluteURI(value) - clean_attrs.append((key,value)) - elif key=='style': - clean_value = self.sanitize_style(value) - if clean_value: - clean_attrs.append((key,clean_value)) - _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs) - - def unknown_endtag(self, tag): - if not tag in self.acceptable_elements: - if tag in self.unacceptable_elements_with_end_tag: - self.unacceptablestack -= 1 - if self.mathmlOK and tag in self.mathml_elements: - if tag == 'math' and self.mathmlOK: - self.mathmlOK -= 1 - elif self.svgOK and tag in self.svg_elements: - tag = self.svg_elem_map.get(tag,tag) - if tag == 'svg' and self.svgOK: - self.svgOK -= 1 - else: - return - _BaseHTMLProcessor.unknown_endtag(self, tag) - - def handle_pi(self, text): - pass - - def handle_decl(self, text): - pass - - def handle_data(self, text): - if not self.unacceptablestack: - 
_BaseHTMLProcessor.handle_data(self, text) - - def sanitize_style(self, style): - # disallow urls - style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style) - - # gauntlet - if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): - return '' - # This replaced a regexp that used re.match and was prone to pathological back-tracking. - if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): - return '' - - clean = [] - for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style): - if not value: - continue - if prop.lower() in self.acceptable_css_properties: - clean.append(prop + ': ' + value + ';') - elif prop.split('-')[0].lower() in ['background','border','margin','padding']: - for keyword in value.split(): - if not keyword in self.acceptable_css_keywords and \ - not self.valid_css_values.match(keyword): - break - else: - clean.append(prop + ': ' + value + ';') - elif self.svgOK and prop.lower() in self.acceptable_svg_properties: - clean.append(prop + ': ' + value + ';') - - return ' '.join(clean) - - def parse_comment(self, i, report=1): - ret = _BaseHTMLProcessor.parse_comment(self, i, report) - if ret >= 0: - return ret - # if ret == -1, this may be a malicious attempt to circumvent - # sanitization, or a page-destroying unclosed comment - match = re.compile(r'--[^>]*>').search(self.rawdata, i+4) - if match: - return match.end() - # unclosed comment; deliberately fail to handle_data() - return len(self.rawdata) - - -def _sanitizeHTML(htmlSource, encoding, _type): - if not _SGML_AVAILABLE: - return htmlSource - p = _HTMLSanitizer(encoding, _type) - htmlSource = htmlSource.replace(' stream - - This function lets you define parsers that take any input source - (URL, pathname to local or network file, or actual data as a string) - and deal with it in a uniform manner. Returned object is guaranteed - to have all the basic stdio read methods (read, readline, readlines). 
- Just .close() the object when you're done with it. - - If the etag argument is supplied, it will be used as the value of an - If-None-Match request header. - - If the modified argument is supplied, it can be a tuple of 9 integers - (as returned by gmtime() in the standard Python time module) or a date - string in any format supported by feedparser. Regardless, it MUST - be in GMT (Greenwich Mean Time). It will be reformatted into an - RFC 1123-compliant date and used as the value of an If-Modified-Since - request header. - - If the agent argument is supplied, it will be used as the value of a - User-Agent request header. - - If the referrer argument is supplied, it will be used as the value of a - Referer[sic] request header. - - If handlers is supplied, it is a list of handlers used to build a - urllib2 opener. - - if request_headers is supplied it is a dictionary of HTTP request headers - that will override the values generated by FeedParser. - - :return: A :class:`StringIO.StringIO` or :class:`io.BytesIO`. 
- """ - - if hasattr(url_file_stream_or_string, 'read'): - return url_file_stream_or_string - - if isinstance(url_file_stream_or_string, str) \ - and urllib.parse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'): - # Deal with the feed URI scheme - if url_file_stream_or_string.startswith('feed:http'): - url_file_stream_or_string = url_file_stream_or_string[5:] - elif url_file_stream_or_string.startswith('feed:'): - url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:] - if not agent: - agent = USER_AGENT - # Test for inline user:password credentials for HTTP basic auth - auth = None - if base64 and not url_file_stream_or_string.startswith('ftp:'): - urltype, rest = urllib.parse.splittype(url_file_stream_or_string) - realhost, rest = urllib.parse.splithost(rest) - if realhost: - user_passwd, realhost = urllib.parse.splituser(realhost) - if user_passwd: - url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) - auth = base64.standard_b64encode(user_passwd).strip() - - # iri support - if isinstance(url_file_stream_or_string, str): - url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string) - - # try to open with urllib2 (to use optional headers) - request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers) - opener = urllib.request.build_opener(*tuple(handlers + [_FeedURLHandler()])) - opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent - try: - return opener.open(request) - finally: - opener.close() # JohnD - - # try to open with native open function (if url_file_stream_or_string is a filename) - try: - return open(url_file_stream_or_string, 'rb') - except (IOError, UnicodeEncodeError, TypeError): - # if url_file_stream_or_string is a unicode object that - # cannot be converted to the encoding returned by - # sys.getfilesystemencoding(), a UnicodeEncodeError - # will be thrown - # If url_file_stream_or_string is 
a string that contains NULL - # (such as an XML document encoded in UTF-32), TypeError will - # be thrown. - pass - - # treat url_file_stream_or_string as string - if isinstance(url_file_stream_or_string, str): - return _StringIO(url_file_stream_or_string.encode('utf-8')) - return _StringIO(url_file_stream_or_string) - -def _convert_to_idn(url): - """Convert a URL to IDN notation""" - # this function should only be called with a unicode string - # strategy: if the host cannot be encoded in ascii, then - # it'll be necessary to encode it in idn form - parts = list(urllib.parse.urlsplit(url)) - try: - parts[1].encode('ascii') - except UnicodeEncodeError: - # the url needs to be converted to idn notation - host = parts[1].rsplit(':', 1) - newhost = [] - port = '' - if len(host) == 2: - port = host.pop() - for h in host[0].split('.'): - newhost.append(h.encode('idna').decode('utf-8')) - parts[1] = '.'.join(newhost) - if port: - parts[1] += ':' + port - return urllib.parse.urlunsplit(parts) - else: - return url - -def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers): - request = urllib.request.Request(url) - request.add_header('User-Agent', agent) - if etag: - request.add_header('If-None-Match', etag) - if isinstance(modified, str): - modified = _parse_date(modified) - elif isinstance(modified, datetime.datetime): - modified = modified.utctimetuple() - if modified: - # format into an RFC 1123-compliant timestamp. We can't use - # time.strftime() since the %a and %b directives can be affected - # by the current locale, but RFC 2616 states that dates must be - # in English. 
- short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) - if referrer: - request.add_header('Referer', referrer) - if gzip and zlib: - request.add_header('Accept-encoding', 'gzip, deflate') - elif gzip: - request.add_header('Accept-encoding', 'gzip') - elif zlib: - request.add_header('Accept-encoding', 'deflate') - else: - request.add_header('Accept-encoding', '') - if auth: - request.add_header('Authorization', 'Basic %s' % auth) - if ACCEPT_HEADER: - request.add_header('Accept', ACCEPT_HEADER) - # use this for whatever -- cookies, special headers, etc - # [('Cookie','Something'),('x-special-header','Another Value')] - for header_name, header_value in list(request_headers.items()): - request.add_header(header_name, header_value) - request.add_header('A-IM', 'feed') # RFC 3229 support - return request - -def _parse_psc_chapter_start(start): - FORMAT = r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$' - - m = re.compile(FORMAT).match(start) - if m is None: - return None - - _, h, m, s, _, ms = m.groups() - h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0)) - return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000) - -_date_handlers = [] -def registerDateHandler(func): - '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' - _date_handlers.insert(0, func) - -# ISO-8601 date parsing routines written by Fazal Majid. -# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 -# parser is beyond the scope of feedparser and would be a worthwhile addition -# to the Python library. 
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
                 'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
                 '-YY-?MM', '-OOO', '-YY',
                 '--MM-?DD', '--MM',
                 '---DD',
                 'CC', '']
# Expand each template into a real regex.  Every marker becomes a *named*
# group; the names must match the params.get(...) lookups performed in
# _parse_date_iso8601 below ('year', 'month', 'day', 'ordinal', 'century',
# 'hour', 'minute', 'second', 'fracsecond', 'tz', 'tzhour', 'tzmin').
# NOTE(review): the source text had lost the `<name>` part of every
# `(?P<name>...)` group (leaving the syntactically invalid `(?P\d{4})`);
# the names were restored from the groupdict() usage in the parser below.
_iso8601_re = [
    tmpl.replace(
        'YYYY', r'(?P<year>\d{4})').replace(
        'YY', r'(?P<year>\d\d)').replace(
        'MM', r'(?P<month>[01]\d)').replace(
        'DD', r'(?P<day>[0123]\d)').replace(
        'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
        'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]
# The comprehension variables do not leak on Python 3; the try/except keeps
# this safe either way.
try:
    del tmpl
except NameError:
    pass
# Pre-bind the bound .match methods so the parser loop avoids an attribute
# lookup per template.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass

def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105.

    Tries each precompiled template regex in order (order matters: the
    templates are arranged longest-first for a greedy match) and converts
    the first hit into a 9-tuple in GMT, as produced by time.localtime().
    Returns None (implicitly or explicitly) when nothing matches or the
    timezone marker is unusable.
    '''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    # An empty template ('') in _iso8601_tmpl can "match" zero characters;
    # treat that as no match.
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        # No year given: default to the current (GMT) year.
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
            params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    # Fractional seconds are accepted but truncated.
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift the (possibly out-of-range)
    # hour/minute fields and let mktime() normalize the result.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)

# 8-bit date handling routines written by ytrewq1.
-_korean_year = '\ub144' # b3e2 in euc-kr -_korean_month = '\uc6d4' # bff9 in euc-kr -_korean_day = '\uc77c' # c0cf in euc-kr -_korean_am = '\uc624\uc804' # bfc0 c0fc in euc-kr -_korean_pm = '\uc624\ud6c4' # bfc0 c8c4 in euc-kr - -_korean_onblog_date_re = \ - re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ - (_korean_year, _korean_month, _korean_day)) -_korean_nate_date_re = \ - re.compile('(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ - (_korean_am, _korean_pm)) -def _parse_date_onblog(dateString): - '''Parse a string according to the OnBlog 8-bit date format''' - m = _korean_onblog_date_re.match(dateString) - if not m: - return - w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ - {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ - 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ - 'zonediff': '+09:00'} - return _parse_date_w3dtf(w3dtfdate) -registerDateHandler(_parse_date_onblog) - -def _parse_date_nate(dateString): - '''Parse a string according to the Nate 8-bit date format''' - m = _korean_nate_date_re.match(dateString) - if not m: - return - hour = int(m.group(5)) - ampm = m.group(4) - if (ampm == _korean_pm): - hour += 12 - hour = str(hour) - if len(hour) == 1: - hour = '0' + hour - w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ - {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ - 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ - 'zonediff': '+09:00'} - return _parse_date_w3dtf(w3dtfdate) -registerDateHandler(_parse_date_nate) - -# Unicode strings for Greek date strings -_greek_months = \ - { \ - '\u0399\u03b1\u03bd': 'Jan', # c9e1ed in iso-8859-7 - '\u03a6\u03b5\u03b2': 'Feb', # d6e5e2 in iso-8859-7 - '\u039c\u03ac\u03ce': 'Mar', # ccdcfe in iso-8859-7 - '\u039c\u03b1\u03ce': 'Mar', # cce1fe in iso-8859-7 - '\u0391\u03c0\u03c1': 'Apr', # c1f0f1 in iso-8859-7 - 
'\u039c\u03ac\u03b9': 'May', # ccdce9 in iso-8859-7 - '\u039c\u03b1\u03ca': 'May', # cce1fa in iso-8859-7 - '\u039c\u03b1\u03b9': 'May', # cce1e9 in iso-8859-7 - '\u0399\u03bf\u03cd\u03bd': 'Jun', # c9effded in iso-8859-7 - '\u0399\u03bf\u03bd': 'Jun', # c9efed in iso-8859-7 - '\u0399\u03bf\u03cd\u03bb': 'Jul', # c9effdeb in iso-8859-7 - '\u0399\u03bf\u03bb': 'Jul', # c9f9eb in iso-8859-7 - '\u0391\u03cd\u03b3': 'Aug', # c1fde3 in iso-8859-7 - '\u0391\u03c5\u03b3': 'Aug', # c1f5e3 in iso-8859-7 - '\u03a3\u03b5\u03c0': 'Sep', # d3e5f0 in iso-8859-7 - '\u039f\u03ba\u03c4': 'Oct', # cfeaf4 in iso-8859-7 - '\u039d\u03bf\u03ad': 'Nov', # cdefdd in iso-8859-7 - '\u039d\u03bf\u03b5': 'Nov', # cdefe5 in iso-8859-7 - '\u0394\u03b5\u03ba': 'Dec', # c4e5ea in iso-8859-7 - } - -_greek_wdays = \ - { \ - '\u039a\u03c5\u03c1': 'Sun', # caf5f1 in iso-8859-7 - '\u0394\u03b5\u03c5': 'Mon', # c4e5f5 in iso-8859-7 - '\u03a4\u03c1\u03b9': 'Tue', # d4f1e9 in iso-8859-7 - '\u03a4\u03b5\u03c4': 'Wed', # d4e5f4 in iso-8859-7 - '\u03a0\u03b5\u03bc': 'Thu', # d0e5ec in iso-8859-7 - '\u03a0\u03b1\u03c1': 'Fri', # d0e1f1 in iso-8859-7 - '\u03a3\u03b1\u03b2': 'Sat', # d3e1e2 in iso-8859-7 - } - -_greek_date_format_re = \ - re.compile('([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') - -def _parse_date_greek(dateString): - '''Parse a string according to a Greek 8-bit date format.''' - m = _greek_date_format_re.match(dateString) - if not m: - return - wday = _greek_wdays[m.group(1)] - month = _greek_months[m.group(3)] - rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ - {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ - 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ - 'zonediff': m.group(8)} - return _parse_date_rfc822(rfc822date) -registerDateHandler(_parse_date_greek) - -# Unicode strings for Hungarian date strings -_hungarian_months = \ - { \ - 'janu\u00e1r': '01', # e1 in 
iso-8859-2 - 'febru\u00e1ri': '02', # e1 in iso-8859-2 - 'm\u00e1rcius': '03', # e1 in iso-8859-2 - '\u00e1prilis': '04', # e1 in iso-8859-2 - 'm\u00e1ujus': '05', # e1 in iso-8859-2 - 'j\u00fanius': '06', # fa in iso-8859-2 - 'j\u00falius': '07', # fa in iso-8859-2 - 'augusztus': '08', - 'szeptember': '09', - 'okt\u00f3ber': '10', # f3 in iso-8859-2 - 'november': '11', - 'december': '12', - } - -_hungarian_date_format_re = \ - re.compile('(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') - -def _parse_date_hungarian(dateString): - '''Parse a string according to a Hungarian 8-bit date format.''' - m = _hungarian_date_format_re.match(dateString) - if not m or m.group(2) not in _hungarian_months: - return None - month = _hungarian_months[m.group(2)] - day = m.group(3) - if len(day) == 1: - day = '0' + day - hour = m.group(4) - if len(hour) == 1: - hour = '0' + hour - w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ - {'year': m.group(1), 'month': month, 'day': day,\ - 'hour': hour, 'minute': m.group(5),\ - 'zonediff': m.group(6)} - return _parse_date_w3dtf(w3dtfdate) -registerDateHandler(_parse_date_hungarian) - -timezonenames = { - 'ut': 0, 'gmt': 0, 'z': 0, - 'adt': -3, 'ast': -4, 'at': -4, - 'edt': -4, 'est': -5, 'et': -5, - 'cdt': -5, 'cst': -6, 'ct': -6, - 'mdt': -6, 'mst': -7, 'mt': -7, - 'pdt': -7, 'pst': -8, 'pt': -8, - 'a': -1, 'n': 1, - 'm': -12, 'y': 12, -} -# W3 date and time format parser -# http://www.w3.org/TR/NOTE-datetime -# Also supports MSSQL-style datetimes as defined at: -# http://msdn.microsoft.com/en-us/library/ms186724.aspx -# (basically, allow a space as a date/time/timezone separator) -def _parse_date_w3dtf(datestr): - if not datestr.strip(): - return None - parts = datestr.lower().split('t') - if len(parts) == 1: - # This may be a date only, or may be an MSSQL-style date - parts = parts[0].split() - if len(parts) == 1: - # Treat this as a date only - parts.append('00:00:00z') - elif len(parts) > 
2: - return None - date = parts[0].split('-', 2) - if not date or len(date[0]) != 4: - return None - # Ensure that `date` has 3 elements. Using '1' sets the default - # month to January and the default day to the 1st of the month. - date.extend(['1'] * (3 - len(date))) - try: - year, month, day = [int(i) for i in date] - except ValueError: - # `date` may have more than 3 elements or may contain - # non-integer strings. - return None - if parts[1].endswith('z'): - parts[1] = parts[1][:-1] - parts.append('z') - # Append the numeric timezone offset, if any, to parts. - # If this is an MSSQL-style date then parts[2] already contains - # the timezone information, so `append()` will not affect it. - # Add 1 to each value so that if `find()` returns -1 it will be - # treated as False. - loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1 - loc = loc - 1 - parts.append(parts[1][loc:]) - parts[1] = parts[1][:loc] - time = parts[1].split(':', 2) - # Ensure that time has 3 elements. Using '0' means that the - # minutes and seconds, if missing, will default to 0. 
- time.extend(['0'] * (3 - len(time))) - tzhour = 0 - tzmin = 0 - if parts[2][:1] in ('-', '+'): - try: - tzhour = int(parts[2][1:3]) - tzmin = int(parts[2][4:]) - except ValueError: - return None - if parts[2].startswith('-'): - tzhour = tzhour * -1 - tzmin = tzmin * -1 - else: - tzhour = timezonenames.get(parts[2], 0) - try: - hour, minute, second = [int(float(i)) for i in time] - except ValueError: - return None - # Create the datetime object and timezone delta objects - try: - stamp = datetime.datetime(year, month, day, hour, minute, second) - except ValueError: - return None - delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour) - # Return the date and timestamp in a UTC 9-tuple - try: - return (stamp - delta).utctimetuple() - except (OverflowError, ValueError): - # IronPython throws ValueErrors instead of OverflowErrors - return None - -registerDateHandler(_parse_date_w3dtf) - -def _parse_date_rfc822(date): - """Parse RFC 822 dates and times - http://tools.ietf.org/html/rfc822#section-5 - - There are some formatting differences that are accounted for: - 1. Years may be two or four digits. - 2. The month and day can be swapped. - 3. Additional timezone names are supported. - 4. A default time and timezone are assumed if only a date is present. 
- """ - daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']) - months = { - 'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, - 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12, - } - - parts = date.lower().split() - if len(parts) < 5: - # Assume that the time and timezone are missing - parts.extend(('00:00:00', '0000')) - # Remove the day name - if parts[0][:3] in daynames: - parts = parts[1:] - if len(parts) < 5: - # If there are still fewer than five parts, there's not enough - # information to interpret this - return None - try: - day = int(parts[0]) - except ValueError: - # Check if the day and month are swapped - if months.get(parts[0][:3]): - try: - day = int(parts[1]) - except ValueError: - return None - else: - parts[1] = parts[0] - else: - return None - month = months.get(parts[1][:3]) - if not month: - return None - try: - year = int(parts[2]) - except ValueError: - return None - # Normalize two-digit years: - # Anything in the 90's is interpreted as 1990 and on - # Anything 89 or less is interpreted as 2089 or before - if len(parts[2]) <= 2: - year += (1900, 2000)[year < 90] - timeparts = parts[3].split(':') - timeparts = timeparts + ([0] * (3 - len(timeparts))) - try: - (hour, minute, second) = list(map(int, timeparts)) - except ValueError: - return None - tzhour = 0 - tzmin = 0 - # Strip 'Etc/' from the timezone - if parts[4].startswith('etc/'): - parts[4] = parts[4][4:] - # Normalize timezones that start with 'gmt': - # GMT-05:00 => -0500 - # GMT => GMT - if parts[4].startswith('gmt'): - parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt' - # Handle timezones like '-0500', '+0500', and 'EST' - if parts[4] and parts[4][0] in ('-', '+'): - try: - tzhour = int(parts[4][1:3]) - tzmin = int(parts[4][3:]) - except ValueError: - return None - if parts[4].startswith('-'): - tzhour = tzhour * -1 - tzmin = tzmin * -1 - else: - tzhour = timezonenames.get(parts[4], 0) - # Create the datetime object and timezone delta objects 
- try: - stamp = datetime.datetime(year, month, day, hour, minute, second) - except ValueError: - return None - delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour) - # Return the date and timestamp in a UTC 9-tuple - try: - return (stamp - delta).utctimetuple() - except (OverflowError, ValueError): - # IronPython throws ValueErrors instead of OverflowErrors - return None -registerDateHandler(_parse_date_rfc822) - -_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', - 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] -def _parse_date_asctime(dt): - """Parse asctime-style dates. - - Converts asctime to RFC822-compatible dates and uses the RFC822 parser - to do the actual parsing. - - Supported formats (format is standardized to the first one listed): - - * {weekday name} {month name} dd hh:mm:ss {+-tz} yyyy - * {weekday name} {month name} dd hh:mm:ss yyyy - """ - - parts = dt.split() - - # Insert a GMT timezone, if needed. - if len(parts) == 5: - parts.insert(4, '+0000') - - # Exit if there are not six parts. - if len(parts) != 6: - return None - - # Reassemble the parts in an RFC822-compatible order and parse them. 
- return _parse_date_rfc822(' '.join([ - parts[0], parts[2], parts[1], parts[5], parts[3], parts[4], - ])) -registerDateHandler(_parse_date_asctime) - -def _parse_date_perforce(aDateString): - """parse a date in yyyy/mm/dd hh:mm:ss TTT format""" - # Fri, 2006/09/15 08:19:53 EDT - _my_date_pattern = re.compile( \ - r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})') - - m = _my_date_pattern.search(aDateString) - if m is None: - return None - dow, year, month, day, hour, minute, second, tz = m.groups() - months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz) - tm = rfc822.parsedate_tz(dateString) - if tm: - return time.gmtime(rfc822.mktime_tz(tm)) -registerDateHandler(_parse_date_perforce) - -def _parse_date(dateString): - '''Parses a variety of date formats into a 9-tuple in GMT''' - if not dateString: - return None - for handler in _date_handlers: - try: - date9tuple = handler(dateString) - except (KeyError, OverflowError, ValueError): - continue - if not date9tuple: - continue - if len(date9tuple) != 9: - continue - return date9tuple - return None - -# Each marker represents some of the characters of the opening XML -# processing instruction (' -RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>') - -# Capture the value of the XML processing instruction's encoding attribute. -# Example: -RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')) - -def convert_to_utf8(http_headers, data): - '''Detect and convert the character encoding to UTF-8. - - http_headers is a dictionary - data is a raw string (not Unicode)''' - - # This is so much trickier than it sounds, it's not even funny. 
- # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type - # is application/xml, application/*+xml, - # application/xml-external-parsed-entity, or application/xml-dtd, - # the encoding given in the charset parameter of the HTTP Content-Type - # takes precedence over the encoding given in the XML prefix within the - # document, and defaults to 'utf-8' if neither are specified. But, if - # the HTTP Content-Type is text/xml, text/*+xml, or - # text/xml-external-parsed-entity, the encoding given in the XML prefix - # within the document is ALWAYS IGNORED and only the encoding given in - # the charset parameter of the HTTP Content-Type header should be - # respected, and it defaults to 'us-ascii' if not specified. - - # Furthermore, discussion on the atom-syntax mailing list with the - # author of RFC 3023 leads me to the conclusion that any document - # served with a Content-Type of text/* and no charset parameter - # must be treated as us-ascii. (We now do this.) And also that it - # must always be flagged as non-well-formed. (We now do this too.) - - # If Content-Type is unspecified (input was local file or non-HTTP source) - # or unrecognized (server just got it totally wrong), then go by the - # encoding given in the XML prefix of the document and default to - # 'iso-8859-1' as per the HTTP specification (RFC 2616). - - # Then, assuming we didn't find a character encoding in the HTTP headers - # (and the HTTP Content-type allowed us to look in the body), we need - # to sniff the first few bytes of the XML data and try to determine - # whether the encoding is ASCII-compatible. Section F of the XML - # specification shows the way here: - # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info - - # If the sniffed encoding is not ASCII-compatible, we need to make it - # ASCII compatible so that we can sniff further into the XML declaration - # to find the encoding attribute, which will tell us the true encoding. 
- - # Of course, none of this guarantees that we will be able to parse the - # feed in the declared character encoding (assuming it was declared - # correctly, which many are not). iconv_codec can help a lot; - # you should definitely install it if you can. - # http://cjkpython.i18n.org/ - - bom_encoding = '' - xml_encoding = '' - rfc3023_encoding = '' - - # Look at the first few bytes of the document to guess what - # its encoding may be. We only need to decode enough of the - # document that we can use an ASCII-compatible regular - # expression to search for an XML encoding declaration. - # The heuristic follows the XML specification, section F: - # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info - # Check for BOMs first. - if data[:4] == codecs.BOM_UTF32_BE: - bom_encoding = 'utf-32be' - data = data[4:] - elif data[:4] == codecs.BOM_UTF32_LE: - bom_encoding = 'utf-32le' - data = data[4:] - elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES: - bom_encoding = 'utf-16be' - data = data[2:] - elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES: - bom_encoding = 'utf-16le' - data = data[2:] - elif data[:3] == codecs.BOM_UTF8: - bom_encoding = 'utf-8' - data = data[3:] - # Check for the characters '''' - if RE_XML_DECLARATION.search(data): - data = RE_XML_DECLARATION.sub(new_declaration, data) - else: - data = new_declaration + '\n' + data - data = data.encode('utf-8') - break - # if still no luck, give up - if not known_encoding: - error = CharacterEncodingUnknown( - 'document encoding unknown, I tried ' + - '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % - (rfc3023_encoding, xml_encoding)) - rfc3023_encoding = '' - elif proposed_encoding != rfc3023_encoding: - error = CharacterEncodingOverride( - 'document declared as %s, but parsed as %s' % - (rfc3023_encoding, proposed_encoding)) - rfc3023_encoding = proposed_encoding - - return data, rfc3023_encoding, error - -# Match XML entity declarations. 
-# Example: -RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*]*?)>'), re.MULTILINE) - -# Match XML DOCTYPE declarations. -# Example: -RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*]*?)>'), re.MULTILINE) - -# Match safe entity declarations. -# This will allow hexadecimal character references through, -# as well as text, but not arbitrary nested entities. -# Example: cubed "³" -# Example: copyright "(C)" -# Forbidden: explode1 "&explode2;&explode2;" -RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')) - -def replace_doctype(data): - '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data) - - rss_version may be 'rss091n' or None - stripped_data is the same XML document with a replaced DOCTYPE - ''' - - # Divide the document into two groups by finding the location - # of the first element that doesn't begin with '\n\n]>') - data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data - - # Precompute the safe entities for the loose parser. - safe_entities = dict((k.decode('utf-8'), v.decode('utf-8')) - for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement)) - return version, data, safe_entities - - -# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates' -# items, or None in the case of a parsing error. - -def _parse_poslist(value, geom_type, swap=True, dims=2): - if geom_type == 'linestring': - return _parse_georss_line(value, swap, dims) - elif geom_type == 'polygon': - ring = _parse_georss_line(value, swap, dims) - return {'type': 'Polygon', 'coordinates': (ring['coordinates'],)} - else: - return None - -def _gen_georss_coords(value, swap=True, dims=2): - # A generator of (lon, lat) pairs from a string of encoded GeoRSS - # coordinates. Converts to floats and swaps order. 
- latlons = map(float, value.strip().replace(',', ' ').split()) - nxt = latlons.__next__ - while True: - t = [nxt(), nxt()][::swap and -1 or 1] - if dims == 3: - t.append(nxt()) - yield tuple(t) - -def _parse_georss_point(value, swap=True, dims=2): - # A point contains a single latitude-longitude pair, separated by - # whitespace. We'll also handle comma separators. - try: - coords = list(_gen_georss_coords(value, swap, dims)) - return {'type': 'Point', 'coordinates': coords[0]} - except (IndexError, ValueError): - return None - -def _parse_georss_line(value, swap=True, dims=2): - # A line contains a space separated list of latitude-longitude pairs in - # WGS84 coordinate reference system, with each pair separated by - # whitespace. There must be at least two pairs. - try: - coords = list(_gen_georss_coords(value, swap, dims)) - return {'type': 'LineString', 'coordinates': coords} - except (IndexError, ValueError): - return None - -def _parse_georss_polygon(value, swap=True, dims=2): - # A polygon contains a space separated list of latitude-longitude pairs, - # with each pair separated by whitespace. There must be at least four - # pairs, with the last being identical to the first (so a polygon has a - # minimum of three actual points). - try: - ring = list(_gen_georss_coords(value, swap, dims)) - except (IndexError, ValueError): - return None - if len(ring) < 4: - return None - return {'type': 'Polygon', 'coordinates': (ring,)} - -def _parse_georss_box(value, swap=True, dims=2): - # A bounding box is a rectangular region, often used to define the extents - # of a map or a rough area of interest. A box contains two space seperate - # latitude-longitude pairs, with each pair separated by whitespace. The - # first pair is the lower corner, the second is the upper corner. 
- try: - coords = list(_gen_georss_coords(value, swap, dims)) - return {'type': 'Box', 'coordinates': tuple(coords)} - except (IndexError, ValueError): - return None - -# end geospatial parsers - - -def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None): - '''Parse a feed from a URL, file, stream, or string. - - request_headers, if given, is a dict from http header name to value to add - to the request; this overrides internally generated values. - - :return: A :class:`FeedParserDict`. - ''' - - if handlers is None: - handlers = [] - if request_headers is None: - request_headers = {} - if response_headers is None: - response_headers = {} - - result = FeedParserDict() - result['feed'] = FeedParserDict() - result['entries'] = [] - result['bozo'] = 0 - if not isinstance(handlers, list): - handlers = [handlers] - try: - f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers) - data = f.read() - except Exception as e: - result['bozo'] = 1 - result['bozo_exception'] = e - data = None - f = None - - if hasattr(f, 'headers'): - result['headers'] = dict(f.headers) - # overwrite existing headers using response_headers - if 'headers' in result: - result['headers'].update(response_headers) - elif response_headers: - result['headers'] = copy.deepcopy(response_headers) - - # lowercase all of the HTTP headers for comparisons per RFC 2616 - if 'headers' in result: - http_headers = dict((k.lower(), v) for k, v in list(result['headers'].items())) - else: - http_headers = {} - - # if feed is gzip-compressed, decompress it - if f and data and http_headers: - if gzip and 'gzip' in http_headers.get('content-encoding', ''): - try: - data = gzip.GzipFile(fileobj=_StringIO(data)).read() - except (IOError, struct.error) as e: - # IOError can occur if the gzip header is bad. - # struct.error can occur if the data is damaged. 
- result['bozo'] = 1 - result['bozo_exception'] = e - if isinstance(e, struct.error): - # A gzip header was found but the data is corrupt. - # Ideally, we should re-request the feed without the - # 'Accept-encoding: gzip' header, but we don't. - data = None - elif zlib and 'deflate' in http_headers.get('content-encoding', ''): - try: - data = zlib.decompress(data) - except zlib.error as e: - try: - # The data may have no headers and no checksum. - data = zlib.decompress(data, -15) - except zlib.error as e: - result['bozo'] = 1 - result['bozo_exception'] = e - - # save HTTP headers - if http_headers: - if 'etag' in http_headers: - etag = http_headers.get('etag', '') - if not isinstance(etag, str): - etag = etag.decode('utf-8', 'ignore') - if etag: - result['etag'] = etag - if 'last-modified' in http_headers: - modified = http_headers.get('last-modified', '') - if modified: - result['modified'] = modified - result['modified_parsed'] = _parse_date(modified) - if hasattr(f, 'url'): - if not isinstance(f.url, str): - result['href'] = f.url.decode('utf-8', 'ignore') - else: - result['href'] = f.url - result['status'] = 200 - if hasattr(f, 'status'): - result['status'] = f.status - if hasattr(f, 'close'): - f.close() - - if data is None: - return result - - # Stop processing if the server sent HTTP 304 Not Modified. - if getattr(f, 'code', 0) == 304: - result['version'] = '' - result['debug_message'] = 'The feed has not changed since you last checked, ' + \ - 'so the server sent no data. This is a feature, not a bug!' - return result - - data, result['encoding'], error = convert_to_utf8(http_headers, data) - use_strict_parser = result['encoding'] and True or False - if error is not None: - result['bozo'] = 1 - result['bozo_exception'] = error - - result['version'], data, entities = replace_doctype(data) - - # Ensure that baseuri is an absolute URI using an acceptable URI scheme. 
- contentloc = http_headers.get('content-location', '') - href = result.get('href', '') - baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href - - baselang = http_headers.get('content-language', None) - if not isinstance(baselang, str) and baselang is not None: - baselang = baselang.decode('utf-8', 'ignore') - - if not _XML_AVAILABLE: - use_strict_parser = 0 - if use_strict_parser: - # initialize the SAX parser - feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') - saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) - saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) - try: - # disable downloading external doctype references, if possible - saxparser.setFeature(xml.sax.handler.feature_external_ges, 0) - except xml.sax.SAXNotSupportedException: - pass - saxparser.setContentHandler(feedparser) - saxparser.setErrorHandler(feedparser) - source = xml.sax.xmlreader.InputSource() - source.setByteStream(_StringIO(data)) - try: - saxparser.parse(source) - except xml.sax.SAXException as e: - result['bozo'] = 1 - result['bozo_exception'] = feedparser.exc or e - use_strict_parser = 0 - if not use_strict_parser and _SGML_AVAILABLE: - feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities) - feedparser.feed(data.decode('utf-8', 'replace')) - result['feed'] = feedparser.feeddata - result['entries'] = feedparser.entries - result['version'] = result['version'] or feedparser.version - result['namespaces'] = feedparser.namespacesInUse - return result - -# The list of EPSG codes for geographic (latitude/longitude) coordinate -# systems to support decoding of GeoRSS GML profiles. 
-_geogCS = [ -3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, -4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022, -4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, -4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081, -4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132, -4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145, -4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158, -4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, -4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185, -4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200, -4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, -4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, -4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240, -4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253, -4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, -4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, -4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293, -4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307, -4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322, -4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603, -4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, -4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, -4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, -4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, -4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, -4679, 
4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, -4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, -4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, -4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, -4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, -4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756, -4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804, -4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818, -4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ] diff --git a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/RECORD b/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/RECORD deleted file mode 100644 index 831d998..0000000 --- a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/RECORD +++ /dev/null @@ -1,79 +0,0 @@ -html5lib/__init__.py,sha256=OeEYU2bhPKOq9YE4ueQUsKrdSN2Ly_FdIp8GjNL5AeQ,783 -html5lib/constants.py,sha256=B5LN2DMP-6lEp9wpON4ecX3Kx01n_cbMjuGd6AteixE,86873 -html5lib/html5parser.py,sha256=P1fmBDiTFMZgTxwiNAuPA8P0VR96QSnlp7i-xLNHYnc,117335 -html5lib/ihatexml.py,sha256=MT12cVXAKaW-ALUkUeN175HpUP73xK8wAIpPzQ8cgfI,16581 -html5lib/inputstream.py,sha256=MmlG5JLwn2MvxPIEeSjJWkJZOzzg4aWvMyKhMk5k4qs,31641 -html5lib/sanitizer.py,sha256=sbyGySzFzCD_v0JYYSr6sLYVLpO6bpVmRiDMKbFRcCw,17804 -html5lib/tokenizer.py,sha256=6Uf8sDUkvNn661bcBSBYUCTfXzSs9EyCTiPcj5PAjYI,76929 -html5lib/utils.py,sha256=-kEjo4p5eVd2szBNnIuLExc6MOvKF0F01jQBP_9E0Oc,3255 
-html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -html5lib/filters/_base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286 -html5lib/filters/alphabeticalattributes.py,sha256=fpRLbz6TCe5yXEkGmyMlJ80FekWsTR-sHk3Ano0U9LQ,624 -html5lib/filters/inject_meta_charset.py,sha256=xllv1I7unxhcyZTf3LTsv30wh2mAkT7wmTZx7zIhpuY,2746 -html5lib/filters/lint.py,sha256=8eJo0SXDcY40OhsNd0Cft36kUXCZ5t-30mNFSUf4LnE,4208 -html5lib/filters/optionaltags.py,sha256=4ozLwBgMRaxe7iqxefLQpDhp3irK7YHo9LgSGsvZYMw,10500 -html5lib/filters/sanitizer.py,sha256=MvGUs_v2taWPgGhjxswRSUiHfxrqMUhsNPz-eSeUYUQ,352 -html5lib/filters/whitespace.py,sha256=LbOUcC0zQ9z703KNZrArOr0kVBO7OMXjKjucDW32LU4,1142 -html5lib/serializer/__init__.py,sha256=xFXFP-inaTNlbnau5c5DGrH_O8yPm-C6HWbJxpiSqFE,490 -html5lib/serializer/htmlserializer.py,sha256=TeR9CNlbKV6QPhkJhBHyuy8hgaAPZc0JsV0mnMv1-oM,12843 -html5lib/treeadapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -html5lib/treeadapters/sax.py,sha256=3of4vvaUYIAic7pngebwJV24hpOS7Zg9ggJa_WQegy4,1661 -html5lib/treebuilders/__init__.py,sha256=Xz4X6B5DA1R-5GyRa44j0sJwfl6dUNyb0NBu9-7sK3U,3405 -html5lib/treebuilders/_base.py,sha256=rzLhQqEJsI1qgh8__6kJ7So4Z4qUl4vSJYFtqzHBw6E,13699 -html5lib/treebuilders/dom.py,sha256=jvmtvnERtpxXpHvBgiq1FpzAUYAAzoolOTx_DoXwGEI,8469 -html5lib/treebuilders/etree.py,sha256=1HKcq5Np0PgUNSjhcsdjZVWbgouxp5KeL_8MjUuUuKM,12609 -html5lib/treebuilders/etree_lxml.py,sha256=z3Bnfm2MstEEb_lbaAeicl5l-ab6MSQa5Q1ZZreK7Pc,14031 -html5lib/treewalkers/__init__.py,sha256=m2-4a5P4dMNlQb26MNIhgj69p6ms1i-JD2HPDr7iTfw,5766 -html5lib/treewalkers/_base.py,sha256=Ms4kXXrxceHBnCsVBZ04LFIR7V3e2AiSL41QIaBTWT4,7002 -html5lib/treewalkers/dom.py,sha256=Lb63Nuz8HtgvkuuvSmU5LOyUkEtstH5saPPAg5xN4r8,1421 -html5lib/treewalkers/etree.py,sha256=qJklMXBbBYqHxQfAeyAxW57x2xGt-nqOe80KDbv4MM0,4588 -html5lib/treewalkers/genshistream.py,sha256=IbBFrlgi-59-K7P1zm0d7ZFIknBN4c5E57PHJDkx39s,2278 
-html5lib/treewalkers/lxmletree.py,sha256=1vMqSFN6IhwFtH3eqAXSNm-55I-5NwSb_-oBOCAvLv8,5980 -html5lib/treewalkers/pulldom.py,sha256=9W6i8yWtUzayV6EwX-okVacttHaqpQZwdBCc2S3XeQ4,2302 -html5lib/trie/__init__.py,sha256=mec5zyJ5wIKRM8819gIcIsYQwncg91rEmPwGH1dG3Ho,212 -html5lib/trie/_base.py,sha256=WGY8SGptFmx4O0aKLJ54zrIQOoyuvhS0ngA36vAcIcc,927 -html5lib/trie/datrie.py,sha256=rGMj61020CBiR97e4kyMyqn_FSIJzgDcYT2uj7PZkoo,1166 -html5lib/trie/py.py,sha256=zg7RZSHxJ8mLmuI_7VEIV8AomISrgkvqCP477AgXaG0,1763 -html5lib-0.9999999.dist-info/DESCRIPTION.rst,sha256=4symxPlAGFfTe4DH5texohsZDD1gYeIxBpeE-9Uh0CY,9899 -html5lib-0.9999999.dist-info/METADATA,sha256=KM-AHI5RI5O7BSTrqF8Acd8jX_7yodP5PmZ1V8tREck,10902 -html5lib-0.9999999.dist-info/RECORD,, -html5lib-0.9999999.dist-info/WHEEL,sha256=lCqt3ViRAf9c8mCs6o7ffkwROUdYSy8_YHn5f_rulB4,93 -html5lib-0.9999999.dist-info/metadata.json,sha256=ptJ1V8e-ESI9H736RKJ5AJZ3I0T31YwhyIB0Zu2ru4o,1116 -html5lib-0.9999999.dist-info/top_level.txt,sha256=XEX6CHpskSmvjJB4tP6m4Q5NYXhIf_0ceMc0PNbzJPQ,9 -html5lib-0.9999999.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -html5lib/treebuilders/__pycache__/etree_lxml.cpython-34.pyc,, -html5lib/serializer/__pycache__/htmlserializer.cpython-34.pyc,, -html5lib/filters/__pycache__/sanitizer.cpython-34.pyc,, -html5lib/__pycache__/utils.cpython-34.pyc,, -html5lib/filters/__pycache__/optionaltags.cpython-34.pyc,, -html5lib/treewalkers/__pycache__/dom.cpython-34.pyc,, -html5lib/__pycache__/constants.cpython-34.pyc,, -html5lib/treewalkers/__pycache__/lxmletree.cpython-34.pyc,, -html5lib/treewalkers/__pycache__/genshistream.cpython-34.pyc,, -html5lib/trie/__pycache__/datrie.cpython-34.pyc,, -html5lib/treewalkers/__pycache__/__init__.cpython-34.pyc,, -html5lib/treebuilders/__pycache__/__init__.cpython-34.pyc,, -html5lib/trie/__pycache__/_base.cpython-34.pyc,, -html5lib/treewalkers/__pycache__/pulldom.cpython-34.pyc,, -html5lib/filters/__pycache__/whitespace.cpython-34.pyc,, 
-html5lib/filters/__pycache__/inject_meta_charset.cpython-34.pyc,, -html5lib/treewalkers/__pycache__/_base.cpython-34.pyc,, -html5lib/treebuilders/__pycache__/_base.cpython-34.pyc,, -html5lib/trie/__pycache__/__init__.cpython-34.pyc,, -html5lib/filters/__pycache__/lint.cpython-34.pyc,, -html5lib/__pycache__/sanitizer.cpython-34.pyc,, -html5lib/filters/__pycache__/alphabeticalattributes.cpython-34.pyc,, -html5lib/treewalkers/__pycache__/etree.cpython-34.pyc,, -html5lib/treebuilders/__pycache__/dom.cpython-34.pyc,, -html5lib/__pycache__/html5parser.cpython-34.pyc,, -html5lib/__pycache__/inputstream.cpython-34.pyc,, -html5lib/treebuilders/__pycache__/etree.cpython-34.pyc,, -html5lib/filters/__pycache__/_base.cpython-34.pyc,, -html5lib/trie/__pycache__/py.cpython-34.pyc,, -html5lib/__pycache__/tokenizer.cpython-34.pyc,, -html5lib/treeadapters/__pycache__/sax.cpython-34.pyc,, -html5lib/__pycache__/ihatexml.cpython-34.pyc,, -html5lib/treeadapters/__pycache__/__init__.cpython-34.pyc,, -html5lib/__pycache__/__init__.cpython-34.pyc,, -html5lib/filters/__pycache__/__init__.cpython-34.pyc,, -html5lib/serializer/__pycache__/__init__.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/WHEEL deleted file mode 100644 index 6d9801a..0000000 --- a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: cp34-none-any - diff --git a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/metadata.json deleted file mode 100644 index 172fe27..0000000 --- a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", 
"License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "extensions": {"python.details": {"contacts": [{"email": "james@hoppipolla.co.uk", "name": "James Graham", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/html5lib/html5lib-python"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "license": "MIT License", "metadata_version": "2.0", "name": "html5lib", "run_requires": [{"requires": ["six"]}], "summary": "HTML parser based on the WHATWG HTML specification", "version": "0.9999999"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/METADATA b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/DESCRIPTION.rst similarity index 64% rename from Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/METADATA rename to Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/DESCRIPTION.rst index 5bb7fd0..c05f8c0 100644 --- a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/METADATA +++ b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/DESCRIPTION.rst @@ -1,28 +1,3 @@ -Metadata-Version: 2.0 -Name: html5lib -Version: 0.9999999 -Summary: HTML parser based on the WHATWG HTML specification -Home-page: https://github.com/html5lib/html5lib-python -Author: James Graham -Author-email: james@hoppipolla.co.uk -License: MIT License -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers 
-Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML -Requires-Dist: six - html5lib ======== @@ -76,7 +51,7 @@ pass into html5lib as follows: import html5lib with closing(urlopen("http://example.com/")) as f: - document = html5lib.parse(f, encoding=f.info().getparam("charset")) + document = html5lib.parse(f, transport_encoding=f.info().getparam("charset")) When using with ``urllib.request`` (Python 3), the charset from HTTP should be pass into html5lib as follows: @@ -87,7 +62,7 @@ should be pass into html5lib as follows: import html5lib with urlopen("http://example.com/") as f: - document = html5lib.parse(f, encoding=f.info().get_content_charset()) + document = html5lib.parse(f, transport_encoding=f.info().get_content_charset()) To have more control over the parser, create a parser object explicitly. For instance, to make the parser raise exceptions on parse errors, use: @@ -109,13 +84,13 @@ format: parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder("dom")) minidom_document = parser.parse("

Hello World!") -More documentation is available at http://html5lib.readthedocs.org/. +More documentation is available at https://html5lib.readthedocs.io/. Installation ------------ -html5lib works on CPython 2.6+, CPython 3.2+ and PyPy. To install it, +html5lib works on CPython 2.7+, CPython 3.3+ and PyPy. To install it, use: .. code-block:: bash @@ -129,8 +104,8 @@ Optional Dependencies The following third-party libraries may be used for additional functionality: -- ``datrie`` can be used to improve parsing performance (though in - almost all cases the improvement is marginal); +- ``datrie`` can be used under CPython to improve parsing performance + (though in almost all cases the improvement is marginal); - ``lxml`` is supported as a tree format (for both building and walking) under CPython (but *not* PyPy where it is known to cause @@ -138,13 +113,8 @@ functionality: - ``genshi`` has a treewalker (but not builder); and -- ``charade`` can be used as a fallback when character encoding cannot - be determined; ``chardet``, from which it was forked, can also be used - on Python 2. - -- ``ordereddict`` can be used under Python 2.6 - (``collections.OrderedDict`` is used instead on later versions) to - serialize attributes in alphabetical order. +- ``chardet`` can be used as a fallback when character encoding cannot + be determined. Bugs @@ -157,9 +127,8 @@ Please report any bugs on the `issue tracker Tests ----- -Unit tests require the ``nose`` library and can be run using the -``nosetests`` command in the root directory; ``ordereddict`` is -required under Python 2.6. All should pass. +Unit tests require the ``pytest`` and ``mock`` libraries and can be +run using the ``py.test`` command in the root directory. Test data are contained in a separate `html5lib-tests `_ repository and included @@ -184,6 +153,123 @@ irc.freenode.net `_. Change Log ---------- +1.0.1 +~~~~~ + +Released on December 7, 2017 + +Breaking changes: + +* Drop support for Python 2.6. 
(#330) (Thank you, Hugo, Will Kahn-Greene!) +* Remove ``utils/spider.py`` (#353) (Thank you, Jon Dufresne!) + +Features: + +* Improve documentation. (#300, #307) (Thank you, Jon Dufresne, Tom Most, + Will Kahn-Greene!) +* Add iframe seamless boolean attribute. (Thank you, Ritwik Gupta!) +* Add itemscope as a boolean attribute. (#194) (Thank you, Jonathan Vanasco!) +* Support Python 3.6. (#333) (Thank you, Jon Dufresne!) +* Add CI support for Windows using AppVeyor. (Thank you, John Vandenberg!) +* Improve testing and CI and add code coverage (#323, #334), (Thank you, Jon + Dufresne, John Vandenberg, Geoffrey Sneddon, Will Kahn-Greene!) +* Semver-compliant version number. + +Bug fixes: + +* Add support for setuptools < 18.5 to support environment markers. (Thank you, + John Vandenberg!) +* Add explicit dependency for six >= 1.9. (Thank you, Eric Amorde!) +* Fix regexes to work with Python 3.7 regex adjustments. (#318, #379) (Thank + you, Benedikt Morbach, Ville Skyttä, Mark Vasilkov!) +* Fix alphabeticalattributes filter namespace bug. (#324) (Thank you, Will + Kahn-Greene!) +* Include license file in generated wheel package. (#350) (Thank you, Jon + Dufresne!) +* Fix annotation-xml typo. (#339) (Thank you, Will Kahn-Greene!) +* Allow uppercase hex chararcters in CSS colour check. (#377) (Thank you, + Komal Dembla, Hugo!) + + +1.0 +~~~ + +Released and unreleased on December 7, 2017. Badly packaged release. + + +0.999999999/1.0b10 +~~~~~~~~~~~~~~~~~~ + +Released on July 15, 2016 + +* Fix attribute order going to the tree builder to be document order + instead of reverse document order(!). + + +0.99999999/1.0b9 +~~~~~~~~~~~~~~~~ + +Released on July 14, 2016 + +* **Added ordereddict as a mandatory dependency on Python 2.6.** + +* Added ``lxml``, ``genshi``, ``datrie``, ``charade``, and ``all`` + extras that will do the right thing based on the specific + interpreter implementation. + +* Now requires the ``mock`` package for the testsuite. 
+ +* Cease supporting DATrie under PyPy. + +* **Remove PullDOM support, as this hasn't ever been properly + tested, doesn't entirely work, and as far as I can tell is + completely unused by anyone.** + +* Move testsuite to ``py.test``. + +* **Fix #124: move to webencodings for decoding the input byte stream; + this makes html5lib compliant with the Encoding Standard, and + introduces a required dependency on webencodings.** + +* **Cease supporting Python 3.2 (in both CPython and PyPy forms).** + +* **Fix comments containing double-dash with lxml 3.5 and above.** + +* **Use scripting disabled by default (as we don't implement + scripting).** + +* **Fix #11, avoiding the XSS bug potentially caused by serializer + allowing attribute values to be escaped out of in old browser versions, + changing the quote_attr_values option on serializer to take one of + three values, "always" (the old True value), "legacy" (the new option, + and the new default), and "spec" (the old False value, and the old + default).** + +* **Fix #72 by rewriting the sanitizer to apply only to treewalkers + (instead of the tokenizer); as such, this will require amending all + callers of it to use it via the treewalker API.** + +* **Drop support of charade, now that chardet is supported once more.** + +* **Replace the charset keyword argument on parse and related methods + with a set of keyword arguments: override_encoding, transport_encoding, + same_origin_parent_encoding, likely_encoding, and default_encoding.** + +* **Move filters._base, treebuilder._base, and treewalkers._base to .base + to clarify their status as public.** + +* **Get rid of the sanitizer package. Merge sanitizer.sanitize into the + sanitizer.htmlsanitizer module and move that to sanitizer. 
This means + anyone who used sanitizer.sanitize or sanitizer.HTMLSanitizer needs no + code changes.** + +* **Rename treewalkers.lxmletree to .etree_lxml and + treewalkers.genshistream to .genshi to have a consistent API.** + +* Move a whole load of stuff (inputstream, ihatexml, trie, tokenizer, + utils) to be underscore prefixed to clarify their status as private. + + 0.9999999/1.0b8 ~~~~~~~~~~~~~~~ @@ -356,7 +442,7 @@ Released on May 17, 2013 * Test harness has been improved and now depends on ``nose``. -* Documentation updated and moved to http://html5lib.readthedocs.org/. +* Documentation updated and moved to https://html5lib.readthedocs.io/. 0.95 diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/INSTALLER similarity index 100% rename from Shared/lib/python3.4/site-packages/PySocks-1.5.6.dist-info/INSTALLER rename to Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/INSTALLER diff --git a/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/LICENSE.txt b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/LICENSE.txt new file mode 100644 index 0000000..c87fa7a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2006-2013 James Graham and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/METADATA similarity index 57% rename from Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/DESCRIPTION.rst rename to Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/METADATA index 79e525f..f8131d7 100644 --- a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/DESCRIPTION.rst +++ b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/METADATA @@ -1,3 +1,44 @@ +Metadata-Version: 2.0 +Name: html5lib +Version: 1.0.1 +Summary: HTML parser based on the WHATWG HTML specification +Home-page: https://github.com/html5lib/html5lib-python +Author: James Graham +Author-email: james@hoppipolla.co.uk +License: MIT License +Description-Content-Type: UNKNOWN +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Software Development :: Libraries :: 
Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Dist: six (>=1.9) +Requires-Dist: webencodings +Provides-Extra: all +Requires-Dist: genshi; extra == 'all' +Requires-Dist: chardet (>=2.2); extra == 'all' +Provides-Extra: all +Requires-Dist: datrie; platform_python_implementation == 'CPython' and extra == 'all' +Requires-Dist: lxml; platform_python_implementation == 'CPython' and extra == 'all' +Provides-Extra: chardet +Requires-Dist: chardet (>=2.2); extra == 'chardet' +Provides-Extra: datrie +Requires-Dist: datrie; platform_python_implementation == 'CPython' and extra == 'datrie' +Provides-Extra: genshi +Requires-Dist: genshi; extra == 'genshi' +Provides-Extra: lxml +Requires-Dist: lxml; platform_python_implementation == 'CPython' and extra == 'lxml' + html5lib ======== @@ -51,7 +92,7 @@ pass into html5lib as follows: import html5lib with closing(urlopen("http://example.com/")) as f: - document = html5lib.parse(f, encoding=f.info().getparam("charset")) + document = html5lib.parse(f, transport_encoding=f.info().getparam("charset")) When using with ``urllib.request`` (Python 3), the charset from HTTP should be pass into html5lib as follows: @@ -62,7 +103,7 @@ should be pass into html5lib as follows: import html5lib with urlopen("http://example.com/") as f: - document = html5lib.parse(f, encoding=f.info().get_content_charset()) + document = html5lib.parse(f, transport_encoding=f.info().get_content_charset()) To have more control over the parser, create a parser object explicitly. For instance, to make the parser raise exceptions on parse errors, use: @@ -84,13 +125,13 @@ format: parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder("dom")) minidom_document = parser.parse("

Hello World!") -More documentation is available at http://html5lib.readthedocs.org/. +More documentation is available at https://html5lib.readthedocs.io/. Installation ------------ -html5lib works on CPython 2.6+, CPython 3.2+ and PyPy. To install it, +html5lib works on CPython 2.7+, CPython 3.3+ and PyPy. To install it, use: .. code-block:: bash @@ -104,8 +145,8 @@ Optional Dependencies The following third-party libraries may be used for additional functionality: -- ``datrie`` can be used to improve parsing performance (though in - almost all cases the improvement is marginal); +- ``datrie`` can be used under CPython to improve parsing performance + (though in almost all cases the improvement is marginal); - ``lxml`` is supported as a tree format (for both building and walking) under CPython (but *not* PyPy where it is known to cause @@ -113,13 +154,8 @@ functionality: - ``genshi`` has a treewalker (but not builder); and -- ``charade`` can be used as a fallback when character encoding cannot - be determined; ``chardet``, from which it was forked, can also be used - on Python 2. - -- ``ordereddict`` can be used under Python 2.6 - (``collections.OrderedDict`` is used instead on later versions) to - serialize attributes in alphabetical order. +- ``chardet`` can be used as a fallback when character encoding cannot + be determined. Bugs @@ -132,9 +168,8 @@ Please report any bugs on the `issue tracker Tests ----- -Unit tests require the ``nose`` library and can be run using the -``nosetests`` command in the root directory; ``ordereddict`` is -required under Python 2.6. All should pass. +Unit tests require the ``pytest`` and ``mock`` libraries and can be +run using the ``py.test`` command in the root directory. Test data are contained in a separate `html5lib-tests `_ repository and included @@ -159,6 +194,123 @@ irc.freenode.net `_. Change Log ---------- +1.0.1 +~~~~~ + +Released on December 7, 2017 + +Breaking changes: + +* Drop support for Python 2.6. 
(#330) (Thank you, Hugo, Will Kahn-Greene!) +* Remove ``utils/spider.py`` (#353) (Thank you, Jon Dufresne!) + +Features: + +* Improve documentation. (#300, #307) (Thank you, Jon Dufresne, Tom Most, + Will Kahn-Greene!) +* Add iframe seamless boolean attribute. (Thank you, Ritwik Gupta!) +* Add itemscope as a boolean attribute. (#194) (Thank you, Jonathan Vanasco!) +* Support Python 3.6. (#333) (Thank you, Jon Dufresne!) +* Add CI support for Windows using AppVeyor. (Thank you, John Vandenberg!) +* Improve testing and CI and add code coverage (#323, #334), (Thank you, Jon + Dufresne, John Vandenberg, Geoffrey Sneddon, Will Kahn-Greene!) +* Semver-compliant version number. + +Bug fixes: + +* Add support for setuptools < 18.5 to support environment markers. (Thank you, + John Vandenberg!) +* Add explicit dependency for six >= 1.9. (Thank you, Eric Amorde!) +* Fix regexes to work with Python 3.7 regex adjustments. (#318, #379) (Thank + you, Benedikt Morbach, Ville Skyttä, Mark Vasilkov!) +* Fix alphabeticalattributes filter namespace bug. (#324) (Thank you, Will + Kahn-Greene!) +* Include license file in generated wheel package. (#350) (Thank you, Jon + Dufresne!) +* Fix annotation-xml typo. (#339) (Thank you, Will Kahn-Greene!) +* Allow uppercase hex chararcters in CSS colour check. (#377) (Thank you, + Komal Dembla, Hugo!) + + +1.0 +~~~ + +Released and unreleased on December 7, 2017. Badly packaged release. + + +0.999999999/1.0b10 +~~~~~~~~~~~~~~~~~~ + +Released on July 15, 2016 + +* Fix attribute order going to the tree builder to be document order + instead of reverse document order(!). + + +0.99999999/1.0b9 +~~~~~~~~~~~~~~~~ + +Released on July 14, 2016 + +* **Added ordereddict as a mandatory dependency on Python 2.6.** + +* Added ``lxml``, ``genshi``, ``datrie``, ``charade``, and ``all`` + extras that will do the right thing based on the specific + interpreter implementation. + +* Now requires the ``mock`` package for the testsuite. 
+ +* Cease supporting DATrie under PyPy. + +* **Remove PullDOM support, as this hasn't ever been properly + tested, doesn't entirely work, and as far as I can tell is + completely unused by anyone.** + +* Move testsuite to ``py.test``. + +* **Fix #124: move to webencodings for decoding the input byte stream; + this makes html5lib compliant with the Encoding Standard, and + introduces a required dependency on webencodings.** + +* **Cease supporting Python 3.2 (in both CPython and PyPy forms).** + +* **Fix comments containing double-dash with lxml 3.5 and above.** + +* **Use scripting disabled by default (as we don't implement + scripting).** + +* **Fix #11, avoiding the XSS bug potentially caused by serializer + allowing attribute values to be escaped out of in old browser versions, + changing the quote_attr_values option on serializer to take one of + three values, "always" (the old True value), "legacy" (the new option, + and the new default), and "spec" (the old False value, and the old + default).** + +* **Fix #72 by rewriting the sanitizer to apply only to treewalkers + (instead of the tokenizer); as such, this will require amending all + callers of it to use it via the treewalker API.** + +* **Drop support of charade, now that chardet is supported once more.** + +* **Replace the charset keyword argument on parse and related methods + with a set of keyword arguments: override_encoding, transport_encoding, + same_origin_parent_encoding, likely_encoding, and default_encoding.** + +* **Move filters._base, treebuilder._base, and treewalkers._base to .base + to clarify their status as public.** + +* **Get rid of the sanitizer package. Merge sanitizer.sanitize into the + sanitizer.htmlsanitizer module and move that to sanitizer. 
This means + anyone who used sanitizer.sanitize or sanitizer.HTMLSanitizer needs no + code changes.** + +* **Rename treewalkers.lxmletree to .etree_lxml and + treewalkers.genshistream to .genshi to have a consistent API.** + +* Move a whole load of stuff (inputstream, ihatexml, trie, tokenizer, + utils) to be underscore prefixed to clarify their status as private. + + 0.9999999/1.0b8 ~~~~~~~~~~~~~~~ @@ -331,7 +483,7 @@ Released on May 17, 2013 * Test harness has been improved and now depends on ``nose``. -* Documentation updated and moved to http://html5lib.readthedocs.org/. +* Documentation updated and moved to https://html5lib.readthedocs.io/. 0.95 diff --git a/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/RECORD b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/RECORD new file mode 100644 index 0000000..5af8016 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/RECORD @@ -0,0 +1,76 @@ +html5lib-1.0.1.dist-info/DESCRIPTION.rst,sha256=1QkiA38mSikkzyQO1kAQXkBUtQSTl-MR63Zd2TMe06s,13763 +html5lib-1.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +html5lib-1.0.1.dist-info/LICENSE.txt,sha256=FqOZkWGekvGGgJMtoqkZn999ld8-yu3FLqBiGKq6_W8,1084 +html5lib-1.0.1.dist-info/METADATA,sha256=ViKKPHTrTam-_oHIB2cxmtg4gKgqdfl4ahDnIWBdyUE,15484 +html5lib-1.0.1.dist-info/RECORD,, +html5lib-1.0.1.dist-info/WHEEL,sha256=5wvfB7GvgZAbKBSE9uX9Zbi6LCL-_KgezgHblXhCRnM,113 +html5lib-1.0.1.dist-info/metadata.json,sha256=bbLAvG6pYvgK2EdWNVi1mNa5pu9bI4qLwGm0IvuesFA,1731 +html5lib-1.0.1.dist-info/top_level.txt,sha256=XEX6CHpskSmvjJB4tP6m4Q5NYXhIf_0ceMc0PNbzJPQ,9 +html5lib/__init__.py,sha256=q1D20NqqzRVgmTHW2xiVtaQT2eKna-iit3tL62Yn5OI,1145 +html5lib/__pycache__/__init__.cpython-37.pyc,, +html5lib/__pycache__/_ihatexml.cpython-37.pyc,, +html5lib/__pycache__/_inputstream.cpython-37.pyc,, +html5lib/__pycache__/_tokenizer.cpython-37.pyc,, +html5lib/__pycache__/_utils.cpython-37.pyc,, 
+html5lib/__pycache__/constants.cpython-37.pyc,, +html5lib/__pycache__/html5parser.cpython-37.pyc,, +html5lib/__pycache__/serializer.cpython-37.pyc,, +html5lib/_ihatexml.py,sha256=3LBtJMlzgwM8vpQiU1TvGmEEmNH72sV0yD8yS53y07A,16705 +html5lib/_inputstream.py,sha256=WtC-hb3nS7Du6XvdL9JACOQgD5ydPKb7f9z0q4OIvRM,32499 +html5lib/_tokenizer.py,sha256=JFZ4kiYfas1f62q2bdXH8Ch5DtXAWEZg0KYkRF4boRQ,76568 +html5lib/_trie/__init__.py,sha256=8VR1bcgD2OpeS2XExpu5yBhP_Q1K-lwKbBKICBPf1kU,289 +html5lib/_trie/__pycache__/__init__.cpython-37.pyc,, +html5lib/_trie/__pycache__/_base.cpython-37.pyc,, +html5lib/_trie/__pycache__/datrie.cpython-37.pyc,, +html5lib/_trie/__pycache__/py.cpython-37.pyc,, +html5lib/_trie/_base.py,sha256=uJHVhzif9S0MJXgy9F98iEev5evi_rgUk5BmEbUSp8c,930 +html5lib/_trie/datrie.py,sha256=rGMj61020CBiR97e4kyMyqn_FSIJzgDcYT2uj7PZkoo,1166 +html5lib/_trie/py.py,sha256=zg7RZSHxJ8mLmuI_7VEIV8AomISrgkvqCP477AgXaG0,1763 +html5lib/_utils.py,sha256=UHC4fXEZRJ0YM44Z4DeLem66auCjb08vSPcN6Y714Iw,4003 +html5lib/constants.py,sha256=4lmZWLtEPRLnl8NzftOoYTJdo6jpeMtP6dqQC0g_bWQ,83518 +html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +html5lib/filters/__pycache__/__init__.cpython-37.pyc,, +html5lib/filters/__pycache__/alphabeticalattributes.cpython-37.pyc,, +html5lib/filters/__pycache__/base.cpython-37.pyc,, +html5lib/filters/__pycache__/inject_meta_charset.cpython-37.pyc,, +html5lib/filters/__pycache__/lint.cpython-37.pyc,, +html5lib/filters/__pycache__/optionaltags.cpython-37.pyc,, +html5lib/filters/__pycache__/sanitizer.cpython-37.pyc,, +html5lib/filters/__pycache__/whitespace.cpython-37.pyc,, +html5lib/filters/alphabeticalattributes.py,sha256=lViZc2JMCclXi_5gduvmdzrRxtO5Xo9ONnbHBVCsykU,919 +html5lib/filters/base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286 +html5lib/filters/inject_meta_charset.py,sha256=egDXUEHXmAG9504xz0K6ALDgYkvUrC2q15YUVeNlVQg,2945 +html5lib/filters/lint.py,sha256=upXATs6By7cot7o0bnNqR15sPq2Fn6Vnjvoy3gyO_rY,3631 
+html5lib/filters/optionaltags.py,sha256=8lWT75J0aBOHmPgfmqTHSfPpPMp01T84NKu0CRedxcE,10588 +html5lib/filters/sanitizer.py,sha256=V6_cpCq9EXgXkL1CblWEUxSgHy466Hy8k0453x8PSs8,26236 +html5lib/filters/whitespace.py,sha256=8eWqZxd4UC4zlFGW6iyY6f-2uuT8pOCSALc3IZt7_t4,1214 +html5lib/html5parser.py,sha256=eeMsctZUonbJZPegB_CElFye2lGufMcMsxQxsJtf7Mg,118951 +html5lib/serializer.py,sha256=cmZQjjaXThEe2_6yzDqeb3yXS_hUggv0cCa2VBD9e2Y,15746 +html5lib/treeadapters/__init__.py,sha256=18hyI-at2aBsdKzpwRwa5lGF1ipgctaTYXoU9En2ZQg,650 +html5lib/treeadapters/__pycache__/__init__.cpython-37.pyc,, +html5lib/treeadapters/__pycache__/genshi.cpython-37.pyc,, +html5lib/treeadapters/__pycache__/sax.cpython-37.pyc,, +html5lib/treeadapters/genshi.py,sha256=CH27pAsDKmu4ZGkAUrwty7u0KauGLCZRLPMzaO3M5vo,1715 +html5lib/treeadapters/sax.py,sha256=BKS8woQTnKiqeffHsxChUqL4q2ZR_wb5fc9MJ3zQC8s,1776 +html5lib/treebuilders/__init__.py,sha256=AysSJyvPfikCMMsTVvaxwkgDieELD5dfR8FJIAuq7hY,3592 +html5lib/treebuilders/__pycache__/__init__.cpython-37.pyc,, +html5lib/treebuilders/__pycache__/base.cpython-37.pyc,, +html5lib/treebuilders/__pycache__/dom.cpython-37.pyc,, +html5lib/treebuilders/__pycache__/etree.cpython-37.pyc,, +html5lib/treebuilders/__pycache__/etree_lxml.cpython-37.pyc,, +html5lib/treebuilders/base.py,sha256=JEFLxUEsluRl7vY-6cnAk44HxgCAkaj4GpEOBpg8tao,14567 +html5lib/treebuilders/dom.py,sha256=SY3MsijXyzdNPc8aK5IQsupBoM8J67y56DgNtGvsb9g,8835 +html5lib/treebuilders/etree.py,sha256=R0zaNrdtPel3XHV8PUVcQzVnMuiOm_8fpZof7tU7ips,12752 +html5lib/treebuilders/etree_lxml.py,sha256=9V0dXxbJYYq-Skgb5-_OL2NkVYpjioEb4CHajo0e9yI,14122 +html5lib/treewalkers/__init__.py,sha256=yhXxHpjlSqfQyUag3v8-vWjMPriFBU8YRAPNpDgBTn8,5714 +html5lib/treewalkers/__pycache__/__init__.cpython-37.pyc,, +html5lib/treewalkers/__pycache__/base.cpython-37.pyc,, +html5lib/treewalkers/__pycache__/dom.cpython-37.pyc,, +html5lib/treewalkers/__pycache__/etree.cpython-37.pyc,, +html5lib/treewalkers/__pycache__/etree_lxml.cpython-37.pyc,, 
+html5lib/treewalkers/__pycache__/genshi.cpython-37.pyc,, +html5lib/treewalkers/base.py,sha256=ouiOsuSzvI0KgzdWP8PlxIaSNs9falhbiinAEc_UIJY,7476 +html5lib/treewalkers/dom.py,sha256=EHyFR8D8lYNnyDU9lx_IKigVJRyecUGua0mOi7HBukc,1413 +html5lib/treewalkers/etree.py,sha256=gRzfuNnWg6r-fvtXRp4xPVTC1CHPowcn8Dc4-WcDoOg,4538 +html5lib/treewalkers/etree_lxml.py,sha256=AR07dDrdkDqrQT4yNK_5WeGiZMHfOrM3ZmmII6YrSgs,6297 +html5lib/treewalkers/genshi.py,sha256=4D2PECZ5n3ZN3qu3jMl9yY7B81jnQApBQSVlfaIuYbA,2309 diff --git a/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/WHEEL new file mode 100644 index 0000000..7bf9daa --- /dev/null +++ b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.30.0.a0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/metadata.json new file mode 100644 index 0000000..23cd6e4 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "james@hoppipolla.co.uk", "name": "James Graham", "role": "author"}], 
"document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/html5lib/html5lib-python"}}}, "extras": ["all", "chardet", "datrie", "genshi", "lxml"], "generator": "bdist_wheel (0.30.0.a0)", "license": "MIT License", "metadata_version": "2.0", "name": "html5lib", "run_requires": [{"extra": "all", "requires": ["chardet (>=2.2)", "genshi"]}, {"extra": "chardet", "requires": ["chardet (>=2.2)"]}, {"extra": "genshi", "requires": ["genshi"]}, {"requires": ["six (>=1.9)", "webencodings"]}, {"environment": "platform_python_implementation == 'CPython'", "extra": "all", "requires": ["datrie", "lxml"]}, {"environment": "platform_python_implementation == 'CPython'", "extra": "datrie", "requires": ["datrie"]}, {"environment": "platform_python_implementation == 'CPython'", "extra": "lxml", "requires": ["lxml"]}], "summary": "HTML parser based on the WHATWG HTML specification", "version": "1.0.1"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/html5lib-0.9999999.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/html5lib-1.0.1.dist-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/html5lib/__init__.py b/Shared/lib/python3.4/site-packages/html5lib/__init__.py index 962536c..ba01065 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/__init__.py +++ b/Shared/lib/python3.4/site-packages/html5lib/__init__.py @@ -1,14 +1,23 @@ """ -HTML parsing library based on the WHATWG "HTML5" -specification. The parser is designed to be compatible with existing -HTML found in the wild and implements well-defined error recovery that +HTML parsing library based on the `WHATWG HTML specification +`_. 
The parser is designed to be compatible with +existing HTML found in the wild and implements well-defined error recovery that is largely compatible with modern desktop web browsers. -Example usage: +Example usage:: -import html5lib -f = open("my_document.html") -tree = html5lib.parse(f) + import html5lib + with open("my_document.html", "rb") as f: + tree = html5lib.parse(f) + +For convenience, this module re-exports the following names: + +* :func:`~.html5parser.parse` +* :func:`~.html5parser.parseFragment` +* :class:`~.html5parser.HTMLParser` +* :func:`~.treebuilders.getTreeBuilder` +* :func:`~.treewalkers.getTreeWalker` +* :func:`~.serializer.serialize` """ from __future__ import absolute_import, division, unicode_literals @@ -22,4 +31,5 @@ __all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", "getTreeWalker", "serialize"] # this has to be at the top level, see how setup.py parses this -__version__ = "0.9999999" +#: Distribution version number. +__version__ = "1.0.1" diff --git a/Shared/lib/python3.4/site-packages/html5lib/ihatexml.py b/Shared/lib/python3.4/site-packages/html5lib/_ihatexml.py similarity index 97% rename from Shared/lib/python3.4/site-packages/html5lib/ihatexml.py rename to Shared/lib/python3.4/site-packages/html5lib/_ihatexml.py index 0fc7930..4c77717 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/ihatexml.py +++ b/Shared/lib/python3.4/site-packages/html5lib/_ihatexml.py @@ -175,18 +175,18 @@ def escapeRegexp(string): return string # output from the above -nonXmlNameBMPRegexp = 
re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48
\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') +nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b2
9\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa -nonXmlNameFirstBMPRegexp = 
re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u31
04\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') +nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u2
12f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa # Simpler things -nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]") +nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]") class InfosetFilter(object): replacementRegexp = re.compile(r"U[\dA-F]{5,5}") - def __init__(self, replaceChars=None, + def __init__(self, dropXmlnsLocalName=False, dropXmlnsAttrNs=False, preventDoubleDashComments=False, @@ -217,7 +217,7 @@ class InfosetFilter(object): else: return self.toXmlName(name) - def coerceElement(self, name, namespace=None): + def coerceElement(self, name): return self.toXmlName(name) def coerceComment(self, data): @@ -225,11 +225,14 @@ class InfosetFilter(object): while "--" in data: warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning) data = data.replace("--", "- -") + if data.endswith("-"): + warnings.warn("Comments cannot end in a dash", DataLossWarning) + data += " " return data def coerceCharacters(self, data): if self.replaceFormFeedCharacters: - for i in range(data.count("\x0C")): + for _ in range(data.count("\x0C")): warnings.warn("Text cannot contain U+000C", DataLossWarning) data = data.replace("\x0C", " ") # Other non-xml characters diff --git a/Shared/lib/python3.4/site-packages/html5lib/inputstream.py b/Shared/lib/python3.4/site-packages/html5lib/_inputstream.py similarity index 83% rename from Shared/lib/python3.4/site-packages/html5lib/inputstream.py rename to Shared/lib/python3.4/site-packages/html5lib/_inputstream.py index 7020aa6..177f0ab 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/inputstream.py +++ b/Shared/lib/python3.4/site-packages/html5lib/_inputstream.py @@ -1,13 +1,16 @@ from __future__ import absolute_import, division, unicode_literals -from six import text_type -from six.moves import http_client + +from six import text_type, binary_type +from six.moves import http_client, 
urllib import codecs import re +import webencodings + from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase -from .constants import encodings, ReparseException -from . import utils +from .constants import _ReparseException +from . import _utils from io import StringIO @@ -16,12 +19,6 @@ try: except ImportError: BytesIO = StringIO -try: - from io import BufferedIOBase -except ImportError: - class BufferedIOBase(object): - pass - # Non-unicode versions of constants for use in the pre-parser spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters]) asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters]) @@ -29,15 +26,17 @@ asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"]) -invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" +invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa -if utils.supports_lone_surrogates: +if _utils.supports_lone_surrogates: # Use one extra step of indirection and create surrogates with - # unichr. Not using this indirection would introduce an illegal + # eval. 
Not using this indirection would introduce an illegal # unicode literal on platforms not supporting such lone # surrogates. - invalid_unicode_re = re.compile(invalid_unicode_no_surrogate + - eval('"\\uD800-\\uDFFF"')) + assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1 + invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] + + eval('"\\uD800-\\uDFFF"') + # pylint:disable=eval-used + "]") else: invalid_unicode_re = re.compile(invalid_unicode_no_surrogate) @@ -49,7 +48,7 @@ non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF]) -ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]") +ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]") # Cache for charsUntil() charsUntilRegEx = {} @@ -129,10 +128,13 @@ class BufferedStream(object): return b"".join(rv) -def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True): - if isinstance(source, http_client.HTTPResponse): - # Work around Python bug #20007: read(0) closes the connection. - # http://bugs.python.org/issue20007 +def HTMLInputStream(source, **kwargs): + # Work around Python bug #20007: read(0) closes the connection. 
+ # http://bugs.python.org/issue20007 + if (isinstance(source, http_client.HTTPResponse) or + # Also check for addinfourl wrapping HTTPResponse + (isinstance(source, urllib.response.addbase) and + isinstance(source.fp, http_client.HTTPResponse))): isUnicode = False elif hasattr(source, "read"): isUnicode = isinstance(source.read(0), text_type) @@ -140,12 +142,13 @@ def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True): isUnicode = isinstance(source, text_type) if isUnicode: - if encoding is not None: - raise TypeError("Cannot explicitly set an encoding with a unicode string") + encodings = [x for x in kwargs if x.endswith("_encoding")] + if encodings: + raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings) - return HTMLUnicodeInputStream(source) + return HTMLUnicodeInputStream(source, **kwargs) else: - return HTMLBinaryInputStream(source, encoding, parseMeta, chardet) + return HTMLBinaryInputStream(source, **kwargs) class HTMLUnicodeInputStream(object): @@ -171,27 +174,21 @@ class HTMLUnicodeInputStream(object): regardless of any BOM or later declaration (such as in a meta element) - parseMeta - Look for a element containing encoding information - """ - if not utils.supports_lone_surrogates: + if not _utils.supports_lone_surrogates: # Such platforms will have already checked for such # surrogate errors, so no need to do this checking. self.reportCharacterErrors = None - self.replaceCharactersRegexp = None elif len("\U0010FFFF") == 1: self.reportCharacterErrors = self.characterErrorsUCS4 - self.replaceCharactersRegexp = re.compile(eval('"[\\uD800-\\uDFFF]"')) else: self.reportCharacterErrors = self.characterErrorsUCS2 - self.replaceCharactersRegexp = re.compile( - eval('"([\\uD800-\\uDBFF](?![\\uDC00-\\uDFFF])|(? 
Normalized stream from source @@ -408,8 +404,6 @@ class HTMLBinaryInputStream(HTMLUnicodeInputStream): regardless of any BOM or later declaration (such as in a meta element) - parseMeta - Look for a element containing encoding information - """ # Raw Stream - for unicode objects this will encode to utf-8 and set # self.charEncoding as appropriate @@ -417,27 +411,28 @@ class HTMLBinaryInputStream(HTMLUnicodeInputStream): HTMLUnicodeInputStream.__init__(self, self.rawStream) - self.charEncoding = (codecName(encoding), "certain") - # Encoding Information # Number of bytes to use when looking for a meta element with # encoding information - self.numBytesMeta = 512 + self.numBytesMeta = 1024 # Number of bytes to use when using detecting encoding using chardet self.numBytesChardet = 100 - # Encoding to use if no other information can be found - self.defaultEncoding = "windows-1252" + # Things from args + self.override_encoding = override_encoding + self.transport_encoding = transport_encoding + self.same_origin_parent_encoding = same_origin_parent_encoding + self.likely_encoding = likely_encoding + self.default_encoding = default_encoding - # Detect encoding iff no explicit "transport level" encoding is supplied - if (self.charEncoding[0] is None): - self.charEncoding = self.detectEncoding(parseMeta, chardet) + # Determine encoding + self.charEncoding = self.determineEncoding(useChardet) + assert self.charEncoding[0] is not None # Call superclass self.reset() def reset(self): - self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream, - 'replace') + self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace') HTMLUnicodeInputStream.reset(self) def openStream(self, source): @@ -454,29 +449,50 @@ class HTMLBinaryInputStream(HTMLUnicodeInputStream): try: stream.seek(stream.tell()) - except: + except: # pylint:disable=bare-except stream = BufferedStream(stream) return stream - def detectEncoding(self, parseMeta=True, chardet=True): 
- # First look for a BOM + def determineEncoding(self, chardet=True): + # BOMs take precedence over everything # This will also read past the BOM if present - encoding = self.detectBOM() - confidence = "certain" - # If there is no BOM need to look for meta elements with encoding - # information - if encoding is None and parseMeta: - encoding = self.detectEncodingMeta() - confidence = "tentative" - # Guess with chardet, if avaliable - if encoding is None and chardet: - confidence = "tentative" + charEncoding = self.detectBOM(), "certain" + if charEncoding[0] is not None: + return charEncoding + + # If we've been overriden, we've been overriden + charEncoding = lookupEncoding(self.override_encoding), "certain" + if charEncoding[0] is not None: + return charEncoding + + # Now check the transport layer + charEncoding = lookupEncoding(self.transport_encoding), "certain" + if charEncoding[0] is not None: + return charEncoding + + # Look for meta elements with encoding information + charEncoding = self.detectEncodingMeta(), "tentative" + if charEncoding[0] is not None: + return charEncoding + + # Parent document encoding + charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative" + if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"): + return charEncoding + + # "likely" encoding + charEncoding = lookupEncoding(self.likely_encoding), "tentative" + if charEncoding[0] is not None: + return charEncoding + + # Guess with chardet, if available + if chardet: try: - try: - from charade.universaldetector import UniversalDetector - except ImportError: - from chardet.universaldetector import UniversalDetector + from chardet.universaldetector import UniversalDetector + except ImportError: + pass + else: buffers = [] detector = UniversalDetector() while not detector.done: @@ -487,37 +503,34 @@ class HTMLBinaryInputStream(HTMLUnicodeInputStream): buffers.append(buffer) detector.feed(buffer) detector.close() - encoding = 
detector.result['encoding'] + encoding = lookupEncoding(detector.result['encoding']) self.rawStream.seek(0) - except ImportError: - pass - # If all else fails use the default encoding - if encoding is None: - confidence = "tentative" - encoding = self.defaultEncoding + if encoding is not None: + return encoding, "tentative" - # Substitute for equivalent encodings: - encodingSub = {"iso-8859-1": "windows-1252"} + # Try the default encoding + charEncoding = lookupEncoding(self.default_encoding), "tentative" + if charEncoding[0] is not None: + return charEncoding - if encoding.lower() in encodingSub: - encoding = encodingSub[encoding.lower()] - - return encoding, confidence + # Fallback to html5lib's default if even that hasn't worked + return lookupEncoding("windows-1252"), "tentative" def changeEncoding(self, newEncoding): assert self.charEncoding[1] != "certain" - newEncoding = codecName(newEncoding) - if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"): - newEncoding = "utf-8" + newEncoding = lookupEncoding(newEncoding) if newEncoding is None: return + if newEncoding.name in ("utf-16be", "utf-16le"): + newEncoding = lookupEncoding("utf-8") + assert newEncoding is not None elif newEncoding == self.charEncoding[0]: self.charEncoding = (self.charEncoding[0], "certain") else: self.rawStream.seek(0) - self.reset() self.charEncoding = (newEncoding, "certain") - raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding)) + self.reset() + raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding)) def detectBOM(self): """Attempts to detect at BOM at the start of the stream. 
If @@ -525,8 +538,8 @@ class HTMLBinaryInputStream(HTMLUnicodeInputStream): encoding otherwise return None""" bomDict = { codecs.BOM_UTF8: 'utf-8', - codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be', - codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be' + codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be', + codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be' } # Go to beginning of file and read in 4 bytes @@ -546,9 +559,12 @@ class HTMLBinaryInputStream(HTMLUnicodeInputStream): # Set the read position past the BOM if one was found, otherwise # set it to the start of the stream - self.rawStream.seek(encoding and seek or 0) - - return encoding + if encoding: + self.rawStream.seek(seek) + return lookupEncoding(encoding) + else: + self.rawStream.seek(0) + return None def detectEncodingMeta(self): """Report the encoding declared by the meta element @@ -559,8 +575,8 @@ class HTMLBinaryInputStream(HTMLUnicodeInputStream): self.rawStream.seek(0) encoding = parser.getEncoding() - if encoding in ("utf-16", "utf-16-be", "utf-16-le"): - encoding = "utf-8" + if encoding is not None and encoding.name in ("utf-16be", "utf-16le"): + encoding = lookupEncoding("utf-8") return encoding @@ -574,6 +590,7 @@ class EncodingBytes(bytes): return bytes.__new__(self, value.lower()) def __init__(self, value): + # pylint:disable=unused-argument self._position = -1 def __iter__(self): @@ -684,7 +701,7 @@ class EncodingParser(object): (b" 0: - for i in range(nullCount): + for _ in range(nullCount): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) data = data.replace("\u0000", "\uFFFD") diff --git a/Shared/lib/python3.4/site-packages/html5lib/trie/__init__.py b/Shared/lib/python3.4/site-packages/html5lib/_trie/__init__.py similarity index 73% rename from Shared/lib/python3.4/site-packages/html5lib/trie/__init__.py rename to Shared/lib/python3.4/site-packages/html5lib/_trie/__init__.py index 
a8cca8a..a5ba4bf 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/trie/__init__.py +++ b/Shared/lib/python3.4/site-packages/html5lib/_trie/__init__.py @@ -4,9 +4,11 @@ from .py import Trie as PyTrie Trie = PyTrie +# pylint:disable=wrong-import-position try: from .datrie import Trie as DATrie except ImportError: pass else: Trie = DATrie +# pylint:enable=wrong-import-position diff --git a/Shared/lib/python3.4/site-packages/html5lib/trie/_base.py b/Shared/lib/python3.4/site-packages/html5lib/_trie/_base.py similarity index 85% rename from Shared/lib/python3.4/site-packages/html5lib/trie/_base.py rename to Shared/lib/python3.4/site-packages/html5lib/_trie/_base.py index 724486b..a1158bb 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/trie/_base.py +++ b/Shared/lib/python3.4/site-packages/html5lib/_trie/_base.py @@ -7,13 +7,13 @@ class Trie(Mapping): """Abstract base class for tries""" def keys(self, prefix=None): - keys = super().keys() + # pylint:disable=arguments-differ + keys = super(Trie, self).keys() if prefix is None: return set(keys) - # Python 2.6: no set comprehensions - return set([x for x in keys if x.startswith(prefix)]) + return {x for x in keys if x.startswith(prefix)} def has_keys_with_prefix(self, prefix): for key in self.keys(): diff --git a/Shared/lib/python3.4/site-packages/html5lib/trie/datrie.py b/Shared/lib/python3.4/site-packages/html5lib/_trie/datrie.py similarity index 100% rename from Shared/lib/python3.4/site-packages/html5lib/trie/datrie.py rename to Shared/lib/python3.4/site-packages/html5lib/_trie/datrie.py diff --git a/Shared/lib/python3.4/site-packages/html5lib/trie/py.py b/Shared/lib/python3.4/site-packages/html5lib/_trie/py.py similarity index 100% rename from Shared/lib/python3.4/site-packages/html5lib/trie/py.py rename to Shared/lib/python3.4/site-packages/html5lib/_trie/py.py diff --git a/Shared/lib/python3.4/site-packages/html5lib/utils.py b/Shared/lib/python3.4/site-packages/html5lib/_utils.py similarity index 73% 
rename from Shared/lib/python3.4/site-packages/html5lib/utils.py rename to Shared/lib/python3.4/site-packages/html5lib/_utils.py index fdc18fe..91252f2 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/utils.py +++ b/Shared/lib/python3.4/site-packages/html5lib/_utils.py @@ -22,12 +22,12 @@ __all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair", # surrogates, and there is no mechanism to further escape such # escapes. try: - _x = eval('"\\uD800"') + _x = eval('"\\uD800"') # pylint:disable=eval-used if not isinstance(_x, text_type): # We need this with u"" because of http://bugs.jython.org/issue2039 - _x = eval('u"\\uD800"') + _x = eval('u"\\uD800"') # pylint:disable=eval-used assert isinstance(_x, text_type) -except: +except: # pylint:disable=bare-except supports_lone_surrogates = False else: supports_lone_surrogates = True @@ -52,19 +52,20 @@ class MethodDispatcher(dict): # anything here. _dictEntries = [] for name, value in items: - if type(name) in (list, tuple, frozenset, set): + if isinstance(name, (list, tuple, frozenset, set)): for item in name: _dictEntries.append((item, value)) else: _dictEntries.append((name, value)) dict.__init__(self, _dictEntries) + assert len(self) == len(_dictEntries) self.default = None def __getitem__(self, key): return dict.get(self, key, self.default) -# Some utility functions to dal with weirdness around UCS2 vs UCS4 +# Some utility functions to deal with weirdness around UCS2 vs UCS4 # python builds def isSurrogatePair(data): @@ -91,13 +92,33 @@ def moduleFactoryFactory(factory): else: name = b"_%s_factory" % baseModule.__name__ - if name in moduleCache: - return moduleCache[name] - else: + kwargs_tuple = tuple(kwargs.items()) + + try: + return moduleCache[name][args][kwargs_tuple] + except KeyError: mod = ModuleType(name) objs = factory(baseModule, *args, **kwargs) mod.__dict__.update(objs) - moduleCache[name] = mod + if "name" not in moduleCache: + moduleCache[name] = {} + if "args" not in moduleCache[name]: 
+ moduleCache[name][args] = {} + if "kwargs" not in moduleCache[name][args]: + moduleCache[name][args][kwargs_tuple] = {} + moduleCache[name][args][kwargs_tuple] = mod return mod return moduleFactory + + +def memoize(func): + cache = {} + + def wrapped(*args, **kwargs): + key = (tuple(args), tuple(kwargs.items())) + if key not in cache: + cache[key] = func(*args, **kwargs) + return cache[key] + + return wrapped diff --git a/Shared/lib/python3.4/site-packages/html5lib/constants.py b/Shared/lib/python3.4/site-packages/html5lib/constants.py index d938e0a..1ff8041 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/constants.py +++ b/Shared/lib/python3.4/site-packages/html5lib/constants.py @@ -283,6 +283,12 @@ E = { "Element %(name)s not allowed in a non-html context", "unexpected-end-tag-before-html": "Unexpected end tag (%(name)s) before html.", + "unexpected-inhead-noscript-tag": + "Element %(name)s not allowed in a inhead-noscript context", + "eof-in-head-noscript": + "Unexpected end of file. Expected inhead-noscript content", + "char-in-head-noscript": + "Unexpected non-space character. 
Expected inhead-noscript content", "XXX-undefined-error": "Undefined error (this sucks and should be fixed)", } @@ -417,7 +423,7 @@ specialElements = frozenset([ ]) htmlIntegrationPointElements = frozenset([ - (namespaces["mathml"], "annotaion-xml"), + (namespaces["mathml"], "annotation-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title") @@ -431,6 +437,73 @@ mathmlTextIntegrationPointElements = frozenset([ (namespaces["mathml"], "mtext") ]) +adjustSVGAttributes = { + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + 
"requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan" +} + +adjustMathMLAttributes = {"definitionurl": "definitionURL"} + adjustForeignAttributes = { "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), @@ -515,7 +588,7 @@ rcdataElements = frozenset([ ]) booleanAttributes = { - "": frozenset(["irrelevant"]), + "": frozenset(["irrelevant", "itemscope"]), "style": frozenset(["scoped"]), "img": frozenset(["ismap"]), "audio": frozenset(["autoplay", "controls"]), @@ -533,6 +606,7 @@ booleanAttributes = { "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]), "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]), "output": frozenset(["disabled", "readonly"]), + "iframe": frozenset(["seamless"]), } # entitiesWindows1252 has to be _ordered_ and needs to have an index. 
It @@ -2813,7 +2887,6 @@ replacementCharacters = { 0x0d: "\u000D", 0x80: "\u20AC", 0x81: "\u0081", - 0x81: "\u0081", 0x82: "\u201A", 0x83: "\u0192", 0x84: "\u201E", @@ -2846,235 +2919,6 @@ replacementCharacters = { 0x9F: "\u0178", } -encodings = { - '437': 'cp437', - '850': 'cp850', - '852': 'cp852', - '855': 'cp855', - '857': 'cp857', - '860': 'cp860', - '861': 'cp861', - '862': 'cp862', - '863': 'cp863', - '865': 'cp865', - '866': 'cp866', - '869': 'cp869', - 'ansix341968': 'ascii', - 'ansix341986': 'ascii', - 'arabic': 'iso8859-6', - 'ascii': 'ascii', - 'asmo708': 'iso8859-6', - 'big5': 'big5', - 'big5hkscs': 'big5hkscs', - 'chinese': 'gbk', - 'cp037': 'cp037', - 'cp1026': 'cp1026', - 'cp154': 'ptcp154', - 'cp367': 'ascii', - 'cp424': 'cp424', - 'cp437': 'cp437', - 'cp500': 'cp500', - 'cp775': 'cp775', - 'cp819': 'windows-1252', - 'cp850': 'cp850', - 'cp852': 'cp852', - 'cp855': 'cp855', - 'cp857': 'cp857', - 'cp860': 'cp860', - 'cp861': 'cp861', - 'cp862': 'cp862', - 'cp863': 'cp863', - 'cp864': 'cp864', - 'cp865': 'cp865', - 'cp866': 'cp866', - 'cp869': 'cp869', - 'cp936': 'gbk', - 'cpgr': 'cp869', - 'cpis': 'cp861', - 'csascii': 'ascii', - 'csbig5': 'big5', - 'cseuckr': 'cp949', - 'cseucpkdfmtjapanese': 'euc_jp', - 'csgb2312': 'gbk', - 'cshproman8': 'hp-roman8', - 'csibm037': 'cp037', - 'csibm1026': 'cp1026', - 'csibm424': 'cp424', - 'csibm500': 'cp500', - 'csibm855': 'cp855', - 'csibm857': 'cp857', - 'csibm860': 'cp860', - 'csibm861': 'cp861', - 'csibm863': 'cp863', - 'csibm864': 'cp864', - 'csibm865': 'cp865', - 'csibm866': 'cp866', - 'csibm869': 'cp869', - 'csiso2022jp': 'iso2022_jp', - 'csiso2022jp2': 'iso2022_jp_2', - 'csiso2022kr': 'iso2022_kr', - 'csiso58gb231280': 'gbk', - 'csisolatin1': 'windows-1252', - 'csisolatin2': 'iso8859-2', - 'csisolatin3': 'iso8859-3', - 'csisolatin4': 'iso8859-4', - 'csisolatin5': 'windows-1254', - 'csisolatin6': 'iso8859-10', - 'csisolatinarabic': 'iso8859-6', - 'csisolatincyrillic': 'iso8859-5', - 'csisolatingreek': 
'iso8859-7', - 'csisolatinhebrew': 'iso8859-8', - 'cskoi8r': 'koi8-r', - 'csksc56011987': 'cp949', - 'cspc775baltic': 'cp775', - 'cspc850multilingual': 'cp850', - 'cspc862latinhebrew': 'cp862', - 'cspc8codepage437': 'cp437', - 'cspcp852': 'cp852', - 'csptcp154': 'ptcp154', - 'csshiftjis': 'shift_jis', - 'csunicode11utf7': 'utf-7', - 'cyrillic': 'iso8859-5', - 'cyrillicasian': 'ptcp154', - 'ebcdiccpbe': 'cp500', - 'ebcdiccpca': 'cp037', - 'ebcdiccpch': 'cp500', - 'ebcdiccphe': 'cp424', - 'ebcdiccpnl': 'cp037', - 'ebcdiccpus': 'cp037', - 'ebcdiccpwt': 'cp037', - 'ecma114': 'iso8859-6', - 'ecma118': 'iso8859-7', - 'elot928': 'iso8859-7', - 'eucjp': 'euc_jp', - 'euckr': 'cp949', - 'extendedunixcodepackedformatforjapanese': 'euc_jp', - 'gb18030': 'gb18030', - 'gb2312': 'gbk', - 'gb231280': 'gbk', - 'gbk': 'gbk', - 'greek': 'iso8859-7', - 'greek8': 'iso8859-7', - 'hebrew': 'iso8859-8', - 'hproman8': 'hp-roman8', - 'hzgb2312': 'hz', - 'ibm037': 'cp037', - 'ibm1026': 'cp1026', - 'ibm367': 'ascii', - 'ibm424': 'cp424', - 'ibm437': 'cp437', - 'ibm500': 'cp500', - 'ibm775': 'cp775', - 'ibm819': 'windows-1252', - 'ibm850': 'cp850', - 'ibm852': 'cp852', - 'ibm855': 'cp855', - 'ibm857': 'cp857', - 'ibm860': 'cp860', - 'ibm861': 'cp861', - 'ibm862': 'cp862', - 'ibm863': 'cp863', - 'ibm864': 'cp864', - 'ibm865': 'cp865', - 'ibm866': 'cp866', - 'ibm869': 'cp869', - 'iso2022jp': 'iso2022_jp', - 'iso2022jp2': 'iso2022_jp_2', - 'iso2022kr': 'iso2022_kr', - 'iso646irv1991': 'ascii', - 'iso646us': 'ascii', - 'iso88591': 'windows-1252', - 'iso885910': 'iso8859-10', - 'iso8859101992': 'iso8859-10', - 'iso885911987': 'windows-1252', - 'iso885913': 'iso8859-13', - 'iso885914': 'iso8859-14', - 'iso8859141998': 'iso8859-14', - 'iso885915': 'iso8859-15', - 'iso885916': 'iso8859-16', - 'iso8859162001': 'iso8859-16', - 'iso88592': 'iso8859-2', - 'iso885921987': 'iso8859-2', - 'iso88593': 'iso8859-3', - 'iso885931988': 'iso8859-3', - 'iso88594': 'iso8859-4', - 'iso885941988': 'iso8859-4', - 
'iso88595': 'iso8859-5', - 'iso885951988': 'iso8859-5', - 'iso88596': 'iso8859-6', - 'iso885961987': 'iso8859-6', - 'iso88597': 'iso8859-7', - 'iso885971987': 'iso8859-7', - 'iso88598': 'iso8859-8', - 'iso885981988': 'iso8859-8', - 'iso88599': 'windows-1254', - 'iso885991989': 'windows-1254', - 'isoceltic': 'iso8859-14', - 'isoir100': 'windows-1252', - 'isoir101': 'iso8859-2', - 'isoir109': 'iso8859-3', - 'isoir110': 'iso8859-4', - 'isoir126': 'iso8859-7', - 'isoir127': 'iso8859-6', - 'isoir138': 'iso8859-8', - 'isoir144': 'iso8859-5', - 'isoir148': 'windows-1254', - 'isoir149': 'cp949', - 'isoir157': 'iso8859-10', - 'isoir199': 'iso8859-14', - 'isoir226': 'iso8859-16', - 'isoir58': 'gbk', - 'isoir6': 'ascii', - 'koi8r': 'koi8-r', - 'koi8u': 'koi8-u', - 'korean': 'cp949', - 'ksc5601': 'cp949', - 'ksc56011987': 'cp949', - 'ksc56011989': 'cp949', - 'l1': 'windows-1252', - 'l10': 'iso8859-16', - 'l2': 'iso8859-2', - 'l3': 'iso8859-3', - 'l4': 'iso8859-4', - 'l5': 'windows-1254', - 'l6': 'iso8859-10', - 'l8': 'iso8859-14', - 'latin1': 'windows-1252', - 'latin10': 'iso8859-16', - 'latin2': 'iso8859-2', - 'latin3': 'iso8859-3', - 'latin4': 'iso8859-4', - 'latin5': 'windows-1254', - 'latin6': 'iso8859-10', - 'latin8': 'iso8859-14', - 'latin9': 'iso8859-15', - 'ms936': 'gbk', - 'mskanji': 'shift_jis', - 'pt154': 'ptcp154', - 'ptcp154': 'ptcp154', - 'r8': 'hp-roman8', - 'roman8': 'hp-roman8', - 'shiftjis': 'shift_jis', - 'tis620': 'cp874', - 'unicode11utf7': 'utf-7', - 'us': 'ascii', - 'usascii': 'ascii', - 'utf16': 'utf-16', - 'utf16be': 'utf-16-be', - 'utf16le': 'utf-16-le', - 'utf8': 'utf-8', - 'windows1250': 'cp1250', - 'windows1251': 'cp1251', - 'windows1252': 'cp1252', - 'windows1253': 'cp1253', - 'windows1254': 'cp1254', - 'windows1255': 'cp1255', - 'windows1256': 'cp1256', - 'windows1257': 'cp1257', - 'windows1258': 'cp1258', - 'windows936': 'gbk', - 'x-x-big5': 'big5'} - tokenTypes = { "Doctype": 0, "Characters": 1, @@ -3095,8 +2939,9 @@ 
prefixes["http://www.w3.org/1998/Math/MathML"] = "math" class DataLossWarning(UserWarning): + """Raised when the current tree is unable to represent the input data""" pass -class ReparseException(Exception): +class _ReparseException(Exception): pass diff --git a/Shared/lib/python3.4/site-packages/html5lib/filters/alphabeticalattributes.py b/Shared/lib/python3.4/site-packages/html5lib/filters/alphabeticalattributes.py index fed6996..5ba926e 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/filters/alphabeticalattributes.py +++ b/Shared/lib/python3.4/site-packages/html5lib/filters/alphabeticalattributes.py @@ -1,20 +1,29 @@ from __future__ import absolute_import, division, unicode_literals -from . import _base +from . import base -try: - from collections import OrderedDict -except ImportError: - from ordereddict import OrderedDict +from collections import OrderedDict -class Filter(_base.Filter): +def _attr_key(attr): + """Return an appropriate key for an attribute for sorting + + Attributes have a namespace that can be either ``None`` or a string. We + can't compare the two because they're different types, so we convert + ``None`` to an empty string first. 
+ + """ + return (attr[0][0] or ''), attr[0][1] + + +class Filter(base.Filter): + """Alphabetizes attributes for elements""" def __iter__(self): - for token in _base.Filter.__iter__(self): + for token in base.Filter.__iter__(self): if token["type"] in ("StartTag", "EmptyTag"): attrs = OrderedDict() for name, value in sorted(token["data"].items(), - key=lambda x: x[0]): + key=_attr_key): attrs[name] = value token["data"] = attrs yield token diff --git a/Shared/lib/python3.4/site-packages/html5lib/filters/_base.py b/Shared/lib/python3.4/site-packages/html5lib/filters/base.py similarity index 100% rename from Shared/lib/python3.4/site-packages/html5lib/filters/_base.py rename to Shared/lib/python3.4/site-packages/html5lib/filters/base.py diff --git a/Shared/lib/python3.4/site-packages/html5lib/filters/inject_meta_charset.py b/Shared/lib/python3.4/site-packages/html5lib/filters/inject_meta_charset.py index ca33b70..aefb5c8 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/filters/inject_meta_charset.py +++ b/Shared/lib/python3.4/site-packages/html5lib/filters/inject_meta_charset.py @@ -1,11 +1,19 @@ from __future__ import absolute_import, division, unicode_literals -from . import _base +from . 
import base -class Filter(_base.Filter): +class Filter(base.Filter): + """Injects ```` tag into head of document""" def __init__(self, source, encoding): - _base.Filter.__init__(self, source) + """Creates a Filter + + :arg source: the source token stream + + :arg encoding: the encoding to set + + """ + base.Filter.__init__(self, source) self.encoding = encoding def __iter__(self): @@ -13,7 +21,7 @@ class Filter(_base.Filter): meta_found = (self.encoding is None) pending = [] - for token in _base.Filter.__iter__(self): + for token in base.Filter.__iter__(self): type = token["type"] if type == "StartTag": if token["name"].lower() == "head": diff --git a/Shared/lib/python3.4/site-packages/html5lib/filters/lint.py b/Shared/lib/python3.4/site-packages/html5lib/filters/lint.py index 8884696..acd4d7a 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/filters/lint.py +++ b/Shared/lib/python3.4/site-packages/html5lib/filters/lint.py @@ -1,90 +1,93 @@ from __future__ import absolute_import, division, unicode_literals -from . import _base -from ..constants import cdataElements, rcdataElements, voidElements +from six import text_type + +from . import base +from ..constants import namespaces, voidElements from ..constants import spaceCharacters spaceCharacters = "".join(spaceCharacters) -class LintError(Exception): - pass +class Filter(base.Filter): + """Lints the token stream for errors + If it finds any errors, it'll raise an ``AssertionError``. 
+ + """ + def __init__(self, source, require_matching_tags=True): + """Creates a Filter + + :arg source: the source token stream + + :arg require_matching_tags: whether or not to require matching tags + + """ + super(Filter, self).__init__(source) + self.require_matching_tags = require_matching_tags -class Filter(_base.Filter): def __iter__(self): open_elements = [] - contentModelFlag = "PCDATA" - for token in _base.Filter.__iter__(self): + for token in base.Filter.__iter__(self): type = token["type"] if type in ("StartTag", "EmptyTag"): + namespace = token["namespace"] name = token["name"] - if contentModelFlag != "PCDATA": - raise LintError("StartTag not in PCDATA content model flag: %(tag)s" % {"tag": name}) - if not isinstance(name, str): - raise LintError("Tag name is not a string: %(tag)r" % {"tag": name}) - if not name: - raise LintError("Empty tag name") - if type == "StartTag" and name in voidElements: - raise LintError("Void element reported as StartTag token: %(tag)s" % {"tag": name}) - elif type == "EmptyTag" and name not in voidElements: - raise LintError("Non-void element reported as EmptyTag token: %(tag)s" % {"tag": token["name"]}) - if type == "StartTag": - open_elements.append(name) - for name, value in token["data"]: - if not isinstance(name, str): - raise LintError("Attribute name is not a string: %(name)r" % {"name": name}) - if not name: - raise LintError("Empty attribute name") - if not isinstance(value, str): - raise LintError("Attribute value is not a string: %(value)r" % {"value": value}) - if name in cdataElements: - contentModelFlag = "CDATA" - elif name in rcdataElements: - contentModelFlag = "RCDATA" - elif name == "plaintext": - contentModelFlag = "PLAINTEXT" + assert namespace is None or isinstance(namespace, text_type) + assert namespace != "" + assert isinstance(name, text_type) + assert name != "" + assert isinstance(token["data"], dict) + if (not namespace or namespace == namespaces["html"]) and name in voidElements: + assert 
type == "EmptyTag" + else: + assert type == "StartTag" + if type == "StartTag" and self.require_matching_tags: + open_elements.append((namespace, name)) + for (namespace, name), value in token["data"].items(): + assert namespace is None or isinstance(namespace, text_type) + assert namespace != "" + assert isinstance(name, text_type) + assert name != "" + assert isinstance(value, text_type) elif type == "EndTag": + namespace = token["namespace"] name = token["name"] - if not isinstance(name, str): - raise LintError("Tag name is not a string: %(tag)r" % {"tag": name}) - if not name: - raise LintError("Empty tag name") - if name in voidElements: - raise LintError("Void element reported as EndTag token: %(tag)s" % {"tag": name}) - start_name = open_elements.pop() - if start_name != name: - raise LintError("EndTag (%(end)s) does not match StartTag (%(start)s)" % {"end": name, "start": start_name}) - contentModelFlag = "PCDATA" + assert namespace is None or isinstance(namespace, text_type) + assert namespace != "" + assert isinstance(name, text_type) + assert name != "" + if (not namespace or namespace == namespaces["html"]) and name in voidElements: + assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name} + elif self.require_matching_tags: + start = open_elements.pop() + assert start == (namespace, name) elif type == "Comment": - if contentModelFlag != "PCDATA": - raise LintError("Comment not in PCDATA content model flag") + data = token["data"] + assert isinstance(data, text_type) elif type in ("Characters", "SpaceCharacters"): data = token["data"] - if not isinstance(data, str): - raise LintError("Attribute name is not a string: %(name)r" % {"name": data}) - if not data: - raise LintError("%(type)s token with empty data" % {"type": type}) + assert isinstance(data, text_type) + assert data != "" if type == "SpaceCharacters": - data = data.strip(spaceCharacters) - if data: - raise LintError("Non-space character(s) found in SpaceCharacters token: 
%(token)r" % {"token": data}) + assert data.strip(spaceCharacters) == "" elif type == "Doctype": name = token["name"] - if contentModelFlag != "PCDATA": - raise LintError("Doctype not in PCDATA content model flag: %(name)s" % {"name": name}) - if not isinstance(name, str): - raise LintError("Tag name is not a string: %(tag)r" % {"tag": name}) - # XXX: what to do with token["data"] ? + assert name is None or isinstance(name, text_type) + assert token["publicId"] is None or isinstance(name, text_type) + assert token["systemId"] is None or isinstance(name, text_type) - elif type in ("ParseError", "SerializeError"): - pass + elif type == "Entity": + assert isinstance(token["name"], text_type) + + elif type == "SerializerError": + assert isinstance(token["data"], text_type) else: - raise LintError("Unknown token type: %(type)s" % {"type": type}) + assert False, "Unknown token type: %(type)s" % {"type": type} yield token diff --git a/Shared/lib/python3.4/site-packages/html5lib/filters/optionaltags.py b/Shared/lib/python3.4/site-packages/html5lib/filters/optionaltags.py index fefe0b3..4a86501 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/filters/optionaltags.py +++ b/Shared/lib/python3.4/site-packages/html5lib/filters/optionaltags.py @@ -1,9 +1,10 @@ from __future__ import absolute_import, division, unicode_literals -from . import _base +from . 
import base -class Filter(_base.Filter): +class Filter(base.Filter): + """Removes optional tags from the token stream""" def slider(self): previous1 = previous2 = None for token in self.source: @@ -11,7 +12,8 @@ class Filter(_base.Filter): yield previous2, previous1, token previous2 = previous1 previous1 = token - yield previous2, previous1, None + if previous1 is not None: + yield previous2, previous1, None def __iter__(self): for previous, token, next in self.slider(): @@ -58,7 +60,7 @@ class Filter(_base.Filter): elif tagname == 'colgroup': # A colgroup element's start tag may be omitted if the first thing # inside the colgroup element is a col element, and if the element - # is not immediately preceeded by another colgroup element whose + # is not immediately preceded by another colgroup element whose # end tag has been omitted. if type in ("StartTag", "EmptyTag"): # XXX: we do not look at the preceding event, so instead we never @@ -70,7 +72,7 @@ class Filter(_base.Filter): elif tagname == 'tbody': # A tbody element's start tag may be omitted if the first thing # inside the tbody element is a tr element, and if the element is - # not immediately preceeded by a tbody, thead, or tfoot element + # not immediately preceded by a tbody, thead, or tfoot element # whose end tag has been omitted. if type == "StartTag": # omit the thead and tfoot elements' end tag when they are diff --git a/Shared/lib/python3.4/site-packages/html5lib/filters/sanitizer.py b/Shared/lib/python3.4/site-packages/html5lib/filters/sanitizer.py index b206b54..e852f53 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/filters/sanitizer.py +++ b/Shared/lib/python3.4/site-packages/html5lib/filters/sanitizer.py @@ -1,12 +1,896 @@ from __future__ import absolute_import, division, unicode_literals -from . import _base -from ..sanitizer import HTMLSanitizerMixin +import re +from xml.sax.saxutils import escape, unescape + +from six.moves import urllib_parse as urlparse + +from . 
import base +from ..constants import namespaces, prefixes + +__all__ = ["Filter"] -class Filter(_base.Filter, HTMLSanitizerMixin): +allowed_elements = frozenset(( + (namespaces['html'], 'a'), + (namespaces['html'], 'abbr'), + (namespaces['html'], 'acronym'), + (namespaces['html'], 'address'), + (namespaces['html'], 'area'), + (namespaces['html'], 'article'), + (namespaces['html'], 'aside'), + (namespaces['html'], 'audio'), + (namespaces['html'], 'b'), + (namespaces['html'], 'big'), + (namespaces['html'], 'blockquote'), + (namespaces['html'], 'br'), + (namespaces['html'], 'button'), + (namespaces['html'], 'canvas'), + (namespaces['html'], 'caption'), + (namespaces['html'], 'center'), + (namespaces['html'], 'cite'), + (namespaces['html'], 'code'), + (namespaces['html'], 'col'), + (namespaces['html'], 'colgroup'), + (namespaces['html'], 'command'), + (namespaces['html'], 'datagrid'), + (namespaces['html'], 'datalist'), + (namespaces['html'], 'dd'), + (namespaces['html'], 'del'), + (namespaces['html'], 'details'), + (namespaces['html'], 'dfn'), + (namespaces['html'], 'dialog'), + (namespaces['html'], 'dir'), + (namespaces['html'], 'div'), + (namespaces['html'], 'dl'), + (namespaces['html'], 'dt'), + (namespaces['html'], 'em'), + (namespaces['html'], 'event-source'), + (namespaces['html'], 'fieldset'), + (namespaces['html'], 'figcaption'), + (namespaces['html'], 'figure'), + (namespaces['html'], 'footer'), + (namespaces['html'], 'font'), + (namespaces['html'], 'form'), + (namespaces['html'], 'header'), + (namespaces['html'], 'h1'), + (namespaces['html'], 'h2'), + (namespaces['html'], 'h3'), + (namespaces['html'], 'h4'), + (namespaces['html'], 'h5'), + (namespaces['html'], 'h6'), + (namespaces['html'], 'hr'), + (namespaces['html'], 'i'), + (namespaces['html'], 'img'), + (namespaces['html'], 'input'), + (namespaces['html'], 'ins'), + (namespaces['html'], 'keygen'), + (namespaces['html'], 'kbd'), + (namespaces['html'], 'label'), + (namespaces['html'], 'legend'), + 
(namespaces['html'], 'li'), + (namespaces['html'], 'm'), + (namespaces['html'], 'map'), + (namespaces['html'], 'menu'), + (namespaces['html'], 'meter'), + (namespaces['html'], 'multicol'), + (namespaces['html'], 'nav'), + (namespaces['html'], 'nextid'), + (namespaces['html'], 'ol'), + (namespaces['html'], 'output'), + (namespaces['html'], 'optgroup'), + (namespaces['html'], 'option'), + (namespaces['html'], 'p'), + (namespaces['html'], 'pre'), + (namespaces['html'], 'progress'), + (namespaces['html'], 'q'), + (namespaces['html'], 's'), + (namespaces['html'], 'samp'), + (namespaces['html'], 'section'), + (namespaces['html'], 'select'), + (namespaces['html'], 'small'), + (namespaces['html'], 'sound'), + (namespaces['html'], 'source'), + (namespaces['html'], 'spacer'), + (namespaces['html'], 'span'), + (namespaces['html'], 'strike'), + (namespaces['html'], 'strong'), + (namespaces['html'], 'sub'), + (namespaces['html'], 'sup'), + (namespaces['html'], 'table'), + (namespaces['html'], 'tbody'), + (namespaces['html'], 'td'), + (namespaces['html'], 'textarea'), + (namespaces['html'], 'time'), + (namespaces['html'], 'tfoot'), + (namespaces['html'], 'th'), + (namespaces['html'], 'thead'), + (namespaces['html'], 'tr'), + (namespaces['html'], 'tt'), + (namespaces['html'], 'u'), + (namespaces['html'], 'ul'), + (namespaces['html'], 'var'), + (namespaces['html'], 'video'), + (namespaces['mathml'], 'maction'), + (namespaces['mathml'], 'math'), + (namespaces['mathml'], 'merror'), + (namespaces['mathml'], 'mfrac'), + (namespaces['mathml'], 'mi'), + (namespaces['mathml'], 'mmultiscripts'), + (namespaces['mathml'], 'mn'), + (namespaces['mathml'], 'mo'), + (namespaces['mathml'], 'mover'), + (namespaces['mathml'], 'mpadded'), + (namespaces['mathml'], 'mphantom'), + (namespaces['mathml'], 'mprescripts'), + (namespaces['mathml'], 'mroot'), + (namespaces['mathml'], 'mrow'), + (namespaces['mathml'], 'mspace'), + (namespaces['mathml'], 'msqrt'), + (namespaces['mathml'], 'mstyle'), + 
(namespaces['mathml'], 'msub'), + (namespaces['mathml'], 'msubsup'), + (namespaces['mathml'], 'msup'), + (namespaces['mathml'], 'mtable'), + (namespaces['mathml'], 'mtd'), + (namespaces['mathml'], 'mtext'), + (namespaces['mathml'], 'mtr'), + (namespaces['mathml'], 'munder'), + (namespaces['mathml'], 'munderover'), + (namespaces['mathml'], 'none'), + (namespaces['svg'], 'a'), + (namespaces['svg'], 'animate'), + (namespaces['svg'], 'animateColor'), + (namespaces['svg'], 'animateMotion'), + (namespaces['svg'], 'animateTransform'), + (namespaces['svg'], 'clipPath'), + (namespaces['svg'], 'circle'), + (namespaces['svg'], 'defs'), + (namespaces['svg'], 'desc'), + (namespaces['svg'], 'ellipse'), + (namespaces['svg'], 'font-face'), + (namespaces['svg'], 'font-face-name'), + (namespaces['svg'], 'font-face-src'), + (namespaces['svg'], 'g'), + (namespaces['svg'], 'glyph'), + (namespaces['svg'], 'hkern'), + (namespaces['svg'], 'linearGradient'), + (namespaces['svg'], 'line'), + (namespaces['svg'], 'marker'), + (namespaces['svg'], 'metadata'), + (namespaces['svg'], 'missing-glyph'), + (namespaces['svg'], 'mpath'), + (namespaces['svg'], 'path'), + (namespaces['svg'], 'polygon'), + (namespaces['svg'], 'polyline'), + (namespaces['svg'], 'radialGradient'), + (namespaces['svg'], 'rect'), + (namespaces['svg'], 'set'), + (namespaces['svg'], 'stop'), + (namespaces['svg'], 'svg'), + (namespaces['svg'], 'switch'), + (namespaces['svg'], 'text'), + (namespaces['svg'], 'title'), + (namespaces['svg'], 'tspan'), + (namespaces['svg'], 'use'), +)) + +allowed_attributes = frozenset(( + # HTML attributes + (None, 'abbr'), + (None, 'accept'), + (None, 'accept-charset'), + (None, 'accesskey'), + (None, 'action'), + (None, 'align'), + (None, 'alt'), + (None, 'autocomplete'), + (None, 'autofocus'), + (None, 'axis'), + (None, 'background'), + (None, 'balance'), + (None, 'bgcolor'), + (None, 'bgproperties'), + (None, 'border'), + (None, 'bordercolor'), + (None, 'bordercolordark'), + (None, 
'bordercolorlight'), + (None, 'bottompadding'), + (None, 'cellpadding'), + (None, 'cellspacing'), + (None, 'ch'), + (None, 'challenge'), + (None, 'char'), + (None, 'charoff'), + (None, 'choff'), + (None, 'charset'), + (None, 'checked'), + (None, 'cite'), + (None, 'class'), + (None, 'clear'), + (None, 'color'), + (None, 'cols'), + (None, 'colspan'), + (None, 'compact'), + (None, 'contenteditable'), + (None, 'controls'), + (None, 'coords'), + (None, 'data'), + (None, 'datafld'), + (None, 'datapagesize'), + (None, 'datasrc'), + (None, 'datetime'), + (None, 'default'), + (None, 'delay'), + (None, 'dir'), + (None, 'disabled'), + (None, 'draggable'), + (None, 'dynsrc'), + (None, 'enctype'), + (None, 'end'), + (None, 'face'), + (None, 'for'), + (None, 'form'), + (None, 'frame'), + (None, 'galleryimg'), + (None, 'gutter'), + (None, 'headers'), + (None, 'height'), + (None, 'hidefocus'), + (None, 'hidden'), + (None, 'high'), + (None, 'href'), + (None, 'hreflang'), + (None, 'hspace'), + (None, 'icon'), + (None, 'id'), + (None, 'inputmode'), + (None, 'ismap'), + (None, 'keytype'), + (None, 'label'), + (None, 'leftspacing'), + (None, 'lang'), + (None, 'list'), + (None, 'longdesc'), + (None, 'loop'), + (None, 'loopcount'), + (None, 'loopend'), + (None, 'loopstart'), + (None, 'low'), + (None, 'lowsrc'), + (None, 'max'), + (None, 'maxlength'), + (None, 'media'), + (None, 'method'), + (None, 'min'), + (None, 'multiple'), + (None, 'name'), + (None, 'nohref'), + (None, 'noshade'), + (None, 'nowrap'), + (None, 'open'), + (None, 'optimum'), + (None, 'pattern'), + (None, 'ping'), + (None, 'point-size'), + (None, 'poster'), + (None, 'pqg'), + (None, 'preload'), + (None, 'prompt'), + (None, 'radiogroup'), + (None, 'readonly'), + (None, 'rel'), + (None, 'repeat-max'), + (None, 'repeat-min'), + (None, 'replace'), + (None, 'required'), + (None, 'rev'), + (None, 'rightspacing'), + (None, 'rows'), + (None, 'rowspan'), + (None, 'rules'), + (None, 'scope'), + (None, 'selected'), + (None, 
'shape'), + (None, 'size'), + (None, 'span'), + (None, 'src'), + (None, 'start'), + (None, 'step'), + (None, 'style'), + (None, 'summary'), + (None, 'suppress'), + (None, 'tabindex'), + (None, 'target'), + (None, 'template'), + (None, 'title'), + (None, 'toppadding'), + (None, 'type'), + (None, 'unselectable'), + (None, 'usemap'), + (None, 'urn'), + (None, 'valign'), + (None, 'value'), + (None, 'variable'), + (None, 'volume'), + (None, 'vspace'), + (None, 'vrml'), + (None, 'width'), + (None, 'wrap'), + (namespaces['xml'], 'lang'), + # MathML attributes + (None, 'actiontype'), + (None, 'align'), + (None, 'columnalign'), + (None, 'columnalign'), + (None, 'columnalign'), + (None, 'columnlines'), + (None, 'columnspacing'), + (None, 'columnspan'), + (None, 'depth'), + (None, 'display'), + (None, 'displaystyle'), + (None, 'equalcolumns'), + (None, 'equalrows'), + (None, 'fence'), + (None, 'fontstyle'), + (None, 'fontweight'), + (None, 'frame'), + (None, 'height'), + (None, 'linethickness'), + (None, 'lspace'), + (None, 'mathbackground'), + (None, 'mathcolor'), + (None, 'mathvariant'), + (None, 'mathvariant'), + (None, 'maxsize'), + (None, 'minsize'), + (None, 'other'), + (None, 'rowalign'), + (None, 'rowalign'), + (None, 'rowalign'), + (None, 'rowlines'), + (None, 'rowspacing'), + (None, 'rowspan'), + (None, 'rspace'), + (None, 'scriptlevel'), + (None, 'selection'), + (None, 'separator'), + (None, 'stretchy'), + (None, 'width'), + (None, 'width'), + (namespaces['xlink'], 'href'), + (namespaces['xlink'], 'show'), + (namespaces['xlink'], 'type'), + # SVG attributes + (None, 'accent-height'), + (None, 'accumulate'), + (None, 'additive'), + (None, 'alphabetic'), + (None, 'arabic-form'), + (None, 'ascent'), + (None, 'attributeName'), + (None, 'attributeType'), + (None, 'baseProfile'), + (None, 'bbox'), + (None, 'begin'), + (None, 'by'), + (None, 'calcMode'), + (None, 'cap-height'), + (None, 'class'), + (None, 'clip-path'), + (None, 'color'), + (None, 'color-rendering'), + 
(None, 'content'), + (None, 'cx'), + (None, 'cy'), + (None, 'd'), + (None, 'dx'), + (None, 'dy'), + (None, 'descent'), + (None, 'display'), + (None, 'dur'), + (None, 'end'), + (None, 'fill'), + (None, 'fill-opacity'), + (None, 'fill-rule'), + (None, 'font-family'), + (None, 'font-size'), + (None, 'font-stretch'), + (None, 'font-style'), + (None, 'font-variant'), + (None, 'font-weight'), + (None, 'from'), + (None, 'fx'), + (None, 'fy'), + (None, 'g1'), + (None, 'g2'), + (None, 'glyph-name'), + (None, 'gradientUnits'), + (None, 'hanging'), + (None, 'height'), + (None, 'horiz-adv-x'), + (None, 'horiz-origin-x'), + (None, 'id'), + (None, 'ideographic'), + (None, 'k'), + (None, 'keyPoints'), + (None, 'keySplines'), + (None, 'keyTimes'), + (None, 'lang'), + (None, 'marker-end'), + (None, 'marker-mid'), + (None, 'marker-start'), + (None, 'markerHeight'), + (None, 'markerUnits'), + (None, 'markerWidth'), + (None, 'mathematical'), + (None, 'max'), + (None, 'min'), + (None, 'name'), + (None, 'offset'), + (None, 'opacity'), + (None, 'orient'), + (None, 'origin'), + (None, 'overline-position'), + (None, 'overline-thickness'), + (None, 'panose-1'), + (None, 'path'), + (None, 'pathLength'), + (None, 'points'), + (None, 'preserveAspectRatio'), + (None, 'r'), + (None, 'refX'), + (None, 'refY'), + (None, 'repeatCount'), + (None, 'repeatDur'), + (None, 'requiredExtensions'), + (None, 'requiredFeatures'), + (None, 'restart'), + (None, 'rotate'), + (None, 'rx'), + (None, 'ry'), + (None, 'slope'), + (None, 'stemh'), + (None, 'stemv'), + (None, 'stop-color'), + (None, 'stop-opacity'), + (None, 'strikethrough-position'), + (None, 'strikethrough-thickness'), + (None, 'stroke'), + (None, 'stroke-dasharray'), + (None, 'stroke-dashoffset'), + (None, 'stroke-linecap'), + (None, 'stroke-linejoin'), + (None, 'stroke-miterlimit'), + (None, 'stroke-opacity'), + (None, 'stroke-width'), + (None, 'systemLanguage'), + (None, 'target'), + (None, 'text-anchor'), + (None, 'to'), + (None, 'transform'), + 
(None, 'type'), + (None, 'u1'), + (None, 'u2'), + (None, 'underline-position'), + (None, 'underline-thickness'), + (None, 'unicode'), + (None, 'unicode-range'), + (None, 'units-per-em'), + (None, 'values'), + (None, 'version'), + (None, 'viewBox'), + (None, 'visibility'), + (None, 'width'), + (None, 'widths'), + (None, 'x'), + (None, 'x-height'), + (None, 'x1'), + (None, 'x2'), + (namespaces['xlink'], 'actuate'), + (namespaces['xlink'], 'arcrole'), + (namespaces['xlink'], 'href'), + (namespaces['xlink'], 'role'), + (namespaces['xlink'], 'show'), + (namespaces['xlink'], 'title'), + (namespaces['xlink'], 'type'), + (namespaces['xml'], 'base'), + (namespaces['xml'], 'lang'), + (namespaces['xml'], 'space'), + (None, 'y'), + (None, 'y1'), + (None, 'y2'), + (None, 'zoomAndPan'), +)) + +attr_val_is_uri = frozenset(( + (None, 'href'), + (None, 'src'), + (None, 'cite'), + (None, 'action'), + (None, 'longdesc'), + (None, 'poster'), + (None, 'background'), + (None, 'datasrc'), + (None, 'dynsrc'), + (None, 'lowsrc'), + (None, 'ping'), + (namespaces['xlink'], 'href'), + (namespaces['xml'], 'base'), +)) + +svg_attr_val_allows_ref = frozenset(( + (None, 'clip-path'), + (None, 'color-profile'), + (None, 'cursor'), + (None, 'fill'), + (None, 'filter'), + (None, 'marker'), + (None, 'marker-start'), + (None, 'marker-mid'), + (None, 'marker-end'), + (None, 'mask'), + (None, 'stroke'), +)) + +svg_allow_local_href = frozenset(( + (None, 'altGlyph'), + (None, 'animate'), + (None, 'animateColor'), + (None, 'animateMotion'), + (None, 'animateTransform'), + (None, 'cursor'), + (None, 'feImage'), + (None, 'filter'), + (None, 'linearGradient'), + (None, 'pattern'), + (None, 'radialGradient'), + (None, 'textpath'), + (None, 'tref'), + (None, 'set'), + (None, 'use') +)) + +allowed_css_properties = frozenset(( + 'azimuth', + 'background-color', + 'border-bottom-color', + 'border-collapse', + 'border-color', + 'border-left-color', + 'border-right-color', + 'border-top-color', + 'clear', + 
'color', + 'cursor', + 'direction', + 'display', + 'elevation', + 'float', + 'font', + 'font-family', + 'font-size', + 'font-style', + 'font-variant', + 'font-weight', + 'height', + 'letter-spacing', + 'line-height', + 'overflow', + 'pause', + 'pause-after', + 'pause-before', + 'pitch', + 'pitch-range', + 'richness', + 'speak', + 'speak-header', + 'speak-numeral', + 'speak-punctuation', + 'speech-rate', + 'stress', + 'text-align', + 'text-decoration', + 'text-indent', + 'unicode-bidi', + 'vertical-align', + 'voice-family', + 'volume', + 'white-space', + 'width', +)) + +allowed_css_keywords = frozenset(( + 'auto', + 'aqua', + 'black', + 'block', + 'blue', + 'bold', + 'both', + 'bottom', + 'brown', + 'center', + 'collapse', + 'dashed', + 'dotted', + 'fuchsia', + 'gray', + 'green', + '!important', + 'italic', + 'left', + 'lime', + 'maroon', + 'medium', + 'none', + 'navy', + 'normal', + 'nowrap', + 'olive', + 'pointer', + 'purple', + 'red', + 'right', + 'solid', + 'silver', + 'teal', + 'top', + 'transparent', + 'underline', + 'white', + 'yellow', +)) + +allowed_svg_properties = frozenset(( + 'fill', + 'fill-opacity', + 'fill-rule', + 'stroke', + 'stroke-width', + 'stroke-linecap', + 'stroke-linejoin', + 'stroke-opacity', +)) + +allowed_protocols = frozenset(( + 'ed2k', + 'ftp', + 'http', + 'https', + 'irc', + 'mailto', + 'news', + 'gopher', + 'nntp', + 'telnet', + 'webcal', + 'xmpp', + 'callto', + 'feed', + 'urn', + 'aim', + 'rsync', + 'tag', + 'ssh', + 'sftp', + 'rtsp', + 'afs', + 'data', +)) + +allowed_content_types = frozenset(( + 'image/png', + 'image/jpeg', + 'image/gif', + 'image/webp', + 'image/bmp', + 'text/plain', +)) + + +data_content_type = re.compile(r''' + ^ + # Match a content type / + (?P[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) + # Match any character set and encoding + (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) + |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) 
+ # Assume the rest is data + ,.* + $ + ''', + re.VERBOSE) + + +class Filter(base.Filter): + """Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes""" + def __init__(self, + source, + allowed_elements=allowed_elements, + allowed_attributes=allowed_attributes, + allowed_css_properties=allowed_css_properties, + allowed_css_keywords=allowed_css_keywords, + allowed_svg_properties=allowed_svg_properties, + allowed_protocols=allowed_protocols, + allowed_content_types=allowed_content_types, + attr_val_is_uri=attr_val_is_uri, + svg_attr_val_allows_ref=svg_attr_val_allows_ref, + svg_allow_local_href=svg_allow_local_href): + """Creates a Filter + + :arg allowed_elements: set of elements to allow--everything else will + be escaped + + :arg allowed_attributes: set of attributes to allow in + elements--everything else will be stripped + + :arg allowed_css_properties: set of CSS properties to allow--everything + else will be stripped + + :arg allowed_css_keywords: set of CSS keywords to allow--everything + else will be stripped + + :arg allowed_svg_properties: set of SVG properties to allow--everything + else will be removed + + :arg allowed_protocols: set of allowed protocols for URIs + + :arg allowed_content_types: set of allowed content types for ``data`` URIs. 
+ + :arg attr_val_is_uri: set of attributes that have URI values--values + that have a scheme not listed in ``allowed_protocols`` are removed + + :arg svg_attr_val_allows_ref: set of SVG attributes that can have + references + + :arg svg_allow_local_href: set of SVG elements that can have local + hrefs--these are removed + + """ + super(Filter, self).__init__(source) + self.allowed_elements = allowed_elements + self.allowed_attributes = allowed_attributes + self.allowed_css_properties = allowed_css_properties + self.allowed_css_keywords = allowed_css_keywords + self.allowed_svg_properties = allowed_svg_properties + self.allowed_protocols = allowed_protocols + self.allowed_content_types = allowed_content_types + self.attr_val_is_uri = attr_val_is_uri + self.svg_attr_val_allows_ref = svg_attr_val_allows_ref + self.svg_allow_local_href = svg_allow_local_href + def __iter__(self): - for token in _base.Filter.__iter__(self): + for token in base.Filter.__iter__(self): token = self.sanitize_token(token) if token: yield token + + # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and + # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes + # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and + # ALLOWED_CSS_KEYWORDS, are allowed through. attributes in ATTR_VAL_IS_URI + # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are + # allowed. 
+ # + # sanitize_html('') + # => <script> do_nasty_stuff() </script> + # sanitize_html('Click here for $100') + # => Click here for $100 + def sanitize_token(self, token): + + # accommodate filters which use token_type differently + token_type = token["type"] + if token_type in ("StartTag", "EndTag", "EmptyTag"): + name = token["name"] + namespace = token["namespace"] + if ((namespace, name) in self.allowed_elements or + (namespace is None and + (namespaces["html"], name) in self.allowed_elements)): + return self.allowed_token(token) + else: + return self.disallowed_token(token) + elif token_type == "Comment": + pass + else: + return token + + def allowed_token(self, token): + if "data" in token: + attrs = token["data"] + attr_names = set(attrs.keys()) + + # Remove forbidden attributes + for to_remove in (attr_names - self.allowed_attributes): + del token["data"][to_remove] + attr_names.remove(to_remove) + + # Remove attributes with disallowed URL values + for attr in (attr_names & self.attr_val_is_uri): + assert attr in attrs + # I don't have a clue where this regexp comes from or why it matches those + # characters, nor why we call unescape. I just know it's always been here. + # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all + # this will do is remove *more* than it otherwise would. 
+ val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '', + unescape(attrs[attr])).lower() + # remove replacement characters from unescaped characters + val_unescaped = val_unescaped.replace("\ufffd", "") + try: + uri = urlparse.urlparse(val_unescaped) + except ValueError: + uri = None + del attrs[attr] + if uri and uri.scheme: + if uri.scheme not in self.allowed_protocols: + del attrs[attr] + if uri.scheme == 'data': + m = data_content_type.match(uri.path) + if not m: + del attrs[attr] + elif m.group('content_type') not in self.allowed_content_types: + del attrs[attr] + + for attr in self.svg_attr_val_allows_ref: + if attr in attrs: + attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', + ' ', + unescape(attrs[attr])) + if (token["name"] in self.svg_allow_local_href and + (namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*', + attrs[(namespaces['xlink'], 'href')])): + del attrs[(namespaces['xlink'], 'href')] + if (None, 'style') in attrs: + attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')]) + token["data"] = attrs + return token + + def disallowed_token(self, token): + token_type = token["type"] + if token_type == "EndTag": + token["data"] = "" % token["name"] + elif token["data"]: + assert token_type in ("StartTag", "EmptyTag") + attrs = [] + for (ns, name), v in token["data"].items(): + attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v))) + token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) + else: + token["data"] = "<%s>" % token["name"] + if token.get("selfClosing"): + token["data"] = token["data"][:-1] + "/>" + + token["type"] = "Characters" + + del token["name"] + return token + + def sanitize_css(self, style): + # disallow urls + style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) + + # gauntlet + if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): + return '' + if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", 
style): + return '' + + clean = [] + for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style): + if not value: + continue + if prop.lower() in self.allowed_css_properties: + clean.append(prop + ': ' + value + ';') + elif prop.split('-')[0].lower() in ['background', 'border', 'margin', + 'padding']: + for keyword in value.split(): + if keyword not in self.allowed_css_keywords and \ + not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa + break + else: + clean.append(prop + ': ' + value + ';') + elif prop.lower() in self.allowed_svg_properties: + clean.append(prop + ': ' + value + ';') + + return ' '.join(clean) diff --git a/Shared/lib/python3.4/site-packages/html5lib/filters/whitespace.py b/Shared/lib/python3.4/site-packages/html5lib/filters/whitespace.py index dfc60ee..0d12584 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/filters/whitespace.py +++ b/Shared/lib/python3.4/site-packages/html5lib/filters/whitespace.py @@ -2,20 +2,20 @@ from __future__ import absolute_import, division, unicode_literals import re -from . import _base +from . 
import base from ..constants import rcdataElements, spaceCharacters spaceCharacters = "".join(spaceCharacters) SPACES_REGEX = re.compile("[%s]+" % spaceCharacters) -class Filter(_base.Filter): - +class Filter(base.Filter): + """Collapses whitespace except in pre, textarea, and script elements""" spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements)) def __iter__(self): preserve = 0 - for token in _base.Filter.__iter__(self): + for token in base.Filter.__iter__(self): type = token["type"] if type == "StartTag" \ and (preserve or token["name"] in self.spacePreserveElements): diff --git a/Shared/lib/python3.4/site-packages/html5lib/html5parser.py b/Shared/lib/python3.4/site-packages/html5lib/html5parser.py index 12aa6a3..9d39b9d 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/html5parser.py +++ b/Shared/lib/python3.4/site-packages/html5lib/html5parser.py @@ -1,39 +1,75 @@ from __future__ import absolute_import, division, unicode_literals -from six import with_metaclass +from six import with_metaclass, viewkeys import types +from collections import OrderedDict -from . import inputstream -from . import tokenizer +from . import _inputstream +from . import _tokenizer from . import treebuilders -from .treebuilders._base import Marker +from .treebuilders.base import Marker -from . import utils -from . import constants -from .constants import spaceCharacters, asciiUpper2Lower -from .constants import specialElements -from .constants import headingElements -from .constants import cdataElements, rcdataElements -from .constants import tokenTypes, ReparseException, namespaces -from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements -from .constants import adjustForeignAttributes as adjustForeignAttributesMap -from .constants import E +from . 
import _utils +from .constants import ( + spaceCharacters, asciiUpper2Lower, + specialElements, headingElements, cdataElements, rcdataElements, + tokenTypes, tagTokenTypes, + namespaces, + htmlIntegrationPointElements, mathmlTextIntegrationPointElements, + adjustForeignAttributes as adjustForeignAttributesMap, + adjustMathMLAttributes, adjustSVGAttributes, + E, + _ReparseException +) -def parse(doc, treebuilder="etree", encoding=None, - namespaceHTMLElements=True): - """Parse a string or file-like object into a tree""" +def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs): + """Parse an HTML document as a string or file-like object into a tree + + :arg doc: the document to parse as a string or file-like object + + :arg treebuilder: the treebuilder to use when parsing + + :arg namespaceHTMLElements: whether or not to namespace HTML elements + + :returns: parsed tree + + Example: + + >>> from html5lib.html5parser import parse + >>> parse('

This is a doc

') + + + """ tb = treebuilders.getTreeBuilder(treebuilder) p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) - return p.parse(doc, encoding=encoding) + return p.parse(doc, **kwargs) -def parseFragment(doc, container="div", treebuilder="etree", encoding=None, - namespaceHTMLElements=True): +def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs): + """Parse an HTML fragment as a string or file-like object into a tree + + :arg doc: the fragment to parse as a string or file-like object + + :arg container: the container context to parse the fragment in + + :arg treebuilder: the treebuilder to use when parsing + + :arg namespaceHTMLElements: whether or not to namespace HTML elements + + :returns: parsed tree + + Example: + + >>> from html5lib.html5libparser import parseFragment + >>> parseFragment('this is a fragment') + + + """ tb = treebuilders.getTreeBuilder(treebuilder) p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements) - return p.parseFragment(doc, container=container, encoding=encoding) + return p.parseFragment(doc, container=container, **kwargs) def method_decorator_metaclass(function): @@ -49,21 +85,30 @@ def method_decorator_metaclass(function): class HTMLParser(object): - """HTML parser. Generates a tree structure from a stream of (possibly - malformed) HTML""" + """HTML parser - def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer, - strict=False, namespaceHTMLElements=True, debug=False): + Generates a tree structure from a stream of (possibly malformed) HTML. + + """ + + def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False): """ - strict - raise an exception when a parse error is encountered + :arg tree: a treebuilder class controlling the type of tree that will be + returned. 
Built in treebuilders can be accessed through + html5lib.treebuilders.getTreeBuilder(treeType) - tree - a treebuilder class controlling the type of tree that will be - returned. Built in treebuilders can be accessed through - html5lib.treebuilders.getTreeBuilder(treeType) + :arg strict: raise an exception when a parse error is encountered + + :arg namespaceHTMLElements: whether or not to namespace HTML elements + + :arg debug: whether or not to enable debug mode which logs things + + Example: + + >>> from html5lib.html5parser import HTMLParser + >>> parser = HTMLParser() # generates parser with etree builder + >>> parser = HTMLParser('lxml', strict=True) # generates parser with lxml builder which is strict - tokenizer - a class that provides a stream of tokens to the treebuilder. - This may be replaced for e.g. a sanitizer which converts some tags to - text """ # Raise an exception on the first error encountered @@ -72,29 +117,24 @@ class HTMLParser(object): if tree is None: tree = treebuilders.getTreeBuilder("etree") self.tree = tree(namespaceHTMLElements) - self.tokenizer_class = tokenizer self.errors = [] self.phases = dict([(name, cls(self, self.tree)) for name, cls in getPhases(debug).items()]) - def _parse(self, stream, innerHTML=False, container="div", - encoding=None, parseMeta=True, useChardet=True, **kwargs): + def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs): self.innerHTMLMode = innerHTML self.container = container - self.tokenizer = self.tokenizer_class(stream, encoding=encoding, - parseMeta=parseMeta, - useChardet=useChardet, - parser=self, **kwargs) + self.scripting = scripting + self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs) self.reset() - while True: - try: - self.mainLoop() - break - except ReparseException: - self.reset() + try: + self.mainLoop() + except _ReparseException: + self.reset() + self.mainLoop() def reset(self): self.tree.reset() @@ -121,7 +161,7 @@ class 
HTMLParser(object): self.phase.insertHtmlElement() self.resetInsertionMode() else: - self.innerHTML = False + self.innerHTML = False # pylint:disable=redefined-variable-type self.phase = self.phases["initial"] self.lastPhase = None @@ -132,14 +172,13 @@ class HTMLParser(object): @property def documentEncoding(self): - """The name of the character encoding - that was used to decode the input stream, - or :obj:`None` if that is not determined yet. + """Name of the character encoding that was used to decode the input stream, or + :obj:`None` if that is not determined yet """ if not hasattr(self, 'tokenizer'): return None - return self.tokenizer.stream.charEncoding[0] + return self.tokenizer.stream.charEncoding[0].name def isHTMLIntegrationPoint(self, element): if (element.name == "annotation-xml" and @@ -164,8 +203,10 @@ class HTMLParser(object): ParseErrorToken = tokenTypes["ParseError"] for token in self.normalizedTokens(): + prev_token = None new_token = token while new_token is not None: + prev_token = new_token currentNode = self.tree.openElements[-1] if self.tree.openElements else None currentNodeNamespace = currentNode.namespace if currentNode else None currentNodeName = currentNode.name if currentNode else None @@ -184,6 +225,7 @@ class HTMLParser(object): type in (CharactersToken, SpaceCharactersToken))) or (currentNodeNamespace == namespaces["mathml"] and currentNodeName == "annotation-xml" and + type == StartTagToken and token["name"] == "svg") or (self.isHTMLIntegrationPoint(currentNode) and type in (StartTagToken, CharactersToken, SpaceCharactersToken))): @@ -204,10 +246,10 @@ class HTMLParser(object): elif type == DoctypeToken: new_token = phase.processDoctype(new_token) - if (type == StartTagToken and token["selfClosing"] - and not token["selfClosingAcknowledged"]): + if (type == StartTagToken and prev_token["selfClosing"] and + not prev_token["selfClosingAcknowledged"]): self.parseError("non-void-element-with-trailing-solidus", - {"name": 
token["name"]}) + {"name": prev_token["name"]}) # When the loop finishes it's EOF reprocess = True @@ -222,139 +264,89 @@ class HTMLParser(object): for token in self.tokenizer: yield self.normalizeToken(token) - def parse(self, stream, encoding=None, parseMeta=True, useChardet=True): + def parse(self, stream, *args, **kwargs): """Parse a HTML document into a well-formed tree - stream - a filelike object or string containing the HTML to be parsed + :arg stream: a file-like object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element). + + :arg scripting: treat noscript elements as if JavaScript was turned on + + :returns: parsed tree + + Example: + + >>> from html5lib.html5parser import HTMLParser + >>> parser = HTMLParser() + >>> parser.parse('

This is a doc

') + - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element) """ - self._parse(stream, innerHTML=False, encoding=encoding, - parseMeta=parseMeta, useChardet=useChardet) + self._parse(stream, False, None, *args, **kwargs) return self.tree.getDocument() - def parseFragment(self, stream, container="div", encoding=None, - parseMeta=False, useChardet=True): + def parseFragment(self, stream, *args, **kwargs): """Parse a HTML fragment into a well-formed tree fragment - container - name of the element we're setting the innerHTML property - if set to None, default to 'div' + :arg container: name of the element we're setting the innerHTML + property if set to None, default to 'div' - stream - a filelike object or string containing the HTML to be parsed + :arg stream: a file-like object or string containing the HTML to be parsed + + The optional encoding parameter must be a string that indicates + the encoding. If specified, that encoding will be used, + regardless of any BOM or later declaration (such as in a meta + element) + + :arg scripting: treat noscript elements as if JavaScript was turned on + + :returns: parsed tree + + Example: + + >>> from html5lib.html5libparser import HTMLParser + >>> parser = HTMLParser() + >>> parser.parseFragment('this is a fragment') + - The optional encoding parameter must be a string that indicates - the encoding. If specified, that encoding will be used, - regardless of any BOM or later declaration (such as in a meta - element) """ - self._parse(stream, True, container=container, encoding=encoding) + self._parse(stream, True, *args, **kwargs) return self.tree.getFragment() - def parseError(self, errorcode="XXX-undefined-error", datavars={}): + def parseError(self, errorcode="XXX-undefined-error", datavars=None): # XXX The idea is to make errorcode mandatory. 
+ if datavars is None: + datavars = {} self.errors.append((self.tokenizer.stream.position(), errorcode, datavars)) if self.strict: raise ParseError(E[errorcode] % datavars) def normalizeToken(self, token): - """ HTML5 specific normalizations to the token stream """ - + # HTML5 specific normalizations to the token stream if token["type"] == tokenTypes["StartTag"]: - token["data"] = dict(token["data"][::-1]) + raw = token["data"] + token["data"] = OrderedDict(raw) + if len(raw) > len(token["data"]): + # we had some duplicated attribute, fix so first wins + token["data"].update(raw[::-1]) return token def adjustMathMLAttributes(self, token): - replacements = {"definitionurl": "definitionURL"} - for k, v in replacements.items(): - if k in token["data"]: - token["data"][v] = token["data"][k] - del token["data"][k] + adjust_attributes(token, adjustMathMLAttributes) def adjustSVGAttributes(self, token): - replacements = { - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterres": "filterRes", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": 
"patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan" - } - for originalName in list(token["data"].keys()): - if originalName in replacements: - svgName = replacements[originalName] - token["data"][svgName] = token["data"][originalName] - del token["data"][originalName] + adjust_attributes(token, adjustSVGAttributes) def adjustForeignAttributes(self, token): - replacements = adjustForeignAttributesMap - - for originalName in token["data"].keys(): - if originalName in replacements: - foreignName = replacements[originalName] - token["data"][foreignName] = token["data"][originalName] - del token["data"][originalName] + adjust_attributes(token, adjustForeignAttributesMap) def reparseTokenNormal(self, token): + # pylint:disable=unused-argument self.parser.phase() def resetInsertionMode(self): @@ -402,9 +394,7 @@ class HTMLParser(object): self.phase = new_phase def parseRCDataRawtext(self, token, contentType): - """Generic RCDATA/RAWTEXT Parsing algorithm - contentType - RCDATA or RAWTEXT - 
""" + # Generic RCDATA/RAWTEXT Parsing algorithm assert contentType in ("RAWTEXT", "RCDATA") self.tree.insertElement(token) @@ -419,11 +409,12 @@ class HTMLParser(object): self.phase = self.phases["text"] +@_utils.memoize def getPhases(debug): def log(function): """Logger that records which phase processes each token""" type_names = dict((value, key) for key, value in - constants.tokenTypes.items()) + tokenTypes.items()) def wrapped(self, *args, **kwargs): if function.__name__.startswith("process") and len(args) > 0: @@ -432,7 +423,7 @@ def getPhases(debug): info = {"type": type_names[token['type']]} except: raise - if token['type'] in constants.tagTokenTypes: + if token['type'] in tagTokenTypes: info["name"] = token['name'] self.parser.log.append((self.parser.tokenizer.state.__name__, @@ -451,6 +442,7 @@ def getPhases(debug): else: return type + # pylint:disable=unused-argument class Phase(with_metaclass(getMetaclass(debug, log))): """Base class for helper object that implements each phase of processing """ @@ -517,77 +509,76 @@ def getPhases(debug): if publicId != "": publicId = publicId.translate(asciiUpper2Lower) - if (not correct or token["name"] != "html" - or publicId.startswith( - ("+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - 
"-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//")) - or publicId in - ("-//w3o//dtd w3 html strict 3.0//en//", - "-/w3c/dtd html 4.0 transitional/en", - "html") - or publicId.startswith( - ("-//w3c//dtd html 4.01 frameset//", - "-//w3c//dtd html 4.01 transitional//")) and - systemId is None - or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): + if (not correct or token["name"] != "html" or + publicId.startswith( + ("+//silmaril//dtd 
html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//")) or + publicId in ("-//w3o//dtd w3 html strict 3.0//en//", + "-/w3c/dtd html 4.0 transitional/en", + "html") or + publicId.startswith( + ("-//w3c//dtd html 4.01 frameset//", + "-//w3c//dtd html 4.01 transitional//")) and + systemId is None or + systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"): self.parser.compatMode = "quirks" elif (publicId.startswith( ("-//w3c//dtd xhtml 1.0 frameset//", - "-//w3c//dtd xhtml 1.0 transitional//")) - or publicId.startswith( + "-//w3c//dtd xhtml 1.0 transitional//")) or + publicId.startswith( ("-//w3c//dtd html 4.01 frameset//", "-//w3c//dtd html 4.01 transitional//")) and systemId is not None): @@ -660,13 +651,13 @@ def getPhases(debug): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) - self.startTagHandler = utils.MethodDispatcher([ + self.startTagHandler = _utils.MethodDispatcher([ ("html", self.startTagHtml), ("head", self.startTagHead) ]) 
self.startTagHandler.default = self.startTagOther - self.endTagHandler = utils.MethodDispatcher([ + self.endTagHandler = _utils.MethodDispatcher([ (("head", "body", "html", "br"), self.endTagImplyHead) ]) self.endTagHandler.default = self.endTagOther @@ -706,10 +697,11 @@ def getPhases(debug): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) - self.startTagHandler = utils.MethodDispatcher([ + self.startTagHandler = _utils.MethodDispatcher([ ("html", self.startTagHtml), ("title", self.startTagTitle), - (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle), + (("noframes", "style"), self.startTagNoFramesStyle), + ("noscript", self.startTagNoscript), ("script", self.startTagScript), (("base", "basefont", "bgsound", "command", "link"), self.startTagBaseLinkCommand), @@ -718,7 +710,7 @@ def getPhases(debug): ]) self.startTagHandler.default = self.startTagOther - self. endTagHandler = utils.MethodDispatcher([ + self.endTagHandler = _utils.MethodDispatcher([ ("head", self.endTagHead), (("br", "html", "body"), self.endTagHtmlBodyBr) ]) @@ -760,18 +752,25 @@ def getPhases(debug): # the abstract Unicode string, and just use the # ContentAttrParser on that, but using UTF-8 allows all chars # to be encoded and as a ASCII-superset works. 
- data = inputstream.EncodingBytes(attributes["content"].encode("utf-8")) - parser = inputstream.ContentAttrParser(data) + data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8")) + parser = _inputstream.ContentAttrParser(data) codec = parser.parse() self.parser.tokenizer.stream.changeEncoding(codec) def startTagTitle(self, token): self.parser.parseRCDataRawtext(token, "RCDATA") - def startTagNoScriptNoFramesStyle(self, token): + def startTagNoFramesStyle(self, token): # Need to decide whether to implement the scripting-disabled case self.parser.parseRCDataRawtext(token, "RAWTEXT") + def startTagNoscript(self, token): + if self.parser.scripting: + self.parser.parseRCDataRawtext(token, "RAWTEXT") + else: + self.tree.insertElement(token) + self.parser.phase = self.parser.phases["inHeadNoscript"] + def startTagScript(self, token): self.tree.insertElement(token) self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState @@ -797,15 +796,75 @@ def getPhases(debug): def anythingElse(self): self.endTagHead(impliedTagToken("head")) - # XXX If we implement a parser for which scripting is disabled we need to - # implement this phase. 
- # - # class InHeadNoScriptPhase(Phase): + class InHeadNoscriptPhase(Phase): + def __init__(self, parser, tree): + Phase.__init__(self, parser, tree) + + self.startTagHandler = _utils.MethodDispatcher([ + ("html", self.startTagHtml), + (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand), + (("head", "noscript"), self.startTagHeadNoscript), + ]) + self.startTagHandler.default = self.startTagOther + + self.endTagHandler = _utils.MethodDispatcher([ + ("noscript", self.endTagNoscript), + ("br", self.endTagBr), + ]) + self.endTagHandler.default = self.endTagOther + + def processEOF(self): + self.parser.parseError("eof-in-head-noscript") + self.anythingElse() + return True + + def processComment(self, token): + return self.parser.phases["inHead"].processComment(token) + + def processCharacters(self, token): + self.parser.parseError("char-in-head-noscript") + self.anythingElse() + return token + + def processSpaceCharacters(self, token): + return self.parser.phases["inHead"].processSpaceCharacters(token) + + def startTagHtml(self, token): + return self.parser.phases["inBody"].processStartTag(token) + + def startTagBaseLinkCommand(self, token): + return self.parser.phases["inHead"].processStartTag(token) + + def startTagHeadNoscript(self, token): + self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) + + def startTagOther(self, token): + self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) + self.anythingElse() + return token + + def endTagNoscript(self, token): + node = self.parser.tree.openElements.pop() + assert node.name == "noscript", "Expected noscript got %s" % node.name + self.parser.phase = self.parser.phases["inHead"] + + def endTagBr(self, token): + self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]}) + self.anythingElse() + return token + + def endTagOther(self, token): + self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) + + 
def anythingElse(self): + # Caller must raise parse error first! + self.endTagNoscript(impliedTagToken("noscript")) + class AfterHeadPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) - self.startTagHandler = utils.MethodDispatcher([ + self.startTagHandler = _utils.MethodDispatcher([ ("html", self.startTagHtml), ("body", self.startTagBody), ("frameset", self.startTagFrameset), @@ -815,8 +874,8 @@ def getPhases(debug): ("head", self.startTagHead) ]) self.startTagHandler.default = self.startTagOther - self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"), - self.endTagHtmlBodyBr)]) + self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"), + self.endTagHtmlBodyBr)]) self.endTagHandler.default = self.endTagOther def processEOF(self): @@ -874,10 +933,10 @@ def getPhases(debug): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) - # Keep a ref to this for special handling of whitespace in
-            self.processSpaceCharactersNonPre = self.processSpaceCharacters
+            # Set this to the default handler
+            self.processSpaceCharacters = self.processSpaceCharactersNonPre
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 (("base", "basefont", "bgsound", "command", "link", "meta",
                   "script", "style", "title"),
@@ -885,7 +944,7 @@ def getPhases(debug):
                 ("body", self.startTagBody),
                 ("frameset", self.startTagFrameset),
                 (("address", "article", "aside", "blockquote", "center", "details",
-                  "details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
+                  "dir", "div", "dl", "fieldset", "figcaption", "figure",
                   "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
                   "section", "summary", "ul"),
                  self.startTagCloseP),
@@ -911,7 +970,8 @@ def getPhases(debug):
                 ("isindex", self.startTagIsIndex),
                 ("textarea", self.startTagTextarea),
                 ("iframe", self.startTagIFrame),
-                (("noembed", "noframes", "noscript"), self.startTagRawtext),
+                ("noscript", self.startTagNoscript),
+                (("noembed", "noframes"), self.startTagRawtext),
                 ("select", self.startTagSelect),
                 (("rp", "rt"), self.startTagRpRt),
                 (("option", "optgroup"), self.startTagOpt),
@@ -923,7 +983,7 @@ def getPhases(debug):
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("body", self.endTagBody),
                 ("html", self.endTagHtml),
                 (("address", "article", "aside", "blockquote", "button", "center",
@@ -942,17 +1002,9 @@ def getPhases(debug):
             self.endTagHandler.default = self.endTagOther
 
         def isMatchingFormattingElement(self, node1, node2):
-            if node1.name != node2.name or node1.namespace != node2.namespace:
-                return False
-            elif len(node1.attributes) != len(node2.attributes):
-                return False
-            else:
-                attributes1 = sorted(node1.attributes.items())
-                attributes2 = sorted(node2.attributes.items())
-                for attr1, attr2 in zip(attributes1, attributes2):
-                    if attr1 != attr2:
-                        return False
-            return True
+            return (node1.name == node2.name and
+                    node1.namespace == node2.namespace and
+                    node1.attributes == node2.attributes)
 
         # helper
         def addFormattingElement(self, token):
@@ -988,8 +1040,8 @@ def getPhases(debug):
             data = token["data"]
             self.processSpaceCharacters = self.processSpaceCharactersNonPre
             if (data.startswith("\n") and
-                self.tree.openElements[-1].name in ("pre", "listing", "textarea")
-                    and not self.tree.openElements[-1].hasContent()):
+                self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
+                    not self.tree.openElements[-1].hasContent()):
                 data = data[1:]
             if data:
                 self.tree.reconstructActiveFormattingElements()
@@ -1007,7 +1059,7 @@ def getPhases(debug):
                      for char in token["data"]])):
                 self.parser.framesetOK = False
 
-        def processSpaceCharacters(self, token):
+        def processSpaceCharactersNonPre(self, token):
             self.tree.reconstructActiveFormattingElements()
             self.tree.insertText(token["data"])
 
@@ -1016,8 +1068,8 @@ def getPhases(debug):
 
         def startTagBody(self, token):
             self.parser.parseError("unexpected-start-tag", {"name": "body"})
-            if (len(self.tree.openElements) == 1
-                    or self.tree.openElements[1].name != "body"):
+            if (len(self.tree.openElements) == 1 or
+                    self.tree.openElements[1].name != "body"):
                 assert self.parser.innerHTML
             else:
                 self.parser.framesetOK = False
@@ -1232,6 +1284,12 @@ def getPhases(debug):
             self.parser.framesetOK = False
             self.startTagRawtext(token)
 
+        def startTagNoscript(self, token):
+            if self.parser.scripting:
+                self.startTagRawtext(token)
+            else:
+                self.startTagOther(token)
+
         def startTagRawtext(self, token):
             """iframe, noembed noframes, noscript(if scripting enabled)"""
             self.parser.parseRCDataRawtext(token, "RAWTEXT")
@@ -1327,7 +1385,7 @@ def getPhases(debug):
                         # Not sure this is the correct name for the parse error
                         self.parser.parseError(
                             "expected-one-end-tag-but-got-another",
-                            {"expectedName": "body", "gotName": node.name})
+                            {"gotName": "body", "expectedName": node.name})
                         break
             self.parser.phase = self.parser.phases["afterBody"]
 
@@ -1595,9 +1653,9 @@ def getPhases(debug):
     class TextPhase(Phase):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
-            self.startTagHandler = utils.MethodDispatcher([])
+            self.startTagHandler = _utils.MethodDispatcher([])
             self.startTagHandler.default = self.startTagOther
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("script", self.endTagScript)])
             self.endTagHandler.default = self.endTagOther
 
@@ -1629,7 +1687,7 @@ def getPhases(debug):
         # http://www.whatwg.org/specs/web-apps/current-work/#in-table
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 ("caption", self.startTagCaption),
                 ("colgroup", self.startTagColgroup),
@@ -1643,7 +1701,7 @@ def getPhases(debug):
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("table", self.endTagTable),
                 (("body", "caption", "col", "colgroup", "html", "tbody", "td",
                   "tfoot", "th", "thead", "tr"), self.endTagIgnore)
@@ -1820,14 +1878,14 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                   "thead", "tr"), self.startTagTableElement)
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("caption", self.endTagCaption),
                 ("table", self.endTagTable),
                 (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
@@ -1892,13 +1950,13 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 ("col", self.startTagCol)
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("colgroup", self.endTagColgroup),
                 ("col", self.endTagCol)
             ])
@@ -1926,6 +1984,7 @@ def getPhases(debug):
         def startTagCol(self, token):
             self.tree.insertElement(token)
             self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
 
         def startTagOther(self, token):
             ignoreEndTag = self.ignoreEndTagColgroup()
@@ -1955,7 +2014,7 @@ def getPhases(debug):
         # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 ("tr", self.startTagTr),
                 (("td", "th"), self.startTagTableCell),
@@ -1964,7 +2023,7 @@ def getPhases(debug):
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
                 ("table", self.endTagTable),
                 (("body", "caption", "col", "colgroup", "html", "td", "th",
@@ -2053,7 +2112,7 @@ def getPhases(debug):
         # http://www.whatwg.org/specs/web-apps/current-work/#in-row
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 (("td", "th"), self.startTagTableCell),
                 (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
@@ -2061,7 +2120,7 @@ def getPhases(debug):
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("tr", self.endTagTr),
                 ("table", self.endTagTable),
                 (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
@@ -2142,14 +2201,14 @@ def getPhases(debug):
         # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                   "thead", "tr"), self.startTagTableOther)
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 (("td", "th"), self.endTagTableCell),
                 (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
                 (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
@@ -2218,7 +2277,7 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 ("option", self.startTagOption),
                 ("optgroup", self.startTagOptgroup),
@@ -2228,7 +2287,7 @@ def getPhases(debug):
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("option", self.endTagOption),
                 ("optgroup", self.endTagOptgroup),
                 ("select", self.endTagSelect)
@@ -2318,13 +2377,13 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
                  self.startTagTable)
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
                  self.endTagTable)
             ])
@@ -2445,7 +2504,7 @@ def getPhases(debug):
         def processEndTag(self, token):
             nodeIndex = len(self.tree.openElements) - 1
             node = self.tree.openElements[-1]
-            if node.name != token["name"]:
+            if node.name.translate(asciiUpper2Lower) != token["name"]:
                 self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
 
             while True:
@@ -2472,12 +2531,12 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml)
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
+            self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)])
             self.endTagHandler.default = self.endTagOther
 
         def processEOF(self):
@@ -2520,7 +2579,7 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 ("frameset", self.startTagFrameset),
                 ("frame", self.startTagFrame),
@@ -2528,7 +2587,7 @@ def getPhases(debug):
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("frameset", self.endTagFrameset)
             ])
             self.endTagHandler.default = self.endTagOther
@@ -2564,7 +2623,7 @@ def getPhases(debug):
                 self.tree.openElements.pop()
             if (not self.parser.innerHTML and
                     self.tree.openElements[-1].name != "frameset"):
-                # If we're not in innerHTML mode and the the current node is not a
+                # If we're not in innerHTML mode and the current node is not a
                 # "frameset" element (anymore) then switch.
                 self.parser.phase = self.parser.phases["afterFrameset"]
 
@@ -2577,13 +2636,13 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 ("noframes", self.startTagNoframes)
             ])
             self.startTagHandler.default = self.startTagOther
 
-            self.endTagHandler = utils.MethodDispatcher([
+            self.endTagHandler = _utils.MethodDispatcher([
                 ("html", self.endTagHtml)
             ])
             self.endTagHandler.default = self.endTagOther
@@ -2613,7 +2672,7 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml)
             ])
             self.startTagHandler.default = self.startTagOther
@@ -2651,7 +2710,7 @@ def getPhases(debug):
         def __init__(self, parser, tree):
             Phase.__init__(self, parser, tree)
 
-            self.startTagHandler = utils.MethodDispatcher([
+            self.startTagHandler = _utils.MethodDispatcher([
                 ("html", self.startTagHtml),
                 ("noframes", self.startTagNoFrames)
             ])
@@ -2682,13 +2741,14 @@ def getPhases(debug):
         def processEndTag(self, token):
             self.parser.parseError("expected-eof-but-got-end-tag",
                                    {"name": token["name"]})
+    # pylint:enable=unused-argument
 
     return {
         "initial": InitialPhase,
         "beforeHtml": BeforeHtmlPhase,
         "beforeHead": BeforeHeadPhase,
         "inHead": InHeadPhase,
-        # XXX "inHeadNoscript": InHeadNoScriptPhase,
+        "inHeadNoscript": InHeadNoscriptPhase,
         "afterHead": AfterHeadPhase,
         "inBody": InBodyPhase,
         "text": TextPhase,
@@ -2711,6 +2771,13 @@ def getPhases(debug):
     }
 
 
+def adjust_attributes(token, replacements):
+    needs_adjustment = viewkeys(token['data']) & viewkeys(replacements)
+    if needs_adjustment:
+        token['data'] = OrderedDict((replacements.get(k, k), v)
+                                    for k, v in token['data'].items())
+
+
 def impliedTagToken(name, type="EndTag", attributes=None,
                     selfClosing=False):
     if attributes is None:
diff --git a/Shared/lib/python3.4/site-packages/html5lib/sanitizer.py b/Shared/lib/python3.4/site-packages/html5lib/sanitizer.py
deleted file mode 100644
index b714e8c..0000000
--- a/Shared/lib/python3.4/site-packages/html5lib/sanitizer.py
+++ /dev/null
@@ -1,300 +0,0 @@
-from __future__ import absolute_import, division, unicode_literals
-
-import re
-from xml.sax.saxutils import escape, unescape
-from six.moves import urllib_parse as urlparse
-
-from .tokenizer import HTMLTokenizer
-from .constants import tokenTypes
-
-
-content_type_rgx = re.compile(r'''
-                               ^
-                               # Match a content type /
-                               (?P[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+)
-                               # Match any character set and encoding
-                               (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?)
-                                 |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?)
-                               # Assume the rest is data
-                               ,.*
-                               $
-                               ''',
-                              re.VERBOSE)
-
-
-class HTMLSanitizerMixin(object):
-    """ sanitization of XHTML+MathML+SVG and of inline style attributes."""
-
-    acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
-                           'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
-                           'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
-                           'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
-                           'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
-                           'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
-                           'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
-                           'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
-                           'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
-                           'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
-                           'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
-                           'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
-                           'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
-
-    mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
-                       'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
-                       'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
-                       'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
-                       'munderover', 'none']
-
-    svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
-                    'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
-                    'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
-                    'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
-                    'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
-                    'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
-
-    acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
-                             'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
-                             'background', 'balance', 'bgcolor', 'bgproperties', 'border',
-                             'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
-                             'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
-                             'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
-                             'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
-                             'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
-                             'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
-                             'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
-                             'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
-                             'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
-                             'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
-                             'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
-                             'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
-                             'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
-                             'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
-                             'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
-                             'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
-                             'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
-                             'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
-                             'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
-                             'width', 'wrap', 'xml:lang']
-
-    mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
-                         'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
-                         'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
-                         'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
-                         'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
-                         'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
-                         'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
-                         'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
-                         'xlink:type', 'xmlns', 'xmlns:xlink']
-
-    svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
-                      'arabic-form', 'ascent', 'attributeName', 'attributeType',
-                      'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
-                      'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
-                      'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
-                      'fill-opacity', 'fill-rule', 'font-family', 'font-size',
-                      'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
-                      'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
-                      'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
-                      'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
-                      'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
-                      'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
-                      'opacity', 'orient', 'origin', 'overline-position',
-                      'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
-                      'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
-                      'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
-                      'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
-                      'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
-                      'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
-                      'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
-                      'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
-                      'transform', 'type', 'u1', 'u2', 'underline-position',
-                      'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
-                      'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
-                      'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
-                      'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
-                      'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
-                      'y1', 'y2', 'zoomAndPan']
-
-    attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster', 'background', 'datasrc',
-                       'dynsrc', 'lowsrc', 'ping', 'poster', 'xlink:href', 'xml:base']
-
-    svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
-                               'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
-                               'mask', 'stroke']
-
-    svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
-                            'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
-                            'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
-                            'set', 'use']
-
-    acceptable_css_properties = ['azimuth', 'background-color',
-                                 'border-bottom-color', 'border-collapse', 'border-color',
-                                 'border-left-color', 'border-right-color', 'border-top-color', 'clear',
-                                 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
-                                 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
-                                 'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
-                                 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
-                                 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
-                                 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
-                                 'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
-                                 'white-space', 'width']
-
-    acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
-                               'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
-                               'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
-                               'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
-                               'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
-                               'transparent', 'underline', 'white', 'yellow']
-
-    acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
-                                 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
-                                 'stroke-opacity']
-
-    acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
-                            'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
-                            'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
-                            'ssh', 'sftp', 'rtsp', 'afs', 'data']
-
-    acceptable_content_types = ['image/png', 'image/jpeg', 'image/gif', 'image/webp', 'image/bmp', 'text/plain']
-
-    # subclasses may define their own versions of these constants
-    allowed_elements = acceptable_elements + mathml_elements + svg_elements
-    allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
-    allowed_css_properties = acceptable_css_properties
-    allowed_css_keywords = acceptable_css_keywords
-    allowed_svg_properties = acceptable_svg_properties
-    allowed_protocols = acceptable_protocols
-    allowed_content_types = acceptable_content_types
-
-    # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
-    # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
-    # attributes are parsed, and a restricted set, # specified by
-    # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
-    # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
-    # in ALLOWED_PROTOCOLS are allowed.
-    #
-    #   sanitize_html('')
-    #    => <script> do_nasty_stuff() </script>
-    #   sanitize_html('Click here for $100')
-    #    => Click here for $100
-    def sanitize_token(self, token):
-
-        # accommodate filters which use token_type differently
-        token_type = token["type"]
-        if token_type in list(tokenTypes.keys()):
-            token_type = tokenTypes[token_type]
-
-        if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
-                          tokenTypes["EmptyTag"]):
-            if token["name"] in self.allowed_elements:
-                return self.allowed_token(token, token_type)
-            else:
-                return self.disallowed_token(token, token_type)
-        elif token_type == tokenTypes["Comment"]:
-            pass
-        else:
-            return token
-
-    def allowed_token(self, token, token_type):
-        if "data" in token:
-            attrs = dict([(name, val) for name, val in
-                          token["data"][::-1]
-                          if name in self.allowed_attributes])
-            for attr in self.attr_val_is_uri:
-                if attr not in attrs:
-                    continue
-                val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
-                                       unescape(attrs[attr])).lower()
-                # remove replacement characters from unescaped characters
-                val_unescaped = val_unescaped.replace("\ufffd", "")
-                try:
-                    uri = urlparse.urlparse(val_unescaped)
-                except ValueError:
-                    uri = None
-                    del attrs[attr]
-                if uri and uri.scheme:
-                    if uri.scheme not in self.allowed_protocols:
-                        del attrs[attr]
-                    if uri.scheme == 'data':
-                        m = content_type_rgx.match(uri.path)
-                        if not m:
-                            del attrs[attr]
-                        elif m.group('content_type') not in self.allowed_content_types:
-                            del attrs[attr]
-
-            for attr in self.svg_attr_val_allows_ref:
-                if attr in attrs:
-                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
-                                         ' ',
-                                         unescape(attrs[attr]))
-            if (token["name"] in self.svg_allow_local_href and
-                'xlink:href' in attrs and re.search('^\s*[^#\s].*',
-                                                    attrs['xlink:href'])):
-                del attrs['xlink:href']
-            if 'style' in attrs:
-                attrs['style'] = self.sanitize_css(attrs['style'])
-            token["data"] = [[name, val] for name, val in list(attrs.items())]
-        return token
-
-    def disallowed_token(self, token, token_type):
-        if token_type == tokenTypes["EndTag"]:
-            token["data"] = "" % token["name"]
-        elif token["data"]:
-            attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
-            token["data"] = "<%s%s>" % (token["name"], attrs)
-        else:
-            token["data"] = "<%s>" % token["name"]
-        if token.get("selfClosing"):
-            token["data"] = token["data"][:-1] + "/>"
-
-        if token["type"] in list(tokenTypes.keys()):
-            token["type"] = "Characters"
-        else:
-            token["type"] = tokenTypes["Characters"]
-
-        del token["name"]
-        return token
-
-    def sanitize_css(self, style):
-        # disallow urls
-        style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
-
-        # gauntlet
-        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
-            return ''
-        if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
-            return ''
-
-        clean = []
-        for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
-            if not value:
-                continue
-            if prop.lower() in self.allowed_css_properties:
-                clean.append(prop + ': ' + value + ';')
-            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
-                                                'padding']:
-                for keyword in value.split():
-                    if keyword not in self.acceptable_css_keywords and \
-                            not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
-                        break
-                else:
-                    clean.append(prop + ': ' + value + ';')
-            elif prop.lower() in self.allowed_svg_properties:
-                clean.append(prop + ': ' + value + ';')
-
-        return ' '.join(clean)
-
-
-class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
-    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
-                 lowercaseElementName=False, lowercaseAttrName=False, parser=None):
-        # Change case matching defaults as we only output lowercase html anyway
-        # This solution doesn't seem ideal...
-        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
-                               lowercaseElementName, lowercaseAttrName, parser=parser)
-
-    def __iter__(self):
-        for token in HTMLTokenizer.__iter__(self):
-            token = self.sanitize_token(token)
-            if token:
-                yield token
diff --git a/Shared/lib/python3.4/site-packages/html5lib/serializer.py b/Shared/lib/python3.4/site-packages/html5lib/serializer.py
new file mode 100644
index 0000000..d6b7105
--- /dev/null
+++ b/Shared/lib/python3.4/site-packages/html5lib/serializer.py
@@ -0,0 +1,409 @@
+from __future__ import absolute_import, division, unicode_literals
+from six import text_type
+
+import re
+
+from codecs import register_error, xmlcharrefreplace_errors
+
+from .constants import voidElements, booleanAttributes, spaceCharacters
+from .constants import rcdataElements, entities, xmlEntities
+from . import treewalkers, _utils
+from xml.sax.saxutils import escape
+
+_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
+_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
+_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
+                                   "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
+                                   "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
+                                   "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+                                   "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
+                                   "\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
+                                   "\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
+                                   "\u3000]")
+
+
+_encode_entity_map = {}
+_is_ucs4 = len("\U0010FFFF") == 1
+for k, v in list(entities.items()):
+    # skip multi-character entities
+    if ((_is_ucs4 and len(v) > 1) or
+            (not _is_ucs4 and len(v) > 2)):
+        continue
+    if v != "&":
+        if len(v) == 2:
+            v = _utils.surrogatePairToCodepoint(v)
+        else:
+            v = ord(v)
+        if v not in _encode_entity_map or k.islower():
+            # prefer < over < and similarly for &, >, etc.
+            _encode_entity_map[v] = k
+
+
+def htmlentityreplace_errors(exc):
+    if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
+        res = []
+        codepoints = []
+        skip = False
+        for i, c in enumerate(exc.object[exc.start:exc.end]):
+            if skip:
+                skip = False
+                continue
+            index = i + exc.start
+            if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
+                codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
+                skip = True
+            else:
+                codepoint = ord(c)
+            codepoints.append(codepoint)
+        for cp in codepoints:
+            e = _encode_entity_map.get(cp)
+            if e:
+                res.append("&")
+                res.append(e)
+                if not e.endswith(";"):
+                    res.append(";")
+            else:
+                res.append("&#x%s;" % (hex(cp)[2:]))
+        return ("".join(res), exc.end)
+    else:
+        return xmlcharrefreplace_errors(exc)
+
+
+register_error("htmlentityreplace", htmlentityreplace_errors)
+
+
+def serialize(input, tree="etree", encoding=None, **serializer_opts):
+    """Serializes the input token stream using the specified treewalker
+
+    :arg input: the token stream to serialize
+
+    :arg tree: the treewalker to use
+
+    :arg encoding: the encoding to use
+
+    :arg serializer_opts: any options to pass to the
+        :py:class:`html5lib.serializer.HTMLSerializer` that gets created
+
+    :returns: the tree serialized as a string
+
+    Example:
+
+    >>> from html5lib.html5parser import parse
+    >>> from html5lib.serializer import serialize
+    >>> token_stream = parse('

Hi!

') + >>> serialize(token_stream, omit_optional_tags=False) + '

Hi!

' + + """ + # XXX: Should we cache this? + walker = treewalkers.getTreeWalker(tree) + s = HTMLSerializer(**serializer_opts) + return s.render(walker(input), encoding) + + +class HTMLSerializer(object): + + # attribute quoting options + quote_attr_values = "legacy" # be secure by default + quote_char = '"' + use_best_quote_char = True + + # tag syntax options + omit_optional_tags = True + minimize_boolean_attributes = True + use_trailing_solidus = False + space_before_trailing_solidus = True + + # escaping options + escape_lt_in_attrs = False + escape_rcdata = False + resolve_entities = True + + # miscellaneous options + alphabetical_attributes = False + inject_meta_charset = True + strip_whitespace = False + sanitize = False + + options = ("quote_attr_values", "quote_char", "use_best_quote_char", + "omit_optional_tags", "minimize_boolean_attributes", + "use_trailing_solidus", "space_before_trailing_solidus", + "escape_lt_in_attrs", "escape_rcdata", "resolve_entities", + "alphabetical_attributes", "inject_meta_charset", + "strip_whitespace", "sanitize") + + def __init__(self, **kwargs): + """Initialize HTMLSerializer + + :arg inject_meta_charset: Whether or not to inject the meta charset. + + Defaults to ``True``. + + :arg quote_attr_values: Whether to quote attribute values that don't + require quoting per legacy browser behavior (``"legacy"``), when + required by the standard (``"spec"``), or always (``"always"``). + + Defaults to ``"legacy"``. + + :arg quote_char: Use given quote character for attribute quoting. + + Defaults to ``"`` which will use double quotes unless attribute + value contains a double quote, in which case single quotes are + used. + + :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute + values. + + Defaults to ``False``. + + :arg escape_rcdata: Whether to escape characters that need to be + escaped within normal elements within rcdata elements such as + style. + + Defaults to ``False``. 
+ + :arg resolve_entities: Whether to resolve named character entities that + appear in the source tree. The XML predefined entities < > + & " ' are unaffected by this setting. + + Defaults to ``True``. + + :arg strip_whitespace: Whether to remove semantically meaningless + whitespace. (This compresses all whitespace to a single space + except within ``pre``.) + + Defaults to ``False``. + + :arg minimize_boolean_attributes: Shortens boolean attributes to give + just the attribute value, for example:: + + + + becomes:: + + + + Defaults to ``True``. + + :arg use_trailing_solidus: Includes a close-tag slash at the end of the + start tag of void elements (empty elements whose end tag is + forbidden). E.g. ``
``. + + Defaults to ``False``. + + :arg space_before_trailing_solidus: Places a space immediately before + the closing slash in a tag using a trailing solidus. E.g. + ``
``. Requires ``use_trailing_solidus=True``. + + Defaults to ``True``. + + :arg sanitize: Strip all unsafe or unknown constructs from output. + See :py:class:`html5lib.filters.sanitizer.Filter`. + + Defaults to ``False``. + + :arg omit_optional_tags: Omit start/end tags that are optional. + + Defaults to ``True``. + + :arg alphabetical_attributes: Reorder attributes to be in alphabetical order. + + Defaults to ``False``. + + """ + unexpected_args = frozenset(kwargs) - frozenset(self.options) + if len(unexpected_args) > 0: + raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args))) + if 'quote_char' in kwargs: + self.use_best_quote_char = False + for attr in self.options: + setattr(self, attr, kwargs.get(attr, getattr(self, attr))) + self.errors = [] + self.strict = False + + def encode(self, string): + assert(isinstance(string, text_type)) + if self.encoding: + return string.encode(self.encoding, "htmlentityreplace") + else: + return string + + def encodeStrict(self, string): + assert(isinstance(string, text_type)) + if self.encoding: + return string.encode(self.encoding, "strict") + else: + return string + + def serialize(self, treewalker, encoding=None): + # pylint:disable=too-many-nested-blocks + self.encoding = encoding + in_cdata = False + self.errors = [] + + if encoding and self.inject_meta_charset: + from .filters.inject_meta_charset import Filter + treewalker = Filter(treewalker, encoding) + # Alphabetical attributes is here under the assumption that none of + # the later filters add or change order of attributes; it needs to be + # before the sanitizer so escaped elements come out correctly + if self.alphabetical_attributes: + from .filters.alphabeticalattributes import Filter + treewalker = Filter(treewalker) + # WhitespaceFilter should be used before OptionalTagFilter + # for maximum efficiently of this latter filter + if self.strip_whitespace: + from .filters.whitespace import Filter + treewalker = 
Filter(treewalker) + if self.sanitize: + from .filters.sanitizer import Filter + treewalker = Filter(treewalker) + if self.omit_optional_tags: + from .filters.optionaltags import Filter + treewalker = Filter(treewalker) + + for token in treewalker: + type = token["type"] + if type == "Doctype": + doctype = "= 0: + if token["systemId"].find("'") >= 0: + self.serializeError("System identifer contains both single and double quote characters") + quote_char = "'" + else: + quote_char = '"' + doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) + + doctype += ">" + yield self.encodeStrict(doctype) + + elif type in ("Characters", "SpaceCharacters"): + if type == "SpaceCharacters" or in_cdata: + if in_cdata and token["data"].find("= 0: + self.serializeError("Unexpected ") + + elif type == "EndTag": + name = token["name"] + if name in rcdataElements: + in_cdata = False + elif in_cdata: + self.serializeError("Unexpected child element of a CDATA element") + yield self.encodeStrict("" % name) + + elif type == "Comment": + data = token["data"] + if data.find("--") >= 0: + self.serializeError("Comment contains --") + yield self.encodeStrict("" % token["data"]) + + elif type == "Entity": + name = token["name"] + key = name + ";" + if key not in entities: + self.serializeError("Entity %s not recognized" % name) + if self.resolve_entities and key not in xmlEntities: + data = entities[key] + else: + data = "&%s;" % name + yield self.encodeStrict(data) + + else: + self.serializeError(token["data"]) + + def render(self, treewalker, encoding=None): + """Serializes the stream from the treewalker into a string + + :arg treewalker: the treewalker to serialize + + :arg encoding: the string encoding to use + + :returns: the serialized tree + + Example: + + >>> from html5lib import parse, getTreeWalker + >>> from html5lib.serializer import HTMLSerializer + >>> token_stream = parse('Hi!') + >>> walker = getTreeWalker('etree') + >>> serializer = 
HTMLSerializer(omit_optional_tags=False) + >>> serializer.render(walker(token_stream)) + 'Hi!' + + """ + if encoding: + return b"".join(list(self.serialize(treewalker, encoding))) + else: + return "".join(list(self.serialize(treewalker))) + + def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): + # XXX The idea is to make data mandatory. + self.errors.append(data) + if self.strict: + raise SerializeError + + +class SerializeError(Exception): + """Error in serialized tree""" + pass diff --git a/Shared/lib/python3.4/site-packages/html5lib/serializer/__init__.py b/Shared/lib/python3.4/site-packages/html5lib/serializer/__init__.py deleted file mode 100644 index 8380839..0000000 --- a/Shared/lib/python3.4/site-packages/html5lib/serializer/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from .. import treewalkers - -from .htmlserializer import HTMLSerializer - - -def serialize(input, tree="etree", format="html", encoding=None, - **serializer_opts): - # XXX: Should we cache this? - walker = treewalkers.getTreeWalker(tree) - if format == "html": - s = HTMLSerializer(**serializer_opts) - else: - raise ValueError("type must be html") - return s.render(walker(input), encoding) diff --git a/Shared/lib/python3.4/site-packages/html5lib/serializer/htmlserializer.py b/Shared/lib/python3.4/site-packages/html5lib/serializer/htmlserializer.py deleted file mode 100644 index be4d634..0000000 --- a/Shared/lib/python3.4/site-packages/html5lib/serializer/htmlserializer.py +++ /dev/null @@ -1,317 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from six import text_type - -try: - from functools import reduce -except ImportError: - pass - -from ..constants import voidElements, booleanAttributes, spaceCharacters -from ..constants import rcdataElements, entities, xmlEntities -from .. 
import utils -from xml.sax.saxutils import escape - -spaceCharacters = "".join(spaceCharacters) - -try: - from codecs import register_error, xmlcharrefreplace_errors -except ImportError: - unicode_encode_errors = "strict" -else: - unicode_encode_errors = "htmlentityreplace" - - encode_entity_map = {} - is_ucs4 = len("\U0010FFFF") == 1 - for k, v in list(entities.items()): - # skip multi-character entities - if ((is_ucs4 and len(v) > 1) or - (not is_ucs4 and len(v) > 2)): - continue - if v != "&": - if len(v) == 2: - v = utils.surrogatePairToCodepoint(v) - else: - v = ord(v) - if v not in encode_entity_map or k.islower(): - # prefer < over < and similarly for &, >, etc. - encode_entity_map[v] = k - - def htmlentityreplace_errors(exc): - if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): - res = [] - codepoints = [] - skip = False - for i, c in enumerate(exc.object[exc.start:exc.end]): - if skip: - skip = False - continue - index = i + exc.start - if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]): - codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2]) - skip = True - else: - codepoint = ord(c) - codepoints.append(codepoint) - for cp in codepoints: - e = encode_entity_map.get(cp) - if e: - res.append("&") - res.append(e) - if not e.endswith(";"): - res.append(";") - else: - res.append("&#x%s;" % (hex(cp)[2:])) - return ("".join(res), exc.end) - else: - return xmlcharrefreplace_errors(exc) - - register_error(unicode_encode_errors, htmlentityreplace_errors) - - del register_error - - -class HTMLSerializer(object): - - # attribute quoting options - quote_attr_values = False - quote_char = '"' - use_best_quote_char = True - - # tag syntax options - omit_optional_tags = True - minimize_boolean_attributes = True - use_trailing_solidus = False - space_before_trailing_solidus = True - - # escaping options - escape_lt_in_attrs = False - escape_rcdata = False - resolve_entities = True - - # miscellaneous options - 
alphabetical_attributes = False - inject_meta_charset = True - strip_whitespace = False - sanitize = False - - options = ("quote_attr_values", "quote_char", "use_best_quote_char", - "omit_optional_tags", "minimize_boolean_attributes", - "use_trailing_solidus", "space_before_trailing_solidus", - "escape_lt_in_attrs", "escape_rcdata", "resolve_entities", - "alphabetical_attributes", "inject_meta_charset", - "strip_whitespace", "sanitize") - - def __init__(self, **kwargs): - """Initialize HTMLSerializer. - - Keyword options (default given first unless specified) include: - - inject_meta_charset=True|False - Whether it insert a meta element to define the character set of the - document. - quote_attr_values=True|False - Whether to quote attribute values that don't require quoting - per HTML5 parsing rules. - quote_char=u'"'|u"'" - Use given quote character for attribute quoting. Default is to - use double quote unless attribute value contains a double quote, - in which case single quotes are used instead. - escape_lt_in_attrs=False|True - Whether to escape < in attribute values. - escape_rcdata=False|True - Whether to escape characters that need to be escaped within normal - elements within rcdata elements such as style. - resolve_entities=True|False - Whether to resolve named character entities that appear in the - source tree. The XML predefined entities < > & " ' - are unaffected by this setting. - strip_whitespace=False|True - Whether to remove semantically meaningless whitespace. (This - compresses all whitespace to a single space except within pre.) - minimize_boolean_attributes=True|False - Shortens boolean attributes to give just the attribute value, - for example becomes . - use_trailing_solidus=False|True - Includes a close-tag slash at the end of the start tag of void - elements (empty elements whose end tag is forbidden). E.g.
. - space_before_trailing_solidus=True|False - Places a space immediately before the closing slash in a tag - using a trailing solidus. E.g.
. Requires use_trailing_solidus. - sanitize=False|True - Strip all unsafe or unknown constructs from output. - See `html5lib user documentation`_ - omit_optional_tags=True|False - Omit start/end tags that are optional. - alphabetical_attributes=False|True - Reorder attributes to be in alphabetical order. - - .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation - """ - if 'quote_char' in kwargs: - self.use_best_quote_char = False - for attr in self.options: - setattr(self, attr, kwargs.get(attr, getattr(self, attr))) - self.errors = [] - self.strict = False - - def encode(self, string): - assert(isinstance(string, text_type)) - if self.encoding: - return string.encode(self.encoding, unicode_encode_errors) - else: - return string - - def encodeStrict(self, string): - assert(isinstance(string, text_type)) - if self.encoding: - return string.encode(self.encoding, "strict") - else: - return string - - def serialize(self, treewalker, encoding=None): - self.encoding = encoding - in_cdata = False - self.errors = [] - - if encoding and self.inject_meta_charset: - from ..filters.inject_meta_charset import Filter - treewalker = Filter(treewalker, encoding) - # WhitespaceFilter should be used before OptionalTagFilter - # for maximum efficiently of this latter filter - if self.strip_whitespace: - from ..filters.whitespace import Filter - treewalker = Filter(treewalker) - if self.sanitize: - from ..filters.sanitizer import Filter - treewalker = Filter(treewalker) - if self.omit_optional_tags: - from ..filters.optionaltags import Filter - treewalker = Filter(treewalker) - # Alphabetical attributes must be last, as other filters - # could add attributes and alter the order - if self.alphabetical_attributes: - from ..filters.alphabeticalattributes import Filter - treewalker = Filter(treewalker) - - for token in treewalker: - type = token["type"] - if type == "Doctype": - doctype = "= 0: - if token["systemId"].find("'") >= 0: - 
self.serializeError("System identifer contains both single and double quote characters") - quote_char = "'" - else: - quote_char = '"' - doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) - - doctype += ">" - yield self.encodeStrict(doctype) - - elif type in ("Characters", "SpaceCharacters"): - if type == "SpaceCharacters" or in_cdata: - if in_cdata and token["data"].find("= 0: - self.serializeError("Unexpected \"'=", False) - v = v.replace("&", "&") - if self.escape_lt_in_attrs: - v = v.replace("<", "<") - if quote_attr: - quote_char = self.quote_char - if self.use_best_quote_char: - if "'" in v and '"' not in v: - quote_char = '"' - elif '"' in v and "'" not in v: - quote_char = "'" - if quote_char == "'": - v = v.replace("'", "'") - else: - v = v.replace('"', """) - yield self.encodeStrict(quote_char) - yield self.encode(v) - yield self.encodeStrict(quote_char) - else: - yield self.encode(v) - if name in voidElements and self.use_trailing_solidus: - if self.space_before_trailing_solidus: - yield self.encodeStrict(" /") - else: - yield self.encodeStrict("/") - yield self.encode(">") - - elif type == "EndTag": - name = token["name"] - if name in rcdataElements: - in_cdata = False - elif in_cdata: - self.serializeError("Unexpected child element of a CDATA element") - yield self.encodeStrict("" % name) - - elif type == "Comment": - data = token["data"] - if data.find("--") >= 0: - self.serializeError("Comment contains --") - yield self.encodeStrict("" % token["data"]) - - elif type == "Entity": - name = token["name"] - key = name + ";" - if key not in entities: - self.serializeError("Entity %s not recognized" % name) - if self.resolve_entities and key not in xmlEntities: - data = entities[key] - else: - data = "&%s;" % name - yield self.encodeStrict(data) - - else: - self.serializeError(token["data"]) - - def render(self, treewalker, encoding=None): - if encoding: - return b"".join(list(self.serialize(treewalker, encoding))) - else: - return 
"".join(list(self.serialize(treewalker))) - - def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): - # XXX The idea is to make data mandatory. - self.errors.append(data) - if self.strict: - raise SerializeError - - -def SerializeError(Exception): - """Error in serialized tree""" - pass diff --git a/Shared/lib/python3.4/site-packages/html5lib/treeadapters/__init__.py b/Shared/lib/python3.4/site-packages/html5lib/treeadapters/__init__.py index e69de29..dfeb0ba 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treeadapters/__init__.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treeadapters/__init__.py @@ -0,0 +1,30 @@ +"""Tree adapters let you convert from one tree structure to another + +Example: + +.. code-block:: python + + import html5lib + from html5lib.treeadapters import genshi + + doc = 'Hi!' + treebuilder = html5lib.getTreeBuilder('etree') + parser = html5lib.HTMLParser(tree=treebuilder) + tree = parser.parse(doc) + TreeWalker = html5lib.getTreeWalker('etree') + + genshi_tree = genshi.to_genshi(TreeWalker(tree)) + +""" +from __future__ import absolute_import, division, unicode_literals + +from . import sax + +__all__ = ["sax"] + +try: + from . 
import genshi # noqa +except ImportError: + pass +else: + __all__.append("genshi") diff --git a/Shared/lib/python3.4/site-packages/html5lib/treeadapters/genshi.py b/Shared/lib/python3.4/site-packages/html5lib/treeadapters/genshi.py new file mode 100644 index 0000000..61d5fb6 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/html5lib/treeadapters/genshi.py @@ -0,0 +1,54 @@ +from __future__ import absolute_import, division, unicode_literals + +from genshi.core import QName, Attrs +from genshi.core import START, END, TEXT, COMMENT, DOCTYPE + + +def to_genshi(walker): + """Convert a tree to a genshi tree + + :arg walker: the treewalker to use to walk the tree to convert it + + :returns: generator of genshi nodes + + """ + text = [] + for token in walker: + type = token["type"] + if type in ("Characters", "SpaceCharacters"): + text.append(token["data"]) + elif text: + yield TEXT, "".join(text), (None, -1, -1) + text = [] + + if type in ("StartTag", "EmptyTag"): + if token["namespace"]: + name = "{%s}%s" % (token["namespace"], token["name"]) + else: + name = token["name"] + attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value) + for attr, value in token["data"].items()]) + yield (START, (QName(name), attrs), (None, -1, -1)) + if type == "EmptyTag": + type = "EndTag" + + if type == "EndTag": + if token["namespace"]: + name = "{%s}%s" % (token["namespace"], token["name"]) + else: + name = token["name"] + + yield END, QName(name), (None, -1, -1) + + elif type == "Comment": + yield COMMENT, token["data"], (None, -1, -1) + + elif type == "Doctype": + yield DOCTYPE, (token["name"], token["publicId"], + token["systemId"]), (None, -1, -1) + + else: + pass # FIXME: What to do? 
+ + if text: + yield TEXT, "".join(text), (None, -1, -1) diff --git a/Shared/lib/python3.4/site-packages/html5lib/treeadapters/sax.py b/Shared/lib/python3.4/site-packages/html5lib/treeadapters/sax.py index ad47df9..f4ccea5 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treeadapters/sax.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treeadapters/sax.py @@ -11,7 +11,13 @@ for prefix, localName, namespace in adjustForeignAttributes.values(): def to_sax(walker, handler): - """Call SAX-like content handler based on treewalker walker""" + """Call SAX-like content handler based on treewalker walker + + :arg walker: the treewalker to use to walk the tree to convert it + + :arg handler: SAX handler to use + + """ handler.startDocument() for prefix, namespace in prefix_mapping.items(): handler.startPrefixMapping(prefix, namespace) diff --git a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/__init__.py b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/__init__.py index 6a6b2a4..d44447e 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/__init__.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/__init__.py @@ -1,56 +1,68 @@ -"""A collection of modules for building different kinds of tree from -HTML documents. +"""A collection of modules for building different kinds of trees from HTML +documents. To create a treebuilder for a new type of tree, you need to do implement several things: -1) A set of classes for various types of elements: Document, Doctype, -Comment, Element. These must implement the interface of -_base.treebuilders.Node (although comment nodes have a different -signature for their constructor, see treebuilders.etree.Comment) -Textual content may also be implemented as another node type, or not, as -your tree implementation requires. +1. A set of classes for various types of elements: Document, Doctype, Comment, + Element. 
These must implement the interface of ``base.treebuilders.Node`` + (although comment nodes have a different signature for their constructor, + see ``treebuilders.etree.Comment``) Textual content may also be implemented + as another node type, or not, as your tree implementation requires. -2) A treebuilder object (called TreeBuilder by convention) that -inherits from treebuilders._base.TreeBuilder. This has 4 required attributes: -documentClass - the class to use for the bottommost node of a document -elementClass - the class to use for HTML Elements -commentClass - the class to use for comments -doctypeClass - the class to use for doctypes -It also has one required method: -getDocument - Returns the root node of the complete document tree +2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits + from ``treebuilders.base.TreeBuilder``. This has 4 required attributes: + + * ``documentClass`` - the class to use for the bottommost node of a document + * ``elementClass`` - the class to use for HTML Elements + * ``commentClass`` - the class to use for comments + * ``doctypeClass`` - the class to use for doctypes + + It also has one required method: + + * ``getDocument`` - Returns the root node of the complete document tree + +3. 
If you wish to run the unit tests, you must also create a ``testSerializer`` + method on your treebuilder which accepts a node and returns a string + containing Node and its children serialized according to the format used in + the unittests -3) If you wish to run the unit tests, you must also create a -testSerializer method on your treebuilder which accepts a node and -returns a string containing Node and its children serialized according -to the format used in the unittests """ from __future__ import absolute_import, division, unicode_literals -from ..utils import default_etree +from .._utils import default_etree treeBuilderCache = {} def getTreeBuilder(treeType, implementation=None, **kwargs): - """Get a TreeBuilder class for various types of tree with built-in support + """Get a TreeBuilder class for various types of trees with built-in support - treeType - the name of the tree type required (case-insensitive). Supported - values are: + :arg treeType: the name of the tree type required (case-insensitive). Supported + values are: - "dom" - A generic builder for DOM implementations, defaulting to - a xml.dom.minidom based implementation. - "etree" - A generic builder for tree implementations exposing an - ElementTree-like interface, defaulting to - xml.etree.cElementTree if available and - xml.etree.ElementTree if not. - "lxml" - A etree-based builder for lxml.etree, handling - limitations of lxml's implementation. + * "dom" - A generic builder for DOM implementations, defaulting to a + xml.dom.minidom based implementation. + * "etree" - A generic builder for tree implementations exposing an + ElementTree-like interface, defaulting to xml.etree.cElementTree if + available and xml.etree.ElementTree if not. + * "lxml" - A etree-based builder for lxml.etree, handling limitations + of lxml's implementation. - implementation - (Currently applies to the "etree" and "dom" tree types). A - module implementing the tree type e.g. 
- xml.etree.ElementTree or xml.etree.cElementTree.""" + :arg implementation: (Currently applies to the "etree" and "dom" tree + types). A module implementing the tree type e.g. xml.etree.ElementTree + or xml.etree.cElementTree. + + :arg kwargs: Any additional options to pass to the TreeBuilder when + creating it. + + Example: + + >>> from html5lib.treebuilders import getTreeBuilder + >>> builder = getTreeBuilder('etree') + + """ treeType = treeType.lower() if treeType not in treeBuilderCache: diff --git a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/_base.py b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/base.py similarity index 86% rename from Shared/lib/python3.4/site-packages/html5lib/treebuilders/_base.py rename to Shared/lib/python3.4/site-packages/html5lib/treebuilders/base.py index 8b97cc1..05d97ec 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/_base.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/base.py @@ -21,22 +21,25 @@ listElementsMap = { class Node(object): + """Represents an item in the tree""" def __init__(self, name): - """Node representing an item in the tree. - name - The tag name associated with the node - parent - The parent of the current node (or None for the document node) - value - The value of the current node (applies to text nodes and - comments - attributes - a dict holding name, value pairs for attributes of the node - childNodes - a list of child nodes of the current node. 
This must - include all elements but not necessarily other node types - _flags - A list of miscellaneous flags that can be set on the node + """Creates a Node + + :arg name: The tag name associated with the node + """ + # The tag name assocaited with the node self.name = name + # The parent of the current node (or None for the document node) self.parent = None + # The value of the current node (applies to text nodes and comments) self.value = None + # A dict holding name -> value pairs for attributes of the node self.attributes = {} + # A list of child nodes of the current node. This must include all + # elements but not necessarily other node types. self.childNodes = [] + # A list of miscellaneous flags that can be set on the node. self._flags = [] def __str__(self): @@ -53,23 +56,41 @@ class Node(object): def appendChild(self, node): """Insert node as a child of the current node + + :arg node: the node to insert + """ raise NotImplementedError def insertText(self, data, insertBefore=None): """Insert data as text in the current node, positioned before the start of node insertBefore or to the end of the node's text. + + :arg data: the data to insert + + :arg insertBefore: True if you want to insert the text before the node + and False if you want to insert it after the node + """ raise NotImplementedError def insertBefore(self, node, refNode): """Insert node as a child of the current node, before refNode in the list of child nodes. Raises ValueError if refNode is not a child of - the current node""" + the current node + + :arg node: the node to insert + + :arg refNode: the child node to insert the node before + + """ raise NotImplementedError def removeChild(self, node): """Remove node from the children of the current node + + :arg node: the child node to remove + """ raise NotImplementedError @@ -77,6 +98,9 @@ class Node(object): """Move all the children of the current node to newParent. 
This is needed so that trees that don't store text as nodes move the text in the correct way + + :arg newParent: the node to move all this node's children to + """ # XXX - should this method be made more general? for child in self.childNodes: @@ -121,11 +145,14 @@ class ActiveFormattingElements(list): class TreeBuilder(object): """Base treebuilder implementation - documentClass - the class to use for the bottommost node of a document - elementClass - the class to use for HTML Elements - commentClass - the class to use for comments - doctypeClass - the class to use for doctypes + + * documentClass - the class to use for the bottommost node of a document + * elementClass - the class to use for HTML Elements + * commentClass - the class to use for comments + * doctypeClass - the class to use for doctypes + """ + # pylint:disable=not-callable # Document class documentClass = None @@ -143,6 +170,11 @@ class TreeBuilder(object): fragmentClass = None def __init__(self, namespaceHTMLElements): + """Create a TreeBuilder + + :arg namespaceHTMLElements: whether or not to namespace HTML elements + + """ if namespaceHTMLElements: self.defaultNamespace = "http://www.w3.org/1999/xhtml" else: @@ -166,12 +198,17 @@ class TreeBuilder(object): # If we pass a node in we match that. 
if we pass a string # match any node with that name exactNode = hasattr(target, "nameTuple") + if not exactNode: + if isinstance(target, text_type): + target = (namespaces["html"], target) + assert isinstance(target, tuple) listElements, invert = listElementsMap[variant] for node in reversed(self.openElements): - if (node.name == target and not exactNode or - node == target and exactNode): + if exactNode and node == target: + return True + elif not exactNode and node.nameTuple == target: return True elif (invert ^ (node.nameTuple in listElements)): return False @@ -353,19 +390,19 @@ class TreeBuilder(object): def generateImpliedEndTags(self, exclude=None): name = self.openElements[-1].name # XXX td, th and tr are not actually needed - if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) - and name != exclude): + if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and + name != exclude): self.openElements.pop() # XXX This is not entirely what the specification says. We should # investigate it more closely. 
self.generateImpliedEndTags(exclude) def getDocument(self): - "Return the final tree" + """Return the final tree""" return self.document def getFragment(self): - "Return the final fragment" + """Return the final fragment""" # assert self.innerHTML fragment = self.fragmentClass() self.openElements[0].reparentChildren(fragment) @@ -373,5 +410,8 @@ class TreeBuilder(object): def testSerializer(self, node): """Serialize the subtree of node in the format required by unit tests - node - the node from which to start serializing""" + + :arg node: the node from which to start serializing + + """ raise NotImplementedError diff --git a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/dom.py b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/dom.py index 234233b..dcfac22 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/dom.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/dom.py @@ -1,54 +1,62 @@ from __future__ import absolute_import, division, unicode_literals +from collections import MutableMapping from xml.dom import minidom, Node import weakref -from . import _base +from . import base from .. 
import constants from ..constants import namespaces -from ..utils import moduleFactoryFactory +from .._utils import moduleFactoryFactory def getDomBuilder(DomImplementation): Dom = DomImplementation - class AttrList(object): + class AttrList(MutableMapping): def __init__(self, element): self.element = element def __iter__(self): - return list(self.element.attributes.items()).__iter__() + return iter(self.element.attributes.keys()) def __setitem__(self, name, value): - self.element.setAttribute(name, value) - - def __len__(self): - return len(list(self.element.attributes.items())) - - def items(self): - return [(item[0], item[1]) for item in - list(self.element.attributes.items())] - - def keys(self): - return list(self.element.attributes.keys()) - - def __getitem__(self, name): - return self.element.getAttribute(name) - - def __contains__(self, name): if isinstance(name, tuple): raise NotImplementedError else: - return self.element.hasAttribute(name) + attr = self.element.ownerDocument.createAttribute(name) + attr.value = value + self.element.attributes[name] = attr - class NodeBuilder(_base.Node): + def __len__(self): + return len(self.element.attributes) + + def items(self): + return list(self.element.attributes.items()) + + def values(self): + return list(self.element.attributes.values()) + + def __getitem__(self, name): + if isinstance(name, tuple): + raise NotImplementedError + else: + return self.element.attributes[name].value + + def __delitem__(self, name): + if isinstance(name, tuple): + raise NotImplementedError + else: + del self.element.attributes[name] + + class NodeBuilder(base.Node): def __init__(self, element): - _base.Node.__init__(self, element.nodeName) + base.Node.__init__(self, element.nodeName) self.element = element - namespace = property(lambda self: hasattr(self.element, "namespaceURI") - and self.element.namespaceURI or None) + namespace = property(lambda self: hasattr(self.element, "namespaceURI") and + self.element.namespaceURI or None) 
def appendChild(self, node): node.parent = self @@ -109,7 +117,7 @@ def getDomBuilder(DomImplementation): nameTuple = property(getNameTuple) - class TreeBuilder(_base.TreeBuilder): + class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable def documentClass(self): self.dom = Dom.getDOMImplementation().createDocument(None, None, None) return weakref.proxy(self) @@ -149,15 +157,16 @@ def getDomBuilder(DomImplementation): return self.dom def getFragment(self): - return _base.TreeBuilder.getFragment(self).element + return base.TreeBuilder.getFragment(self).element def insertText(self, data, parent=None): data = data if parent != self: - _base.TreeBuilder.insertText(self, data, parent) + base.TreeBuilder.insertText(self, data, parent) else: # HACK: allow text nodes as children of the document node if hasattr(self.dom, '_child_node_types'): + # pylint:disable=protected-access if Node.TEXT_NODE not in self.dom._child_node_types: self.dom._child_node_types = list(self.dom._child_node_types) self.dom._child_node_types.append(Node.TEXT_NODE) diff --git a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/etree.py b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/etree.py index 2c8ed19..cb1d4ae 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/etree.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/etree.py @@ -1,13 +1,15 @@ from __future__ import absolute_import, division, unicode_literals +# pylint:disable=protected-access + from six import text_type import re -from . import _base -from .. import ihatexml +from . import base +from .. import _ihatexml from .. 
import constants from ..constants import namespaces -from ..utils import moduleFactoryFactory +from .._utils import moduleFactoryFactory tag_regexp = re.compile("{([^}]*)}(.*)") @@ -16,7 +18,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False): ElementTree = ElementTreeImplementation ElementTreeCommentType = ElementTree.Comment("asd").tag - class Element(_base.Node): + class Element(base.Node): def __init__(self, name, namespace=None): self._name = name self._namespace = namespace @@ -98,6 +100,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False): node.parent = self def removeChild(self, node): + self._childNodes.remove(node) self._element.remove(node._element) node.parent = None @@ -139,7 +142,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False): if self._element.text is not None: newParent._element.text += self._element.text self._element.text = "" - _base.Node.reparentChildren(self, newParent) + base.Node.reparentChildren(self, newParent) class Comment(Element): def __init__(self, data): @@ -253,10 +256,10 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False): return "\n".join(rv) - def tostring(element): + def tostring(element): # pylint:disable=unused-variable """Serialize an element and its child nodes to a string""" rv = [] - filter = ihatexml.InfosetFilter() + filter = _ihatexml.InfosetFilter() def serializeElement(element): if isinstance(element, ElementTree.ElementTree): @@ -307,7 +310,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False): return "".join(rv) - class TreeBuilder(_base.TreeBuilder): + class TreeBuilder(base.TreeBuilder): # pylint:disable=unused-variable documentClass = Document doctypeClass = DocumentType elementClass = Element @@ -329,7 +332,7 @@ def getETreeBuilder(ElementTreeImplementation, fullTree=False): return self.document._element.find("html") def getFragment(self): - return _base.TreeBuilder.getFragment(self)._element + return 
base.TreeBuilder.getFragment(self)._element return locals() diff --git a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/etree_lxml.py b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/etree_lxml.py index 35d08ef..ca12a99 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treebuilders/etree_lxml.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treebuilders/etree_lxml.py @@ -10,16 +10,17 @@ When any of these things occur, we emit a DataLossWarning """ from __future__ import absolute_import, division, unicode_literals +# pylint:disable=protected-access import warnings import re import sys -from . import _base +from . import base from ..constants import DataLossWarning from .. import constants from . import etree as etree_builders -from .. import ihatexml +from .. import _ihatexml import lxml.etree as etree @@ -53,8 +54,7 @@ class Document(object): def testSerializer(element): rv = [] - finalText = None - infosetFilter = ihatexml.InfosetFilter() + infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) def serializeElement(element, indent=0): if not hasattr(element, "tag"): @@ -79,7 +79,7 @@ def testSerializer(element): next_element = next_element.getnext() elif isinstance(element, str) or isinstance(element, bytes): # Text in a fragment - assert isinstance(element, str) or sys.version_info.major == 2 + assert isinstance(element, str) or sys.version_info[0] == 2 rv.append("|%s\"%s\"" % (' ' * indent, element)) else: # Fragment case @@ -128,16 +128,12 @@ def testSerializer(element): rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) serializeElement(element, 0) - if finalText is not None: - rv.append("|%s\"%s\"" % (' ' * 2, finalText)) - return "\n".join(rv) def tostring(element): """Serialize an element and its child nodes to a string""" rv = [] - finalText = None def serializeElement(element): if not hasattr(element, "tag"): @@ -173,13 +169,10 @@ def tostring(element): serializeElement(element) - if finalText is 
not None: - rv.append("%s\"" % (' ' * 2, finalText)) - return "".join(rv) -class TreeBuilder(_base.TreeBuilder): +class TreeBuilder(base.TreeBuilder): documentClass = Document doctypeClass = DocumentType elementClass = None @@ -189,13 +182,15 @@ class TreeBuilder(_base.TreeBuilder): def __init__(self, namespaceHTMLElements, fullTree=False): builder = etree_builders.getETreeModule(etree, fullTree=fullTree) - infosetFilter = self.infosetFilter = ihatexml.InfosetFilter() + infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True) self.namespaceHTMLElements = namespaceHTMLElements class Attributes(dict): - def __init__(self, element, value={}): + def __init__(self, element, value=None): + if value is None: + value = {} self._element = element - dict.__init__(self, value) + dict.__init__(self, value) # pylint:disable=non-parent-init-called for key, value in self.items(): if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) @@ -257,12 +252,12 @@ class TreeBuilder(_base.TreeBuilder): data = property(_getData, _setData) self.elementClass = Element - self.commentClass = builder.Comment + self.commentClass = Comment # self.fragmentClass = builder.DocumentFragment - _base.TreeBuilder.__init__(self, namespaceHTMLElements) + base.TreeBuilder.__init__(self, namespaceHTMLElements) def reset(self): - _base.TreeBuilder.reset(self) + base.TreeBuilder.reset(self) self.insertComment = self.insertCommentInitial self.initial_comments = [] self.doctype = None @@ -303,19 +298,20 @@ class TreeBuilder(_base.TreeBuilder): self.doctype = doctype def insertCommentInitial(self, data, parent=None): + assert parent is None or parent is self.document + assert self.document._elementTree is None self.initial_comments.append(data) def insertCommentMain(self, data, parent=None): if (parent == self.document and self.document._elementTree.getroot()[-1].tag == comment_type): - warnings.warn("lxml cannot represent adjacent comments 
beyond the root elements", DataLossWarning) + warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) super(TreeBuilder, self).insertComment(data, parent) def insertRoot(self, token): - """Create the document root""" # Because of the way libxml2 works, it doesn't seem to be possible to # alter information like the doctype after the tree has been parsed. - # Therefore we need to use the built-in parser to create our iniial + # Therefore we need to use the built-in parser to create our initial # tree, after which we can add elements like normal docStr = "" if self.doctype: @@ -344,7 +340,8 @@ class TreeBuilder(_base.TreeBuilder): # Append the initial comments: for comment_token in self.initial_comments: - root.addprevious(etree.Comment(comment_token["data"])) + comment = self.commentClass(comment_token["data"]) + root.addprevious(comment._element) # Create the root document and add the ElementTree to it self.document = self.documentClass() diff --git a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/__init__.py b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/__init__.py index 20b91b1..9bec207 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/__init__.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/__init__.py @@ -10,13 +10,10 @@ returning an iterator generating tokens. from __future__ import absolute_import, division, unicode_literals -__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshistream", "lxmletree", - "pulldom"] - -import sys - from .. import constants -from ..utils import default_etree +from .._utils import default_etree + +__all__ = ["getTreeWalker", "pprint"] treeWalkerCache = {} @@ -24,34 +21,38 @@ treeWalkerCache = {} def getTreeWalker(treeType, implementation=None, **kwargs): """Get a TreeWalker class for various types of tree with built-in support - treeType - the name of the tree type required (case-insensitive). 
Supported - values are: + :arg str treeType: the name of the tree type required (case-insensitive). + Supported values are: - "dom" - The xml.dom.minidom DOM implementation - "pulldom" - The xml.dom.pulldom event stream - "etree" - A generic walker for tree implementations exposing an - elementtree-like interface (known to work with - ElementTree, cElementTree and lxml.etree). - "lxml" - Optimized walker for lxml.etree - "genshi" - a Genshi stream + * "dom": The xml.dom.minidom DOM implementation + * "etree": A generic walker for tree implementations exposing an + elementtree-like interface (known to work with ElementTree, + cElementTree and lxml.etree). + * "lxml": Optimized walker for lxml.etree + * "genshi": a Genshi stream - implementation - (Currently applies to the "etree" tree type only). A module - implementing the tree type e.g. xml.etree.ElementTree or - cElementTree.""" + :arg implementation: A module implementing the tree type e.g. + xml.etree.ElementTree or cElementTree (Currently applies to the "etree" + tree type only). + + :arg kwargs: keyword arguments passed to the etree walker--for other + walkers, this has no effect + + :returns: a TreeWalker class + + """ treeType = treeType.lower() if treeType not in treeWalkerCache: - if treeType in ("dom", "pulldom"): - name = "%s.%s" % (__name__, treeType) - __import__(name) - mod = sys.modules[name] - treeWalkerCache[treeType] = mod.TreeWalker + if treeType == "dom": + from . import dom + treeWalkerCache[treeType] = dom.TreeWalker elif treeType == "genshi": - from . import genshistream - treeWalkerCache[treeType] = genshistream.TreeWalker + from . import genshi + treeWalkerCache[treeType] = genshi.TreeWalker elif treeType == "lxml": - from . import lxmletree - treeWalkerCache[treeType] = lxmletree.TreeWalker + from . import etree_lxml + treeWalkerCache[treeType] = etree_lxml.TreeWalker elif treeType == "etree": from . 
import etree if implementation is None: @@ -77,7 +78,13 @@ def concatenateCharacterTokens(tokens): def pprint(walker): - """Pretty printer for tree walkers""" + """Pretty printer for tree walkers + + Takes a TreeWalker instance and pretty prints the output of walking the tree. + + :arg walker: a TreeWalker instance + + """ output = [] indent = 0 for token in concatenateCharacterTokens(walker): diff --git a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/_base.py b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/base.py similarity index 55% rename from Shared/lib/python3.4/site-packages/html5lib/treewalkers/_base.py rename to Shared/lib/python3.4/site-packages/html5lib/treewalkers/base.py index 4e11cd0..80c474c 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/_base.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/base.py @@ -1,11 +1,11 @@ from __future__ import absolute_import, division, unicode_literals -from six import text_type, string_types + +from xml.dom import Node +from ..constants import namespaces, voidElements, spaceCharacters __all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN", "TreeWalker", "NonRecursiveTreeWalker"] -from xml.dom import Node - DOCUMENT = Node.DOCUMENT_NODE DOCTYPE = Node.DOCUMENT_TYPE_NODE TEXT = Node.TEXT_NODE @@ -14,80 +14,115 @@ COMMENT = Node.COMMENT_NODE ENTITY = Node.ENTITY_NODE UNKNOWN = "<#UNKNOWN#>" -from ..constants import voidElements, spaceCharacters spaceCharacters = "".join(spaceCharacters) -def to_text(s, blank_if_none=True): - """Wrapper around six.text_type to convert None to empty string""" - if s is None: - if blank_if_none: - return "" - else: - return None - elif isinstance(s, text_type): - return s - else: - return text_type(s) - - -def is_text_or_none(string): - """Wrapper around isinstance(string_types) or is None""" - return string is None or isinstance(string, string_types) - - class TreeWalker(object): + """Walks a tree 
yielding tokens + + Tokens are dicts that all have a ``type`` field specifying the type of the + token. + + """ def __init__(self, tree): + """Creates a TreeWalker + + :arg tree: the tree to walk + + """ self.tree = tree def __iter__(self): raise NotImplementedError def error(self, msg): + """Generates an error token with the given message + + :arg msg: the error message + + :returns: SerializeError token + + """ return {"type": "SerializeError", "data": msg} def emptyTag(self, namespace, name, attrs, hasChildren=False): - assert namespace is None or isinstance(namespace, string_types), type(namespace) - assert isinstance(name, string_types), type(name) - assert all((namespace is None or isinstance(namespace, string_types)) and - isinstance(name, string_types) and - isinstance(value, string_types) - for (namespace, name), value in attrs.items()) + """Generates an EmptyTag token - yield {"type": "EmptyTag", "name": to_text(name, False), - "namespace": to_text(namespace), + :arg namespace: the namespace of the token--can be ``None`` + + :arg name: the name of the element + + :arg attrs: the attributes of the element as a dict + + :arg hasChildren: whether or not to yield a SerializationError because + this tag shouldn't have children + + :returns: EmptyTag token + + """ + yield {"type": "EmptyTag", "name": name, + "namespace": namespace, "data": attrs} if hasChildren: yield self.error("Void element has children") def startTag(self, namespace, name, attrs): - assert namespace is None or isinstance(namespace, string_types), type(namespace) - assert isinstance(name, string_types), type(name) - assert all((namespace is None or isinstance(namespace, string_types)) and - isinstance(name, string_types) and - isinstance(value, string_types) - for (namespace, name), value in attrs.items()) + """Generates a StartTag token + :arg namespace: the namespace of the token--can be ``None`` + + :arg name: the name of the element + + :arg attrs: the attributes of the element as a dict 
+ + :returns: StartTag token + + """ return {"type": "StartTag", - "name": text_type(name), - "namespace": to_text(namespace), - "data": dict(((to_text(namespace, False), to_text(name)), - to_text(value, False)) - for (namespace, name), value in attrs.items())} + "name": name, + "namespace": namespace, + "data": attrs} def endTag(self, namespace, name): - assert namespace is None or isinstance(namespace, string_types), type(namespace) - assert isinstance(name, string_types), type(namespace) + """Generates an EndTag token + :arg namespace: the namespace of the token--can be ``None`` + + :arg name: the name of the element + + :returns: EndTag token + + """ return {"type": "EndTag", - "name": to_text(name, False), - "namespace": to_text(namespace), - "data": {}} + "name": name, + "namespace": namespace} def text(self, data): - assert isinstance(data, string_types), type(data) + """Generates SpaceCharacters and Characters tokens - data = to_text(data) + Depending on what's in the data, this generates one or more + ``SpaceCharacters`` and ``Characters`` tokens. 
+ + For example: + + >>> from html5lib.treewalkers.base import TreeWalker + >>> # Give it an empty tree just so it instantiates + >>> walker = TreeWalker([]) + >>> list(walker.text('')) + [] + >>> list(walker.text(' ')) + [{u'data': ' ', u'type': u'SpaceCharacters'}] + >>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE + [{u'data': ' ', u'type': u'SpaceCharacters'}, + {u'data': u'abc', u'type': u'Characters'}, + {u'data': u' ', u'type': u'SpaceCharacters'}] + + :arg data: the text data + + :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens + + """ + data = data middle = data.lstrip(spaceCharacters) left = data[:len(data) - len(middle)] if left: @@ -101,27 +136,44 @@ class TreeWalker(object): yield {"type": "SpaceCharacters", "data": right} def comment(self, data): - assert isinstance(data, string_types), type(data) + """Generates a Comment token - return {"type": "Comment", "data": text_type(data)} + :arg data: the comment - def doctype(self, name, publicId=None, systemId=None, correct=True): - assert is_text_or_none(name), type(name) - assert is_text_or_none(publicId), type(publicId) - assert is_text_or_none(systemId), type(systemId) + :returns: Comment token + """ + return {"type": "Comment", "data": data} + + def doctype(self, name, publicId=None, systemId=None): + """Generates a Doctype token + + :arg name: + + :arg publicId: + + :arg systemId: + + :returns: the Doctype token + + """ return {"type": "Doctype", - "name": to_text(name), - "publicId": to_text(publicId), - "systemId": to_text(systemId), - "correct": to_text(correct)} + "name": name, + "publicId": publicId, + "systemId": systemId} def entity(self, name): - assert isinstance(name, string_types), type(name) + """Generates an Entity token - return {"type": "Entity", "name": text_type(name)} + :arg name: the entity name + + :returns: an Entity token + + """ + return {"type": "Entity", "name": name} def unknown(self, nodeType): + """Handles unknown node types""" return 
self.error("Unknown node type: " + nodeType) @@ -154,7 +206,7 @@ class NonRecursiveTreeWalker(TreeWalker): elif type == ELEMENT: namespace, name, attributes, hasChildren = details - if name in voidElements: + if (not namespace or namespace == namespaces["html"]) and name in voidElements: for token in self.emptyTag(namespace, name, attributes, hasChildren): yield token @@ -187,7 +239,7 @@ class NonRecursiveTreeWalker(TreeWalker): type, details = details[0], details[1:] if type == ELEMENT: namespace, name, attributes, hasChildren = details - if name not in voidElements: + if (namespace and namespace != namespaces["html"]) or name not in voidElements: yield self.endTag(namespace, name) if self.tree is currentNode: currentNode = None diff --git a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/dom.py b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/dom.py index ac4dcf3..b0c89b0 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/dom.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/dom.py @@ -2,16 +2,16 @@ from __future__ import absolute_import, division, unicode_literals from xml.dom import Node -from . import _base +from . 
import base -class TreeWalker(_base.NonRecursiveTreeWalker): +class TreeWalker(base.NonRecursiveTreeWalker): def getNodeDetails(self, node): if node.nodeType == Node.DOCUMENT_TYPE_NODE: - return _base.DOCTYPE, node.name, node.publicId, node.systemId + return base.DOCTYPE, node.name, node.publicId, node.systemId elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): - return _base.TEXT, node.nodeValue + return base.TEXT, node.nodeValue elif node.nodeType == Node.ELEMENT_NODE: attrs = {} @@ -21,17 +21,17 @@ class TreeWalker(_base.NonRecursiveTreeWalker): attrs[(attr.namespaceURI, attr.localName)] = attr.value else: attrs[(None, attr.name)] = attr.value - return (_base.ELEMENT, node.namespaceURI, node.nodeName, + return (base.ELEMENT, node.namespaceURI, node.nodeName, attrs, node.hasChildNodes()) elif node.nodeType == Node.COMMENT_NODE: - return _base.COMMENT, node.nodeValue + return base.COMMENT, node.nodeValue elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE): - return (_base.DOCUMENT,) + return (base.DOCUMENT,) else: - return _base.UNKNOWN, node.nodeType + return base.UNKNOWN, node.nodeType def getFirstChild(self, node): return node.firstChild diff --git a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/etree.py b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/etree.py index 69840c2..d15a7ee 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/etree.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/etree.py @@ -1,19 +1,12 @@ from __future__ import absolute_import, division, unicode_literals -try: - from collections import OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict - except ImportError: - OrderedDict = dict - +from collections import OrderedDict import re from six import string_types -from . import _base -from ..utils import moduleFactoryFactory +from . 
import base +from .._utils import moduleFactoryFactory tag_regexp = re.compile("{([^}]*)}(.*)") @@ -22,7 +15,7 @@ def getETreeBuilder(ElementTreeImplementation): ElementTree = ElementTreeImplementation ElementTreeCommentType = ElementTree.Comment("asd").tag - class TreeWalker(_base.NonRecursiveTreeWalker): + class TreeWalker(base.NonRecursiveTreeWalker): # pylint:disable=unused-variable """Given the particular ElementTree representation, this implementation, to avoid using recursion, returns "nodes" as tuples with the following content: @@ -38,9 +31,9 @@ def getETreeBuilder(ElementTreeImplementation): """ def getNodeDetails(self, node): if isinstance(node, tuple): # It might be the root Element - elt, key, parents, flag = node + elt, _, _, flag = node if flag in ("text", "tail"): - return _base.TEXT, getattr(elt, flag) + return base.TEXT, getattr(elt, flag) else: node = elt @@ -48,14 +41,14 @@ def getETreeBuilder(ElementTreeImplementation): node = node.getroot() if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"): - return (_base.DOCUMENT,) + return (base.DOCUMENT,) elif node.tag == "": - return (_base.DOCTYPE, node.text, + return (base.DOCTYPE, node.text, node.get("publicId"), node.get("systemId")) elif node.tag == ElementTreeCommentType: - return _base.COMMENT, node.text + return base.COMMENT, node.text else: assert isinstance(node.tag, string_types), type(node.tag) @@ -73,7 +66,7 @@ def getETreeBuilder(ElementTreeImplementation): attrs[(match.group(1), match.group(2))] = value else: attrs[(None, name)] = value - return (_base.ELEMENT, namespace, tag, + return (base.ELEMENT, namespace, tag, attrs, len(node) or node.text) def getFirstChild(self, node): @@ -129,6 +122,7 @@ def getETreeBuilder(ElementTreeImplementation): if not parents: return parent else: + assert list(parents[-1]).count(parent) == 1 return parent, list(parents[-1]).index(parent), parents, None return locals() diff --git a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/lxmletree.py 
b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/etree_lxml.py similarity index 77% rename from Shared/lib/python3.4/site-packages/html5lib/treewalkers/lxmletree.py rename to Shared/lib/python3.4/site-packages/html5lib/treewalkers/etree_lxml.py index 90e116d..fb23631 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/lxmletree.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/etree_lxml.py @@ -4,9 +4,9 @@ from six import text_type from lxml import etree from ..treebuilders.etree import tag_regexp -from . import _base +from . import base -from .. import ihatexml +from .. import _ihatexml def ensure_str(s): @@ -15,20 +15,27 @@ def ensure_str(s): elif isinstance(s, text_type): return s else: - return s.decode("utf-8", "strict") + return s.decode("ascii", "strict") class Root(object): def __init__(self, et): self.elementtree = et self.children = [] - if et.docinfo.internalDTD: - self.children.append(Doctype(self, - ensure_str(et.docinfo.root_name), - ensure_str(et.docinfo.public_id), - ensure_str(et.docinfo.system_url))) - root = et.getroot() - node = root + + try: + if et.docinfo.internalDTD: + self.children.append(Doctype(self, + ensure_str(et.docinfo.root_name), + ensure_str(et.docinfo.public_id), + ensure_str(et.docinfo.system_url))) + except AttributeError: + pass + + try: + node = et.getroot() + except AttributeError: + node = et while node.getprevious() is not None: node = node.getprevious() @@ -115,35 +122,38 @@ class FragmentWrapper(object): return len(self.obj) -class TreeWalker(_base.NonRecursiveTreeWalker): +class TreeWalker(base.NonRecursiveTreeWalker): def __init__(self, tree): - if hasattr(tree, "getroot"): - tree = Root(tree) - elif isinstance(tree, list): + # pylint:disable=redefined-variable-type + if isinstance(tree, list): + self.fragmentChildren = set(tree) tree = FragmentRoot(tree) - _base.NonRecursiveTreeWalker.__init__(self, tree) - self.filter = ihatexml.InfosetFilter() + else: + self.fragmentChildren = 
set() + tree = Root(tree) + base.NonRecursiveTreeWalker.__init__(self, tree) + self.filter = _ihatexml.InfosetFilter() def getNodeDetails(self, node): if isinstance(node, tuple): # Text node node, key = node assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - return _base.TEXT, ensure_str(getattr(node, key)) + return base.TEXT, ensure_str(getattr(node, key)) elif isinstance(node, Root): - return (_base.DOCUMENT,) + return (base.DOCUMENT,) elif isinstance(node, Doctype): - return _base.DOCTYPE, node.name, node.public_id, node.system_id + return base.DOCTYPE, node.name, node.public_id, node.system_id elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"): - return _base.TEXT, node.obj + return base.TEXT, ensure_str(node.obj) elif node.tag == etree.Comment: - return _base.COMMENT, ensure_str(node.text) + return base.COMMENT, ensure_str(node.text) elif node.tag == etree.Entity: - return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &; + return base.ENTITY, ensure_str(node.text)[1:-1] # strip &; else: # This is assumed to be an ordinary element @@ -162,7 +172,7 @@ class TreeWalker(_base.NonRecursiveTreeWalker): attrs[(match.group(1), match.group(2))] = value else: attrs[(None, name)] = value - return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag), + return (base.ELEMENT, namespace, self.filter.fromXmlName(tag), attrs, len(node) > 0 or node.text) def getFirstChild(self, node): @@ -197,5 +207,7 @@ class TreeWalker(_base.NonRecursiveTreeWalker): if key == "text": return node # else: fallback to "normal" processing + elif node in self.fragmentChildren: + return None return node.getparent() diff --git a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/genshistream.py b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/genshi.py similarity index 90% rename from Shared/lib/python3.4/site-packages/html5lib/treewalkers/genshistream.py rename to Shared/lib/python3.4/site-packages/html5lib/treewalkers/genshi.py 
index f559c45..7483be2 100644 --- a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/genshistream.py +++ b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/genshi.py @@ -4,12 +4,12 @@ from genshi.core import QName from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT -from . import _base +from . import base from ..constants import voidElements, namespaces -class TreeWalker(_base.TreeWalker): +class TreeWalker(base.TreeWalker): def __iter__(self): # Buffer the events so we can pass in the following one previous = None @@ -25,7 +25,7 @@ class TreeWalker(_base.TreeWalker): yield token def tokens(self, event, next): - kind, data, pos = event + kind, data, _ = event if kind == START: tag, attribs = data name = tag.localname @@ -39,8 +39,8 @@ class TreeWalker(_base.TreeWalker): if namespace == namespaces["html"] and name in voidElements: for token in self.emptyTag(namespace, name, converted_attribs, - not next or next[0] != END - or next[1] != tag): + not next or next[0] != END or + next[1] != tag): yield token else: yield self.startTag(namespace, name, converted_attribs) @@ -48,7 +48,7 @@ class TreeWalker(_base.TreeWalker): elif kind == END: name = data.localname namespace = data.namespace - if name not in voidElements: + if namespace != namespaces["html"] or name not in voidElements: yield self.endTag(namespace, name) elif kind == COMMENT: diff --git a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/pulldom.py b/Shared/lib/python3.4/site-packages/html5lib/treewalkers/pulldom.py deleted file mode 100644 index 0b0f515..0000000 --- a/Shared/lib/python3.4/site-packages/html5lib/treewalkers/pulldom.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals - -from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \ - COMMENT, IGNORABLE_WHITESPACE, CHARACTERS - -from . 
import _base - -from ..constants import voidElements - - -class TreeWalker(_base.TreeWalker): - def __iter__(self): - ignore_until = None - previous = None - for event in self.tree: - if previous is not None and \ - (ignore_until is None or previous[1] is ignore_until): - if previous[1] is ignore_until: - ignore_until = None - for token in self.tokens(previous, event): - yield token - if token["type"] == "EmptyTag": - ignore_until = previous[1] - previous = event - if ignore_until is None or previous[1] is ignore_until: - for token in self.tokens(previous, None): - yield token - elif ignore_until is not None: - raise ValueError("Illformed DOM event stream: void element without END_ELEMENT") - - def tokens(self, event, next): - type, node = event - if type == START_ELEMENT: - name = node.nodeName - namespace = node.namespaceURI - attrs = {} - for attr in list(node.attributes.keys()): - attr = node.getAttributeNode(attr) - attrs[(attr.namespaceURI, attr.localName)] = attr.value - if name in voidElements: - for token in self.emptyTag(namespace, - name, - attrs, - not next or next[1] is not node): - yield token - else: - yield self.startTag(namespace, name, attrs) - - elif type == END_ELEMENT: - name = node.nodeName - namespace = node.namespaceURI - if name not in voidElements: - yield self.endTag(namespace, name) - - elif type == COMMENT: - yield self.comment(node.nodeValue) - - elif type in (IGNORABLE_WHITESPACE, CHARACTERS): - for token in self.text(node.nodeValue): - yield token - - else: - yield self.unknown(type) diff --git a/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/PKG-INFO new file mode 100644 index 0000000..98485e8 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/PKG-INFO @@ -0,0 +1,83 @@ +Metadata-Version: 1.0 +Name: ifaddr +Version: 0.1.6 +Summary: Enumerates all IP addresses on all network adapters of the system. 
+Home-page: https://github.com/pydron/ifaddr +Author: Stefan C. Mueller +Author-email: scm@smurn.org +License: UNKNOWN +Description: + + ifaddr - Enumerate IP addresses on the local network adapters + ============================================================= + + `ifaddr` is a small Python library that allows you to find all the + IP addresses of the computer. It is tested on **Linux**, **OS X**, and + **Windows**. Other BSD derivatives like **OpenBSD**, **FreeBSD**, and + **NetBSD** should work too, but I haven't personally tested those. + + This library is open source and released under the MIT License. + + You can install it with `pip install ifaddr`. It doesn't need to + compile anything, so there shouldn't be any surprises. Even on Windows. + + ---------------------- + Let's get going! + ---------------------- + + .. code-block:: python + + import ifaddr + + adapters = ifaddr.get_adapters() + + for adapter in adapters: + print "IPs of network adapter " + adapter.nice_name + for ip in adapter.ips: + print " %s/%s" % (ip.ip, ip.network_prefix) + + This will print: + + .. code-block:: python + + IPs of network adapter H5321 gw Mobile Broadband Driver + IP ('fe80::9:ebdf:30ab:39a3', 0L, 17L)/64 + IP 169.254.57.163/16 + IPs of network adapter Intel(R) Centrino(R) Advanced-N 6205 + IP ('fe80::481f:3c9d:c3f6:93f8', 0L, 12L)/64 + IP 192.168.0.51/24 + IPs of network adapter Intel(R) 82579LM Gigabit Network Connection + IP ('fe80::85cd:e07e:4f7a:6aa6', 0L, 11L)/64 + IP 192.168.0.53/24 + IPs of network adapter Software Loopback Interface 1 + IP ('::1', 0L, 0L)/128 + IP 127.0.0.1/8 + + You get both IPv4 and IPv6 addresses. The later complete with + flowinfo and scope_id. + + ------------- + Documentation + ------------- + + The complete documentation (there isn't much to document) can be found here: + `ifaddr Documentation `_. 
+ + ----------------------------------- + Bug Reports and other contributions + ----------------------------------- + + This project is hosted here `ifaddr github page `_. + + ------------ + Alternatives + ------------ + + Alastair Houghton develops `netifaces `_ + which can do everything this library can, and more. The only drawback is that it needs + to be compiled, which can make the installation difficult. + + + + +Platform: UNKNOWN diff --git a/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/SOURCES.txt new file mode 100644 index 0000000..a110a35 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/SOURCES.txt @@ -0,0 +1,13 @@ +MANIFEST.in +README.rst +setup.cfg +setup.py +ifaddr/__init__.py +ifaddr/_posix.py +ifaddr/_shared.py +ifaddr/_win32.py +ifaddr/test_ifaddr.py +ifaddr.egg-info/PKG-INFO +ifaddr.egg-info/SOURCES.txt +ifaddr.egg-info/dependency_links.txt +ifaddr.egg-info/top_level.txt \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/zip-safe b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/dependency_links.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/zip-safe rename to Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/dependency_links.txt diff --git a/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/installed-files.txt new file mode 100644 index 0000000..bab581a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/installed-files.txt @@ -0,0 +1,14 @@ +../ifaddr/__init__.py +../ifaddr/__pycache__/__init__.cpython-37.pyc +../ifaddr/__pycache__/_posix.cpython-37.pyc +../ifaddr/__pycache__/_shared.cpython-37.pyc +../ifaddr/__pycache__/_win32.cpython-37.pyc +../ifaddr/__pycache__/test_ifaddr.cpython-37.pyc 
+../ifaddr/_posix.py +../ifaddr/_shared.py +../ifaddr/_win32.py +../ifaddr/test_ifaddr.py +PKG-INFO +SOURCES.txt +dependency_links.txt +top_level.txt diff --git a/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/top_level.txt b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/top_level.txt new file mode 100644 index 0000000..1c16621 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr-0.1.6.egg-info/top_level.txt @@ -0,0 +1 @@ +ifaddr diff --git a/Shared/lib/python3.4/site-packages/ifaddr/__init__.py b/Shared/lib/python3.4/site-packages/ifaddr/__init__.py new file mode 100644 index 0000000..76418a8 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2014 Stefan C. Mueller + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ + +import os + +from ifaddr._shared import Adapter, IP + +if os.name == "nt": + from ifaddr._win32 import get_adapters +elif os.name == "posix": + from ifaddr._posix import get_adapters +else: + raise RuntimeError("Unsupported Operating System: %s" % os.name) + +__all__ = ['Adapter', 'IP', 'get_adapters'] diff --git a/Shared/lib/python3.4/site-packages/ifaddr/_posix.py b/Shared/lib/python3.4/site-packages/ifaddr/_posix.py new file mode 100644 index 0000000..7b86254 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr/_posix.py @@ -0,0 +1,85 @@ +# Copyright (c) 2014 Stefan C. Mueller + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ + +import sys +import os +import ctypes.util +import ipaddress +import collections + +import ifaddr._shared as shared +#from ifaddr._shared import sockaddr, Interface, sockaddr_to_ip, ipv6_prefixlength + +class ifaddrs(ctypes.Structure): + pass +ifaddrs._fields_ = [('ifa_next', ctypes.POINTER(ifaddrs)), + ('ifa_name', ctypes.c_char_p), + ('ifa_flags', ctypes.c_uint), + ('ifa_addr', ctypes.POINTER(shared.sockaddr)), + ('ifa_netmask', ctypes.POINTER(shared.sockaddr))] + +libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True) + +def get_adapters(): + + addr0 = addr = ctypes.POINTER(ifaddrs)() + retval = libc.getifaddrs(ctypes.byref(addr)) + if retval != 0: + eno = ctypes.get_errno() + raise OSError(eno, os.strerror(eno)) + + ips = collections.OrderedDict() + + def add_ip(adapter_name, ip): + if not adapter_name in ips: + ips[adapter_name] = shared.Adapter(adapter_name, adapter_name, []) + ips[adapter_name].ips.append(ip) + + + while addr: + name = addr[0].ifa_name + if sys.version_info[0] > 2: + name = name.decode(encoding='UTF-8') + ip = shared.sockaddr_to_ip(addr[0].ifa_addr) + if ip: + if addr[0].ifa_netmask and not addr[0].ifa_netmask[0].sa_familiy: + addr[0].ifa_netmask[0].sa_familiy = addr[0].ifa_addr[0].sa_familiy + netmask = shared.sockaddr_to_ip(addr[0].ifa_netmask) + if isinstance(netmask, tuple): + netmask = netmask[0] + if sys.version_info[0] > 2: + netmaskStr = str(netmask) + else: + netmaskStr = unicode(netmask) + prefixlen = shared.ipv6_prefixlength(ipaddress.IPv6Address(netmaskStr)) + else: + if sys.version_info[0] > 2: + netmaskStr = str('0.0.0.0/' + netmask) + else: + netmaskStr = unicode('0.0.0.0/' + netmask) + prefixlen = ipaddress.IPv4Network(netmaskStr).prefixlen + ip = shared.IP(ip, prefixlen, name) + add_ip(name, ip) + addr = addr[0].ifa_next + + libc.freeifaddrs(addr0) + + return ips.values() diff --git a/Shared/lib/python3.4/site-packages/ifaddr/_shared.py b/Shared/lib/python3.4/site-packages/ifaddr/_shared.py new file mode 
100644 index 0000000..9090d7c --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr/_shared.py @@ -0,0 +1,182 @@ +# Copyright (c) 2014 Stefan C. Mueller + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +import ctypes +import socket +import ipaddress +import platform + +class Adapter(object): + """ + Represents a network interface device controller (NIC), such as a + network card. An adapter can have multiple IPs. + + On Linux aliasing (multiple IPs per physical NIC) is implemented + by creating 'virtual' adapters, each represented by an instance + of this class. Each of those 'virtual' adapters can have both + a IPv4 and an IPv6 IP address. + """ + + def __init__(self, name, nice_name, ips): + + #: Unique name that identifies the adapter in the system. + #: On Linux this is of the form of `eth0` or `eth0:1`, on + #: Windows it is a UUID in string representation, such as + #: `{846EE342-7039-11DE-9D20-806E6F6E6963}`. 
+ self.name = name + + #: Human readable name of the adpater. On Linux this + #: is currently the same as :attr:`name`. On Windows + #: this is the name of the device. + self.nice_name = nice_name + + #: List of :class:`ifaddr.IP` instances in the order they were + #: reported by the system. + self.ips = ips + + def __repr__(self): + return "Adapter(name={name}, nice_name={nice_name}, ips={ips})".format( + name = repr(self.name), + nice_name = repr(self.nice_name), + ips = repr(self.ips) + ) + + +class IP(object): + """ + Represents an IP address of an adapter. + """ + + def __init__(self, ip, network_prefix, nice_name): + + #: IP address. For IPv4 addresses this is a string in + #: "xxx.xxx.xxx.xxx" format. For IPv6 addresses this + #: is a three-tuple `(ip, flowinfo, scope_id)`, where + #: `ip` is a string in the usual collon separated + #: hex format. + self.ip = ip + + #: Number of bits of the IP that represent the + #: network. For a `255.255.255.0` netmask, this + #: number would be `24`. + self.network_prefix = network_prefix + + #: Human readable name for this IP. + #: On Linux is this currently the same as the adapter name. + #: On Windows this is the name of the network connection + #: as configured in the system control panel. + self.nice_name = nice_name + + @property + def is_IPv4(self): + """ + Returns `True` if this IP is an IPv4 address and `False` + if it is an IPv6 address. + """ + return not isinstance(self.ip, tuple) + + @property + def is_IPv6(self): + """ + Returns `True` if this IP is an IPv6 address and `False` + if it is an IPv4 address. 
+ """ + return isinstance(self.ip, tuple) + + + def __repr__(self): + return "IP(ip={ip}, network_prefix={network_prefix}, nice_name={nice_name})".format( + ip = repr(self.ip), + network_prefix = repr(self.network_prefix), + nice_name = repr(self.nice_name) + ) + + +if platform.system() == "Darwin" or "BSD" in platform.system(): + + # BSD derived systems use marginally different structures + # than either Linux or Windows. + # I still keep it in `shared` since we can use + # both structures equally. + + class sockaddr(ctypes.Structure): + _fields_= [('sa_len', ctypes.c_uint8), + ('sa_familiy', ctypes.c_uint8), + ('sa_data', ctypes.c_uint8 * 14)] + + class sockaddr_in(ctypes.Structure): + _fields_= [('sa_len', ctypes.c_uint8), + ('sa_familiy', ctypes.c_uint8), + ('sin_port', ctypes.c_uint16), + ('sin_addr', ctypes.c_uint8 * 4), + ('sin_zero', ctypes.c_uint8 * 8)] + + class sockaddr_in6(ctypes.Structure): + _fields_= [('sa_len', ctypes.c_uint8), + ('sa_familiy', ctypes.c_uint8), + ('sin6_port', ctypes.c_uint16), + ('sin6_flowinfo', ctypes.c_uint32), + ('sin6_addr', ctypes.c_uint8 * 16), + ('sin6_scope_id', ctypes.c_uint32)] + +else: + + class sockaddr(ctypes.Structure): + _fields_= [('sa_familiy', ctypes.c_uint16), + ('sa_data', ctypes.c_uint8 * 14)] + + class sockaddr_in(ctypes.Structure): + _fields_= [('sin_familiy', ctypes.c_uint16), + ('sin_port', ctypes.c_uint16), + ('sin_addr', ctypes.c_uint8 * 4), + ('sin_zero', ctypes.c_uint8 * 8)] + + class sockaddr_in6(ctypes.Structure): + _fields_= [('sin6_familiy', ctypes.c_uint16), + ('sin6_port', ctypes.c_uint16), + ('sin6_flowinfo', ctypes.c_uint32), + ('sin6_addr', ctypes.c_uint8 * 16), + ('sin6_scope_id', ctypes.c_uint32)] + + +def sockaddr_to_ip(sockaddr_ptr): + if sockaddr_ptr: + if sockaddr_ptr[0].sa_familiy == socket.AF_INET: + ipv4 = ctypes.cast(sockaddr_ptr, ctypes.POINTER(sockaddr_in)) + ippacked = bytes(bytearray(ipv4[0].sin_addr)) + ip = str(ipaddress.ip_address(ippacked)) + return ip + elif 
sockaddr_ptr[0].sa_familiy == socket.AF_INET6: + ipv6 = ctypes.cast(sockaddr_ptr, ctypes.POINTER(sockaddr_in6)) + flowinfo = ipv6[0].sin6_flowinfo + ippacked = bytes(bytearray(ipv6[0].sin6_addr)) + ip = str(ipaddress.ip_address(ippacked)) + scope_id = ipv6[0].sin6_scope_id + return(ip, flowinfo, scope_id) + return None + + +def ipv6_prefixlength(address): + prefix_length = 0 + for i in range(address.max_prefixlen): + if int(address) >> i & 1: + prefix_length = prefix_length + 1 + return prefix_length diff --git a/Shared/lib/python3.4/site-packages/ifaddr/_win32.py b/Shared/lib/python3.4/site-packages/ifaddr/_win32.py new file mode 100644 index 0000000..eb1a501 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr/_win32.py @@ -0,0 +1,129 @@ +# Copyright (c) 2014 Stefan C. Mueller + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ + +import ctypes +from ctypes import wintypes + +import ifaddr._shared as shared + +NO_ERROR=0 +ERROR_BUFFER_OVERFLOW = 111 +MAX_ADAPTER_NAME_LENGTH = 256 +MAX_ADAPTER_DESCRIPTION_LENGTH = 128 +MAX_ADAPTER_ADDRESS_LENGTH = 8 +AF_UNSPEC = 0 + + + +class SOCKET_ADDRESS(ctypes.Structure): + _fields_ = [('lpSockaddr', ctypes.POINTER(shared.sockaddr)), + ('iSockaddrLength', wintypes.INT)] + +class IP_ADAPTER_UNICAST_ADDRESS(ctypes.Structure): + pass +IP_ADAPTER_UNICAST_ADDRESS._fields_ = \ + [('Length', wintypes.ULONG), + ('Flags', wintypes.DWORD), + ('Next', ctypes.POINTER(IP_ADAPTER_UNICAST_ADDRESS)), + ('Address', SOCKET_ADDRESS), + ('PrefixOrigin', ctypes.c_uint), + ('SuffixOrigin', ctypes.c_uint), + ('DadState', ctypes.c_uint), + ('ValidLifetime', wintypes.ULONG), + ('PreferredLifetime', wintypes.ULONG), + ('LeaseLifetime', wintypes.ULONG), + ('OnLinkPrefixLength', ctypes.c_uint8), + ] + +class IP_ADAPTER_ADDRESSES(ctypes.Structure): + pass +IP_ADAPTER_ADDRESSES._fields_ = [('Length', wintypes.ULONG), + ('IfIndex', wintypes.DWORD), + ('Next', ctypes.POINTER(IP_ADAPTER_ADDRESSES)), + ('AdapterName', ctypes.c_char_p), + ('FirstUnicastAddress', ctypes.POINTER(IP_ADAPTER_UNICAST_ADDRESS)), + ('FirstAnycastAddress', ctypes.POINTER(None)), + ('FirstMulticastAddress', ctypes.POINTER(None)), + ('FirstDnsServerAddress', ctypes.POINTER(None)), + ('DnsSuffix', ctypes.c_wchar_p), + ('Description', ctypes.c_wchar_p), + ('FriendlyName', ctypes.c_wchar_p) + ] + + +iphlpapi = ctypes.windll.LoadLibrary("Iphlpapi") + + +def enumerate_interfaces_of_adapter(nice_name, address): + + # Iterate through linked list and fill list + addresses = [] + while True: + addresses.append(address) + if not address.Next: + break + address = address.Next[0] + + for address in addresses: + ip = shared.sockaddr_to_ip(address.Address.lpSockaddr) + network_prefix = address.OnLinkPrefixLength + yield shared.IP(ip, network_prefix, nice_name) + + +def get_adapters(): + + # Call GetAdaptersAddresses() with 
error and buffer size handling + + addressbuffersize = wintypes.ULONG(15*1024) + retval = ERROR_BUFFER_OVERFLOW + while retval == ERROR_BUFFER_OVERFLOW: + addressbuffer = ctypes.create_string_buffer(addressbuffersize.value) + retval = iphlpapi.GetAdaptersAddresses(wintypes.ULONG(AF_UNSPEC), + wintypes.ULONG(0), + None, + ctypes.byref(addressbuffer), + ctypes.byref(addressbuffersize)) + if retval != NO_ERROR: + raise ctypes.WinError() + + # Iterate through adapters fill array + address_infos = [] + address_info = IP_ADAPTER_ADDRESSES.from_buffer(addressbuffer) + while True: + address_infos.append(address_info) + if not address_info.Next: + break + address_info = address_info.Next[0] + + + # Iterate through unicast addresses + result = [] + for adapter_info in address_infos: + + name = adapter_info.AdapterName + nice_name = adapter_info.Description + + if adapter_info.FirstUnicastAddress: + ips = enumerate_interfaces_of_adapter(adapter_info.FriendlyName, adapter_info.FirstUnicastAddress[0]) + ips = list(ips) + result.append(shared.Adapter(name, nice_name, ips)) + + return result diff --git a/Shared/lib/python3.4/site-packages/ifaddr/test_ifaddr.py b/Shared/lib/python3.4/site-packages/ifaddr/test_ifaddr.py new file mode 100644 index 0000000..a5d4427 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ifaddr/test_ifaddr.py @@ -0,0 +1,24 @@ +# Copyright (C) 2015 Stefan C. Mueller + +import unittest +import ifaddr + +class TestIfaddr(unittest.TestCase): + """ + Unittests for :mod:`ifaddr`. + + There isn't much unit-testing that can be done without making assumptions + on the system or mocking of operating system APIs. So this just contains + a sanity check for the moment. 
+ """ + + def test_get_adapters_contains_localhost(self): + + found = False + adapters = ifaddr.get_adapters() + for adapter in adapters: + for ip in adapter.ips: + if ip.ip == "127.0.0.1": + found = True + + self.assertTrue(found, "No adapter has IP 127.0.0.1: %s" % str(adapters)) \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/PKG-INFO similarity index 80% rename from Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/PKG-INFO rename to Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/PKG-INFO index 05a31b9..48f8d9c 100644 --- a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/PKG-INFO +++ b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/PKG-INFO @@ -1,12 +1,11 @@ Metadata-Version: 1.1 Name: ox -Version: 2.3.b-786- +Version: 2.3.895 Summary: python-ox - the web in a dict -Home-page: https://wiki.0x2620.org/wiki/python-ox +Home-page: https://code.0x2620.org/0x2620/python-ox Author: 0x2620 Author-email: 0x2620@0x2620.org License: GPLv3 -Download-URL: https://code.0x2620.org/python-ox/download Description: UNKNOWN Platform: UNKNOWN Classifier: Operating System :: OS Independent @@ -15,4 +14,5 @@ Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/SOURCES.txt similarity index 95% rename from Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/SOURCES.txt rename to Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/SOURCES.txt index 0eabe7d..00ee661 100644 --- 
a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/SOURCES.txt +++ b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/SOURCES.txt @@ -1,4 +1,4 @@ -README +README.md ox/__init__.py ox/__version.py ox/api.py @@ -53,7 +53,6 @@ ox/web/itunes.py ox/web/lookupbyisbn.py ox/web/lyricsfly.py ox/web/metacritic.py -ox/web/mininova.py ox/web/movieposterdb.py ox/web/opensubtitles.py ox/web/oxdb.py @@ -63,7 +62,6 @@ ox/web/siteparser.py ox/web/spiegel.py ox/web/startpage.py ox/web/thepiratebay.py -ox/web/torrent.py ox/web/tv.py ox/web/twitter.py ox/web/ubu.py diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/installed-files.txt new file mode 100644 index 0000000..a279f68 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/installed-files.txt @@ -0,0 +1,133 @@ +../ox/__init__.py +../ox/__pycache__/__init__.cpython-37.pyc +../ox/__pycache__/__version.cpython-37.pyc +../ox/__pycache__/api.cpython-37.pyc +../ox/__pycache__/cache.cpython-37.pyc +../ox/__pycache__/file.cpython-37.pyc +../ox/__pycache__/fixunicode.cpython-37.pyc +../ox/__pycache__/form.cpython-37.pyc +../ox/__pycache__/format.cpython-37.pyc +../ox/__pycache__/geo.cpython-37.pyc +../ox/__pycache__/html.cpython-37.pyc +../ox/__pycache__/image.cpython-37.pyc +../ox/__pycache__/iso.cpython-37.pyc +../ox/__pycache__/js.cpython-37.pyc +../ox/__pycache__/jsonc.cpython-37.pyc +../ox/__pycache__/location.cpython-37.pyc +../ox/__pycache__/movie.cpython-37.pyc +../ox/__pycache__/net.cpython-37.pyc +../ox/__pycache__/normalize.cpython-37.pyc 
+../ox/__pycache__/oembed.cpython-37.pyc +../ox/__pycache__/srt.cpython-37.pyc +../ox/__pycache__/text.cpython-37.pyc +../ox/__pycache__/utils.cpython-37.pyc +../ox/__pycache__/vtt.cpython-37.pyc +../ox/__version.py +../ox/api.py +../ox/cache.py +../ox/file.py +../ox/fixunicode.py +../ox/form.py +../ox/format.py +../ox/geo.py +../ox/html.py +../ox/image.py +../ox/iso.py +../ox/js.py +../ox/jsonc.py +../ox/location.py +../ox/movie.py +../ox/net.py +../ox/normalize.py +../ox/oembed.py +../ox/srt.py +../ox/text.py +../ox/torrent/__init__.py +../ox/torrent/__pycache__/__init__.cpython-37.pyc +../ox/torrent/__pycache__/bencode.cpython-37.pyc +../ox/torrent/__pycache__/bencode3.cpython-37.pyc +../ox/torrent/__pycache__/makemetafile.cpython-37.pyc +../ox/torrent/bencode.py +../ox/torrent/bencode3.py +../ox/torrent/makemetafile.py +../ox/utils.py +../ox/vtt.py +../ox/web/__init__.py +../ox/web/__pycache__/__init__.cpython-37.pyc +../ox/web/__pycache__/abebooks.cpython-37.pyc +../ox/web/__pycache__/allmovie.cpython-37.pyc +../ox/web/__pycache__/amazon.cpython-37.pyc +../ox/web/__pycache__/apple.cpython-37.pyc +../ox/web/__pycache__/archive.cpython-37.pyc +../ox/web/__pycache__/arsenalberlin.cpython-37.pyc +../ox/web/__pycache__/auth.cpython-37.pyc +../ox/web/__pycache__/criterion.cpython-37.pyc +../ox/web/__pycache__/dailymotion.cpython-37.pyc +../ox/web/__pycache__/duckduckgo.cpython-37.pyc +../ox/web/__pycache__/epguides.cpython-37.pyc +../ox/web/__pycache__/filmsdivision.cpython-37.pyc +../ox/web/__pycache__/flixter.cpython-37.pyc +../ox/web/__pycache__/freebase.cpython-37.pyc +../ox/web/__pycache__/google.cpython-37.pyc +../ox/web/__pycache__/imdb.cpython-37.pyc +../ox/web/__pycache__/impawards.cpython-37.pyc +../ox/web/__pycache__/itunes.cpython-37.pyc +../ox/web/__pycache__/lookupbyisbn.cpython-37.pyc +../ox/web/__pycache__/lyricsfly.cpython-37.pyc +../ox/web/__pycache__/metacritic.cpython-37.pyc +../ox/web/__pycache__/movieposterdb.cpython-37.pyc 
+../ox/web/__pycache__/opensubtitles.cpython-37.pyc +../ox/web/__pycache__/oxdb.cpython-37.pyc +../ox/web/__pycache__/piratecinema.cpython-37.pyc +../ox/web/__pycache__/rottentomatoes.cpython-37.pyc +../ox/web/__pycache__/siteparser.cpython-37.pyc +../ox/web/__pycache__/spiegel.cpython-37.pyc +../ox/web/__pycache__/startpage.cpython-37.pyc +../ox/web/__pycache__/thepiratebay.cpython-37.pyc +../ox/web/__pycache__/tv.cpython-37.pyc +../ox/web/__pycache__/twitter.cpython-37.pyc +../ox/web/__pycache__/ubu.cpython-37.pyc +../ox/web/__pycache__/vimeo.cpython-37.pyc +../ox/web/__pycache__/wikipedia.cpython-37.pyc +../ox/web/__pycache__/youtube.cpython-37.pyc +../ox/web/abebooks.py +../ox/web/allmovie.py +../ox/web/amazon.py +../ox/web/apple.py +../ox/web/archive.py +../ox/web/arsenalberlin.py +../ox/web/auth.py +../ox/web/criterion.py +../ox/web/dailymotion.py +../ox/web/duckduckgo.py +../ox/web/epguides.py +../ox/web/filmsdivision.py +../ox/web/flixter.py +../ox/web/freebase.py +../ox/web/google.py +../ox/web/imdb.py +../ox/web/impawards.py +../ox/web/itunes.py +../ox/web/lookupbyisbn.py +../ox/web/lyricsfly.py +../ox/web/metacritic.py +../ox/web/movieposterdb.py +../ox/web/opensubtitles.py +../ox/web/oxdb.py +../ox/web/piratecinema.py +../ox/web/rottentomatoes.py +../ox/web/siteparser.py +../ox/web/spiegel.py +../ox/web/startpage.py +../ox/web/thepiratebay.py +../ox/web/tv.py +../ox/web/twitter.py +../ox/web/ubu.py +../ox/web/vimeo.py +../ox/web/wikipedia.py +../ox/web/youtube.py +PKG-INFO +SOURCES.txt +dependency_links.txt +requires.txt +top_level.txt diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/requires.txt b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/requires.txt similarity index 63% rename from Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/requires.txt rename to Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/requires.txt index 95a4bf3..b7509ec 100644 --- 
a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/requires.txt +++ b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/requires.txt @@ -1,3 +1,2 @@ -six>=1.5.2 chardet -feedparser +six>=1.5.2 diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/top_level.txt b/Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/top_level.txt rename to Shared/lib/python3.4/site-packages/ox-2.3.895.egg-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/installed-files.txt deleted file mode 100644 index 634e938..0000000 --- a/Shared/lib/python3.4/site-packages/ox-2.3.b_786_-py3.4.egg-info/installed-files.txt +++ /dev/null @@ -1,137 +0,0 @@ -../ox/__init__.py -../ox/api.py -../ox/cache.py -../ox/file.py -../ox/fixunicode.py -../ox/form.py -../ox/format.py -../ox/geo.py -../ox/html.py -../ox/image.py -../ox/iso.py -../ox/js.py -../ox/jsonc.py -../ox/location.py -../ox/movie.py -../ox/net.py -../ox/normalize.py -../ox/oembed.py -../ox/srt.py -../ox/text.py -../ox/utils.py -../ox/vtt.py -../ox/__version.py -../ox/torrent/__init__.py -../ox/torrent/bencode.py -../ox/torrent/bencode3.py -../ox/torrent/makemetafile.py -../ox/web/__init__.py -../ox/web/abebooks.py -../ox/web/allmovie.py -../ox/web/amazon.py -../ox/web/apple.py -../ox/web/archive.py -../ox/web/arsenalberlin.py -../ox/web/auth.py -../ox/web/criterion.py -../ox/web/dailymotion.py -../ox/web/duckduckgo.py -../ox/web/epguides.py -../ox/web/filmsdivision.py -../ox/web/flixter.py -../ox/web/freebase.py -../ox/web/google.py -../ox/web/imdb.py -../ox/web/impawards.py -../ox/web/itunes.py -../ox/web/lookupbyisbn.py -../ox/web/lyricsfly.py -../ox/web/metacritic.py -../ox/web/mininova.py -../ox/web/movieposterdb.py -../ox/web/opensubtitles.py 
-../ox/web/oxdb.py -../ox/web/piratecinema.py -../ox/web/rottentomatoes.py -../ox/web/siteparser.py -../ox/web/spiegel.py -../ox/web/startpage.py -../ox/web/thepiratebay.py -../ox/web/torrent.py -../ox/web/tv.py -../ox/web/twitter.py -../ox/web/ubu.py -../ox/web/vimeo.py -../ox/web/wikipedia.py -../ox/web/youtube.py -../ox/__pycache__/__init__.cpython-34.pyc -../ox/__pycache__/api.cpython-34.pyc -../ox/__pycache__/cache.cpython-34.pyc -../ox/__pycache__/file.cpython-34.pyc -../ox/__pycache__/fixunicode.cpython-34.pyc -../ox/__pycache__/form.cpython-34.pyc -../ox/__pycache__/format.cpython-34.pyc -../ox/__pycache__/geo.cpython-34.pyc -../ox/__pycache__/html.cpython-34.pyc -../ox/__pycache__/image.cpython-34.pyc -../ox/__pycache__/iso.cpython-34.pyc -../ox/__pycache__/js.cpython-34.pyc -../ox/__pycache__/jsonc.cpython-34.pyc -../ox/__pycache__/location.cpython-34.pyc -../ox/__pycache__/movie.cpython-34.pyc -../ox/__pycache__/net.cpython-34.pyc -../ox/__pycache__/normalize.cpython-34.pyc -../ox/__pycache__/oembed.cpython-34.pyc -../ox/__pycache__/srt.cpython-34.pyc -../ox/__pycache__/text.cpython-34.pyc -../ox/__pycache__/utils.cpython-34.pyc -../ox/__pycache__/vtt.cpython-34.pyc -../ox/__pycache__/__version.cpython-34.pyc -../ox/torrent/__pycache__/__init__.cpython-34.pyc -../ox/torrent/__pycache__/bencode.cpython-34.pyc -../ox/torrent/__pycache__/bencode3.cpython-34.pyc -../ox/torrent/__pycache__/makemetafile.cpython-34.pyc -../ox/web/__pycache__/__init__.cpython-34.pyc -../ox/web/__pycache__/abebooks.cpython-34.pyc -../ox/web/__pycache__/allmovie.cpython-34.pyc -../ox/web/__pycache__/amazon.cpython-34.pyc -../ox/web/__pycache__/apple.cpython-34.pyc -../ox/web/__pycache__/archive.cpython-34.pyc -../ox/web/__pycache__/arsenalberlin.cpython-34.pyc -../ox/web/__pycache__/auth.cpython-34.pyc -../ox/web/__pycache__/criterion.cpython-34.pyc -../ox/web/__pycache__/dailymotion.cpython-34.pyc -../ox/web/__pycache__/duckduckgo.cpython-34.pyc 
-../ox/web/__pycache__/epguides.cpython-34.pyc -../ox/web/__pycache__/filmsdivision.cpython-34.pyc -../ox/web/__pycache__/flixter.cpython-34.pyc -../ox/web/__pycache__/freebase.cpython-34.pyc -../ox/web/__pycache__/google.cpython-34.pyc -../ox/web/__pycache__/imdb.cpython-34.pyc -../ox/web/__pycache__/impawards.cpython-34.pyc -../ox/web/__pycache__/itunes.cpython-34.pyc -../ox/web/__pycache__/lookupbyisbn.cpython-34.pyc -../ox/web/__pycache__/lyricsfly.cpython-34.pyc -../ox/web/__pycache__/metacritic.cpython-34.pyc -../ox/web/__pycache__/mininova.cpython-34.pyc -../ox/web/__pycache__/movieposterdb.cpython-34.pyc -../ox/web/__pycache__/opensubtitles.cpython-34.pyc -../ox/web/__pycache__/oxdb.cpython-34.pyc -../ox/web/__pycache__/piratecinema.cpython-34.pyc -../ox/web/__pycache__/rottentomatoes.cpython-34.pyc -../ox/web/__pycache__/siteparser.cpython-34.pyc -../ox/web/__pycache__/spiegel.cpython-34.pyc -../ox/web/__pycache__/startpage.cpython-34.pyc -../ox/web/__pycache__/thepiratebay.cpython-34.pyc -../ox/web/__pycache__/torrent.cpython-34.pyc -../ox/web/__pycache__/tv.cpython-34.pyc -../ox/web/__pycache__/twitter.cpython-34.pyc -../ox/web/__pycache__/ubu.cpython-34.pyc -../ox/web/__pycache__/vimeo.cpython-34.pyc -../ox/web/__pycache__/wikipedia.cpython-34.pyc -../ox/web/__pycache__/youtube.cpython-34.pyc -dependency_links.txt -PKG-INFO -top_level.txt -requires.txt -SOURCES.txt diff --git a/Shared/lib/python3.4/site-packages/ox/__version.py b/Shared/lib/python3.4/site-packages/ox/__version.py index 7e67bfd..0203b01 100644 --- a/Shared/lib/python3.4/site-packages/ox/__version.py +++ b/Shared/lib/python3.4/site-packages/ox/__version.py @@ -1 +1 @@ -VERSION="2.3.b'786'" \ No newline at end of file +VERSION="2.3.895" \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/ox/api.py b/Shared/lib/python3.4/site-packages/ox/api.py index 9717ff2..3b75e97 100644 --- a/Shared/lib/python3.4/site-packages/ox/api.py +++ 
b/Shared/lib/python3.4/site-packages/ox/api.py @@ -1,13 +1,18 @@ # -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 # GPL 2011 -from __future__ import with_statement +from __future__ import print_function +from types import MethodType +import gzip +import os +import shutil +import sys +import time from six.moves import http_cookiejar as cookielib -import gzip from six import BytesIO, PY2 from six.moves import urllib -from types import MethodType +from six.moves.urllib.parse import urlparse from . import __version__ from .utils import json @@ -15,6 +20,8 @@ from .form import MultiPartForm __all__ = ['getAPI', 'API'] +CHUNK_SIZE = 1024*1024*5 + def getAPI(url, cj=None): return API(url, cj) @@ -101,7 +108,7 @@ class API(object): result = result.decode('utf-8') result = json.loads(result) except: - result = {'status':{}} + result = {'status': {}} result['status']['code'] = e.code result['status']['text'] = str(e) return result @@ -123,3 +130,112 @@ class API(object): form.add_field('data', json.dumps(data)) return self._json_request(self.url, form) + def save_url(self, url, filename, overwrite=False): + chunk_size = 16 * 1024 + if not os.path.exists(filename) or overwrite: + dirname = os.path.dirname(filename) + if dirname and not os.path.exists(dirname): + os.makedirs(dirname) + request = urllib.request.Request(url, method='GET') + tmpname = filename + '.tmp' + with open(tmpname, 'wb') as fd: + u = self._opener.open(request) + for chunk in iter(lambda: u.read(chunk_size), b''): + fd.write(chunk) + shutil.move(tmpname, filename) + + def upload_chunks(self, url, filename, data=None): + form = MultiPartForm() + if data: + for key in data: + form.add_field(key, data[key]) + data = self._json_request(url, form) + + def full_url(path): + if path.startswith('/'): + u = urlparse(url) + path = '%s://%s%s' % (u.scheme, u.netloc, path) + return path + + if 'uploadUrl' in data: + uploadUrl = full_url(data['uploadUrl']) + f = open(filename, 'rb') + fsize = 
os.stat(filename).st_size + done = 0 + if 'offset' in data and data['offset'] < fsize: + done = data['offset'] + f.seek(done) + resume_offset = done + else: + resume_offset = 0 + chunk = f.read(CHUNK_SIZE) + fname = os.path.basename(filename) + if not isinstance(fname, bytes): + fname = fname.encode('utf-8') + while chunk: + form = MultiPartForm() + form.add_file('chunk', fname, chunk) + if len(chunk) < CHUNK_SIZE or f.tell() == fsize: + form.add_field('done', '1') + form.add_field('offset', str(done)) + try: + data = self._json_request(uploadUrl, form) + except KeyboardInterrupt: + print("\ninterrupted by user.") + sys.exit(1) + except: + print("uploading chunk failed, will try again in 5 seconds\r", end='') + sys.stdout.flush() + data = {'result': -1} + time.sleep(5) + if data and 'status' in data: + if data['status']['code'] == 403: + print("login required") + return False + if data['status']['code'] != 200: + print("request returned error, will try again in 5 seconds") + if DEBUG: + print(data) + time.sleep(5) + if data and data.get('result') == 1: + done += len(chunk) + if data.get('offset') not in (None, done): + print('server offset out of sync, continue from', data['offset']) + done = data['offset'] + f.seek(done) + chunk = f.read(CHUNK_SIZE) + if data and 'result' in data and data.get('result') == 1: + return data.get('id', True) + else: + return False + return False + +def signin(url): + import sys + from getpass import getpass + from .web import auth + + if not url.startswith('http'): + site = url + url = 'https://%s/api/' % url + else: + site = url.split('/')[2] + api = API(url) + update = False + try: + credentials = auth.get(site) + except: + credentials = {} + print('Please provide your username and password for %s:' % site) + credentials['username'] = input('Username: ') + credentials['password'] = getpass('Password: ') + update = True + r = api.signin(**credentials) + if 'errors' in r.get('data', {}): + for kv in r['data']['errors'].items(): + 
print('%s: %s' % kv) + sys.exit(1) + if update: + auth.update(site, credentials) + return api + diff --git a/Shared/lib/python3.4/site-packages/ox/cache.py b/Shared/lib/python3.4/site-packages/ox/cache.py index b8264de..c475322 100644 --- a/Shared/lib/python3.4/site-packages/ox/cache.py +++ b/Shared/lib/python3.4/site-packages/ox/cache.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 # GPL 2011 -from __future__ import with_statement, print_function +from __future__ import print_function import gzip -import zlib import hashlib import os -from six import BytesIO +import sqlite3 import time +import zlib + +from six import BytesIO from six.moves import urllib from six import PY2 -import sqlite3 +try: + import requests + USE_REQUESTS = True +except: + USE_REQUESTS = False from .utils import json from .file import makedirs @@ -19,12 +25,14 @@ from .file import makedirs from . import net from .net import DEFAULT_HEADERS, detect_encoding -cache_timeout = 30*24*60*60 # default is 30 days + +cache_timeout = 30*24*60*60 # default is 30 days COMPRESS_TYPES = ( 'text/html', 'text/plain', 'text/xml', + 'text/x-wiki', 'application/json', 'application/xhtml+xml', 'application/x-javascript', @@ -33,7 +41,7 @@ COMPRESS_TYPES = ( 'application/rss+xml' ) -def status(url, data=None, headers=DEFAULT_HEADERS, timeout=cache_timeout): +def status(url, data=None, headers=None, timeout=cache_timeout): ''' >>> status('http://google.com') 200 @@ -43,7 +51,7 @@ def status(url, data=None, headers=DEFAULT_HEADERS, timeout=cache_timeout): headers = get_headers(url, data, headers) return int(headers['status']) -def exists(url, data=None, headers=DEFAULT_HEADERS, timeout=cache_timeout): +def exists(url, data=None, headers=None, timeout=cache_timeout): ''' >>> exists('http://google.com') True @@ -55,14 +63,14 @@ def exists(url, data=None, headers=DEFAULT_HEADERS, timeout=cache_timeout): return True return False -def get_headers(url, data=None, headers=DEFAULT_HEADERS, 
timeout=cache_timeout): +def get_headers(url, data=None, headers=None, timeout=cache_timeout): url_headers = store.get(url, data, headers, timeout, "headers") if not url_headers: url_headers = net.get_headers(url, data, headers) store.set(url, data, -1, url_headers) return url_headers -def get_json(url, data=None, headers=DEFAULT_HEADERS, timeout=cache_timeout): +def get_json(url, data=None, headers=None, timeout=cache_timeout): return json.loads(read_url(url, data, headers, timeout).decode('utf-8')) class InvalidResult(Exception): @@ -76,7 +84,7 @@ def _fix_unicode_url(url): url = url.encode('utf-8') return url -def read_url(url, data=None, headers=DEFAULT_HEADERS, timeout=cache_timeout, valid=None, unicode=False): +def read_url(url, data=None, headers=None, timeout=cache_timeout, valid=None, unicode=False): ''' url - url to load data - possible post data @@ -87,24 +95,35 @@ def read_url(url, data=None, headers=DEFAULT_HEADERS, timeout=cache_timeout, val ''' if net.DEBUG: print('ox.cache.read_url', url) - #FIXME: send last-modified / etag from cache and only update if needed - #url = _fix_unicode_url(url) + # FIXME: send last-modified / etag from cache and only update if needed + # url = _fix_unicode_url(url) result = store.get(url, data, headers, timeout) url_headers = {} if not result: - try: - url_headers, result = net.read_url(url, data, headers, return_headers=True) - except urllib.error.HTTPError as e: - e.headers['Status'] = "%s" % e.code - for key in e.headers: - url_headers[key.lower()] = e.headers[key] - result = e.read() - if url_headers.get('content-encoding', None) == 'gzip': - result = gzip.GzipFile(fileobj=BytesIO(result)).read() - if not valid or valid(result, url_headers): - store.set(url, post_data=data, data=result, headers=url_headers) + if USE_REQUESTS: + r = requests.get(url, headers=headers) + for key in r.headers: + url_headers[key.lower()] = r.headers[key] + result = r.content + url_headers['Status'] = "%s" % r.status_code + if not valid 
or valid(result, url_headers): + store.set(url, post_data=data, data=result, headers=url_headers) + else: + raise InvalidResult(result, url_headers) else: - raise InvalidResult(result, url_headers) + try: + url_headers, result = net.read_url(url, data, headers, return_headers=True) + except urllib.error.HTTPError as e: + e.headers['Status'] = "%s" % e.code + for key in e.headers: + url_headers[key.lower()] = e.headers[key] + result = e.read() + if url_headers.get('content-encoding', None) == 'gzip': + result = gzip.GzipFile(fileobj=BytesIO(result)).read() + if not valid or valid(result, url_headers): + store.set(url, post_data=data, data=result, headers=url_headers) + else: + raise InvalidResult(result, url_headers) if unicode: ctype = url_headers.get('content-type', '').lower() if 'charset' in ctype: @@ -116,13 +135,13 @@ def read_url(url, data=None, headers=DEFAULT_HEADERS, timeout=cache_timeout, val result = result.decode(encoding) return result -get_url=read_url +get_url = read_url def save_url(url, filename, overwrite=False): if not os.path.exists(filename) or overwrite: dirname = os.path.dirname(filename) if dirname and not os.path.exists(dirname): - os.makedirs(dirname) + makedirs(dirname) data = read_url(url) with open(filename, 'wb') as f: f.write(data) @@ -134,7 +153,7 @@ class Cache: def __init__(self): pass - def get(self, url, data, headers=DEFAULT_HEADERS, timeout=-1, value="data"): + def get(self, url, data, headers=None, timeout=-1, value="data"): ''' if value == 'data' return data of url if its in the cache else None if value == 'headers' return headers for url @@ -159,7 +178,7 @@ class SQLiteCache(Cache): def __init__(self): path = cache_path() if not os.path.exists(path): - os.makedirs(path) + makedirs(path) self.db = os.path.join(path, "cache.sqlite") self.create() @@ -192,7 +211,7 @@ class SQLiteCache(Cache): def set_setting(self, c, key, value): c.execute(u'INSERT OR REPLACE INTO setting values (?, ?)', (key, str(value))) - def get(self, url, 
data={}, headers=DEFAULT_HEADERS, timeout=-1, value="data"): + def get(self, url, data={}, headers=None, timeout=-1, value="data"): r = None if timeout == 0: return r @@ -225,7 +244,7 @@ class SQLiteCache(Cache): conn.close() return r - def delete(self, url, data=None, headers=DEFAULT_HEADERS): + def delete(self, url, data=None, headers=None): url_hash = self.get_url_hash(url, data) conn = self.connect() c = conn.cursor() @@ -244,7 +263,8 @@ class SQLiteCache(Cache): c = conn.cursor() # Insert a row of data - if not post_data: post_data="" + if not post_data: + post_data = "" only_headers = 0 if data == -1: only_headers = 1 @@ -280,11 +300,11 @@ class FileCache(Cache): def files(self, domain, h): prefix = os.path.join(self.root, domain, h[:2], h[2:4], h[4:6], h[6:8]) - i = os.path.join(prefix, '%s.json'%h) - f = os.path.join(prefix, '%s.dat'%h) + i = os.path.join(prefix, '%s.json' % h) + f = os.path.join(prefix, '%s.dat' % h) return prefix, i, f - def get(self, url, data={}, headers=DEFAULT_HEADERS, timeout=-1, value="data"): + def get(self, url, data={}, headers=None, timeout=-1, value="data"): r = None if timeout == 0: return r @@ -308,13 +328,13 @@ class FileCache(Cache): if value == 'headers': r = info['headers'] else: - with open(f, 'rb') as data: - r = data.read() + with open(f, 'rb') as fd: + r = fd.read() if info['compressed']: r = zlib.decompress(r) return r - def delete(self, url, data=None, headers=DEFAULT_HEADERS): + def delete(self, url, data=None, headers=None): url_hash = self.get_url_hash(url, data) domain = self.get_domain(url) @@ -344,15 +364,104 @@ class FileCache(Cache): if not info['only_headers']: if info['compressed']: data = zlib.compress(data) - elif not isinstance(data, str): + elif not isinstance(data, bytes): data = data.encode('utf-8') with open(f, 'wb') as _f: _f.write(data) - with open(i, 'wb') as _i: + with open(i, 'w') as _i: json.dump(info, _i) + +class KVCache(Cache): + _bytes_only = False + + def _keys(self, url, data, 
headers=None): + url_hash = self.get_url_hash(url, data) + domain = self.get_domain(url) + key = 'ox:%s:%s' % (domain, url_hash) + return key, key + ':data' + + def get(self, url, data={}, headers=None, timeout=-1, value="data"): + if timeout == 0: + return None + + r = None + info_key, data_key = self._keys(url, data, headers) + info = self.backend.get(info_key) + if info: + if self._bytes_only: + info = json.loads(info.decode()) + now = time.mktime(time.localtime()) + expired = now-timeout + + if value != 'headers' and info['only_headers']: + return None + if timeout < 0 or info['created'] > expired: + if value == 'headers': + r = info['headers'] + else: + r = self.backend.get(data_key) + if r and info['compressed']: + r = zlib.decompress(r) + return r + + def delete(self, url, data=None, headers=None): + for key in self._keys(url, data, headers): + self.backend.delete(key) + + def set(self, url, post_data, data, headers): + info_key, data_key = self._keys(url, post_data, headers) + + created = time.mktime(time.localtime()) + content_type = headers.get('content-type', '').split(';')[0].strip() + + info = { + 'compressed': content_type in COMPRESS_TYPES, + 'only_headers': data == -1, + 'created': created, + 'headers': headers, + 'url': url, + } + if post_data: + info['post_data'] = post_data + if not info['only_headers']: + if info['compressed']: + data = zlib.compress(data) + elif not isinstance(data, bytes): + data = data.encode('utf-8') + self.backend.set(data_key, data) + if self._bytes_only: + info = json.dumps(info, ensure_ascii=False).encode('utf-8') + self.backend.set(info_key, info) + + +class MemCache(KVCache): + _bytes_only = False + + def __init__(self): + import pylibmc + + f, self.host = cache_path().split(':', 1) + self.backend = pylibmc.Client([self.host]) + self.backend.behaviors['connect_timeout'] = 60000 + + +class RedisCache(KVCache): + _bytes_only = True + + def __init__(self): + import redis + + f, self.url = cache_path().split(':', 1) + 
self.backend = redis.from_url(self.url) + + if cache_path().startswith('fs:'): store = FileCache() +elif cache_path().startswith('redis:'): + store = RedisCache() +elif cache_path().startswith('memcache:'): + store = MemCache() else: store = SQLiteCache() diff --git a/Shared/lib/python3.4/site-packages/ox/file.py b/Shared/lib/python3.4/site-packages/ox/file.py index 1791061..ab789a3 100644 --- a/Shared/lib/python3.4/site-packages/ox/file.py +++ b/Shared/lib/python3.4/site-packages/ox/file.py @@ -1,36 +1,37 @@ # -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 # GPL 2008 -from __future__ import division, with_statement, print_function -import os +from __future__ import division, print_function +from distutils.spawn import find_executable +from glob import glob import hashlib +import os import re import shutil +import sqlite3 import struct import subprocess -import sqlite3 -from distutils.spawn import find_executable from .utils import json -__all__ = ['sha1sum', 'oshash', 'avinfo', 'makedirs'] +__all__ = ['sha1sum', 'oshash', 'avinfo', 'makedirs', 'iexists'] EXTENSIONS = { 'audio': [ - 'aac', 'aif', 'aiff', - 'flac', 'm4a', 'mp3', 'oga', 'ogg', 'wav', 'wma' + 'aac', 'aif', 'aiff', 'amr', + 'flac', 'm4a', 'mp3', 'oga', 'ogg', 'wav', 'wma', 'opus' ], 'image': [ 'bmp', 'gif', 'jpeg', 'jpg', 'png', 'svg', 'webp' ], 'subtitle': [ - 'idx', 'srt', 'sub' + 'idx', 'srt', 'sub', 'vtt' ], 'video': [ '3gp', - 'avi', 'divx', 'dv', 'flv', 'm2t', 'm4v', 'mkv', 'mov', 'mp4', - 'mpeg', 'mpg', 'mts', 'ogm', 'ogv', 'rm', 'rmvb', 'vob', 'webm', 'wmv', - 'mod', 'tod', # http://en.wikipedia.org/wiki/MOD_and_TOD + 'avi', 'divx', 'dv', 'flv', 'm2t', 'm2ts', 'm4v', 'mkv', 'mov', 'mp4', + 'mpeg', 'mpg', 'mts', 'ogm', 'ogv', 'rm', 'rmvb', 'vob', 'webm', 'wmv', 'asf', + 'mod', 'tod', # http://en.wikipedia.org/wiki/MOD_and_TOD 'mxf', 'ts' ], } @@ -131,25 +132,25 @@ def oshash(filename, cached=True): if filesize < 65536: for x in range(int(filesize/bytesize)): buffer = f.read(bytesize) - 
(l_value,)= struct.unpack(longlongformat, buffer) + (l_value,) = struct.unpack(longlongformat, buffer) hash += l_value - hash = hash & 0xFFFFFFFFFFFFFFFF #to remain as 64bit number + hash = hash & 0xFFFFFFFFFFFFFFFF # to remain as 64bit number else: for x in range(int(65536/bytesize)): buffer = f.read(bytesize) - (l_value,)= struct.unpack(longlongformat, buffer) + (l_value,) = struct.unpack(longlongformat, buffer) hash += l_value - hash = hash & 0xFFFFFFFFFFFFFFFF #to remain as 64bit number - f.seek(max(0,filesize-65536),0) + hash = hash & 0xFFFFFFFFFFFFFFFF # to remain as 64bit number + f.seek(max(0, filesize-65536), 0) for x in range(int(65536/bytesize)): buffer = f.read(bytesize) - (l_value,)= struct.unpack(longlongformat, buffer) + (l_value,) = struct.unpack(longlongformat, buffer) hash += l_value hash = hash & 0xFFFFFFFFFFFFFFFF f.close() - returnedhash = "%016x" % hash + returnedhash = "%016x" % hash return returnedhash - except(IOError): + except IOError: return "IOError" def avinfo(filename, cached=True): @@ -160,23 +161,25 @@ def avinfo(filename, cached=True): return ffprobe(filename) ffmpeg2theora = cmd('ffmpeg2theora') p = subprocess.Popen([ffmpeg2theora], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - info, error = p.communicate() - version = info.split('\n')[0].split(' - ')[0].split(' ')[-1] + stdout, error = p.communicate() + stdout = stdout.decode('utf-8') + version = stdout.split('\n')[0].split(' - ')[0].split(' ')[-1] if version < '0.27': raise EnvironmentError('version of ffmpeg2theora needs to be 0.27 or later, found %s' % version) p = subprocess.Popen([ffmpeg2theora, '--info', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - info, error = p.communicate() + stdout, error = p.communicate() + stdout = stdout.decode('utf-8') try: - info = json.loads(info) + info = json.loads(stdout) except: - #remove metadata, can be broken + # remove metadata, can be broken reg = re.compile('"metadata": {.*?},', re.DOTALL) - info = re.sub(reg, '', 
info) - info = json.loads(info) + stdout = re.sub(reg, '', stdout) + info = json.loads(stdout) if 'video' in info: for v in info['video']: - if not 'display_aspect_ratio' in v and 'width' in v: + if 'display_aspect_ratio' not in v and 'width' in v: v['display_aspect_ratio'] = '%d:%d' % (v['width'], v['height']) v['pixel_aspect_ratio'] = '1:1' if len(info.get('audio', [])) > 1: @@ -189,12 +192,14 @@ def avinfo(filename, cached=True): ffmpeg = cmd('ffmpeg') p = subprocess.Popen([ffmpeg, '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() + stderr = stderr.decode('utf-8') languages = [re.compile('\((.+?)\):').findall(l) for l in stderr.split('\n') if 'Stream' in l and 'Audio' in l] if len(languages) == len(info['audio']): for i, stream in enumerate(info['audio']): language = languages[i] if language and language[0] != 'und': stream['language'] = language[0] + fix_coverart(info) return info return {'path': filename, 'size': 0} @@ -203,6 +208,7 @@ def ffprobe(filename): p = subprocess.Popen([ cmd('ffprobe'), '-show_format', + '-show_chapters', '-show_streams', '-print_format', 'json', @@ -210,6 +216,7 @@ def ffprobe(filename): ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) info, error = p.communicate() + info = info.decode('utf-8') ffinfo = json.loads(info) def fix_value(key, value): @@ -224,7 +231,7 @@ def ffprobe(filename): return value info = {} - if not 'format' in ffinfo: + if 'format' not in ffinfo: info['error'] = 'badfile' else: for key in ('duration', 'size', 'bit_rate'): @@ -235,8 +242,18 @@ def ffprobe(filename): info['audio'] = [] info['video'] = [] info['metadata'] = ffinfo['format'].get('tags', {}) + chapters = ffinfo.get('chapters', []) + if chapters: + info['chapters'] = [ + { + 'in': float(chapter['start_time']), + 'out': float(chapter['end_time']), + 'value': chapter.get('tags', {}).get('title') + } + for chapter in chapters if chapter.get('tags', {}).get('title') + ] for s in ffinfo['streams']: - tags 
= s.pop('tags', {}) + tags = s.pop('tags', {}) language = None for t in tags: if t == 'language': @@ -278,17 +295,29 @@ def ffprobe(filename): info[s['codec_type']].append(stream) else: pass - #print s + # print s for v in info['video']: + if 'rotate' in info.get('metadata', {}) and int(info['metadata']['rotate']) in (-180, -90, 90, 180): + v['width'], v['height'] = v['height'], v['width'] k = 'display_aspect_ratio' - if not k in v and 'width' in v \ + if k not in v and 'width' in v \ or (k in v and v[k] == '0:1'): v[k] = '%d:%d' % (v['width'], v['height']) v['pixel_aspect_ratio'] = '1:1' info['oshash'] = oshash(filename) info['path'] = filename - if not 'size' in info: + if 'size' not in info: info['size'] = os.path.getsize(filename) + + fix_coverart(info) + return info + +def fix_coverart(info): + if info.get('video') \ + and info['path'].split('.')[-1] in EXTENSIONS['audio'] \ + and info['video'][0]['codec'] in EXTENSIONS['image'] + ['mjpeg']: + info['cover'] = info.pop('video') + info['video'] = [] return info def makedirs(path): @@ -353,3 +382,17 @@ def write_path(file): path = os.path.split(file)[0] if path and not os.path.exists(path): os.makedirs(path) + +def iexists(path): + parts = path.split(os.sep) + name = parts[-1].lower() + if len(parts) == 1: + folder = '.' 
+ else: + folder = os.path.dirname(path) + try: + files = os.listdir(folder) + except FileNotFoundError: + return False + files = {os.path.basename(f).lower() for f in files} + return name in files diff --git a/Shared/lib/python3.4/site-packages/ox/fixunicode.py b/Shared/lib/python3.4/site-packages/ox/fixunicode.py index b649a58..d3a162d 100644 --- a/Shared/lib/python3.4/site-packages/ox/fixunicode.py +++ b/Shared/lib/python3.4/site-packages/ox/fixunicode.py @@ -6,7 +6,7 @@ from __future__ import print_function import unicodedata -from six import unichr, PY2 +from six import unichr, text_type __all__ = ['fix_bad_unicode'] @@ -151,10 +151,7 @@ def text_badness(text): - Improbable single-byte characters, such as ƒ or ¬ - Letters in somewhat rare scripts ''' - if PY2: - assert isinstance(text, unicode) - else: - assert isinstance(text, str) + assert isinstance(text, text_type) errors = 0 very_weird_things = 0 weird_things = 0 diff --git a/Shared/lib/python3.4/site-packages/ox/form.py b/Shared/lib/python3.4/site-packages/ox/form.py index d9fe66d..faa1551 100644 --- a/Shared/lib/python3.4/site-packages/ox/form.py +++ b/Shared/lib/python3.4/site-packages/ox/form.py @@ -1,11 +1,12 @@ # -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 # GPL 2014 -from __future__ import with_statement, print_function +from __future__ import print_function import itertools import mimetypes -import random +import os +import hashlib import sys from six import PY2 @@ -20,8 +21,7 @@ _fmt = '%%0%dd' % _width def _make_boundary(): # Craft a random boundary. - token = random.randrange(sys.maxsize) - boundary = ('=' * 15) + (_fmt % token) + '==' + boundary = ('=' * 15) + hashlib.sha1(os.urandom(32)).hexdigest() + '==' return boundary class MultiPartForm(object): @@ -75,7 +75,7 @@ class MultiPartForm(object): # line is separated by '\r\n'. 
parts = [] part_boundary = '--' + self.boundary - + # Add the form fields parts.extend( [ part_boundary, @@ -85,7 +85,7 @@ class MultiPartForm(object): ] for name, value in self.form_fields ) - + # Add the files to upload parts.extend( [ part_boundary, @@ -97,7 +97,7 @@ class MultiPartForm(object): ] for field_name, filename, content_type, body in self.files ) - + # Flatten the list and add closing boundary marker, # then return CR+LF separated data flattened = list(itertools.chain(*parts)) diff --git a/Shared/lib/python3.4/site-packages/ox/format.py b/Shared/lib/python3.4/site-packages/ox/format.py index aafd89c..ad18c31 100644 --- a/Shared/lib/python3.4/site-packages/ox/format.py +++ b/Shared/lib/python3.4/site-packages/ox/format.py @@ -4,13 +4,14 @@ import math import re import string +from six import text_type def toAZ(num): """ Converts an integer to bijective base 26 string using A-Z >>> for i in range(1, 1000): assert fromAZ(toAZ(i)) == i - + >>> toAZ(1) 'A' @@ -20,7 +21,8 @@ def toAZ(num): >>> toAZ(1234567890) 'CYWOQVJ' """ - if num < 1: raise ValueError("must supply a positive integer") + if num < 1: + raise ValueError("must supply a positive integer") digits = string.ascii_uppercase az = '' while num != 0: @@ -30,7 +32,7 @@ def toAZ(num): az = digits[r] + az return az -encode_base26=toAZ +encode_base26 = toAZ def fromAZ(num): """ @@ -45,7 +47,7 @@ def fromAZ(num): >>> fromAZ('FOO') 4461 """ - num = num.replace('-','') + num = num.replace('-', '') digits = string.ascii_uppercase r = 0 for exp, char in enumerate(reversed(num)): @@ -64,7 +66,8 @@ def to26(q): >>> to26(347485647) 'BDGKMAP' """ - if q < 0: raise ValueError("must supply a positive integer") + if q < 0: + raise ValueError("must supply a positive integer") base26 = string.ascii_uppercase converted = [] while q != 0: @@ -73,7 +76,7 @@ def to26(q): converted.insert(0, l) return "".join(converted) or 'A' -decode_base26=toAZ +decode_base26 = toAZ def from26(q): """ @@ -82,7 +85,7 @@ def from26(q): 0 
""" base26 = string.ascii_uppercase - q = q.replace('-','') + q = q.replace('-', '') r = 0 for i in q: r = r * 26 + base26.index(i.upper()) @@ -123,7 +126,8 @@ def to32(q): ValueError: must supply a positive integer """ - if q < 0: raise ValueError("must supply a positive integer") + if q < 0: + raise ValueError("must supply a positive integer") letters = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" converted = [] while q != 0: @@ -188,7 +192,7 @@ def from32(q): 'Z': 31, } base32 = ('0123456789' + string.ascii_uppercase)[:32] - q = q.replace('-','') + q = q.replace('-', '') q = ''.join([base32[_32map[i.upper()]] for i in q]) return int(q, 32) @@ -210,7 +214,8 @@ def to36(q): ... ValueError: must supply a positive integer """ - if q < 0: raise ValueError("must supply a positive integer") + if q < 0: + raise ValueError("must supply a positive integer") letters = "0123456789abcdefghijklmnopqrstuvwxyz" converted = [] while q != 0: @@ -233,7 +238,7 @@ def int_value(strValue, default=u''): u'' """ try: - val = re.compile('(\d+)').findall(unicode(strValue).strip())[0] + val = re.compile('(\d+)').findall(text_type(strValue).strip())[0] except: val = default return val @@ -250,7 +255,7 @@ def float_value(strValue, default=u''): u'' """ try: - val = re.compile('([\d.]+)').findall(unicode(strValue).strip())[0] + val = re.compile('([\d.]+)').findall(text_type(strValue).strip())[0] except: val = default return val @@ -286,7 +291,7 @@ def format_number(number, longName, shortName): n = number / math.pow(1024, i + 1) return '%s %s%s' % (format_thousands('%.*f' % (i, n)), prefix[i], shortName) -def format_thousands(number, separator = ','): +def format_thousands(number, separator=','): """ Return the number with separators (1,000,000) @@ -316,18 +321,18 @@ def format_pixels(number): return format_number(number, 'pixel', 'px') def format_currency(amount, currency="$"): - if amount: - temp = "%.2f" % amount - profile=re.compile(r"(\d)(\d\d\d[.,])") - while 1: - temp, count = 
re.subn(profile,r"\1,\2",temp) - if not count: - break - if temp.startswith('-'): - return "-"+ currency + temp[1:-3] - return currency + temp[:-3] - else: - return "" + if amount: + temp = "%.2f" % amount + profile = re.compile(r"(\d)(\d\d\d[.,])") + while 1: + temp, count = re.subn(profile, r"\1,\2", temp) + if not count: + break + if temp.startswith('-'): + return "-" + currency + temp[1:-3] + return currency + temp[:-3] + else: + return "" def plural(amount, unit, plural='s'): ''' @@ -339,7 +344,8 @@ def plural(amount, unit, plural='s'): if abs(amount) != 1: if plural == 's': unit = unit + plural - else: unit = plural + else: + unit = plural return "%s %s" % (format_thousands(amount), unit) def format_duration(ms, verbosity=0, years=True, hours=True, milliseconds=True): @@ -390,14 +396,14 @@ def format_duration(ms, verbosity=0, years=True, hours=True, milliseconds=True): duration += ".%03d" % ms else: if verbosity == 1: - durations = ["%sd" % d, "%sh" % h, "%sm" % m, "%ss" % s] + durations = ["%sd" % d, "%sh" % h, "%sm" % m, "%ss" % s] if years: durations.insert(0, "%sy" % y) if milliseconds: durations.append("%sms" % ms) else: - durations = [plural(d, 'day'), plural(h,'hour'), - plural(m, 'minute'), plural(s, 'second')] + durations = [plural(d, 'day'), plural(h, 'hour'), + plural(m, 'minute'), plural(s, 'second')] if years: durations.insert(0, plural(y, 'year')) if milliseconds: @@ -434,7 +440,7 @@ def parse_timecode(string): ''' timecode = 0 for i, v in enumerate(list(reversed(string.split(':')))[:4]): - timecode += float(v) * ( 86400 if i == 3 else pow(60, i)) + timecode += float(v) * (86400 if i == 3 else pow(60, i)) return timecode def ms2runtime(ms, shortenLong=False): @@ -482,7 +488,8 @@ def time2ms(timeString): p = timeString.split(':') for i in range(len(p)): _p = p[i] - if _p.endswith('.'): _p =_p[:-1] + if _p.endswith('.'): + _p = _p[:-1] ms = ms * 60 + float(_p) return int(ms * 1000) diff --git a/Shared/lib/python3.4/site-packages/ox/geo.py 
b/Shared/lib/python3.4/site-packages/ox/geo.py index 56733f6..85c1b72 100644 --- a/Shared/lib/python3.4/site-packages/ox/geo.py +++ b/Shared/lib/python3.4/site-packages/ox/geo.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 # GPL 2008 -from __future__ import with_statement, print_function +from __future__ import print_function import math diff --git a/Shared/lib/python3.4/site-packages/ox/html.py b/Shared/lib/python3.4/site-packages/ox/html.py index 7154e21..bd59ace 100644 --- a/Shared/lib/python3.4/site-packages/ox/html.py +++ b/Shared/lib/python3.4/site-packages/ox/html.py @@ -10,7 +10,7 @@ letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' # Configuration for add_links() function -LEADING_PUNCTUATION = ['(', '<', '<'] +LEADING_PUNCTUATION = ['(', '<', '<'] TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '>', "'", '"'] # list of possible strings used for bullets in bulleted lists @@ -18,16 +18,16 @@ DOTS = ['·', '*', '\xe2\x80\xa2', '•', '•', '•'] unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)') word_split_re = re.compile(r'(\s+)') -punctuation_re = re.compile('^(?P(?:%s)*)(?P.*?)(?P(?:%s)*)$' % \ - ('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]), - '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION]))) +punctuation_re = re.compile('^(?P(?:%s)*)(?P.*?)(?P(?:%s)*)$' % ( + '|'.join([re.escape(x) for x in LEADING_PUNCTUATION]), + '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION]))) simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$') link_target_attribute_re = re.compile(r'(]*?)target=[^\s>]+') html_gunk_re = re.compile(r'(?:
|<\/i>|<\/b>|<\/em>|<\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE) hard_coded_bullets_re = re.compile(r'((?:

(?:%s).*?[a-zA-Z].*?

\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL) trailing_empty_content_re = re.compile(r'(?:

(?: |\s|
)*?

\s*)+\Z') if PY2: - del x # Temporary variable + del x # Temporary variable def escape(html): ''' @@ -44,7 +44,7 @@ def linebreaks(value): ''' Converts newlines into

and
''' - value = re.sub(r'\r\n|\r|\n', '\n', value) # normalize newlines + value = re.sub(r'\r\n|\r|\n', '\n', value) # normalize newlines paras = re.split('\n{2,}', value) paras = ['

%s

' % p.strip().replace('\n', '
') for p in paras] return '\n\n'.join(paras) @@ -83,21 +83,23 @@ def add_links(text, trim_url_limit=None, nofollow=False): If nofollow is True, the URLs in link text will get a rel="nofollow" attribute. """ - trim_url = lambda x, limit=trim_url_limit: limit is not None and (x[:limit] + (len(x) >=limit and '...' or '')) or x + trim_url = lambda x, limit=trim_url_limit: limit is not None and (x[:limit] + (len(x) >= limit and '...' or '')) or x words = word_split_re.split(text) nofollow_attr = nofollow and ' rel="nofollow"' or '' for i, word in enumerate(words): match = punctuation_re.match(word) if match: lead, middle, trail = match.groups() - if middle.startswith('www.') or ('@' not in middle and not middle.startswith('http://') and \ - len(middle) > 0 and middle[0] in letters + string.digits and \ - (middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))): + if middle.startswith('www.') or ('@' not in middle and not middle.startswith('http://') and + len(middle) > 0 and middle[0] in letters + string.digits and + (middle.endswith('.org') or + middle.endswith('.net') or + middle.endswith('.com'))): middle = '
%s' % (middle, nofollow_attr, trim_url(middle)) if middle.startswith('http://') or middle.startswith('https://'): middle = '%s' % (middle, nofollow_attr, trim_url(middle)) - if '@' in middle and not middle.startswith('www.') and not ':' in middle \ - and simple_email_re.match(middle): + if '@' in middle and not middle.startswith('www.') and ':' not in middle \ + and simple_email_re.match(middle): middle = '%s' % (middle, middle) if lead + middle + trail != word: words[i] = lead + middle + trail @@ -127,6 +129,7 @@ def clean_html(text): # Trim stupid HTML such as
. text = html_gunk_re.sub('', text) # Convert hard-coded bullets into HTML unordered lists. + def replace_p_tags(match): s = match.group().replace('

', '') for d in DOTS: @@ -153,6 +156,7 @@ def decode_html(html): if isinstance(html, bytes): html = html.decode('utf-8') uchr = unichr + def entitydecode(match, uchr=uchr): entity = match.group(1) if entity == '#x80': @@ -282,7 +286,7 @@ def sanitize_html(html, tags=None, global_attributes=[]): {'name': 'thead'}, {'name': 'tr'}, # other - {'name': '[]'}, + {'name': '[]'}, { 'name': 'a', 'required': ['href'], @@ -328,15 +332,14 @@ def sanitize_html(html, tags=None, global_attributes=[]): for tag in tags: valid_attributes[tag['name']] = tag.get('required', []) \ - + tag.get('optional', []) \ - + global_attributes + + tag.get('optional', []) + global_attributes required_attributes[tag['name']] = tag.get('required', []) validation[tag['name']] = tag.get('validation', {}) if '[]' in validation: html = re.sub( re.compile('\[((https?:\/\/|\/).+?) (.+?)\]', re.IGNORECASE), - '\\3', html); + '\\3', html) parts = split_tags(html) for i, part in enumerate(parts): @@ -351,17 +354,17 @@ def sanitize_html(html, tags=None, global_attributes=[]): a = attr_re.findall(attributes) attrs = dict(a) - if not closing and not name in non_closing_tags: + if not closing and name not in non_closing_tags: level += 1 - if not attrs and attributes or name not in valid_tags: + if not attrs and attributes or name not in valid_tags: valid = False else: valid = True for key in set(attrs) - set(valid_attributes[name]): del attrs[key] for key in required_attributes[tag['name']]: - if not key in attrs: + if key not in attrs: valid = False if valid: @@ -395,6 +398,7 @@ def sanitize_html(html, tags=None, global_attributes=[]): def split_tags(string): tags = [] + def collect(match): tags.append(match.group(0)) return '\0' diff --git a/Shared/lib/python3.4/site-packages/ox/image.py b/Shared/lib/python3.4/site-packages/ox/image.py index e7c7541..0fad5eb 100644 --- a/Shared/lib/python3.4/site-packages/ox/image.py +++ b/Shared/lib/python3.4/site-packages/ox/image.py @@ -14,12 +14,13 @@ except: import 
ImageFont -ZONE_INDEX = [] -for pixel_index in range(64): - x, y = pixel_index % 8, int(pixel_index / 8) - ZONE_INDEX.append(int(x / 2) + int(y / 4) * 4) -del x -del y +ZONE_INDEX = [ + (int(x / 2) + int(y / 4) * 4) + for x, y in [ + (pixel_index % 8, int(pixel_index / 8)) + for pixel_index in range(64) + ] +] def drawText(image, position, text, font_file, font_size, color): draw = ImageDraw.Draw(image) @@ -165,8 +166,10 @@ def wrapText(text, max_width, max_lines, font_file, font_size): if width <= max_width and width > min_width: min_width = width return min_width + def get_width(string): return draw.textsize(string, font=font)[0] + image = Image.new('RGB', (1, 1)) draw = ImageDraw.Draw(image) font = ImageFont.truetype(font_file, font_size, encoding='unic') diff --git a/Shared/lib/python3.4/site-packages/ox/iso.py b/Shared/lib/python3.4/site-packages/ox/iso.py index 6c28435..2b63e8c 100644 --- a/Shared/lib/python3.4/site-packages/ox/iso.py +++ b/Shared/lib/python3.4/site-packages/ox/iso.py @@ -208,7 +208,7 @@ def langTo3Code(lang): if lang: lang = langEnglishName(lang) if lang: - lang=lang.lower() + lang = lang.lower() for l in _iso639_languages: if l[0].lower() == lang: return l[3] @@ -218,7 +218,7 @@ def langTo2Code(lang): if lang: lang = langEnglishName(lang) if lang: - lang=lang.lower() + lang = lang.lower() for l in _iso639_languages: if l[0].lower() == lang: return l[2] diff --git a/Shared/lib/python3.4/site-packages/ox/js.py b/Shared/lib/python3.4/site-packages/ox/js.py index 5bc68d5..43b6fb0 100644 --- a/Shared/lib/python3.4/site-packages/ox/js.py +++ b/Shared/lib/python3.4/site-packages/ox/js.py @@ -11,9 +11,9 @@ def minify(source, comment=''): pass # python2 performance with unicode string is terrible if PY2: - if isinstance(source, unicode): + if isinstance(source, unicode): # pylint: disable=undefined-variable source = source.encode('utf-8') - if isinstance(comment, unicode): + if isinstance(comment, unicode): # pylint: disable=undefined-variable 
comment = comment.encode('utf-8') tokens = tokenize(source) length = len(tokens) @@ -30,20 +30,20 @@ def minify(source, comment=''): # numbers or strings or unary operators or grouping operators # with a single newline, otherwise remove it if prevToken and nextToken\ - and (prevToken['type'] in ['identifier', 'number', 'string']\ - or prevToken['value'] in ['++', '--', ')', ']', '}'])\ - and (nextToken['type'] in ['identifier', 'number', 'string']\ - or nextToken['value'] in ['+', '-', '++', '--', '~', '!', '(', '[', '{']): + and (prevToken['type'] in ['identifier', 'number', 'string'] + or prevToken['value'] in ['++', '--', ')', ']', '}']) \ + and (nextToken['type'] in ['identifier', 'number', 'string'] + or nextToken['value'] in ['+', '-', '++', '--', '~', '!', '(', '[', '{']): minified += '\n' elif token['type'] == 'whitespace': # replace whitespace between two tokens that are identifiers or # numbers, or between a token that ends with "+" or "-" and one that # begins with "+" or "-", with a single space, otherwise remove it - if prevToken and nextToken\ - and ((prevToken['type'] in ['identifier', 'number']\ - and nextToken['type'] in ['identifier', 'number']) - or (prevToken['value'] in ['+', '-', '++', '--'] - and nextToken['value'] in ['+', '-', '++', '--'])): + if prevToken and nextToken \ + and ((prevToken['type'] in ['identifier', 'number'] and + nextToken['type'] in ['identifier', 'number']) or + (prevToken['value'] in ['+', '-', '++', '--'] and + nextToken['value'] in ['+', '-', '++', '--'])): minified += ' ' elif token['type'] != 'comment': # remove comments and leave all other tokens untouched @@ -178,7 +178,7 @@ def tokenize(source): 'value': value }) if type == 'comment': - lines = value.split('\n'); + lines = value.split('\n') column = len(lines[-1]) line += len(lines) - 1 elif type == 'linebreak': diff --git a/Shared/lib/python3.4/site-packages/ox/jsonc.py b/Shared/lib/python3.4/site-packages/ox/jsonc.py index 83751ea..41dd306 100644 --- 
a/Shared/lib/python3.4/site-packages/ox/jsonc.py +++ b/Shared/lib/python3.4/site-packages/ox/jsonc.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 -from __future__ import with_statement, print_function +from __future__ import print_function import re @@ -23,11 +23,11 @@ def loads(source): try: m = re.search(r'line (\d+) column (\d+)', msg) if m: - (lineno, colno) = map(int, m.groups()) + (lineno, colno) = [int(n) for n in m.groups()] except: pass if lineno and colno: s = minified.split('\n') context = s[lineno-1][max(0, colno-30):colno+30] - msg += ' at:\n\n %s\n %s\033[1m^\033[0m' %(context, ' ' * (colno - max(0, colno-30) - 2)) + msg += ' at:\n\n %s\n %s\033[1m^\033[0m' % (context, ' ' * (colno - max(0, colno-30) - 2)) raise ValueError(msg) diff --git a/Shared/lib/python3.4/site-packages/ox/movie.py b/Shared/lib/python3.4/site-packages/ox/movie.py index bb93101..cbf591d 100644 --- a/Shared/lib/python3.4/site-packages/ox/movie.py +++ b/Shared/lib/python3.4/site-packages/ox/movie.py @@ -29,7 +29,7 @@ def format_path(data, directory_key='director'): director = data['directorSort'] or ['Unknown Director'] title = data['seriesTitle' if data['isEpisode'] else 'title'] or 'Untitled' year = data['seriesYear' if data['isEpisode'] else 'year'] or None - parts = list(map(format_underscores, filter(lambda x: x != None, [ + parts = list(map(format_underscores, filter(lambda x: x is not None, [ u'; '.join(director[:10]), u'%s%s' % (title, u' (%s)' % year if year else ''), u'%s%s%s%s%s%s' % ( @@ -60,7 +60,7 @@ def parse_item_files(files): def get_version_key(file, extension=True): return '%s/%s-part/%s' % ( file['version'] or '', - 'single' if file['part'] == None else 'multi', + 'single' if file['part'] is None else 'multi', file['extension'] if extension else '' ) # filter out duplicate files (keep shortest path, sorted alphabetically) @@ -70,7 +70,7 @@ def parse_item_files(files): duplicate_files = [] for key in [get_file_key(file) for 
file in files]: key_files = sorted( - sorted([file for file in files if get_file_key(file) == key]), + [file for file in files if get_file_key(file) == key], key=lambda x: len(x['path']) ) unique_files.append(key_files[0]) @@ -114,10 +114,8 @@ def parse_item_files(files): # determine preferred subtitle language language[version_key] = None subtitle_files = [file for file in version_files[version_key] if file['extension'] == 'srt'] - for subtitle_language in sorted( - list(set([file['language'] for file in subtitle_files])), - key=lambda x: LANGUAGES.index(x) if x in LANGUAGES else x - ): + subtitle_languages = list(set([file['language'] for file in subtitle_files])) + for subtitle_language in sorted(subtitle_languages, key=subtitle_sort): language_files = [file for file in subtitle_files if file['language'] == subtitle_language] if len(subtitle_files) == len(parts): language[version_key] = subtitle_language @@ -188,25 +186,30 @@ def parse_path(path, directory_key='director'): # TODO: '.com.avi' ''' + def parse_type(string): for type in EXTENSIONS: if string in EXTENSIONS[type]: return type return None + def parse_underscores(string): + string = unicodedata.normalize('NFC', string) # '^_' or '_$' is '.' string = re.sub('^_', '.', string) string = re.sub('_$', '.', string) # '_.foo$' or '_ (' is '?' 
- string = re.sub('_(?=(\.\w+$| \())', '?', string) + string = re.sub(re.compile('_(?=(\.\w+$| \())', re.U), '?', string) # ' _..._ ' is '<...>' string = re.sub('(?<= )_(.+)_(?= )', '<\g<1>>', string) # 'foo_bar' or 'foo _ bar' is '/' - string = re.sub('(?<=\w)_(?=\w)', '/', string) + string = re.sub(re.compile('(?<=\w)_(?=\w)', re.U), '/', string) string = re.sub(' _ ', ' / ', string) # 'foo_ ' is ':' - string = re.sub('(?<=\w)_ ', ': ', string) + string = re.sub(re.compile('(?<=\w)_ ', re.U), ': ', string) + string = unicodedata.normalize('NFD', string) return string + data = {} parts = list(map(lambda x: parse_underscores(x.strip()), unicodedata.normalize('NFD', path).split('/'))) # subdirectory @@ -269,12 +272,12 @@ def parse_path(path, directory_key='director'): # isEpisode, seriesTitle, seriesYear data['isEpisode'] = False data['seriesTitle'] = data['seriesYear'] = None - if data['season'] != None or data['episode'] != None or data['episodes']: + if data['season'] is not None or data['episode'] is not None or data['episodes']: data['isEpisode'] = True data['seriesTitle'] = data['title'] - season = 'S%02d' % data['season'] if data['season'] != None else '' + season = 'S%02d' % data['season'] if data['season'] is not None else '' episode = '' - if data['episode'] != None: + if data['episode'] is not None: episode = 'E%02d' % data['episode'] elif data['episodes']: episode = 'E%02d%s%02d' % ( @@ -356,7 +359,7 @@ def parse_movie_path(path): director = "%s." 
% director[:-1] director = director.split('; ') director = [normalize_name(d).strip() for d in director] - director = filter(lambda d: d not in ('Unknown Director', 'Various Directors'), director) + director = list(filter(lambda d: d not in ('Unknown Director', 'Various Directors'), director)) else: director = [] @@ -376,9 +379,9 @@ def parse_movie_path(path): season = match.group(3) episode = match.group(5) episodeTitle = (match.group(6) or '').strip() - if episode != None: + if episode is not None: episode = int(episode) - if season != None: + if season is not None: season = int(season) if episode and not season: season = 1 @@ -396,7 +399,7 @@ def parse_movie_path(path): else: episode = None - if episode and 'Episode %d'%episode in fileparts: + if episode and 'Episode %d' % episode in fileparts: episodeTitle = fileparts.index('Episode %d' % episode) + 1 episodeTitle = fileparts[episodeTitle] if episodeTitle == extension or episodeTitle.startswith('Part'): @@ -482,3 +485,11 @@ def get_oxid(title, director=[], year='', oxid = get_hash('\n'.join([director, title, str(year), str(season)]))[:8] + \ get_hash('\n'.join([str(episode), episode_director, episode_title, str(episode_year)]))[:8] return u'0x' + oxid + +def subtitle_sort(language): + if language in LANGUAGES: + return str(LANGUAGES.index(language)) + elif language is None: + return str(len(LANGUAGES)) + else: + return language diff --git a/Shared/lib/python3.4/site-packages/ox/net.py b/Shared/lib/python3.4/site-packages/ox/net.py index 46ef0e1..02c7156 100644 --- a/Shared/lib/python3.4/site-packages/ox/net.py +++ b/Shared/lib/python3.4/site-packages/ox/net.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 # GPL 2008 -from __future__ import with_statement, print_function +from __future__ import print_function import gzip import json import os @@ -16,14 +16,14 @@ from chardet.universaldetector import UniversalDetector DEBUG = False # Default headers for HTTP requests. 
DEFAULT_HEADERS = { - 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0', + 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', - 'Accept-Language': 'en-us,en;q=0.5', - 'Accept-Encoding': 'gzip' + 'Accept-Language': 'en-US,en;q=0.8,fr;q=0.6,de;q=0.4', + 'Accept-Encoding': 'gzip', } -def status(url, data=None, headers=DEFAULT_HEADERS): +def status(url, data=None, headers=None): try: f = open_url(url, data, headers) s = f.code @@ -31,13 +31,13 @@ def status(url, data=None, headers=DEFAULT_HEADERS): s = e.code return s -def exists(url, data=None, headers=DEFAULT_HEADERS): +def exists(url, data=None, headers=None): s = status(url, data, headers) if s >= 200 and s < 400: return True return False -def get_headers(url, data=None, headers=DEFAULT_HEADERS): +def get_headers(url, data=None, headers=None): try: f = open_url(url, data, headers) f.headers['Status'] = "%s" % f.code @@ -48,10 +48,12 @@ def get_headers(url, data=None, headers=DEFAULT_HEADERS): headers = e.headers return dict(headers) -def get_json(url, data=None, headers=DEFAULT_HEADERS): - return json.loads(read_url(url, data, headers).decode('utf-8')) +def get_json(url, data=None, headers=None): + return json.loads(read_url(url, data, headers).decode('utf-8')) # pylint: disable=no-member -def open_url(url, data=None, headers=DEFAULT_HEADERS): +def open_url(url, data=None, headers=None): + if headers is None: + headers = DEFAULT_HEADERS.copy() if PY2: if not isinstance(url, bytes): url = url.encode('utf-8') @@ -64,7 +66,7 @@ def open_url(url, data=None, headers=DEFAULT_HEADERS): req = urllib.request.Request(url, data, headers) return urllib.request.urlopen(req) -def read_url(url, data=None, headers=DEFAULT_HEADERS, return_headers=False, unicode=False): +def read_url(url, data=None, headers=None, return_headers=False, 
unicode=False): if DEBUG: print('ox.net.read_url', url) f = open_url(url, data, headers) @@ -108,7 +110,7 @@ def detect_encoding(data): detector.close() return detector.result['encoding'] -get_url=read_url +get_url = read_url def save_url(url, filename, overwrite=False): if not os.path.exists(filename) or overwrite: @@ -119,51 +121,50 @@ def save_url(url, filename, overwrite=False): with open(filename, 'wb') as f: f.write(data) +def _get_size(url): + req = urllib.request.Request(url, headers=DEFAULT_HEADERS.copy()) + req.get_method = lambda: 'HEAD' + u = urllib.request.urlopen(req) + if u.code != 200 or 'Content-Length' not in u.headers: + raise IOError + return int(u.headers['Content-Length']) + +def _get_range(url, start, end): + headers = DEFAULT_HEADERS.copy() + headers['Range'] = 'bytes=%s-%s' % (start, end) + req = urllib.request.Request(url, headers=headers) + u = urllib.request.urlopen(req) + return u.read() + def oshash(url): - def get_size(url): - req = urllib.request.Request(url, headers=DEFAULT_HEADERS.copy()) - req.get_method = lambda : 'HEAD' - u = urllib.request.urlopen(req) - if u.code != 200 or not 'Content-Length' in u.headers: - raise IOError - return int(u.headers['Content-Length']) - - def get_range(url, start, end): - headers = DEFAULT_HEADERS.copy() - headers['Range'] = 'bytes=%s-%s' % (start, end) - req = urllib.request.Request(url, headers=headers) - u = urllib.request.urlopen(req) - return u.read() - try: longlongformat = 'q' # long long bytesize = struct.calcsize(longlongformat) - filesize = get_size(url) - hash = filesize - head = get_range(url, 0, min(filesize, 65536)) + filesize = _get_size(url) + hash_ = filesize + head = _get_range(url, 0, min(filesize, 65536)) if filesize > 65536: - tail = get_range(url, filesize-65536, filesize) + tail = _get_range(url, filesize-65536, filesize) if filesize < 65536: f = BytesIO(head) - for x in range(int(filesize/bytesize)): + for _ in range(int(filesize/bytesize)): buffer = f.read(bytesize) - 
(l_value,)= struct.unpack(longlongformat, buffer) - hash += l_value - hash = hash & 0xFFFFFFFFFFFFFFFF #cut off 64bit overflow + (l_value,) = struct.unpack(longlongformat, buffer) + hash_ += l_value + hash_ = hash_ & 0xFFFFFFFFFFFFFFFF # cut off 64bit overflow else: for offset in range(0, 65536, bytesize): buffer = head[offset:offset+bytesize] - (l_value,)= struct.unpack(longlongformat, buffer) - hash += l_value - hash = hash & 0xFFFFFFFFFFFFFFFF #cut of 64bit overflow + (l_value,) = struct.unpack(longlongformat, buffer) + hash_ += l_value + hash_ = hash_ & 0xFFFFFFFFFFFFFFFF # cut of 64bit overflow for offset in range(0, 65536, bytesize): buffer = tail[offset:offset+bytesize] - (l_value,)= struct.unpack(longlongformat, buffer) - hash += l_value - hash = hash & 0xFFFFFFFFFFFFFFFF - returnedhash = "%016x" % hash + (l_value,) = struct.unpack(longlongformat, buffer) + hash_ += l_value + hash_ = hash_ & 0xFFFFFFFFFFFFFFFF + returnedhash = "%016x" % hash_ return returnedhash - except(IOError): + except IOError: return "IOError" - diff --git a/Shared/lib/python3.4/site-packages/ox/normalize.py b/Shared/lib/python3.4/site-packages/ox/normalize.py index 128f33c..dea40ae 100644 --- a/Shared/lib/python3.4/site-packages/ox/normalize.py +++ b/Shared/lib/python3.4/site-packages/ox/normalize.py @@ -18,7 +18,8 @@ _articles = ('the', 'la', 'a', 'die', 'der', 'le', 'el', _articlesDict = dict([(x, x) for x in _articles]) _spArticles = [] for article in _articles: - if article[-1] not in ("'", '-'): article += ' ' + if article[-1] not in ("'", '-'): + article += ' ' _spArticles.append(article) _noarticles = ( @@ -50,8 +51,10 @@ def canonical_title(title): 'Los Angeles Plays Itself' """ try: - if _articlesDict.has_key(title.split(', ')[-1].lower()): return title - except IndexError: pass + if title.split(', ')[-1].lower() in _articlesDict: + return title + except IndexError: + pass ltitle = title.lower() for start in _noarticles: if ltitle.startswith(start): @@ -60,7 +63,8 @@ def 
canonical_title(title): if ltitle.startswith(article): lart = len(article) title = '%s, %s' % (title[lart:], title[:lart]) - if article[-1] == ' ': title = title[:-1] + if article[-1] == ' ': + title = title[:-1] break ## XXX: an attempt using a dictionary lookup. ##for artSeparator in (' ', "'", '-'): @@ -82,9 +86,10 @@ def normalize_title(title): 'The Movie Title' """ stitle = title.split(', ') - if len(stitle) > 1 and _articlesDict.has_key(stitle[-1].lower()): + if len(stitle) > 1 and stitle[-1].lower() in _articlesDict: sep = ' ' - if stitle[-1][-1] in ("'", '-'): sep = '' + if stitle[-1][-1] in ("'", '-'): + sep = '' title = '%s%s%s' % (stitle[-1], sep, ', '.join(stitle[:-1])) return title @@ -139,7 +144,8 @@ def canonical_name(name): # Don't convert names already in the canonical format. if name in ('Unknown Director', ): return name - if name.find(', ') != -1: return name + if name.find(', ') != -1: + return name sname = name.split(' ') snl = len(sname) if snl == 2: @@ -147,11 +153,14 @@ def canonical_name(name): name = '%s, %s' % (sname[1], sname[0]) elif snl > 2: lsname = [x.lower() for x in sname] - if snl == 3: _indexes = (0, snl-2) - else: _indexes = (0, snl-2, snl-3) + if snl == 3: + _indexes = (0, snl-2) + else: + _indexes = (0, snl-2, snl-3) # Check for common surname prefixes at the beginning and near the end. for index in _indexes: - if lsname[index] not in _sname_suffixes: continue + if lsname[index] not in _sname_suffixes: + continue try: # Build the surname. 
surn = '%s %s' % (sname[index], sname[index+1]) @@ -194,11 +203,12 @@ def normalize_name(name): def normalize_path(path): path = path.replace(':', '_').replace('/', '_') - if path.endswith('.'): path = path[:-1] + '_' + if path.endswith('.'): + path = path[:-1] + '_' return path def strip_accents(s): if isinstance(s, str): - s = unicode(s) + s = s.decode('utf-8') return ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')) diff --git a/Shared/lib/python3.4/site-packages/ox/oembed.py b/Shared/lib/python3.4/site-packages/ox/oembed.py index f9f1717..b5a02dc 100644 --- a/Shared/lib/python3.4/site-packages/ox/oembed.py +++ b/Shared/lib/python3.4/site-packages/ox/oembed.py @@ -6,13 +6,16 @@ from . import cache from .text import find_re from .utils import json, ET + def get_embed_code(url, maxwidth=None, maxheight=None): embed = {} header = cache.get_headers(url) if header.get('content-type', '').startswith('text/html'): html = cache.read_url(url) - json_oembed = filter(lambda l: 'json+oembed' in l, re.compile('').findall(html)) - xml_oembed = filter(lambda l: 'xml+oembed' in l, re.compile('').findall(html)) + links = re.compile('').findall(html) + json_oembed = [l for l in links if 'json+oembed' in l] + xml_oembed = [l for l in links if 'xml+oembed' in l] + if json_oembed: oembed_url = find_re(json_oembed[0], 'href="(.*?)"') if maxwidth: @@ -21,7 +24,7 @@ def get_embed_code(url, maxwidth=None, maxheight=None): oembed_url += '&maxheight=%d' % maxheight embed = json.loads(cache.read_url(oembed_url)) elif xml_oembed: - oembed_url = find_re(json_oembed[0], 'href="(.*?)"') + oembed_url = find_re(xml_oembed[0], 'href="(.*?)"') if maxwidth: oembed_url += '&maxwidth=%d' % maxwidth if maxheight: diff --git a/Shared/lib/python3.4/site-packages/ox/srt.py b/Shared/lib/python3.4/site-packages/ox/srt.py index 1b3f8b3..5191a55 100644 --- a/Shared/lib/python3.4/site-packages/ox/srt.py +++ b/Shared/lib/python3.4/site-packages/ox/srt.py @@ -1,10 +1,11 @@ 
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 -from __future__ import with_statement, division, print_function -import chardet -import re +from __future__ import division, print_function import codecs +import re +import chardet +from six import PY2 import ox @@ -12,18 +13,21 @@ __all__ = [] def _detect_encoding(fp): - bomDict={ # bytepattern : name - (0x00, 0x00, 0xFE, 0xFF): "utf_32_be", - (0xFF, 0xFE, 0x00, 0x00): "utf_32_le", - (0xFE, 0xFF, None, None): "utf_16_be", - (0xFF, 0xFE, None, None): "utf_16_le", - (0xEF, 0xBB, 0xBF, None): "utf_8", - } + bomDict = { # bytepattern : name + (0x00, 0x00, 0xFE, 0xFF): "utf_32_be", + (0xFF, 0xFE, 0x00, 0x00): "utf_32_le", + (0xFE, 0xFF, None, None): "utf_16_be", + (0xFF, 0xFE, None, None): "utf_16_le", + (0xEF, 0xBB, 0xBF, None): "utf_8", + } # go to beginning of file and get the first 4 bytes oldFP = fp.tell() fp.seek(0) - (byte1, byte2, byte3, byte4) = tuple(map(ord, fp.read(4))) + if PY2: + (byte1, byte2, byte3, byte4) = [ord(b) for b in fp.read(4)] + else: + (byte1, byte2, byte3, byte4) = fp.read(4) # try bom detection using 4 bytes, 3 bytes, or 2 bytes bomDetection = bomDict.get((byte1, byte2, byte3, byte4)) @@ -31,18 +35,18 @@ def _detect_encoding(fp): bomDetection = bomDict.get((byte1, byte2, byte3, None)) if not bomDetection: bomDetection = bomDict.get((byte1, byte2, None, None)) - ## if BOM detected, we're done :-) + # if BOM detected, we're done :-) fp.seek(oldFP) if bomDetection: return bomDetection encoding = 'latin-1' - #more character detecting magick using http://chardet.feedparser.org/ + # more character detecting magick using http://chardet.feedparser.org/ fp.seek(0) rawdata = fp.read() - #if data can be decoded as utf-8 use that, try chardet otherwise - #chardet detects utf-8 as ISO-8859-2 most of the time + # if data can be decoded as utf-8 use that, try chardet otherwise + # chardet detects utf-8 as ISO-8859-2 most of the time try: - data = unicode(rawdata, 'utf-8') + rawdata.decode('utf-8') 
encoding = 'utf-8' except: encoding = chardet.detect(rawdata)['encoding'] @@ -63,26 +67,30 @@ def load(filename, offset=0): def parse_time(t): return offset + ox.time2ms(t.replace(',', '.')) / 1000 - with open(filename) as f: + with open(filename, 'rb') as f: encoding = _detect_encoding(f) data = f.read() try: - data = unicode(data, encoding) + data = data.decode(encoding) except: try: - data = unicode(data, 'latin-1') + data = data.decode('latin-1') except: print("failed to detect encoding, giving up") return srt data = data.replace('\r\n', '\n') - srts = re.compile('(\d\d:\d\d:\d\d[,.]\d\d\d)\s*?-->\s*?(\d\d:\d\d:\d\d[,.]\d\d\d).*?\n(.*?)\n\n', re.DOTALL) + if not data.endswith('\n\n'): + data += '\n\n' + regexp = r'(\d\d:\d\d:\d\d[,.]\d\d\d)\s*?-->\s*?(\d\d:\d\d:\d\d[,.]\d\d\d).*?\n(.*?)\n\n' + srts = re.compile(regexp, re.DOTALL) i = 0 for s in srts.findall(data): - _s = {'id': str(i), - 'in': parse_time(s[0]), - 'out': parse_time(s[1]), - 'value': s[2].strip() + _s = { + 'id': str(i), + 'in': parse_time(s[0]), + 'out': parse_time(s[1]), + 'value': s[2].strip() } srt.append(_s) i += 1 diff --git a/Shared/lib/python3.4/site-packages/ox/text.py b/Shared/lib/python3.4/site-packages/ox/text.py index 8bb8127..282afa2 100644 --- a/Shared/lib/python3.4/site-packages/ox/text.py +++ b/Shared/lib/python3.4/site-packages/ox/text.py @@ -5,20 +5,67 @@ import math import re import unicodedata +from six.moves import reduce + ARTICLES = list(set([ # def sg, def pl, indef sg, indef pl (each m/f/n) - 'der', 'die', 'das', 'ein', 'eine', # de - 'the', 'a', 'an', # en - 'el', 'la', 'lo', 'los', 'las', 'un', 'una', 'unos', 'unas', # es - 'le', "l'", 'la', 'les', 'un', 'une', 'des', # fr - 'il', 'lo', "l'" 'la', '_i', 'gli', 'le', # it - 'de', 'het', 'een', # nl - 'o', 'a', 'os', '_as', 'um', 'uma', '_uns', 'umas' # pt - # some _disabled because of collisions + 'der', 'die', 'das', 'ein', 'eine', # de + 'the', 'a', 'an', # en + 'el', 'la', 'lo', 'los', 'las', 'un', 'una', 'unos', 
'unas', # es + 'le', "l'", 'la', 'les', 'un', 'une', 'des', # fr + 'il', 'lo', "l'" 'la', '_i', 'gli', 'le', # it + 'de', 'het', 'een', # nl + 'o', 'a', 'os', '_as', 'um', 'uma', '_uns', 'umas' # pt + # some _disabled because of collisions ])) +# every given name in 0xDB that matches Xxxx-yyyy Lastname +ASIAN_FIRST_NAMES = [ + 'a', 'ae', 'aeng', 'ah', 'ai', 'an', 'back', 'bae', 'ban', 'bang', 'bao', + 'beom', 'bi', 'bin', 'bo', 'bok', 'bon', 'bong', 'bu', 'bum', 'byeong', + 'byoung', 'byung', 'cai', 'chae', 'chan', 'chang', 'chao', 'cheal', 'chen', + 'cheng', 'cheol', 'cheon', 'cheong', 'cheul', 'chi', 'chia', 'chiao', + 'chieh', 'chien', 'chih', 'chin', 'ching', 'cho', 'choi', 'chong', 'choo', + 'chu', 'chuan', 'chuen', 'chul', 'chun', 'chung', 'chuo', 'chyi', 'da', + 'dae', 'dah', 'dal', 'dan', 'deok', 'do', 'dong', 'doo', 'duek', 'duk', + 'e', 'el', 'en', 'eui', 'eul', 'eun', 'eung', 'fai', 'fan', 'fang', 'fei', + 'fen', 'feng', 'fo', 'foo', 'fu', 'ga', 'gae', 'gam', 'gang', 'ge', 'gen', + 'geon', 'geun', 'gi', 'gil', 'gin', 'gnad', 'gok', 'goo', 'gook', 'gu', + 'gun', 'gwan', 'gye', 'gyeong', 'gyu', 'gyun', 'ha', 'hae', 'hak', 'han', + 'hang', 'hao', 'he', 'hee', 'heng', 'heon', 'hie', 'ho', 'hoi', 'hong', + 'hoo', 'hoon', 'hou', 'hsi', 'hsiang', 'hsiao', 'hsieh', 'hsien', 'hsin', + 'hsing', 'hsiung', 'hu', 'hua', 'huai', 'huang', 'hue', 'hui', 'hun', + 'hung', 'hwa', 'hwan', 'hwang', 'hye', 'hyeok', 'hyeon', 'hyeong', 'hyo', + 'hyuk', 'hyun', 'hyung', 'i', 'ik', 'il', 'in', 'ja', 'jae', 'jan', 'jang', + 'je', 'jee', 'jen', 'jeok', 'jeong', 'jeung', 'ji', 'jia', 'jian', 'jik', + 'jin', 'jing', 'jo', 'jong', 'joo', 'joon', 'ju', 'juan', 'jun', 'jung', + 'ka', 'kai', 'kam', 'kan', 'kang', 'kap', 'kar', 'ke', 'kee', 'kei', + 'keng', 'keum', 'keung', 'ki', 'kil', 'kin', 'kit', 'kot', 'ku', 'kua', + 'kuan', 'kuang', 'kuen', 'kun', 'kuo', 'kwang', 'kwok', 'kwon', 'kwong', + 'kyeong', 'kyo', 'kyoon', 'kyou', 'kyoung', 'kyu', 'kyun', 'kyung', 'lai', + 'lau', 'lee', 
'lei', 'leng', 'leung', 'li', 'liang', 'lien', 'lin', 'ling', + 'lock', 'long', 'lun', 'lung', 'maeng', 'man', 'mei', 'mi', 'miao', 'min', + 'ming', 'mo', 'mok', 'moo', 'mook', 'moon', 'mu', 'mun', 'myeong', + 'myoeng', 'myong', 'myung', 'na', 'nae', 'nai', 'nam', 'nan', 'neung', + 'ngaru', 'ni', 'no', 'nyeo', 'oh', 'ok', 'ou', 'pai', 'pei', 'pen', 'peng', + 'pi', 'pil', 'pin', 'ping', 'po', 'pui', 'pyo', 'pyung', 'qing', 'qun', + 'ra', 'rak', 'ram', 'ran', 'reum', 'ri', 'rim', 'rin', 'roe', 'rok', 'ru', + 'rui', 'ryeon', 'ryol', 'ryong', 'sa', 'sae', 'san', 'sang', 'se', 'seo', + 'seob', 'seok', 'seol', 'seon', 'seong', 'seung', 'shan', 'shen', 'sheng', + 'shi', 'shia', 'shiang', 'shih', 'shik', 'shim', 'shin', 'shing', 'shou', + 'shu', 'shun', 'si', 'sik', 'sin', 'siu', 'so', 'song', 'soo', 'sook', + 'soon', 'su', 'suk', 'sun', 'sung', 'sup', 'szu', "t'ien", 'ta', 'tae', + 'taek', 'tai', 'tak', 'te', 'ti', 'tian', 'ting', 'to', 'toa', 'tsai', + 'tsan', 'tse', 'tso', 'tsui', 'tung', 'tzu', 'ua', 'ui', 'un', 'wah', + 'wai', 'wan', 'wei', 'wen', 'weon', 'wing', 'wit', 'wol', 'won', 'woo', + 'wook', 'woon', 'woong', 'wuk', 'xiao', 'ya', 'yan', 'yang', 'yao', 'ye', + 'yea', 'yee', 'yeh', 'yen', 'yeo', 'yeol', 'yeon', 'yeong', 'yeop', 'yi', + 'yin', 'ying', 'yiu', 'yoeng', 'yong', 'yoo', 'yoon', 'you', 'young', 'yu', + 'yuan', 'yue', 'yuen', 'yuk', 'yull', 'yun', 'yune', 'yung', 'zhi', + 'zhong', 'zhu' +] # see http://en.wikipedia.org/wiki/List_of_common_Chinese_surnames # and http://en.wikipedia.org/wiki/List_of_Korean_family_names -ASIAN_NAMES = [ +ASIAN_LAST_NAMES = [ 'chan', 'chang', 'chao', 'chen', 'cheong', 'cheung', 'chong', 'choo', @@ -88,8 +135,8 @@ UA_REGEXPS = { '(Chimera)\/(\d+)', '(chromeframe)\/(\d+)', '(Edge)\/(\d+)', - '(Epiphany)\/(\d+)', # before Chrome, Chromium and Safari - '(Chromium)\/(\d+)', # before Chrome + '(Epiphany)\/(\d+)', # before Chrome, Chromium and Safari + '(Chromium)\/(\d+)', # before Chrome '(Chrome)\/(\d+)', '(FBForIPhone)', 
'(Firefox)\/(\d+)', @@ -107,7 +154,7 @@ UA_REGEXPS = { '(OviBrowser)\/(\d+)', 'Version\/(\d+).+(Safari)', '(WebKit)\/(\d+)', - '(MSIE) (\d\d?(?!\d))', # last, since Opera used to mask as MSIE + '(MSIE) (\d\d?(?!\d))', # last, since Opera used to mask as MSIE '(Trident)\/.*?rv:(\d+)', '(Gecko)', '(Mozilla)\/(3|4)' @@ -117,7 +164,9 @@ UA_REGEXPS = { '(Google Web Preview).+Chrome\/(\d+)', '(Googlebot)\/(\d+)', '(WebCrawler)\/(\d+)', - '(Yahoo! Slurp)\/(\d+)' + '(Yahoo! Slurp)\/(\d+)', + '(YandexBot)\/([\d\.]+)', + '(YandexMobileBot)\/([\d\.]+)', ], 'system': [ '(Android) (\d+)', @@ -130,7 +179,7 @@ UA_REGEXPS = { '(BSD) (FreeBSD|NetBSD|OpenBSD)', '(CPU OS) (\d+)', '(iPhone OS) (\d+)', - '(iPhone)', # Opera + '(iPhone)', # Opera '(J2ME\/MIDP)', '(Linux).+(CentOS|CrOS|Debian|Fedora|Gentoo|Mandriva|MeeGo|Mint|Red Hat|SUSE|Ubuntu|webOS)', '(CentOS|CrOS|Debian|Fedora|Gentoo|Mandriva|MeeGo|Mint|Red Hat|SUSE|Ubuntu|webOS).+(Linux)', @@ -155,12 +204,12 @@ UA_REGEXPS = { '(Windows) (NT \d\.\d)', '(Windows Phone) (\d+)', '(Windows Phone OS) (\d+)', - '(Windows) (3\.1|95|98|2000|2003|CE|ME|Mobile|NT|XP)', # Opera - '(Win) (9x 4\.90)', # Firefox - '(Win)(16)', # Firefox - '(Win)(9\d)', # Firefox - '(Win)(NT)', # Firefox - '(Win)(NT4\.0)', # Firefox + '(Windows) (3\.1|95|98|2000|2003|CE|ME|Mobile|NT|XP)', # Opera + '(Win) (9x 4\.90)', # Firefox + '(Win)(16)', # Firefox + '(Win)(9\d)', # Firefox + '(Win)(NT)', # Firefox + '(Win)(NT4\.0)', # Firefox '(X11)' ] } @@ -244,15 +293,41 @@ def get_sort_name(name): >>> get_sort_name('Scorsese, Martin') 'Scorsese, Martin' """ - if not ' ' in name or ', ' in name: + if ' ' not in name or ', ' in name: return name if name.lower().startswith('the '): return get_sort_title(name) + def add_name(): if len(first_names): last_names.insert(0, first_names.pop()) + def find_name(names): return len(first_names) and first_names[-1].lower() in names + + if is_asian_name(name): + names = name.replace('-', ' ').split(' ') + if len(names) == 2: + if 
names[0].lower() in ASIAN_LAST_NAMES: + lastname, firstname = names + else: + firstname, lastname = names + else: + names_ = name.split(' ') + if '-' in names_[0]: + lastname, firstname = [names[2], names[0] + '-' + names[1].lower()] + elif '-' in names_[1]: + lastname, firstname = [names[0], names[1] + '-' + names[2].lower()] + elif names[0].lower() in ASIAN_FIRST_NAMES and names[2].lower() not in ASIAN_FIRST_NAMES: + lastname, firstname = [names[2], names[0] + ' ' + names[1]] + elif names[0].lower() not in ASIAN_FIRST_NAMES and names[2].lower() in ASIAN_FIRST_NAMES: + lastname, firstname = [names[0], names[1] + ' ' + names[2]] + elif names[0].lower() in ASIAN_LAST_NAMES: + lastname, firstname = [names[0], names[1] + ' ' + names[2]] + else: + lastname, firstname = [names[2], names[0] + ' ' + names[1]] + return lastname + ' ' + firstname + first_names = name.split(' ') last_names = [] if re.search('^[0-9]+$', first_names[-1]): @@ -269,7 +344,7 @@ def get_sort_name(name): add_name() name = ' '.join(last_names) if len(first_names): - separator = ' ' if last_names[0].lower() in ASIAN_NAMES else ', ' + separator = ' ' if last_names[0].lower() in ASIAN_LAST_NAMES else ', ' name += separator + ' '.join(first_names) return name @@ -299,8 +374,8 @@ def find_re(string, regexp): return result[0].strip() return '' -def find_string(string, string0='', string1 = ''): - """Return the string between string0 and string1. +def find_string(string, string0='', string1=''): + """Return the string between string0 and string1. If string0 or string1 is left out, begining or end of string is used. 
@@ -324,12 +399,23 @@ def find_string(string, string0='', string1 = ''): string1 = '$' return find_re(string, string0 + '(.*?)' + string1) +def is_asian_name(name): + names = name.replace('-', ' ').lower().split(' ') + return (len(names) == 2 and not '-' in name and ( + (names[0] in ASIAN_FIRST_NAMES and names[1] in ASIAN_LAST_NAMES) or + (names[0] in ASIAN_LAST_NAMES and names[1] in ASIAN_FIRST_NAMES) + )) or ( + len(names) == 3 and names[1] in ASIAN_FIRST_NAMES and ( + names[0] in ASIAN_FIRST_NAMES or names[2] in ASIAN_FIRST_NAMES + ) + ) + def parse_useragent(useragent): data = {} for key in UA_REGEXPS: for alias, regexp in UA_ALIASES[key].items(): alias = alias if key == 'browser' else alias + ' \\1' - useragent = re.sub(regexp, alias, useragent) + useragent = re.sub(regexp, alias, useragent) for regexp in UA_REGEXPS[key]: data[key] = {'name': '', 'version': '', 'string': ''} match = re.compile(regexp).search(useragent) @@ -352,7 +438,7 @@ def parse_useragent(useragent): 'version': version, 'string': string } - break; + break return data def remove_special_characters(text): @@ -373,14 +459,17 @@ def wrap(text, width): the text. Expects that existing line breaks are posix newlines (\n). 
See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061 """ - return reduce(lambda line, word, width=width: '%s%s%s' % - (line, - ' \n'[(len(line[line.rfind('\n')+1:]) - + len(word.split('\n',1)[0] - ) >= width)], - word), - text.split(' ') - ) + + def reduce_line(line, word): + return '%s%s%s' % ( + line, + ' \n'[ + (len(line[line.rfind('\n')+1:]) + len(word.split('\n', 1)[0]) >= width) + ], + word + ) + + return reduce(reduce_line, text.split(' ')) def wrap_string(string, length=80, separator='\n', balance=False): ''' @@ -404,7 +493,7 @@ def wrap_string(string, length=80, separator='\n', balance=False): for word in words: if len(lines[len(lines) - 1] + word + u' ') <= length + 1: # word fits in current line - lines[len(lines) - 1] += word + u' '; + lines[len(lines) - 1] += word + u' ' else: if len(word) <= length: # word fits in next line @@ -414,7 +503,7 @@ def wrap_string(string, length=80, separator='\n', balance=False): position = length - len(lines[len(lines) - 1]) lines[len(lines) - 1] += word[0:position] for i in range(position, len(word), length): - lines.append(word[i:i+length]); + lines.append(word[i:i+length]) lines[len(lines) - 1] += u' ' return separator.join(lines).strip() @@ -425,7 +514,7 @@ def truncate_string(string, length, padding='...', position='right'): # 'anticon...lement' # >>> truncate_string('anticonstitutionellement', 16, '...', 'right') # 'anticonstitut...' 
- stringLength = len(string); + stringLength = len(string) paddingLength = len(padding) if stringLength > length: if position == 'left': @@ -436,7 +525,7 @@ def truncate_string(string, length, padding='...', position='right'): string = '%s%s%s' % (string[:left], padding, string[right:]) elif position == 'right': string = '%s%s' % (string[:length - paddingLength], padding) - return string; + return string def truncate_words(s, num): """Truncates a string after a certain number of chacters, but ends with a word @@ -473,7 +562,7 @@ def trim_string(string, num): def get_valid_filename(s): """ Returns the given string converted to a string that can be used for a clean - filename. Specifically, leading and trailing spaces are removed; + filename. Specifically, leading and trailing spaces are removed; all non-filename-safe characters are removed. >>> get_valid_filename("john's portrait in 2004.jpg") @@ -498,9 +587,11 @@ def get_text_list(list_, last_word='or'): >>> get_text_list([]) '' """ - if len(list_) == 0: return '' - if len(list_) == 1: return list_[0] - return u'%s %s %s' % (u', '.join([unicode(i) for i in list_][:-1]), last_word, list_[-1]) + if len(list_) == 0: + return '' + if len(list_) == 1: + return list_[0] + return u'%s %s %s' % (u', '.join([i for i in list_][:-1]), last_word, list_[-1]) def get_list_text(text, last_word='or'): """ @@ -519,7 +610,7 @@ def get_list_text(text, last_word='or'): if text: list_ = text.split(u', ') if list_: - i=len(list_)-1 + i = len(list_)-1 last = list_[i].split(last_word) if len(last) == 2: list_[i] = last[0].strip() @@ -531,11 +622,11 @@ def normalize_newlines(text): def recapitalize(text): "Recapitalizes text, placing caps after end-of-sentence punctuation." - #capwords = () + # capwords = () text = text.lower() capsRE = re.compile(r'(?:^|(?<=[\.\?\!] 
))([a-z])') text = capsRE.sub(lambda x: x.group(1).upper(), text) - #for capword in capwords: + # for capword in capwords: # capwordRE = re.compile(r'\b%s\b' % capword, re.I) # text = capwordRE.sub(capword, text) return text @@ -543,22 +634,28 @@ def recapitalize(text): def phone2numeric(phone): "Converts a phone number with letters into its numeric equivalent." letters = re.compile(r'[A-PR-Y]', re.I) - char2number = lambda m: {'a': '2', 'c': '2', 'b': '2', 'e': '3', - 'd': '3', 'g': '4', 'f': '3', 'i': '4', 'h': '4', 'k': '5', - 'j': '5', 'm': '6', 'l': '5', 'o': '6', 'n': '6', 'p': '7', - 's': '7', 'r': '7', 'u': '8', 't': '8', 'w': '9', 'v': '8', - 'y': '9', 'x': '9'}.get(m.group(0).lower()) + + def char2number(m): + return { + 'a': '2', 'c': '2', 'b': '2', 'e': '3', + 'd': '3', 'g': '4', 'f': '3', 'i': '4', 'h': '4', 'k': '5', + 'j': '5', 'm': '6', 'l': '5', 'o': '6', 'n': '6', 'p': '7', + 's': '7', 'r': '7', 'u': '8', 't': '8', 'w': '9', 'v': '8', + 'y': '9', 'x': '9' + }.get(m.group(0).lower()) return letters.sub(char2number, phone) def compress_string(s): - import cStringIO, gzip - zbuf = cStringIO.StringIO() + import gzip + from six import BytesIO + zbuf = BytesIO() zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf) zfile.write(s) zfile.close() return zbuf.getvalue() smart_split_re = re.compile('("(?:[^"\\\\]*(?:\\\\.[^"\\\\]*)*)"|\'(?:[^\'\\\\]*(?:\\\\.[^\'\\\\]*)*)\'|[^\\s]+)') + def smart_split(text): """ Generator that splits a string by spaces, leaving quoted phrases together. 
@@ -582,17 +679,17 @@ def words(text): returns words in text, removing punctuation """ text = text.split() - return map(lambda x: re.sub("(([.!?:-_]|'s)$)", '', x), text) + return [re.sub("(([.!?:-_]|'s)$)", '', x) for x in text] def sort_string(string): string = string.replace(u'Æ', 'AE').replace(u'Ø', 'O').replace(u'Þ', 'Th') - #pad numbered titles + # pad numbered titles string = re.sub('(\d),(\d{3})', '\\1\\2', string) string = re.sub('(\d+)', lambda x: '%010d' % int(x.group(0)), string) return unicodedata.normalize('NFKD', string) def sorted_strings(strings, key=None): if not key: - key = lambda k: sort_string(k) + key = sort_string return sorted(strings, key=key) diff --git a/Shared/lib/python3.4/site-packages/ox/torrent/__init__.py b/Shared/lib/python3.4/site-packages/ox/torrent/__init__.py index 9e96bad..7818bc7 100644 --- a/Shared/lib/python3.4/site-packages/ox/torrent/__init__.py +++ b/Shared/lib/python3.4/site-packages/ox/torrent/__init__.py @@ -14,8 +14,8 @@ else: __all__ = ['create_torrent', 'get_info_hash', 'get_torrent_info', 'get_files', 'get_torrent_size'] -def create_torrent(file, url, params = {}, flag = Event(), - progress = lambda x: None, progress_percent = 1): +def create_torrent(file, url, params={}, flag=Event(), + progress=lambda x: None, progress_percent=1): "Creates a torrent for a given file, using url as tracker url" from .makemetafile import make_meta_file return make_meta_file(file, url, params, flag, progress, progress_percent) diff --git a/Shared/lib/python3.4/site-packages/ox/torrent/bencode.py b/Shared/lib/python3.4/site-packages/ox/torrent/bencode.py index 611c531..b586001 100644 --- a/Shared/lib/python3.4/site-packages/ox/torrent/bencode.py +++ b/Shared/lib/python3.4/site-packages/ox/torrent/bencode.py @@ -1,5 +1,6 @@ # Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman # see LICENSE.txt for license information +from __future__ import print_function from types import IntType, LongType, StringType, ListType, 
TupleType, DictType try: @@ -53,8 +54,8 @@ def decode_dict(x, f): lastkey = None while x[f] != 'e': k, f = decode_string(x, f) - #why is this needed - #if lastkey >= k: + # why is this needed + # if lastkey >= k: # raise ValueError lastkey = k r[k], f = decode_func[x[f]](x, f) @@ -81,9 +82,9 @@ def bdecode(x, sloppy = 1): r, l = decode_func[x[0]](x, 0) # except (IndexError, KeyError): except (IndexError, KeyError, ValueError): - raise ValueError, "bad bencoded data" + raise ValueError("bad bencoded data") if not sloppy and l != len(x): - raise ValueError, "bad bencoded data" + raise ValueError("bad bencoded data") return r def test_bdecode(): @@ -102,10 +103,10 @@ def test_bdecode(): assert 0 except ValueError: pass - assert bdecode('i4e') == 4L - assert bdecode('i0e') == 0L - assert bdecode('i123456789e') == 123456789L - assert bdecode('i-10e') == -10L + assert bdecode('i4e') == 4 + assert bdecode('i0e') == 0 + assert bdecode('i123456789e') == 123456789 + assert bdecode('i-10e') == -10 try: bdecode('i-0e') assert 0 @@ -287,7 +288,7 @@ def bencode(x): try: encode_func[type(x)](x, r) except: - print "*** error *** could not encode type %s (value: %s)" % (type(x), x) + print("*** error *** could not encode type %s (value: %s)" % (type(x), x)) assert 0 return ''.join(r) @@ -295,7 +296,7 @@ def test_bencode(): assert bencode(4) == 'i4e' assert bencode(0) == 'i0e' assert bencode(-10) == 'i-10e' - assert bencode(12345678901234567890L) == 'i12345678901234567890e' + assert bencode(12345678901234567890) == 'i12345678901234567890e' assert bencode('') == '0:' assert bencode('abc') == '3:abc' assert bencode('1234567890') == '10:1234567890' diff --git a/Shared/lib/python3.4/site-packages/ox/torrent/bencode3.py b/Shared/lib/python3.4/site-packages/ox/torrent/bencode3.py index d2a2906..31c4523 100644 --- a/Shared/lib/python3.4/site-packages/ox/torrent/bencode3.py +++ b/Shared/lib/python3.4/site-packages/ox/torrent/bencode3.py @@ -4,139 +4,151 @@ # ## -def _decode_int(data): - """ 
- decode integer from bytearray - return int, remaining data - """ - data = data[1:] - end = data.index(b'e') - return int(data[:end],10), data[end+1:] +class Decoder(object): -def _decode_str(data): - """ - decode string from bytearray - return string, remaining data - """ - start = data.index(b':') - l = int(data[:start].decode(),10) - if l <= 0: - raise Exception('invalid string size: %d'%d) - start += 1 - ret = bytes(data[start:start+l]) - data = data[start+l:] - return ret, data + def _decode_int(self): + """ + decode integer from bytearray + return int + """ + self.idx += 1 + start = self.idx + end = self.data.index(b'e', self.idx) + self.idx = end + 1 + return int(self.data[start:end]) -def _decode_list(data): - """ - decode list from bytearray - return list, remaining data - """ - ls = [] - data = data[1:] - while data[0] != ord(b'e'): - elem, data = _decode(data) - ls.append(elem) - return ls, data[1:] + def _decode_str(self): + """ + decode string from bytearray + return string + """ + start = self.data.index(b':', self.idx) + l = int(self.data[self.idx:start].decode(), 10) + if l < 0: + raise Exception('invalid string size: %d' % l) + start += 1 + ret = self.data[start:start+l] + try: + ret = ret.decode('utf-8') + except: + pass + self.idx = start + l + return ret -def _decode_dict(data): - """ - decode dict from bytearray - return dict, remaining data - """ - d = {} - data = data[1:] - while data[0] != ord(b'e'): - k, data = _decode_str(data) - v, data = _decode(data) - d[k.decode()] = v - return d, data[1:] + def _decode_list(self): + """ + decode list from bytearray + return list + """ + ls = [] + self.idx += 1 + while self.data[self.idx] != ord(b'e'): + ls.append(self._decode()) + self.idx += 1 + return ls -def _decode(data): - """ - decode a bytearray - return deserialized object, remaining data - """ - ch = chr(data[0]) - if ch == 'l': - return _decode_list(data) - elif ch == 'i': - return _decode_int(data) - elif ch == 'd': - return 
_decode_dict(data) - elif ch.isdigit(): - return _decode_str(data) - else: - raise Exception('could not deserialize data: %s'%data) + def _decode_dict(self): + """ + decode dict from bytearray + return dict + """ + d = {} + self.idx += 1 + while self.data[self.idx] != ord(b'e'): + k = self._decode_str() + v = self._decode() + d[k] = v + self.idx += 1 + return d + + def _decode(self): + ch = chr(self.data[self.idx]) + if ch == 'l': + return self._decode_list() + elif ch == 'i': + return self._decode_int() + elif ch == 'd': + return self._decode_dict() + elif ch.isdigit(): + return self._decode_str() + else: + raise Exception('could not decode data: %s' % data) + + def decode(self, data): + self.idx = 0 + self.data = data + obj = self._decode() + if len(data) != self.idx: + raise Exception('failed to decode, extra data: %s' % data) + return obj def bdecode(data): """ decode a bytearray - return deserialized object + return decoded object """ - obj , data = _decode(data) - if len(data) > 0: - raise Exception('failed to deserialize, extra data: %s'%data) - return obj + return Decoder().decode(data) -def _encode_str(s,buff): +def _encode_str(s, buff): """ encode string to a buffer """ s = bytearray(s) l = len(s) - buff.append(bytearray(str(l)+':','utf-8')) + buff.append(bytearray(str(l)+':', 'utf-8')) buff.append(s) - -def _encode_int(i,buff): + +def _encode_int(i, buff): """ encode integer to a buffer """ buff.append(b'i') - buff.append(bytearray(str(i),'ascii')) + buff.append(bytearray(str(i), 'ascii')) buff.append(b'e') -def _encode_list(l,buff): +def _encode_list(l, buff): """ encode list of elements to a buffer """ buff.append(b'l') for i in l: - _encode(i,buff) + _encode(i, buff) buff.append(b'e') -def _encode_dict(d,buff): +def _encode_dict(d, buff): """ encode dict """ buff.append(b'd') - l = list(d.keys()) - l.sort() - for k in l: - _encode(str(k),buff) - _encode(d[k],buff) + for k in sorted(d): + if not isinstance(k, (bytes, str)): + k = str(k) + _encode(k, 
buff) + _encode(d[k], buff) buff.append(b'e') -def _encode(obj,buff): +def _encode(obj, buff): """ encode element obj to a buffer buff """ - if isinstance(obj,str): - _encode_str(bytearray(obj,'utf-8'),buff) - elif isinstance(obj,bytes): - _encode_str(bytearray(obj),buff) - elif isinstance(obj,bytearray): - _encode_str(obj,buff) + if isinstance(obj, str): + _encode_str(bytearray(obj, 'utf-8'), buff) + elif isinstance(obj, bytes): + _encode_str(bytearray(obj), buff) + elif isinstance(obj, bytearray): + _encode_str(obj, buff) elif str(obj).isdigit(): - _encode_int(obj,buff) - elif isinstance(obj,list): - _encode_list(obj,buff) - elif hasattr(obj,'keys') and hasattr(obj,'values'): - _encode_dict(obj,buff) - elif str(obj) in ['True','False']: - _encode_int(int(obj and '1' or '0'),buff) + _encode_int(obj, buff) + elif isinstance(obj, int): + _encode_int(obj, buff) + elif isinstance(obj, list): + _encode_list(obj, buff) + elif hasattr(obj, 'keys') and hasattr(obj, 'values'): + _encode_dict(obj, buff) + elif str(obj) in ['True', 'False']: + _encode_int(int(obj and '1' or '0'), buff) else: - raise Exception('non serializable object: %s'%obj) + raise Exception('non serializable object: %s [%s]' % (obj, type(obj))) def bencode(obj): @@ -144,8 +156,8 @@ def bencode(obj): bencode element, return bytearray """ buff = [] - _encode(obj,buff) - ret = bytearray() + _encode(obj, buff) + ret = bytearray() for ba in buff: - ret += ba + ret += ba return bytes(ret) diff --git a/Shared/lib/python3.4/site-packages/ox/vtt.py b/Shared/lib/python3.4/site-packages/ox/vtt.py index 6dd7070..899497c 100644 --- a/Shared/lib/python3.4/site-packages/ox/vtt.py +++ b/Shared/lib/python3.4/site-packages/ox/vtt.py @@ -3,7 +3,7 @@ import codecs import ox - +from . 
import srt def _webvtt_timecode(t): return ox.format_duration(t * 1000, years=False) @@ -30,3 +30,13 @@ def encode(data, webvtt=False): ) return codecs.BOM_UTF8 + srt.encode('utf-8') + +def load(filename, offset=0): + '''Parses vtt file + + filename: path to an vtt file + offset (float, seconds): shift all in/out points by offset + + Returns list with dicts that have in, out, value and id + ''' + return srt.load(filename, offset) diff --git a/Shared/lib/python3.4/site-packages/ox/web/apple.py b/Shared/lib/python3.4/site-packages/ox/web/apple.py index 57093a2..099d6cb 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/apple.py +++ b/Shared/lib/python3.4/site-packages/ox/web/apple.py @@ -2,6 +2,7 @@ from __future__ import print_function import json import re +from six import text_type from ox.cache import read_url HEADERS = { @@ -16,9 +17,9 @@ USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7) ' USER_AGENT += 'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 Safari/534.48.3' def get_movie_data(title, director): - if isinstance(title, unicode): + if isinstance(title, text_type): title = title.encode('utf-8') - if isinstance(director, unicode): + if isinstance(director, text_type): director = director.encode('utf-8') data = {} # itunes section (preferred source for link) @@ -45,7 +46,7 @@ def get_movie_data(title, director): results = js['results'] if results: url = host + results[0]['location'] - if not 'link' in data: + if 'link' not in data: data['link'] = url headers = { 'User-Agent': USER_AGENT diff --git a/Shared/lib/python3.4/site-packages/ox/web/auth.py b/Shared/lib/python3.4/site-packages/ox/web/auth.py index e610959..d3a8b9a 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/auth.py +++ b/Shared/lib/python3.4/site-packages/ox/web/auth.py @@ -17,7 +17,7 @@ def get(key): if key in auth: return auth[key] print("please add key %s to json file '%s'" % (key, user_auth)) - raise Exception,"no key %s found" % key + raise Exception("no key %s 
found" % key) def update(key, value): user_auth = os.environ.get('oxAUTH', os.path.expanduser('~/.ox/auth.json')) @@ -31,4 +31,3 @@ def update(key, value): f = open(user_auth, "w") f.write(json.dumps(auth, indent=2)) f.close() - diff --git a/Shared/lib/python3.4/site-packages/ox/web/criterion.py b/Shared/lib/python3.4/site-packages/ox/web/criterion.py index 2e81ce1..6cef01e 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/criterion.py +++ b/Shared/lib/python3.4/site-packages/ox/web/criterion.py @@ -8,13 +8,13 @@ from ox.cache import read_url from ox.html import strip_tags, decode_html from ox.text import find_re -import imdb +from . import imdb def get_id(url): return url.split("/")[-1] def get_url(id): - return "http://www.criterion.com/films/%s" % id + return "https://www.criterion.com/films/%s" % id def get_data(id, timeout=ox.cache.cache_timeout, get_imdb=False): ''' @@ -28,23 +28,34 @@ def get_data(id, timeout=ox.cache.cache_timeout, get_imdb=False): u'http://s3.amazonaws.com/criterion-production/product_images/185/343_box_348x490.jpg' ''' data = { + "id": id, "url": get_url(id) } try: html = read_url(data["url"], timeout=timeout, unicode=True) except: - html = ox.cache.read_url(data["url"], timeout=timeout) - data["number"] = find_re(html, "
  • Spine #(\d+)") + html = read_url(data["url"], timeout=timeout).decode('utf-8', 'ignore') - data["title"] = decode_html(find_re(html, "

    (.*?)

    ")) + data["number"] = find_re(html, "Spine #(\d+)") + + data["title"] = decode_html(find_re(html, "

    (.*?)

    ")) data["title"] = data["title"].split(u' \u2014 The Television Version')[0].strip() - data["director"] = strip_tags(find_re(html, "

    (.*?)

    ")) - results = find_re(html, '
    (.*?)
    ') - results = re.compile("
  • (.*?)
  • ").findall(results) - data["country"] = results[0] - data["year"] = results[1] + results = find_re(html, '
      (.*?)
    ') + info = re.compile('
  • (.*?)
  • ', re.DOTALL).findall(results) + info = {k: strip_tags(v).strip() for k, v in info} + if 'director' in info: + data['director'] = info['director'] + if 'countryOfOrigin' in info: + data['country'] = [c.strip() for c in decode_html(info['countryOfOrigin']).split(', ')] + if 'inLanguage' in info: + data['language'] = [l.strip() for l in decode_html(info['inLanguage']).split(', ')] + for v in re.compile('
  • (.*?)
  • ', re.DOTALL).findall(results): + if 'datePublished' in v: + data['year'] = strip_tags(v).strip() + elif 'duration' in v: + data['duration'] = strip_tags(v).strip() data["synopsis"] = decode_html(strip_tags(find_re(html, - "
    .*?

    (.*?)

    "))) + "
    .*?

    (.*?)

    "))) result = find_re(html, "
    (.*?)
    ") if 'Blu-Ray' in result or 'Essential Art House DVD' in result: @@ -56,47 +67,46 @@ def get_data(id, timeout=ox.cache.cache_timeout, get_imdb=False): data["posters"] = [result] else: html_ = read_url(result, unicode=True) - result = find_re(html_, '(.*?)' % id) + result = find_re(html_, '//www.criterion.com/films/%s.*?">(.*?)' % id) result = find_re(result, "src=\"(.*?)\"") if result: data["posters"] = [result.replace("_w100", "")] else: data["posters"] = [] data['posters'] = [re.sub('(\?\d+)$', '', p) for p in data['posters']] + data['posters'] = [p for p in data['posters'] if p] + + posters = find_re(html, '
    (.*?)
    ') + for poster in re.compile('\"Film>> get_video_url('http://www.dailymotion.com/relevance/search/priere%2Bpour%2Brefuznik/video/x3opar_priere-pour-refuznik-1-jeanluc-goda_shortfilms').split('?auth')[0] - 'http://www.dailymotion.com/cdn/FLV-320x240/video/x3opar_priere-pour-refuznik-1-jean-luc-god_shortfilms.flv' - - >>> get_video_url('http://www.dailymotion.com/relevance/search/priere%2Bpour%2Brefuznik/video/x3ou94_priere-pour-refuznik-2-jeanluc-goda_shortfilms').split('?auth')[0] - 'http://www.dailymotion.com/cdn/FLV-320x240/video/x3ou94_priere-pour-refuznik-2-jean-luc-god_shortfilms.flv' - ''' - data = read_url(url) - video = re.compile('''video", "(.*?)"''').findall(data) - for v in video: - v = unquote(v).split('@@')[0] - return v - return '' +# -*- coding: utf-8 -*- +# vi:si:et:sw=4:sts=4:ts=4 +import re +from six.moves.urllib.parse import unquote +from ox.cache import read_url + + +def get_video_url(url): + ''' + >>> get_video_url('http://www.dailymotion.com/relevance/search/priere%2Bpour%2Brefuznik/video/x3opar_priere-pour-refuznik-1-jeanluc-goda_shortfilms').split('?auth')[0] + 'http://www.dailymotion.com/cdn/FLV-320x240/video/x3opar_priere-pour-refuznik-1-jean-luc-god_shortfilms.flv' + + >>> get_video_url('http://www.dailymotion.com/relevance/search/priere%2Bpour%2Brefuznik/video/x3ou94_priere-pour-refuznik-2-jeanluc-goda_shortfilms').split('?auth')[0] + 'http://www.dailymotion.com/cdn/FLV-320x240/video/x3ou94_priere-pour-refuznik-2-jean-luc-god_shortfilms.flv' + ''' + data = read_url(url) + video = re.compile('''video", "(.*?)"''').findall(data) + for v in video: + v = unquote(v).split('@@')[0] + return v + return '' diff --git a/Shared/lib/python3.4/site-packages/ox/web/duckduckgo.py b/Shared/lib/python3.4/site-packages/ox/web/duckduckgo.py index a8f7869..b4b3494 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/duckduckgo.py +++ b/Shared/lib/python3.4/site-packages/ox/web/duckduckgo.py @@ -6,17 +6,25 @@ from six.moves import urllib import ox 
from ox import strip_tags, decode_html from ox.cache import read_url +import lxml.html def find(query, timeout=ox.cache.cache_timeout): + """ + Returns tuples with title, url, description + """ if not isinstance(query, bytes): query = query.encode('utf-8') params = urllib.parse.urlencode({'q': query}) url = 'http://duckduckgo.com/html/?' + params data = read_url(url, timeout=timeout).decode('utf-8') + doc = lxml.html.document_fromstring(data) results = [] - regex = '(.*?).*?
    (.*?)
    ' - for r in re.compile(regex, re.DOTALL).findall(data): - results.append((strip_tags(decode_html(r[1])), r[0], strip_tags(decode_html(r[2])))) + for e in doc.xpath("//a[contains(@class, 'result__a')]"): + url = e.attrib['href'] + if 'uddg=' in url: + url = urllib.parse.unquote(url.split('&uddg=')[-1]) + title = e.text_content() + description = '' + results.append((title, url, description)) return results - diff --git a/Shared/lib/python3.4/site-packages/ox/web/epguides.py b/Shared/lib/python3.4/site-packages/ox/web/epguides.py index bb0e551..65670e7 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/epguides.py +++ b/Shared/lib/python3.4/site-packages/ox/web/epguides.py @@ -7,7 +7,7 @@ import time from ox import strip_tags, find_re from ox.cache import read_url -import google +from . import google def get_show_url(title): diff --git a/Shared/lib/python3.4/site-packages/ox/web/google.py b/Shared/lib/python3.4/site-packages/ox/web/google.py index fc1f420..72aa32f 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/google.py +++ b/Shared/lib/python3.4/site-packages/ox/web/google.py @@ -21,11 +21,11 @@ def find(query, max_results=DEFAULT_MAX_RESULTS, timeout=DEFAULT_TIMEOUT): """ Return max_results tuples with title, url, description - >>> find("The Matrix site:imdb.com", 1)[0][0] - u'The Matrix (1999) - IMDb' + >>> str(find("The Matrix site:imdb.com", 1)[0][0]) + 'The Matrix (1999) - IMDb' - >>> find("The Matrix site:imdb.com", 1)[0][1] - u'http://www.imdb.com/title/tt0133093/' + >>> str(find("The Matrix site:imdb.com", 1)[0][1]) + 'http://www.imdb.com/title/tt0133093/' """ results = [] offset = 0 diff --git a/Shared/lib/python3.4/site-packages/ox/web/imdb.py b/Shared/lib/python3.4/site-packages/ox/web/imdb.py index cc0cc48..c810b80 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/imdb.py +++ b/Shared/lib/python3.4/site-packages/ox/web/imdb.py @@ -7,7 +7,7 @@ import time import unicodedata from six.moves.urllib.parse import urlencode -from six import 
string_types +from six import text_type, string_types from .. import find_re, strip_tags, decode_html from .. import cache @@ -18,22 +18,95 @@ from . import duckduckgo from ..utils import datetime from ..geo import normalize_country_name -def read_url(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_timeout, valid=None, unicode=False): + +def prepare_url(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_timeout, valid=None, unicode=False): headers = headers.copy() + # https://webapps.stackexchange.com/questions/11003/how-can-i-disable-reconfigure-imdbs-automatic-geo-location-so-it-does-not-defau + headers['X-Forwarded-For'] = '72.21.206.80' + return url, data, headers, timeout, unicode + +def read_url(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_timeout, valid=None, unicode=False): + url, data, headers, timeout, unicode = prepare_url(url, data, headers, timeout, valid, unicode) return cache.read_url(url, data, headers, timeout, unicode=unicode) +def delete_url(url, data=None, headers=cache.DEFAULT_HEADERS): + url, data, headers, timeout, unicode = prepare_url(url, data, headers) + cache.store.delete(url, data, headers) + def get_url(id): return "http://www.imdb.com/title/tt%s/" % id + +def reference_section(id): + return { + 'page': 'reference', + 're': [ + '

    '.format(id=id), + '' + label + '.*?', + '(.*?)' + ], + 'type': 'list', + } + if more: + conditions['re'] += more + return conditions + +def zebra_table(label, more=None, type='string'): + conditions = { + 'page': 'reference', + 're': [ + '_label">' + label + '.*?(.*?)', + ], + 'type': type, + } + if more: + conditions['re'] += more + return conditions + +def parse_aspectratio(value): + r = value + if ':' in value: + r = value.split(':') + n = r[0] + d = r[1].strip().split(' ')[0] + try: + if float(d): + value = str(float(n) / float(d)) + else: + value = str(float(n)) + except: + print('failed to parse aspect: %s' % value) + else: + value = '.'.join(value.strip().split('.')[:2]) + return value + +''' +'posterIds': { + 'page': 'posters', + 're': '/unknown-thumbnail/media/rm(.*?)/tt', + 'type': 'list' +}, +''' + class Imdb(SiteParser): ''' - >>> Imdb('0068646')['title'] - u'The Godfather' + >>> Imdb('0068646')['title'] == text_type(u'The Godfather') + True - >>> Imdb('0133093')['title'] - u'The Matrix' + >>> Imdb('0133093')['title'] == text_type(u'The Matrix') + True ''' - regex = { + regex = { 'alternativeTitles': { 'page': 'releaseinfo', 're': [ @@ -41,98 +114,49 @@ class Imdb(SiteParser): "td>(.*?).*?(.*?)" ], 'type': 'list' - }, 'aspectratio': { - 'page': 'combined', - 're': 'Aspect Ratio:

    ([\d\.]+)', + 'page': 'reference', + 're': [ + 'Aspect Ratio.*?ipl-inline-list__item">\s+([\d\.\:\ ]+)', + parse_aspectratio, + ], 'type': 'float', }, - 'budget': { - 'page': 'business', - 're': [ - '
    Budget
    \s*?\$(.*?).*?>(.*?)
    .*?(.*?)', - lambda ll: [strip_tags(l) for l in ll] - ], - 'type': 'list' - }, - 'cinematographer': { - 'page': 'combined', - 're': [ - lambda data: data.split('Series Crew')[0], - 'Cinematography by(.*?)', - '(.*?)' + ' (.*?)
    ', + '.*?>(.*?).*?(.*?)', + lambda ll: [strip_tags(l) for l in ll] if isinstance(ll, list) else strip_tags(ll) ], 'type': 'list' }, + 'cinematographer': reference_section('cinematographers'), 'connections': { 'page': 'movieconnections', 're': '

    (.*?)

    (.*?)(<\/div>\n
    Country:
    .*?
    ', - #'(.*?)', #links changed to work with existing caches, just take all links - '(.*?)', - ], - 'type': 'list' - }, + 'country': zebra_list('Country', more=['(.*?)']), 'creator': { - 'page': 'combined', + 'page': '', 're': [ - '
    Creator.?:
    .*?
    (.*?)
    ', - '(.*?)' - ], - 'type': 'list' - }, - '_director': { - 'page': 'combined', - 're': [ - '
    Director:
    .*?
    (.*?)
    ', - '(.*?)' - ], - 'type': 'list' - }, - 'composer': { - 'page': 'combined', - 're': [ - lambda data: data.split('Series Crew')[0], - 'Original Music by(.*?)', - '.*?Creator.?:(.*?)
    ', + '.*?(.*?)', + 'page': 'reference', + 're': '

    (.*?)<', 'type': 'string' }, 'filmingLocations': { @@ -143,71 +167,44 @@ class Imdb(SiteParser): ], 'type': 'list' }, - 'genre': { - 'page': 'combined', - 're': [ - '

    Genre:
    (.*?)(.*?)
    ' - ], - 'type': 'list' - }, - 'gross': { - 'page': 'business', - 're': [ - '
    Gross
    \s*?\$(.*?)(.*?)']), + 'gross': zebra_table('Cumulative Worldwide Gross', more=[ + lambda data: find_re(decode_html(data).replace(',', ''), '\d+') + ], type='int'), 'keyword': { 'page': 'keywords', 're': '
    Language:
    .*?
    ', - #'(.*?)', #links changed to work with existing caches, just take all links - '(.*?)', - ], - 'type': 'list' - }, - 'summary': { - 'page': 'plotsummary', - 're': '

    (.*?)<\/p>', + 'language': zebra_list('Language', more=['(.*?)']), + 'originalTitle': { + 'page': 'releaseinfo', + 're': '\(original title\)\s*(.*?)', 'type': 'string' }, + 'summary': zebra_table('Plot Summary', more=[ + '

    (.*?)', 'type': 'string' }, - 'posterIds': { - 'page': 'posters', - 're': '/unknown-thumbnail/media/rm(.*?)/tt', - 'type': 'list' - }, - 'producer': { - 'page': 'combined', - 're': [ - lambda data: data.split('Series Crew')[0], - 'Produced by(.*?)', - '(.*?)' - ], - 'type': 'list' - }, + 'producer': reference_section('producers'), 'productionCompany': { - 'page': 'combined', + 'page': 'reference', 're': [ - 'Production Companies

      (.*?)
    ', + 'Production Companies.*?', '(.*?)' ], 'type': 'list' }, 'rating': { - 'page': 'combined', - 're': '
    .*?([\d,.]+?)/10', + 'page': 'reference', + 're': [ + '
    (.*?)
    ', + 'ipl-rating-star__rating">([\d,.]+?)', + ], 'type': 'float' }, 'releasedate': { @@ -218,64 +215,55 @@ class Imdb(SiteParser): ], 'type': 'list' }, - 'reviews': { - 'page': 'externalreviews', - 're': [ - '
      (.*?)
    ', - '
  • (.*?)
  • ' - ], - 'type': 'list' - }, - 'runtime': { - 'page': 'combined', - 're': '
    Runtime:
    .*?([0-9]+ sec|[0-9]+ min).*?
    ', - 'type': 'string' - }, - 'color': { - 'page': 'combined', - 're': [ - '
    Color:
    (.*?)
    ', - '(.*?)' - ], - 'type': 'list' - }, - 'sound': { - 'page': 'combined', - 're': [ - '
    Sound Mix:
    (.*?)
    ', - '(.*?)' - ], - 'type': 'list' - }, + #FIXME using some /offsite/ redirect now + #'reviews': { + # 'page': 'externalreviews', + # 're': [ + # '
      (.*?)
    ', + # '
  • .*?(.*?).*?
  • ' + # ], + # 'type': 'list' + #}, + 'runtime': zebra_list('Runtime'), + 'color': zebra_list('Color', more=[ + '([^(<]+)', + lambda r: r[0] if isinstance(r, list) else r, + strip_tags + ]), + 'sound': zebra_list('Sound Mix', more=[ + '([^(<]+)', + lambda r: r[0] if isinstance(r, list) else r, + strip_tags + ]), 'season': { - 'page': 'combined', + 'page': 'reference', 're': [ - '
    Original Air Date:
    .*?
    (.*?)
    ', - '\(Season (\d+), Episode \d+\)', + '
      (.*?)
    ', + 'Season (\d+)', ], 'type': 'int' }, 'episode': { - 'page': 'combined', + 'page': 'reference', 're': [ - '
    Original Air Date:
    .*?
    (.*?)
    ', - '\(Season \d+, Episode (\d+)\)', + '
      (.*?)
    ', + 'Episode (\d+)', ], 'type': 'int' }, 'series': { - 'page': 'combined', - 're': '
    TV Series:
    .*?.*?(TV series|TV mini-series) ', + 'page': 'reference', + 're': 'property=\'og:title\'.*?content=".*?(TV series|TV mini-series).*?"', 'type': 'string' }, 'title': { - 'page': 'combined', - 're': '

    (.*?) ', + 'page': 'releaseinfo', + 're': 'h3 itemprop="name">.*?>(.*?)', 'type': 'string' }, 'trivia': { @@ -287,38 +275,45 @@ class Imdb(SiteParser): 'type': 'list', }, 'votes': { - 'page': 'combined', - 're': '([\d,]*?) votes', + 'page': 'reference', + 're': [ + 'class="ipl-rating-star__total-votes">\((.*?)\)', + lambda r: r.replace(',', '') + ], 'type': 'string' }, - 'writer': { - 'page': 'combined', + 'writer': reference_section('writers'), + 'year': { + 'page': 'reference', 're': [ - lambda data: data.split('Series Crew')[0], - 'Writing credits(.*?)', - '(.*?)' + '(.*?)', + '(\d+)', + ], + 'type': 'int' + }, + 'credits': { + 'page': 'fullcredits', + 're': [ + lambda data: data.split('(.*?)

    .*?()', + lambda data: [d for d in data if d] ], 'type': 'list' }, - 'year': { - 'page': 'combined', - 're': '="og:title" content="[^"]*?\((\d{4}).*?"', - 'type': 'int' - } } def read_url(self, url, timeout): - if not url in self._cache: + if url not in self._cache: self._cache[url] = read_url(url, timeout=timeout, unicode=True) return self._cache[url] def __init__(self, id, timeout=-1): - #use akas.imdb.com to always get original title: - #http://www.imdb.com/help/show_leaf?titlelanguagedisplay - self.baseUrl = "http://akas.imdb.com/title/tt%s/" % id + # use akas.imdb.com to always get original title: + # http://www.imdb.com/help/show_leaf?titlelanguagedisplay + self.baseUrl = "http://www.imdb.com/title/tt%s/" % id super(Imdb, self).__init__(timeout) - - url = self.baseUrl + 'combined' + + url = self.baseUrl + 'reference' page = self.read_url(url, timeout=-1) if 'IMDb: Page not found' in page \ or 'The requested URL was not found on our server.' in page: @@ -332,119 +327,15 @@ class Imdb(SiteParser): isinstance(self['alternativeTitles'][0], string_types): self['alternativeTitles'] = [self['alternativeTitles']] + for key in ('country', 'genre', 'language', 'sound', 'color'): + if key in self: + self[key] = [x[0] if len(x) == 1 and isinstance(x, list) else x for x in self[key]] + self[key] = list(filter(lambda x: x.lower() != 'home', self[key])) + #normalize country names if 'country' in self: self['country'] = [normalize_country_name(c) or c for c in self['country']] - if 'sound' in self: - self['sound'] = list(set(self['sound'])) - - types = {} - stop_words = [ - 'alternative spelling', - 'alternative title', - 'alternative transliteration', - 'closing credits title', - 'complete title', - 'IMAX version', - 'informal short title', - 'International (Spanish title)', - 'Japan (imdb display title)', - 'longer version', - 'new title', - 'original subtitled version', - 'pre-release title', - 'promotional abbreviation', - 'recut version', - 'reissue title', - 
'restored version', - 'script title', - 'short title', - '(subtitle)', - 'TV title', - 'working title', - 'World-wide (Spanish title)', - ] - #ignore english japanese titles - #for movies that are not only from japan - if ['Japan'] != self.get('country', []): - stop_words += [ - 'Japan (English title)' - ] - for t in self.get('alternativeTitles', []): - for type in t[0].split('/'): - type = type.strip() - stop_word = False - for key in stop_words: - if key in type: - stop_word = True - break - if not stop_word: - if not type in types: - types[type] = [] - types[type].append(t[1]) - titles = {} - for type in types: - for title in types[type]: - if not title in titles: - titles[title] = [] - titles[title].append(type) - def select_title(type): - title = types[type][0] - count = 0 - if len(types[type]) > 1: - for t in types[type]: - if len(titles[t]) > count: - count = len(titles[t]) - title = t - return title - - #FIXME: does work in python2.6, possible to import from __future__? - #types = {type: select_title(type) for type in types} - _types = {} - for type in types: - _types[type] = select_title(type) - types = _types - - regexps = [ - "^.+ \(imdb display title\) \(English title\)$", - "^USA \(imdb display title\)$", - "^International \(English title\)$", - "^International \(English title\)$", - "^UK \(imdb display title\)$", - "^International \(.+\) \(English title\)$", - "^World-wide \(English title\)$", - ] - if 'Hong Kong' in self.get('country', []): - regexps += [ - "Hong Kong \(English title\)" - ] - english_countries = ( - 'USA', 'UK', 'United States', 'United Kingdom', - 'Australia', 'New Zealand' - ) - if not filter(lambda c: c in english_countries, self.get('country', [])): - regexps += [ - "^[^(]+ \(English title\)$", - "^.+ \(.+\) \(English title\)$", - "^USA$", - "^UK$", - "^USA \(.+\)$", - "^UK \(.+\)$", - "^Australia \(.+\)$", - "World-wide \(English title\)", - "\(literal English title\)", - "^International \(.+ title\)$", - "^International \(.+\) 
\(.+ title\)$", - ] - for regexp in regexps: - for type in types: - if re.compile(regexp).findall(type): - #print types[type], type - self['internationalTitle'] = types[type] - break - if 'internationalTitle' in self: - break def cleanup_title(title): if title.startswith('"') and title.endswith('"'): @@ -454,44 +345,43 @@ class Imdb(SiteParser): title = re.sub('\(\#[.\d]+\)', '', title) return title.strip() - for t in ('title', 'internationalTitle'): + for t in ('title', 'originalTitle'): if t in self: self[t] = cleanup_title(self[t]) - if 'internationalTitle' in self and \ - self.get('title', '').lower() == self['internationalTitle'].lower(): - del self['internationalTitle'] - if 'alternativeTitles' in self: alt = {} for t in self['alternativeTitles']: title = cleanup_title(t[1]) - if title not in (self.get('title'), self.get('internationalTitle')): + if title.lower() not in (self.get('title', '').lower(), self.get('originalTitle', '').lower()): if title not in alt: alt[title] = [] for c in t[0].split('/'): - if not '(working title)' in c: - c = c.replace('International', '').replace('World-wide', '').split('(')[0].strip() - if c: - alt[title].append(c) + for cleanup in ('International', '(working title)', 'World-wide'): + c = c.replace(cleanup, '') + c = c.split('(')[0].strip() + if c: + alt[title].append(c) self['alternativeTitles'] = [] for t in sorted(alt, key=lambda a: sorted(alt[a])): - countries = sorted([normalize_country_name(c) or c for c in alt[t]]) + countries = sorted(set([normalize_country_name(c) or c for c in alt[t]])) self['alternativeTitles'].append((t, countries)) if not self['alternativeTitles']: del self['alternativeTitles'] - if 'internationalTitle' in self: - self['originalTitle'] = self['title'] - self['title'] = self.pop('internationalTitle') - if 'runtime' in self and self['runtime']: - if 'min' in self['runtime']: base=60 - else: base=1 + if isinstance(self['runtime'], list): + self['runtime'] = self['runtime'][0] + if 'min' in 
self['runtime']: + base = 60 + else: + base = 1 self['runtime'] = int(find_re(self['runtime'], '([0-9]+)')) * base if 'runtime' in self and not self['runtime']: del self['runtime'] - if 'votes' in self: self['votes'] = self['votes'].replace(',', '') + + if 'sound' in self: + self['sound'] = list(sorted(set(self['sound']))) if 'cast' in self: if isinstance(self['cast'][0], string_types): @@ -499,6 +389,7 @@ class Imdb(SiteParser): self['actor'] = [c[0] for c in self['cast']] def cleanup_character(c): c = c.replace('(uncredited)', '').strip() + c = re.sub('\s+', ' ', c) return c self['cast'] = [{'actor': x[0], 'character': cleanup_character(x[1])} for x in self['cast']] @@ -522,18 +413,8 @@ class Imdb(SiteParser): return r cc[rel] = list(map(get_conn, re.compile('(.*?)(.*?)<\/div', re.DOTALL).findall(data))) - self['connections'] = cc - for key in ('country', 'genre'): - if key in self: - self[key] = list(filter(lambda x: x.lower() != 'home', self[key])) - #0092999 - if '_director' in self: - if 'series' in self or 'isSeries' in self: - self['creator'] = self.pop('_director') - else: - del self['_director'] if 'isSeries' in self: del self['isSeries'] self['isSeries'] = True @@ -555,7 +436,7 @@ class Imdb(SiteParser): if 'director' in self: self['episodeDirector'] = self['director'] - if not 'creator' in series and 'director' in series: + if 'creator' not in series and 'director' in series: series['creator'] = series['director'] if len(series['creator']) > 10: series['creator'] = series['director'][:1] @@ -566,7 +447,7 @@ class Imdb(SiteParser): if 'year' in series: self['seriesYear'] = series['year'] - if not 'year' in self: + if 'year' not in self: self['year'] = series['year'] if 'year' in self: @@ -620,11 +501,48 @@ class Imdb(SiteParser): self['summary'] = self['summary'][0] self['summary'] = self['summary'].split('(.*?).*?(.*?).*?(.*?)', re.DOTALL).findall(d[1]) + ] + ] for d in self['credits'] if d + ] + credits = [c for c in credits if c[1]] + + 
self['credits'] = [] + self['lyricist'] = [] + self['singer'] = [] + for department, crew in credits: + department = department.replace('(in alphabetical order)', '').strip() + for c in crew: + name = c[0] + roles = c[1] + self['credits'].append({ + 'name': name, + 'roles': roles, + 'deparment': department + }) + if department == 'Music Department': + if 'lyricist' in roles: + self['lyricist'].append(name) + if 'playback singer' in roles: + self['singer'].append(name) + if not self['credits']: + del self['credits'] + class ImdbCombined(Imdb): def __init__(self, id, timeout=-1): _regex = {} for key in self.regex: - if self.regex[key]['page'] in ('combined', 'releaseinfo'): + if self.regex[key]['page'] in ('releaseinfo', 'reference'): _regex[key] = self.regex[key] self.regex = _regex super(ImdbCombined, self).__init__(id, timeout) @@ -640,25 +558,25 @@ def get_movie_by_title(title, timeout=-1): If there is more than one film with that title for the year Title (Year/I) - >>> get_movie_by_title(u'"Father Knows Best" (1954) {(#5.34)}') - u'1602860' + >>> str(get_movie_by_title(u'"Father Knows Best" (1954) {(#5.34)}')) + '1602860' - >>> get_movie_by_title(u'The Matrix (1999)') - u'0133093' + >>> str(get_movie_by_title(u'The Matrix (1999)')) + '0133093' - >>> get_movie_by_title(u'Little Egypt (1951)') - u'0043748' + >>> str(get_movie_by_title(u'Little Egypt (1951)')) + '0043748' + + >>> str(get_movie_by_title(u'Little Egypt (1897/I)')) + '0214882' - >>> get_movie_by_title(u'Little Egypt (1897/I)') - u'0214882' - >>> get_movie_by_title(u'Little Egypt') None - >>> get_movie_by_title(u'"Dexter" (2006) {Father Knows Best (#1.9)}') - u'0866567' + >>> str(get_movie_by_title(u'"Dexter" (2006) {Father Knows Best (#1.9)}')) + '0866567' ''' - params = {'s':'tt','q': title} + params = {'s': 'tt', 'q': title} if not isinstance(title, bytes): try: params['q'] = unicodedata.normalize('NFKC', params['q']).encode('latin-1') @@ -676,20 +594,21 @@ def get_movie_by_title(title, timeout=-1): 
def get_movie_id(title, director='', year='', timeout=-1): ''' - >>> get_movie_id('The Matrix') - u'0133093' + >>> str(get_movie_id('The Matrix')) + '0133093' - >>> get_movie_id('2 or 3 Things I Know About Her', 'Jean-Luc Godard') - u'0060304' + >>> str(get_movie_id('2 or 3 Things I Know About Her', 'Jean-Luc Godard')) + '0060304' - >>> get_movie_id('2 or 3 Things I Know About Her', 'Jean-Luc Godard', '1967') - u'0060304' + >>> str(get_movie_id('2 or 3 Things I Know About Her', 'Jean-Luc Godard', '1967')) + '0060304' - >>> get_movie_id(u"Histoire(s) du cinema: Le controle de l'univers", 'Jean-Luc Godard') - u'0179214' + >>> str(get_movie_id(u"Histoire(s) du cinema: Le controle de l'univers", u'Jean-Luc Godard')) + '0179214' + + >>> str(get_movie_id(u"Histoire(s) du cinéma: Le contrôle de l'univers", u'Jean-Luc Godard')) + '0179214' - >>> get_movie_id(u"Histoire(s) du cinéma: Le contrôle de l'univers", 'Jean-Luc Godard') - u'0179214' ''' imdbId = { (u'Le jour se l\xe8ve', u'Marcel Carn\xe9'): '0031514', @@ -729,7 +648,7 @@ def get_movie_id(title, director='', year='', timeout=-1): }.get((title, director), None) if imdbId: return imdbId - params = {'s':'tt','q': title} + params = {'s': 'tt', 'q': title} if director: params['q'] = u'"%s" %s' % (title, director) if year: @@ -756,8 +675,8 @@ def get_movie_id(title, director='', year='', timeout=-1): if results: return results[0] - #print (title, director), ": ''," - #print google_query + #print((title, director), ": '',") + #print(google_query) #results = google.find(google_query, timeout=timeout) results = duckduckgo.find(google_query, timeout=timeout) if results: @@ -772,15 +691,12 @@ def get_movie_poster(imdbId): ''' >>> get_movie_poster('0133093') 'http://ia.media-imdb.com/images/M/MV5BMjEzNjg1NTg2NV5BMl5BanBnXkFtZTYwNjY3MzQ5._V1._SX338_SY475_.jpg' - - >>> get_movie_poster('0994352') - 'http://ia.media-imdb.com/images/M/MV5BMjA3NzMyMzU1MV5BMl5BanBnXkFtZTcwNjc1ODUwMg@@._V1._SX594_SY755_.jpg' ''' info = 
ImdbCombined(imdbId) if 'posterId' in info: - url = "http://www.imdb.com/media/rm%s/tt%s" % (info['posterId'], imdbId) - data = read_url(url).decode('utf-8', 'ignore') - poster = find_re(data, 'img.*?id="primary-img".*?src="(.*?)"') + poster = info['posterId'] + if '@._V' in poster: + poster = poster.split('@._V')[0] + '@.jpg' return poster elif 'series' in info: return get_movie_poster(info['series']) @@ -793,7 +709,7 @@ def get_episodes(imdbId, season=None): url += '?season=%d' % season data = cache.read_url(url) for e in re.compile('
    .*?
    S(\d+), Ep(\d+)<\/div>\n<\/div>', re.DOTALL).findall(data): - episodes['S%02dE%02d' %(int(e[1]), int(e[2]))] = e[0] + episodes['S%02dE%02d' % (int(e[1]), int(e[2]))] = e[0] else: data = cache.read_url(url) match = re.compile('Season (\d+)').findall(data) @@ -804,9 +720,11 @@ def get_episodes(imdbId, season=None): def max_votes(): url = 'http://www.imdb.com/search/title?num_votes=500000,&sort=num_votes,desc' - data = cache.read_url(url) - votes = max([int(v.replace(',', '')) - for v in re.compile('([\d,]+)').findall(data)]) + data = cache.read_url(url).decode('utf-8', 'ignore') + votes = max([ + int(v.replace(',', '')) + for v in re.compile('>> get_data('1991/silence_of_the_lambs')['imdbId'] - u'0102926' + >>> str(get_data('1991/silence_of_the_lambs')['imdbId']) + '0102926' - >>> get_data('1991/silence_of_the_lambs')['posters'][0] - u'http://www.impawards.com/1991/posters/silence_of_the_lambs_ver1.jpg' + >>> str(get_data('1991/silence_of_the_lambs')['posters'][0]) + 'http://www.impawards.com/1991/posters/silence_of_the_lambs_ver1.jpg' - >>> get_data('1991/silence_of_the_lambs')['url'] - u'http://www.impawards.com/1991/silence_of_the_lambs_ver1.html' + >>> str(get_data('1991/silence_of_the_lambs')['url']) + 'http://www.impawards.com/1991/silence_of_the_lambs_ver1.html' ''' data = { 'url': get_url(id) } - html = read_url(data['url'], unicode=True) + html = read_url(data['url']) data['imdbId'] = find_re(html, 'imdb.com/title/tt(\d{7})') if not data['imdbId']: data['imdbId'] = _id_map.get(id, '') @@ -37,16 +45,15 @@ def get_data(id): for result in results: result = result.replace('_xlg.html', '.html') url = 'http://www.impawards.com/%s/%s' % (data['year'], result) - html = read_url(url, unicode=True) + html = read_url(url) result = find_re(html, '', re.DOTALL).findall(html) for result in results: url = 'http://impawards.com/%s' % result ids.append(get_id(url)) return set(ids) - #get all - html = read_url('http://www.impawards.com/archives/latest.html', timeout = 
60*60, unicode=True) + # get all + html = read_url('http://www.impawards.com/archives/latest.html', timeout=60*60) pages = int(find_re(html, '')) + 1 for page in range(pages, 0, -1): for id in get_ids(page): - if not id in ids: + if id not in ids: ids.append(id) return ids + def get_url(id): url = u"http://www.impawards.com/%s.html" % id - html = read_url(url, unicode=True) + html = read_url(url) if find_re(html, "No Movie Posters on This Page"): url = u"http://www.impawards.com/%s_ver1.html" % id return url diff --git a/Shared/lib/python3.4/site-packages/ox/web/metacritic.py b/Shared/lib/python3.4/site-packages/ox/web/metacritic.py index 0e43c80..2ecded5 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/metacritic.py +++ b/Shared/lib/python3.4/site-packages/ox/web/metacritic.py @@ -28,22 +28,32 @@ def get_show_url(title): def get_data(url): data = read_url(url, unicode=True) doc = document_fromstring(data) - score = filter(lambda s: s.attrib.get('property') == 'v:average', - doc.xpath('//span[@class="score_value"]')) + score = [s for s in doc.xpath('//span[@class="score_value"]') + if s.attrib.get('property') == 'v:average'] if score: score = int(score[0].text) else: score = -1 - authors = [a.text - for a in doc.xpath('//div[@class="review_content"]//div[@class="author"]//a')] - sources = [d.text - for d in doc.xpath('//div[@class="review_content"]//div[@class="source"]/a')] - reviews = [d.text - for d in doc.xpath('//div[@class="review_content"]//div[@class="review_body"]')] - scores = [int(d.text.strip()) - for d in doc.xpath('//div[@class="review_content"]//div[contains(@class, "critscore")]')] - urls = [a.attrib['href'] - for a in doc.xpath('//div[@class="review_content"]//a[contains(@class, "external")]')] + authors = [ + a.text + for a in doc.xpath('//div[@class="review_content"]//div[@class="author"]//a') + ] + sources = [ + d.text + for d in doc.xpath('//div[@class="review_content"]//div[@class="source"]/a') + ] + reviews = [ + d.text + for d in 
doc.xpath('//div[@class="review_content"]//div[@class="review_body"]') + ] + scores = [ + int(d.text.strip()) + for d in doc.xpath('//div[@class="review_content"]//div[contains(@class, "critscore")]') + ] + urls = [ + a.attrib['href'] + for a in doc.xpath('//div[@class="review_content"]//a[contains(@class, "external")]') + ] metacritics = [] for i in range(len(authors)): @@ -54,7 +64,7 @@ def get_data(url): 'quote': strip_tags(reviews[i]).strip(), 'score': scores[i], }) - + return { 'critics': metacritics, 'id': get_id(url), diff --git a/Shared/lib/python3.4/site-packages/ox/web/mininova.py b/Shared/lib/python3.4/site-packages/ox/web/mininova.py deleted file mode 100644 index 799390c..0000000 --- a/Shared/lib/python3.4/site-packages/ox/web/mininova.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -# vi:si:et:sw=4:sts=4:ts=4 -from datetime import datetime -import re -import socket -from six.moves.urllib.parse import quote - -from ox.cache import read_url -from ox import find_re, cache, strip_tags, decode_html, get_torrent_info, int_value, normalize_newlines -from ox.normalize import normalize_imdbid -import ox - -from torrent import Torrent - - -def _parse_results_page(data, max_results=10): - results=[] - regexp = '''(.*?)(.*?)(.*?).*?.*?''' - for row in re.compile(regexp, re.DOTALL).findall(data): - torrentDate = row[0] - torrentExtra = row[1] - torrentId = row[2] - torrentTitle = decode_html(row[3]).strip() - torrentLink = "http://www.mininova.org/tor/" + torrentId - privateTracker = 'priv.gif' in torrentExtra - if not privateTracker: - results.append((torrentTitle, torrentLink, '')) - return results - -def find_movie(query=None, imdb=None, max_results=10): - '''search for torrents on mininova - ''' - if imdb: - url = "http://www.mininova.org/imdb/?imdb=%s" % normalize_imdbid(imdb) - else: - url = "http://www.mininova.org/search/%s/seeds" % quote(query) - data = read_url(url, unicode=True) - return _parse_results_page(data, max_results) - -def 
get_id(mininovaId): - mininovaId = unicode(mininovaId) - d = find_re(mininovaId, "/(\d+)") - if d: - return d - mininovaId = mininovaId.split('/') - if len(mininovaId) == 1: - return mininovaId[0] - else: - return mininovaId[-1] - -def exists(mininovaId): - mininovaId = get_id(mininovaId) - data = ox.net.read_url("http://www.mininova.org/tor/%s" % mininovaId) - if not data or 'Torrent not found...' in data: - return False - if 'tracker of this torrent requires registration.' in data: - return False - return True - -def get_data(mininovaId): - _key_map = { - 'by': u'uploader', - } - mininovaId = get_id(mininovaId) - torrent = dict() - torrent[u'id'] = mininovaId - torrent[u'domain'] = 'mininova.org' - torrent[u'comment_link'] = "http://www.mininova.org/tor/%s" % mininovaId - torrent[u'torrent_link'] = "http://www.mininova.org/get/%s" % mininovaId - torrent[u'details_link'] = "http://www.mininova.org/det/%s" % mininovaId - - data = read_url(torrent['comment_link'], unicode=True) + read_url(torrent['details_link'], unicode=True) - if '

    Torrent not found...

    ' in data: - return None - - for d in re.compile('

    .(.*?):(.*?)

    ', re.DOTALL).findall(data): - key = d[0].lower().strip() - key = _key_map.get(key, key) - value = decode_html(strip_tags(d[1].strip())) - torrent[key] = value - - torrent[u'title'] = find_re(data, '(.*?):.*?') - torrent[u'imdbId'] = find_re(data, 'title/tt(\d{7})') - torrent[u'description'] = find_re(data, '
    (.*?)
    ') - if torrent['description']: - torrent['description'] = normalize_newlines(decode_html(strip_tags(torrent['description']))).strip() - t = read_url(torrent[u'torrent_link']) - torrent[u'torrent_info'] = get_torrent_info(t) - return torrent - -class Mininova(Torrent): - ''' - >>> Mininova('123') - {} - >>> Mininova('1072195')['infohash'] - '72dfa59d2338e4a48c78cec9de25964cddb64104' - ''' - def __init__(self, mininovaId): - self.data = get_data(mininovaId) - if not self.data: - return - Torrent.__init__(self) - ratio = self.data['share ratio'].split(',') - self['seeder'] = -1 - self['leecher'] = -1 - if len(ratio) == 2: - val = int_value(ratio[0].replace(',','').strip()) - if val: - self['seeder'] = int(val) - val = int_value(ratio[1].replace(',','').strip()) - if val: - self['leecher'] = int(val) - val = int_value(self.data['downloads'].replace(',','').strip()) - if val: - self['downloaded'] = int(val) - else: - self['downloaded'] = -1 - published = self.data['added on'] - published = published.split(' +')[0] - self['published'] = datetime.strptime(published, "%a, %d %b %Y %H:%M:%S") - diff --git a/Shared/lib/python3.4/site-packages/ox/web/opensubtitles.py b/Shared/lib/python3.4/site-packages/ox/web/opensubtitles.py index 7684402..2346a7d 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/opensubtitles.py +++ b/Shared/lib/python3.4/site-packages/ox/web/opensubtitles.py @@ -2,12 +2,12 @@ # vi:si:et:sw=4:sts=4:ts=4 import re -import feedparser from ox.cache import read_url from ox import find_re, strip_tags from ox.iso import langCode2To3, langTo3Code def find_subtitles(imdb, parts = 1, language = "eng"): + import feedparser if len(language) == 2: language = langCode2To3(language) elif len(language) != 3: diff --git a/Shared/lib/python3.4/site-packages/ox/web/rottentomatoes.py b/Shared/lib/python3.4/site-packages/ox/web/rottentomatoes.py index fd3265d..605f313 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/rottentomatoes.py +++ 
b/Shared/lib/python3.4/site-packages/ox/web/rottentomatoes.py @@ -32,7 +32,7 @@ def get_data(url): r['summary'] = get_og(data, 'description') meter = re.compile('(.*?)').findall(data) - meter = filter(lambda m: m[1].isdigit(), meter) + meter = [m for m in meter if m[1].isdigit()] if meter: r['tomatometer'] = meter[0][1] r['rating'] = find_re(data, 'Average Rating: ([\d.]+)/10') diff --git a/Shared/lib/python3.4/site-packages/ox/web/siteparser.py b/Shared/lib/python3.4/site-packages/ox/web/siteparser.py index fa21948..61a79bd 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/siteparser.py +++ b/Shared/lib/python3.4/site-packages/ox/web/siteparser.py @@ -33,7 +33,7 @@ class SiteParser(dict): return "%s%s" % (self.baseUrl, page) def read_url(self, url, timeout): - if not url in self._cache: + if url not in self._cache: self._cache[url] = read_url(url, timeout=timeout, unicode=True) return self._cache[url] diff --git a/Shared/lib/python3.4/site-packages/ox/web/spiegel.py b/Shared/lib/python3.4/site-packages/ox/web/spiegel.py index 8f20b39..455aec8 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/spiegel.py +++ b/Shared/lib/python3.4/site-packages/ox/web/spiegel.py @@ -95,7 +95,7 @@ def format_subsection(string): 'ussports': 'US-Sports', 'wunderbar': 'wunderBAR' } - if subsection.has_key(string): + if string in subsection: return subsection[string].replace(u'\xc3', 'ae') return string[:1].upper() + string[1:] @@ -219,8 +219,8 @@ def archive_news(): else: dMax = days[m] for d in range(dMax, 0, -1): - print('getNews(%d, %d, %d)' % (y, m, d)) - news = getNews(y, m ,d) + print('get_news(%d, %d, %d)' % (y, m, d)) + news = get_news(y, m, d) for new in news: dirname = archivePath + '/' + new['date'][0:4] + '/' + new['date'][5:7] + new['date'][8:10] + '/' + new['date'][11:13] + new['date'][14:16] if not os.path.exists(dirname): @@ -230,7 +230,7 @@ def archive_news(): else: filename = dirname + '/' + new['url'] + '.json' if not os.path.exists(filename) or True: - data 
= json.dumps(new, ensure_ascii = False) + data = json.dumps(new, ensure_ascii=False) f = open(filename, 'w') f.write(data) f.close() @@ -253,7 +253,7 @@ def archive_news(): string = strings[3] if len(strings) == 6: string += '/' + strings[4] - if not count.has_key(string): + if string not in count: count[string] = {'count': 1, 'string': '%s %s http://www.spiegel.de/%s/0,1518,archiv-%d-%03d,00.html' % (new['date'], new['date'], new['section'].lower(), y, int(datetime(y, m, d).strftime('%j')))} else: count[string] = {'count': count[string]['count'] + 1, 'string': '%s %s' % (new['date'], count[string]['string'][17:])} @@ -269,12 +269,12 @@ if __name__ == '__main__': # spiegel = Spiegel(2008, 8) # print(spiegel.getContents()) # news = News(2001, 9, 10) - # output(news.getNews()) + # output(news.get_news()) ''' x = [] for d in range(10, 30): print('2/%d' % d) - news = getNews(2008, 2, d) + news = get_news(2008, 2, d) for new in news: strings = new['url'].split('/') string = format_section(strings[3]) diff --git a/Shared/lib/python3.4/site-packages/ox/web/startpage.py b/Shared/lib/python3.4/site-packages/ox/web/startpage.py index 1df25a4..ca18437 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/startpage.py +++ b/Shared/lib/python3.4/site-packages/ox/web/startpage.py @@ -21,10 +21,10 @@ def find(query, max_results=DEFAULT_MAX_RESULTS, timeout=DEFAULT_TIMEOUT): Return max_results tuples with title, url, description >>> find("The Matrix site:imdb.com", 1)[0][0] - u'The Matrix (1999) - IMDb' + 'The Matrix (1999) - IMDb' >>> find("The Matrix site:imdb.com", 1)[0][1] - u'http://www.imdb.com/title/tt0133093/' + 'http://www.imdb.com/title/tt0133093/' """ results = [] url = 'https://eu1.startpage.com/do/search?nosteeraway=1&abp=1&language=english&cmd=process_search&query=%s&x=0&y=0&cat=web&engine0=v1all' % quote_plus(query) diff --git a/Shared/lib/python3.4/site-packages/ox/web/thepiratebay.py b/Shared/lib/python3.4/site-packages/ox/web/thepiratebay.py index 
125ce7d..e9a6445 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/thepiratebay.py +++ b/Shared/lib/python3.4/site-packages/ox/web/thepiratebay.py @@ -9,11 +9,10 @@ from ox import find_re, cache, strip_tags, decode_html, get_torrent_info, normal from ox.normalize import normalize_imdbid import ox -from torrent import Torrent - cache_timeout = 24*60*60 # cache search only for 24 hours season_episode = re.compile("S..E..", re.IGNORECASE) +baseurl = "https://thepiratebay.org/" def read_url(url, data=None, headers=cache.DEFAULT_HEADERS, timeout=cache.cache_timeout, valid=None, unicode=False): @@ -25,7 +24,7 @@ def find_movies(query=None, imdb=None, max_results=10): if imdb: query = "tt" + normalize_imdbid(imdb) results = [] - next = ["https://thepiratebay.se/search/%s/0/3/200" % quote(query), ] + next = [baseurl + "hsearch/%s/0/3/200" % quote(query), ] page_count = 1 while next and page_count < 4: page_count += 1 @@ -33,12 +32,12 @@ def find_movies(query=None, imdb=None, max_results=10): if not url.startswith('http'): if not url.startswith('/'): url = "/" + url - url = "https://thepiratebay.se" + url + url = baseurl + url data = read_url(url, timeout=cache_timeout, unicode=True) regexp = '''(.*?).*?''' for row in re.compile(regexp, re.DOTALL).findall(data): torrentType = row[0] - torrentLink = "https://thepiratebay.se" + row[1] + torrentLink = baseurl + row[1] torrentTitle = decode_html(row[2]) # 201 = Movies , 202 = Movie DVDR, 205 TV Shows if torrentType in ['201']: @@ -61,7 +60,7 @@ def get_id(piratebayId): def exists(piratebayId): piratebayId = get_id(piratebayId) - return ox.net.exists("https://thepiratebay.se/torrent/%s" % piratebayId) + return ox.net.exists(baseurl + "torrent/%s" % piratebayId) def get_data(piratebayId): _key_map = { @@ -75,7 +74,7 @@ def get_data(piratebayId): torrent = dict() torrent[u'id'] = piratebayId torrent[u'domain'] = 'thepiratebay.org' - torrent[u'comment_link'] = 'https://thepiratebay.se/torrent/%s' % piratebayId + 
torrent[u'comment_link'] = baseurl + 'torrent/%s' % piratebayId data = read_url(torrent['comment_link'], unicode=True) torrent[u'title'] = find_re(data, '(.*?) \(download torrent\) - TPB') @@ -84,33 +83,15 @@ def get_data(piratebayId): torrent[u'title'] = decode_html(torrent[u'title']).strip() torrent[u'imdbId'] = find_re(data, 'title/tt(\d{7})') title = quote(torrent['title'].encode('utf-8')) - torrent[u'torrent_link']="http://torrents.thepiratebay.org/%s/%s.torrent" % (piratebayId, title) + torrent[u'magent_link']= find_re(data, '"(magnet:.*?)"') + torrent[u'infohash'] = find_re(torrent[u'magent_link'], "btih:(.*?)&") for d in re.compile('dt>(.*?):.*?(.*?)', re.DOTALL).findall(data): key = d[0].lower().strip() key = _key_map.get(key, key) value = decode_html(strip_tags(d[1].strip())) - torrent[key] = value + if not '<' in key: + torrent[key] = value torrent[u'description'] = find_re(data, '
    (.*?)
    ') if torrent[u'description']: torrent['description'] = normalize_newlines(decode_html(strip_tags(torrent['description']))).strip() - t = read_url(torrent[u'torrent_link']) - torrent[u'torrent_info'] = get_torrent_info(t) return torrent - -class Thepiratebay(Torrent): - ''' - >>> Thepiratebay('123') - {} - - >>> Thepiratebay('3951349')['infohash'] - '4e84415d36ed7b54066160c05a0b0f061898d12b' - ''' - def __init__(self, piratebayId): - self.data = get_data(piratebayId) - if not self.data: - return - Torrent.__init__(self) - published = self.data['uploaded'] - published = published.replace(' GMT', '').split(' +')[0] - self['published'] = datetime.strptime(published, "%Y-%m-%d %H:%M:%S") - diff --git a/Shared/lib/python3.4/site-packages/ox/web/torrent.py b/Shared/lib/python3.4/site-packages/ox/web/torrent.py deleted file mode 100644 index 1312075..0000000 --- a/Shared/lib/python3.4/site-packages/ox/web/torrent.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# vi:si:et:sw=4:sts=4:ts=4 -from ox import int_value - - -class Torrent(dict): - ''' - >>> Torrent() - {'files': 1, 'domain': u'', 'subtitle language': u'', 'seeder': -1, 'description': u'', 'language': u'', 'title': u'', 'imdbId': u'', 'downloaded': -1, 'leecher': -1, 'torrent_link': u'', 'torrent_info': {}, 'published': u'', 'announce': '', 'infohash': '', 'id': u'', 'comment_link': u'', 'size': -1} - ''' - _string_keys = ('id', 'title', 'description', 'infohash', 'torrent_link', 'comment_link', - 'imdbId', 'announce', 'domain', 'published', 'language', 'subtitle language') - _int_keys = ('size', 'seeder', 'leecher', 'downloaded', 'files') - _dict_keys = ('torrent_info', ) - _list_keys = () - data = {'torrent_info': {}} - - def __init__(self): - for key in self._string_keys: - self[key] = self.data.get(key, u'') - for key in self._dict_keys: - self[key] = self.data.get(key, {}) - for key in self._list_keys: - self[key] = self.data.get(key, []) - for key in self._int_keys: - value = 
self.data.get(key, -1) - if not isinstance(value, int): - value = int(int_value(value)) - self[key] = value - self['infohash'] = self.data['torrent_info'].get('hash', '') - self['size'] = self.data['torrent_info'].get('size', -1) - self['announce'] = self.data['torrent_info'].get('announce', '') - if 'files' in self.data['torrent_info']: - self['files'] = len(self.data['torrent_info']['files']) - else: - self['files'] = 1 - diff --git a/Shared/lib/python3.4/site-packages/ox/web/wikipedia.py b/Shared/lib/python3.4/site-packages/ox/web/wikipedia.py index beacdac..cb73758 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/wikipedia.py +++ b/Shared/lib/python3.4/site-packages/ox/web/wikipedia.py @@ -116,7 +116,7 @@ def get_movie_data(wikipedia_url): def get_image_url(name): url = 'http://en.wikipedia.org/wiki/Image:' + name.replace(' ', '%20') - data = read_url(url) + data = read_url(url).decode('utf-8') url = find_re(data, 'href="(http://upload.wikimedia.org/.*?)"') if not url: url = find_re(data, 'href="(//upload.wikimedia.org/.*?)"') @@ -145,7 +145,7 @@ def find(query, max_results=10): url = "http://en.wikipedia.org/w/api.php?" 
+ urllib.parse.urlencode(query) data = read_url(url) if not data: - data = read_url(url, timeout=0) + data = read_url(url, timeout=0) result = json.loads(data.decode('utf-8')) results = [] if result and 'query' in result: diff --git a/Shared/lib/python3.4/site-packages/ox/web/youtube.py b/Shared/lib/python3.4/site-packages/ox/web/youtube.py index 7268598..805f716 100644 --- a/Shared/lib/python3.4/site-packages/ox/web/youtube.py +++ b/Shared/lib/python3.4/site-packages/ox/web/youtube.py @@ -7,7 +7,6 @@ import re from xml.dom.minidom import parseString import json -import feedparser import ox from ox.cache import read_url, cache_timeout @@ -27,15 +26,15 @@ def video_url(youtubeId, format='mp4', timeout=cache_timeout): """ fmt = None if format == '4k': - fmt=38 + fmt = 38 elif format == '1080p': - fmt=37 + fmt = 37 elif format == '720p': - fmt=22 + fmt = 22 elif format == 'mp4': - fmt=18 + fmt = 18 elif format == 'high': - fmt=35 + fmt = 35 elif format == 'webm': streams = videos(youtubeId, 'webm') return streams[max(streams.keys())]['url'] @@ -46,14 +45,14 @@ def video_url(youtubeId, format='mp4', timeout=cache_timeout): def get_video_info(id): eurl = get_url(id) - data = read_url(eurl) + data = read_url(eurl).decode('utf-8') t = re.compile('\W[\'"]?t[\'"]?: ?[\'"](.+?)[\'"]').findall(data) if t: t = t[0] else: raise IOError url = "http://www.youtube.com/get_video_info?&video_id=%s&el=$el&ps=default&eurl=%s&hl=en_US&t=%s" % (id, quote(eurl), quote(t)) - data = read_url(url) + data = read_url(url).decode('utf-8') info = {} for part in data.split('&'): key, value = part.split('=') @@ -61,6 +60,7 @@ def get_video_info(id): return info def find(query, max_results=10, offset=1, orderBy='relevance'): + import feedparser query = quote(query) url = "http://gdata.youtube.com/feeds/api/videos?vq=%s&orderby=%s&start-index=%s&max-results=%s" % (query, orderBy, offset, max_results) data = read_url(url) @@ -104,14 +104,20 @@ def info(id, timeout=cache_timeout): info['license'] = 
match[0].strip() info['license'] = re.sub('<.+?>', '', info['license']).strip() + subs = subtitles(id, timeout) + if subs: + info['subtitles'] = subs + return info + +def subtitles(id, timeout=cache_timeout): url = "http://www.youtube.com/api/timedtext?hl=en&type=list&tlangs=1&v=%s&asrs=1" % id data = read_url(url, timeout=timeout) xml = parseString(data) languages = [t.getAttribute('lang_code') for t in xml.getElementsByTagName('track')] + subtitles = {} if languages: - info['subtitles'] = {} for language in languages: - url = "http://www.youtube.com/api/timedtext?hl=en&v=%s&type=track&lang=%s&name&kind"%(id, language) + url = "http://www.youtube.com/api/timedtext?hl=en&v=%s&type=track&lang=%s&name&kind" % (id, language) data = read_url(url, timeout=timeout) xml = parseString(data) subs = [] @@ -128,8 +134,8 @@ def info(id, timeout=cache_timeout): 'out': end, 'value': ox.decode_html(text), }) - info['subtitles'][language] = subs - return info + subtitles[language] = subs + return subtitles def videos(id, format=''): stream_type = { @@ -154,7 +160,7 @@ def videos(id, format=''): return streams def playlist(url): - data = read_url(url) + data = read_url(url).decode('utf-8') items = [] for i in list(set(re.compile('=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* + +pip +=== + +The `PyPA recommended`_ tool for installing Python packages. + +.. image:: https://img.shields.io/pypi/v/pip.svg + :target: https://pypi.org/project/pip/ + +.. image:: https://img.shields.io/travis/pypa/pip/master.svg?label=travis-ci + :target: https://travis-ci.org/pypa/pip + +.. image:: https://img.shields.io/appveyor/ci/pypa/pip.svg?label=appveyor-ci + :target: https://ci.appveyor.com/project/pypa/pip/history + +.. image:: https://readthedocs.org/projects/pip/badge/?version=latest + :target: https://pip.pypa.io/en/latest + +* `Installation`_ +* `Documentation`_ +* `Changelog`_ +* `GitHub Page`_ +* `Issue Tracking`_ +* `User mailing list`_ +* `Dev mailing list`_ +* User IRC: #pypa on Freenode. 
+* Dev IRC: #pypa-dev on Freenode. + +Code of Conduct +--------------- + +Everyone interacting in the pip project's codebases, issue trackers, chat +rooms and mailing lists is expected to follow the `PyPA Code of Conduct`_. + +.. _PyPA recommended: https://packaging.python.org/en/latest/current/ +.. _Installation: https://pip.pypa.io/en/stable/installing.html +.. _Documentation: https://pip.pypa.io/en/stable/ +.. _Changelog: https://pip.pypa.io/en/stable/news.html +.. _GitHub Page: https://github.com/pypa/pip +.. _Issue Tracking: https://github.com/pypa/pip/issues +.. _User mailing list: https://groups.google.com/forum/#!forum/python-virtualenv +.. _Dev mailing list: https://groups.google.com/forum/#!forum/pypa-dev +.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ + + diff --git a/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/RECORD b/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/RECORD new file mode 100644 index 0000000..76ea6e1 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/RECORD @@ -0,0 +1,172 @@ +../../../bin/pip,sha256=T1rf4yE8EwZwE4dkp4KfcouU1ALgs6uTULDZyYzT9Nc,280 +../../../bin/pip3,sha256=T1rf4yE8EwZwE4dkp4KfcouU1ALgs6uTULDZyYzT9Nc,280 +../../../bin/pip3.7,sha256=T1rf4yE8EwZwE4dkp4KfcouU1ALgs6uTULDZyYzT9Nc,280 +pip-18.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-18.1.dist-info/LICENSE.txt,sha256=ORqHhOMZ2uVDFHfUzJvFBPxdcf2eieHIDxzThV9dfPo,1090 +pip-18.1.dist-info/METADATA,sha256=D7pqBJTuqM9w_HTW91a0XGjLT9vynlBAE4pPCt_W_UE,2588 +pip-18.1.dist-info/RECORD,, +pip-18.1.dist-info/WHEEL,sha256=_wJFdOYk7i3xxT8ElOkUJvOdOvfNGbR9g-bf6UQT6sU,110 +pip-18.1.dist-info/entry_points.txt,sha256=S_zfxY25QtQDVY1BiLAmOKSkkI5llzCKPLiYOSEupsY,98 +pip-18.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip/__init__.py,sha256=nO-iphoXiDoci_ZAMl-PG2zdd4Y7m88jBDILTYzwGy4,21 +pip/__main__.py,sha256=L3IHqBeasELUHvwy5CT_izVEMhM12tve289qut49DvU,623 
+pip/__pycache__/__init__.cpython-37.pyc,, +pip/__pycache__/__main__.cpython-37.pyc,, +pip/_internal/__init__.py,sha256=b0jSFCCViGhB1RWni35_NMkH3Y-mbZrV648DGMagDjs,2869 +pip/_internal/__pycache__/__init__.cpython-37.pyc,, +pip/_internal/__pycache__/build_env.cpython-37.pyc,, +pip/_internal/__pycache__/cache.cpython-37.pyc,, +pip/_internal/__pycache__/configuration.cpython-37.pyc,, +pip/_internal/__pycache__/download.cpython-37.pyc,, +pip/_internal/__pycache__/exceptions.cpython-37.pyc,, +pip/_internal/__pycache__/index.cpython-37.pyc,, +pip/_internal/__pycache__/locations.cpython-37.pyc,, +pip/_internal/__pycache__/pep425tags.cpython-37.pyc,, +pip/_internal/__pycache__/pyproject.cpython-37.pyc,, +pip/_internal/__pycache__/resolve.cpython-37.pyc,, +pip/_internal/__pycache__/wheel.cpython-37.pyc,, +pip/_internal/build_env.py,sha256=zKhqmDMnrX5OTSNQ4xBw-mN5mTGVu6wjiNFW-ajWYEI,4797 +pip/_internal/cache.py,sha256=96_aKtDbwgLEVNgNabOT8GrFCYZEACedoiucqU5ccg8,6829 +pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132 +pip/_internal/cli/__pycache__/__init__.cpython-37.pyc,, +pip/_internal/cli/__pycache__/autocompletion.cpython-37.pyc,, +pip/_internal/cli/__pycache__/base_command.cpython-37.pyc,, +pip/_internal/cli/__pycache__/cmdoptions.cpython-37.pyc,, +pip/_internal/cli/__pycache__/main_parser.cpython-37.pyc,, +pip/_internal/cli/__pycache__/parser.cpython-37.pyc,, +pip/_internal/cli/__pycache__/status_codes.cpython-37.pyc,, +pip/_internal/cli/autocompletion.py,sha256=ptvsMdGjq42pzoY4skABVF43u2xAtLJlXAulPi-A10Y,6083 +pip/_internal/cli/base_command.py,sha256=ke6af4iWzrZoc3HtiPKnCZJvD6GlX8dRwBwpFCg1axc,9963 +pip/_internal/cli/cmdoptions.py,sha256=klAO3AxS0_xoZY_3LwwRjT4TbxtdIwBrmnLJvgG6sGI,19467 +pip/_internal/cli/main_parser.py,sha256=Ga_kT7if-Gg0rmmRqlGEHW6JWVm9zwzO7igJm6RE9EI,2763 +pip/_internal/cli/parser.py,sha256=VZKUKJPbU6I2cHPLDOikin-aCx7OvLcZ3fzYp3xytd8,9378 
+pip/_internal/cli/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156 +pip/_internal/commands/__init__.py,sha256=CQAzhVx9ViPtqLNUvAeqnKj5iWfFEcqMx5RlZWjJ30c,2251 +pip/_internal/commands/__pycache__/__init__.cpython-37.pyc,, +pip/_internal/commands/__pycache__/check.cpython-37.pyc,, +pip/_internal/commands/__pycache__/completion.cpython-37.pyc,, +pip/_internal/commands/__pycache__/configuration.cpython-37.pyc,, +pip/_internal/commands/__pycache__/download.cpython-37.pyc,, +pip/_internal/commands/__pycache__/freeze.cpython-37.pyc,, +pip/_internal/commands/__pycache__/hash.cpython-37.pyc,, +pip/_internal/commands/__pycache__/help.cpython-37.pyc,, +pip/_internal/commands/__pycache__/install.cpython-37.pyc,, +pip/_internal/commands/__pycache__/list.cpython-37.pyc,, +pip/_internal/commands/__pycache__/search.cpython-37.pyc,, +pip/_internal/commands/__pycache__/show.cpython-37.pyc,, +pip/_internal/commands/__pycache__/uninstall.cpython-37.pyc,, +pip/_internal/commands/__pycache__/wheel.cpython-37.pyc,, +pip/_internal/commands/check.py,sha256=CyeYH2kfDKSGSURoBfWtx-sTcZZQP-bK170NmKYlmsg,1398 +pip/_internal/commands/completion.py,sha256=hqvCvoxsIHjysiD7olHKTqK2lzE1_lS6LWn69kN5qyI,2929 +pip/_internal/commands/configuration.py,sha256=265HWuUxPggCNcIeWHA3p-LDDiRVnexwFgwmHGgWOHY,7125 +pip/_internal/commands/download.py,sha256=D_iGMp3xX2iD7KZYZAjXlYT3rf3xjwxyYe05KE-DVzE,6514 +pip/_internal/commands/freeze.py,sha256=VvS3G0wrm_9BH3B7Ex5msLL_1UQTtCq5G8dDI63Iemo,3259 +pip/_internal/commands/hash.py,sha256=K1JycsD-rpjqrRcL_ijacY9UKmI82pQcLYq4kCM4Pv0,1681 +pip/_internal/commands/help.py,sha256=MwBhPJpW1Dt3GfJV3V8V6kgAy_pXT0jGrZJB1wCTW-E,1090 +pip/_internal/commands/install.py,sha256=I_zZhkmIbDm_HqLI2WWC9vjXEnd5kNAdQ2k1xtU38zg,21874 +pip/_internal/commands/list.py,sha256=n740MsR0cG34EuvGWMzdVl0uIA3UIYx1_95FUsTktN0,10272 +pip/_internal/commands/search.py,sha256=sLZ9icKMEEGekHvzRRZMiTd1zCFIZeDptyyU1mQCYzk,4728 
+pip/_internal/commands/show.py,sha256=9EVh86vY0NZdlhT-wsuV-zq_MAV6qqV4S1Akn3wkUuw,6289 +pip/_internal/commands/uninstall.py,sha256=h0gfPF5jylDESx_IHgF6bZME7QAEOHzQHdn65GP-jrE,2963 +pip/_internal/commands/wheel.py,sha256=ZuVf_DMpKCUzBVstolvQPAeajQRC51Oky5_hDHzhhFs,7020 +pip/_internal/configuration.py,sha256=KMgG3ufFrUKX_QESi2cMVvFi47tl845Bg1ZkNthlWik,13243 +pip/_internal/download.py,sha256=c5Hkimq39eJdZ6DN0_0etjK43-0a5CK_W_3sVLqH87g,33300 +pip/_internal/exceptions.py,sha256=EIGotnq6qM2nbGtnlgZ8Xp5VfP2W4-9UOCzQGMwy5MY,8899 +pip/_internal/index.py,sha256=6CAtZ8QTLcpw0fJqQ9OPu-Os1ettLZtVY1pPSKia8r8,34789 +pip/_internal/locations.py,sha256=ujNrLnA04Y_EmSriO0nS6qkkw_BkPfobB_hdwIDPvpM,6307 +pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63 +pip/_internal/models/__pycache__/__init__.cpython-37.pyc,, +pip/_internal/models/__pycache__/candidate.cpython-37.pyc,, +pip/_internal/models/__pycache__/format_control.cpython-37.pyc,, +pip/_internal/models/__pycache__/index.cpython-37.pyc,, +pip/_internal/models/__pycache__/link.cpython-37.pyc,, +pip/_internal/models/candidate.py,sha256=zq2Vb5l5JflrVX7smHTJHQciZWHyoJZuYTLeQa1G16c,741 +pip/_internal/models/format_control.py,sha256=aDbH4D2XuyaGjtRjTLQhNzClAcLZdJCKSHO8xbZSmFA,2202 +pip/_internal/models/index.py,sha256=YI1WlhWfS9mVPY0bIboA5la2pjJ2J0qgPJIbvdEjZBk,996 +pip/_internal/models/link.py,sha256=E61PvS2Wrmb9-zT-eAc_8_xI3C-89wJlpL8SL-mlQmg,3998 +pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/operations/__pycache__/__init__.cpython-37.pyc,, +pip/_internal/operations/__pycache__/check.cpython-37.pyc,, +pip/_internal/operations/__pycache__/freeze.cpython-37.pyc,, +pip/_internal/operations/__pycache__/prepare.cpython-37.pyc,, +pip/_internal/operations/check.py,sha256=ahcOg5p68nNow6_wy5prYYK0KZq22lm0CsJn8AyDMCI,4937 +pip/_internal/operations/freeze.py,sha256=lskaBcqf3bPZupG032fuLf76QYv5wpAQ6jsiXac56Bg,10450 
+pip/_internal/operations/prepare.py,sha256=atoLFj3OD5KfXsa5dYBMC_mI06l068F5yZhF4jle1JA,14280 +pip/_internal/pep425tags.py,sha256=TQhxOPss4RjxgyVgxpSRe31HaTcWmn-LVjWBbkvkjzk,10845 +pip/_internal/pyproject.py,sha256=fpO52MCa3w5xSlXIBXw39BDTGzP8G4570EW34hVvIKQ,5481 +pip/_internal/req/__init__.py,sha256=JnNZWvKUQuqAwHh64LCD3zprzWIVQEXChTo2UGHzVqo,2093 +pip/_internal/req/__pycache__/__init__.cpython-37.pyc,, +pip/_internal/req/__pycache__/constructors.cpython-37.pyc,, +pip/_internal/req/__pycache__/req_file.cpython-37.pyc,, +pip/_internal/req/__pycache__/req_install.cpython-37.pyc,, +pip/_internal/req/__pycache__/req_set.cpython-37.pyc,, +pip/_internal/req/__pycache__/req_tracker.cpython-37.pyc,, +pip/_internal/req/__pycache__/req_uninstall.cpython-37.pyc,, +pip/_internal/req/constructors.py,sha256=97WQp9Svh-Jw3oLZL9_57gJ3zihm5LnWlSRjOwOorDU,9573 +pip/_internal/req/req_file.py,sha256=ORA0GKUjGd6vy7pmBwXR55FFj4h_OxYykFQ6gHuWvt0,11940 +pip/_internal/req/req_install.py,sha256=ry1RtNNCefDHAnf3EeGMpea-9pC6Yk1uHzP0Q5p2Un0,34046 +pip/_internal/req/req_set.py,sha256=nE6oagXJSiQREuuebX3oJO5OHSOVUIlvLLilodetBzc,7264 +pip/_internal/req/req_tracker.py,sha256=zH28YHV5TXAVh1ZOEZi6Z1Edkiu26dN2tXfR6VbQ3B4,2370 +pip/_internal/req/req_uninstall.py,sha256=ORSPah64KOVrKo-InMM3zgS5HQqbl5TLHFnE_Lxstq8,16737 +pip/_internal/resolve.py,sha256=tdepxCewsXXNFKSIYGSxiLvzi1xCv7UVFT9jRCDO90A,13578 +pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pip/_internal/utils/__pycache__/__init__.cpython-37.pyc,, +pip/_internal/utils/__pycache__/appdirs.cpython-37.pyc,, +pip/_internal/utils/__pycache__/compat.cpython-37.pyc,, +pip/_internal/utils/__pycache__/deprecation.cpython-37.pyc,, +pip/_internal/utils/__pycache__/encoding.cpython-37.pyc,, +pip/_internal/utils/__pycache__/filesystem.cpython-37.pyc,, +pip/_internal/utils/__pycache__/glibc.cpython-37.pyc,, +pip/_internal/utils/__pycache__/hashes.cpython-37.pyc,, +pip/_internal/utils/__pycache__/logging.cpython-37.pyc,, 
+pip/_internal/utils/__pycache__/misc.cpython-37.pyc,, +pip/_internal/utils/__pycache__/models.cpython-37.pyc,, +pip/_internal/utils/__pycache__/outdated.cpython-37.pyc,, +pip/_internal/utils/__pycache__/packaging.cpython-37.pyc,, +pip/_internal/utils/__pycache__/setuptools_build.cpython-37.pyc,, +pip/_internal/utils/__pycache__/temp_dir.cpython-37.pyc,, +pip/_internal/utils/__pycache__/typing.cpython-37.pyc,, +pip/_internal/utils/__pycache__/ui.cpython-37.pyc,, +pip/_internal/utils/appdirs.py,sha256=SPfibHtvOKzD_sHrpEZ60HfLae3GharU4Tg7SB3c-XM,9120 +pip/_internal/utils/compat.py,sha256=LSAvzXcsGY2O2drKIPszR5Ja2G0kup__51l3bx1jR_Q,8015 +pip/_internal/utils/deprecation.py,sha256=yQTe6dyWlBfxSBrOv_MdRXF1RPLER_EWOp-pa2zLoZc,3021 +pip/_internal/utils/encoding.py,sha256=D8tmfStCah6xh9OLhH9mWLr77q4akhg580YHJMKpq3Y,1025 +pip/_internal/utils/filesystem.py,sha256=ZOIHbacJ-SJtuZru4GoA5DuSIYyeaE4G5kfZPf5cn1A,915 +pip/_internal/utils/glibc.py,sha256=prOrsBjmgkDE-hY4Pl120yF5MIlkkmGrFLs8XfIyT-w,3004 +pip/_internal/utils/hashes.py,sha256=rJk-gj6F-sHggXAG97dhynqUHFFgApyZLWgaG2xCHME,2900 +pip/_internal/utils/logging.py,sha256=BQeUDEER3zlK0O4yv6DBfz6TK3f9XoLXyDlnB0mZVf0,6295 +pip/_internal/utils/misc.py,sha256=K5ouAkGO96le5zhngk_hSo7eysD-vMRYMqmkWnEaIFc,30639 +pip/_internal/utils/models.py,sha256=DQYZSRhjvSdDTAaJLLCpDtxAn1S_-v_8nlNjv4T2jwY,1042 +pip/_internal/utils/outdated.py,sha256=BXtCMKR6gjTrvMfP3MWzZ1Y4ZU4qqoCfbRNqQCusVt8,5642 +pip/_internal/utils/packaging.py,sha256=Ru8ls_S8PPKR8RKEn7jMetENY_A9jPet1HlhTZwpFxU,2443 +pip/_internal/utils/setuptools_build.py,sha256=0blfscmNJW_iZ5DcswJeDB_PbtTEjfK9RL1R1WEDW2E,278 +pip/_internal/utils/temp_dir.py,sha256=n2FkVlwRX_hS61fYt3nSAh2e2V6CcZn_dfbPId1pAQE,2615 +pip/_internal/utils/typing.py,sha256=ztYtZAcqjCYDwP-WlF6EiAAskAsZBMMXtuqvfgZIlgQ,1139 +pip/_internal/utils/ui.py,sha256=FW8wdtc7DvNwJClGr_TvGZlqcoO482GYe0UY9nKmpso,13657 +pip/_internal/vcs/__init__.py,sha256=2Ct9ogOwzS6ZKKaEXKN2XDiBOiFHMcejnN1KM21mLrQ,16319 
+pip/_internal/vcs/__pycache__/__init__.cpython-37.pyc,, +pip/_internal/vcs/__pycache__/bazaar.cpython-37.pyc,, +pip/_internal/vcs/__pycache__/git.cpython-37.pyc,, +pip/_internal/vcs/__pycache__/mercurial.cpython-37.pyc,, +pip/_internal/vcs/__pycache__/subversion.cpython-37.pyc,, +pip/_internal/vcs/bazaar.py,sha256=rjskVmSSn68O7lC5JrGmDTWXneXFMMJJvj_bbdSM8QA,3669 +pip/_internal/vcs/git.py,sha256=n1cFBqTnLIcxAOClZMgOBqELjEjygDBPZ9z-Q7g0qVQ,12580 +pip/_internal/vcs/mercurial.py,sha256=jVTa0XQpFR6EiBcaqW4E4JjTce_t1tFnKRaIhaIPlS8,3471 +pip/_internal/vcs/subversion.py,sha256=vDLTfcjj0kgqcEsbPBfveC4CRxyhWiOjke-qN0Zr8CE,7676 +pip/_internal/wheel.py,sha256=fg9E936DaI1LyrBPHqtzHG_WEVyuUwipHISkD6N3jNw,32007 +pip/_vendor/__init__.py,sha256=bdhl7DUZ1z7eukZLktoO1vhki9sC576gBWcFgel4684,4890 +pip/_vendor/__pycache__/__init__.cpython-37.pyc,, +pip/_vendor/pep517/__init__.py,sha256=GH4HshnLERtjAjkY0zHoz3f7-35UcIvr27iFWSOUazU,82 +pip/_vendor/pep517/__pycache__/__init__.cpython-37.pyc,, +pip/_vendor/pep517/__pycache__/_in_process.cpython-37.pyc,, +pip/_vendor/pep517/__pycache__/check.cpython-37.pyc,, +pip/_vendor/pep517/__pycache__/colorlog.cpython-37.pyc,, +pip/_vendor/pep517/__pycache__/compat.cpython-37.pyc,, +pip/_vendor/pep517/__pycache__/envbuild.cpython-37.pyc,, +pip/_vendor/pep517/__pycache__/wrappers.cpython-37.pyc,, +pip/_vendor/pep517/_in_process.py,sha256=iWpagFk2GhNBbvl-Ca2RagfD0ALuits4WWSM6nQMTdg,5831 +pip/_vendor/pep517/check.py,sha256=Yp2NHW71DIOCgkFb7HKJOzKmsum_s_OokRP6HnR3bTg,5761 +pip/_vendor/pep517/colorlog.py,sha256=2AJuPI_DHM5T9IDgcTwf0E8suyHAFnfsesogr0AB7RQ,4048 +pip/_vendor/pep517/compat.py,sha256=4SFG4QN-cNj8ebSa0wV0HUtEEQWwmbok2a0uk1gYEOM,631 +pip/_vendor/pep517/envbuild.py,sha256=osRsJVd7hir1w_uFXiVeeWxfJ3iYhwxsKRgNBWpqtCI,5672 +pip/_vendor/pep517/wrappers.py,sha256=RhgWm-MLxpYPgc9cZ3-A3ToN99ZzgM8-ia4FDB58koM,5018 diff --git a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/WHEEL 
similarity index 70% rename from Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/WHEEL rename to Shared/lib/python3.4/site-packages/pip-18.1.dist-info/WHEEL index 8b6dd1b..c4bde30 100644 --- a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/WHEEL +++ b/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) +Generator: bdist_wheel (0.32.3) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any diff --git a/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/entry_points.txt b/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/entry_points.txt new file mode 100644 index 0000000..f5809cb --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +pip = pip._internal:main +pip3 = pip._internal:main +pip3.7 = pip._internal:main + diff --git a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/pip-18.1.dist-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/pip-18.1.dist-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/DESCRIPTION.rst deleted file mode 100644 index 39586d2..0000000 --- a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,36 +0,0 @@ -pip -=== - -The `PyPA recommended -`_ -tool for installing Python packages. - -* `Installation `_ -* `Documentation `_ -* `Changelog `_ -* `Github Page `_ -* `Issue Tracking `_ -* `User mailing list `_ -* `Dev mailing list `_ -* User IRC: #pypa on Freenode. -* Dev IRC: #pypa-dev on Freenode. - - -.. image:: https://img.shields.io/pypi/v/pip.svg - :target: https://pypi.python.org/pypi/pip - -.. 
image:: https://img.shields.io/travis/pypa/pip/develop.svg - :target: http://travis-ci.org/pypa/pip - -.. image:: https://readthedocs.org/projects/pip/badge/?version=stable - :target: https://pip.pypa.io/en/stable - -Code of Conduct ---------------- - -Everyone interacting in the pip project's codebases, issue trackers, chat -rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. - -.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - - diff --git a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/METADATA deleted file mode 100644 index 2a5912c..0000000 --- a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/METADATA +++ /dev/null @@ -1,65 +0,0 @@ -Metadata-Version: 2.0 -Name: pip -Version: 8.1.0 -Summary: The PyPA recommended tool for installing Python packages. 
-Home-page: https://pip.pypa.io/ -Author: The pip developers -Author-email: python-virtualenv@groups.google.com -License: MIT -Keywords: easy_install distutils setuptools egg virtualenv -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Topic :: Software Development :: Build Tools -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: Implementation :: PyPy -Provides-Extra: testing -Requires-Dist: mock; extra == 'testing' -Requires-Dist: pretend; extra == 'testing' -Requires-Dist: pytest; extra == 'testing' -Requires-Dist: scripttest (>=1.3); extra == 'testing' -Requires-Dist: virtualenv (>=1.10); extra == 'testing' - -pip -=== - -The `PyPA recommended -`_ -tool for installing Python packages. - -* `Installation `_ -* `Documentation `_ -* `Changelog `_ -* `Github Page `_ -* `Issue Tracking `_ -* `User mailing list `_ -* `Dev mailing list `_ -* User IRC: #pypa on Freenode. -* Dev IRC: #pypa-dev on Freenode. - - -.. image:: https://img.shields.io/pypi/v/pip.svg - :target: https://pypi.python.org/pypi/pip - -.. image:: https://img.shields.io/travis/pypa/pip/develop.svg - :target: http://travis-ci.org/pypa/pip - -.. image:: https://readthedocs.org/projects/pip/badge/?version=stable - :target: https://pip.pypa.io/en/stable - -Code of Conduct ---------------- - -Everyone interacting in the pip project's codebases, issue trackers, chat -rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. - -.. 
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - - diff --git a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/RECORD deleted file mode 100644 index 5d24ea6..0000000 --- a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/RECORD +++ /dev/null @@ -1,117 +0,0 @@ -pip/__init__.py,sha256=5_Hqv55mmr33A0LuCQpbLFyrUwiJ2kophGibMYYTcyw,10273 -pip/__main__.py,sha256=V6Kh-IEDEFpt1cahRE6MajUF_14qJR_Qsvn4MjWZXzE,584 -pip/basecommand.py,sha256=Zlg6SE42TIjRyt1mct0LCkgNxcKKnss3xvASJyDqucE,11429 -pip/baseparser.py,sha256=Nlc7Un9gat27xtB24SnKL_3pZZOoh62gNNRdS6tDRZY,10465 -pip/cmdoptions.py,sha256=OJhbVR6zQ8kbbGcnv0RTZyvwvFqzKxtmO4lPYymMBKM,15877 -pip/download.py,sha256=oJ3sZ8I6ct9X3eoXQ9xm_Ne0e6N85G_rWaERmMCVF2k,31722 -pip/exceptions.py,sha256=GdDhHOROBj-kW2rgerLJYXsxN8ENy1BX5RUb_Vs9TXM,7980 -pip/index.py,sha256=kpyj_O5c0VVlvhg5VuVm4oAGGh6RvD7Xr0syPN-eGa0,37191 -pip/locations.py,sha256=MqUzS8YI2wDa7oFzTQw4zM4s0Hci05yubxfU_kTXXlU,5632 -pip/pep425tags.py,sha256=4PNr9hd8OsXnKYR2q2oLzfDDhF5bFBwUZA-ZQxAClSI,11318 -pip/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156 -pip/wheel.py,sha256=qg1DgjXtiQCnY-IJY5HC5VgpeQm9WCjDKYmefSfOjq0,32088 -pip/_vendor/__init__.py,sha256=9EPZ-JLxtXMt71Fp5_pKTTe1QbJZZVlN81rsRYEvlpA,4781 -pip/commands/__init__.py,sha256=naZ1iIWRutNznOVpLj8qyn1GPE0B5rhCWCrSUOZSt4M,2145 -pip/commands/completion.py,sha256=2BEUY3jowgemiIGgUP3rpk6A9My4Eu8rTPosFxlESOE,1967 -pip/commands/download.py,sha256=dMRtH0JMBhNGlJWr1qC29vOeiBzG2K0OjOAfzdxSVgA,4804 -pip/commands/freeze.py,sha256=KmQoLf-HruqBDzc-F2-ganGVn2lboNQqppfyrMsx3SU,2774 -pip/commands/hash.py,sha256=MCt4jEFyfoce0lVeNEz1x49uaTY-VDkKiBvvxrVcHkw,1597 -pip/commands/help.py,sha256=84HWkEdnGP_AEBHnn8gJP2Te0XTXRKFoXqXopbOZTNo,982 -pip/commands/install.py,sha256=DvRVVwfUy6LV-AtNcxl9kLl7XOc7G7087ZhdD4QbP60,15628 -pip/commands/list.py,sha256=u76U5TLODQ2g53sSUA4q6WhYus7usbuWuITQJsCnP3E,7412 
-pip/commands/search.py,sha256=9ClAcFzkJ_7AksTkNrUed5qzsplpBtMlJByJLqiZFqw,4777 -pip/commands/show.py,sha256=dytBbI9XV-ChpV51tsuBygZJJO-QaO2Gtz5kbLkBCZE,5815 -pip/commands/uninstall.py,sha256=tz8cXz4WdpUdnt3RvpdQwH6_SNMB50egBIZWa1dwfcc,2884 -pip/commands/wheel.py,sha256=iT92Uo8qpVILl_Yk8L7AtkFVYGmY0ep5oDeyQSpwkLs,7528 -pip/compat/__init__.py,sha256=7WN0B0XMYIldfminnT679VoEJLxNQPi9MFwCIt1_llU,4669 -pip/compat/dictconfig.py,sha256=dRrelPDWrceDSzFT51RTEVY2GuM7UDyc5Igh_tn4Fvk,23096 -pip/compat/ordereddict.py,sha256=6RQCd4PyTE4tvLUoAnsygvrreOSTV4BRDbc_4gCSkTs,4110 -pip/models/__init__.py,sha256=0Rs7_RA4DxeOkWT5Cq4CQzDrSEhvYcN3TH2cazr72PE,71 -pip/models/index.py,sha256=pUfbO__v3mD9j-2n_ClwPS8pVyx4l2wIwyvWt8GMCRA,487 -pip/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/operations/freeze.py,sha256=H6xpxe1XgoNm5f3UXK47kNy0OQfM5jzo4UUwQu7G-Lo,4048 -pip/req/__init__.py,sha256=vFwZY8_Vc1WU1zFAespg1My_r_AT3n7cN0W9eX0EFqk,276 -pip/req/req_file.py,sha256=3eaVnPMUAjikLdC5i8hZUAf8aAOby2UxmAVFf94FOXY,11928 -pip/req/req_install.py,sha256=aG0_hj8WqLLUH5tO40OFIncIxU50Vm4rFqYcx5hmoYk,45589 -pip/req/req_set.py,sha256=Xwia1h7o2Z3Qogae3RHIDCGlXS3w2AeQPG8LBz7GmFM,32312 -pip/req/req_uninstall.py,sha256=fdH2VgCjEC8NRYDS7fRu3ZJaBBUEy-N5muwxDX5MBNM,6897 -pip/utils/__init__.py,sha256=SSixMJeh2SdjNgra_50jaC0jdmXFewLkFh_-a3tw9ks,28256 -pip/utils/appdirs.py,sha256=KTpZANfjYw5K2tZ0_jNNdP_kMxQAns79qZWelwaJo0c,7896 -pip/utils/build.py,sha256=4smLRrfSCmXmjEnVnMFh2tBEpNcSLRe6J0ejZJ-wWJE,1312 -pip/utils/deprecation.py,sha256=DR3cKqzovYu9Pif7c9bT2KmwekfW95N3BsI45_5u38I,2239 -pip/utils/encoding.py,sha256=rRSzAWfZTyOM-9u5LqwAVijcmoj2BRRQgP9d2IpEHQM,643 -pip/utils/filesystem.py,sha256=ZEVBuYM3fqr2_lgOESh4Y7fPFszGD474zVm_M3Mb5Tk,899 -pip/utils/hashes.py,sha256=oMk7cd3PbJgzpSQyXq1MytMud5f6H5Oa2YY5hYuCq6I,2866 -pip/utils/logging.py,sha256=7yWu4gZw-Qclj7X80QVdpGWkdTWGKT4LiUVKcE04pro,3327 -pip/utils/outdated.py,sha256=fNwOCL5r2EftPGhgCYGMKu032HC8cV-JAr9lp0HmToM,5455 
-pip/utils/setuptools_build.py,sha256=8IGop-SZ6lxUl5HMOjLRaDlORPugIH_b_b2Y67x4jQc,240 -pip/utils/ui.py,sha256=pbDkSAeumZ6jdZcOJ2yAbx8iBgeP2zfpqNnLJK1gskQ,11597 -pip/vcs/__init__.py,sha256=lnea41zMq9HqB1Qo7hxy2IjUzk5WtBvnoloCCMR6Vk4,12349 -pip/vcs/bazaar.py,sha256=tYTwc4b4off8mr0O2o8SiGejqBDJxcbDBMSMd9-ISYc,3803 -pip/vcs/git.py,sha256=u16VCiNW_a9AaYqLri2b8-f4lOZlOYwsGpHHV3uv_dQ,10218 -pip/vcs/mercurial.py,sha256=xG6rDiwHCRytJEs23SIHBXl_SwQo2jkkdD_6rVVP5h4,3472 -pip/vcs/subversion.py,sha256=mGT7sAzuVc1u-9MPoXJNyShnRzhdJpDdGNuhhzUPv6w,8687 -pip-8.1.0.dist-info/DESCRIPTION.rst,sha256=jSvW1qOjwzndvm_p_DexGCVJfwgg3rWPMJWzf6Rmsfc,1167 -pip-8.1.0.dist-info/METADATA,sha256=NIsJafU0Eg3jm5vpz5ntbIXKUUIhMqJpjuV_OVpPlCo,2362 -pip-8.1.0.dist-info/RECORD,, -pip-8.1.0.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 -pip-8.1.0.dist-info/entry_points.txt,sha256=GWc-Wb9WUKZ1EuVWNz-G0l3BeIpbNJLx0OJbZ61AAV0,68 -pip-8.1.0.dist-info/metadata.json,sha256=zCqgFRL4piTEzAhAk_56ay7wvcWRWZkDtEj0eHkT6g8,1513 -pip-8.1.0.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -../../../bin/pip,sha256=tirEqfn4_1wbMXwJJlFXKaCNMvF9Ha-hQeGqLUeRl10,270 -../../../bin/pip3,sha256=tirEqfn4_1wbMXwJJlFXKaCNMvF9Ha-hQeGqLUeRl10,270 -../../../bin/pip3.4,sha256=tirEqfn4_1wbMXwJJlFXKaCNMvF9Ha-hQeGqLUeRl10,270 -pip-8.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pip/__pycache__/baseparser.cpython-34.pyc,, -pip/__pycache__/index.cpython-34.pyc,, -pip/utils/__pycache__/encoding.cpython-34.pyc,, -pip/commands/__pycache__/search.cpython-34.pyc,, -pip/utils/__pycache__/__init__.cpython-34.pyc,, -pip/operations/__pycache__/freeze.cpython-34.pyc,, -pip/commands/__pycache__/download.cpython-34.pyc,, -pip/__pycache__/pep425tags.cpython-34.pyc,, -pip/__pycache__/wheel.cpython-34.pyc,, -pip/utils/__pycache__/appdirs.cpython-34.pyc,, -pip/__pycache__/status_codes.cpython-34.pyc,, -pip/req/__pycache__/req_file.cpython-34.pyc,, 
-pip/vcs/__pycache__/__init__.cpython-34.pyc,, -pip/req/__pycache__/req_uninstall.cpython-34.pyc,, -pip/req/__pycache__/req_set.cpython-34.pyc,, -pip/commands/__pycache__/completion.cpython-34.pyc,, -pip/utils/__pycache__/build.cpython-34.pyc,, -pip/commands/__pycache__/list.cpython-34.pyc,, -pip/__pycache__/__main__.cpython-34.pyc,, -pip/vcs/__pycache__/subversion.cpython-34.pyc,, -pip/__pycache__/download.cpython-34.pyc,, -pip/__pycache__/cmdoptions.cpython-34.pyc,, -pip/_vendor/__pycache__/__init__.cpython-34.pyc,, -pip/vcs/__pycache__/git.cpython-34.pyc,, -pip/utils/__pycache__/setuptools_build.cpython-34.pyc,, -pip/utils/__pycache__/deprecation.cpython-34.pyc,, -pip/req/__pycache__/req_install.cpython-34.pyc,, -pip/commands/__pycache__/uninstall.cpython-34.pyc,, -pip/utils/__pycache__/filesystem.cpython-34.pyc,, -pip/models/__pycache__/index.cpython-34.pyc,, -pip/vcs/__pycache__/bazaar.cpython-34.pyc,, -pip/commands/__pycache__/help.cpython-34.pyc,, -pip/req/__pycache__/__init__.cpython-34.pyc,, -pip/compat/__pycache__/__init__.cpython-34.pyc,, -pip/commands/__pycache__/freeze.cpython-34.pyc,, -pip/operations/__pycache__/__init__.cpython-34.pyc,, -pip/commands/__pycache__/wheel.cpython-34.pyc,, -pip/compat/__pycache__/dictconfig.cpython-34.pyc,, -pip/compat/__pycache__/ordereddict.cpython-34.pyc,, -pip/__pycache__/__init__.cpython-34.pyc,, -pip/utils/__pycache__/outdated.cpython-34.pyc,, -pip/commands/__pycache__/install.cpython-34.pyc,, -pip/utils/__pycache__/logging.cpython-34.pyc,, -pip/models/__pycache__/__init__.cpython-34.pyc,, -pip/__pycache__/exceptions.cpython-34.pyc,, -pip/commands/__pycache__/hash.cpython-34.pyc,, -pip/utils/__pycache__/hashes.cpython-34.pyc,, -pip/utils/__pycache__/ui.cpython-34.pyc,, -pip/commands/__pycache__/__init__.cpython-34.pyc,, -pip/__pycache__/basecommand.cpython-34.pyc,, -pip/vcs/__pycache__/mercurial.cpython-34.pyc,, -pip/__pycache__/locations.cpython-34.pyc,, -pip/commands/__pycache__/show.cpython-34.pyc,, diff --git 
a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/entry_points.txt b/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/entry_points.txt deleted file mode 100644 index c02a8d5..0000000 --- a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/entry_points.txt +++ /dev/null @@ -1,5 +0,0 @@ -[console_scripts] -pip = pip:main -pip3 = pip:main -pip3.5 = pip:main - diff --git a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/metadata.json deleted file mode 100644 index d25f0fc..0000000 --- a/Shared/lib/python3.4/site-packages/pip-8.1.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Build Tools", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: Implementation :: PyPy"], "extensions": {"python.commands": {"wrap_console": {"pip": "pip:main", "pip3": "pip:main", "pip3.5": "pip:main"}}, "python.details": {"contacts": [{"email": "python-virtualenv@groups.google.com", "name": "The pip developers", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://pip.pypa.io/"}}, "python.exports": {"console_scripts": {"pip": "pip:main", "pip3": "pip:main", "pip3.5": "pip:main"}}}, "extras": ["testing"], "generator": "bdist_wheel (0.29.0)", "keywords": ["easy_install", "distutils", "setuptools", "egg", "virtualenv"], "license": "MIT", "metadata_version": "2.0", "name": "pip", "run_requires": [{"extra": "testing", "requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], 
"summary": "The PyPA recommended tool for installing Python packages.", "test_requires": [{"requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "version": "8.1.0"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/pip/__init__.py b/Shared/lib/python3.4/site-packages/pip/__init__.py index c688a5a..ae265fa 100644 --- a/Shared/lib/python3.4/site-packages/pip/__init__.py +++ b/Shared/lib/python3.4/site-packages/pip/__init__.py @@ -1,311 +1 @@ -#!/usr/bin/env python -from __future__ import absolute_import - -import logging -import os -import optparse -import warnings - -import sys -import re - -from pip.exceptions import InstallationError, CommandError, PipError -from pip.utils import get_installed_distributions, get_prog -from pip.utils import deprecation, dist_is_editable -from pip.vcs import git, mercurial, subversion, bazaar # noqa -from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter -from pip.commands import get_summaries, get_similar_commands -from pip.commands import commands_dict -from pip._vendor.requests.packages.urllib3.exceptions import ( - InsecureRequestWarning, -) - - -# assignment for flake8 to be happy - -# This fixes a peculiarity when importing via __import__ - as we are -# initialising the pip module, "from pip import cmdoptions" is recursive -# and appears not to work properly in that situation. -import pip.cmdoptions -cmdoptions = pip.cmdoptions - -# The version as used in the setup.py and the docs conf.py -__version__ = "8.1.0" - - -logger = logging.getLogger(__name__) - -# Hide the InsecureRequestWArning from urllib3 -warnings.filterwarnings("ignore", category=InsecureRequestWarning) - - -def autocomplete(): - """Command and option completion for the main option parser (and options) - and its subcommands (and options). - - Enable by sourcing one of the completion shell scripts (bash or zsh). - """ - # Don't complete if user hasn't sourced bash_completion file. 
- if 'PIP_AUTO_COMPLETE' not in os.environ: - return - cwords = os.environ['COMP_WORDS'].split()[1:] - cword = int(os.environ['COMP_CWORD']) - try: - current = cwords[cword - 1] - except IndexError: - current = '' - - subcommands = [cmd for cmd, summary in get_summaries()] - options = [] - # subcommand - try: - subcommand_name = [w for w in cwords if w in subcommands][0] - except IndexError: - subcommand_name = None - - parser = create_main_parser() - # subcommand options - if subcommand_name: - # special case: 'help' subcommand has no options - if subcommand_name == 'help': - sys.exit(1) - # special case: list locally installed dists for uninstall command - if subcommand_name == 'uninstall' and not current.startswith('-'): - installed = [] - lc = current.lower() - for dist in get_installed_distributions(local_only=True): - if dist.key.startswith(lc) and dist.key not in cwords[1:]: - installed.append(dist.key) - # if there are no dists installed, fall back to option completion - if installed: - for dist in installed: - print(dist) - sys.exit(1) - - subcommand = commands_dict[subcommand_name]() - options += [(opt.get_opt_string(), opt.nargs) - for opt in subcommand.parser.option_list_all - if opt.help != optparse.SUPPRESS_HELP] - - # filter out previously specified options from available options - prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] - options = [(x, v) for (x, v) in options if x not in prev_opts] - # filter options by current input - options = [(k, v) for k, v in options if k.startswith(current)] - for option in options: - opt_label = option[0] - # append '=' to options which require args - if option[1]: - opt_label += '=' - print(opt_label) - else: - # show main parser options only when necessary - if current.startswith('-') or current.startswith('--'): - opts = [i.option_list for i in parser.option_groups] - opts.append(parser.option_list) - opts = (o for it in opts for o in it) - - subcommands += [i.get_opt_string() for i in opts - if 
i.help != optparse.SUPPRESS_HELP] - - print(' '.join([x for x in subcommands if x.startswith(current)])) - sys.exit(1) - - -def create_main_parser(): - parser_kw = { - 'usage': '\n%prog [options]', - 'add_help_option': False, - 'formatter': UpdatingDefaultsHelpFormatter(), - 'name': 'global', - 'prog': get_prog(), - } - - parser = ConfigOptionParser(**parser_kw) - parser.disable_interspersed_args() - - pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - parser.version = 'pip %s from %s (python %s)' % ( - __version__, pip_pkg_dir, sys.version[:3]) - - # add the general options - gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) - parser.add_option_group(gen_opts) - - parser.main = True # so the help formatter knows - - # create command listing for description - command_summaries = get_summaries() - description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] - parser.description = '\n'.join(description) - - return parser - - -def parseopts(args): - parser = create_main_parser() - - # Note: parser calls disable_interspersed_args(), so the result of this - # call is to split the initial args into the general options before the - # subcommand and everything else. 
- # For example: - # args: ['--timeout=5', 'install', '--user', 'INITools'] - # general_options: ['--timeout==5'] - # args_else: ['install', '--user', 'INITools'] - general_options, args_else = parser.parse_args(args) - - # --version - if general_options.version: - sys.stdout.write(parser.version) - sys.stdout.write(os.linesep) - sys.exit() - - # pip || pip help -> print_help() - if not args_else or (args_else[0] == 'help' and len(args_else) == 1): - parser.print_help() - sys.exit() - - # the subcommand name - cmd_name = args_else[0] - - if cmd_name not in commands_dict: - guess = get_similar_commands(cmd_name) - - msg = ['unknown command "%s"' % cmd_name] - if guess: - msg.append('maybe you meant "%s"' % guess) - - raise CommandError(' - '.join(msg)) - - # all the args without the subcommand - cmd_args = args[:] - cmd_args.remove(cmd_name) - - return cmd_name, cmd_args - - -def check_isolated(args): - isolated = False - - if "--isolated" in args: - isolated = True - - return isolated - - -def main(args=None): - if args is None: - args = sys.argv[1:] - - # Configure our deprecation warnings to be sent through loggers - deprecation.install_warning_logger() - - autocomplete() - - try: - cmd_name, cmd_args = parseopts(args) - except PipError as exc: - sys.stderr.write("ERROR: %s" % exc) - sys.stderr.write(os.linesep) - sys.exit(1) - - command = commands_dict[cmd_name](isolated=check_isolated(cmd_args)) - return command.main(cmd_args) - - -# ########################################################### -# # Writing freeze files - -class FrozenRequirement(object): - - def __init__(self, name, req, editable, comments=()): - self.name = name - self.req = req - self.editable = editable - self.comments = comments - - _rev_re = re.compile(r'-r(\d+)$') - _date_re = re.compile(r'-(20\d\d\d\d\d\d)$') - - @classmethod - def from_dist(cls, dist, dependency_links): - location = os.path.normcase(os.path.abspath(dist.location)) - comments = [] - from pip.vcs import vcs, 
get_src_requirement - if dist_is_editable(dist) and vcs.get_backend_name(location): - editable = True - try: - req = get_src_requirement(dist, location) - except InstallationError as exc: - logger.warning( - "Error when trying to get requirement for VCS system %s, " - "falling back to uneditable format", exc - ) - req = None - if req is None: - logger.warning( - 'Could not determine repository location of %s', location - ) - comments.append( - '## !! Could not determine repository location' - ) - req = dist.as_requirement() - editable = False - else: - editable = False - req = dist.as_requirement() - specs = req.specs - assert len(specs) == 1 and specs[0][0] in ["==", "==="], \ - 'Expected 1 spec with == or ===; specs = %r; dist = %r' % \ - (specs, dist) - version = specs[0][1] - ver_match = cls._rev_re.search(version) - date_match = cls._date_re.search(version) - if ver_match or date_match: - svn_backend = vcs.get_backend('svn') - if svn_backend: - svn_location = svn_backend().get_location( - dist, - dependency_links, - ) - if not svn_location: - logger.warning( - 'Warning: cannot find svn location for %s', req) - comments.append( - '## FIXME: could not find svn URL in dependency_links ' - 'for this package:' - ) - else: - comments.append( - '# Installing as editable to satisfy requirement %s:' % - req - ) - if ver_match: - rev = ver_match.group(1) - else: - rev = '{%s}' % date_match.group(1) - editable = True - req = '%s@%s#egg=%s' % ( - svn_location, - rev, - cls.egg_name(dist) - ) - return cls(dist.project_name, req, editable, comments) - - @staticmethod - def egg_name(dist): - name = dist.egg_name() - match = re.search(r'-py\d\.\d$', name) - if match: - name = name[:match.start()] - return name - - def __str__(self): - req = self.req - if self.editable: - req = '-e %s' % req - return '\n'.join(list(self.comments) + [str(req)]) + '\n' - - -if __name__ == '__main__': - sys.exit(main()) +__version__ = "18.1" diff --git 
a/Shared/lib/python3.4/site-packages/pip/__main__.py b/Shared/lib/python3.4/site-packages/pip/__main__.py index 5556539..0c223f8 100644 --- a/Shared/lib/python3.4/site-packages/pip/__main__.py +++ b/Shared/lib/python3.4/site-packages/pip/__main__.py @@ -13,7 +13,7 @@ if __package__ == '': path = os.path.dirname(os.path.dirname(__file__)) sys.path.insert(0, path) -import pip # noqa +from pip._internal import main as _main # isort:skip # noqa if __name__ == '__main__': - sys.exit(pip.main()) + sys.exit(_main()) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/__init__.py new file mode 100644 index 0000000..276124d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/__init__.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +from __future__ import absolute_import + +import locale +import logging +import os +import warnings + +import sys + +# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks, +# but if invoked (i.e. imported), it will issue a warning to stderr if socks +# isn't available. requests unconditionally imports urllib3's socks contrib +# module, triggering this warning. The warning breaks DEP-8 tests (because of +# the stderr output) and is just plain annoying in normal usage. I don't want +# to add socks as yet another dependency for pip, nor do I want to allow-stder +# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to +# be done before the import of pip.vcs. 
+from pip._vendor.urllib3.exceptions import DependencyWarning +warnings.filterwarnings("ignore", category=DependencyWarning) # noqa + +# We want to inject the use of SecureTransport as early as possible so that any +# references or sessions or what have you are ensured to have it, however we +# only want to do this in the case that we're running on macOS and the linked +# OpenSSL is too old to handle TLSv1.2 +try: + import ssl +except ImportError: + pass +else: + # Checks for OpenSSL 1.0.1 on MacOS + if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f: + try: + from pip._vendor.urllib3.contrib import securetransport + except (ImportError, OSError): + pass + else: + securetransport.inject_into_urllib3() + +from pip._internal.cli.autocompletion import autocomplete +from pip._internal.cli.main_parser import parse_command +from pip._internal.commands import commands_dict +from pip._internal.exceptions import PipError +from pip._internal.utils import deprecation +from pip._internal.vcs import git, mercurial, subversion, bazaar # noqa +from pip._vendor.urllib3.exceptions import InsecureRequestWarning + +logger = logging.getLogger(__name__) + +# Hide the InsecureRequestWarning from urllib3 +warnings.filterwarnings("ignore", category=InsecureRequestWarning) + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + # Configure our deprecation warnings to be sent through loggers + deprecation.install_warning_logger() + + autocomplete() + + try: + cmd_name, cmd_args = parse_command(args) + except PipError as exc: + sys.stderr.write("ERROR: %s" % exc) + sys.stderr.write(os.linesep) + sys.exit(1) + + # Needed for locale.getpreferredencoding(False) to work + # in pip._internal.utils.encoding.auto_decode + try: + locale.setlocale(locale.LC_ALL, '') + except locale.Error as e: + # setlocale can apparently crash if locale are uninitialized + logger.debug("Ignoring error %s when setting locale", e) + command = 
commands_dict[cmd_name](isolated=("--isolated" in cmd_args)) + return command.main(cmd_args) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/build_env.py b/Shared/lib/python3.4/site-packages/pip/_internal/build_env.py new file mode 100644 index 0000000..673409d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/build_env.py @@ -0,0 +1,142 @@ +"""Build Environment used for isolation during sdist building +""" + +import logging +import os +import sys +from distutils.sysconfig import get_python_lib +from sysconfig import get_paths + +from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet + +from pip._internal.utils.misc import call_subprocess +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.ui import open_spinner + +logger = logging.getLogger(__name__) + + +class BuildEnvironment(object): + """Creates and manages an isolated environment to install build deps + """ + + def __init__(self): + self._temp_dir = TempDirectory(kind="build-env") + self._temp_dir.create() + + @property + def path(self): + return self._temp_dir.path + + def __enter__(self): + self.save_path = os.environ.get('PATH', None) + self.save_pythonpath = os.environ.get('PYTHONPATH', None) + self.save_nousersite = os.environ.get('PYTHONNOUSERSITE', None) + + install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' + install_dirs = get_paths(install_scheme, vars={ + 'base': self.path, + 'platbase': self.path, + }) + + scripts = install_dirs['scripts'] + if self.save_path: + os.environ['PATH'] = scripts + os.pathsep + self.save_path + else: + os.environ['PATH'] = scripts + os.pathsep + os.defpath + + # Note: prefer distutils' sysconfig to get the + # library paths so PyPy is correctly supported. 
+ purelib = get_python_lib(plat_specific=0, prefix=self.path) + platlib = get_python_lib(plat_specific=1, prefix=self.path) + if purelib == platlib: + lib_dirs = purelib + else: + lib_dirs = purelib + os.pathsep + platlib + if self.save_pythonpath: + os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ + self.save_pythonpath + else: + os.environ['PYTHONPATH'] = lib_dirs + + os.environ['PYTHONNOUSERSITE'] = '1' + + return self.path + + def __exit__(self, exc_type, exc_val, exc_tb): + def restore_var(varname, old_value): + if old_value is None: + os.environ.pop(varname, None) + else: + os.environ[varname] = old_value + + restore_var('PATH', self.save_path) + restore_var('PYTHONPATH', self.save_pythonpath) + restore_var('PYTHONNOUSERSITE', self.save_nousersite) + + def cleanup(self): + self._temp_dir.cleanup() + + def missing_requirements(self, reqs): + """Return a list of the requirements from reqs that are not present + """ + missing = [] + with self: + ws = WorkingSet(os.environ["PYTHONPATH"].split(os.pathsep)) + for req in reqs: + try: + if ws.find(Requirement.parse(req)) is None: + missing.append(req) + except VersionConflict: + missing.append(req) + return missing + + def install_requirements(self, finder, requirements, message): + args = [ + sys.executable, '-m', 'pip', 'install', '--ignore-installed', + '--no-user', '--prefix', self.path, '--no-warn-script-location', + ] + if logger.getEffectiveLevel() <= logging.DEBUG: + args.append('-v') + for format_control in ('no_binary', 'only_binary'): + formats = getattr(finder.format_control, format_control) + args.extend(('--' + format_control.replace('_', '-'), + ','.join(sorted(formats or {':none:'})))) + if finder.index_urls: + args.extend(['-i', finder.index_urls[0]]) + for extra_index in finder.index_urls[1:]: + args.extend(['--extra-index-url', extra_index]) + else: + args.append('--no-index') + for link in finder.find_links: + args.extend(['--find-links', link]) + for _, host, _ in finder.secure_origins: + 
args.extend(['--trusted-host', host]) + if finder.allow_all_prereleases: + args.append('--pre') + if finder.process_dependency_links: + args.append('--process-dependency-links') + args.append('--') + args.extend(requirements) + with open_spinner(message) as spinner: + call_subprocess(args, show_stdout=False, spinner=spinner) + + +class NoOpBuildEnvironment(BuildEnvironment): + """A no-op drop-in replacement for BuildEnvironment + """ + + def __init__(self): + pass + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + def cleanup(self): + pass + + def install_requirements(self, finder, requirements, message): + raise NotImplementedError() diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/cache.py b/Shared/lib/python3.4/site-packages/pip/_internal/cache.py new file mode 100644 index 0000000..33bec97 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/cache.py @@ -0,0 +1,202 @@ +"""Cache Management +""" + +import errno +import hashlib +import logging +import os + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.download import path_to_url +from pip._internal.models.link import Link +from pip._internal.utils.compat import expanduser +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.wheel import InvalidWheelFilename, Wheel + +logger = logging.getLogger(__name__) + + +class Cache(object): + """An abstract class - provides cache directories for data from links + + + :param cache_dir: The root of the cache. + :param format_control: An object of FormatControl class to limit + binaries being read from the cache. + :param allowed_formats: which formats of files the cache should store. 
+ ('binary' and 'source' are the only allowed values) + """ + + def __init__(self, cache_dir, format_control, allowed_formats): + super(Cache, self).__init__() + self.cache_dir = expanduser(cache_dir) if cache_dir else None + self.format_control = format_control + self.allowed_formats = allowed_formats + + _valid_formats = {"source", "binary"} + assert self.allowed_formats.union(_valid_formats) == _valid_formats + + def _get_cache_path_parts(self, link): + """Get parts of part that must be os.path.joined with cache_dir + """ + + # We want to generate an url to use as our cache key, we don't want to + # just re-use the URL because it might have other items in the fragment + # and we don't care about those. + key_parts = [link.url_without_fragment] + if link.hash_name is not None and link.hash is not None: + key_parts.append("=".join([link.hash_name, link.hash])) + key_url = "#".join(key_parts) + + # Encode our key url with sha224, we'll use this because it has similar + # security properties to sha256, but with a shorter total output (and + # thus less secure). However the differences don't make a lot of + # difference for our use case here. + hashed = hashlib.sha224(key_url.encode()).hexdigest() + + # We want to nest the directories some to prevent having a ton of top + # level directories where we might run out of sub directories on some + # FS. 
+ parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] + + return parts + + def _get_candidates(self, link, package_name): + can_not_cache = ( + not self.cache_dir or + not package_name or + not link + ) + if can_not_cache: + return [] + + canonical_name = canonicalize_name(package_name) + formats = self.format_control.get_allowed_formats( + canonical_name + ) + if not self.allowed_formats.intersection(formats): + return [] + + root = self.get_path_for_link(link) + try: + return os.listdir(root) + except OSError as err: + if err.errno in {errno.ENOENT, errno.ENOTDIR}: + return [] + raise + + def get_path_for_link(self, link): + """Return a directory to store cached items in for link. + """ + raise NotImplementedError() + + def get(self, link, package_name): + """Returns a link to a cached item if it exists, otherwise returns the + passed link. + """ + raise NotImplementedError() + + def _link_for_candidate(self, link, candidate): + root = self.get_path_for_link(link) + path = os.path.join(root, candidate) + + return Link(path_to_url(path)) + + def cleanup(self): + pass + + +class SimpleWheelCache(Cache): + """A cache of wheels for future installs. + """ + + def __init__(self, cache_dir, format_control): + super(SimpleWheelCache, self).__init__( + cache_dir, format_control, {"binary"} + ) + + def get_path_for_link(self, link): + """Return a directory to store cached wheels for link + + Because there are M wheels for any one sdist, we provide a directory + to cache them in, and then consult that directory when looking up + cache hits. + + We only insert things into the cache if they have plausible version + numbers, so that we don't contaminate the cache with things that were + not unique. E.g. ./package might have dozens of installs done for it + and build a version of 0.0...and if we built and cached a wheel, we'd + end up using the same wheel even if the source has been edited. + + :param link: The link of the sdist for which this will cache wheels. 
+ """ + parts = self._get_cache_path_parts(link) + + # Store wheels within the root cache_dir + return os.path.join(self.cache_dir, "wheels", *parts) + + def get(self, link, package_name): + candidates = [] + + for wheel_name in self._get_candidates(link, package_name): + try: + wheel = Wheel(wheel_name) + except InvalidWheelFilename: + continue + if not wheel.supported(): + # Built for a different python/arch/etc + continue + candidates.append((wheel.support_index_min(), wheel_name)) + + if not candidates: + return link + + return self._link_for_candidate(link, min(candidates)[1]) + + +class EphemWheelCache(SimpleWheelCache): + """A SimpleWheelCache that creates it's own temporary cache directory + """ + + def __init__(self, format_control): + self._temp_dir = TempDirectory(kind="ephem-wheel-cache") + self._temp_dir.create() + + super(EphemWheelCache, self).__init__( + self._temp_dir.path, format_control + ) + + def cleanup(self): + self._temp_dir.cleanup() + + +class WheelCache(Cache): + """Wraps EphemWheelCache and SimpleWheelCache into a single Cache + + This Cache allows for gracefully degradation, using the ephem wheel cache + when a certain link is not found in the simple wheel cache first. 
+ """ + + def __init__(self, cache_dir, format_control): + super(WheelCache, self).__init__( + cache_dir, format_control, {'binary'} + ) + self._wheel_cache = SimpleWheelCache(cache_dir, format_control) + self._ephem_cache = EphemWheelCache(format_control) + + def get_path_for_link(self, link): + return self._wheel_cache.get_path_for_link(link) + + def get_ephem_path_for_link(self, link): + return self._ephem_cache.get_path_for_link(link) + + def get(self, link, package_name): + retval = self._wheel_cache.get(link, package_name) + if retval is link: + retval = self._ephem_cache.get(link, package_name) + return retval + + def cleanup(self): + self._wheel_cache.cleanup() + self._ephem_cache.cleanup() diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/cli/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/cli/__init__.py new file mode 100644 index 0000000..e589bb9 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/cli/__init__.py @@ -0,0 +1,4 @@ +"""Subpackage containing all of pip's command line interface related code +""" + +# This file intentionally does not import submodules diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/cli/autocompletion.py b/Shared/lib/python3.4/site-packages/pip/_internal/cli/autocompletion.py new file mode 100644 index 0000000..0a04199 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/cli/autocompletion.py @@ -0,0 +1,152 @@ +"""Logic that powers autocompletion installed by ``pip completion``. +""" + +import optparse +import os +import sys + +from pip._internal.cli.main_parser import create_main_parser +from pip._internal.commands import commands_dict, get_summaries +from pip._internal.utils.misc import get_installed_distributions + + +def autocomplete(): + """Entry Point for completion of main and subcommand options. + """ + # Don't complete if user hasn't sourced bash_completion file. 
+ if 'PIP_AUTO_COMPLETE' not in os.environ: + return + cwords = os.environ['COMP_WORDS'].split()[1:] + cword = int(os.environ['COMP_CWORD']) + try: + current = cwords[cword - 1] + except IndexError: + current = '' + + subcommands = [cmd for cmd, summary in get_summaries()] + options = [] + # subcommand + try: + subcommand_name = [w for w in cwords if w in subcommands][0] + except IndexError: + subcommand_name = None + + parser = create_main_parser() + # subcommand options + if subcommand_name: + # special case: 'help' subcommand has no options + if subcommand_name == 'help': + sys.exit(1) + # special case: list locally installed dists for show and uninstall + should_list_installed = ( + subcommand_name in ['show', 'uninstall'] and + not current.startswith('-') + ) + if should_list_installed: + installed = [] + lc = current.lower() + for dist in get_installed_distributions(local_only=True): + if dist.key.startswith(lc) and dist.key not in cwords[1:]: + installed.append(dist.key) + # if there are no dists installed, fall back to option completion + if installed: + for dist in installed: + print(dist) + sys.exit(1) + + subcommand = commands_dict[subcommand_name]() + + for opt in subcommand.parser.option_list_all: + if opt.help != optparse.SUPPRESS_HELP: + for opt_str in opt._long_opts + opt._short_opts: + options.append((opt_str, opt.nargs)) + + # filter out previously specified options from available options + prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] + options = [(x, v) for (x, v) in options if x not in prev_opts] + # filter options by current input + options = [(k, v) for k, v in options if k.startswith(current)] + # get completion type given cwords and available subcommand options + completion_type = get_path_completion_type( + cwords, cword, subcommand.parser.option_list_all, + ) + # get completion files and directories if ``completion_type`` is + # ````, ```` or ```` + if completion_type: + options = auto_complete_paths(current, completion_type) 
+ options = ((opt, 0) for opt in options) + for option in options: + opt_label = option[0] + # append '=' to options which require args + if option[1] and option[0][:2] == "--": + opt_label += '=' + print(opt_label) + else: + # show main parser options only when necessary + + opts = [i.option_list for i in parser.option_groups] + opts.append(parser.option_list) + opts = (o for it in opts for o in it) + if current.startswith('-'): + for opt in opts: + if opt.help != optparse.SUPPRESS_HELP: + subcommands += opt._long_opts + opt._short_opts + else: + # get completion type given cwords and all available options + completion_type = get_path_completion_type(cwords, cword, opts) + if completion_type: + subcommands = auto_complete_paths(current, completion_type) + + print(' '.join([x for x in subcommands if x.startswith(current)])) + sys.exit(1) + + +def get_path_completion_type(cwords, cword, opts): + """Get the type of path completion (``file``, ``dir``, ``path`` or None) + + :param cwords: same as the environmental variable ``COMP_WORDS`` + :param cword: same as the environmental variable ``COMP_CWORD`` + :param opts: The available options to check + :return: path completion type (``file``, ``dir``, ``path`` or None) + """ + if cword < 2 or not cwords[cword - 2].startswith('-'): + return + for opt in opts: + if opt.help == optparse.SUPPRESS_HELP: + continue + for o in str(opt).split('/'): + if cwords[cword - 2].split('=')[0] == o: + if not opt.metavar or any( + x in ('path', 'file', 'dir') + for x in opt.metavar.split('/')): + return opt.metavar + + +def auto_complete_paths(current, completion_type): + """If ``completion_type`` is ``file`` or ``path``, list all regular files + and directories starting with ``current``; otherwise only list directories + starting with ``current``. 
+ + :param current: The word to be completed + :param completion_type: path completion type(`file`, `path` or `dir`)i + :return: A generator of regular files and/or directories + """ + directory, filename = os.path.split(current) + current_path = os.path.abspath(directory) + # Don't complete paths if they can't be accessed + if not os.access(current_path, os.R_OK): + return + filename = os.path.normcase(filename) + # list all files that start with ``filename`` + file_list = (x for x in os.listdir(current_path) + if os.path.normcase(x).startswith(filename)) + for f in file_list: + opt = os.path.join(current_path, f) + comp_file = os.path.normcase(os.path.join(directory, f)) + # complete regular files when there is not ```` after option + # complete directories when there is ````, ```` or + # ````after option + if completion_type != 'dir' and os.path.isfile(opt): + yield comp_file + elif os.path.isdir(opt): + yield os.path.join(comp_file, '') diff --git a/Shared/lib/python3.4/site-packages/pip/basecommand.py b/Shared/lib/python3.4/site-packages/pip/_internal/cli/base_command.py similarity index 50% rename from Shared/lib/python3.4/site-packages/pip/basecommand.py rename to Shared/lib/python3.4/site-packages/pip/_internal/cli/base_command.py index a07043a..dac4b05 100644 --- a/Shared/lib/python3.4/site-packages/pip/basecommand.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/cli/base_command.py @@ -2,41 +2,48 @@ from __future__ import absolute_import import logging +import logging.config +import optparse import os import sys -import optparse -import warnings -from pip import cmdoptions -from pip.index import PackageFinder -from pip.locations import running_under_virtualenv -from pip.download import PipSession -from pip.exceptions import (BadCommand, InstallationError, UninstallationError, - CommandError, PreviousBuildDirError) - -from pip.compat import logging_dictConfig -from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter -from 
pip.req import InstallRequirement, parse_requirements -from pip.status_codes import ( - SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND, - PREVIOUS_BUILD_DIR_ERROR, +from pip._internal.cli import cmdoptions +from pip._internal.cli.parser import ( + ConfigOptionParser, UpdatingDefaultsHelpFormatter, ) -from pip.utils import deprecation, get_prog, normalize_path -from pip.utils.logging import IndentingFormatter -from pip.utils.outdated import pip_version_check +from pip._internal.cli.status_codes import ( + ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR, + VIRTUALENV_NOT_FOUND, +) +from pip._internal.download import PipSession +from pip._internal.exceptions import ( + BadCommand, CommandError, InstallationError, PreviousBuildDirError, + UninstallationError, +) +from pip._internal.index import PackageFinder +from pip._internal.locations import running_under_virtualenv +from pip._internal.req.constructors import ( + install_req_from_editable, install_req_from_line, +) +from pip._internal.req.req_file import parse_requirements +from pip._internal.utils.logging import setup_logging +from pip._internal.utils.misc import get_prog, normalize_path +from pip._internal.utils.outdated import pip_version_check +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +if MYPY_CHECK_RUNNING: + from typing import Optional # noqa: F401 __all__ = ['Command'] - logger = logging.getLogger(__name__) class Command(object): - name = None - usage = None - hidden = False - log_streams = ("ext://sys.stdout", "ext://sys.stderr") + name = None # type: Optional[str] + usage = None # type: Optional[str] + hidden = False # type: bool + ignore_require_venv = False # type: bool def __init__(self, isolated=False): parser_kw = { @@ -105,91 +112,18 @@ class Command(object): def main(self, args): options, args = self.parse_args(args) - if options.quiet: - if options.quiet == 1: - level = "WARNING" - if options.quiet == 2: - level = "ERROR" - else: - level = "CRITICAL" - elif 
options.verbose: - level = "DEBUG" - else: - level = "INFO" + # Set verbosity so that it can be used elsewhere. + self.verbosity = options.verbose - options.quiet - logging_dictConfig({ - "version": 1, - "disable_existing_loggers": False, - "filters": { - "exclude_warnings": { - "()": "pip.utils.logging.MaxLevelFilter", - "level": logging.WARNING, - }, - }, - "formatters": { - "indent": { - "()": IndentingFormatter, - "format": "%(message)s", - }, - }, - "handlers": { - "console": { - "level": level, - "class": "pip.utils.logging.ColorizedStreamHandler", - "stream": self.log_streams[0], - "filters": ["exclude_warnings"], - "formatter": "indent", - }, - "console_errors": { - "level": "WARNING", - "class": "pip.utils.logging.ColorizedStreamHandler", - "stream": self.log_streams[1], - "formatter": "indent", - }, - "user_log": { - "level": "DEBUG", - "class": "pip.utils.logging.BetterRotatingFileHandler", - "filename": options.log or "/dev/null", - "delay": True, - "formatter": "indent", - }, - }, - "root": { - "level": level, - "handlers": list(filter(None, [ - "console", - "console_errors", - "user_log" if options.log else None, - ])), - }, - # Disable any logging besides WARNING unless we have DEBUG level - # logging enabled. These use both pip._vendor and the bare names - # for the case where someone unbundles our libraries. - "loggers": dict( - ( - name, - { - "level": ( - "WARNING" - if level in ["INFO", "ERROR"] - else "DEBUG" - ), - }, - ) - for name in ["pip._vendor", "distlib", "requests", "urllib3"] - ), - }) + setup_logging( + verbosity=self.verbosity, + no_color=options.no_color, + user_log_file=options.log, + ) - if sys.version_info[:2] == (2, 6): - warnings.warn( - "Python 2.6 is no longer supported by the Python core team, " - "please upgrade your Python. A future version of pip will " - "drop support for Python 2.6", - deprecation.Python26DeprecationWarning - ) - - # TODO: try to get these passing down from the command? 
- # without resorting to os.environ to hold these. + # TODO: Try to get these passing down from the command? + # without resorting to os.environ to hold these. + # This also affects isolated builds and it should. if options.no_input: os.environ['PIP_NO_INPUT'] = '1' @@ -197,7 +131,7 @@ class Command(object): if options.exists_action: os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action) - if options.require_venv: + if options.require_venv and not self.ignore_require_venv: # If a venv is required check if it can really be found if not running_under_virtualenv(): logger.critical( @@ -231,19 +165,29 @@ class Command(object): logger.debug('Exception information:', exc_info=True) return ERROR - except: + except BaseException: logger.critical('Exception:', exc_info=True) return UNKNOWN_ERROR finally: + allow_version_check = ( + # Does this command have the index_group options? + hasattr(options, "no_index") and + # Is this command allowed to perform this check? + not (options.disable_pip_version_check or options.no_index) + ) # Check if we're using the latest version of pip available - if (not options.disable_pip_version_check and not - getattr(options, "no_index", False)): - with self._build_session( - options, - retries=0, - timeout=min(5, options.timeout)) as session: - pip_version_check(session) + if allow_version_check: + session = self._build_session( + options, + retries=0, + timeout=min(5, options.timeout) + ) + with session: + pip_version_check(session, options) + + # Shutdown the logging module + logging.shutdown() return SUCCESS @@ -256,62 +200,66 @@ class RequirementCommand(Command): """ Marshal cmd line args into a requirement set. 
""" + # NOTE: As a side-effect, options.require_hashes and + # requirement_set.require_hashes may be updated + for filename in options.constraints: - for req in parse_requirements( + for req_to_add in parse_requirements( filename, constraint=True, finder=finder, options=options, session=session, wheel_cache=wheel_cache): - requirement_set.add_requirement(req) + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) for req in args: - requirement_set.add_requirement( - InstallRequirement.from_line( - req, None, isolated=options.isolated_mode, - wheel_cache=wheel_cache - ) + req_to_add = install_req_from_line( + req, None, isolated=options.isolated_mode, + wheel_cache=wheel_cache ) + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) for req in options.editables: - requirement_set.add_requirement( - InstallRequirement.from_editable( - req, - default_vcs=options.default_vcs, - isolated=options.isolated_mode, - wheel_cache=wheel_cache - ) + req_to_add = install_req_from_editable( + req, + isolated=options.isolated_mode, + wheel_cache=wheel_cache ) + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) - found_req_in_file = False for filename in options.requirements: - for req in parse_requirements( + for req_to_add in parse_requirements( filename, finder=finder, options=options, session=session, wheel_cache=wheel_cache): - found_req_in_file = True - requirement_set.add_requirement(req) + req_to_add.is_direct = True + requirement_set.add_requirement(req_to_add) # If --require-hashes was a line in a requirements file, tell # RequirementSet about it: requirement_set.require_hashes = options.require_hashes - if not (args or options.editables or found_req_in_file): + if not (args or options.editables or options.requirements): opts = {'name': name} if options.find_links: - msg = ('You must give at least one requirement to ' - '%(name)s (maybe you meant "pip %(name)s ' - '%(links)s"?)' % - dict(opts, links=' 
'.join(options.find_links))) + raise CommandError( + 'You must give at least one requirement to %(name)s ' + '(maybe you meant "pip %(name)s %(links)s"?)' % + dict(opts, links=' '.join(options.find_links))) else: - msg = ('You must give at least one requirement ' - 'to %(name)s (see "pip help %(name)s")' % opts) - logger.warning(msg) + raise CommandError( + 'You must give at least one requirement to %(name)s ' + '(see "pip help %(name)s")' % opts) - def _build_package_finder(self, options, session): + def _build_package_finder(self, options, session, + platform=None, python_versions=None, + abi=None, implementation=None): """ Create a package finder appropriate to this requirement command. """ index_urls = [options.index_url] + options.extra_index_urls if options.no_index: - logger.info('Ignoring indexes: %s', ','.join(index_urls)) + logger.debug('Ignoring indexes: %s', ','.join(index_urls)) index_urls = [] return PackageFinder( @@ -322,4 +270,9 @@ class RequirementCommand(Command): allow_all_prereleases=options.pre, process_dependency_links=options.process_dependency_links, session=session, + platform=platform, + versions=python_versions, + abi=abi, + implementation=implementation, + prefer_binary=options.prefer_binary, ) diff --git a/Shared/lib/python3.4/site-packages/pip/cmdoptions.py b/Shared/lib/python3.4/site-packages/pip/_internal/cli/cmdoptions.py similarity index 61% rename from Shared/lib/python3.4/site-packages/pip/cmdoptions.py rename to Shared/lib/python3.4/site-packages/pip/_internal/cli/cmdoptions.py index aced0c0..29b758f 100644 --- a/Shared/lib/python3.4/site-packages/pip/cmdoptions.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/cli/cmdoptions.py @@ -9,16 +9,20 @@ pass on state. To be consistent, all options will follow this design. 
""" from __future__ import absolute_import -from functools import partial -from optparse import OptionGroup, SUPPRESS_HELP, Option import warnings +from functools import partial +from optparse import SUPPRESS_HELP, Option, OptionGroup -from pip.index import ( - FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary, - fmt_ctl_no_use_wheel) -from pip.models import PyPI -from pip.locations import USER_CACHE_DIR, src_prefix -from pip.utils.hashes import STRONG_HASHES +from pip._internal.exceptions import CommandError +from pip._internal.locations import USER_CACHE_DIR, src_prefix +from pip._internal.models.format_control import FormatControl +from pip._internal.models.index import PyPI +from pip._internal.utils.hashes import STRONG_HASHES +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.utils.ui import BAR_TYPES + +if MYPY_CHECK_RUNNING: + from typing import Any # noqa: F401 def make_option_group(group, parser): @@ -33,12 +37,6 @@ def make_option_group(group, parser): return option_group -def resolve_wheel_no_use_binary(options): - if not options.use_wheel: - control = options.format_control - fmt_ctl_no_use_wheel(control) - - def check_install_build_global(options, check_options=None): """Disable wheels if per-setup.py call options are set. @@ -54,10 +52,50 @@ def check_install_build_global(options, check_options=None): names = ["build_options", "global_options", "install_options"] if any(map(getname, names)): control = options.format_control - fmt_ctl_no_binary(control) + control.disallow_binaries() warnings.warn( 'Disabling all use of wheels due to the use of --build-options ' - '/ --global-options / --install-options.', stacklevel=2) + '/ --global-options / --install-options.', stacklevel=2, + ) + + +def check_dist_restriction(options, check_target=False): + """Function for determining if custom platform options are allowed. + + :param options: The OptionParser options. 
+ :param check_target: Whether or not to check if --target is being used. + """ + dist_restriction_set = any([ + options.python_version, + options.platform, + options.abi, + options.implementation, + ]) + + binary_only = FormatControl(set(), {':all:'}) + sdist_dependencies_allowed = ( + options.format_control != binary_only and + not options.ignore_dependencies + ) + + # Installations or downloads using dist restrictions must not combine + # source distributions and dist-specific wheels, as they are not + # gauranteed to be locally compatible. + if dist_restriction_set and sdist_dependencies_allowed: + raise CommandError( + "When restricting platform and interpreter constraints using " + "--python-version, --platform, --abi, or --implementation, " + "either --no-deps must be set, or --only-binary=:all: must be " + "set and --no-binary must not be set (or must be set to " + ":none:)." + ) + + if check_target: + if dist_restriction_set and not options.target_dir: + raise CommandError( + "Can not use any platform or abi specific options unless " + "installing via '--target'" + ) ########### @@ -69,7 +107,8 @@ help_ = partial( '-h', '--help', dest='help', action='help', - help='Show help.') + help='Show help.', +) # type: Any isolated_mode = partial( Option, @@ -90,7 +129,8 @@ require_virtualenv = partial( dest='require_venv', action='store_true', default=False, - help=SUPPRESS_HELP) + help=SUPPRESS_HELP +) # type: Any verbose = partial( Option, @@ -101,12 +141,22 @@ verbose = partial( help='Give more output. Option is additive, and can be used up to 3 times.' 
) +no_color = partial( + Option, + '--no-color', + dest='no_color', + action='store_true', + default=False, + help="Suppress colored output", +) + version = partial( Option, '-V', '--version', dest='version', action='store_true', - help='Show version and exit.') + help='Show version and exit.', +) # type: Any quiet = partial( Option, @@ -114,7 +164,25 @@ quiet = partial( dest='quiet', action='count', default=0, - help='Give less output.') + help=( + 'Give less output. Option is additive, and can be used up to 3' + ' times (corresponding to WARNING, ERROR, and CRITICAL logging' + ' levels).' + ), +) # type: Any + +progress_bar = partial( + Option, + '--progress-bar', + dest='progress_bar', + type='choice', + choices=list(BAR_TYPES.keys()), + default='on', + help=( + 'Specify type of progress to be displayed [' + + '|'.join(BAR_TYPES.keys()) + '] (default: %default)' + ), +) # type: Any log = partial( Option, @@ -122,7 +190,7 @@ log = partial( dest="log", metavar="path", help="Path to a verbose appending log." -) +) # type: Any no_input = partial( Option, @@ -131,7 +199,8 @@ no_input = partial( dest='no_input', action='store_true', default=False, - help=SUPPRESS_HELP) + help=SUPPRESS_HELP +) # type: Any proxy = partial( Option, @@ -139,7 +208,8 @@ proxy = partial( dest='proxy', type='str', default='', - help="Specify a proxy in the form [user:passwd@]proxy.server:port.") + help="Specify a proxy in the form [user:passwd@]proxy.server:port." +) # type: Any retries = partial( Option, @@ -148,7 +218,8 @@ retries = partial( type='int', default=5, help="Maximum number of retries each connection should attempt " - "(default %default times).") + "(default %default times).", +) # type: Any timeout = partial( Option, @@ -157,16 +228,8 @@ timeout = partial( dest='timeout', type='float', default=15, - help='Set the socket timeout (default %default seconds).') - -default_vcs = partial( - Option, - # The default version control system for editables, e.g. 
'svn' - '--default-vcs', - dest='default_vcs', - type='str', - default='', - help=SUPPRESS_HELP) + help='Set the socket timeout (default %default seconds).', +) # type: Any skip_requirements_regex = partial( Option, @@ -175,7 +238,8 @@ skip_requirements_regex = partial( dest='skip_requirements_regex', type='str', default='', - help=SUPPRESS_HELP) + help=SUPPRESS_HELP, +) # type: Any def exists_action(): @@ -184,12 +248,13 @@ def exists_action(): '--exists-action', dest='exists_action', type='choice', - choices=['s', 'i', 'w', 'b'], + choices=['s', 'i', 'w', 'b', 'a'], default=[], action='append', metavar='action', help="Default action when a path already exists: " - "(s)witch, (i)gnore, (w)ipe, (b)ackup.") + "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort).", + ) cert = partial( @@ -198,7 +263,8 @@ cert = partial( dest='cert', type='str', metavar='path', - help="Path to alternate CA bundle.") + help="Path to alternate CA bundle.", +) # type: Any client_cert = partial( Option, @@ -208,7 +274,8 @@ client_cert = partial( default=None, metavar='path', help="Path to SSL client certificate, a single file containing the " - "private key and the certificate in PEM format.") + "private key and the certificate in PEM format.", +) # type: Any index_url = partial( Option, @@ -216,7 +283,11 @@ index_url = partial( dest='index_url', metavar='URL', default=PyPI.simple_url, - help='Base URL of Python Package Index (default %default).') + help="Base URL of Python Package Index (default %default). " + "This should point to a repository compliant with PEP 503 " + "(the simple repository API) or a local directory laid out " + "in the same format.", +) # type: Any def extra_index_url(): @@ -226,7 +297,9 @@ def extra_index_url(): metavar='URL', action='append', default=[], - help='Extra URLs of package indexes to use in addition to --index-url.' + help="Extra URLs of package indexes to use in addition to " + "--index-url. 
Should follow the same rules as " + "--index-url.", ) @@ -236,7 +309,8 @@ no_index = partial( dest='no_index', action='store_true', default=False, - help='Ignore package index (only looking at --find-links URLs instead).') + help='Ignore package index (only looking at --find-links URLs instead).', +) # type: Any def find_links(): @@ -247,31 +321,11 @@ def find_links(): default=[], metavar='url', help="If a url or path to an html file, then parse for links to " - "archives. If a local path or file:// url that's a directory," - "then look for archives in the directory listing.") - - -def allow_external(): - return Option( - "--allow-external", - dest="allow_external", - action="append", - default=[], - metavar="PACKAGE", - help=SUPPRESS_HELP, + "archives. If a local path or file:// url that's a directory, " + "then look for archives in the directory listing.", ) -allow_all_external = partial( - Option, - "--allow-all-external", - dest="allow_all_external", - action="store_true", - default=False, - help=SUPPRESS_HELP, -) - - def trusted_host(): return Option( "--trusted-host", @@ -284,38 +338,6 @@ def trusted_host(): ) -# Remove after 7.0 -no_allow_external = partial( - Option, - "--no-allow-external", - dest="allow_all_external", - action="store_false", - default=False, - help=SUPPRESS_HELP, -) - - -# Remove --allow-insecure after 7.0 -def allow_unsafe(): - return Option( - "--allow-unverified", "--allow-insecure", - dest="allow_unverified", - action="append", - default=[], - metavar="PACKAGE", - help=SUPPRESS_HELP, - ) - -# Remove after 7.0 -no_allow_unsafe = partial( - Option, - "--no-allow-insecure", - dest="allow_all_insecure", - action="store_false", - default=False, - help=SUPPRESS_HELP -) - # Remove after 1.5 process_dependency_links = partial( Option, @@ -324,7 +346,7 @@ process_dependency_links = partial( action="store_true", default=False, help="Enable the processing of dependency links.", -) +) # type: Any def constraints(): @@ -335,7 +357,8 @@ def 
constraints(): default=[], metavar='file', help='Constrain versions using the given constraints file. ' - 'This option can be used multiple times.') + 'This option can be used multiple times.' + ) def requirements(): @@ -346,7 +369,8 @@ def requirements(): default=[], metavar='file', help='Install from the given requirements file. ' - 'This option can be used multiple times.') + 'This option can be used multiple times.' + ) def editable(): @@ -360,6 +384,7 @@ def editable(): '"develop mode") from a local project path or a VCS url.'), ) + src = partial( Option, '--src', '--source', '--source-dir', '--source-directory', @@ -369,28 +394,7 @@ src = partial( help='Directory to check out editable projects into. ' 'The default in a virtualenv is "/src". ' 'The default for global installs is "/src".' -) - -# XXX: deprecated, remove in 9.0 -use_wheel = partial( - Option, - '--use-wheel', - dest='use_wheel', - action='store_true', - default=True, - help=SUPPRESS_HELP, -) - -# XXX: deprecated, remove in 9.0 -no_use_wheel = partial( - Option, - '--no-use-wheel', - dest='use_wheel', - action='store_false', - default=True, - help=('Do not Find and prefer wheel archives when searching indexes and ' - 'find-links locations. 
DEPRECATED in favour of --no-binary.'), -) +) # type: Any def _get_format_control(values, option): @@ -399,41 +403,112 @@ def _get_format_control(values, option): def _handle_no_binary(option, opt_str, value, parser): - existing = getattr(parser.values, option.dest) - fmt_ctl_handle_mutual_exclude( - value, existing.no_binary, existing.only_binary) + existing = _get_format_control(parser.values, option) + FormatControl.handle_mutual_excludes( + value, existing.no_binary, existing.only_binary, + ) def _handle_only_binary(option, opt_str, value, parser): - existing = getattr(parser.values, option.dest) - fmt_ctl_handle_mutual_exclude( - value, existing.only_binary, existing.no_binary) + existing = _get_format_control(parser.values, option) + FormatControl.handle_mutual_excludes( + value, existing.only_binary, existing.no_binary, + ) def no_binary(): + format_control = FormatControl(set(), set()) return Option( "--no-binary", dest="format_control", action="callback", callback=_handle_no_binary, type="str", - default=FormatControl(set(), set()), + default=format_control, help="Do not use binary packages. Can be supplied multiple times, and " "each time adds to the existing value. Accepts either :all: to " "disable all binary packages, :none: to empty the set, or one or " "more package names with commas between them. Note that some " "packages are tricky to compile and may fail to install when " - "this option is used on them.") + "this option is used on them.", + ) def only_binary(): + format_control = FormatControl(set(), set()) return Option( "--only-binary", dest="format_control", action="callback", callback=_handle_only_binary, type="str", - default=FormatControl(set(), set()), + default=format_control, help="Do not use source packages. Can be supplied multiple times, and " "each time adds to the existing value. Accepts either :all: to " "disable all source packages, :none: to empty the set, or one or " "more package names with commas between them. 
Packages without " "binary distributions will fail to install when this option is " - "used on them.") + "used on them.", + ) + + +platform = partial( + Option, + '--platform', + dest='platform', + metavar='platform', + default=None, + help=("Only use wheels compatible with . " + "Defaults to the platform of the running system."), +) + + +python_version = partial( + Option, + '--python-version', + dest='python_version', + metavar='python_version', + default=None, + help=("Only use wheels compatible with Python " + "interpreter version . If not specified, then the " + "current system interpreter minor version is used. A major " + "version (e.g. '2') can be specified to match all " + "minor revs of that major version. A minor version " + "(e.g. '34') can also be specified."), +) + + +implementation = partial( + Option, + '--implementation', + dest='implementation', + metavar='implementation', + default=None, + help=("Only use wheels compatible with Python " + "implementation , e.g. 'pp', 'jy', 'cp', " + " or 'ip'. If not specified, then the current " + "interpreter implementation is used. Use 'py' to force " + "implementation-agnostic wheels."), +) + + +abi = partial( + Option, + '--abi', + dest='abi', + metavar='abi', + default=None, + help=("Only use wheels compatible with Python " + "abi , e.g. 'pypy_41'. If not specified, then the " + "current interpreter abi tag is used. Generally " + "you will need to specify --implementation, " + "--platform, and --python-version when using " + "this option."), +) + + +def prefer_binary(): + return Option( + "--prefer-binary", + dest="prefer_binary", + action="store_true", + default=False, + help="Prefer older binary packages over newer source packages." 
+ ) cache_dir = partial( @@ -459,15 +534,39 @@ no_deps = partial( dest='ignore_dependencies', action='store_true', default=False, - help="Don't install package dependencies.") + help="Don't install package dependencies.", +) # type: Any build_dir = partial( Option, '-b', '--build', '--build-dir', '--build-directory', dest='build_dir', metavar='dir', - help='Directory to unpack packages into and build in.' -) + help='Directory to unpack packages into and build in. Note that ' + 'an initial build still takes place in a temporary directory. ' + 'The location of temporary directories can be controlled by setting ' + 'the TMPDIR environment variable (TEMP on Windows) appropriately. ' + 'When passed, build directories are not cleaned in case of failures.' +) # type: Any + +ignore_requires_python = partial( + Option, + '--ignore-requires-python', + dest='ignore_requires_python', + action='store_true', + help='Ignore the Requires-Python information.' +) # type: Any + +no_build_isolation = partial( + Option, + '--no-build-isolation', + dest='build_isolation', + action='store_false', + default=True, + help='Disable isolation when building a modern source distribution. ' + 'Build dependencies specified by PEP 518 must be already installed ' + 'if this option is used.' +) # type: Any install_options = partial( Option, @@ -479,7 +578,8 @@ install_options = partial( "command (use like --install-option=\"--install-scripts=/usr/local/" "bin\"). Use multiple --install-option options to pass multiple " "options to setup.py install. 
If you are using an option with a " - "directory path, be sure to use absolute path.") + "directory path, be sure to use absolute path.", +) # type: Any global_options = partial( Option, @@ -488,14 +588,16 @@ global_options = partial( action='append', metavar='options', help="Extra global options to be supplied to the setup.py " - "call before the install command.") + "call before the install command.", +) # type: Any no_clean = partial( Option, '--no-clean', action='store_true', default=False, - help="Don't clean up build directories.") + help="Don't clean up build directories." +) # type: Any pre = partial( Option, @@ -503,16 +605,19 @@ pre = partial( action='store_true', default=False, help="Include pre-release and development versions. By default, " - "pip only finds stable versions.") + "pip only finds stable versions.", +) # type: Any disable_pip_version_check = partial( Option, "--disable-pip-version-check", dest="disable_pip_version_check", action="store_true", - default=False, + default=True, help="Don't periodically check PyPI to determine whether a new version " - "of pip is available for download. Implied with --no-index.") + "of pip is available for download. Implied with --no-index.", +) # type: Any + # Deprecated, Remove later always_unzip = partial( @@ -521,7 +626,7 @@ always_unzip = partial( dest='always_unzip', action='store_true', help=SUPPRESS_HELP, -) +) # type: Any def _merge_hash(option, opt_str, value, parser): @@ -551,7 +656,8 @@ hash = partial( callback=_merge_hash, type='string', help="Verify that the package's archive matches this " - 'hash before installing. Example: --hash=sha256:abcdef...') + 'hash before installing. Example: --hash=sha256:abcdef...', +) # type: Any require_hashes = partial( @@ -562,7 +668,8 @@ require_hashes = partial( default=False, help='Require a hash to check each requirement against, for ' 'repeatable installs. 
This option is implied when any package in a ' - 'requirements file has a --hash option.') + 'requirements file has a --hash option.', +) # type: Any ########## @@ -583,7 +690,6 @@ general_group = { proxy, retries, timeout, - default_vcs, skip_requirements_regex, exists_action, trusted_host, @@ -592,10 +698,11 @@ general_group = { cache_dir, no_cache, disable_pip_version_check, + no_color, ] } -non_deprecated_index_group = { +index_group = { 'name': 'Package Index Options', 'options': [ index_url, @@ -605,14 +712,3 @@ non_deprecated_index_group = { process_dependency_links, ] } - -index_group = { - 'name': 'Package Index Options (including deprecated options)', - 'options': non_deprecated_index_group['options'] + [ - allow_external, - allow_all_external, - no_allow_external, - allow_unsafe, - no_allow_unsafe, - ] -} diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/cli/main_parser.py b/Shared/lib/python3.4/site-packages/pip/_internal/cli/main_parser.py new file mode 100644 index 0000000..1774a6b --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/cli/main_parser.py @@ -0,0 +1,96 @@ +"""A single place for constructing and exposing the main parser +""" + +import os +import sys + +from pip import __version__ +from pip._internal.cli import cmdoptions +from pip._internal.cli.parser import ( + ConfigOptionParser, UpdatingDefaultsHelpFormatter, +) +from pip._internal.commands import ( + commands_dict, get_similar_commands, get_summaries, +) +from pip._internal.exceptions import CommandError +from pip._internal.utils.misc import get_prog + +__all__ = ["create_main_parser", "parse_command"] + + +def create_main_parser(): + """Creates and returns the main parser for pip's CLI + """ + + parser_kw = { + 'usage': '\n%prog [options]', + 'add_help_option': False, + 'formatter': UpdatingDefaultsHelpFormatter(), + 'name': 'global', + 'prog': get_prog(), + } + + parser = ConfigOptionParser(**parser_kw) + parser.disable_interspersed_args() + + pip_pkg_dir 
= os.path.abspath(os.path.join( + os.path.dirname(__file__), "..", "..", + )) + parser.version = 'pip %s from %s (python %s)' % ( + __version__, pip_pkg_dir, sys.version[:3], + ) + + # add the general options + gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) + parser.add_option_group(gen_opts) + + parser.main = True # so the help formatter knows + + # create command listing for description + command_summaries = get_summaries() + description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] + parser.description = '\n'.join(description) + + return parser + + +def parse_command(args): + parser = create_main_parser() + + # Note: parser calls disable_interspersed_args(), so the result of this + # call is to split the initial args into the general options before the + # subcommand and everything else. + # For example: + # args: ['--timeout=5', 'install', '--user', 'INITools'] + # general_options: ['--timeout==5'] + # args_else: ['install', '--user', 'INITools'] + general_options, args_else = parser.parse_args(args) + + # --version + if general_options.version: + sys.stdout.write(parser.version) + sys.stdout.write(os.linesep) + sys.exit() + + # pip || pip help -> print_help() + if not args_else or (args_else[0] == 'help' and len(args_else) == 1): + parser.print_help() + sys.exit() + + # the subcommand name + cmd_name = args_else[0] + + if cmd_name not in commands_dict: + guess = get_similar_commands(cmd_name) + + msg = ['unknown command "%s"' % cmd_name] + if guess: + msg.append('maybe you meant "%s"' % guess) + + raise CommandError(' - '.join(msg)) + + # all the args without the subcommand + cmd_args = args[:] + cmd_args.remove(cmd_name) + + return cmd_name, cmd_args diff --git a/Shared/lib/python3.4/site-packages/pip/baseparser.py b/Shared/lib/python3.4/site-packages/pip/_internal/cli/parser.py similarity index 66% rename from Shared/lib/python3.4/site-packages/pip/baseparser.py rename to 
Shared/lib/python3.4/site-packages/pip/_internal/cli/parser.py index ccbf36b..e1eaac4 100644 --- a/Shared/lib/python3.4/site-packages/pip/baseparser.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/cli/parser.py @@ -1,23 +1,19 @@ """Base option parser setup""" from __future__ import absolute_import -import sys +import logging import optparse -import os -import re +import sys import textwrap from distutils.util import strtobool from pip._vendor.six import string_types -from pip._vendor.six.moves import configparser -from pip.locations import ( - legacy_config_file, config_basename, running_under_virtualenv, - site_config_files -) -from pip.utils import appdirs, get_terminal_size +from pip._internal.cli.status_codes import UNKNOWN_ERROR +from pip._internal.configuration import Configuration, ConfigurationError +from pip._internal.utils.compat import get_terminal_size -_environ_prefix_re = re.compile(r"^PIP_", re.I) +logger = logging.getLogger(__name__) class PrettyHelpFormatter(optparse.IndentedHelpFormatter): @@ -113,6 +109,7 @@ class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): class CustomOptionParser(optparse.OptionParser): + def insert_option_group(self, idx, *args, **kwargs): """Insert an OptionGroup at a given position.""" group = self.add_option_group(*args, **kwargs) @@ -136,58 +133,15 @@ class ConfigOptionParser(CustomOptionParser): """Custom option parser which updates its defaults by checking the configuration files and environmental variables""" - isolated = False - def __init__(self, *args, **kwargs): - self.config = configparser.RawConfigParser() self.name = kwargs.pop('name') - self.isolated = kwargs.pop("isolated", False) - self.files = self.get_config_files() - if self.files: - self.config.read(self.files) + + isolated = kwargs.pop("isolated", False) + self.config = Configuration(isolated) + assert self.name optparse.OptionParser.__init__(self, *args, **kwargs) - def get_config_files(self): - # the files returned by this method will 
be parsed in order with the - # first files listed being overridden by later files in standard - # ConfigParser fashion - config_file = os.environ.get('PIP_CONFIG_FILE', False) - if config_file == os.devnull: - return [] - - # at the base we have any site-wide configuration - files = list(site_config_files) - - # per-user configuration next - if not self.isolated: - if config_file and os.path.exists(config_file): - files.append(config_file) - else: - # This is the legacy config file, we consider it to be a lower - # priority than the new file location. - files.append(legacy_config_file) - - # This is the new config file, we consider it to be a higher - # priority than the legacy file. - files.append( - os.path.join( - appdirs.user_config_dir("pip"), - config_basename, - ) - ) - - # finally virtualenv configuration first trumping others - if running_under_virtualenv(): - venv_config_file = os.path.join( - sys.prefix, - config_basename, - ) - if os.path.exists(venv_config_file): - files.append(venv_config_file) - - return files - def check_default(self, option, key, val): try: return option.check_value(key, val) @@ -195,30 +149,43 @@ class ConfigOptionParser(CustomOptionParser): print("An error occurred during configuration: %s" % exc) sys.exit(3) + def _get_ordered_configuration_items(self): + # Configuration gives keys in an unordered manner. Order them. 
+ override_order = ["global", self.name, ":env:"] + + # Pool the options into different groups + section_items = {name: [] for name in override_order} + for section_key, val in self.config.items(): + # ignore empty values + if not val: + logger.debug( + "Ignoring configuration key '%s' as it's value is empty.", + section_key + ) + continue + + section, key = section_key.split(".", 1) + if section in override_order: + section_items[section].append((key, val)) + + # Yield each group in their override order + for section in override_order: + for key, val in section_items[section]: + yield key, val + def _update_defaults(self, defaults): """Updates the given defaults with values from the config files and the environ. Does a little special handling for certain types of options (lists).""" - # Then go and look for the other sources of configuration: - config = {} - # 1. config files - for section in ('global', self.name): - config.update( - self.normalize_keys(self.get_config_section(section)) - ) - # 2. environmental variables - if not self.isolated: - config.update(self.normalize_keys(self.get_environ_vars())) + # Accumulate complex default state. self.values = optparse.Values(self.defaults) late_eval = set() # Then set the options with those values - for key, val in config.items(): - # ignore empty values - if not val: - continue + for key, val in self._get_ordered_configuration_items(): + # '--' because configuration supports only long names + option = self.get_option('--' + key) - option = self.get_option(key) # Ignore options not present in this parser. E.g. non-globals put # in [global] by users that want them to apply to all applicable # commands. 
@@ -226,7 +193,14 @@ class ConfigOptionParser(CustomOptionParser): continue if option.action in ('store_true', 'store_false', 'count'): - val = strtobool(val) + try: + val = strtobool(val) + except ValueError: + error_msg = invalid_config_error_message( + option.action, key, val + ) + self.error(error_msg) + elif option.action == 'append': val = val.split() val = [self.check_default(option, key, v) for v in val] @@ -248,37 +222,19 @@ class ConfigOptionParser(CustomOptionParser): self.values = None return defaults - def normalize_keys(self, items): - """Return a config dictionary with normalized keys regardless of - whether the keys were specified in environment variables or in config - files""" - normalized = {} - for key, val in items: - key = key.replace('_', '-') - if not key.startswith('--'): - key = '--%s' % key # only prefer long opts - normalized[key] = val - return normalized - - def get_config_section(self, name): - """Get a section of a configuration""" - if self.config.has_section(name): - return self.config.items(name) - return [] - - def get_environ_vars(self): - """Returns a generator with all environmental vars with prefix PIP_""" - for key, val in os.environ.items(): - if _environ_prefix_re.search(key): - yield (_environ_prefix_re.sub("", key).lower(), val) - def get_default_values(self): - """Overridding to make updating the defaults after instantiation of + """Overriding to make updating the defaults after instantiation of the option parser possible, _update_defaults() does the dirty work.""" if not self.process_default_values: # Old, pre-Optik 1.5 behaviour. 
return optparse.Values(self.defaults) + # Load the configuration, or error out in case of an error + try: + self.config.load() + except ConfigurationError as err: + self.exit(UNKNOWN_ERROR, str(err)) + defaults = self._update_defaults(self.defaults.copy()) # ours for option in self._get_all_options(): default = defaults.get(option.dest) @@ -289,4 +245,17 @@ class ConfigOptionParser(CustomOptionParser): def error(self, msg): self.print_usage(sys.stderr) - self.exit(2, "%s\n" % msg) + self.exit(UNKNOWN_ERROR, "%s\n" % msg) + + +def invalid_config_error_message(action, key, val): + """Returns a better error message when invalid configuration option + is provided.""" + if action in ('store_true', 'store_false'): + return ("{0} is not a valid value for {1} option, " + "please specify a boolean value like yes/no, " + "true/false or 1/0 instead.").format(val, key) + + return ("{0} is not a valid value for {1} option, " + "please specify a numerical value like 1/0 " + "instead.").format(val, key) diff --git a/Shared/lib/python3.4/site-packages/pip/status_codes.py b/Shared/lib/python3.4/site-packages/pip/_internal/cli/status_codes.py similarity index 100% rename from Shared/lib/python3.4/site-packages/pip/status_codes.py rename to Shared/lib/python3.4/site-packages/pip/_internal/cli/status_codes.py diff --git a/Shared/lib/python3.4/site-packages/pip/commands/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/__init__.py similarity index 52% rename from Shared/lib/python3.4/site-packages/pip/commands/__init__.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/__init__.py index 92b7ff5..c7d1da3 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/__init__.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/__init__.py @@ -3,33 +3,25 @@ Package containing all pip commands """ from __future__ import absolute_import -from pip.commands.completion import CompletionCommand -from pip.commands.download import 
DownloadCommand -from pip.commands.freeze import FreezeCommand -from pip.commands.hash import HashCommand -from pip.commands.help import HelpCommand -from pip.commands.list import ListCommand -from pip.commands.search import SearchCommand -from pip.commands.show import ShowCommand -from pip.commands.install import InstallCommand -from pip.commands.uninstall import UninstallCommand -from pip.commands.wheel import WheelCommand +from pip._internal.commands.completion import CompletionCommand +from pip._internal.commands.configuration import ConfigurationCommand +from pip._internal.commands.download import DownloadCommand +from pip._internal.commands.freeze import FreezeCommand +from pip._internal.commands.hash import HashCommand +from pip._internal.commands.help import HelpCommand +from pip._internal.commands.list import ListCommand +from pip._internal.commands.check import CheckCommand +from pip._internal.commands.search import SearchCommand +from pip._internal.commands.show import ShowCommand +from pip._internal.commands.install import InstallCommand +from pip._internal.commands.uninstall import UninstallCommand +from pip._internal.commands.wheel import WheelCommand +from pip._internal.utils.typing import MYPY_CHECK_RUNNING -commands_dict = { - CompletionCommand.name: CompletionCommand, - FreezeCommand.name: FreezeCommand, - HashCommand.name: HashCommand, - HelpCommand.name: HelpCommand, - SearchCommand.name: SearchCommand, - ShowCommand.name: ShowCommand, - InstallCommand.name: InstallCommand, - UninstallCommand.name: UninstallCommand, - DownloadCommand.name: DownloadCommand, - ListCommand.name: ListCommand, - WheelCommand.name: WheelCommand, -} - +if MYPY_CHECK_RUNNING: + from typing import List, Type # noqa: F401 + from pip._internal.cli.base_command import Command # noqa: F401 commands_order = [ InstallCommand, @@ -38,12 +30,16 @@ commands_order = [ FreezeCommand, ListCommand, ShowCommand, + CheckCommand, + ConfigurationCommand, SearchCommand, WheelCommand, 
HashCommand, CompletionCommand, HelpCommand, -] +] # type: List[Type[Command]] + +commands_dict = {c.name: c for c in commands_order} def get_summaries(ordered=True): diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/commands/check.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/check.py new file mode 100644 index 0000000..1be3ec2 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/check.py @@ -0,0 +1,41 @@ +import logging + +from pip._internal.cli.base_command import Command +from pip._internal.operations.check import ( + check_package_set, create_package_set_from_installed, +) + +logger = logging.getLogger(__name__) + + +class CheckCommand(Command): + """Verify installed packages have compatible dependencies.""" + name = 'check' + usage = """ + %prog [options]""" + summary = 'Verify installed packages have compatible dependencies.' + + def run(self, options, args): + package_set = create_package_set_from_installed() + missing, conflicting = check_package_set(package_set) + + for project_name in missing: + version = package_set[project_name].version + for dependency in missing[project_name]: + logger.info( + "%s %s requires %s, which is not installed.", + project_name, version, dependency[0], + ) + + for project_name in conflicting: + version = package_set[project_name].version + for dep_name, dep_version, req in conflicting[project_name]: + logger.info( + "%s %s has requirement %s, but you have %s %s.", + project_name, version, req, dep_name, dep_version, + ) + + if missing or conflicting: + return 1 + else: + logger.info("No broken requirements found.") diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/commands/completion.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/completion.py new file mode 100644 index 0000000..2fcdd39 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/completion.py @@ -0,0 +1,94 @@ +from __future__ import absolute_import + +import 
sys +import textwrap + +from pip._internal.cli.base_command import Command +from pip._internal.utils.misc import get_prog + +BASE_COMPLETION = """ +# pip %(shell)s completion start%(script)s# pip %(shell)s completion end +""" + +COMPLETION_SCRIPTS = { + 'bash': """ + _pip_completion() + { + COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ + COMP_CWORD=$COMP_CWORD \\ + PIP_AUTO_COMPLETE=1 $1 ) ) + } + complete -o default -F _pip_completion %(prog)s + """, + 'zsh': """ + function _pip_completion { + local words cword + read -Ac words + read -cn cword + reply=( $( COMP_WORDS="$words[*]" \\ + COMP_CWORD=$(( cword-1 )) \\ + PIP_AUTO_COMPLETE=1 $words[1] ) ) + } + compctl -K _pip_completion %(prog)s + """, + 'fish': """ + function __fish_complete_pip + set -lx COMP_WORDS (commandline -o) "" + set -lx COMP_CWORD ( \\ + math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\ + ) + set -lx PIP_AUTO_COMPLETE 1 + string split \\ -- (eval $COMP_WORDS[1]) + end + complete -fa "(__fish_complete_pip)" -c %(prog)s + """, +} + + +class CompletionCommand(Command): + """A helper command to be used for command completion.""" + name = 'completion' + summary = 'A helper command used for command completion.' 
+ ignore_require_venv = True + + def __init__(self, *args, **kw): + super(CompletionCommand, self).__init__(*args, **kw) + + cmd_opts = self.cmd_opts + + cmd_opts.add_option( + '--bash', '-b', + action='store_const', + const='bash', + dest='shell', + help='Emit completion code for bash') + cmd_opts.add_option( + '--zsh', '-z', + action='store_const', + const='zsh', + dest='shell', + help='Emit completion code for zsh') + cmd_opts.add_option( + '--fish', '-f', + action='store_const', + const='fish', + dest='shell', + help='Emit completion code for fish') + + self.parser.insert_option_group(0, cmd_opts) + + def run(self, options, args): + """Prints the completion code of the given shell""" + shells = COMPLETION_SCRIPTS.keys() + shell_options = ['--' + shell for shell in sorted(shells)] + if options.shell in shells: + script = textwrap.dedent( + COMPLETION_SCRIPTS.get(options.shell, '') % { + 'prog': get_prog(), + } + ) + print(BASE_COMPLETION % {'script': script, 'shell': options.shell}) + else: + sys.stderr.write( + 'ERROR: You must pass %s\n' % ' or '.join(shell_options) + ) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/commands/configuration.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/configuration.py new file mode 100644 index 0000000..826c08d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/configuration.py @@ -0,0 +1,227 @@ +import logging +import os +import subprocess + +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import ERROR, SUCCESS +from pip._internal.configuration import Configuration, kinds +from pip._internal.exceptions import PipError +from pip._internal.locations import venv_config_file +from pip._internal.utils.misc import get_prog + +logger = logging.getLogger(__name__) + + +class ConfigurationCommand(Command): + """Manage local and global configuration. 
+ + Subcommands: + + list: List the active configuration (or from the file specified) + edit: Edit the configuration file in an editor + get: Get the value associated with name + set: Set the name=value + unset: Unset the value associated with name + + If none of --user, --global and --venv are passed, a virtual + environment configuration file is used if one is active and the file + exists. Otherwise, all modifications happen on the to the user file by + default. + """ + + name = 'config' + usage = """ + %prog [] list + %prog [] [--editor ] edit + + %prog [] get name + %prog [] set name value + %prog [] unset name + """ + + summary = "Manage local and global configuration." + + def __init__(self, *args, **kwargs): + super(ConfigurationCommand, self).__init__(*args, **kwargs) + + self.configuration = None + + self.cmd_opts.add_option( + '--editor', + dest='editor', + action='store', + default=None, + help=( + 'Editor to use to edit the file. Uses VISUAL or EDITOR ' + 'environment variables if not provided.' 
+ ) + ) + + self.cmd_opts.add_option( + '--global', + dest='global_file', + action='store_true', + default=False, + help='Use the system-wide configuration file only' + ) + + self.cmd_opts.add_option( + '--user', + dest='user_file', + action='store_true', + default=False, + help='Use the user configuration file only' + ) + + self.cmd_opts.add_option( + '--venv', + dest='venv_file', + action='store_true', + default=False, + help='Use the virtualenv configuration file only' + ) + + self.parser.insert_option_group(0, self.cmd_opts) + + def run(self, options, args): + handlers = { + "list": self.list_values, + "edit": self.open_in_editor, + "get": self.get_name, + "set": self.set_name_value, + "unset": self.unset_name + } + + # Determine action + if not args or args[0] not in handlers: + logger.error("Need an action ({}) to perform.".format( + ", ".join(sorted(handlers))) + ) + return ERROR + + action = args[0] + + # Determine which configuration files are to be loaded + # Depends on whether the command is modifying. + try: + load_only = self._determine_file( + options, need_value=(action in ["get", "set", "unset", "edit"]) + ) + except PipError as e: + logger.error(e.args[0]) + return ERROR + + # Load a new configuration + self.configuration = Configuration( + isolated=options.isolated_mode, load_only=load_only + ) + self.configuration.load() + + # Error handling happens here, not in the action-handlers. + try: + handlers[action](options, args[1:]) + except PipError as e: + logger.error(e.args[0]) + return ERROR + + return SUCCESS + + def _determine_file(self, options, need_value): + file_options = { + kinds.USER: options.user_file, + kinds.GLOBAL: options.global_file, + kinds.VENV: options.venv_file + } + + if sum(file_options.values()) == 0: + if not need_value: + return None + # Default to user, unless there's a virtualenv file. 
+ elif os.path.exists(venv_config_file): + return kinds.VENV + else: + return kinds.USER + elif sum(file_options.values()) == 1: + # There's probably a better expression for this. + return [key for key in file_options if file_options[key]][0] + + raise PipError( + "Need exactly one file to operate upon " + "(--user, --venv, --global) to perform." + ) + + def list_values(self, options, args): + self._get_n_args(args, "list", n=0) + + for key, value in sorted(self.configuration.items()): + logger.info("%s=%r", key, value) + + def get_name(self, options, args): + key = self._get_n_args(args, "get [name]", n=1) + value = self.configuration.get_value(key) + + logger.info("%s", value) + + def set_name_value(self, options, args): + key, value = self._get_n_args(args, "set [name] [value]", n=2) + self.configuration.set_value(key, value) + + self._save_configuration() + + def unset_name(self, options, args): + key = self._get_n_args(args, "unset [name]", n=1) + self.configuration.unset_value(key) + + self._save_configuration() + + def open_in_editor(self, options, args): + editor = self._determine_editor(options) + + fname = self.configuration.get_file_to_edit() + if fname is None: + raise PipError("Could not determine appropriate file.") + + try: + subprocess.check_call([editor, fname]) + except subprocess.CalledProcessError as e: + raise PipError( + "Editor Subprocess exited with exit code {}" + .format(e.returncode) + ) + + def _get_n_args(self, args, example, n): + """Helper to make sure the command got the right number of arguments + """ + if len(args) != n: + msg = ( + 'Got unexpected number of arguments, expected {}. ' + '(example: "{} config {}")' + ).format(n, get_prog(), example) + raise PipError(msg) + + if n == 1: + return args[0] + else: + return args + + def _save_configuration(self): + # We successfully ran a modifying command. Need to save the + # configuration. 
+ try: + self.configuration.save() + except Exception: + logger.error( + "Unable to save configuration. Please report this as a bug.", + exc_info=1 + ) + raise PipError("Internal Error.") + + def _determine_editor(self, options): + if options.editor is not None: + return options.editor + elif "VISUAL" in os.environ: + return os.environ["VISUAL"] + elif "EDITOR" in os.environ: + return os.environ["EDITOR"] + else: + raise PipError("Could not determine editor to use.") diff --git a/Shared/lib/python3.4/site-packages/pip/commands/download.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/download.py similarity index 58% rename from Shared/lib/python3.4/site-packages/pip/commands/download.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/download.py index 4155e05..b3f3c6e 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/download.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/download.py @@ -3,13 +3,15 @@ from __future__ import absolute_import import logging import os -from pip.req import RequirementSet -from pip.basecommand import RequirementCommand -from pip import cmdoptions -from pip.utils import ensure_dir, normalize_path -from pip.utils.build import BuildDirectory -from pip.utils.filesystem import check_path_owner - +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import RequirementCommand +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req import RequirementSet +from pip._internal.req.req_tracker import RequirementTracker +from pip._internal.resolve import Resolver +from pip._internal.utils.filesystem import check_path_owner +from pip._internal.utils.misc import ensure_dir, normalize_path +from pip._internal.utils.temp_dir import TempDirectory logger = logging.getLogger(__name__) @@ -31,8 +33,8 @@ class DownloadCommand(RequirementCommand): usage = """ %prog [options] [package-index-options] ... 
%prog [options] -r [package-index-options] ... - %prog [options] [-e] ... - %prog [options] [-e] ... + %prog [options] ... + %prog [options] ... %prog [options] ...""" summary = 'Download packages.' @@ -43,17 +45,19 @@ class DownloadCommand(RequirementCommand): cmd_opts = self.cmd_opts cmd_opts.add_option(cmdoptions.constraints()) - cmd_opts.add_option(cmdoptions.editable()) cmd_opts.add_option(cmdoptions.requirements()) cmd_opts.add_option(cmdoptions.build_dir()) cmd_opts.add_option(cmdoptions.no_deps()) cmd_opts.add_option(cmdoptions.global_options()) cmd_opts.add_option(cmdoptions.no_binary()) cmd_opts.add_option(cmdoptions.only_binary()) + cmd_opts.add_option(cmdoptions.prefer_binary()) cmd_opts.add_option(cmdoptions.src()) cmd_opts.add_option(cmdoptions.pre()) cmd_opts.add_option(cmdoptions.no_clean()) cmd_opts.add_option(cmdoptions.require_hashes()) + cmd_opts.add_option(cmdoptions.progress_bar()) + cmd_opts.add_option(cmdoptions.no_build_isolation()) cmd_opts.add_option( '-d', '--dest', '--destination-dir', '--destination-directory', @@ -63,8 +67,13 @@ class DownloadCommand(RequirementCommand): help=("Download packages into ."), ) + cmd_opts.add_option(cmdoptions.platform()) + cmd_opts.add_option(cmdoptions.python_version()) + cmd_opts.add_option(cmdoptions.implementation()) + cmd_opts.add_option(cmdoptions.abi()) + index_opts = cmdoptions.make_option_group( - cmdoptions.non_deprecated_index_group, + cmdoptions.index_group, self.parser, ) @@ -73,14 +82,31 @@ class DownloadCommand(RequirementCommand): def run(self, options, args): options.ignore_installed = True + # editable doesn't really make sense for `pip download`, but the bowels + # of the RequirementSet code require that property. 
+ options.editables = [] + + if options.python_version: + python_versions = [options.python_version] + else: + python_versions = None + + cmdoptions.check_dist_restriction(options) + options.src_dir = os.path.abspath(options.src_dir) options.download_dir = normalize_path(options.download_dir) ensure_dir(options.download_dir) with self._build_session(options) as session: - - finder = self._build_package_finder(options, session) + finder = self._build_package_finder( + options=options, + session=session, + platform=options.platform, + python_versions=python_versions, + abi=options.abi, + implementation=options.implementation, + ) build_delete = (not (options.no_clean or options.build_dir)) if options.cache_dir and not check_path_owner(options.cache_dir): logger.warning( @@ -93,18 +119,12 @@ class DownloadCommand(RequirementCommand): ) options.cache_dir = None - with BuildDirectory(options.build_dir, - delete=build_delete) as build_dir: + with RequirementTracker() as req_tracker, TempDirectory( + options.build_dir, delete=build_delete, kind="download" + ) as directory: requirement_set = RequirementSet( - build_dir=build_dir, - src_dir=options.src_dir, - download_dir=options.download_dir, - ignore_installed=True, - ignore_dependencies=options.ignore_dependencies, - session=session, - isolated=options.isolated_mode, - require_hashes=options.require_hashes + require_hashes=options.require_hashes, ) self.populate_requirement_set( requirement_set, @@ -116,18 +136,36 @@ class DownloadCommand(RequirementCommand): None ) - if not requirement_set.has_requirements: - return + preparer = RequirementPreparer( + build_dir=directory.path, + src_dir=options.src_dir, + download_dir=options.download_dir, + wheel_download_dir=None, + progress_bar=options.progress_bar, + build_isolation=options.build_isolation, + req_tracker=req_tracker, + ) - requirement_set.prepare_files(finder) + resolver = Resolver( + preparer=preparer, + finder=finder, + session=session, + wheel_cache=None, + 
use_user_site=False, + upgrade_strategy="to-satisfy-only", + force_reinstall=False, + ignore_dependencies=options.ignore_dependencies, + ignore_requires_python=False, + ignore_installed=True, + isolated=options.isolated_mode, + ) + resolver.resolve(requirement_set) downloaded = ' '.join([ req.name for req in requirement_set.successfully_downloaded ]) if downloaded: - logger.info( - 'Successfully downloaded %s', downloaded - ) + logger.info('Successfully downloaded %s', downloaded) # Clean up if not options.no_clean: diff --git a/Shared/lib/python3.4/site-packages/pip/commands/freeze.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/freeze.py similarity index 67% rename from Shared/lib/python3.4/site-packages/pip/commands/freeze.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/freeze.py index 0485d5f..dc9c53a 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/freeze.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/freeze.py @@ -2,14 +2,13 @@ from __future__ import absolute_import import sys -import pip -from pip.compat import stdlib_pkgs -from pip.basecommand import Command -from pip.operations.freeze import freeze -from pip.wheel import WheelCache +from pip._internal.cache import WheelCache +from pip._internal.cli.base_command import Command +from pip._internal.models.format_control import FormatControl +from pip._internal.operations.freeze import freeze +from pip._internal.utils.compat import stdlib_pkgs - -DEV_PKGS = ('pip', 'setuptools', 'distribute', 'wheel') +DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'} class FreezeCommand(Command): @@ -29,12 +28,13 @@ class FreezeCommand(Command): self.cmd_opts.add_option( '-r', '--requirement', - dest='requirement', - action='store', - default=None, + dest='requirements', + action='append', + default=[], metavar='file', help="Use the order in the given requirements file and its " - "comments when generating output.") + "comments when generating output. 
This option can be " + "used multiple times.") self.cmd_opts.add_option( '-f', '--find-links', dest='find_links', @@ -62,25 +62,35 @@ class FreezeCommand(Command): action='store_true', help='Do not skip these packages in the output:' ' %s' % ', '.join(DEV_PKGS)) + self.cmd_opts.add_option( + '--exclude-editable', + dest='exclude_editable', + action='store_true', + help='Exclude editable package from output.') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): - format_control = pip.index.FormatControl(set(), set()) + format_control = FormatControl(set(), set()) wheel_cache = WheelCache(options.cache_dir, format_control) skip = set(stdlib_pkgs) if not options.freeze_all: skip.update(DEV_PKGS) freeze_kwargs = dict( - requirement=options.requirement, + requirement=options.requirements, find_links=options.find_links, local_only=options.local, user_only=options.user, skip_regex=options.skip_requirements_regex, isolated=options.isolated_mode, wheel_cache=wheel_cache, - skip=skip) + skip=skip, + exclude_editable=options.exclude_editable, + ) - for line in freeze(**freeze_kwargs): - sys.stdout.write(line + '\n') + try: + for line in freeze(**freeze_kwargs): + sys.stdout.write(line + '\n') + finally: + wheel_cache.cleanup() diff --git a/Shared/lib/python3.4/site-packages/pip/commands/hash.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/hash.py similarity index 85% rename from Shared/lib/python3.4/site-packages/pip/commands/hash.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/hash.py index 27cca0b..423440e 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/hash.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/hash.py @@ -4,11 +4,10 @@ import hashlib import logging import sys -from pip.basecommand import Command -from pip.status_codes import ERROR -from pip.utils import read_chunks -from pip.utils.hashes import FAVORITE_HASH, STRONG_HASHES - +from pip._internal.cli.base_command 
import Command +from pip._internal.cli.status_codes import ERROR +from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES +from pip._internal.utils.misc import read_chunks logger = logging.getLogger(__name__) @@ -24,6 +23,7 @@ class HashCommand(Command): name = 'hash' usage = '%prog [options] ...' summary = 'Compute hashes of package archives.' + ignore_require_venv = True def __init__(self, *args, **kw): super(HashCommand, self).__init__(*args, **kw) diff --git a/Shared/lib/python3.4/site-packages/pip/commands/help.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/help.py similarity index 75% rename from Shared/lib/python3.4/site-packages/pip/commands/help.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/help.py index 11722f1..49a81cb 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/help.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/help.py @@ -1,7 +1,8 @@ from __future__ import absolute_import -from pip.basecommand import Command, SUCCESS -from pip.exceptions import CommandError +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import SUCCESS +from pip._internal.exceptions import CommandError class HelpCommand(Command): @@ -10,9 +11,10 @@ class HelpCommand(Command): usage = """ %prog """ summary = 'Show help for commands.' 
+ ignore_require_venv = True def run(self, options, args): - from pip.commands import commands_dict, get_similar_commands + from pip._internal.commands import commands_dict, get_similar_commands try: # 'pip help' with no args is handled by pip.__init__.parseopt() diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/commands/install.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/install.py new file mode 100644 index 0000000..c9ed3b4 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/install.py @@ -0,0 +1,555 @@ +from __future__ import absolute_import + +import errno +import logging +import operator +import os +import shutil +from optparse import SUPPRESS_HELP + +from pip._vendor import pkg_resources + +from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import RequirementCommand +from pip._internal.cli.status_codes import ERROR +from pip._internal.exceptions import ( + CommandError, InstallationError, PreviousBuildDirError, +) +from pip._internal.locations import distutils_scheme, virtualenv_no_global +from pip._internal.operations.check import check_install_conflicts +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req import RequirementSet, install_given_reqs +from pip._internal.req.req_tracker import RequirementTracker +from pip._internal.resolve import Resolver +from pip._internal.utils.filesystem import check_path_owner +from pip._internal.utils.misc import ( + ensure_dir, get_installed_version, + protect_pip_from_modification_on_windows, +) +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.wheel import WheelBuilder + +try: + import wheel +except ImportError: + wheel = None + +from pip._internal.locations import running_under_virtualenv + +logger = logging.getLogger(__name__) + + +class InstallCommand(RequirementCommand): + """ + Install packages from: + + - PyPI (and other 
indexes) using requirement specifiers. + - VCS project urls. + - Local project directories. + - Local or remote source archives. + + pip also supports installing from "requirements files", which provide + an easy way to specify a whole environment to be installed. + """ + name = 'install' + + usage = """ + %prog [options] [package-index-options] ... + %prog [options] -r [package-index-options] ... + %prog [options] [-e] ... + %prog [options] [-e] ... + %prog [options] ...""" + + summary = 'Install packages.' + + def __init__(self, *args, **kw): + super(InstallCommand, self).__init__(*args, **kw) + + cmd_opts = self.cmd_opts + + cmd_opts.add_option(cmdoptions.requirements()) + cmd_opts.add_option(cmdoptions.constraints()) + cmd_opts.add_option(cmdoptions.no_deps()) + cmd_opts.add_option(cmdoptions.pre()) + + cmd_opts.add_option(cmdoptions.editable()) + cmd_opts.add_option( + '-t', '--target', + dest='target_dir', + metavar='dir', + default=None, + help='Install packages into . ' + 'By default this will not replace existing files/folders in ' + '. Use --upgrade to replace existing packages in ' + 'with new versions.' + ) + cmd_opts.add_option(cmdoptions.platform()) + cmd_opts.add_option(cmdoptions.python_version()) + cmd_opts.add_option(cmdoptions.implementation()) + cmd_opts.add_option(cmdoptions.abi()) + + cmd_opts.add_option( + '--user', + dest='use_user_site', + action='store_true', + help="Install to the Python user install directory for your " + "platform. Typically ~/.local/, or %APPDATA%\\Python on " + "Windows. (See the Python documentation for site.USER_BASE " + "for full details.) 
On Debian systems, this is the " + "default when running outside of a virtual environment " + "and not as root.") + + cmd_opts.add_option( + '--no-user', + dest='use_system_location', + action='store_true', + help=SUPPRESS_HELP) + cmd_opts.add_option( + '--root', + dest='root_path', + metavar='dir', + default=None, + help="Install everything relative to this alternate root " + "directory.") + cmd_opts.add_option( + '--prefix', + dest='prefix_path', + metavar='dir', + default=None, + help="Installation prefix where lib, bin and other top-level " + "folders are placed") + + cmd_opts.add_option( + '--system', + dest='use_system_location', + action='store_true', + help="Install using the system scheme (overrides --user on " + "Debian systems)") + + cmd_opts.add_option(cmdoptions.build_dir()) + + cmd_opts.add_option(cmdoptions.src()) + + cmd_opts.add_option( + '-U', '--upgrade', + dest='upgrade', + action='store_true', + help='Upgrade all specified packages to the newest available ' + 'version. The handling of dependencies depends on the ' + 'upgrade-strategy used.' + ) + + cmd_opts.add_option( + '--upgrade-strategy', + dest='upgrade_strategy', + default='only-if-needed', + choices=['only-if-needed', 'eager'], + help='Determines how dependency upgrading should be handled ' + '[default: %default]. ' + '"eager" - dependencies are upgraded regardless of ' + 'whether the currently installed version satisfies the ' + 'requirements of the upgraded package(s). ' + '"only-if-needed" - are upgraded only when they do not ' + 'satisfy the requirements of the upgraded package(s).' 
+ ) + + cmd_opts.add_option( + '--force-reinstall', + dest='force_reinstall', + action='store_true', + help='Reinstall all packages even if they are already ' + 'up-to-date.') + + cmd_opts.add_option( + '-I', '--ignore-installed', + dest='ignore_installed', + action='store_true', + help='Ignore the installed packages (reinstalling instead).') + + cmd_opts.add_option(cmdoptions.ignore_requires_python()) + cmd_opts.add_option(cmdoptions.no_build_isolation()) + + cmd_opts.add_option(cmdoptions.install_options()) + cmd_opts.add_option(cmdoptions.global_options()) + + cmd_opts.add_option( + "--compile", + action="store_true", + dest="compile", + default=True, + help="Compile Python source files to bytecode", + ) + + cmd_opts.add_option( + "--no-compile", + action="store_false", + dest="compile", + help="Do not compile Python source files to bytecode", + ) + + cmd_opts.add_option( + "--no-warn-script-location", + action="store_false", + dest="warn_script_location", + default=True, + help="Do not warn when installing scripts outside PATH", + ) + cmd_opts.add_option( + "--no-warn-conflicts", + action="store_false", + dest="warn_about_conflicts", + default=True, + help="Do not warn about broken dependencies", + ) + + cmd_opts.add_option(cmdoptions.no_binary()) + cmd_opts.add_option(cmdoptions.only_binary()) + cmd_opts.add_option(cmdoptions.prefer_binary()) + cmd_opts.add_option(cmdoptions.no_clean()) + cmd_opts.add_option(cmdoptions.require_hashes()) + cmd_opts.add_option(cmdoptions.progress_bar()) + + index_opts = cmdoptions.make_option_group( + cmdoptions.index_group, + self.parser, + ) + + self.parser.insert_option_group(0, index_opts) + self.parser.insert_option_group(0, cmd_opts) + + def run(self, options, args): + cmdoptions.check_install_build_global(options) + upgrade_strategy = "to-satisfy-only" + if options.upgrade: + upgrade_strategy = options.upgrade_strategy + + if options.build_dir: + options.build_dir = os.path.abspath(options.build_dir) + + 
cmdoptions.check_dist_restriction(options, check_target=True) + + if options.python_version: + python_versions = [options.python_version] + else: + python_versions = None + + # compute install location defaults + if (not options.use_user_site and not options.prefix_path and not + options.target_dir and not options.use_system_location): + if not running_under_virtualenv() and os.geteuid() != 0: + options.use_user_site = True + + if options.use_system_location: + options.use_user_site = False + + options.src_dir = os.path.abspath(options.src_dir) + install_options = options.install_options or [] + if options.use_user_site: + if options.prefix_path: + raise CommandError( + "Can not combine '--user' and '--prefix' as they imply " + "different installation locations" + ) + if virtualenv_no_global(): + raise InstallationError( + "Can not perform a '--user' install. User site-packages " + "are not visible in this virtualenv." + ) + install_options.append('--user') + install_options.append('--prefix=') + + target_temp_dir = TempDirectory(kind="target") + if options.target_dir: + options.ignore_installed = True + options.target_dir = os.path.abspath(options.target_dir) + if (os.path.exists(options.target_dir) and not + os.path.isdir(options.target_dir)): + raise CommandError( + "Target path exists but is not a directory, will not " + "continue." 
+ ) + + # Create a target directory for using with the target option + target_temp_dir.create() + install_options.append('--home=' + target_temp_dir.path) + + global_options = options.global_options or [] + + with self._build_session(options) as session: + finder = self._build_package_finder( + options=options, + session=session, + platform=options.platform, + python_versions=python_versions, + abi=options.abi, + implementation=options.implementation, + ) + build_delete = (not (options.no_clean or options.build_dir)) + wheel_cache = WheelCache(options.cache_dir, options.format_control) + + if options.cache_dir and not check_path_owner(options.cache_dir): + logger.warning( + "The directory '%s' or its parent directory is not owned " + "by the current user and caching wheels has been " + "disabled. check the permissions and owner of that " + "directory. If executing pip with sudo, you may want " + "sudo's -H flag.", + options.cache_dir, + ) + options.cache_dir = None + + with RequirementTracker() as req_tracker, TempDirectory( + options.build_dir, delete=build_delete, kind="install" + ) as directory: + requirement_set = RequirementSet( + require_hashes=options.require_hashes, + check_supported_wheels=not options.target_dir, + ) + + try: + self.populate_requirement_set( + requirement_set, args, options, finder, session, + self.name, wheel_cache + ) + preparer = RequirementPreparer( + build_dir=directory.path, + src_dir=options.src_dir, + download_dir=None, + wheel_download_dir=None, + progress_bar=options.progress_bar, + build_isolation=options.build_isolation, + req_tracker=req_tracker, + ) + + resolver = Resolver( + preparer=preparer, + finder=finder, + session=session, + wheel_cache=wheel_cache, + use_user_site=options.use_user_site, + upgrade_strategy=upgrade_strategy, + force_reinstall=options.force_reinstall, + ignore_dependencies=options.ignore_dependencies, + ignore_requires_python=options.ignore_requires_python, + ignore_installed=options.ignore_installed, + 
isolated=options.isolated_mode, + ) + resolver.resolve(requirement_set) + + protect_pip_from_modification_on_windows( + modifying_pip=requirement_set.has_requirement("pip") + ) + + # If caching is disabled or wheel is not installed don't + # try to build wheels. + if wheel and options.cache_dir: + # build wheels before install. + wb = WheelBuilder( + finder, preparer, wheel_cache, + build_options=[], global_options=[], + ) + # Ignore the result: a failed wheel will be + # installed from the sdist/vcs whatever. + wb.build( + requirement_set.requirements.values(), + session=session, autobuilding=True + ) + + to_install = resolver.get_installation_order( + requirement_set + ) + + # Consistency Checking of the package set we're installing. + should_warn_about_conflicts = ( + not options.ignore_dependencies and + options.warn_about_conflicts + ) + if should_warn_about_conflicts: + self._warn_about_conflicts(to_install) + + # Don't warn about script install locations if + # --target has been specified + warn_script_location = options.warn_script_location + if options.target_dir: + warn_script_location = False + + installed = install_given_reqs( + to_install, + install_options, + global_options, + root=options.root_path, + home=target_temp_dir.path, + prefix=options.prefix_path, + pycompile=options.compile, + warn_script_location=warn_script_location, + use_user_site=options.use_user_site, + ) + + lib_locations = get_lib_location_guesses( + user=options.use_user_site, + home=target_temp_dir.path, + root=options.root_path, + prefix=options.prefix_path, + isolated=options.isolated_mode, + ) + working_set = pkg_resources.WorkingSet(lib_locations) + + reqs = sorted(installed, key=operator.attrgetter('name')) + items = [] + for req in reqs: + item = req.name + try: + installed_version = get_installed_version( + req.name, working_set=working_set + ) + if installed_version: + item += '-' + installed_version + except Exception: + pass + items.append(item) + installed = ' 
'.join(items) + if installed: + logger.info('Successfully installed %s', installed) + except EnvironmentError as error: + show_traceback = (self.verbosity >= 1) + + message = create_env_error_message( + error, show_traceback, options.use_user_site, + ) + logger.error(message, exc_info=show_traceback) + + return ERROR + except PreviousBuildDirError: + options.no_clean = True + raise + finally: + # Clean up + if not options.no_clean: + requirement_set.cleanup_files() + wheel_cache.cleanup() + + if options.target_dir: + self._handle_target_dir( + options.target_dir, target_temp_dir, options.upgrade + ) + return requirement_set + + def _handle_target_dir(self, target_dir, target_temp_dir, upgrade): + ensure_dir(target_dir) + + # Checking both purelib and platlib directories for installed + # packages to be moved to target directory + lib_dir_list = [] + + with target_temp_dir: + # Checking both purelib and platlib directories for installed + # packages to be moved to target directory + scheme = distutils_scheme('', home=target_temp_dir.path) + purelib_dir = scheme['purelib'] + platlib_dir = scheme['platlib'] + data_dir = scheme['data'] + + if os.path.exists(purelib_dir): + lib_dir_list.append(purelib_dir) + if os.path.exists(platlib_dir) and platlib_dir != purelib_dir: + lib_dir_list.append(platlib_dir) + if os.path.exists(data_dir): + lib_dir_list.append(data_dir) + + for lib_dir in lib_dir_list: + for item in os.listdir(lib_dir): + if lib_dir == data_dir: + ddir = os.path.join(data_dir, item) + if any(s.startswith(ddir) for s in lib_dir_list[:-1]): + continue + target_item_dir = os.path.join(target_dir, item) + if os.path.exists(target_item_dir): + if not upgrade: + logger.warning( + 'Target directory %s already exists. Specify ' + '--upgrade to force replacement.', + target_item_dir + ) + continue + if os.path.islink(target_item_dir): + logger.warning( + 'Target directory %s already exists and is ' + 'a link. 
Pip will not automatically replace ' + 'links, please remove if replacement is ' + 'desired.', + target_item_dir + ) + continue + if os.path.isdir(target_item_dir): + shutil.rmtree(target_item_dir) + else: + os.remove(target_item_dir) + + shutil.move( + os.path.join(lib_dir, item), + target_item_dir + ) + + def _warn_about_conflicts(self, to_install): + package_set, _dep_info = check_install_conflicts(to_install) + missing, conflicting = _dep_info + + # NOTE: There is some duplication here from pip check + for project_name in missing: + version = package_set[project_name][0] + for dependency in missing[project_name]: + logger.critical( + "%s %s requires %s, which is not installed.", + project_name, version, dependency[1], + ) + + for project_name in conflicting: + version = package_set[project_name][0] + for dep_name, dep_version, req in conflicting[project_name]: + logger.critical( + "%s %s has requirement %s, but you'll have %s %s which is " + "incompatible.", + project_name, version, req, dep_name, dep_version, + ) + + +def get_lib_location_guesses(*args, **kwargs): + scheme = distutils_scheme('', *args, **kwargs) + return [scheme['purelib'], scheme['platlib']] + + +def create_env_error_message(error, show_traceback, using_user_site): + """Format an error message for an EnvironmentError + + It may occur anytime during the execution of the install command. 
+ """ + parts = [] + + # Mention the error if we are not going to show a traceback + parts.append("Could not install packages due to an EnvironmentError") + if not show_traceback: + parts.append(": ") + parts.append(str(error)) + else: + parts.append(".") + + # Spilt the error indication from a helper message (if any) + parts[-1] += "\n" + + # Suggest useful actions to the user: + # (1) using user site-packages or (2) verifying the permissions + if error.errno == errno.EACCES: + user_option_part = "Consider using the `--user` option" + permissions_part = "Check the permissions" + + if not using_user_site: + parts.extend([ + user_option_part, " or ", + permissions_part.lower(), + ]) + else: + parts.append(permissions_part) + parts.append(".\n") + + return "".join(parts).strip() + "\n" diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/commands/list.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/list.py new file mode 100644 index 0000000..c6eeca7 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/list.py @@ -0,0 +1,306 @@ +from __future__ import absolute_import + +import json +import logging + +from pip._vendor import six +from pip._vendor.six.moves import zip_longest + +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import Command +from pip._internal.exceptions import CommandError +from pip._internal.index import PackageFinder +from pip._internal.utils.misc import ( + dist_is_editable, get_installed_distributions, +) +from pip._internal.utils.packaging import get_installer + +logger = logging.getLogger(__name__) + + +class ListCommand(Command): + """ + List installed packages, including editables. + + Packages are listed in a case-insensitive sorted order. + """ + name = 'list' + usage = """ + %prog [options]""" + summary = 'List installed packages.' 
+ + def __init__(self, *args, **kw): + super(ListCommand, self).__init__(*args, **kw) + + cmd_opts = self.cmd_opts + + cmd_opts.add_option( + '-o', '--outdated', + action='store_true', + default=False, + help='List outdated packages') + cmd_opts.add_option( + '-u', '--uptodate', + action='store_true', + default=False, + help='List uptodate packages') + cmd_opts.add_option( + '-e', '--editable', + action='store_true', + default=False, + help='List editable projects.') + cmd_opts.add_option( + '-l', '--local', + action='store_true', + default=False, + help=('If in a virtualenv that has global access, do not list ' + 'globally-installed packages.'), + ) + self.cmd_opts.add_option( + '--user', + dest='user', + action='store_true', + default=False, + help='Only output packages installed in user-site.') + + cmd_opts.add_option( + '--pre', + action='store_true', + default=False, + help=("Include pre-release and development versions. By default, " + "pip only finds stable versions."), + ) + + cmd_opts.add_option( + '--format', + action='store', + dest='list_format', + default="columns", + choices=('columns', 'freeze', 'json'), + help="Select the output format among: columns (default), freeze, " + "or json", + ) + + cmd_opts.add_option( + '--not-required', + action='store_true', + dest='not_required', + help="List packages that are not dependencies of " + "installed packages.", + ) + + cmd_opts.add_option( + '--exclude-editable', + action='store_false', + dest='include_editable', + help='Exclude editable package from output.', + ) + cmd_opts.add_option( + '--include-editable', + action='store_true', + dest='include_editable', + help='Include editable package from output.', + default=True, + ) + index_opts = cmdoptions.make_option_group( + cmdoptions.index_group, self.parser + ) + + self.parser.insert_option_group(0, index_opts) + self.parser.insert_option_group(0, cmd_opts) + + def _build_package_finder(self, options, index_urls, session): + """ + Create a package finder 
appropriate to this list command. + """ + return PackageFinder( + find_links=options.find_links, + index_urls=index_urls, + allow_all_prereleases=options.pre, + trusted_hosts=options.trusted_hosts, + process_dependency_links=options.process_dependency_links, + session=session, + ) + + def run(self, options, args): + if options.outdated and options.uptodate: + raise CommandError( + "Options --outdated and --uptodate cannot be combined.") + + packages = get_installed_distributions( + local_only=options.local, + user_only=options.user, + editables_only=options.editable, + include_editables=options.include_editable, + ) + + if options.outdated: + packages = self.get_outdated(packages, options) + elif options.uptodate: + packages = self.get_uptodate(packages, options) + + if options.not_required: + packages = self.get_not_required(packages, options) + + self.output_package_listing(packages, options) + + def get_outdated(self, packages, options): + return [ + dist for dist in self.iter_packages_latest_infos(packages, options) + if dist.latest_version > dist.parsed_version + ] + + def get_uptodate(self, packages, options): + return [ + dist for dist in self.iter_packages_latest_infos(packages, options) + if dist.latest_version == dist.parsed_version + ] + + def get_not_required(self, packages, options): + dep_keys = set() + for dist in packages: + dep_keys.update(requirement.key for requirement in dist.requires()) + return {pkg for pkg in packages if pkg.key not in dep_keys} + + def iter_packages_latest_infos(self, packages, options): + index_urls = [options.index_url] + options.extra_index_urls + if options.no_index: + logger.debug('Ignoring indexes: %s', ','.join(index_urls)) + index_urls = [] + + dependency_links = [] + for dist in packages: + if dist.has_metadata('dependency_links.txt'): + dependency_links.extend( + dist.get_metadata_lines('dependency_links.txt'), + ) + + with self._build_session(options) as session: + finder = self._build_package_finder(options, 
index_urls, session) + finder.add_dependency_links(dependency_links) + + for dist in packages: + typ = 'unknown' + all_candidates = finder.find_all_candidates(dist.key) + if not options.pre: + # Remove prereleases + all_candidates = [candidate for candidate in all_candidates + if not candidate.version.is_prerelease] + + if not all_candidates: + continue + best_candidate = max(all_candidates, + key=finder._candidate_sort_key) + remote_version = best_candidate.version + if best_candidate.location.is_wheel: + typ = 'wheel' + else: + typ = 'sdist' + # This is dirty but makes the rest of the code much cleaner + dist.latest_version = remote_version + dist.latest_filetype = typ + yield dist + + def output_package_listing(self, packages, options): + packages = sorted( + packages, + key=lambda dist: dist.project_name.lower(), + ) + if options.list_format == 'columns' and packages: + data, header = format_for_columns(packages, options) + self.output_package_listing_columns(data, header) + elif options.list_format == 'freeze': + for dist in packages: + if options.verbose >= 1: + logger.info("%s==%s (%s)", dist.project_name, + dist.version, dist.location) + else: + logger.info("%s==%s", dist.project_name, dist.version) + elif options.list_format == 'json': + logger.info(format_for_json(packages, options)) + + def output_package_listing_columns(self, data, header): + # insert the header first: we need to know the size of column names + if len(data) > 0: + data.insert(0, header) + + pkg_strings, sizes = tabulate(data) + + # Create and add a separator. 
+ if len(data) > 0: + pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes))) + + for val in pkg_strings: + logger.info(val) + + +def tabulate(vals): + # From pfmoore on GitHub: + # https://github.com/pypa/pip/issues/3651#issuecomment-216932564 + assert len(vals) > 0 + + sizes = [0] * max(len(x) for x in vals) + for row in vals: + sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)] + + result = [] + for row in vals: + display = " ".join([str(c).ljust(s) if c is not None else '' + for s, c in zip_longest(sizes, row)]) + result.append(display) + + return result, sizes + + +def format_for_columns(pkgs, options): + """ + Convert the package data into something usable + by output_package_listing_columns. + """ + running_outdated = options.outdated + # Adjust the header for the `pip list --outdated` case. + if running_outdated: + header = ["Package", "Version", "Latest", "Type"] + else: + header = ["Package", "Version"] + + data = [] + if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs): + header.append("Location") + if options.verbose >= 1: + header.append("Installer") + + for proj in pkgs: + # if we're working on the 'outdated' list, separate out the + # latest_version and type + row = [proj.project_name, proj.version] + + if running_outdated: + row.append(proj.latest_version) + row.append(proj.latest_filetype) + + if options.verbose >= 1 or dist_is_editable(proj): + row.append(proj.location) + if options.verbose >= 1: + row.append(get_installer(proj)) + + data.append(row) + + return data, header + + +def format_for_json(packages, options): + data = [] + for dist in packages: + info = { + 'name': dist.project_name, + 'version': six.text_type(dist.version), + } + if options.verbose >= 1: + info['location'] = dist.location + info['installer'] = get_installer(dist) + if options.outdated: + info['latest_version'] = six.text_type(dist.latest_version) + info['latest_filetype'] = dist.latest_filetype + data.append(info) + return 
json.dumps(data) diff --git a/Shared/lib/python3.4/site-packages/pip/commands/search.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/search.py similarity index 70% rename from Shared/lib/python3.4/site-packages/pip/commands/search.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/search.py index 3155e18..c157a31 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/search.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/search.py @@ -3,17 +3,21 @@ from __future__ import absolute_import import logging import sys import textwrap +from collections import OrderedDict -from pip.basecommand import Command, SUCCESS -from pip.download import PipXmlrpcTransport -from pip.models import PyPI -from pip.utils import get_terminal_size -from pip.utils.logging import indent_log -from pip.exceptions import CommandError -from pip.status_codes import NO_MATCHES_FOUND from pip._vendor import pkg_resources -from pip._vendor.six.moves import xmlrpc_client +from pip._vendor.packaging.version import parse as parse_version +# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is +# why we ignore the type on this import +from pip._vendor.six.moves import xmlrpc_client # type: ignore +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS +from pip._internal.download import PipXmlrpcTransport +from pip._internal.exceptions import CommandError +from pip._internal.models.index import PyPI +from pip._internal.utils.compat import get_terminal_size +from pip._internal.utils.logging import indent_log logger = logging.getLogger(__name__) @@ -24,11 +28,12 @@ class SearchCommand(Command): usage = """ %prog [options] """ summary = 'Search PyPI for packages.' 
+ ignore_require_venv = True def __init__(self, *args, **kw): super(SearchCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( - '--index', + '-i', '--index', dest='index', metavar='URL', default=PyPI.pypi_url, @@ -67,21 +72,17 @@ def transform_hits(hits): packages with the list of versions stored inline. This converts the list from pypi into one we can use. """ - packages = {} + packages = OrderedDict() for hit in hits: name = hit['name'] summary = hit['summary'] version = hit['version'] - score = hit['_pypi_ordering'] - if score is None: - score = 0 if name not in packages.keys(): packages[name] = { 'name': name, 'summary': summary, 'versions': [version], - 'score': score, } else: packages[name]['versions'].append(version) @@ -89,16 +90,8 @@ def transform_hits(hits): # if this is the highest version, replace summary and score if version == highest_version(packages[name]['versions']): packages[name]['summary'] = summary - packages[name]['score'] = score - # each record has a unique name now, so we will convert the dict into a - # list sorted by score - package_list = sorted( - packages.values(), - key=lambda x: x['score'], - reverse=True, - ) - return package_list + return list(packages.values()) def print_results(hits, name_column_width=None, terminal_width=None): @@ -106,7 +99,7 @@ def print_results(hits, name_column_width=None, terminal_width=None): return if name_column_width is None: name_column_width = max([ - len(hit['name']) + len(hit.get('versions', ['-'])[-1]) + len(hit['name']) + len(highest_version(hit.get('versions', ['-']))) for hit in hits ]) + 4 @@ -114,23 +107,21 @@ def print_results(hits, name_column_width=None, terminal_width=None): for hit in hits: name = hit['name'] summary = hit['summary'] or '' - version = hit.get('versions', ['-'])[-1] + latest = highest_version(hit.get('versions', ['-'])) if terminal_width is not None: - # wrap and indent summary to fit terminal - summary = textwrap.wrap( - summary, - terminal_width - 
name_column_width - 5, - ) - summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) + target_width = terminal_width - name_column_width - 5 + if target_width > 10: + # wrap and indent summary to fit terminal + summary = textwrap.wrap(summary, target_width) + summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) line = '%-*s - %s' % (name_column_width, - '%s (%s)' % (name, version), summary) + '%s (%s)' % (name, latest), summary) try: logger.info(line) if name in installed_packages: dist = pkg_resources.get_distribution(name) with indent_log(): - latest = highest_version(hit['versions']) if dist.version == latest: logger.info('INSTALLED: %s (latest)', dist.version) else: @@ -141,6 +132,4 @@ def print_results(hits, name_column_width=None, terminal_width=None): def highest_version(versions): - return next(iter( - sorted(versions, key=pkg_resources.parse_version, reverse=True) - )) + return max(versions, key=parse_version) diff --git a/Shared/lib/python3.4/site-packages/pip/commands/show.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/show.py similarity index 63% rename from Shared/lib/python3.4/site-packages/pip/commands/show.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/show.py index 52a673a..f92c9bc 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/show.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/show.py @@ -1,23 +1,29 @@ from __future__ import absolute_import -from email.parser import FeedParser import logging import os +from email.parser import FeedParser # type: ignore -from pip.basecommand import Command -from pip.status_codes import SUCCESS, ERROR from pip._vendor import pkg_resources +from pip._vendor.packaging.utils import canonicalize_name +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import ERROR, SUCCESS logger = logging.getLogger(__name__) class ShowCommand(Command): - """Show information about one or more installed 
packages.""" + """ + Show information about one or more installed packages. + + The output is in RFC-compliant mail header format. + """ name = 'show' usage = """ %prog [options] ...""" summary = 'Show information about installed packages.' + ignore_require_venv = True def __init__(self, *args, **kw): super(ShowCommand, self).__init__(*args, **kw) @@ -37,7 +43,8 @@ class ShowCommand(Command): query = args results = search_packages_info(query) - if not print_results(results, options.files): + if not print_results( + results, list_files=options.files, verbose=options.verbose): return ERROR return SUCCESS @@ -49,9 +56,12 @@ def search_packages_info(query): pip generated 'installed-files.txt' in the distributions '.egg-info' directory. """ - installed = dict( - [(p.project_name.lower(), p) for p in pkg_resources.working_set]) - query_names = [name.lower() for name in query] + installed = {} + for p in pkg_resources.working_set: + installed[canonicalize_name(p.project_name)] = p + + query_names = [canonicalize_name(name) for name in query] + for dist in [installed[pkg] for pkg in query_names if pkg in installed]: package = { 'name': dist.project_name, @@ -85,13 +95,11 @@ def search_packages_info(query): entry_points = dist.get_metadata_lines('entry_points.txt') package['entry_points'] = entry_points - installer = None if dist.has_metadata('INSTALLER'): for line in dist.get_metadata_lines('INSTALLER'): if line.strip(): - installer = line.strip() + package['installer'] = line.strip() break - package['installer'] = installer # @todo: Should pkg_resources.Distribution have a # `get_pkg_info` method? 
@@ -102,12 +110,9 @@ def search_packages_info(query): 'home-page', 'author', 'author-email', 'license'): package[key] = pkg_info_dict.get(key) - # It looks like FeedParser can not deal with repeated headers + # It looks like FeedParser cannot deal with repeated headers classifiers = [] for line in metadata.splitlines(): - if not line: - break - # Classifier: License :: OSI Approved :: MIT License if line.startswith('Classifier: '): classifiers.append(line[len('Classifier: '):]) package['classifiers'] = classifiers @@ -117,38 +122,47 @@ def search_packages_info(query): yield package -def print_results(distributions, list_all_files): +def print_results(distributions, list_files=False, verbose=False): """ Print the informations from installed distributions found. """ results_printed = False - for dist in distributions: + for i, dist in enumerate(distributions): results_printed = True - logger.info("---") - logger.info("Metadata-Version: %s", dist.get('metadata-version')) - logger.info("Name: %s", dist['name']) - logger.info("Version: %s", dist['version']) - logger.info("Summary: %s", dist.get('summary')) - logger.info("Home-page: %s", dist.get('home-page')) - logger.info("Author: %s", dist.get('author')) - logger.info("Author-email: %s", dist.get('author-email')) - if dist['installer'] is not None: - logger.info("Installer: %s", dist['installer']) - logger.info("License: %s", dist.get('license')) - logger.info("Location: %s", dist['location']) - logger.info("Requires: %s", ', '.join(dist['requires'])) - logger.info("Classifiers:") - for classifier in dist['classifiers']: - logger.info(" %s", classifier) - if list_all_files: - logger.info("Files:") - if 'files' in dist: - for line in dist['files']: - logger.info(" %s", line.strip()) - else: - logger.info("Cannot locate installed-files.txt") - if 'entry_points' in dist: + if i > 0: + logger.info("---") + + name = dist.get('name', '') + required_by = [ + pkg.project_name for pkg in pkg_resources.working_set + if name in 
[required.name for required in pkg.requires()] + ] + + logger.info("Name: %s", name) + logger.info("Version: %s", dist.get('version', '')) + logger.info("Summary: %s", dist.get('summary', '')) + logger.info("Home-page: %s", dist.get('home-page', '')) + logger.info("Author: %s", dist.get('author', '')) + logger.info("Author-email: %s", dist.get('author-email', '')) + logger.info("License: %s", dist.get('license', '')) + logger.info("Location: %s", dist.get('location', '')) + logger.info("Requires: %s", ', '.join(dist.get('requires', []))) + logger.info("Required-by: %s", ', '.join(required_by)) + + if verbose: + logger.info("Metadata-Version: %s", + dist.get('metadata-version', '')) + logger.info("Installer: %s", dist.get('installer', '')) + logger.info("Classifiers:") + for classifier in dist.get('classifiers', []): + logger.info(" %s", classifier) logger.info("Entry-points:") - for line in dist['entry_points']: + for entry in dist.get('entry_points', []): + logger.info(" %s", entry.strip()) + if list_files: + logger.info("Files:") + for line in dist.get('files', []): logger.info(" %s", line.strip()) + if "files" not in dist: + logger.info("Cannot locate installed-files.txt") return results_printed diff --git a/Shared/lib/python3.4/site-packages/pip/commands/uninstall.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/uninstall.py similarity index 60% rename from Shared/lib/python3.4/site-packages/pip/commands/uninstall.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/uninstall.py index 8ba1a7c..0cd6f54 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/uninstall.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/uninstall.py @@ -1,10 +1,12 @@ from __future__ import absolute_import -import pip -from pip.wheel import WheelCache -from pip.req import InstallRequirement, RequirementSet, parse_requirements -from pip.basecommand import Command -from pip.exceptions import InstallationError +from 
pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.cli.base_command import Command +from pip._internal.exceptions import InstallationError +from pip._internal.req import parse_requirements +from pip._internal.req.constructors import install_req_from_line +from pip._internal.utils.misc import protect_pip_from_modification_on_windows class UninstallCommand(Command): @@ -44,33 +46,33 @@ class UninstallCommand(Command): def run(self, options, args): with self._build_session(options) as session: - format_control = pip.index.FormatControl(set(), set()) - wheel_cache = WheelCache(options.cache_dir, format_control) - requirement_set = RequirementSet( - build_dir=None, - src_dir=None, - download_dir=None, - isolated=options.isolated_mode, - session=session, - wheel_cache=wheel_cache, - ) + reqs_to_uninstall = {} for name in args: - requirement_set.add_requirement( - InstallRequirement.from_line( - name, isolated=options.isolated_mode, - wheel_cache=wheel_cache - ) + req = install_req_from_line( + name, isolated=options.isolated_mode, ) + if req.name: + reqs_to_uninstall[canonicalize_name(req.name)] = req for filename in options.requirements: for req in parse_requirements( filename, options=options, - session=session, - wheel_cache=wheel_cache): - requirement_set.add_requirement(req) - if not requirement_set.has_requirements: + session=session): + if req.name: + reqs_to_uninstall[canonicalize_name(req.name)] = req + if not reqs_to_uninstall: raise InstallationError( 'You must give at least one requirement to %(name)s (see ' '"pip help %(name)s")' % dict(name=self.name) ) - requirement_set.uninstall(auto_confirm=options.yes) + + protect_pip_from_modification_on_windows( + modifying_pip="pip" in reqs_to_uninstall + ) + + for req in reqs_to_uninstall.values(): + uninstall_pathset = req.uninstall( + auto_confirm=options.yes, verbose=self.verbosity > 0, + ) + if uninstall_pathset: + uninstall_pathset.commit() diff --git 
a/Shared/lib/python3.4/site-packages/pip/commands/wheel.py b/Shared/lib/python3.4/site-packages/pip/_internal/commands/wheel.py similarity index 54% rename from Shared/lib/python3.4/site-packages/pip/commands/wheel.py rename to Shared/lib/python3.4/site-packages/pip/_internal/commands/wheel.py index 1d77fe6..9c1f149 100644 --- a/Shared/lib/python3.4/site-packages/pip/commands/wheel.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/commands/wheel.py @@ -3,17 +3,17 @@ from __future__ import absolute_import import logging import os -import warnings - -from pip.basecommand import RequirementCommand -from pip.exceptions import CommandError, PreviousBuildDirError -from pip.req import RequirementSet -from pip.utils import import_or_raise -from pip.utils.build import BuildDirectory -from pip.utils.deprecation import RemovedInPip10Warning -from pip.wheel import WheelCache, WheelBuilder -from pip import cmdoptions +from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import RequirementCommand +from pip._internal.exceptions import CommandError, PreviousBuildDirError +from pip._internal.operations.prepare import RequirementPreparer +from pip._internal.req import RequirementSet +from pip._internal.req.req_tracker import RequirementTracker +from pip._internal.resolve import Resolver +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.wheel import WheelBuilder logger = logging.getLogger(__name__) @@ -24,7 +24,7 @@ class WheelCommand(RequirementCommand): Wheel is a built-package format, and offers the advantage of not recompiling your software during every install. For more details, see the - wheel docs: http://wheel.readthedocs.org/en/latest. + wheel docs: https://wheel.readthedocs.io/en/latest/ Requirements: setuptools>=0.8, and wheel. 
@@ -56,22 +56,25 @@ class WheelCommand(RequirementCommand): help=("Build wheels into , where the default is the " "current working directory."), ) - cmd_opts.add_option(cmdoptions.use_wheel()) - cmd_opts.add_option(cmdoptions.no_use_wheel()) cmd_opts.add_option(cmdoptions.no_binary()) cmd_opts.add_option(cmdoptions.only_binary()) + cmd_opts.add_option(cmdoptions.prefer_binary()) cmd_opts.add_option( '--build-option', dest='build_options', metavar='options', action='append', - help="Extra arguments to be supplied to 'setup.py bdist_wheel'.") + help="Extra arguments to be supplied to 'setup.py bdist_wheel'.", + ) + cmd_opts.add_option(cmdoptions.no_build_isolation()) cmd_opts.add_option(cmdoptions.constraints()) cmd_opts.add_option(cmdoptions.editable()) cmd_opts.add_option(cmdoptions.requirements()) cmd_opts.add_option(cmdoptions.src()) + cmd_opts.add_option(cmdoptions.ignore_requires_python()) cmd_opts.add_option(cmdoptions.no_deps()) cmd_opts.add_option(cmdoptions.build_dir()) + cmd_opts.add_option(cmdoptions.progress_bar()) cmd_opts.add_option( '--global-option', @@ -100,99 +103,74 @@ class WheelCommand(RequirementCommand): self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, cmd_opts) - def check_required_packages(self): - import_or_raise( - 'wheel.bdist_wheel', - CommandError, - "'pip wheel' requires the 'wheel' package. To fix this, run: " - "pip install wheel" - ) - pkg_resources = import_or_raise( - 'pkg_resources', - CommandError, - "'pip wheel' requires setuptools >= 0.8 for dist-info support." - " To fix this, run: pip install --upgrade setuptools" - ) - if not hasattr(pkg_resources, 'DistInfoDistribution'): - raise CommandError( - "'pip wheel' requires setuptools >= 0.8 for dist-info " - "support. 
To fix this, run: pip install --upgrade " - "setuptools" - ) - def run(self, options, args): - self.check_required_packages() - cmdoptions.resolve_wheel_no_use_binary(options) cmdoptions.check_install_build_global(options) - if options.allow_external: - warnings.warn( - "--allow-external has been deprecated and will be removed in " - "the future. Due to changes in the repository protocol, it no " - "longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_all_external: - warnings.warn( - "--allow-all-external has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_unverified: - warnings.warn( - "--allow-unverified has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - index_urls = [options.index_url] + options.extra_index_urls if options.no_index: - logger.info('Ignoring indexes: %s', ','.join(index_urls)) + logger.debug('Ignoring indexes: %s', ','.join(index_urls)) index_urls = [] if options.build_dir: options.build_dir = os.path.abspath(options.build_dir) + options.src_dir = os.path.abspath(options.src_dir) + with self._build_session(options) as session: finder = self._build_package_finder(options, session) build_delete = (not (options.no_clean or options.build_dir)) wheel_cache = WheelCache(options.cache_dir, options.format_control) - with BuildDirectory(options.build_dir, - delete=build_delete) as build_dir: + + with RequirementTracker() as req_tracker, TempDirectory( + options.build_dir, delete=build_delete, kind="wheel" + ) as directory: + requirement_set = RequirementSet( - build_dir=build_dir, - src_dir=options.src_dir, - download_dir=None, - ignore_dependencies=options.ignore_dependencies, - ignore_installed=True, - isolated=options.isolated_mode, - session=session, - wheel_cache=wheel_cache, - 
wheel_download_dir=options.wheel_dir, - require_hashes=options.require_hashes + require_hashes=options.require_hashes, ) - self.populate_requirement_set( - requirement_set, args, options, finder, session, self.name, - wheel_cache - ) - - if not requirement_set.has_requirements: - return - try: + self.populate_requirement_set( + requirement_set, args, options, finder, session, + self.name, wheel_cache + ) + + preparer = RequirementPreparer( + build_dir=directory.path, + src_dir=options.src_dir, + download_dir=None, + wheel_download_dir=options.wheel_dir, + progress_bar=options.progress_bar, + build_isolation=options.build_isolation, + req_tracker=req_tracker, + ) + + resolver = Resolver( + preparer=preparer, + finder=finder, + session=session, + wheel_cache=wheel_cache, + use_user_site=False, + upgrade_strategy="to-satisfy-only", + force_reinstall=False, + ignore_dependencies=options.ignore_dependencies, + ignore_requires_python=options.ignore_requires_python, + ignore_installed=True, + isolated=options.isolated_mode, + ) + resolver.resolve(requirement_set) + # build wheels wb = WheelBuilder( - requirement_set, - finder, + finder, preparer, wheel_cache, build_options=options.build_options or [], global_options=options.global_options or [], + no_clean=options.no_clean, ) - if not wb.build(): + wheels_built_successfully = wb.build( + requirement_set.requirements.values(), session=session, + ) + if not wheels_built_successfully: raise CommandError( "Failed to build one or more wheels" ) @@ -202,3 +180,4 @@ class WheelCommand(RequirementCommand): finally: if not options.no_clean: requirement_set.cleanup_files() + wheel_cache.cleanup() diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/configuration.py b/Shared/lib/python3.4/site-packages/pip/_internal/configuration.py new file mode 100644 index 0000000..fe6df9b --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/configuration.py @@ -0,0 +1,387 @@ +"""Configuration management setup + +Some 
terminology: +- name + As written in config files. +- value + Value associated with a name +- key + Name combined with it's section (section.name) +- variant + A single word describing where the configuration key-value pair came from +""" + +import locale +import logging +import os + +from pip._vendor import six +from pip._vendor.six.moves import configparser + +from pip._internal.exceptions import ( + ConfigurationError, ConfigurationFileCouldNotBeLoaded, +) +from pip._internal.locations import ( + legacy_config_file, new_config_file, running_under_virtualenv, + site_config_files, venv_config_file, +) +from pip._internal.utils.misc import ensure_dir, enum +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import ( # noqa: F401 + Any, Dict, Iterable, List, NewType, Optional, Tuple + ) + + RawConfigParser = configparser.RawConfigParser # Shorthand + Kind = NewType("Kind", str) + +logger = logging.getLogger(__name__) + + +# NOTE: Maybe use the optionx attribute to normalize keynames. +def _normalize_name(name): + # type: (str) -> str + """Make a name consistent regardless of source (environment or file) + """ + name = name.lower().replace('_', '-') + if name.startswith('--'): + name = name[2:] # only prefer long opts + return name + + +def _disassemble_key(name): + # type: (str) -> List[str] + return name.split(".", 1) + + +# The kinds of configurations there are. +kinds = enum( + USER="user", # User Specific + GLOBAL="global", # System Wide + VENV="venv", # Virtual Environment Specific + ENV="env", # from PIP_CONFIG_FILE + ENV_VAR="env-var", # from Environment Variables +) + + +class Configuration(object): + """Handles management of configuration. + + Provides an interface to accessing and managing configuration files. + + This class converts provides an API that takes "section.key-name" style + keys and stores the value associated with it as "key-name" under the + section "section". 
+ + This allows for a clean interface wherein the both the section and the + key-name are preserved in an easy to manage form in the configuration files + and the data stored is also nice. + """ + + def __init__(self, isolated, load_only=None): + # type: (bool, Kind) -> None + super(Configuration, self).__init__() + + _valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.VENV, None] + if load_only not in _valid_load_only: + raise ConfigurationError( + "Got invalid value for load_only - should be one of {}".format( + ", ".join(map(repr, _valid_load_only[:-1])) + ) + ) + self.isolated = isolated # type: bool + self.load_only = load_only # type: Optional[Kind] + + # The order here determines the override order. + self._override_order = [ + kinds.GLOBAL, kinds.USER, kinds.VENV, kinds.ENV, kinds.ENV_VAR + ] + + self._ignore_env_names = ["version", "help"] + + # Because we keep track of where we got the data from + self._parsers = { + variant: [] for variant in self._override_order + } # type: Dict[Kind, List[Tuple[str, RawConfigParser]]] + self._config = { + variant: {} for variant in self._override_order + } # type: Dict[Kind, Dict[str, Any]] + self._modified_parsers = [] # type: List[Tuple[str, RawConfigParser]] + + def load(self): + # type: () -> None + """Loads configuration from configuration files and environment + """ + self._load_config_files() + if not self.isolated: + self._load_environment_vars() + + def get_file_to_edit(self): + # type: () -> Optional[str] + """Returns the file with highest priority in configuration + """ + assert self.load_only is not None, \ + "Need to be specified a file to be editing" + + try: + return self._get_parser_to_modify()[0] + except IndexError: + return None + + def items(self): + # type: () -> Iterable[Tuple[str, Any]] + """Returns key-value pairs like dict.items() representing the loaded + configuration + """ + return self._dictionary.items() + + def get_value(self, key): + # type: (str) -> Any + """Get a value from the 
configuration. + """ + try: + return self._dictionary[key] + except KeyError: + raise ConfigurationError("No such key - {}".format(key)) + + def set_value(self, key, value): + # type: (str, Any) -> None + """Modify a value in the configuration. + """ + self._ensure_have_load_only() + + fname, parser = self._get_parser_to_modify() + + if parser is not None: + section, name = _disassemble_key(key) + + # Modify the parser and the configuration + if not parser.has_section(section): + parser.add_section(section) + parser.set(section, name, value) + + self._config[self.load_only][key] = value + self._mark_as_modified(fname, parser) + + def unset_value(self, key): + # type: (str) -> None + """Unset a value in the configuration. + """ + self._ensure_have_load_only() + + if key not in self._config[self.load_only]: + raise ConfigurationError("No such key - {}".format(key)) + + fname, parser = self._get_parser_to_modify() + + if parser is not None: + section, name = _disassemble_key(key) + + # Remove the key in the parser + modified_something = False + if parser.has_section(section): + # Returns whether the option was removed or not + modified_something = parser.remove_option(section, name) + + if modified_something: + # name removed from parser, section may now be empty + section_iter = iter(parser.items(section)) + try: + val = six.next(section_iter) + except StopIteration: + val = None + + if val is None: + parser.remove_section(section) + + self._mark_as_modified(fname, parser) + else: + raise ConfigurationError( + "Fatal Internal error [id=1]. Please report as a bug." + ) + + del self._config[self.load_only][key] + + def save(self): + # type: () -> None + """Save the currentin-memory state. + """ + self._ensure_have_load_only() + + for fname, parser in self._modified_parsers: + logger.info("Writing to %s", fname) + + # Ensure directory exists. 
+ ensure_dir(os.path.dirname(fname)) + + with open(fname, "w") as f: + parser.write(f) # type: ignore + + # + # Private routines + # + + def _ensure_have_load_only(self): + # type: () -> None + if self.load_only is None: + raise ConfigurationError("Needed a specific file to be modifying.") + logger.debug("Will be working with %s variant only", self.load_only) + + @property + def _dictionary(self): + # type: () -> Dict[str, Any] + """A dictionary representing the loaded configuration. + """ + # NOTE: Dictionaries are not populated if not loaded. So, conditionals + # are not needed here. + retval = {} + + for variant in self._override_order: + retval.update(self._config[variant]) + + return retval + + def _load_config_files(self): + # type: () -> None + """Loads configuration from configuration files + """ + config_files = dict(self._iter_config_files()) + if config_files[kinds.ENV][0:1] == [os.devnull]: + logger.debug( + "Skipping loading configuration files due to " + "environment's PIP_CONFIG_FILE being os.devnull" + ) + return + + for variant, files in config_files.items(): + for fname in files: + # If there's specific variant set in `load_only`, load only + # that variant, not the others. 
+ if self.load_only is not None and variant != self.load_only: + logger.debug( + "Skipping file '%s' (variant: %s)", fname, variant + ) + continue + + parser = self._load_file(variant, fname) + + # Keeping track of the parsers used + self._parsers[variant].append((fname, parser)) + + def _load_file(self, variant, fname): + # type: (Kind, str) -> RawConfigParser + logger.debug("For variant '%s', will try loading '%s'", variant, fname) + parser = self._construct_parser(fname) + + for section in parser.sections(): + items = parser.items(section) + self._config[variant].update(self._normalized_keys(section, items)) + + return parser + + def _construct_parser(self, fname): + # type: (str) -> RawConfigParser + parser = configparser.RawConfigParser() + # If there is no such file, don't bother reading it but create the + # parser anyway, to hold the data. + # Doing this is useful when modifying and saving files, where we don't + # need to construct a parser. + if os.path.exists(fname): + try: + parser.read(fname) + except UnicodeDecodeError: + # See https://github.com/pypa/pip/issues/4963 + raise ConfigurationFileCouldNotBeLoaded( + reason="contains invalid {} characters".format( + locale.getpreferredencoding(False) + ), + fname=fname, + ) + except configparser.Error as error: + # See https://github.com/pypa/pip/issues/4893 + raise ConfigurationFileCouldNotBeLoaded(error=error) + return parser + + def _load_environment_vars(self): + # type: () -> None + """Loads configuration from environment variables + """ + self._config[kinds.ENV_VAR].update( + self._normalized_keys(":env:", self._get_environ_vars()) + ) + + def _normalized_keys(self, section, items): + # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any] + """Normalizes items to construct a dictionary with normalized keys. + + This routine is where the names become keys and are made the same + regardless of source - configuration files or environment. 
+ """ + normalized = {} + for name, val in items: + key = section + "." + _normalize_name(name) + normalized[key] = val + return normalized + + def _get_environ_vars(self): + # type: () -> Iterable[Tuple[str, str]] + """Returns a generator with all environmental vars with prefix PIP_""" + for key, val in os.environ.items(): + should_be_yielded = ( + key.startswith("PIP_") and + key[4:].lower() not in self._ignore_env_names + ) + if should_be_yielded: + yield key[4:].lower(), val + + # XXX: This is patched in the tests. + def _iter_config_files(self): + # type: () -> Iterable[Tuple[Kind, List[str]]] + """Yields variant and configuration files associated with it. + + This should be treated like items of a dictionary. + """ + # SMELL: Move the conditions out of this function + + # environment variables have the lowest priority + config_file = os.environ.get('PIP_CONFIG_FILE', None) + if config_file is not None: + yield kinds.ENV, [config_file] + else: + yield kinds.ENV, [] + + # at the base we have any global configuration + yield kinds.GLOBAL, list(site_config_files) + + # per-user configuration next + should_load_user_config = not self.isolated and not ( + config_file and os.path.exists(config_file) + ) + if should_load_user_config: + # The legacy config file is overridden by the new config file + yield kinds.USER, [legacy_config_file, new_config_file] + + # finally virtualenv configuration first trumping others + if running_under_virtualenv(): + yield kinds.VENV, [venv_config_file] + + def _get_parser_to_modify(self): + # type: () -> Tuple[str, RawConfigParser] + # Determine which parser to modify + parsers = self._parsers[self.load_only] + if not parsers: + # This should not happen if everything works correctly. + raise ConfigurationError( + "Fatal Internal error [id=2]. Please report as a bug." + ) + + # Use the highest priority parser. + return parsers[-1] + + # XXX: This is patched in the tests. 
+ def _mark_as_modified(self, fname, parser): + # type: (str, RawConfigParser) -> None + file_parser_tuple = (fname, parser) + if file_parser_tuple not in self._modified_parsers: + self._modified_parsers.append(file_parser_tuple) diff --git a/Shared/lib/python3.4/site-packages/pip/download.py b/Shared/lib/python3.4/site-packages/pip/_internal/download.py similarity index 84% rename from Shared/lib/python3.4/site-packages/pip/download.py rename to Shared/lib/python3.4/site-packages/pip/_internal/download.py index bbef9ea..96f3b65 100644 --- a/Shared/lib/python3.4/site-packages/pip/download.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/download.py @@ -11,42 +11,48 @@ import platform import re import shutil import sys -import tempfile -try: - import ssl # noqa - HAS_TLS = True -except ImportError: - HAS_TLS = False - -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib import request as urllib_request - -import pip - -from pip.exceptions import InstallationError, HashMismatch -from pip.models import PyPI -from pip.utils import (splitext, rmtree, format_size, display_path, - backup_dir, ask_path_exists, unpack_file, - ARCHIVE_EXTENSIONS, consume, call_subprocess) -from pip.utils.encoding import auto_decode -from pip.utils.filesystem import check_path_owner -from pip.utils.logging import indent_log -from pip.utils.setuptools_build import SETUPTOOLS_SHIM -from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner -from pip.locations import write_delete_marker_file -from pip.vcs import vcs -from pip._vendor import requests, six +from pip._vendor import requests, six, urllib3 +from pip._vendor.cachecontrol import CacheControlAdapter +from pip._vendor.cachecontrol.caches import FileCache +from pip._vendor.lockfile import LockError from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth from pip._vendor.requests.models import 
CONTENT_CHUNK_SIZE, Response from pip._vendor.requests.structures import CaseInsensitiveDict -from pip._vendor.requests.packages import urllib3 -from pip._vendor.cachecontrol import CacheControlAdapter -from pip._vendor.cachecontrol.caches import FileCache -from pip._vendor.lockfile import LockError -from pip._vendor.six.moves import xmlrpc_client +from pip._vendor.requests.utils import get_netrc_auth +# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is +# why we ignore the type on this import +from pip._vendor.six.moves import xmlrpc_client # type: ignore +from pip._vendor.six.moves.urllib import parse as urllib_parse +from pip._vendor.six.moves.urllib import request as urllib_request +from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote +from pip._vendor.urllib3.util import IS_PYOPENSSL +import pip +from pip._internal.exceptions import HashMismatch, InstallationError +from pip._internal.locations import write_delete_marker_file +from pip._internal.models.index import PyPI +from pip._internal.utils.encoding import auto_decode +from pip._internal.utils.filesystem import check_path_owner +from pip._internal.utils.glibc import libc_ver +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + ARCHIVE_EXTENSIONS, ask_path_exists, backup_dir, call_subprocess, consume, + display_path, format_size, get_installed_version, rmtree, splitext, + unpack_file, +) +from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.ui import DownloadProgressProvider +from pip._internal.vcs import vcs + +try: + import ssl # noqa +except ImportError: + ssl = None + +HAS_TLS = (ssl is not None) or IS_PYOPENSSL __all__ = ['get_file_content', 'is_url', 'url_to_path', 'path_to_url', @@ -88,21 +94,22 @@ def user_agent(): data["implementation"]["version"] = platform.python_version() if sys.platform.startswith("linux"): - distro 
= dict(filter( + from pip._vendor import distro + distro_infos = dict(filter( lambda x: x[1], - zip(["name", "version", "id"], platform.linux_distribution()), + zip(["name", "version", "id"], distro.linux_distribution()), )) libc = dict(filter( lambda x: x[1], - zip(["lib", "version"], platform.libc_ver()), + zip(["lib", "version"], libc_ver()), )) if libc: - distro["libc"] = libc - if distro: - data["distro"] = distro + distro_infos["libc"] = libc + if distro_infos: + data["distro"] = distro_infos if sys.platform.startswith("darwin") and platform.mac_ver()[0]: - data["distro"] = {"name": "OS X", "version": platform.mac_ver()[0]} + data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]} if platform.system(): data.setdefault("system", {})["name"] = platform.system() @@ -113,10 +120,13 @@ def user_agent(): if platform.machine(): data["cpu"] = platform.machine() - # Python 2.6 doesn't have ssl.OPENSSL_VERSION. - if HAS_TLS and sys.version_info[:2] > (2, 6): + if HAS_TLS: data["openssl_version"] = ssl.OPENSSL_VERSION + setuptools_version = get_installed_version("setuptools") + if setuptools_version is not None: + data["setuptools_version"] = setuptools_version + return "{data[installer][name]}/{data[installer][version]} {json}".format( data=data, json=json.dumps(data, separators=(",", ":"), sort_keys=True), @@ -145,6 +155,11 @@ class MultiDomainBasicAuth(AuthBase): if username is None: username, password = self.parse_credentials(parsed.netloc) + # Get creds from netrc if we still don't have them + if username is None and password is None: + netrc_auth = get_netrc_auth(req.url) + username, password = netrc_auth if netrc_auth else (None, None) + if username or password: # Store the username and password self.passwords[netloc] = (username, password) @@ -163,7 +178,7 @@ class MultiDomainBasicAuth(AuthBase): if resp.status_code != 401: return resp - # We are not able to prompt the user so simple return the response + # We are not able to prompt the user so 
simply return the response if not self.prompting: return resp @@ -195,8 +210,9 @@ class MultiDomainBasicAuth(AuthBase): if "@" in netloc: userinfo = netloc.rsplit("@", 1)[0] if ":" in userinfo: - return userinfo.split(":", 1) - return userinfo, None + user, pwd = userinfo.split(":", 1) + return (urllib_unquote(user), urllib_unquote(pwd)) + return urllib_unquote(userinfo), None return None, None @@ -331,10 +347,12 @@ class PipSession(requests.Session): total=retries, # A 503 error from PyPI typically means that the Fastly -> Origin - # connection got interupted in some way. A 503 error in general + # connection got interrupted in some way. A 503 error in general # is typically considered a transient error so we'll go ahead and # retry it. - status_forcelist=[503], + # A 500 may indicate transient error in Amazon S3 + # A 520 or 527 - may indicate transient error in CloudFlare + status_forcelist=[500, 503, 520, 527], # Add a small amount of back off between failed requests in # order to prevent hammering the service. @@ -368,7 +386,7 @@ class PipSession(requests.Session): # We want to use a non-validating adapter for any requests which are # deemed insecure. for host in insecure_hosts: - self.mount("https://{0}/".format(host), insecure_adapter) + self.mount("https://{}/".format(host), insecure_adapter) def request(self, method, url, *args, **kwargs): # Allow setting a default timeout on a session @@ -380,7 +398,12 @@ class PipSession(requests.Session): def get_file_content(url, comes_from=None, session=None): """Gets the content of a file; it may be a filename, file: URL, or - http: URL. Returns (location, content). Content is unicode.""" + http: URL. Returns (location, content). Content is unicode. + + :param url: File path or url. + :param comes_from: Origin description of requirements. + :param session: Instance of pip.download.PipSession. 
+ """ if session is None: raise TypeError( "get_file_content() missing 1 required keyword argument: 'session'" @@ -501,14 +524,13 @@ def _progress_indicator(iterable, *args, **kwargs): return iterable -def _download_url(resp, link, content_file, hashes): +def _download_url(resp, link, content_file, hashes, progress_bar): try: total_length = int(resp.headers['content-length']) except (ValueError, KeyError, TypeError): total_length = 0 cached_resp = getattr(resp, "from_cache", False) - if logger.getEffectiveLevel() > logging.INFO: show_progress = False elif cached_resp: @@ -572,12 +594,12 @@ def _download_url(resp, link, content_file, hashes): url = link.url_without_fragment if show_progress: # We don't show progress on cached responses + progress_indicator = DownloadProgressProvider(progress_bar, + max=total_length) if total_length: logger.info("Downloading %s (%s)", url, format_size(total_length)) - progress_indicator = DownloadProgressBar(max=total_length).iter else: logger.info("Downloading %s", url) - progress_indicator = DownloadProgressSpinner().iter elif cached_resp: logger.info("Using cached %s", url) else: @@ -602,8 +624,8 @@ def _copy_file(filename, location, link): download_location = os.path.join(location, link.filename) if os.path.exists(download_location): response = ask_path_exists( - 'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' % - display_path(download_location), ('i', 'w', 'b')) + 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup, (a)abort' % + display_path(download_location), ('i', 'w', 'b', 'a')) if response == 'i': copy = False elif response == 'w': @@ -617,48 +639,49 @@ def _copy_file(filename, location, link): display_path(dest_file), ) shutil.move(download_location, dest_file) + elif response == 'a': + sys.exit(-1) if copy: shutil.copy(filename, download_location) logger.info('Saved %s', display_path(download_location)) def unpack_http_url(link, location, download_dir=None, - session=None, hashes=None): + session=None, hashes=None, progress_bar="on"): if session is None: raise TypeError( "unpack_http_url() missing 1 required keyword argument: 'session'" ) - temp_dir = tempfile.mkdtemp('-unpack', 'pip-') + with TempDirectory(kind="unpack") as temp_dir: + # If a download dir is specified, is the file already downloaded there? + already_downloaded_path = None + if download_dir: + already_downloaded_path = _check_download_dir(link, + download_dir, + hashes) - # If a download dir is specified, is the file already downloaded there? - already_downloaded_path = None - if download_dir: - already_downloaded_path = _check_download_dir(link, - download_dir, - hashes) + if already_downloaded_path: + from_path = already_downloaded_path + content_type = mimetypes.guess_type(from_path)[0] + else: + # let's download to a tmp dir + from_path, content_type = _download_http_url(link, + session, + temp_dir.path, + hashes, + progress_bar) - if already_downloaded_path: - from_path = already_downloaded_path - content_type = mimetypes.guess_type(from_path)[0] - else: - # let's download to a tmp dir - from_path, content_type = _download_http_url(link, - session, - temp_dir, - hashes) + # unpack the archive to the build dir location. even when only + # downloading archives, they have to be unpacked to parse dependencies + unpack_file(from_path, location, content_type, link) - # unpack the archive to the build dir location. 
even when only downloading - # archives, they have to be unpacked to parse dependencies - unpack_file(from_path, location, content_type, link) + # a download dir is specified; let's copy the archive there + if download_dir and not already_downloaded_path: + _copy_file(from_path, download_dir, link) - # a download dir is specified; let's copy the archive there - if download_dir and not already_downloaded_path: - _copy_file(from_path, download_dir, link) - - if not already_downloaded_path: - os.unlink(from_path) - rmtree(temp_dir) + if not already_downloaded_path: + os.unlink(from_path) def unpack_file_url(link, location, download_dir=None, hashes=None): @@ -679,7 +702,7 @@ def unpack_file_url(link, location, download_dir=None, hashes=None): return # If --require-hashes is off, `hashes` is either empty, the - # link's embeddded hash, or MissingHashes; it is required to + # link's embedded hash, or MissingHashes; it is required to # match. If --require-hashes is on, we are satisfied by any # hash in `hashes` matching: a URL-based or an option-based # one; no internet-sourced hash will be in `hashes`. @@ -749,6 +772,7 @@ class PipXmlrpcTransport(xmlrpc_client.Transport): """Provide a `xmlrpclib.Transport` implementation via a `PipSession` object. """ + def __init__(self, index_url, session, use_datetime=False): xmlrpc_client.Transport.__init__(self, use_datetime) index_parts = urllib_parse.urlparse(index_url) @@ -774,7 +798,8 @@ class PipXmlrpcTransport(xmlrpc_client.Transport): def unpack_url(link, location, download_dir=None, - only_download=False, session=None, hashes=None): + only_download=False, session=None, hashes=None, + progress_bar="on"): """Unpack link. 
If link is a VCS link: if only_download, export into download_dir and ignore location @@ -807,13 +832,14 @@ def unpack_url(link, location, download_dir=None, location, download_dir, session, - hashes=hashes + hashes=hashes, + progress_bar=progress_bar ) if only_download: write_delete_marker_file(location) -def _download_http_url(link, session, temp_dir, hashes): +def _download_http_url(link, session, temp_dir, hashes, progress_bar): """Download link url into temp_dir using provided session""" target_url = link.url.split('#', 1)[0] try: @@ -868,7 +894,7 @@ def _download_http_url(link, session, temp_dir, hashes): filename += ext file_path = os.path.join(temp_dir, filename) with open(file_path, 'wb') as content_file: - _download_url(resp, link, content_file, hashes) + _download_url(resp, link, content_file, hashes, progress_bar) return file_path, content_type diff --git a/Shared/lib/python3.4/site-packages/pip/exceptions.py b/Shared/lib/python3.4/site-packages/pip/_internal/exceptions.py similarity index 88% rename from Shared/lib/python3.4/site-packages/pip/exceptions.py rename to Shared/lib/python3.4/site-packages/pip/_internal/exceptions.py index a529e40..f1ca6f3 100644 --- a/Shared/lib/python3.4/site-packages/pip/exceptions.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/exceptions.py @@ -10,6 +10,10 @@ class PipError(Exception): """Base pip exception""" +class ConfigurationError(PipError): + """General exception in configuration""" + + class InstallationError(PipError): """General exception during installation""" @@ -158,7 +162,8 @@ class HashMissing(HashError): self.gotten_hash = gotten_hash def body(self): - from pip.utils.hashes import FAVORITE_HASH # Dodge circular import. + # Dodge circular import. 
+ from pip._internal.utils.hashes import FAVORITE_HASH package = None if self.req: @@ -237,3 +242,27 @@ class HashMismatch(HashError): self.gots[hash_name].hexdigest()) prefix = ' or' return '\n'.join(lines) + + +class UnsupportedPythonVersion(InstallationError): + """Unsupported python version according to Requires-Python package + metadata.""" + + +class ConfigurationFileCouldNotBeLoaded(ConfigurationError): + """When there are errors while loading a configuration file + """ + + def __init__(self, reason="could not be loaded", fname=None, error=None): + super(ConfigurationFileCouldNotBeLoaded, self).__init__(error) + self.reason = reason + self.fname = fname + self.error = error + + def __str__(self): + if self.fname is not None: + message_part = " in {}.".format(self.fname) + else: + assert self.error is not None + message_part = ".\n{}\n".format(self.error.message) + return "Configuration file {}{}".format(self.reason, message_part) diff --git a/Shared/lib/python3.4/site-packages/pip/index.py b/Shared/lib/python3.4/site-packages/pip/_internal/index.py similarity index 62% rename from Shared/lib/python3.4/site-packages/pip/index.py rename to Shared/lib/python3.4/site-packages/pip/_internal/index.py index ba0bd6c..8c2f24f 100644 --- a/Shared/lib/python3.4/site-packages/pip/index.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/index.py @@ -1,41 +1,46 @@ """Routines related to PyPI, indexes""" from __future__ import absolute_import -import logging import cgi -from collections import namedtuple import itertools -import sys -import os -import re +import logging import mimetypes +import os import posixpath -import warnings +import re +import sys +from collections import namedtuple +from pip._vendor import html5lib, requests, six +from pip._vendor.distlib.compat import unescape +from pip._vendor.packaging import specifiers +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.version import parse as parse_version +from 
pip._vendor.requests.exceptions import SSLError from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request -from pip.compat import ipaddress -from pip.utils import ( - cached_property, splitext, normalize_path, - ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, -) -from pip.utils.deprecation import RemovedInPip9Warning, RemovedInPip10Warning -from pip.utils.logging import indent_log -from pip.exceptions import ( - DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename, +from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path +from pip._internal.exceptions import ( + BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename, UnsupportedWheel, ) -from pip.download import HAS_TLS, is_url, path_to_url, url_to_path -from pip.wheel import Wheel, wheel_ext -from pip.pep425tags import supported_tags -from pip._vendor import html5lib, requests, six -from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.requests.exceptions import SSLError +from pip._internal.models.candidate import InstallationCandidate +from pip._internal.models.format_control import FormatControl +from pip._internal.models.index import PyPI +from pip._internal.models.link import Link +from pip._internal.pep425tags import get_supported +from pip._internal.utils.compat import ipaddress +from pip._internal.utils.deprecation import deprecated +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, normalize_path, + remove_auth_from_url, +) +from pip._internal.utils.packaging import check_requires_python +from pip._internal.wheel import Wheel, wheel_ext - -__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder'] +__all__ = ['FormatControl', 'PackageFinder'] SECURE_ORIGINS = [ @@ -54,45 +59,120 @@ SECURE_ORIGINS 
= [ logger = logging.getLogger(__name__) -class InstallationCandidate(object): +def _get_content_type(url, session): + """Get the Content-Type of the given url, using a HEAD request""" + scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) + if scheme not in {'http', 'https'}: + # FIXME: some warning or something? + # assertion error? + return '' - def __init__(self, project, version, location): - self.project = project - self.version = parse_version(version) - self.location = location - self._key = (self.project, self.version, self.location) + resp = session.head(url, allow_redirects=True) + resp.raise_for_status() - def __repr__(self): - return "".format( - self.project, self.version, self.location, + return resp.headers.get("Content-Type", "") + + +def _handle_get_page_fail(link, reason, url, meth=None): + if meth is None: + meth = logger.debug + meth("Could not fetch URL %s: %s - skipping", link, reason) + + +def _get_html_page(link, session=None): + if session is None: + raise TypeError( + "_get_html_page() missing 1 required keyword argument: 'session'" ) - def __hash__(self): - return hash(self._key) + url = link.url + url = url.split('#', 1)[0] - def __lt__(self, other): - return self._compare(other, lambda s, o: s < o) + # Check for VCS schemes that do not support lookup as web pages. 
+ from pip._internal.vcs import VcsSupport + for scheme in VcsSupport.schemes: + if url.lower().startswith(scheme) and url[len(scheme)] in '+:': + logger.debug('Cannot look at %s URL %s', scheme, link) + return None - def __le__(self, other): - return self._compare(other, lambda s, o: s <= o) + try: + filename = link.filename + for bad_ext in ARCHIVE_EXTENSIONS: + if filename.endswith(bad_ext): + content_type = _get_content_type(url, session=session) + if content_type.lower().startswith('text/html'): + break + else: + logger.debug( + 'Skipping page %s because of Content-Type: %s', + link, + content_type, + ) + return - def __eq__(self, other): - return self._compare(other, lambda s, o: s == o) + logger.debug('Getting page %s', url) - def __ge__(self, other): - return self._compare(other, lambda s, o: s >= o) + # Tack index.html onto file:// URLs that point to directories + (scheme, netloc, path, params, query, fragment) = \ + urllib_parse.urlparse(url) + if (scheme == 'file' and + os.path.isdir(urllib_request.url2pathname(path))): + # add trailing slash if not present so urljoin doesn't trim + # final segment + if not url.endswith('/'): + url += '/' + url = urllib_parse.urljoin(url, 'index.html') + logger.debug(' file: URL is directory, getting %s', url) - def __gt__(self, other): - return self._compare(other, lambda s, o: s > o) + resp = session.get( + url, + headers={ + "Accept": "text/html", + # We don't want to blindly returned cached data for + # /simple/, because authors generally expecting that + # twine upload && pip install will function, but if + # they've done a pip install in the last ~10 minutes + # it won't. 
Thus by setting this to zero we will not + # blindly use any cached data, however the benefit of + # using max-age=0 instead of no-cache, is that we will + # still support conditional requests, so we will still + # minimize traffic sent in cases where the page hasn't + # changed at all, we will just always incur the round + # trip for the conditional GET now instead of only + # once per 10 minutes. + # For more information, please see pypa/pip#5670. + "Cache-Control": "max-age=0", + }, + ) + resp.raise_for_status() - def __ne__(self, other): - return self._compare(other, lambda s, o: s != o) + # The check for archives above only works if the url ends with + # something that looks like an archive. However that is not a + # requirement of an url. Unless we issue a HEAD request on every + # url we cannot know ahead of time for sure if something is HTML + # or not. However we can check after we've downloaded it. + content_type = resp.headers.get('Content-Type', 'unknown') + if not content_type.lower().startswith("text/html"): + logger.debug( + 'Skipping page %s because of Content-Type: %s', + link, + content_type, + ) + return - def _compare(self, other, method): - if not isinstance(other, InstallationCandidate): - return NotImplemented - - return method(self._key, other._key) + inst = HTMLPage(resp.content, resp.url, resp.headers) + except requests.HTTPError as exc: + _handle_get_page_fail(link, exc, url) + except SSLError as exc: + reason = "There was a problem confirming the ssl certificate: " + reason += str(exc) + _handle_get_page_fail(link, reason, url, meth=logger.info) + except requests.ConnectionError as exc: + _handle_get_page_fail(link, "connection error: %s" % exc, url) + except requests.Timeout: + _handle_get_page_fail(link, "timed out", url) + else: + return inst class PackageFinder(object): @@ -104,12 +184,25 @@ class PackageFinder(object): def __init__(self, find_links, index_urls, allow_all_prereleases=False, trusted_hosts=None, 
process_dependency_links=False, - session=None, format_control=None): + session=None, format_control=None, platform=None, + versions=None, abi=None, implementation=None, + prefer_binary=False): """Create a PackageFinder. :param format_control: A FormatControl object or None. Used to control the selection of source packages / binary packages when consulting the index and links. + :param platform: A string or None. If None, searches for packages + that are supported by the current system. Otherwise, will find + packages that can be built on the platform passed in. These + packages will only be downloaded for distribution: they will + not be built locally. + :param versions: A list of strings or None. This is passed directly + to pep425tags.py in the get_supported() method. + :param abi: A string or None. This is passed directly + to pep425tags.py in the get_supported() method. + :param implementation: A string or None. This is passed directly + to pep425tags.py in the get_supported() method. """ if session is None: raise TypeError( @@ -153,6 +246,17 @@ class PackageFinder(object): # The Session we'll use to make requests self.session = session + # The valid tags to check potential found wheel candidates against + self.valid_tags = get_supported( + versions=versions, + platform=platform, + abi=abi, + impl=implementation, + ) + + # Do we prefer old, but valid, binary dist over new source dist + self.prefer_binary = prefer_binary + # If we don't have TLS enabled, then WARN if anyplace we're looking # relies on TLS. 
if not HAS_TLS: @@ -166,16 +270,31 @@ class PackageFinder(object): ) break + def get_formatted_locations(self): + lines = [] + if self.index_urls and self.index_urls != [PyPI.simple_url]: + lines.append( + "Looking in indexes: {}".format(", ".join( + remove_auth_from_url(url) for url in self.index_urls)) + ) + if self.find_links: + lines.append( + "Looking in links: {}".format(", ".join(self.find_links)) + ) + return "\n".join(lines) + def add_dependency_links(self, links): - # # FIXME: this shouldn't be global list this, it should only - # # apply to requirements of the package that specifies the - # # dependency_links value - # # FIXME: also, we should track comes_from (i.e., use Link) + # FIXME: this shouldn't be global list this, it should only + # apply to requirements of the package that specifies the + # dependency_links value + # FIXME: also, we should track comes_from (i.e., use Link) if self.process_dependency_links: - warnings.warn( + deprecated( "Dependency Links processing has been deprecated and will be " "removed in a future release.", - RemovedInPip9Warning, + replacement="PEP 508 URL dependencies", + gone_in="18.2", + issue=4187, ) self.dependency_links.extend(links) @@ -218,14 +337,16 @@ class PackageFinder(object): else: logger.warning( "Url '%s' is ignored: it is neither a file " - "nor a directory.", url) + "nor a directory.", url, + ) elif is_url(url): # Only add url with clear scheme urls.append(url) else: logger.warning( "Url '%s' is ignored. It is either a non-existing " - "path or lacks a specific scheme.", url) + "path or lacks a specific scheme.", url, + ) return files, urls @@ -236,25 +357,34 @@ class PackageFinder(object): If not finding wheels, then sorted by version only. If finding wheels, then the sort order is by version, then: 1. existing installs - 2. wheels ordered via Wheel.support_index_min() + 2. wheels ordered via Wheel.support_index_min(self.valid_tags) 3. 
source archives + If prefer_binary was set, then all wheels are sorted above sources. Note: it was considered to embed this logic into the Link comparison operators, but then different sdist links with the same version, would have to be considered equal """ - support_num = len(supported_tags) + support_num = len(self.valid_tags) + build_tag = tuple() + binary_preference = 0 if candidate.location.is_wheel: # can raise InvalidWheelFilename wheel = Wheel(candidate.location.filename) - if not wheel.supported(): + if not wheel.supported(self.valid_tags): raise UnsupportedWheel( "%s is not a supported wheel for this platform. It " "can't be sorted." % wheel.filename ) - pri = -(wheel.support_index_min()) + if self.prefer_binary: + binary_preference = 1 + pri = -(wheel.support_index_min(self.valid_tags)) + if wheel.build_tag is not None: + match = re.match(r'^(\d+)(.*)$', wheel.build_tag) + build_tag_groups = match.groups() + build_tag = (int(build_tag_groups[0]), build_tag_groups[1]) else: # sdist pri = -(support_num) - return (candidate.version, pri) + return (binary_preference, candidate.version, build_tag, pri) def _validate_secure_origin(self, logger, location): # Determine if this url used a secure transport mechanism @@ -318,9 +448,9 @@ class PackageFinder(object): # log a warning that we are ignoring it. logger.warning( "The repository located at %s is not a trusted or secure host and " - "is being ignored. If this repository is available via HTTPS it " - "is recommended to use HTTPS instead, otherwise you may silence " - "this warning and allow it anyways with '--trusted-host %s'.", + "is being ignored. 
If this repository is available via HTTPS we " + "recommend you use HTTPS instead, otherwise you may silence " + "this warning and allow it anyway with '--trusted-host %s'.", parsed.hostname, parsed.hostname, ) @@ -335,7 +465,9 @@ class PackageFinder(object): """ def mkurl_pypi_url(url): - loc = posixpath.join(url, urllib_parse.quote(project_name.lower())) + loc = posixpath.join( + url, + urllib_parse.quote(canonicalize_name(project_name))) # For maximum compatibility with easy_install, ensure the path # ends in a trailing slash. Although this isn't in the spec # (and PyPI can handle it without the slash) some other index @@ -358,13 +490,13 @@ class PackageFinder(object): index_locations = self._get_index_urls_locations(project_name) index_file_loc, index_url_loc = self._sort_locations(index_locations) fl_file_loc, fl_url_loc = self._sort_locations( - self.find_links, expand_dir=True) + self.find_links, expand_dir=True, + ) dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links) - file_locations = ( - Link(url) for url in itertools.chain( - index_file_loc, fl_file_loc, dep_file_loc) - ) + file_locations = (Link(url) for url in itertools.chain( + index_file_loc, fl_file_loc, dep_file_loc, + )) # We trust every url that the user has given us whether it was given # via --index-url or --find-links @@ -386,7 +518,7 @@ class PackageFinder(object): logger.debug('* %s', location) canonical_name = canonicalize_name(project_name) - formats = fmt_ctl_formats(self.format_control, canonical_name) + formats = self.format_control.get_allowed_formats(canonical_name) search = Search(project_name, canonical_name, formats) find_links_versions = self._package_versions( # We trust every directly linked archive in find_links @@ -399,7 +531,7 @@ class PackageFinder(object): logger.debug('Analyzing links from page %s', page.url) with indent_log(): page_versions.extend( - self._package_versions(page.links, search) + self._package_versions(page.iter_links(), search) ) 
dependency_versions = self._package_versions( @@ -479,7 +611,7 @@ class PackageFinder(object): req, ', '.join( sorted( - set(str(c.version) for c in all_candidates), + {str(c.version) for c in all_candidates}, key=parse_version, ) ) @@ -579,7 +711,6 @@ class PackageFinder(object): def _link_package_versions(self, link, search): """Return an InstallationCandidate or None""" - version = None if link.egg_fragment: egg_info = link.egg_fragment @@ -591,11 +722,13 @@ class PackageFinder(object): return if ext not in SUPPORTED_EXTENSIONS: self._log_skipped_link( - link, 'unsupported archive format: %s' % ext) + link, 'unsupported archive format: %s' % ext, + ) return if "binary" not in search.formats and ext == wheel_ext: self._log_skipped_link( - link, 'No binaries permitted for %s' % search.supplied) + link, 'No binaries permitted for %s' % search.supplied, + ) return if "macosx10" in link.path and ext == '.zip': self._log_skipped_link(link, 'macosx10 one') @@ -610,7 +743,8 @@ class PackageFinder(object): self._log_skipped_link( link, 'wrong project name (not %s)' % search.supplied) return - if not wheel.supported(): + + if not wheel.supported(self.valid_tags): self._log_skipped_link( link, 'it is not compatible with this Python') return @@ -620,14 +754,15 @@ class PackageFinder(object): # This should be up by the search.ok_binary check, but see issue 2700. 
if "source" not in search.formats and ext != wheel_ext: self._log_skipped_link( - link, 'No sources permitted for %s' % search.supplied) + link, 'No sources permitted for %s' % search.supplied, + ) return if not version: version = egg_info_matches(egg_info, search.supplied, link) if version is None: self._log_skipped_link( - link, 'wrong project name (not %s)' % search.supplied) + link, 'Missing project version for %s' % search.supplied) return match = self._py_version_re.search(version) @@ -638,12 +773,24 @@ class PackageFinder(object): self._log_skipped_link( link, 'Python version is incorrect') return + try: + support_this_python = check_requires_python(link.requires_python) + except specifiers.InvalidSpecifier: + logger.debug("Package %s has an invalid Requires-Python entry: %s", + link.filename, link.requires_python) + support_this_python = True + + if not support_this_python: + logger.debug("The package %s is incompatible with the python" + "version in use. Acceptable python versions are:%s", + link, link.requires_python) + return logger.debug('Found link %s, version: %s', link, version) return InstallationCandidate(search.supplied, version, link) def _get_page(self, link): - return HTMLPage.get_page(link, session=self.session) + return _get_html_page(link, session=self.session) def egg_info_matches( @@ -663,7 +810,7 @@ def egg_info_matches( return None if search_name is None: full_match = match.group(0) - return full_match[full_match.index('-'):] + return full_match.split('-', 1)[-1] name = match.group(0).lower() # To match the "safe" name that pkg_resources creates: name = name.replace('_', '-') @@ -675,365 +822,71 @@ def egg_info_matches( return None +def _determine_base_url(document, page_url): + """Determine the HTML document's base URL. + + This looks for a ```` tag in the HTML document. If present, its href + attribute denotes the base URL of anchor tags in the document. 
If there is + no such tag (or if it does not have a valid href attribute), the HTML + file's URL is used as the base URL. + + :param document: An HTML document representation. The current + implementation expects the result of ``html5lib.parse()``. + :param page_url: The URL of the HTML document. + """ + for base in document.findall(".//base"): + href = base.get("href") + if href is not None: + return href + return page_url + + +def _get_encoding_from_headers(headers): + """Determine if we have any encoding information in our headers. + """ + if headers and "Content-Type" in headers: + content_type, params = cgi.parse_header(headers["Content-Type"]) + if "charset" in params: + return params['charset'] + return None + + +_CLEAN_LINK_RE = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) + + +def _clean_link(url): + """Makes sure a link is fully encoded. That is, if a ' ' shows up in + the link, it will be rewritten to %20 (while not over-quoting + % or other characters).""" + return _CLEAN_LINK_RE.sub(lambda match: '%%%2x' % ord(match.group(0)), url) + + class HTMLPage(object): """Represents one page, along with its URL""" def __init__(self, content, url, headers=None): - # Determine if we have any encoding information in our headers - encoding = None - if headers and "Content-Type" in headers: - content_type, params = cgi.parse_header(headers["Content-Type"]) - - if "charset" in params: - encoding = params['charset'] - self.content = content - self.parsed = html5lib.parse( - self.content, - encoding=encoding, - namespaceHTMLElements=False, - ) self.url = url self.headers = headers def __str__(self): return self.url - @classmethod - def get_page(cls, link, skip_archives=True, session=None): - if session is None: - raise TypeError( - "get_page() missing 1 required keyword argument: 'session'" - ) - - url = link.url - url = url.split('#', 1)[0] - - # Check for VCS schemes that do not support lookup as web pages. 
- from pip.vcs import VcsSupport - for scheme in VcsSupport.schemes: - if url.lower().startswith(scheme) and url[len(scheme)] in '+:': - logger.debug('Cannot look at %s URL %s', scheme, link) - return None - - try: - if skip_archives: - filename = link.filename - for bad_ext in ARCHIVE_EXTENSIONS: - if filename.endswith(bad_ext): - content_type = cls._get_content_type( - url, session=session, - ) - if content_type.lower().startswith('text/html'): - break - else: - logger.debug( - 'Skipping page %s because of Content-Type: %s', - link, - content_type, - ) - return - - logger.debug('Getting page %s', url) - - # Tack index.html onto file:// URLs that point to directories - (scheme, netloc, path, params, query, fragment) = \ - urllib_parse.urlparse(url) - if (scheme == 'file' and - os.path.isdir(urllib_request.url2pathname(path))): - # add trailing slash if not present so urljoin doesn't trim - # final segment - if not url.endswith('/'): - url += '/' - url = urllib_parse.urljoin(url, 'index.html') - logger.debug(' file: URL is directory, getting %s', url) - - resp = session.get( - url, - headers={ - "Accept": "text/html", - "Cache-Control": "max-age=600", - }, - ) - resp.raise_for_status() - - # The check for archives above only works if the url ends with - # something that looks like an archive. However that is not a - # requirement of an url. Unless we issue a HEAD request on every - # url we cannot know ahead of time for sure if something is HTML - # or not. However we can check after we've downloaded it. 
- content_type = resp.headers.get('Content-Type', 'unknown') - if not content_type.lower().startswith("text/html"): - logger.debug( - 'Skipping page %s because of Content-Type: %s', - link, - content_type, - ) - return - - inst = cls(resp.content, resp.url, resp.headers) - except requests.HTTPError as exc: - cls._handle_fail(link, exc, url) - except SSLError as exc: - reason = ("There was a problem confirming the ssl certificate: " - "%s" % exc) - cls._handle_fail(link, reason, url, meth=logger.info) - except requests.ConnectionError as exc: - cls._handle_fail(link, "connection error: %s" % exc, url) - except requests.Timeout: - cls._handle_fail(link, "timed out", url) - else: - return inst - - @staticmethod - def _handle_fail(link, reason, url, meth=None): - if meth is None: - meth = logger.debug - - meth("Could not fetch URL %s: %s - skipping", link, reason) - - @staticmethod - def _get_content_type(url, session): - """Get the Content-Type of the given url, using a HEAD request""" - scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) - if scheme not in ('http', 'https'): - # FIXME: some warning or something? - # assertion error? 
- return '' - - resp = session.head(url, allow_redirects=True) - resp.raise_for_status() - - return resp.headers.get("Content-Type", "") - - @cached_property - def base_url(self): - bases = [ - x for x in self.parsed.findall(".//base") - if x.get("href") is not None - ] - if bases and bases[0].get("href"): - return bases[0].get("href") - else: - return self.url - - @property - def links(self): + def iter_links(self): """Yields all links in the page""" - for anchor in self.parsed.findall(".//a"): + document = html5lib.parse( + self.content, + transport_encoding=_get_encoding_from_headers(self.headers), + namespaceHTMLElements=False, + ) + base_url = _determine_base_url(document, self.url) + for anchor in document.findall(".//a"): if anchor.get("href"): href = anchor.get("href") - url = self.clean_link( - urllib_parse.urljoin(self.base_url, href) - ) - yield Link(url, self) - - _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) - - def clean_link(self, url): - """Makes sure a link is fully encoded. 
That is, if a ' ' shows up in - the link, it will be rewritten to %20 (while not over-quoting - % or other characters).""" - return self._clean_re.sub( - lambda match: '%%%2x' % ord(match.group(0)), url) - - -class Link(object): - - def __init__(self, url, comes_from=None): - - # url can be a UNC windows share - if url.startswith('\\\\'): - url = path_to_url(url) - - self.url = url - self.comes_from = comes_from - - def __str__(self): - if self.comes_from: - return '%s (from %s)' % (self.url, self.comes_from) - else: - return str(self.url) - - def __repr__(self): - return '' % self - - def __eq__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url == other.url - - def __ne__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url != other.url - - def __lt__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url < other.url - - def __le__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url <= other.url - - def __gt__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url > other.url - - def __ge__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url >= other.url - - def __hash__(self): - return hash(self.url) - - @property - def filename(self): - _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) - name = posixpath.basename(path.rstrip('/')) or netloc - name = urllib_parse.unquote(name) - assert name, ('URL %r produced no filename' % self.url) - return name - - @property - def scheme(self): - return urllib_parse.urlsplit(self.url)[0] - - @property - def netloc(self): - return urllib_parse.urlsplit(self.url)[1] - - @property - def path(self): - return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) - - def splitext(self): - return splitext(posixpath.basename(self.path.rstrip('/'))) - - @property - def ext(self): - return self.splitext()[1] 
- - @property - def url_without_fragment(self): - scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) - return urllib_parse.urlunsplit((scheme, netloc, path, query, None)) - - _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') - - @property - def egg_fragment(self): - match = self._egg_fragment_re.search(self.url) - if not match: - return None - return match.group(1) - - _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') - - @property - def subdirectory_fragment(self): - match = self._subdirectory_fragment_re.search(self.url) - if not match: - return None - return match.group(1) - - _hash_re = re.compile( - r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' - ) - - @property - def hash(self): - match = self._hash_re.search(self.url) - if match: - return match.group(2) - return None - - @property - def hash_name(self): - match = self._hash_re.search(self.url) - if match: - return match.group(1) - return None - - @property - def show_url(self): - return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) - - @property - def is_wheel(self): - return self.ext == wheel_ext - - @property - def is_artifact(self): - """ - Determines if this points to an actual artifact (e.g. a tarball) or if - it points to an "abstract" thing like a path or a VCS location. - """ - from pip.vcs import vcs - - if self.scheme in vcs.all_schemes: - return False - - return True - - -FormatControl = namedtuple('FormatControl', 'no_binary only_binary') -"""This object has two fields, no_binary and only_binary. - -If a field is falsy, it isn't set. If it is {':all:'}, it should match all -packages except those listed in the other field. Only one field can be set -to {':all:'} at a time. The rest of the time exact package name matches -are listed, with any given package only showing up in one field at a time. 
-""" - - -def fmt_ctl_handle_mutual_exclude(value, target, other): - new = value.split(',') - while ':all:' in new: - other.clear() - target.clear() - target.add(':all:') - del new[:new.index(':all:') + 1] - if ':none:' not in new: - # Without a none, we want to discard everything as :all: covers it - return - for name in new: - if name == ':none:': - target.clear() - continue - name = canonicalize_name(name) - other.discard(name) - target.add(name) - - -def fmt_ctl_formats(fmt_ctl, canonical_name): - result = set(["binary", "source"]) - if canonical_name in fmt_ctl.only_binary: - result.discard('source') - elif canonical_name in fmt_ctl.no_binary: - result.discard('binary') - elif ':all:' in fmt_ctl.only_binary: - result.discard('source') - elif ':all:' in fmt_ctl.no_binary: - result.discard('binary') - return frozenset(result) - - -def fmt_ctl_no_binary(fmt_ctl): - fmt_ctl_handle_mutual_exclude( - ':all:', fmt_ctl.no_binary, fmt_ctl.only_binary) - - -def fmt_ctl_no_use_wheel(fmt_ctl): - fmt_ctl_no_binary(fmt_ctl) - warnings.warn( - '--no-use-wheel is deprecated and will be removed in the future. 
' - ' Please use --no-binary :all: instead.', RemovedInPip10Warning, - stacklevel=2) + url = _clean_link(urllib_parse.urljoin(base_url, href)) + pyrequire = anchor.get('data-requires-python') + pyrequire = unescape(pyrequire) if pyrequire else None + yield Link(url, self.url, requires_python=pyrequire) Search = namedtuple('Search', 'supplied canonical formats') diff --git a/Shared/lib/python3.4/site-packages/pip/locations.py b/Shared/lib/python3.4/site-packages/pip/_internal/locations.py similarity index 81% rename from Shared/lib/python3.4/site-packages/pip/locations.py rename to Shared/lib/python3.4/site-packages/pip/_internal/locations.py index 1bd0fae..183aaa3 100644 --- a/Shared/lib/python3.4/site-packages/pip/locations.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/locations.py @@ -3,15 +3,15 @@ from __future__ import absolute_import import os import os.path +import platform import site import sys +import sysconfig +from distutils import sysconfig as distutils_sysconfig +from distutils.command.install import SCHEME_KEYS # type: ignore -from distutils import sysconfig -from distutils.command.install import install, SCHEME_KEYS # noqa - -from pip.compat import WINDOWS, expanduser -from pip.utils import appdirs - +from pip._internal.utils import appdirs +from pip._internal.utils.compat import WINDOWS, expanduser # Application Directories USER_CACHE_DIR = appdirs.user_cache_dir("pip") @@ -73,15 +73,25 @@ else: "The folder you are executing pip from can no longer be found." ) -# under Mac OS X + virtualenv sys.prefix is not properly resolved +# under macOS + virtualenv sys.prefix is not properly resolved # it is something like /path/to/python/bin/.. 
# Note: using realpath due to tmp dirs on OSX being symlinks src_prefix = os.path.abspath(src_prefix) # FIXME doesn't account for venv linked to global site-packages -site_packages = sysconfig.get_python_lib() -user_site = site.USER_SITE +site_packages = sysconfig.get_path("purelib") +# This is because of a bug in PyPy's sysconfig module, see +# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths +# for more information. +if platform.python_implementation().lower() == "pypy": + site_packages = distutils_sysconfig.get_python_lib() +try: + # Use getusersitepackages if this is present, as it ensures that the + # value is initialised properly. + user_site = site.getusersitepackages() +except AttributeError: + user_site = site.USER_SITE user_dir = expanduser('~') if WINDOWS: bin_py = os.path.join(sys.prefix, 'Scripts') @@ -109,8 +119,7 @@ else: legacy_storage_dir, config_basename, ) - - # Forcing to use /usr/local/bin for standard Mac OS X framework installs + # Forcing to use /usr/local/bin for standard macOS framework installs # Also log to ~/Library/Logs/ for use with the Console.app log viewer if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/': bin_py = '/usr/local/bin' @@ -120,6 +129,9 @@ site_config_files = [ for path in appdirs.site_config_dirs('pip') ] +venv_config_file = os.path.join(sys.prefix, config_basename) +new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename) + def distutils_scheme(dist_name, user=False, home=None, root=None, isolated=False, prefix=None): @@ -143,7 +155,7 @@ def distutils_scheme(dist_name, user=False, home=None, root=None, # NOTE: setting user or home has the side-effect of creating the home dir # or user base for installations during finalize_options() # ideally, we'd prefer a scheme class that has no side-effects. 
- assert not (user and prefix), "user={0} prefix={1}".format(user, prefix) + assert not (user and prefix), "user={} prefix={}".format(user, prefix) i.user = user or i.user if user: i.prefix = "" diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/models/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/models/__init__.py new file mode 100644 index 0000000..7855226 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/models/__init__.py @@ -0,0 +1,2 @@ +"""A package that contains models that represent entities. +""" diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/models/candidate.py b/Shared/lib/python3.4/site-packages/pip/_internal/models/candidate.py new file mode 100644 index 0000000..c736de6 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/models/candidate.py @@ -0,0 +1,23 @@ +from pip._vendor.packaging.version import parse as parse_version + +from pip._internal.utils.models import KeyBasedCompareMixin + + +class InstallationCandidate(KeyBasedCompareMixin): + """Represents a potential "candidate" for installation. + """ + + def __init__(self, project, version, location): + self.project = project + self.version = parse_version(version) + self.location = location + + super(InstallationCandidate, self).__init__( + key=(self.project, self.version, self.location), + defining_class=InstallationCandidate + ) + + def __repr__(self): + return "".format( + self.project, self.version, self.location, + ) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/models/format_control.py b/Shared/lib/python3.4/site-packages/pip/_internal/models/format_control.py new file mode 100644 index 0000000..2748856 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/models/format_control.py @@ -0,0 +1,62 @@ +from pip._vendor.packaging.utils import canonicalize_name + + +class FormatControl(object): + """A helper class for controlling formats from which packages are installed. 
+ If a field is falsy, it isn't set. If it is {':all:'}, it should match all + packages except those listed in the other field. Only one field can be set + to {':all:'} at a time. The rest of the time exact package name matches + are listed, with any given package only showing up in one field at a time. + """ + def __init__(self, no_binary=None, only_binary=None): + self.no_binary = set() if no_binary is None else no_binary + self.only_binary = set() if only_binary is None else only_binary + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "{}({}, {})".format( + self.__class__.__name__, + self.no_binary, + self.only_binary + ) + + @staticmethod + def handle_mutual_excludes(value, target, other): + new = value.split(',') + while ':all:' in new: + other.clear() + target.clear() + target.add(':all:') + del new[:new.index(':all:') + 1] + # Without a none, we want to discard everything as :all: covers it + if ':none:' not in new: + return + for name in new: + if name == ':none:': + target.clear() + continue + name = canonicalize_name(name) + other.discard(name) + target.add(name) + + def get_allowed_formats(self, canonical_name): + result = {"binary", "source"} + if canonical_name in self.only_binary: + result.discard('source') + elif canonical_name in self.no_binary: + result.discard('binary') + elif ':all:' in self.only_binary: + result.discard('source') + elif ':all:' in self.no_binary: + result.discard('binary') + return frozenset(result) + + def disallow_binaries(self): + self.handle_mutual_excludes( + ':all:', self.no_binary, self.only_binary, + ) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/models/index.py b/Shared/lib/python3.4/site-packages/pip/_internal/models/index.py new file mode 100644 index 0000000..870a315 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/models/index.py @@ -0,0 +1,29 @@ +from 
pip._vendor.six.moves.urllib import parse as urllib_parse + + +class PackageIndex(object): + """Represents a Package Index and provides easier access to endpoints + """ + + def __init__(self, url, file_storage_domain): + super(PackageIndex, self).__init__() + self.url = url + self.netloc = urllib_parse.urlsplit(url).netloc + self.simple_url = self._url_for_path('simple') + self.pypi_url = self._url_for_path('pypi') + + # This is part of a temporary hack used to block installs of PyPI + # packages which depend on external urls only necessary until PyPI can + # block such packages themselves + self.file_storage_domain = file_storage_domain + + def _url_for_path(self, path): + return urllib_parse.urljoin(self.url, path) + + +PyPI = PackageIndex( + 'https://pypi.org/', file_storage_domain='files.pythonhosted.org' +) +TestPyPI = PackageIndex( + 'https://test.pypi.org/', file_storage_domain='test-files.pythonhosted.org' +) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/models/link.py b/Shared/lib/python3.4/site-packages/pip/_internal/models/link.py new file mode 100644 index 0000000..5decb7c --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/models/link.py @@ -0,0 +1,141 @@ +import posixpath +import re + +from pip._vendor.six.moves.urllib import parse as urllib_parse + +from pip._internal.download import path_to_url +from pip._internal.utils.misc import splitext +from pip._internal.utils.models import KeyBasedCompareMixin +from pip._internal.wheel import wheel_ext + + +class Link(KeyBasedCompareMixin): + """Represents a parsed link from a Package Index's simple URL + """ + + def __init__(self, url, comes_from=None, requires_python=None): + """ + url: + url of the resource pointed to (href of the link) + comes_from: + instance of HTMLPage where the link was found, or string. + requires_python: + String containing the `Requires-Python` metadata field, specified + in PEP 345. 
This may be specified by a data-requires-python + attribute in the HTML link tag, as described in PEP 503. + """ + + # url can be a UNC windows share + if url.startswith('\\\\'): + url = path_to_url(url) + + self.url = url + self.comes_from = comes_from + self.requires_python = requires_python if requires_python else None + + super(Link, self).__init__( + key=(self.url), + defining_class=Link + ) + + def __str__(self): + if self.requires_python: + rp = ' (requires-python:%s)' % self.requires_python + else: + rp = '' + if self.comes_from: + return '%s (from %s)%s' % (self.url, self.comes_from, rp) + else: + return str(self.url) + + def __repr__(self): + return '' % self + + @property + def filename(self): + _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) + name = posixpath.basename(path.rstrip('/')) or netloc + name = urllib_parse.unquote(name) + assert name, ('URL %r produced no filename' % self.url) + return name + + @property + def scheme(self): + return urllib_parse.urlsplit(self.url)[0] + + @property + def netloc(self): + return urllib_parse.urlsplit(self.url)[1] + + @property + def path(self): + return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) + + def splitext(self): + return splitext(posixpath.basename(self.path.rstrip('/'))) + + @property + def ext(self): + return self.splitext()[1] + + @property + def url_without_fragment(self): + scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) + return urllib_parse.urlunsplit((scheme, netloc, path, query, None)) + + _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') + + @property + def egg_fragment(self): + match = self._egg_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') + + @property + def subdirectory_fragment(self): + match = self._subdirectory_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _hash_re = re.compile( + 
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' + ) + + @property + def hash(self): + match = self._hash_re.search(self.url) + if match: + return match.group(2) + return None + + @property + def hash_name(self): + match = self._hash_re.search(self.url) + if match: + return match.group(1) + return None + + @property + def show_url(self): + return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) + + @property + def is_wheel(self): + return self.ext == wheel_ext + + @property + def is_artifact(self): + """ + Determines if this points to an actual artifact (e.g. a tarball) or if + it points to an "abstract" thing like a path or a VCS location. + """ + from pip._internal.vcs import vcs + + if self.scheme in vcs.all_schemes: + return False + + return True diff --git a/Shared/lib/python3.4/site-packages/pip/operations/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/operations/__init__.py similarity index 100% rename from Shared/lib/python3.4/site-packages/pip/operations/__init__.py rename to Shared/lib/python3.4/site-packages/pip/_internal/operations/__init__.py diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/operations/check.py b/Shared/lib/python3.4/site-packages/pip/_internal/operations/check.py new file mode 100644 index 0000000..799257a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/operations/check.py @@ -0,0 +1,148 @@ +"""Validation of dependencies of packages +""" + +from collections import namedtuple + +from pip._vendor.packaging.utils import canonicalize_name + +from pip._internal.operations.prepare import make_abstract_dist +from pip._internal.utils.misc import get_installed_distributions +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from pip._internal.req.req_install import InstallRequirement # noqa: F401 + from typing import ( # noqa: F401 + Any, Callable, Dict, Iterator, Optional, Set, Tuple, List + ) + + # Shorthands + PackageSet = Dict[str, 
'PackageDetails'] + Missing = Tuple[str, Any] + Conflicting = Tuple[str, str, Any] + + MissingDict = Dict[str, List[Missing]] + ConflictingDict = Dict[str, List[Conflicting]] + CheckResult = Tuple[MissingDict, ConflictingDict] + +PackageDetails = namedtuple('PackageDetails', ['version', 'requires']) + + +def create_package_set_from_installed(**kwargs): + # type: (**Any) -> PackageSet + """Converts a list of distributions into a PackageSet. + """ + # Default to using all packages installed on the system + if kwargs == {}: + kwargs = {"local_only": False, "skip": ()} + + package_set = {} + for dist in get_installed_distributions(**kwargs): + name = canonicalize_name(dist.project_name) + package_set[name] = PackageDetails(dist.version, dist.requires()) + return package_set + + +def check_package_set(package_set, should_ignore=None): + # type: (PackageSet, Optional[Callable[[str], bool]]) -> CheckResult + """Check if a package set is consistent + + If should_ignore is passed, it should be a callable that takes a + package name and returns a boolean. 
+ """ + if should_ignore is None: + def should_ignore(name): + return False + + missing = dict() + conflicting = dict() + + for package_name in package_set: + # Info about dependencies of package_name + missing_deps = set() # type: Set[Missing] + conflicting_deps = set() # type: Set[Conflicting] + + if should_ignore(package_name): + continue + + for req in package_set[package_name].requires: + name = canonicalize_name(req.project_name) # type: str + + # Check if it's missing + if name not in package_set: + missed = True + if req.marker is not None: + missed = req.marker.evaluate() + if missed: + missing_deps.add((name, req)) + continue + + # Check if there's a conflict + version = package_set[name].version # type: str + if not req.specifier.contains(version, prereleases=True): + conflicting_deps.add((name, version, req)) + + if missing_deps: + missing[package_name] = sorted(missing_deps, key=str) + if conflicting_deps: + conflicting[package_name] = sorted(conflicting_deps, key=str) + + return missing, conflicting + + +def check_install_conflicts(to_install): + # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult] + """For checking if the dependency graph would be consistent after \ + installing given requirements + """ + # Start from the current state + package_set = create_package_set_from_installed() + # Install packages + would_be_installed = _simulate_installation_of(to_install, package_set) + + # Only warn about directly-dependent packages; create a whitelist of them + whitelist = _create_whitelist(would_be_installed, package_set) + + return ( + package_set, + check_package_set( + package_set, should_ignore=lambda name: name not in whitelist + ) + ) + + +# NOTE from @pradyunsg +# This required a minor update in dependency link handling logic over at +# operations.prepare.IsSDist.dist() to get it working +def _simulate_installation_of(to_install, package_set): + # type: (List[InstallRequirement], PackageSet) -> Set[str] + """Computes the version 
of packages after installing to_install. + """ + + # Keep track of packages that were installed + installed = set() + + # Modify it as installing requirement_set would (assuming no errors) + for inst_req in to_install: + dist = make_abstract_dist(inst_req).dist(finder=None) + name = canonicalize_name(dist.key) + package_set[name] = PackageDetails(dist.version, dist.requires()) + + installed.add(name) + + return installed + + +def _create_whitelist(would_be_installed, package_set): + # type: (Set[str], PackageSet) -> Set[str] + packages_affected = set(would_be_installed) + + for package_name in package_set: + if package_name in packages_affected: + continue + + for req in package_set[package_name].requires: + if canonicalize_name(req.name) in packages_affected: + packages_affected.add(package_name) + break + + return packages_affected diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/operations/freeze.py b/Shared/lib/python3.4/site-packages/pip/_internal/operations/freeze.py new file mode 100644 index 0000000..beb2feb --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/operations/freeze.py @@ -0,0 +1,264 @@ +from __future__ import absolute_import + +import collections +import logging +import os +import re + +from pip._vendor import pkg_resources, six +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.pkg_resources import RequirementParseError + +from pip._internal.exceptions import InstallationError +from pip._internal.req.constructors import ( + install_req_from_editable, install_req_from_line, +) +from pip._internal.req.req_file import COMMENT_RE +from pip._internal.utils.deprecation import deprecated +from pip._internal.utils.misc import ( + dist_is_editable, get_installed_distributions, make_vcs_requirement_url, +) + +logger = logging.getLogger(__name__) + + +def freeze( + requirement=None, + find_links=None, local_only=None, user_only=None, skip_regex=None, + isolated=False, + wheel_cache=None, + 
exclude_editable=False, + skip=()): + find_links = find_links or [] + skip_match = None + + if skip_regex: + skip_match = re.compile(skip_regex).search + + dependency_links = [] + + for dist in pkg_resources.working_set: + if dist.has_metadata('dependency_links.txt'): + dependency_links.extend( + dist.get_metadata_lines('dependency_links.txt') + ) + for link in find_links: + if '#egg=' in link: + dependency_links.append(link) + for link in find_links: + yield '-f %s' % link + installations = {} + for dist in get_installed_distributions(local_only=local_only, + skip=(), + user_only=user_only): + try: + req = FrozenRequirement.from_dist( + dist, + dependency_links + ) + except RequirementParseError: + logger.warning( + "Could not parse requirement: %s", + dist.project_name + ) + continue + if exclude_editable and req.editable: + continue + installations[req.name] = req + + if requirement: + # the options that don't get turned into an InstallRequirement + # should only be emitted once, even if the same option is in multiple + # requirements files, so we need to keep track of what has been emitted + # so that we don't emit it again if it's seen again + emitted_options = set() + # keep track of which files a requirement is in so that we can + # give an accurate warning if a requirement appears multiple times. 
+ req_files = collections.defaultdict(list) + for req_file_path in requirement: + with open(req_file_path) as req_file: + for line in req_file: + if (not line.strip() or + line.strip().startswith('#') or + (skip_match and skip_match(line)) or + line.startswith(( + '-r', '--requirement', + '-Z', '--always-unzip', + '-f', '--find-links', + '-i', '--index-url', + '--pre', + '--trusted-host', + '--process-dependency-links', + '--extra-index-url'))): + line = line.rstrip() + if line not in emitted_options: + emitted_options.add(line) + yield line + continue + + if line.startswith('-e') or line.startswith('--editable'): + if line.startswith('-e'): + line = line[2:].strip() + else: + line = line[len('--editable'):].strip().lstrip('=') + line_req = install_req_from_editable( + line, + isolated=isolated, + wheel_cache=wheel_cache, + ) + else: + line_req = install_req_from_line( + COMMENT_RE.sub('', line).strip(), + isolated=isolated, + wheel_cache=wheel_cache, + ) + + if not line_req.name: + logger.info( + "Skipping line in requirement file [%s] because " + "it's not clear what it would install: %s", + req_file_path, line.strip(), + ) + logger.info( + " (add #egg=PackageName to the URL to avoid" + " this warning)" + ) + elif line_req.name not in installations: + # either it's not installed, or it is installed + # but has been processed already + if not req_files[line_req.name]: + logger.warning( + "Requirement file [%s] contains %s, but that " + "package is not installed", + req_file_path, + COMMENT_RE.sub('', line).strip(), + ) + else: + req_files[line_req.name].append(req_file_path) + else: + yield str(installations[line_req.name]).rstrip() + del installations[line_req.name] + req_files[line_req.name].append(req_file_path) + + # Warn about requirements that were included multiple times (in a + # single requirements file or in different requirements files). 
+ for name, files in six.iteritems(req_files): + if len(files) > 1: + logger.warning("Requirement %s included multiple times [%s]", + name, ', '.join(sorted(set(files)))) + + yield( + '## The following requirements were added by ' + 'pip freeze:' + ) + for installation in sorted( + installations.values(), key=lambda x: x.name.lower()): + if canonicalize_name(installation.name) not in skip: + yield str(installation).rstrip() + + +class FrozenRequirement(object): + def __init__(self, name, req, editable, comments=()): + self.name = name + self.req = req + self.editable = editable + self.comments = comments + + _rev_re = re.compile(r'-r(\d+)$') + _date_re = re.compile(r'-(20\d\d\d\d\d\d)$') + + @classmethod + def _init_args_from_dist(cls, dist, dependency_links): + """ + Compute and return arguments (req, editable, comments) to pass to + FrozenRequirement.__init__(). + + This method is for use in FrozenRequirement.from_dist(). + """ + location = os.path.normcase(os.path.abspath(dist.location)) + comments = [] + from pip._internal.vcs import vcs, get_src_requirement + if dist_is_editable(dist) and vcs.get_backend_name(location): + editable = True + try: + req = get_src_requirement(dist, location) + except InstallationError as exc: + logger.warning( + "Error when trying to get requirement for VCS system %s, " + "falling back to uneditable format", exc + ) + req = None + if req is None: + logger.warning( + 'Could not determine repository location of %s', location + ) + comments.append( + '## !! 
Could not determine repository location' + ) + req = dist.as_requirement() + editable = False + else: + editable = False + req = dist.as_requirement() + specs = req.specs + assert len(specs) == 1 and specs[0][0] in ["==", "==="], \ + 'Expected 1 spec with == or ===; specs = %r; dist = %r' % \ + (specs, dist) + version = specs[0][1] + ver_match = cls._rev_re.search(version) + date_match = cls._date_re.search(version) + if ver_match or date_match: + svn_backend = vcs.get_backend('svn') + if svn_backend: + svn_location = svn_backend().get_location( + dist, + dependency_links, + ) + if not svn_location: + logger.warning( + 'Warning: cannot find svn location for %s', req, + ) + comments.append( + '## FIXME: could not find svn URL in dependency_links ' + 'for this package:' + ) + else: + deprecated( + "SVN editable detection based on dependency links " + "will be dropped in the future.", + replacement=None, + gone_in="18.2", + issue=4187, + ) + comments.append( + '# Installing as editable to satisfy requirement %s:' % + req + ) + if ver_match: + rev = ver_match.group(1) + else: + rev = '{%s}' % date_match.group(1) + editable = True + egg_name = cls.egg_name(dist) + req = make_vcs_requirement_url(svn_location, rev, egg_name) + + return (req, editable, comments) + + @classmethod + def from_dist(cls, dist, dependency_links): + args = cls._init_args_from_dist(dist, dependency_links) + return cls(dist.project_name, *args) + + @staticmethod + def egg_name(dist): + name = dist.egg_name() + match = re.search(r'-py\d\.\d$', name) + if match: + name = name[:match.start()] + return name + + def __str__(self): + req = self.req + if self.editable: + req = '-e %s' % req + return '\n'.join(list(self.comments) + [str(req)]) + '\n' diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/operations/prepare.py b/Shared/lib/python3.4/site-packages/pip/_internal/operations/prepare.py new file mode 100644 index 0000000..104bea3 --- /dev/null +++ 
b/Shared/lib/python3.4/site-packages/pip/_internal/operations/prepare.py @@ -0,0 +1,355 @@ +"""Prepares a distribution for installation +""" + +import logging +import os + +from pip._vendor import pkg_resources, requests + +from pip._internal.build_env import BuildEnvironment +from pip._internal.download import ( + is_dir_url, is_file_url, is_vcs_url, unpack_url, url_to_path, +) +from pip._internal.exceptions import ( + DirectoryUrlHashUnsupported, HashUnpinned, InstallationError, + PreviousBuildDirError, VcsHashUnsupported, +) +from pip._internal.utils.compat import expanduser +from pip._internal.utils.hashes import MissingHashes +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import display_path, normalize_path +from pip._internal.vcs import vcs + +logger = logging.getLogger(__name__) + + +def make_abstract_dist(req): + """Factory to make an abstract dist object. + + Preconditions: Either an editable req with a source_dir, or satisfied_by or + a wheel link, or a non-editable req with a source_dir. + + :return: A concrete DistAbstraction. + """ + if req.editable: + return IsSDist(req) + elif req.link and req.link.is_wheel: + return IsWheel(req) + else: + return IsSDist(req) + + +class DistAbstraction(object): + """Abstracts out the wheel vs non-wheel Resolver.resolve() logic. + + The requirements for anything installable are as follows: + - we must be able to determine the requirement name + (or we can't correctly handle the non-upgrade case). + - we must be able to generate a list of run-time dependencies + without installing any additional packages (or we would + have to either burn time by doing temporary isolated installs + or alternatively violate pips 'don't start installing unless + all requirements are available' rule - neither of which are + desirable). 
+ - for packages with setup requirements, we must also be able + to determine their requirements without installing additional + packages (for the same reason as run-time dependencies) + - we must be able to create a Distribution object exposing the + above metadata. + """ + + def __init__(self, req): + self.req = req + + def dist(self, finder): + """Return a setuptools Dist object.""" + raise NotImplementedError(self.dist) + + def prep_for_dist(self, finder, build_isolation): + """Ensure that we can get a Dist for this requirement.""" + raise NotImplementedError(self.dist) + + +class IsWheel(DistAbstraction): + + def dist(self, finder): + return list(pkg_resources.find_distributions( + self.req.source_dir))[0] + + def prep_for_dist(self, finder, build_isolation): + # FIXME:https://github.com/pypa/pip/issues/1112 + pass + + +class IsSDist(DistAbstraction): + + def dist(self, finder): + dist = self.req.get_dist() + # FIXME: shouldn't be globally added. + if finder and dist.has_metadata('dependency_links.txt'): + finder.add_dependency_links( + dist.get_metadata_lines('dependency_links.txt') + ) + return dist + + def prep_for_dist(self, finder, build_isolation): + # Prepare for building. We need to: + # 1. Load pyproject.toml (if it exists) + # 2. Set up the build environment + + self.req.load_pyproject_toml() + should_isolate = self.req.use_pep517 and build_isolation + + if should_isolate: + # Isolate in a BuildEnvironment and install the build-time + # requirements. 
+ self.req.build_env = BuildEnvironment() + self.req.build_env.install_requirements( + finder, self.req.pyproject_requires, + "Installing build dependencies" + ) + missing = [] + if self.req.requirements_to_check: + check = self.req.requirements_to_check + missing = self.req.build_env.missing_requirements(check) + if missing: + logger.warning( + "Missing build requirements in pyproject.toml for %s.", + self.req, + ) + logger.warning( + "The project does not specify a build backend, and pip " + "cannot fall back to setuptools without %s.", + " and ".join(map(repr, sorted(missing))) + ) + + self.req.run_egg_info() + self.req.assert_source_matches_version() + + +class Installed(DistAbstraction): + + def dist(self, finder): + return self.req.satisfied_by + + def prep_for_dist(self, finder, build_isolation): + pass + + +class RequirementPreparer(object): + """Prepares a Requirement + """ + + def __init__(self, build_dir, download_dir, src_dir, wheel_download_dir, + progress_bar, build_isolation, req_tracker): + super(RequirementPreparer, self).__init__() + + self.src_dir = src_dir + self.build_dir = build_dir + self.req_tracker = req_tracker + + # Where still packed archives should be written to. If None, they are + # not saved, and are deleted immediately after unpacking. + self.download_dir = download_dir + + # Where still-packed .whl files should be written to. If None, they are + # written to the download_dir parameter. Separate to download_dir to + # permit only keeping wheel archives for pip wheel. + if wheel_download_dir: + wheel_download_dir = normalize_path(wheel_download_dir) + self.wheel_download_dir = wheel_download_dir + + # NOTE + # download_dir and wheel_download_dir overlap semantically and may + # be combined if we're willing to have non-wheel archives present in + # the wheelhouse output by 'pip wheel'. + + self.progress_bar = progress_bar + + # Is build isolation allowed? 
+ self.build_isolation = build_isolation + + @property + def _download_should_save(self): + # TODO: Modify to reduce indentation needed + if self.download_dir: + self.download_dir = expanduser(self.download_dir) + if os.path.exists(self.download_dir): + return True + else: + logger.critical('Could not find download directory') + raise InstallationError( + "Could not find or access download directory '%s'" + % display_path(self.download_dir)) + return False + + def prepare_linked_requirement(self, req, session, finder, + upgrade_allowed, require_hashes): + """Prepare a requirement that would be obtained from req.link + """ + # TODO: Breakup into smaller functions + if req.link and req.link.scheme == 'file': + path = url_to_path(req.link.url) + logger.info('Processing %s', display_path(path)) + else: + logger.info('Collecting %s', req) + + with indent_log(): + # @@ if filesystem packages are not marked + # editable in a req, a non deterministic error + # occurs when the script attempts to unpack the + # build directory + req.ensure_has_source_dir(self.build_dir) + # If a checkout exists, it's unwise to keep going. version + # inconsistencies are logged later, but do not fail the + # installation. + # FIXME: this won't upgrade when there's an existing + # package unpacked in `req.source_dir` + # package unpacked in `req.source_dir` + if os.path.exists(os.path.join(req.source_dir, 'setup.py')): + raise PreviousBuildDirError( + "pip can't proceed with requirements '%s' due to a" + " pre-existing build directory (%s). This is " + "likely due to a previous installation that failed" + ". pip is being responsible and not assuming it " + "can delete this. Please delete it and try again." + % (req, req.source_dir) + ) + req.populate_link(finder, upgrade_allowed, require_hashes) + + # We can't hit this spot and have populate_link return None. + # req.satisfied_by is None here (because we're + # guarded) and upgrade has no impact except when satisfied_by + # is not None. 
+ # Then inside find_requirement existing_applicable -> False + # If no new versions are found, DistributionNotFound is raised, + # otherwise a result is guaranteed. + assert req.link + link = req.link + + # Now that we have the real link, we can tell what kind of + # requirements we have and raise some more informative errors + # than otherwise. (For example, we can raise VcsHashUnsupported + # for a VCS URL rather than HashMissing.) + if require_hashes: + # We could check these first 2 conditions inside + # unpack_url and save repetition of conditions, but then + # we would report less-useful error messages for + # unhashable requirements, complaining that there's no + # hash provided. + if is_vcs_url(link): + raise VcsHashUnsupported() + elif is_file_url(link) and is_dir_url(link): + raise DirectoryUrlHashUnsupported() + if not req.original_link and not req.is_pinned: + # Unpinned packages are asking for trouble when a new + # version is uploaded. This isn't a security check, but + # it saves users a surprising hash mismatch in the + # future. + # + # file:/// URLs aren't pinnable, so don't complain + # about them not being pinned. + raise HashUnpinned() + + hashes = req.hashes(trust_internet=not require_hashes) + if require_hashes and not hashes: + # Known-good hashes are missing for this requirement, so + # shim it with a facade object that will provoke hash + # computation and then raise a HashMissing exception + # showing the user what the hash should be. + hashes = MissingHashes() + + try: + download_dir = self.download_dir + # We always delete unpacked sdists after pip ran. + autodelete_unpacked = True + if req.link.is_wheel and self.wheel_download_dir: + # when doing 'pip wheel` we download wheels to a + # dedicated dir. + download_dir = self.wheel_download_dir + if req.link.is_wheel: + if download_dir: + # When downloading, we only unpack wheels to get + # metadata. 
+ autodelete_unpacked = True + else: + # When installing a wheel, we use the unpacked + # wheel. + autodelete_unpacked = False + unpack_url( + req.link, req.source_dir, + download_dir, autodelete_unpacked, + session=session, hashes=hashes, + progress_bar=self.progress_bar + ) + except requests.HTTPError as exc: + logger.critical( + 'Could not install requirement %s because of error %s', + req, + exc, + ) + raise InstallationError( + 'Could not install requirement %s because of HTTP ' + 'error %s for URL %s' % + (req, exc, req.link) + ) + abstract_dist = make_abstract_dist(req) + with self.req_tracker.track(req): + abstract_dist.prep_for_dist(finder, self.build_isolation) + if self._download_should_save: + # Make a .zip of the source_dir we already created. + if req.link.scheme in vcs.all_schemes: + req.archive(self.download_dir) + return abstract_dist + + def prepare_editable_requirement(self, req, require_hashes, use_user_site, + finder): + """Prepare an editable requirement + """ + assert req.editable, "cannot prepare a non-editable req as editable" + + logger.info('Obtaining %s', req) + + with indent_log(): + if require_hashes: + raise InstallationError( + 'The editable requirement %s cannot be installed when ' + 'requiring hashes, because there is no single file to ' + 'hash.' 
% req + ) + req.ensure_has_source_dir(self.src_dir) + req.update_editable(not self._download_should_save) + + abstract_dist = make_abstract_dist(req) + with self.req_tracker.track(req): + abstract_dist.prep_for_dist(finder, self.build_isolation) + + if self._download_should_save: + req.archive(self.download_dir) + req.check_if_exists(use_user_site) + + return abstract_dist + + def prepare_installed_requirement(self, req, require_hashes, skip_reason): + """Prepare an already-installed requirement + """ + assert req.satisfied_by, "req should have been satisfied but isn't" + assert skip_reason is not None, ( + "did not get skip reason skipped but req.satisfied_by " + "is set to %r" % (req.satisfied_by,) + ) + logger.info( + 'Requirement %s: %s (%s)', + skip_reason, req, req.satisfied_by.version + ) + with indent_log(): + if require_hashes: + logger.debug( + 'Since it is already installed, we are trusting this ' + 'package without checking its hash. To ensure a ' + 'completely repeatable environment, install into an ' + 'empty virtualenv.' 
+ ) + abstract_dist = Installed(req) + + return abstract_dist diff --git a/Shared/lib/python3.4/site-packages/pip/pep425tags.py b/Shared/lib/python3.4/site-packages/pip/_internal/pep425tags.py similarity index 78% rename from Shared/lib/python3.4/site-packages/pip/pep425tags.py rename to Shared/lib/python3.4/site-packages/pip/_internal/pep425tags.py index e118457..ab1a029 100644 --- a/Shared/lib/python3.4/site-packages/pip/pep425tags.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/pep425tags.py @@ -1,22 +1,17 @@ """Generate and work with PEP 425 Compatibility Tags.""" from __future__ import absolute_import +import distutils.util +import logging +import platform import re import sys +import sysconfig import warnings -import platform -import logging -import ctypes - -try: - import sysconfig -except ImportError: # pragma nocover - # Python < 2.7 - import distutils.sysconfig as sysconfig -import distutils.util - -from pip.compat import OrderedDict +from collections import OrderedDict +import pip._internal.utils.glibc +from pip._internal.utils.compat import get_extension_suffixes logger = logging.getLogger(__name__) @@ -27,7 +22,7 @@ def get_config_var(var): try: return sysconfig.get_config_var(var) except IOError as e: # Issue #1074 - warnings.warn("{0}".format(e), RuntimeWarning) + warnings.warn("{}".format(e), RuntimeWarning) return None @@ -67,7 +62,7 @@ def get_impl_tag(): """ Returns the Tag for this specific implementation. 
""" - return "{0}{1}".format(get_abbr_impl(), get_impl_ver()) + return "{}{}".format(get_abbr_impl(), get_impl_ver()) def get_flag(var, fallback, expected=True, warn=True): @@ -87,7 +82,7 @@ def get_abi_tag(): (CPython 2, PyPy).""" soabi = get_config_var('SOABI') impl = get_abbr_impl() - if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'): + if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'): d = '' m = '' u = '' @@ -125,7 +120,7 @@ def get_platform(): if sys.platform == 'darwin': # distutils.util.get_platform() returns the release based on the value # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may - # be signficantly older than the user's current machine. + # be significantly older than the user's current machine. release, _, machine = platform.mac_ver() split_ver = release.split('.') @@ -134,7 +129,7 @@ def get_platform(): elif machine == "ppc64" and _is_running_32bit(): machine = "ppc" - return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine) + return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine) # XXX remove distutils dependency result = distutils.util.get_platform().replace('.', '_').replace('-', '_') @@ -148,7 +143,7 @@ def get_platform(): def is_manylinux1_compatible(): # Only Linux, and only x86-64 / i686 - if get_platform() not in ("linux_x86_64", "linux_i686"): + if get_platform() not in {"linux_x86_64", "linux_i686"}: return False # Check for presence of _manylinux module @@ -160,46 +155,17 @@ def is_manylinux1_compatible(): pass # Check glibc version. CentOS 5 uses glibc 2.5. - return have_compatible_glibc(2, 5) - - -def have_compatible_glibc(major, minimum_minor): - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. 
- process_namespace = ctypes.CDLL(None) - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return False - - # Call gnu_get_libc_version, which returns a string like "2.5". - gnu_get_libc_version.restype = ctypes.c_char_p - version_str = gnu_get_libc_version() - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - # Parse string and check against requested version. - version = [int(piece) for piece in version_str.split(".")] - if len(version) < 2: - warnings.warn("Expected glibc version with 2 components major.minor," - " got: %s" % version_str, RuntimeWarning) - return False - return version[0] == major and version[1] >= minimum_minor + return pip._internal.utils.glibc.have_compatible_glibc(2, 5) def get_darwin_arches(major, minor, machine): """Return a list of supported arches (including group arches) for - the given major, minor and machine architecture of an OS X machine. + the given major, minor and machine architecture of an macOS machine. """ arches = [] def _supports_arch(major, minor, arch): - # Looking at the application support for OS X versions in the chart + # Looking at the application support for macOS versions in the chart # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears # our timeline looks roughly like: # @@ -253,12 +219,19 @@ def get_darwin_arches(major, minor, machine): return arches -def get_supported(versions=None, noarch=False): +def get_supported(versions=None, noarch=False, platform=None, + impl=None, abi=None): """Return a list of supported tags for each version specified in `versions`. :param versions: a list of string versions, of the form ["33", "32"], or None. The first version will be assumed to support our ABI. + :param platform: specify the exact platform you want valid + tags for, or None. If None, use the local system platform. 
+ :param impl: specify the exact implementation you want valid + tags for, or None. If None, use the local interpreter impl. + :param abi: specify the exact abi you want valid + tags for, or None. If None, use the local interpreter abi. """ supported = [] @@ -271,32 +244,31 @@ def get_supported(versions=None, noarch=False): for minor in range(version_info[-1], -1, -1): versions.append(''.join(map(str, major + (minor,)))) - impl = get_abbr_impl() + impl = impl or get_abbr_impl() abis = [] - abi = get_abi_tag() + abi = abi or get_abi_tag() if abi: abis[0:0] = [abi] abi3s = set() - import imp - for suffix in imp.get_suffixes(): - if suffix[0].startswith('.abi'): - abi3s.add(suffix[0].split('.', 2)[1]) + for suffix in get_extension_suffixes(): + if suffix.startswith('.abi'): + abi3s.add(suffix.split('.', 2)[1]) abis.extend(sorted(list(abi3s))) abis.append('none') if not noarch: - arch = get_platform() - if sys.platform == 'darwin': + arch = platform or get_platform() + if arch.startswith('macosx'): # support macosx-10.6-intel on macosx-10.9-x86_64 match = _osx_arch_pat.match(arch) if match: name, major, minor, actual_arch = match.groups() - tpl = '{0}_{1}_%i_%s'.format(name, major) + tpl = '{}_{}_%i_%s'.format(name, major) arches = [] for m in reversed(range(int(minor) + 1)): for a in get_darwin_arches(int(major), m, actual_arch): @@ -304,7 +276,7 @@ def get_supported(versions=None, noarch=False): else: # arch pattern didn't match (?!) 
arches = [arch] - elif is_manylinux1_compatible(): + elif platform is None and is_manylinux1_compatible(): arches = [arch.replace('linux', 'manylinux1'), arch] else: arches = [arch] @@ -314,6 +286,15 @@ def get_supported(versions=None, noarch=False): for arch in arches: supported.append(('%s%s' % (impl, versions[0]), abi, arch)) + # abi3 modules compatible with older version of Python + for version in versions[1:]: + # abi3 was introduced in Python 3.2 + if version in {'31', '30'}: + break + for abi in abi3s: # empty set if not Python 3 + for arch in arches: + supported.append(("%s%s" % (impl, version), abi, arch)) + # Has binaries, does not use the Python API: for arch in arches: supported.append(('py%s' % (versions[0][0]), 'none', arch)) @@ -332,7 +313,5 @@ def get_supported(versions=None, noarch=False): return supported -supported_tags = get_supported() -supported_tags_noarch = get_supported(noarch=True) implementation_tag = get_impl_tag() diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/pyproject.py b/Shared/lib/python3.4/site-packages/pip/_internal/pyproject.py new file mode 100644 index 0000000..f938a76 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/pyproject.py @@ -0,0 +1,144 @@ +from __future__ import absolute_import + +import io +import os + +from pip._vendor import pytoml, six + +from pip._internal.exceptions import InstallationError + + +def _is_list_of_str(obj): + return ( + isinstance(obj, list) and + all(isinstance(item, six.string_types) for item in obj) + ) + + +def load_pyproject_toml(use_pep517, pyproject_toml, setup_py, req_name): + """Load the pyproject.toml file. + + Parameters: + use_pep517 - Has the user requested PEP 517 processing? None + means the user hasn't explicitly specified. 
+ pyproject_toml - Location of the project's pyproject.toml file + setup_py - Location of the project's setup.py file + req_name - The name of the requirement we're processing (for + error reporting) + + Returns: + None if we should use the legacy code path, otherwise a tuple + ( + requirements from pyproject.toml, + name of PEP 517 backend, + requirements we should check are installed after setting + up the build environment + ) + """ + has_pyproject = os.path.isfile(pyproject_toml) + has_setup = os.path.isfile(setup_py) + + if has_pyproject: + with io.open(pyproject_toml, encoding="utf-8") as f: + pp_toml = pytoml.load(f) + build_system = pp_toml.get("build-system") + else: + build_system = None + + # The following cases must use PEP 517 + # We check for use_pep517 equalling False because that + # means the user explicitly requested --no-use-pep517 + if has_pyproject and not has_setup: + if use_pep517 is False: + raise InstallationError( + "Disabling PEP 517 processing is invalid: " + "project does not have a setup.py" + ) + use_pep517 = True + elif build_system and "build-backend" in build_system: + if use_pep517 is False: + raise InstallationError( + "Disabling PEP 517 processing is invalid: " + "project specifies a build backend of {} " + "in pyproject.toml".format( + build_system["build-backend"] + ) + ) + use_pep517 = True + + # If we haven't worked out whether to use PEP 517 yet, + # and the user hasn't explicitly stated a preference, + # we do so if the project has a pyproject.toml file. + elif use_pep517 is None: + use_pep517 = has_pyproject + + # At this point, we know whether we're going to use PEP 517. + assert use_pep517 is not None + + # If we're using the legacy code path, there is nothing further + # for us to do here. + if not use_pep517: + return None + + if build_system is None: + # Either the user has a pyproject.toml with no build-system + # section, or the user has no pyproject.toml, but has opted in + # explicitly via --use-pep517. 
+ # In the absence of any explicit backend specification, we + # assume the setuptools backend, and require wheel and a version + # of setuptools that supports that backend. + build_system = { + "requires": ["setuptools>=38.2.5", "wheel"], + "build-backend": "setuptools.build_meta", + } + + # If we're using PEP 517, we have build system information (either + # from pyproject.toml, or defaulted by the code above). + # Note that at this point, we do not know if the user has actually + # specified a backend, though. + assert build_system is not None + + # Ensure that the build-system section in pyproject.toml conforms + # to PEP 518. + error_template = ( + "{package} has a pyproject.toml file that does not comply " + "with PEP 518: {reason}" + ) + + # Specifying the build-system table but not the requires key is invalid + if "requires" not in build_system: + raise InstallationError( + error_template.format(package=req_name, reason=( + "it has a 'build-system' table but not " + "'build-system.requires' which is mandatory in the table" + )) + ) + + # Error out if requires is not a list of strings + requires = build_system["requires"] + if not _is_list_of_str(requires): + raise InstallationError(error_template.format( + package=req_name, + reason="'build-system.requires' is not a list of strings.", + )) + + backend = build_system.get("build-backend") + check = [] + if backend is None: + # If the user didn't specify a backend, we assume they want to use + # the setuptools backend. But we can't be sure they have included + # a version of setuptools which supplies the backend, or wheel + # (which is neede by the backend) in their requirements. So we + # make a note to check that those requirements are present once + # we have set up the environment. + # TODO: Review this - it's quite a lot of work to check for a very + # specific case. 
The problem is, that case is potentially quite + # common - projects that adopted PEP 518 early for the ability to + # specify requirements to execute setup.py, but never considered + # needing to mention the build tools themselves. The original PEP + # 518 code had a similar check (but implemented in a different + # way). + backend = "setuptools.build_meta" + check = ["setuptools>=38.2.5", "wheel"] + + return (requires, backend, check) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/req/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/req/__init__.py new file mode 100644 index 0000000..b270498 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/req/__init__.py @@ -0,0 +1,69 @@ +from __future__ import absolute_import + +import logging + +from .req_install import InstallRequirement +from .req_set import RequirementSet +from .req_file import parse_requirements +from pip._internal.utils.logging import indent_log + + +__all__ = [ + "RequirementSet", "InstallRequirement", + "parse_requirements", "install_given_reqs", +] + +logger = logging.getLogger(__name__) + + +def install_given_reqs(to_install, install_options, global_options=(), + *args, **kwargs): + """ + Install everything in the given list. 
+ + (to be called after having downloaded and unpacked the packages) + """ + + if to_install: + logger.info( + 'Installing collected packages: %s', + ', '.join([req.name for req in to_install]), + ) + + with indent_log(): + for requirement in to_install: + if requirement.conflicts_with: + logger.info( + 'Found existing installation: %s', + requirement.conflicts_with, + ) + with indent_log(): + uninstalled_pathset = requirement.uninstall( + auto_confirm=True + ) + try: + requirement.install( + install_options, + global_options, + *args, + **kwargs + ) + except Exception: + should_rollback = ( + requirement.conflicts_with and + not requirement.install_succeeded + ) + # if install did not succeed, rollback previous uninstall + if should_rollback: + uninstalled_pathset.rollback() + raise + else: + should_commit = ( + requirement.conflicts_with and + requirement.install_succeeded + ) + if should_commit: + uninstalled_pathset.commit() + requirement.remove_temporary_source() + + return to_install diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/req/constructors.py b/Shared/lib/python3.4/site-packages/pip/_internal/req/constructors.py new file mode 100644 index 0000000..4c4641d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/req/constructors.py @@ -0,0 +1,298 @@ +"""Backing implementation for InstallRequirement's various constructors + +The idea here is that these formed a major chunk of InstallRequirement's size +so, moving them and support code dedicated to them outside of that class +helps creates for better understandability for the rest of the code. + +These are meant to be used elsewhere within pip to create instances of +InstallRequirement. 
+""" + +import logging +import os +import re +import traceback + +from pip._vendor.packaging.markers import Marker +from pip._vendor.packaging.requirements import InvalidRequirement, Requirement +from pip._vendor.packaging.specifiers import Specifier +from pip._vendor.pkg_resources import RequirementParseError, parse_requirements + +from pip._internal.download import ( + is_archive_file, is_url, path_to_url, url_to_path, +) +from pip._internal.exceptions import InstallationError +from pip._internal.models.index import PyPI, TestPyPI +from pip._internal.models.link import Link +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.misc import is_installable_dir +from pip._internal.vcs import vcs +from pip._internal.wheel import Wheel + +__all__ = [ + "install_req_from_editable", "install_req_from_line", + "parse_editable" +] + +logger = logging.getLogger(__name__) +operators = Specifier._operators.keys() + + +def _strip_extras(path): + m = re.match(r'^(.+)(\[[^\]]+\])$', path) + extras = None + if m: + path_no_extras = m.group(1) + extras = m.group(2) + else: + path_no_extras = path + + return path_no_extras, extras + + +def parse_editable(editable_req): + """Parses an editable requirement into: + - a requirement name + - an URL + - extras + - editable options + Accepted requirements: + svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir + .[some_extra] + """ + + url = editable_req + + # If a file path is specified with extras, strip off the extras. + url_no_extras, extras = _strip_extras(url) + + if os.path.isdir(url_no_extras): + if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): + raise InstallationError( + "Directory %r is not installable. File 'setup.py' not found." 
% + url_no_extras + ) + # Treating it as code that has already been checked out + url_no_extras = path_to_url(url_no_extras) + + if url_no_extras.lower().startswith('file:'): + package_name = Link(url_no_extras).egg_fragment + if extras: + return ( + package_name, + url_no_extras, + Requirement("placeholder" + extras.lower()).extras, + ) + else: + return package_name, url_no_extras, None + + for version_control in vcs: + if url.lower().startswith('%s:' % version_control): + url = '%s+%s' % (version_control, url) + break + + if '+' not in url: + raise InstallationError( + '%s should either be a path to a local project or a VCS url ' + 'beginning with svn+, git+, hg+, or bzr+' % + editable_req + ) + + vc_type = url.split('+', 1)[0].lower() + + if not vcs.get_backend(vc_type): + error_message = 'For --editable=%s only ' % editable_req + \ + ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \ + ' is currently supported' + raise InstallationError(error_message) + + package_name = Link(url).egg_fragment + if not package_name: + raise InstallationError( + "Could not detect requirement name for '%s', please specify one " + "with #egg=your_package_name" % editable_req + ) + return package_name, url, None + + +def deduce_helpful_msg(req): + """Returns helpful msg in case requirements file does not exist, + or cannot be parsed. + + :params req: Requirements file path + """ + msg = "" + if os.path.exists(req): + msg = " It does exist." + # Try to parse and check if it is a requirements file. + try: + with open(req, 'r') as fp: + # parse first line only + next(parse_requirements(fp.read())) + msg += " The argument you provided " + \ + "(%s) appears to be a" % (req) + \ + " requirements file. If that is the" + \ + " case, use the '-r' flag to install" + \ + " the packages specified within it." + except RequirementParseError: + logger.debug("Cannot parse '%s' as requirements \ + file" % (req), exc_info=1) + else: + msg += " File '%s' does not exist." 
% (req) + return msg + + +# ---- The actual constructors follow ---- + + +def install_req_from_editable( + editable_req, comes_from=None, isolated=False, options=None, + wheel_cache=None, constraint=False +): + name, url, extras_override = parse_editable(editable_req) + if url.startswith('file:'): + source_dir = url_to_path(url) + else: + source_dir = None + + if name is not None: + try: + req = Requirement(name) + except InvalidRequirement: + raise InstallationError("Invalid requirement: '%s'" % name) + else: + req = None + return InstallRequirement( + req, comes_from, source_dir=source_dir, + editable=True, + link=Link(url), + constraint=constraint, + isolated=isolated, + options=options if options else {}, + wheel_cache=wheel_cache, + extras=extras_override or (), + ) + + +def install_req_from_line( + name, comes_from=None, isolated=False, options=None, wheel_cache=None, + constraint=False +): + """Creates an InstallRequirement from a name, which might be a + requirement, directory containing 'setup.py', filename, or URL. + """ + if is_url(name): + marker_sep = '; ' + else: + marker_sep = ';' + if marker_sep in name: + name, markers = name.split(marker_sep, 1) + markers = markers.strip() + if not markers: + markers = None + else: + markers = Marker(markers) + else: + markers = None + name = name.strip() + req = None + path = os.path.normpath(os.path.abspath(name)) + link = None + extras = None + + if is_url(name): + link = Link(name) + else: + p, extras = _strip_extras(path) + looks_like_dir = os.path.isdir(p) and ( + os.path.sep in name or + (os.path.altsep is not None and os.path.altsep in name) or + name.startswith('.') + ) + if looks_like_dir: + if not is_installable_dir(p): + raise InstallationError( + "Directory %r is not installable. Neither 'setup.py' " + "nor 'pyproject.toml' found." 
% name + ) + link = Link(path_to_url(p)) + elif is_archive_file(p): + if not os.path.isfile(p): + logger.warning( + 'Requirement %r looks like a filename, but the ' + 'file does not exist', + name + ) + link = Link(path_to_url(p)) + + # it's a local file, dir, or url + if link: + # Handle relative file URLs + if link.scheme == 'file' and re.search(r'\.\./', link.url): + link = Link( + path_to_url(os.path.normpath(os.path.abspath(link.path)))) + # wheel file + if link.is_wheel: + wheel = Wheel(link.filename) # can raise InvalidWheelFilename + req = "%s==%s" % (wheel.name, wheel.version) + else: + # set the req to the egg fragment. when it's not there, this + # will become an 'unnamed' requirement + req = link.egg_fragment + + # a requirement specifier + else: + req = name + + if extras: + extras = Requirement("placeholder" + extras.lower()).extras + else: + extras = () + if req is not None: + try: + req = Requirement(req) + except InvalidRequirement: + if os.path.sep in req: + add_msg = "It looks like a path." + add_msg += deduce_helpful_msg(req) + elif '=' in req and not any(op in req for op in operators): + add_msg = "= is not a valid operator. Did you mean == ?" 
+ else: + add_msg = traceback.format_exc() + raise InstallationError( + "Invalid requirement: '%s'\n%s" % (req, add_msg) + ) + + return InstallRequirement( + req, comes_from, link=link, markers=markers, + isolated=isolated, + options=options if options else {}, + wheel_cache=wheel_cache, + constraint=constraint, + extras=extras, + ) + + +def install_req_from_req( + req, comes_from=None, isolated=False, wheel_cache=None +): + try: + req = Requirement(req) + except InvalidRequirement: + raise InstallationError("Invalid requirement: '%s'" % req) + + domains_not_allowed = [ + PyPI.file_storage_domain, + TestPyPI.file_storage_domain, + ] + if req.url and comes_from.link.netloc in domains_not_allowed: + # Explicitly disallow pypi packages that depend on external urls + raise InstallationError( + "Packages installed from PyPI cannot depend on packages " + "which are not also hosted on PyPI.\n" + "%s depends on %s " % (comes_from.name, req) + ) + + return InstallRequirement( + req, comes_from, isolated=isolated, wheel_cache=wheel_cache + ) diff --git a/Shared/lib/python3.4/site-packages/pip/req/req_file.py b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_file.py similarity index 82% rename from Shared/lib/python3.4/site-packages/pip/req/req_file.py rename to Shared/lib/python3.4/site-packages/pip/_internal/req/req_file.py index 2cfb479..e7acf7c 100644 --- a/Shared/lib/python3.4/site-packages/pip/req/req_file.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_file.py @@ -4,28 +4,33 @@ Requirements file parsing from __future__ import absolute_import +import optparse import os import re import shlex import sys -import optparse -import warnings -from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves import filterfalse +from pip._vendor.six.moves.urllib import parse as urllib_parse -import pip -from pip.download import get_file_content -from pip.req.req_install import InstallRequirement -from pip.exceptions import 
(RequirementsFileParseError) -from pip.utils.deprecation import RemovedInPip10Warning -from pip import cmdoptions +from pip._internal.cli import cmdoptions +from pip._internal.download import get_file_content +from pip._internal.exceptions import RequirementsFileParseError +from pip._internal.req.constructors import ( + install_req_from_editable, install_req_from_line, +) __all__ = ['parse_requirements'] SCHEME_RE = re.compile(r'^(http|https|file):', re.I) COMMENT_RE = re.compile(r'(^|\s)+#.*$') +# Matches environment variable-style values in '${MY_VARIABLE_1}' with the +# variable name consisting of only uppercase letters, digits or the '_' +# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1, +# 2013 Edition. +ENV_VAR_RE = re.compile(r'(?P\$\{(?P[A-Z0-9_]+)\})') + SUPPORTED_OPTIONS = [ cmdoptions.constraints, cmdoptions.editable, @@ -34,13 +39,6 @@ SUPPORTED_OPTIONS = [ cmdoptions.index_url, cmdoptions.find_links, cmdoptions.extra_index_url, - cmdoptions.allow_external, - cmdoptions.allow_all_external, - cmdoptions.no_allow_external, - cmdoptions.allow_unsafe, - cmdoptions.no_allow_unsafe, - cmdoptions.use_wheel, - cmdoptions.no_use_wheel, cmdoptions.always_unzip, cmdoptions.no_binary, cmdoptions.only_binary, @@ -104,6 +102,7 @@ def preprocess(content, options): lines_enum = join_lines(lines_enum) lines_enum = ignore_comments(lines_enum) lines_enum = skip_regex(lines_enum, options) + lines_enum = expand_env_variables(lines_enum) return lines_enum @@ -127,7 +126,7 @@ def process_line(line, filename, line_number, finder=None, comes_from=None, :param constraint: If True, parsing a constraints file. 
:param options: OptionParser options that we may update """ - parser = build_parser() + parser = build_parser(line) defaults = parser.get_default_values() defaults.index_url = None if finder: @@ -135,13 +134,14 @@ def process_line(line, filename, line_number, finder=None, comes_from=None, defaults.format_control = finder.format_control args_str, options_str = break_args_options(line) if sys.version_info < (2, 7, 3): - # Priori to 2.7.3, shlex can not deal with unicode entries + # Prior to 2.7.3, shlex cannot deal with unicode entries options_str = options_str.encode('utf8') opts, _ = parser.parse_args(shlex.split(options_str), defaults) # preserve for the nested code path line_comes_from = '%s %s (line %s)' % ( - '-c' if constraint else '-r', filename, line_number) + '-c' if constraint else '-r', filename, line_number, + ) # yield a line requirement if args_str: @@ -153,7 +153,7 @@ def process_line(line, filename, line_number, finder=None, comes_from=None, for dest in SUPPORTED_OPTIONS_REQ_DEST: if dest in opts.__dict__ and opts.__dict__[dest]: req_options[dest] = opts.__dict__[dest] - yield InstallRequirement.from_line( + yield install_req_from_line( args_str, line_comes_from, constraint=constraint, isolated=isolated, options=req_options, wheel_cache=wheel_cache ) @@ -161,11 +161,9 @@ def process_line(line, filename, line_number, finder=None, comes_from=None, # yield an editable requirement elif opts.editables: isolated = options.isolated_mode if options else False - default_vcs = options.default_vcs if options else None - yield InstallRequirement.from_editable( + yield install_req_from_editable( opts.editables[0], comes_from=line_comes_from, - constraint=constraint, default_vcs=default_vcs, isolated=isolated, - wheel_cache=wheel_cache + constraint=constraint, isolated=isolated, wheel_cache=wheel_cache ) # parse a nested requirements file @@ -198,35 +196,8 @@ def process_line(line, filename, line_number, finder=None, comes_from=None, # set finder options elif 
finder: - if opts.allow_external: - warnings.warn( - "--allow-external has been deprecated and will be removed in " - "the future. Due to changes in the repository protocol, it no " - "longer has any effect.", - RemovedInPip10Warning, - ) - - if opts.allow_all_external: - warnings.warn( - "--allow-all-external has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if opts.allow_unverified: - warnings.warn( - "--allow-unverified has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - if opts.index_url: finder.index_urls = [opts.index_url] - if opts.use_wheel is False: - finder.use_wheel = False - pip.index.fmt_ctl_no_use_wheel(finder.format_control) if opts.no_index is True: finder.index_urls = [] if opts.extra_index_urls: @@ -267,7 +238,7 @@ def break_args_options(line): return ' '.join(args), ' '.join(options) -def build_parser(): +def build_parser(line): """ Return a parser for parsing requirement lines """ @@ -281,6 +252,8 @@ def build_parser(): # By default optparse sys.exits on parsing errors. We want to wrap # that in our own exception. def parser_exit(self, msg): + # add offending line + msg = 'Invalid requirement: %s\n%s' % (line, msg) raise RequirementsFileParseError(msg) parser.exit = parser_exit @@ -336,7 +309,32 @@ def skip_regex(lines_enum, options): skip_regex = options.skip_requirements_regex if options else None if skip_regex: pattern = re.compile(skip_regex) - lines_enum = filterfalse( - lambda e: pattern.search(e[1]), - lines_enum) + lines_enum = filterfalse(lambda e: pattern.search(e[1]), lines_enum) return lines_enum + + +def expand_env_variables(lines_enum): + """Replace all environment variables that can be retrieved via `os.getenv`. 
+ + The only allowed format for environment variables defined in the + requirement file is `${MY_VARIABLE_1}` to ensure two things: + + 1. Strings that contain a `$` aren't accidentally (partially) expanded. + 2. Ensure consistency across platforms for requirement files. + + These points are the result of a discusssion on the `github pull + request #3514 `_. + + Valid characters in variable names follow the `POSIX standard + `_ and are limited + to uppercase letter, digits and the `_` (underscore). + """ + for line_number, line in lines_enum: + for env_var, var_name in ENV_VAR_RE.findall(line): + value = os.getenv(var_name) + if not value: + continue + + line = line.replace(env_var, value) + + yield line_number, line diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/req/req_install.py b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_install.py new file mode 100644 index 0000000..c2624fe --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_install.py @@ -0,0 +1,860 @@ +from __future__ import absolute_import + +import logging +import os +import shutil +import sys +import sysconfig +import zipfile +from distutils.util import change_root + +from pip._vendor import pkg_resources, six +from pip._vendor.packaging.requirements import Requirement +from pip._vendor.packaging.utils import canonicalize_name +from pip._vendor.packaging.version import Version +from pip._vendor.packaging.version import parse as parse_version +from pip._vendor.pep517.wrappers import Pep517HookCaller + +from pip._internal import wheel +from pip._internal.build_env import NoOpBuildEnvironment +from pip._internal.exceptions import InstallationError +from pip._internal.locations import ( + PIP_DELETE_MARKER_FILENAME, running_under_virtualenv, +) +from pip._internal.models.link import Link +from pip._internal.pyproject import load_pyproject_toml +from pip._internal.req.req_uninstall import UninstallPathSet +from pip._internal.utils.compat import native_str 
+from pip._internal.utils.hashes import Hashes +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + _make_build_dir, ask_path_exists, backup_dir, call_subprocess, + display_path, dist_in_site_packages, dist_in_usersite, ensure_dir, + get_installed_version, rmtree, +) +from pip._internal.utils.packaging import get_metadata +from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.ui import open_spinner +from pip._internal.vcs import vcs +from pip._internal.wheel import move_wheel_files + +logger = logging.getLogger(__name__) + + +class InstallRequirement(object): + """ + Represents something that may be installed later on, may have information + about where to fetch the relavant requirement and also contains logic for + installing the said requirement. + """ + + def __init__(self, req, comes_from, source_dir=None, editable=False, + link=None, update=True, markers=None, + isolated=False, options=None, wheel_cache=None, + constraint=False, extras=()): + assert req is None or isinstance(req, Requirement), req + self.req = req + self.comes_from = comes_from + self.constraint = constraint + if source_dir is not None: + self.source_dir = os.path.normpath(os.path.abspath(source_dir)) + else: + self.source_dir = None + self.editable = editable + + self._wheel_cache = wheel_cache + if link is not None: + self.link = self.original_link = link + else: + self.link = self.original_link = req and req.url and Link(req.url) + + if extras: + self.extras = extras + elif req: + self.extras = { + pkg_resources.safe_extra(extra) for extra in req.extras + } + else: + self.extras = set() + if markers is not None: + self.markers = markers + else: + self.markers = req and req.marker + self._egg_info_path = None + # This holds the pkg_resources.Distribution object if this requirement + # is already available: + self.satisfied_by = None + # This hold the 
pkg_resources.Distribution object if this requirement + # conflicts with another installed distribution: + self.conflicts_with = None + # Temporary build location + self._temp_build_dir = TempDirectory(kind="req-build") + # Used to store the global directory where the _temp_build_dir should + # have been created. Cf _correct_build_location method. + self._ideal_build_dir = None + # True if the editable should be updated: + self.update = update + # Set to True after successful installation + self.install_succeeded = None + # UninstallPathSet of uninstalled distribution (for possible rollback) + self.uninstalled_pathset = None + self.options = options if options else {} + # Set to True after successful preparation of this requirement + self.prepared = False + self.is_direct = False + + self.isolated = isolated + self.build_env = NoOpBuildEnvironment() + + # The static build requirements (from pyproject.toml) + self.pyproject_requires = None + + # Build requirements that we will check are available + # TODO: We don't do this for --no-build-isolation. Should we? + self.requirements_to_check = [] + + # The PEP 517 backend we should use to build the project + self.pep517_backend = None + + # Are we using PEP 517 for this requirement? + # After pyproject.toml has been loaded, the only valid values are True + # and False. Before loading, None is valid (meaning "use the default"). + # Setting an explicit value before loading pyproject.toml is supported, + # but after loading this flag should be treated as read only. 
+ self.use_pep517 = None + + def __str__(self): + if self.req: + s = str(self.req) + if self.link: + s += ' from %s' % self.link.url + elif self.link: + s = self.link.url + else: + s = '' + if self.satisfied_by is not None: + s += ' in %s' % display_path(self.satisfied_by.location) + if self.comes_from: + if isinstance(self.comes_from, six.string_types): + comes_from = self.comes_from + else: + comes_from = self.comes_from.from_path() + if comes_from: + s += ' (from %s)' % comes_from + return s + + def __repr__(self): + return '<%s object: %s editable=%r>' % ( + self.__class__.__name__, str(self), self.editable) + + def populate_link(self, finder, upgrade, require_hashes): + """Ensure that if a link can be found for this, that it is found. + + Note that self.link may still be None - if Upgrade is False and the + requirement is already installed. + + If require_hashes is True, don't use the wheel cache, because cached + wheels, always built locally, have different hashes than the files + downloaded from the index server and thus throw false hash mismatches. + Furthermore, cached wheels at present have undeterministic contents due + to file modification times. + """ + if self.link is None: + self.link = finder.find_requirement(self, upgrade) + if self._wheel_cache is not None and not require_hashes: + old_link = self.link + self.link = self._wheel_cache.get(self.link, self.name) + if old_link != self.link: + logger.debug('Using cached wheel link: %s', self.link) + + # Things that are valid for all kinds of requirements? + @property + def name(self): + if self.req is None: + return None + return native_str(pkg_resources.safe_name(self.req.name)) + + @property + def specifier(self): + return self.req.specifier + + @property + def is_pinned(self): + """Return whether I am pinned to an exact version. + + For example, some-package==1.2 is pinned; some-package>1.2 is not. 
+ """ + specifiers = self.specifier + return (len(specifiers) == 1 and + next(iter(specifiers)).operator in {'==', '==='}) + + @property + def installed_version(self): + return get_installed_version(self.name) + + def match_markers(self, extras_requested=None): + if not extras_requested: + # Provide an extra to safely evaluate the markers + # without matching any extra + extras_requested = ('',) + if self.markers is not None: + return any( + self.markers.evaluate({'extra': extra}) + for extra in extras_requested) + else: + return True + + @property + def has_hash_options(self): + """Return whether any known-good hashes are specified as options. + + These activate --require-hashes mode; hashes specified as part of a + URL do not. + + """ + return bool(self.options.get('hashes', {})) + + def hashes(self, trust_internet=True): + """Return a hash-comparer that considers my option- and URL-based + hashes to be known-good. + + Hashes in URLs--ones embedded in the requirements file, not ones + downloaded from an index server--are almost peers with ones from + flags. They satisfy --require-hashes (whether it was implicitly or + explicitly activated) but do not activate it. md5 and sha224 are not + allowed in flags, which should nudge people toward good algos. We + always OR all hashes together, even ones from URLs. + + :param trust_internet: Whether to trust URL-based (#md5=...) 
hashes + downloaded from the internet, as by populate_link() + + """ + good_hashes = self.options.get('hashes', {}).copy() + link = self.link if trust_internet else self.original_link + if link and link.hash: + good_hashes.setdefault(link.hash_name, []).append(link.hash) + return Hashes(good_hashes) + + def from_path(self): + """Format a nice indicator to show where this "comes from" + """ + if self.req is None: + return None + s = str(self.req) + if self.comes_from: + if isinstance(self.comes_from, six.string_types): + comes_from = self.comes_from + else: + comes_from = self.comes_from.from_path() + if comes_from: + s += '->' + comes_from + return s + + def build_location(self, build_dir): + assert build_dir is not None + if self._temp_build_dir.path is not None: + return self._temp_build_dir.path + if self.req is None: + # for requirement via a path to a directory: the name of the + # package is not available yet so we create a temp directory + # Once run_egg_info will have run, we'll be able + # to fix it via _correct_build_location + # Some systems have /tmp as a symlink which confuses custom + # builds (such as numpy). Thus, we ensure that the real path + # is returned. + self._temp_build_dir.create() + self._ideal_build_dir = build_dir + + return self._temp_build_dir.path + if self.editable: + name = self.name.lower() + else: + name = self.name + # FIXME: Is there a better place to create the build_dir? (hg and bzr + # need this) + if not os.path.exists(build_dir): + logger.debug('Creating directory %s', build_dir) + _make_build_dir(build_dir) + return os.path.join(build_dir, name) + + def _correct_build_location(self): + """Move self._temp_build_dir to self._ideal_build_dir/self.req.name + + For some requirements (e.g. a path to a directory), the name of the + package is not available until we run egg_info, so the build_location + will return a temporary directory and store the _ideal_build_dir. 
+ + This is only called by self.run_egg_info to fix the temporary build + directory. + """ + if self.source_dir is not None: + return + assert self.req is not None + assert self._temp_build_dir.path + assert self._ideal_build_dir.path + old_location = self._temp_build_dir.path + self._temp_build_dir.path = None + + new_location = self.build_location(self._ideal_build_dir) + if os.path.exists(new_location): + raise InstallationError( + 'A package already exists in %s; please remove it to continue' + % display_path(new_location)) + logger.debug( + 'Moving package %s from %s to new location %s', + self, display_path(old_location), display_path(new_location), + ) + shutil.move(old_location, new_location) + self._temp_build_dir.path = new_location + self._ideal_build_dir = None + self.source_dir = os.path.normpath(os.path.abspath(new_location)) + self._egg_info_path = None + + def remove_temporary_source(self): + """Remove the source files from this requirement, if they are marked + for deletion""" + if self.source_dir and os.path.exists( + os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)): + logger.debug('Removing source in %s', self.source_dir) + rmtree(self.source_dir) + self.source_dir = None + self._temp_build_dir.cleanup() + self.build_env.cleanup() + + def check_if_exists(self, use_user_site): + """Find an installed distribution that satisfies or conflicts + with this requirement, and set self.satisfied_by or + self.conflicts_with appropriately. + """ + if self.req is None: + return False + try: + # get_distribution() will resolve the entire list of requirements + # anyway, and we've already determined that we need the requirement + # in question, so strip the marker so that we don't try to + # evaluate it. 
+ no_marker = Requirement(str(self.req)) + no_marker.marker = None + self.satisfied_by = pkg_resources.get_distribution(str(no_marker)) + if self.editable and self.satisfied_by: + self.conflicts_with = self.satisfied_by + # when installing editables, nothing pre-existing should ever + # satisfy + self.satisfied_by = None + return True + except pkg_resources.DistributionNotFound: + return False + except pkg_resources.VersionConflict: + existing_dist = pkg_resources.get_distribution( + self.req.name + ) + if use_user_site: + if dist_in_usersite(existing_dist): + self.conflicts_with = existing_dist + elif (running_under_virtualenv() and + dist_in_site_packages(existing_dist)): + raise InstallationError( + "Will not install to the user site because it will " + "lack sys.path precedence to %s in %s" % + (existing_dist.project_name, existing_dist.location) + ) + else: + self.conflicts_with = existing_dist + return True + + # Things valid for wheels + @property + def is_wheel(self): + return self.link and self.link.is_wheel + + def move_wheel_files(self, wheeldir, root=None, home=None, prefix=None, + warn_script_location=True, use_user_site=False, + pycompile=True): + move_wheel_files( + self.name, self.req, wheeldir, + user=use_user_site, + home=home, + root=root, + prefix=prefix, + pycompile=pycompile, + isolated=self.isolated, + warn_script_location=warn_script_location, + ) + + # Things valid for sdists + @property + def setup_py_dir(self): + return os.path.join( + self.source_dir, + self.link and self.link.subdirectory_fragment or '') + + @property + def setup_py(self): + assert self.source_dir, "No source dir for %s" % self + + setup_py = os.path.join(self.setup_py_dir, 'setup.py') + + # Python2 __file__ should not be unicode + if six.PY2 and isinstance(setup_py, six.text_type): + setup_py = setup_py.encode(sys.getfilesystemencoding()) + + return setup_py + + @property + def pyproject_toml(self): + assert self.source_dir, "No source dir for %s" % self + + pp_toml = 
os.path.join(self.setup_py_dir, 'pyproject.toml') + + # Python2 __file__ should not be unicode + if six.PY2 and isinstance(pp_toml, six.text_type): + pp_toml = pp_toml.encode(sys.getfilesystemencoding()) + + return pp_toml + + def load_pyproject_toml(self): + """Load the pyproject.toml file. + + After calling this routine, all of the attributes related to PEP 517 + processing for this requirement have been set. In particular, the + use_pep517 attribute can be used to determine whether we should + follow the PEP 517 or legacy (setup.py) code path. + """ + pep517_data = load_pyproject_toml( + self.use_pep517, + self.pyproject_toml, + self.setup_py, + str(self) + ) + + if pep517_data is None: + self.use_pep517 = False + else: + self.use_pep517 = True + requires, backend, check = pep517_data + self.requirements_to_check = check + self.pyproject_requires = requires + self.pep517_backend = Pep517HookCaller(self.setup_py_dir, backend) + + def run_egg_info(self): + assert self.source_dir + if self.name: + logger.debug( + 'Running setup.py (path:%s) egg_info for package %s', + self.setup_py, self.name, + ) + else: + logger.debug( + 'Running setup.py (path:%s) egg_info for package from %s', + self.setup_py, self.link, + ) + + with indent_log(): + script = SETUPTOOLS_SHIM % self.setup_py + base_cmd = [sys.executable, '-c', script] + if self.isolated: + base_cmd += ["--no-user-cfg"] + egg_info_cmd = base_cmd + ['egg_info'] + # We can't put the .egg-info files at the root, because then the + # source code will be mistaken for an installed egg, causing + # problems + if self.editable: + egg_base_option = [] + else: + egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info') + ensure_dir(egg_info_dir) + egg_base_option = ['--egg-base', 'pip-egg-info'] + with self.build_env: + call_subprocess( + egg_info_cmd + egg_base_option, + cwd=self.setup_py_dir, + show_stdout=False, + command_desc='python setup.py egg_info') + + if not self.req: + if 
isinstance(parse_version(self.metadata["Version"]), Version): + op = "==" + else: + op = "===" + self.req = Requirement( + "".join([ + self.metadata["Name"], + op, + self.metadata["Version"], + ]) + ) + self._correct_build_location() + else: + metadata_name = canonicalize_name(self.metadata["Name"]) + if canonicalize_name(self.req.name) != metadata_name: + logger.warning( + 'Running setup.py (path:%s) egg_info for package %s ' + 'produced metadata for project name %s. Fix your ' + '#egg=%s fragments.', + self.setup_py, self.name, metadata_name, self.name + ) + self.req = Requirement(metadata_name) + + @property + def egg_info_path(self): + if self._egg_info_path is None: + if self.editable: + base = self.source_dir + else: + base = os.path.join(self.setup_py_dir, 'pip-egg-info') + filenames = os.listdir(base) + if self.editable: + filenames = [] + for root, dirs, files in os.walk(base): + for dir in vcs.dirnames: + if dir in dirs: + dirs.remove(dir) + # Iterate over a copy of ``dirs``, since mutating + # a list while iterating over it can cause trouble. + # (See https://github.com/pypa/pip/pull/462.) + for dir in list(dirs): + # Don't search in anything that looks like a virtualenv + # environment + if ( + os.path.lexists( + os.path.join(root, dir, 'bin', 'python') + ) or + os.path.exists( + os.path.join( + root, dir, 'Scripts', 'Python.exe' + ) + )): + dirs.remove(dir) + # Also don't search through tests + elif dir == 'test' or dir == 'tests': + dirs.remove(dir) + filenames.extend([os.path.join(root, dir) + for dir in dirs]) + filenames = [f for f in filenames if f.endswith('.egg-info')] + + if not filenames: + raise InstallationError( + "Files/directories not found in %s" % base + ) + # if we have more than one match, we pick the toplevel one. This + # can easily be the case if there is a dist folder which contains + # an extracted tarball for testing purposes. 
+ if len(filenames) > 1: + filenames.sort( + key=lambda x: x.count(os.path.sep) + + (os.path.altsep and x.count(os.path.altsep) or 0) + ) + self._egg_info_path = os.path.join(base, filenames[0]) + return self._egg_info_path + + @property + def metadata(self): + if not hasattr(self, '_metadata'): + self._metadata = get_metadata(self.get_dist()) + + return self._metadata + + def get_dist(self): + """Return a pkg_resources.Distribution built from self.egg_info_path""" + egg_info = self.egg_info_path.rstrip(os.path.sep) + base_dir = os.path.dirname(egg_info) + metadata = pkg_resources.PathMetadata(base_dir, egg_info) + dist_name = os.path.splitext(os.path.basename(egg_info))[0] + return pkg_resources.Distribution( + os.path.dirname(egg_info), + project_name=dist_name, + metadata=metadata, + ) + + def assert_source_matches_version(self): + assert self.source_dir + version = self.metadata['version'] + if self.req.specifier and version not in self.req.specifier: + logger.warning( + 'Requested %s, but installing version %s', + self, + version, + ) + else: + logger.debug( + 'Source in %s has version %s, which satisfies requirement %s', + display_path(self.source_dir), + version, + self, + ) + + # For both source distributions and editables + def ensure_has_source_dir(self, parent_dir): + """Ensure that a source_dir is set. + + This will create a temporary build dir if the name of the requirement + isn't known yet. + + :param parent_dir: The ideal pip parent_dir for the source_dir. + Generally src_dir for editables and build_dir for sdists. 
+ :return: self.source_dir + """ + if self.source_dir is None: + self.source_dir = self.build_location(parent_dir) + return self.source_dir + + # For editable installations + def install_editable(self, install_options, + global_options=(), prefix=None): + logger.info('Running setup.py develop for %s', self.name) + + if self.isolated: + global_options = list(global_options) + ["--no-user-cfg"] + + if prefix: + prefix_param = ['--prefix={}'.format(prefix)] + install_options = list(install_options) + prefix_param + + with indent_log(): + # FIXME: should we do --install-headers here too? + with self.build_env: + call_subprocess( + [ + sys.executable, + '-c', + SETUPTOOLS_SHIM % self.setup_py + ] + + list(global_options) + + ['develop', '--no-deps'] + + list(install_options), + + cwd=self.setup_py_dir, + show_stdout=False, + ) + + self.install_succeeded = True + + def update_editable(self, obtain=True): + if not self.link: + logger.debug( + "Cannot update repository at %s; repository location is " + "unknown", + self.source_dir, + ) + return + assert self.editable + assert self.source_dir + if self.link.scheme == 'file': + # Static paths don't get updated + return + assert '+' in self.link.url, "bad url: %r" % self.link.url + if not self.update: + return + vc_type, url = self.link.url.split('+', 1) + backend = vcs.get_backend(vc_type) + if backend: + vcs_backend = backend(self.link.url) + if obtain: + vcs_backend.obtain(self.source_dir) + else: + vcs_backend.export(self.source_dir) + else: + assert 0, ( + 'Unexpected version control type (in %s): %s' + % (self.link, vc_type)) + + # Top-level Actions + def uninstall(self, auto_confirm=False, verbose=False, + use_user_site=False): + """ + Uninstall the distribution currently satisfying this requirement. + + Prompts before removing or modifying files unless + ``auto_confirm`` is True. 
+ + Refuses to delete or modify files outside of ``sys.prefix`` - + thus uninstallation within a virtual environment can only + modify that virtual environment, even if the virtualenv is + linked to global site-packages. + + """ + if not self.check_if_exists(use_user_site): + logger.warning("Skipping %s as it is not installed.", self.name) + return + dist = self.satisfied_by or self.conflicts_with + + uninstalled_pathset = UninstallPathSet.from_dist(dist) + uninstalled_pathset.remove(auto_confirm, verbose) + return uninstalled_pathset + + def _clean_zip_name(self, name, prefix): # only used by archive. + assert name.startswith(prefix + os.path.sep), ( + "name %r doesn't start with prefix %r" % (name, prefix) + ) + name = name[len(prefix) + 1:] + name = name.replace(os.path.sep, '/') + return name + + # TODO: Investigate if this should be kept in InstallRequirement + # Seems to be used only when VCS + downloads + def archive(self, build_dir): + assert self.source_dir + create_archive = True + archive_name = '%s-%s.zip' % (self.name, self.metadata["version"]) + archive_path = os.path.join(build_dir, archive_name) + if os.path.exists(archive_path): + response = ask_path_exists( + 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup, (a)bort ' % + display_path(archive_path), ('i', 'w', 'b', 'a')) + if response == 'i': + create_archive = False + elif response == 'w': + logger.warning('Deleting %s', display_path(archive_path)) + os.remove(archive_path) + elif response == 'b': + dest_file = backup_dir(archive_path) + logger.warning( + 'Backing up %s to %s', + display_path(archive_path), + display_path(dest_file), + ) + shutil.move(archive_path, dest_file) + elif response == 'a': + sys.exit(-1) + if create_archive: + zip = zipfile.ZipFile( + archive_path, 'w', zipfile.ZIP_DEFLATED, + allowZip64=True + ) + dir = os.path.normcase(os.path.abspath(self.setup_py_dir)) + for dirpath, dirnames, filenames in os.walk(dir): + if 'pip-egg-info' in dirnames: + dirnames.remove('pip-egg-info') + for dirname in dirnames: + dirname = os.path.join(dirpath, dirname) + name = self._clean_zip_name(dirname, dir) + zipdir = zipfile.ZipInfo(self.name + '/' + name + '/') + zipdir.external_attr = 0x1ED << 16 # 0o755 + zip.writestr(zipdir, '') + for filename in filenames: + if filename == PIP_DELETE_MARKER_FILENAME: + continue + filename = os.path.join(dirpath, filename) + name = self._clean_zip_name(filename, dir) + zip.write(filename, self.name + '/' + name) + zip.close() + logger.info('Saved %s', display_path(archive_path)) + + def install(self, install_options, global_options=None, root=None, + home=None, prefix=None, warn_script_location=True, + use_user_site=False, pycompile=True): + global_options = global_options if global_options is not None else [] + if self.editable: + self.install_editable( + install_options, global_options, prefix=prefix, + ) + return + if self.is_wheel: + version = wheel.wheel_version(self.source_dir) + wheel.check_compatibility(version, self.name) + + self.move_wheel_files( + self.source_dir, root=root, prefix=prefix, home=home, + warn_script_location=warn_script_location, + use_user_site=use_user_site, pycompile=pycompile, + ) + self.install_succeeded = True + 
return + + # Extend the list of global and install options passed on to + # the setup.py call with the ones from the requirements file. + # Options specified in requirements file override those + # specified on the command line, since the last option given + # to setup.py is the one that is used. + global_options = list(global_options) + \ + self.options.get('global_options', []) + install_options = list(install_options) + \ + self.options.get('install_options', []) + + if self.isolated: + global_options = global_options + ["--no-user-cfg"] + + with TempDirectory(kind="record") as temp_dir: + record_filename = os.path.join(temp_dir.path, 'install-record.txt') + install_args = self.get_install_args( + global_options, record_filename, root, prefix, pycompile, + ) + msg = 'Running setup.py install for %s' % (self.name,) + with open_spinner(msg) as spinner: + with indent_log(): + with self.build_env: + call_subprocess( + install_args + install_options, + cwd=self.setup_py_dir, + show_stdout=False, + spinner=spinner, + ) + + if not os.path.exists(record_filename): + logger.debug('Record file %s not found', record_filename) + return + self.install_succeeded = True + + def prepend_root(path): + if root is None or not os.path.isabs(path): + return path + else: + return change_root(root, path) + + with open(record_filename) as f: + for line in f: + directory = os.path.dirname(line) + if directory.endswith('.egg-info'): + egg_info_dir = prepend_root(directory) + break + else: + logger.warning( + 'Could not find .egg-info directory in install record' + ' for %s', + self, + ) + # FIXME: put the record somewhere + # FIXME: should this be an error? 
+ return + new_lines = [] + with open(record_filename) as f: + for line in f: + filename = line.strip() + if os.path.isdir(filename): + filename += os.path.sep + new_lines.append( + os.path.relpath(prepend_root(filename), egg_info_dir) + ) + new_lines.sort() + ensure_dir(egg_info_dir) + inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt') + with open(inst_files_path, 'w') as f: + f.write('\n'.join(new_lines) + '\n') + + def get_install_args(self, global_options, record_filename, root, prefix, + pycompile): + install_args = [sys.executable, "-u"] + install_args.append('-c') + install_args.append(SETUPTOOLS_SHIM % self.setup_py) + install_args += list(global_options) + \ + ['install', '--record', record_filename] + install_args += ['--single-version-externally-managed'] + + if root is not None: + install_args += ['--root', root] + if prefix is not None: + install_args += ['--prefix', prefix] + + if pycompile: + install_args += ["--compile"] + else: + install_args += ["--no-compile"] + + if running_under_virtualenv(): + py_ver_str = 'python' + sysconfig.get_python_version() + install_args += ['--install-headers', + os.path.join(sys.prefix, 'include', 'site', + py_ver_str, self.name)] + + return install_args diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/req/req_set.py b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_set.py new file mode 100644 index 0000000..b198317 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_set.py @@ -0,0 +1,181 @@ +from __future__ import absolute_import + +import logging +from collections import OrderedDict + +from pip._internal.exceptions import InstallationError +from pip._internal.utils.logging import indent_log +from pip._internal.wheel import Wheel + +logger = logging.getLogger(__name__) + + +class RequirementSet(object): + + def __init__(self, require_hashes=False, check_supported_wheels=True): + """Create a RequirementSet. 
+ """ + + self.requirements = OrderedDict() + self.require_hashes = require_hashes + self.check_supported_wheels = check_supported_wheels + + # Mapping of alias: real_name + self.requirement_aliases = {} + self.unnamed_requirements = [] + self.successfully_downloaded = [] + self.reqs_to_cleanup = [] + + def __str__(self): + reqs = [req for req in self.requirements.values() + if not req.comes_from] + reqs.sort(key=lambda req: req.name.lower()) + return ' '.join([str(req.req) for req in reqs]) + + def __repr__(self): + reqs = [req for req in self.requirements.values()] + reqs.sort(key=lambda req: req.name.lower()) + reqs_str = ', '.join([str(req.req) for req in reqs]) + return ('<%s object; %d requirement(s): %s>' + % (self.__class__.__name__, len(reqs), reqs_str)) + + def add_requirement(self, install_req, parent_req_name=None, + extras_requested=None): + """Add install_req as a requirement to install. + + :param parent_req_name: The name of the requirement that needed this + added. The name is used because when multiple unnamed requirements + resolve to the same name, we could otherwise end up with dependency + links that point outside the Requirements set. parent_req must + already be added. Note that None implies that this is a user + supplied requirement, vs an inferred one. + :param extras_requested: an iterable of extras used to evaluate the + environment markers. + :return: Additional requirements to scan. That is either [] if + the requirement is not applicable, or [install_req] if the + requirement is applicable and has just been added. + """ + name = install_req.name + + # If the markers do not match, ignore this requirement. + if not install_req.match_markers(extras_requested): + logger.info( + "Ignoring %s: markers '%s' don't match your environment", + name, install_req.markers, + ) + return [], None + + # If the wheel is not supported, raise an error. 
+ # Should check this after filtering out based on environment markers to + # allow specifying different wheels based on the environment/OS, in a + # single requirements file. + if install_req.link and install_req.link.is_wheel: + wheel = Wheel(install_req.link.filename) + if self.check_supported_wheels and not wheel.supported(): + raise InstallationError( + "%s is not a supported wheel on this platform." % + wheel.filename + ) + + # This next bit is really a sanity check. + assert install_req.is_direct == (parent_req_name is None), ( + "a direct req shouldn't have a parent and also, " + "a non direct req should have a parent" + ) + + # Unnamed requirements are scanned again and the requirement won't be + # added as a dependency until after scanning. + if not name: + # url or path requirement w/o an egg fragment + self.unnamed_requirements.append(install_req) + return [install_req], None + + try: + existing_req = self.get_requirement(name) + except KeyError: + existing_req = None + + has_conflicting_requirement = ( + parent_req_name is None and + existing_req and + not existing_req.constraint and + existing_req.extras == install_req.extras and + existing_req.req.specifier != install_req.req.specifier + ) + if has_conflicting_requirement: + raise InstallationError( + "Double requirement given: %s (already in %s, name=%r)" + % (install_req, existing_req, name) + ) + + # When no existing requirement exists, add the requirement as a + # dependency and it will be scanned again after. + if not existing_req: + self.requirements[name] = install_req + # FIXME: what about other normalizations? E.g., _ vs. -? + if name.lower() != name: + self.requirement_aliases[name.lower()] = name + # We'd want to rescan this requirements later + return [install_req], install_req + + # Assume there's no need to scan, and that we've already + # encountered this for scanning. 
+ if install_req.constraint or not existing_req.constraint: + return [], existing_req + + does_not_satisfy_constraint = ( + install_req.link and + not ( + existing_req.link and + install_req.link.path == existing_req.link.path + ) + ) + if does_not_satisfy_constraint: + self.reqs_to_cleanup.append(install_req) + raise InstallationError( + "Could not satisfy constraints for '%s': " + "installation from path or url cannot be " + "constrained to a version" % name, + ) + # If we're now installing a constraint, mark the existing + # object for real installation. + existing_req.constraint = False + existing_req.extras = tuple(sorted( + set(existing_req.extras) | set(install_req.extras) + )) + logger.debug( + "Setting %s extras to: %s", + existing_req, existing_req.extras, + ) + # Return the existing requirement for addition to the parent and + # scanning again. + return [existing_req], existing_req + + def has_requirement(self, project_name): + name = project_name.lower() + if (name in self.requirements and + not self.requirements[name].constraint or + name in self.requirement_aliases and + not self.requirements[self.requirement_aliases[name]].constraint): + return True + return False + + @property + def has_requirements(self): + return list(req for req in self.requirements.values() if not + req.constraint) or self.unnamed_requirements + + def get_requirement(self, project_name): + for name in project_name, project_name.lower(): + if name in self.requirements: + return self.requirements[name] + if name in self.requirement_aliases: + return self.requirements[self.requirement_aliases[name]] + raise KeyError("No project with the name %r" % project_name) + + def cleanup_files(self): + """Clean up files, remove builds.""" + logger.debug('Cleaning up...') + with indent_log(): + for req in self.reqs_to_cleanup: + req.remove_temporary_source() diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/req/req_tracker.py 
b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_tracker.py new file mode 100644 index 0000000..0a86f4c --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_tracker.py @@ -0,0 +1,76 @@ +from __future__ import absolute_import + +import contextlib +import errno +import hashlib +import logging +import os + +from pip._internal.utils.temp_dir import TempDirectory + +logger = logging.getLogger(__name__) + + +class RequirementTracker(object): + + def __init__(self): + self._root = os.environ.get('PIP_REQ_TRACKER') + if self._root is None: + self._temp_dir = TempDirectory(delete=False, kind='req-tracker') + self._temp_dir.create() + self._root = os.environ['PIP_REQ_TRACKER'] = self._temp_dir.path + logger.debug('Created requirements tracker %r', self._root) + else: + self._temp_dir = None + logger.debug('Re-using requirements tracker %r', self._root) + self._entries = set() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.cleanup() + + def _entry_path(self, link): + hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest() + return os.path.join(self._root, hashed) + + def add(self, req): + link = req.link + info = str(req) + entry_path = self._entry_path(link) + try: + with open(entry_path) as fp: + # Error, these's already a build in progress. 
+ raise LookupError('%s is already being built: %s' + % (link, fp.read())) + except IOError as e: + if e.errno != errno.ENOENT: + raise + assert req not in self._entries + with open(entry_path, 'w') as fp: + fp.write(info) + self._entries.add(req) + logger.debug('Added %s to build tracker %r', req, self._root) + + def remove(self, req): + link = req.link + self._entries.remove(req) + os.unlink(self._entry_path(link)) + logger.debug('Removed %s from build tracker %r', req, self._root) + + def cleanup(self): + for req in set(self._entries): + self.remove(req) + remove = self._temp_dir is not None + if remove: + self._temp_dir.cleanup() + logger.debug('%s build tracker %r', + 'Removed' if remove else 'Cleaned', + self._root) + + @contextlib.contextmanager + def track(self, req): + self.add(req) + yield + self.remove(req) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/req/req_uninstall.py b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_uninstall.py new file mode 100644 index 0000000..a7d8230 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/req/req_uninstall.py @@ -0,0 +1,460 @@ +from __future__ import absolute_import + +import csv +import functools +import logging +import os +import sys +import sysconfig + +from pip._vendor import pkg_resources + +from pip._internal.exceptions import UninstallationError +from pip._internal.locations import bin_py, bin_user +from pip._internal.utils.compat import WINDOWS, cache_from_source, uses_pycache +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + FakeFile, ask, dist_in_usersite, dist_is_local, egg_link_path, is_local, + normalize_path, renames, +) +from pip._internal.utils.temp_dir import TempDirectory + +logger = logging.getLogger(__name__) + + +def _script_names(dist, script_name, is_gui): + """Create the fully qualified name of the files created by + {console,gui}_scripts for the given ``dist``. 
+ Returns the list of file names + """ + if dist_in_usersite(dist): + bin_dir = bin_user + else: + bin_dir = bin_py + exe_name = os.path.join(bin_dir, script_name) + paths_to_remove = [exe_name] + if WINDOWS: + paths_to_remove.append(exe_name + '.exe') + paths_to_remove.append(exe_name + '.exe.manifest') + if is_gui: + paths_to_remove.append(exe_name + '-script.pyw') + else: + paths_to_remove.append(exe_name + '-script.py') + return paths_to_remove + + +def _unique(fn): + @functools.wraps(fn) + def unique(*args, **kw): + seen = set() + for item in fn(*args, **kw): + if item not in seen: + seen.add(item) + yield item + return unique + + +@_unique +def uninstallation_paths(dist): + """ + Yield all the uninstallation paths for dist based on RECORD-without-.py[co] + + Yield paths to all the files in RECORD. For each .py file in RECORD, add + the .pyc and .pyo in the same directory. + + UninstallPathSet.add() takes care of the __pycache__ .py[co]. + """ + r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD'))) + for row in r: + path = os.path.join(dist.location, row[0]) + yield path + if path.endswith('.py'): + dn, fn = os.path.split(path) + base = fn[:-3] + path = os.path.join(dn, base + '.pyc') + yield path + path = os.path.join(dn, base + '.pyo') + yield path + + +def compact(paths): + """Compact a path set to contain the minimal number of paths + necessary to contain all paths in the set. If /a/path/ and + /a/path/to/a/file.txt are both in the set, leave only the + shorter path.""" + + sep = os.path.sep + short_paths = set() + for path in sorted(paths, key=len): + should_add = any( + path.startswith(shortpath.rstrip("*")) and + path[len(shortpath.rstrip("*").rstrip(sep))] == sep + for shortpath in short_paths + ) + if not should_add: + short_paths.add(path) + return short_paths + + +def compress_for_output_listing(paths): + """Returns a tuple of 2 sets of which paths to display to user + + The first set contains paths that would be deleted. 
Files of a package + are not added and the top-level directory of the package has a '*' added + at the end - to signify that all it's contents are removed. + + The second set contains files that would have been skipped in the above + folders. + """ + + will_remove = list(paths) + will_skip = set() + + # Determine folders and files + folders = set() + files = set() + for path in will_remove: + if path.endswith(".pyc"): + continue + if path.endswith("__init__.py") or ".dist-info" in path: + folders.add(os.path.dirname(path)) + files.add(path) + + _normcased_files = set(map(os.path.normcase, files)) + + folders = compact(folders) + + # This walks the tree using os.walk to not miss extra folders + # that might get added. + for folder in folders: + for dirpath, _, dirfiles in os.walk(folder): + for fname in dirfiles: + if fname.endswith(".pyc"): + continue + + file_ = os.path.join(dirpath, fname) + if (os.path.isfile(file_) and + os.path.normcase(file_) not in _normcased_files): + # We are skipping this file. Add it to the set. + will_skip.add(file_) + + will_remove = files | { + os.path.join(folder, "*") for folder in folders + } + + return will_remove, will_skip + + +class UninstallPathSet(object): + """A set of file paths to be removed in the uninstallation of a + requirement.""" + def __init__(self, dist): + self.paths = set() + self._refuse = set() + self.pth = {} + self.dist = dist + self.save_dir = TempDirectory(kind="uninstall") + self._moved_paths = [] + + def _permitted(self, path): + """ + Return True if the given path is one we are permitted to + remove/modify, False otherwise. 
+ + """ + return is_local(path) + + def add(self, path): + head, tail = os.path.split(path) + + # we normalize the head to resolve parent directory symlinks, but not + # the tail, since we only want to uninstall symlinks, not their targets + path = os.path.join(normalize_path(head), os.path.normcase(tail)) + + if not os.path.exists(path): + return + if self._permitted(path): + self.paths.add(path) + else: + self._refuse.add(path) + + # __pycache__ files can show up after 'installed-files.txt' is created, + # due to imports + if os.path.splitext(path)[1] == '.py' and uses_pycache: + self.add(cache_from_source(path)) + + def add_pth(self, pth_file, entry): + pth_file = normalize_path(pth_file) + if self._permitted(pth_file): + if pth_file not in self.pth: + self.pth[pth_file] = UninstallPthEntries(pth_file) + self.pth[pth_file].add(entry) + else: + self._refuse.add(pth_file) + + def _stash(self, path): + return os.path.join( + self.save_dir.path, os.path.splitdrive(path)[1].lstrip(os.path.sep) + ) + + def remove(self, auto_confirm=False, verbose=False): + """Remove paths in ``self.paths`` with confirmation (unless + ``auto_confirm`` is True).""" + + if not self.paths: + logger.info( + "Can't uninstall '%s'. 
No files were found to uninstall.", + self.dist.project_name, + ) + return + + dist_name_version = ( + self.dist.project_name + "-" + self.dist.version + ) + logger.info('Uninstalling %s:', dist_name_version) + + with indent_log(): + if auto_confirm or self._allowed_to_proceed(verbose): + self.save_dir.create() + + for path in sorted(compact(self.paths)): + new_path = self._stash(path) + logger.debug('Removing file or directory %s', path) + self._moved_paths.append(path) + renames(path, new_path) + for pth in self.pth.values(): + pth.remove() + + logger.info('Successfully uninstalled %s', dist_name_version) + + def _allowed_to_proceed(self, verbose): + """Display which files would be deleted and prompt for confirmation + """ + + def _display(msg, paths): + if not paths: + return + + logger.info(msg) + with indent_log(): + for path in sorted(compact(paths)): + logger.info(path) + + if not verbose: + will_remove, will_skip = compress_for_output_listing(self.paths) + else: + # In verbose mode, display all the files that are going to be + # deleted. + will_remove = list(self.paths) + will_skip = set() + + _display('Would remove:', will_remove) + _display('Would not remove (might be manually added):', will_skip) + _display('Would not remove (outside of prefix):', self._refuse) + + return ask('Proceed (y/n)? 
', ('y', 'n')) == 'y' + + def rollback(self): + """Rollback the changes previously made by remove().""" + if self.save_dir.path is None: + logger.error( + "Can't roll back %s; was not uninstalled", + self.dist.project_name, + ) + return False + logger.info('Rolling back uninstall of %s', self.dist.project_name) + for path in self._moved_paths: + tmp_path = self._stash(path) + logger.debug('Replacing %s', path) + renames(tmp_path, path) + for pth in self.pth.values(): + pth.rollback() + + def commit(self): + """Remove temporary save dir: rollback will no longer be possible.""" + self.save_dir.cleanup() + self._moved_paths = [] + + @classmethod + def from_dist(cls, dist): + dist_path = normalize_path(dist.location) + if not dist_is_local(dist): + logger.info( + "Not uninstalling %s at %s, outside environment %s", + dist.key, + dist_path, + sys.prefix, + ) + return cls(dist) + + if dist_path in {p for p in {sysconfig.get_path("stdlib"), + sysconfig.get_path("platstdlib")} + if p}: + logger.info( + "Not uninstalling %s at %s, as it is in the standard library.", + dist.key, + dist_path, + ) + return cls(dist) + + paths_to_remove = cls(dist) + develop_egg_link = egg_link_path(dist) + develop_egg_link_egg_info = '{}.egg-info'.format( + pkg_resources.to_filename(dist.project_name)) + egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info) + # Special case for distutils installed package + distutils_egg_info = getattr(dist._provider, 'path', None) + + # Uninstall cases order do matter as in the case of 2 installs of the + # same package, pip needs to uninstall the currently detected version + if (egg_info_exists and dist.egg_info.endswith('.egg-info') and + not dist.egg_info.endswith(develop_egg_link_egg_info)): + # if dist.egg_info.endswith(develop_egg_link_egg_info), we + # are in fact in the develop_egg_link case + paths_to_remove.add(dist.egg_info) + if dist.has_metadata('installed-files.txt'): + for installed_file in dist.get_metadata( + 
'installed-files.txt').splitlines(): + path = os.path.normpath( + os.path.join(dist.egg_info, installed_file) + ) + paths_to_remove.add(path) + # FIXME: need a test for this elif block + # occurs with --single-version-externally-managed/--record outside + # of pip + elif dist.has_metadata('top_level.txt'): + if dist.has_metadata('namespace_packages.txt'): + namespaces = dist.get_metadata('namespace_packages.txt') + else: + namespaces = [] + for top_level_pkg in [ + p for p + in dist.get_metadata('top_level.txt').splitlines() + if p and p not in namespaces]: + path = os.path.join(dist.location, top_level_pkg) + paths_to_remove.add(path) + paths_to_remove.add(path + '.py') + paths_to_remove.add(path + '.pyc') + paths_to_remove.add(path + '.pyo') + + elif distutils_egg_info: + raise UninstallationError( + "Cannot uninstall {!r}. It is a distutils installed project " + "and thus we cannot accurately determine which files belong " + "to it which would lead to only a partial uninstall.".format( + dist.project_name, + ) + ) + + elif dist.location.endswith('.egg'): + # package installed by easy_install + # We cannot match on dist.egg_name because it can slightly vary + # i.e. 
setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg + paths_to_remove.add(dist.location) + easy_install_egg = os.path.split(dist.location)[1] + easy_install_pth = os.path.join(os.path.dirname(dist.location), + 'easy-install.pth') + paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) + + elif egg_info_exists and dist.egg_info.endswith('.dist-info'): + for path in uninstallation_paths(dist): + paths_to_remove.add(path) + + elif develop_egg_link: + # develop egg + with open(develop_egg_link, 'r') as fh: + link_pointer = os.path.normcase(fh.readline().strip()) + assert (link_pointer == dist.location), ( + 'Egg-link %s does not match installed location of %s ' + '(at %s)' % (link_pointer, dist.project_name, dist.location) + ) + paths_to_remove.add(develop_egg_link) + easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), + 'easy-install.pth') + paths_to_remove.add_pth(easy_install_pth, dist.location) + + else: + logger.debug( + 'Not sure how to uninstall: %s - Check: %s', + dist, dist.location, + ) + + # find distutils scripts= scripts + if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): + for script in dist.metadata_listdir('scripts'): + if dist_in_usersite(dist): + bin_dir = bin_user + else: + bin_dir = bin_py + paths_to_remove.add(os.path.join(bin_dir, script)) + if WINDOWS: + paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') + + # find console_scripts + _scripts_to_remove = [] + console_scripts = dist.get_entry_map(group='console_scripts') + for name in console_scripts.keys(): + _scripts_to_remove.extend(_script_names(dist, name, False)) + # find gui_scripts + gui_scripts = dist.get_entry_map(group='gui_scripts') + for name in gui_scripts.keys(): + _scripts_to_remove.extend(_script_names(dist, name, True)) + + for s in _scripts_to_remove: + paths_to_remove.add(s) + + return paths_to_remove + + +class UninstallPthEntries(object): + def __init__(self, pth_file): + if not os.path.isfile(pth_file): + raise 
UninstallationError( + "Cannot remove entries from nonexistent file %s" % pth_file + ) + self.file = pth_file + self.entries = set() + self._saved_lines = None + + def add(self, entry): + entry = os.path.normcase(entry) + # On Windows, os.path.normcase converts the entry to use + # backslashes. This is correct for entries that describe absolute + # paths outside of site-packages, but all the others use forward + # slashes. + if WINDOWS and not os.path.splitdrive(entry)[0]: + entry = entry.replace('\\', '/') + self.entries.add(entry) + + def remove(self): + logger.debug('Removing pth entries from %s:', self.file) + with open(self.file, 'rb') as fh: + # windows uses '\r\n' with py3k, but uses '\n' with py2.x + lines = fh.readlines() + self._saved_lines = lines + if any(b'\r\n' in line for line in lines): + endline = '\r\n' + else: + endline = '\n' + # handle missing trailing newline + if lines and not lines[-1].endswith(endline.encode("utf-8")): + lines[-1] = lines[-1] + endline.encode("utf-8") + for entry in self.entries: + try: + logger.debug('Removing entry: %s', entry) + lines.remove((entry + endline).encode("utf-8")) + except ValueError: + pass + with open(self.file, 'wb') as fh: + fh.writelines(lines) + + def rollback(self): + if self._saved_lines is None: + logger.error( + 'Cannot roll back changes to %s, none were made', self.file + ) + return False + logger.debug('Rolling %s back to previous state', self.file) + with open(self.file, 'wb') as fh: + fh.writelines(self._saved_lines) + return True diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/resolve.py b/Shared/lib/python3.4/site-packages/pip/_internal/resolve.py new file mode 100644 index 0000000..2d9f1c5 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/resolve.py @@ -0,0 +1,353 @@ +"""Dependency Resolution + +The dependency resolution in pip is performed as follows: + +for top-level requirements: + a. only one spec allowed per project, regardless of conflicts or not. 
+ otherwise a "double requirement" exception is raised + b. they override sub-dependency requirements. +for sub-dependencies + a. "first found, wins" (where the order is breadth first) +""" + +import logging +from collections import defaultdict +from itertools import chain + +from pip._internal.exceptions import ( + BestVersionAlreadyInstalled, DistributionNotFound, HashError, HashErrors, + UnsupportedPythonVersion, +) +from pip._internal.req.constructors import install_req_from_req +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import dist_in_usersite, ensure_dir +from pip._internal.utils.packaging import check_dist_requires_python + +logger = logging.getLogger(__name__) + + +class Resolver(object): + """Resolves which packages need to be installed/uninstalled to perform \ + the requested operation without breaking the requirements of any package. + """ + + _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"} + + def __init__(self, preparer, session, finder, wheel_cache, use_user_site, + ignore_dependencies, ignore_installed, ignore_requires_python, + force_reinstall, isolated, upgrade_strategy): + super(Resolver, self).__init__() + assert upgrade_strategy in self._allowed_strategies + + self.preparer = preparer + self.finder = finder + self.session = session + + # NOTE: This would eventually be replaced with a cache that can give + # information about both sdist and wheels transparently. 
+ self.wheel_cache = wheel_cache + + self.require_hashes = None # This is set in resolve + + self.upgrade_strategy = upgrade_strategy + self.force_reinstall = force_reinstall + self.isolated = isolated + self.ignore_dependencies = ignore_dependencies + self.ignore_installed = ignore_installed + self.ignore_requires_python = ignore_requires_python + self.use_user_site = use_user_site + + self._discovered_dependencies = defaultdict(list) + + def resolve(self, requirement_set): + """Resolve what operations need to be done + + As a side-effect of this method, the packages (and their dependencies) + are downloaded, unpacked and prepared for installation. This + preparation is done by ``pip.operations.prepare``. + + Once PyPI has static dependency metadata available, it would be + possible to move the preparation to become a step separated from + dependency resolution. + """ + # make the wheelhouse + if self.preparer.wheel_download_dir: + ensure_dir(self.preparer.wheel_download_dir) + + # If any top-level requirement has a hash specified, enter + # hash-checking mode, which requires hashes from all. + root_reqs = ( + requirement_set.unnamed_requirements + + list(requirement_set.requirements.values()) + ) + self.require_hashes = ( + requirement_set.require_hashes or + any(req.has_hash_options for req in root_reqs) + ) + + # Display where finder is looking for packages + locations = self.finder.get_formatted_locations() + if locations: + logger.info(locations) + + # Actually prepare the files, and collect any exceptions. Most hash + # exceptions cannot be checked ahead of time, because + # req.populate_link() needs to be called before we can make decisions + # based on link type. 
+ discovered_reqs = [] + hash_errors = HashErrors() + for req in chain(root_reqs, discovered_reqs): + try: + discovered_reqs.extend( + self._resolve_one(requirement_set, req) + ) + except HashError as exc: + exc.req = req + hash_errors.append(exc) + + if hash_errors: + raise hash_errors + + def _is_upgrade_allowed(self, req): + if self.upgrade_strategy == "to-satisfy-only": + return False + elif self.upgrade_strategy == "eager": + return True + else: + assert self.upgrade_strategy == "only-if-needed" + return req.is_direct + + def _set_req_to_reinstall(self, req): + """ + Set a requirement to be installed. + """ + # Don't uninstall the conflict if doing a user install and the + # conflict is not a user install. + if not self.use_user_site or dist_in_usersite(req.satisfied_by): + req.conflicts_with = req.satisfied_by + req.satisfied_by = None + + # XXX: Stop passing requirement_set for options + def _check_skip_installed(self, req_to_install): + """Check if req_to_install should be skipped. + + This will check if the req is installed, and whether we should upgrade + or reinstall it, taking into account all the relevant user options. + + After calling this req_to_install will only have satisfied_by set to + None if the req_to_install is to be upgraded/reinstalled etc. Any + other value will be a dist recording the current thing installed that + satisfies the requirement. + + Note that for vcs urls and the like we can't assess skipping in this + routine - we simply identify that we need to pull the thing down, + then later on it is pulled down and introspected to assess upgrade/ + reinstalls etc. + + :return: A text reason for why it was skipped, or None. 
+ """ + if self.ignore_installed: + return None + + req_to_install.check_if_exists(self.use_user_site) + if not req_to_install.satisfied_by: + return None + + if self.force_reinstall: + self._set_req_to_reinstall(req_to_install) + return None + + if not self._is_upgrade_allowed(req_to_install): + if self.upgrade_strategy == "only-if-needed": + return 'already satisfied, skipping upgrade' + return 'already satisfied' + + # Check for the possibility of an upgrade. For link-based + # requirements we have to pull the tree down and inspect to assess + # the version #, so it's handled way down. + if not req_to_install.link: + try: + self.finder.find_requirement(req_to_install, upgrade=True) + except BestVersionAlreadyInstalled: + # Then the best version is installed. + return 'already up-to-date' + except DistributionNotFound: + # No distribution found, so we squash the error. It will + # be raised later when we re-try later to do the install. + # Why don't we just raise here? + pass + + self._set_req_to_reinstall(req_to_install) + return None + + def _get_abstract_dist_for(self, req): + """Takes a InstallRequirement and returns a single AbstractDist \ + representing a prepared variant of the same. + """ + assert self.require_hashes is not None, ( + "require_hashes should have been set in Resolver.resolve()" + ) + + if req.editable: + return self.preparer.prepare_editable_requirement( + req, self.require_hashes, self.use_user_site, self.finder, + ) + + # satisfied_by is only evaluated by calling _check_skip_installed, + # so it must be None here. 
+ assert req.satisfied_by is None + skip_reason = self._check_skip_installed(req) + + if req.satisfied_by: + return self.preparer.prepare_installed_requirement( + req, self.require_hashes, skip_reason + ) + + upgrade_allowed = self._is_upgrade_allowed(req) + abstract_dist = self.preparer.prepare_linked_requirement( + req, self.session, self.finder, upgrade_allowed, + self.require_hashes + ) + + # NOTE + # The following portion is for determining if a certain package is + # going to be re-installed/upgraded or not and reporting to the user. + # This should probably get cleaned up in a future refactor. + + # req.req is only avail after unpack for URL + # pkgs repeat check_if_exists to uninstall-on-upgrade + # (#14) + if not self.ignore_installed: + req.check_if_exists(self.use_user_site) + + if req.satisfied_by: + should_modify = ( + self.upgrade_strategy != "to-satisfy-only" or + self.force_reinstall or + self.ignore_installed or + req.link.scheme == 'file' + ) + if should_modify: + self._set_req_to_reinstall(req) + else: + logger.info( + 'Requirement already satisfied (use --upgrade to upgrade):' + ' %s', req, + ) + + return abstract_dist + + def _resolve_one(self, requirement_set, req_to_install): + """Prepare a single requirements file. + + :return: A list of additional InstallRequirements to also install. 
+ """ + # Tell user what we are doing for this requirement: + # obtain (editable), skipping, processing (local url), collecting + # (remote url or package name) + if req_to_install.constraint or req_to_install.prepared: + return [] + + req_to_install.prepared = True + + # register tmp src for cleanup in case something goes wrong + requirement_set.reqs_to_cleanup.append(req_to_install) + + abstract_dist = self._get_abstract_dist_for(req_to_install) + + # Parse and return dependencies + dist = abstract_dist.dist(self.finder) + try: + check_dist_requires_python(dist) + except UnsupportedPythonVersion as err: + if self.ignore_requires_python: + logger.warning(err.args[0]) + else: + raise + + more_reqs = [] + + def add_req(subreq, extras_requested): + sub_install_req = install_req_from_req( + str(subreq), + req_to_install, + isolated=self.isolated, + wheel_cache=self.wheel_cache, + ) + parent_req_name = req_to_install.name + to_scan_again, add_to_parent = requirement_set.add_requirement( + sub_install_req, + parent_req_name=parent_req_name, + extras_requested=extras_requested, + ) + if parent_req_name and add_to_parent: + self._discovered_dependencies[parent_req_name].append( + add_to_parent + ) + more_reqs.extend(to_scan_again) + + with indent_log(): + # We add req_to_install before its dependencies, so that we + # can refer to it when adding dependencies. 
+ if not requirement_set.has_requirement(req_to_install.name): + # 'unnamed' requirements will get added here + req_to_install.is_direct = True + requirement_set.add_requirement( + req_to_install, parent_req_name=None, + ) + + if not self.ignore_dependencies: + if req_to_install.extras: + logger.debug( + "Installing extra requirements: %r", + ','.join(req_to_install.extras), + ) + missing_requested = sorted( + set(req_to_install.extras) - set(dist.extras) + ) + for missing in missing_requested: + logger.warning( + '%s does not provide the extra \'%s\'', + dist, missing + ) + + available_requested = sorted( + set(dist.extras) & set(req_to_install.extras) + ) + for subreq in dist.requires(available_requested): + add_req(subreq, extras_requested=available_requested) + + if not req_to_install.editable and not req_to_install.satisfied_by: + # XXX: --no-install leads this to report 'Successfully + # downloaded' for only non-editable reqs, even though we took + # action on them. + requirement_set.successfully_downloaded.append(req_to_install) + + return more_reqs + + def get_installation_order(self, req_set): + """Create the installation order. + + The installation order is topological - requirements are installed + before the requiring thing. We break cycles at an arbitrary point, + and make no other guarantees. + """ + # The current implementation, which we may change at any point + # installs the user specified things in the order given, except when + # dependencies must come earlier to achieve topological order. 
+ order = [] + ordered_reqs = set() + + def schedule(req): + if req.satisfied_by or req in ordered_reqs: + return + if req.constraint: + return + ordered_reqs.add(req) + for dep in self._discovered_dependencies[req.name]: + schedule(dep) + order.append(req) + + for install_req in req_set.requirements.values(): + schedule(install_req) + return order diff --git a/Shared/lib/python3.4/site-packages/wheel/test/simple.dist/simpledist/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/__init__.py similarity index 100% rename from Shared/lib/python3.4/site-packages/wheel/test/simple.dist/simpledist/__init__.py rename to Shared/lib/python3.4/site-packages/pip/_internal/utils/__init__.py diff --git a/Shared/lib/python3.4/site-packages/pip/utils/appdirs.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/appdirs.py similarity index 81% rename from Shared/lib/python3.4/site-packages/pip/utils/appdirs.py rename to Shared/lib/python3.4/site-packages/pip/_internal/utils/appdirs.py index 60ae76e..cc96f98 100644 --- a/Shared/lib/python3.4/site-packages/pip/utils/appdirs.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/appdirs.py @@ -7,7 +7,9 @@ from __future__ import absolute_import import os import sys -from pip.compat import WINDOWS, expanduser +from pip._vendor.six import PY2, text_type + +from pip._internal.utils.compat import WINDOWS, expanduser def user_cache_dir(appname): @@ -17,9 +19,9 @@ def user_cache_dir(appname): "appname" is the name of application. Typical user cache directories are: - Mac OS X: ~/Library/Caches/ + macOS: ~/Library/Caches/ Unix: ~/.cache/ (XDG default) - Windows: C:\Users\\AppData\Local\\Cache + Windows: C:\Users\\AppData\Local\\Cache On Windows the only suggestion in the MSDN docs is that local settings go in the `CSIDL_LOCAL_APPDATA` directory. 
This is identical to the @@ -35,6 +37,11 @@ def user_cache_dir(appname): # Get the base path path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) + # When using Python 2, return paths as bytes on Windows like we do on + # other operating systems. See helper function docs for more details. + if PY2 and isinstance(path, text_type): + path = _win_path_to_bytes(path) + # Add our app name and Cache directory to it path = os.path.join(path, appname, "Cache") elif sys.platform == "darwin": @@ -54,7 +61,7 @@ def user_cache_dir(appname): def user_data_dir(appname, roaming=False): - """ + r""" Return full path to the user-specific data dir for this application. "appname" is the name of application. @@ -67,7 +74,8 @@ def user_data_dir(appname, roaming=False): for a discussion of issues. Typical user data directories are: - Mac OS X: ~/Library/Application Support/ + macOS: ~/Library/Application Support/ + if it exists, else ~/.config/ Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined Win XP (not roaming): C:\Documents and Settings\\ ... @@ -87,6 +95,13 @@ def user_data_dir(appname, roaming=False): path = os.path.join( expanduser('~/Library/Application Support/'), appname, + ) if os.path.isdir(os.path.join( + expanduser('~/Library/Application Support/'), + appname, + ) + ) else os.path.join( + expanduser('~/.config/'), + appname, ) else: path = os.path.join( @@ -110,12 +125,12 @@ def user_config_dir(appname, roaming=True): for a discussion of issues. Typical user data directories are: - Mac OS X: same as user_data_dir + macOS: same as user_data_dir Unix: ~/.config/ Win *: same as user_data_dir For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by deafult "~/.config/". + That means, by default "~/.config/". 
""" if WINDOWS: path = user_data_dir(appname, roaming=roaming) @@ -131,12 +146,12 @@ def user_config_dir(appname, roaming=True): # for the discussion regarding site_config_dirs locations # see def site_config_dirs(appname): - """Return a list of potential user-shared config dirs for this application. + r"""Return a list of potential user-shared config dirs for this application. "appname" is the name of application. Typical user config directories are: - Mac OS X: /Library/Application Support// + macOS: /Library/Application Support// Unix: /etc or $XDG_CONFIG_DIRS[i]// for each value in $XDG_CONFIG_DIRS Win XP: C:\Documents and Settings\All Users\Application ... @@ -216,9 +231,28 @@ def _get_win_folder_with_ctypes(csidl_name): return buf.value + if WINDOWS: try: import ctypes _get_win_folder = _get_win_folder_with_ctypes except ImportError: _get_win_folder = _get_win_folder_from_registry + + +def _win_path_to_bytes(path): + """Encode Windows paths to bytes. Only used on Python 2. + + Motivation is to be consistent with other operating systems where paths + are also returned as bytes. This avoids problems mixing bytes and Unicode + elsewhere in the codebase. For more details and discussion see + . + + If encoding using ASCII and MBCS fails, return the original Unicode path. 
+ """ + for encoding in ('ASCII', 'MBCS'): + try: + return path.encode(encoding) + except (UnicodeEncodeError, LookupError): + pass + return path diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/compat.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/compat.py new file mode 100644 index 0000000..3114f2d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/compat.py @@ -0,0 +1,248 @@ +"""Stuff that differs in different Python versions and platform +distributions.""" +from __future__ import absolute_import, division + +import codecs +import locale +import logging +import os +import shutil +import sys + +from pip._vendor.six import text_type + +try: + import ipaddress +except ImportError: + try: + from pip._vendor import ipaddress # type: ignore + except ImportError: + import ipaddr as ipaddress # type: ignore + ipaddress.ip_address = ipaddress.IPAddress + ipaddress.ip_network = ipaddress.IPNetwork + + +__all__ = [ + "ipaddress", "uses_pycache", "console_to_str", "native_str", + "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", "get_terminal_size", + "get_extension_suffixes", +] + + +logger = logging.getLogger(__name__) + +if sys.version_info >= (3, 4): + uses_pycache = True + from importlib.util import cache_from_source +else: + import imp + + try: + cache_from_source = imp.cache_from_source # type: ignore + except AttributeError: + # does not use __pycache__ + cache_from_source = None + + uses_pycache = cache_from_source is not None + + +if sys.version_info >= (3, 5): + backslashreplace_decode = "backslashreplace" +else: + # In version 3.4 and older, backslashreplace exists + # but does not support use for decoding. + # We implement our own replace handler for this + # situation, so that we can consistently use + # backslash replacement for all versions. 
+ def backslashreplace_decode_fn(err): + raw_bytes = (err.object[i] for i in range(err.start, err.end)) + if sys.version_info[0] == 2: + # Python 2 gave us characters - convert to numeric bytes + raw_bytes = (ord(b) for b in raw_bytes) + return u"".join(u"\\x%x" % c for c in raw_bytes), err.end + codecs.register_error( + "backslashreplace_decode", + backslashreplace_decode_fn, + ) + backslashreplace_decode = "backslashreplace_decode" + + +def console_to_str(data): + """Return a string, safe for output, of subprocess output. + + We assume the data is in the locale preferred encoding. + If it won't decode properly, we warn the user but decode as + best we can. + + We also ensure that the output can be safely written to + standard output without encoding errors. + """ + + # First, get the encoding we assume. This is the preferred + # encoding for the locale, unless that is not found, or + # it is ASCII, in which case assume UTF-8 + encoding = locale.getpreferredencoding() + if (not encoding) or codecs.lookup(encoding).name == "ascii": + encoding = "utf-8" + + # Now try to decode the data - if we fail, warn the user and + # decode with replacement. + try: + s = data.decode(encoding) + except UnicodeDecodeError: + logger.warning( + "Subprocess output does not appear to be encoded as %s", + encoding, + ) + s = data.decode(encoding, errors=backslashreplace_decode) + + # Make sure we can print the output, by encoding it to the output + # encoding with replacement of unencodable characters, and then + # decoding again. + # We use stderr's encoding because it's less likely to be + # redirected and if we don't find an encoding we skip this + # step (on the assumption that output is wrapped by something + # that won't fail). + # The double getattr is to deal with the possibility that we're + # being called in a situation where sys.__stderr__ doesn't exist, + # or doesn't have an encoding attribute. 
Neither of these cases + # should occur in normal pip use, but there's no harm in checking + # in case people use pip in (unsupported) unusual situations. + output_encoding = getattr(getattr(sys, "__stderr__", None), + "encoding", None) + + if output_encoding: + s = s.encode(output_encoding, errors="backslashreplace") + s = s.decode(output_encoding) + + return s + + +if sys.version_info >= (3,): + def native_str(s, replace=False): + if isinstance(s, bytes): + return s.decode('utf-8', 'replace' if replace else 'strict') + return s + +else: + def native_str(s, replace=False): + # Replace is ignored -- unicode to UTF-8 can't fail + if isinstance(s, text_type): + return s.encode('utf-8') + return s + + +def get_path_uid(path): + """ + Return path's uid. + + Does not follow symlinks: + https://github.com/pypa/pip/pull/935#discussion_r5307003 + + Placed this function in compat due to differences on AIX and + Jython, that should eventually go away. + + :raises OSError: When path is a symlink or can't be read. + """ + if hasattr(os, 'O_NOFOLLOW'): + fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) + file_uid = os.fstat(fd).st_uid + os.close(fd) + else: # AIX and Jython + # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW + if not os.path.islink(path): + # older versions of Jython don't have `os.fstat` + file_uid = os.stat(path).st_uid + else: + # raise OSError for parity with os.O_NOFOLLOW above + raise OSError( + "%s is a symlink; Will not return uid for symlinks" % path + ) + return file_uid + + +if sys.version_info >= (3, 4): + from importlib.machinery import EXTENSION_SUFFIXES + + def get_extension_suffixes(): + return EXTENSION_SUFFIXES +else: + from imp import get_suffixes + + def get_extension_suffixes(): + return [suffix[0] for suffix in get_suffixes()] + + +def expanduser(path): + """ + Expand ~ and ~user constructions. 
+ + Includes a workaround for https://bugs.python.org/issue14768 + """ + expanded = os.path.expanduser(path) + if path.startswith('~/') and expanded.startswith('//'): + expanded = expanded[1:] + return expanded + + +# packages in the stdlib that may have installation metadata, but should not be +# considered 'installed'. this theoretically could be determined based on +# dist.location (py27:`sysconfig.get_paths()['stdlib']`, +# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may +# make this ineffective, so hard-coding +stdlib_pkgs = {"python", "wsgiref", "argparse"} + + +# windows detection, covers cpython and ironpython +WINDOWS = (sys.platform.startswith("win") or + (sys.platform == 'cli' and os.name == 'nt')) + + +def samefile(file1, file2): + """Provide an alternative for os.path.samefile on Windows/Python2""" + if hasattr(os.path, 'samefile'): + return os.path.samefile(file1, file2) + else: + path1 = os.path.normcase(os.path.abspath(file1)) + path2 = os.path.normcase(os.path.abspath(file2)) + return path1 == path2 + + +if hasattr(shutil, 'get_terminal_size'): + def get_terminal_size(): + """ + Returns a tuple (x, y) representing the width(x) and the height(y) + in characters of the terminal window. + """ + return tuple(shutil.get_terminal_size()) +else: + def get_terminal_size(): + """ + Returns a tuple (x, y) representing the width(x) and the height(y) + in characters of the terminal window. 
+ """ + def ioctl_GWINSZ(fd): + try: + import fcntl + import termios + import struct + cr = struct.unpack_from( + 'hh', + fcntl.ioctl(fd, termios.TIOCGWINSZ, '12345678') + ) + except Exception: + return None + if cr == (0, 0): + return None + return cr + cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) + if not cr: + try: + fd = os.open(os.ctermid(), os.O_RDONLY) + cr = ioctl_GWINSZ(fd) + os.close(fd) + except Exception: + pass + if not cr: + cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80)) + return int(cr[1]), int(cr[0]) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/deprecation.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/deprecation.py new file mode 100644 index 0000000..bd744cf --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/deprecation.py @@ -0,0 +1,89 @@ +""" +A module that implements tooling to enable easy warnings about deprecations. +""" +from __future__ import absolute_import + +import logging +import warnings + +from pip._vendor.packaging.version import parse + +from pip import __version__ as current_version +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Any, Optional # noqa: F401 + + +class PipDeprecationWarning(Warning): + pass + + +_original_showwarning = None # type: Any + + +# Warnings <-> Logging Integration +def _showwarning(message, category, filename, lineno, file=None, line=None): + if file is not None: + if _original_showwarning is not None: + _original_showwarning( + message, category, filename, lineno, file, line, + ) + elif issubclass(category, PipDeprecationWarning): + # We use a specially named logger which will handle all of the + # deprecation messages for pip. 
+ logger = logging.getLogger("pip._internal.deprecations") + logger.warning(message) + else: + _original_showwarning( + message, category, filename, lineno, file, line, + ) + + +def install_warning_logger(): + # Enable our Deprecation Warnings + warnings.simplefilter("default", PipDeprecationWarning, append=True) + + global _original_showwarning + + if _original_showwarning is None: + _original_showwarning = warnings.showwarning + warnings.showwarning = _showwarning + + +def deprecated(reason, replacement, gone_in, issue=None): + # type: (str, Optional[str], Optional[str], Optional[int]) -> None + """Helper to deprecate existing functionality. + + reason: + Textual reason shown to the user about why this functionality has + been deprecated. + replacement: + Textual suggestion shown to the user about what alternative + functionality they can use. + gone_in: + The version of pip does this functionality should get removed in. + Raises errors if pip's current version is greater than or equal to + this. + issue: + Issue number on the tracker that would serve as a useful place for + users to find related discussion and provide feedback. + + Always pass replacement, gone_in and issue as keyword arguments for clarity + at the call site. + """ + + # Construct a nice message. + # This is purposely eagerly formatted as we want it to appear as if someone + # typed this entire message out. + message = "DEPRECATION: " + reason + if replacement is not None: + message += " A possible replacement is {}.".format(replacement) + if issue is not None: + url = "https://github.com/pypa/pip/issues/" + str(issue) + message += " You can find discussion regarding this at {}.".format(url) + + # Raise as an error if it has to be removed. 
+ if gone_in is not None and parse(current_version) >= parse(gone_in): + raise PipDeprecationWarning(message) + warnings.warn(message, category=PipDeprecationWarning, stacklevel=2) diff --git a/Shared/lib/python3.4/site-packages/pip/utils/encoding.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/encoding.py similarity index 56% rename from Shared/lib/python3.4/site-packages/pip/utils/encoding.py rename to Shared/lib/python3.4/site-packages/pip/_internal/utils/encoding.py index b272a0b..56f6036 100644 --- a/Shared/lib/python3.4/site-packages/pip/utils/encoding.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/encoding.py @@ -1,6 +1,7 @@ import codecs import locale - +import re +import sys BOMS = [ (codecs.BOM_UTF8, 'utf8'), @@ -12,6 +13,8 @@ BOMS = [ (codecs.BOM_UTF32_LE, 'utf32-le'), ] +ENCODING_RE = re.compile(br'coding[:=]\s*([-\w.]+)') + def auto_decode(data): """Check a bytes string for a BOM to correctly detect the encoding @@ -20,4 +23,11 @@ def auto_decode(data): for bom, encoding in BOMS: if data.startswith(bom): return data[len(bom):].decode(encoding) - return data.decode(locale.getpreferredencoding(False)) + # Lets check the first two lines as in PEP263 + for line in data.split(b'\n')[:2]: + if line[0:1] == b'#' and ENCODING_RE.search(line): + encoding = ENCODING_RE.search(line).groups()[0].decode('ascii') + return data.decode(encoding) + return data.decode( + locale.getpreferredencoding(False) or sys.getdefaultencoding(), + ) diff --git a/Shared/lib/python3.4/site-packages/pip/utils/filesystem.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/filesystem.py similarity index 94% rename from Shared/lib/python3.4/site-packages/pip/utils/filesystem.py rename to Shared/lib/python3.4/site-packages/pip/_internal/utils/filesystem.py index 25ad516..1e9cebd 100644 --- a/Shared/lib/python3.4/site-packages/pip/utils/filesystem.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/filesystem.py @@ -1,7 +1,7 @@ import os 
import os.path -from pip.compat import get_path_uid +from pip._internal.utils.compat import get_path_uid def check_path_owner(path): diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/glibc.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/glibc.py new file mode 100644 index 0000000..ebcfc5b --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/glibc.py @@ -0,0 +1,84 @@ +from __future__ import absolute_import + +import ctypes +import re +import warnings + + +def glibc_version_string(): + "Returns glibc version string, or None if not using glibc." + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + process_namespace = ctypes.CDLL(None) + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +# Separated out from have_compatible_glibc for easier unit testing +def check_glibc_version(version_str, required_major, minimum_minor): + # Parse string and check against requested version. + # + # We use a regexp instead of str.split because we want to discard any + # random junk that might come after the minor version -- this might happen + # in patched/forked versions of glibc (e.g. Linaro's version of glibc + # uses version strings like "2.20-2014.11"). See gh-3588. 
+ m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn("Expected glibc version with 2 components major.minor," + " got: %s" % version_str, RuntimeWarning) + return False + return (int(m.group("major")) == required_major and + int(m.group("minor")) >= minimum_minor) + + +def have_compatible_glibc(required_major, minimum_minor): + version_str = glibc_version_string() + if version_str is None: + return False + return check_glibc_version(version_str, required_major, minimum_minor) + + +# platform.libc_ver regularly returns completely nonsensical glibc +# versions. E.g. on my computer, platform says: +# +# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.7') +# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.9') +# +# But the truth is: +# +# ~$ ldd --version +# ldd (Debian GLIBC 2.22-11) 2.22 +# +# This is unfortunate, because it means that the linehaul data on libc +# versions that was generated by pip 8.1.2 and earlier is useless and +# misleading. Solution: instead of using platform, use our code that actually +# works. +def libc_ver(): + """Try to determine the glibc version + + Returns a tuple of strings (lib, version) which default to empty strings + in case the lookup fails. 
+ """ + glibc_version = glibc_version_string() + if glibc_version is None: + return ("", "") + else: + return ("glibc", glibc_version) diff --git a/Shared/lib/python3.4/site-packages/pip/utils/hashes.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/hashes.py similarity index 95% rename from Shared/lib/python3.4/site-packages/pip/utils/hashes.py rename to Shared/lib/python3.4/site-packages/pip/_internal/utils/hashes.py index 9602970..8b909ba 100644 --- a/Shared/lib/python3.4/site-packages/pip/utils/hashes.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/hashes.py @@ -2,10 +2,12 @@ from __future__ import absolute_import import hashlib -from pip.exceptions import HashMismatch, HashMissing, InstallationError -from pip.utils import read_chunks from pip._vendor.six import iteritems, iterkeys, itervalues +from pip._internal.exceptions import ( + HashMismatch, HashMissing, InstallationError, +) +from pip._internal.utils.misc import read_chunks # The recommended hash algo of the moment. Change this whenever the state of # the art changes; it won't hurt backward compatibility. diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/logging.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/logging.py new file mode 100644 index 0000000..d9b9541 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/logging.py @@ -0,0 +1,225 @@ +from __future__ import absolute_import + +import contextlib +import logging +import logging.handlers +import os + +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.misc import ensure_dir + +try: + import threading +except ImportError: + import dummy_threading as threading # type: ignore + + +try: + from pip._vendor import colorama +# Lots of different errors can come from this, including SystemError and +# ImportError. 
+except Exception: + colorama = None + + +_log_state = threading.local() +_log_state.indentation = 0 + + +@contextlib.contextmanager +def indent_log(num=2): + """ + A context manager which will cause the log output to be indented for any + log messages emitted inside it. + """ + _log_state.indentation += num + try: + yield + finally: + _log_state.indentation -= num + + +def get_indentation(): + return getattr(_log_state, 'indentation', 0) + + +class IndentingFormatter(logging.Formatter): + + def format(self, record): + """ + Calls the standard formatter, but will indent all of the log messages + by our current indentation level. + """ + formatted = logging.Formatter.format(self, record) + formatted = "".join([ + (" " * get_indentation()) + line + for line in formatted.splitlines(True) + ]) + return formatted + + +def _color_wrap(*colors): + def wrapped(inp): + return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) + return wrapped + + +class ColorizedStreamHandler(logging.StreamHandler): + + # Don't build up a list of colors if we don't have colorama + if colorama: + COLORS = [ + # This needs to be in order from highest logging level to lowest. 
+ (logging.ERROR, _color_wrap(colorama.Fore.RED)), + (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)), + ] + else: + COLORS = [] + + def __init__(self, stream=None, no_color=None): + logging.StreamHandler.__init__(self, stream) + self._no_color = no_color + + if WINDOWS and colorama: + self.stream = colorama.AnsiToWin32(self.stream) + + def should_color(self): + # Don't colorize things if we do not have colorama or if told not to + if not colorama or self._no_color: + return False + + real_stream = ( + self.stream if not isinstance(self.stream, colorama.AnsiToWin32) + else self.stream.wrapped + ) + + # If the stream is a tty we should color it + if hasattr(real_stream, "isatty") and real_stream.isatty(): + return True + + # If we have an ANSI term we should color it + if os.environ.get("TERM") == "ANSI": + return True + + # If anything else we should not color it + return False + + def format(self, record): + msg = logging.StreamHandler.format(self, record) + + if self.should_color(): + for level, color in self.COLORS: + if record.levelno >= level: + msg = color(msg) + break + + return msg + + +class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): + + def _open(self): + ensure_dir(os.path.dirname(self.baseFilename)) + return logging.handlers.RotatingFileHandler._open(self) + + +class MaxLevelFilter(logging.Filter): + + def __init__(self, level): + self.level = level + + def filter(self, record): + return record.levelno < self.level + + +def setup_logging(verbosity, no_color, user_log_file): + """Configures and sets up all of the logging + """ + + # Determine the level to be logging at. + if verbosity >= 1: + level = "DEBUG" + elif verbosity == -1: + level = "WARNING" + elif verbosity == -2: + level = "ERROR" + elif verbosity <= -3: + level = "CRITICAL" + else: + level = "INFO" + + # The "root" logger should match the "console" level *unless* we also need + # to log to a user log file. 
+ include_user_log = user_log_file is not None + if include_user_log: + additional_log_file = user_log_file + root_level = "DEBUG" + else: + additional_log_file = "/dev/null" + root_level = level + + # Disable any logging besides WARNING unless we have DEBUG level logging + # enabled for vendored libraries. + vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG" + + # Shorthands for clarity + log_streams = { + "stdout": "ext://sys.stdout", + "stderr": "ext://sys.stderr", + } + handler_classes = { + "stream": "pip._internal.utils.logging.ColorizedStreamHandler", + "file": "pip._internal.utils.logging.BetterRotatingFileHandler", + } + + logging.config.dictConfig({ + "version": 1, + "disable_existing_loggers": False, + "filters": { + "exclude_warnings": { + "()": "pip._internal.utils.logging.MaxLevelFilter", + "level": logging.WARNING, + }, + }, + "formatters": { + "indent": { + "()": IndentingFormatter, + "format": "%(message)s", + }, + }, + "handlers": { + "console": { + "level": level, + "class": handler_classes["stream"], + "no_color": no_color, + "stream": log_streams["stdout"], + "filters": ["exclude_warnings"], + "formatter": "indent", + }, + "console_errors": { + "level": "WARNING", + "class": handler_classes["stream"], + "no_color": no_color, + "stream": log_streams["stderr"], + "formatter": "indent", + }, + "user_log": { + "level": "DEBUG", + "class": handler_classes["file"], + "filename": additional_log_file, + "delay": True, + "formatter": "indent", + }, + }, + "root": { + "level": root_level, + "handlers": ["console", "console_errors"] + ( + ["user_log"] if include_user_log else [] + ), + }, + "loggers": { + "pip._vendor": { + "level": vendored_log_level + } + }, + }) diff --git a/Shared/lib/python3.4/site-packages/pip/utils/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/misc.py similarity index 83% rename from Shared/lib/python3.4/site-packages/pip/utils/__init__.py rename to 
Shared/lib/python3.4/site-packages/pip/_internal/utils/misc.py index 8ea2e38..e9e552e 100644 --- a/Shared/lib/python3.4/site-packages/pip/utils/__init__.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/misc.py @@ -1,6 +1,5 @@ from __future__ import absolute_import -from collections import deque import contextlib import errno import io @@ -8,26 +7,33 @@ import locale # we have a submodule named 'logging' which would shadow this if we used the # regular name: import logging as std_logging -import re import os import posixpath +import re import shutil import stat import subprocess import sys import tarfile import zipfile +from collections import deque -from pip.exceptions import InstallationError -from pip.compat import console_to_str, expanduser, stdlib_pkgs -from pip.locations import ( - site_packages, user_site, running_under_virtualenv, virtualenv_no_global, +from pip._vendor import pkg_resources +# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is +# why we ignore the type on this import. 
+from pip._vendor.retrying import retry # type: ignore +from pip._vendor.six import PY2 +from pip._vendor.six.moves import input +from pip._vendor.six.moves.urllib import parse as urllib_parse + +from pip._internal.exceptions import CommandError, InstallationError +from pip._internal.locations import ( + running_under_virtualenv, site_packages, user_site, virtualenv_no_global, write_delete_marker_file, ) -from pip._vendor import pkg_resources -from pip._vendor.six.moves import input -from pip._vendor.six import PY2 -from pip._vendor.retrying import retry +from pip._internal.utils.compat import ( + WINDOWS, console_to_str, expanduser, stdlib_pkgs, +) if PY2: from io import BytesIO as StringIO @@ -40,11 +46,11 @@ __all__ = ['rmtree', 'display_path', 'backup_dir', 'is_svn_page', 'file_contents', 'split_leading_dir', 'has_leading_dir', 'normalize_path', - 'renames', 'get_terminal_size', 'get_prog', + 'renames', 'get_prog', 'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess', - 'captured_stdout', 'remove_tracebacks', 'ensure_dir', + 'captured_stdout', 'ensure_dir', 'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', - 'get_installed_version'] + 'get_installed_version', 'remove_auth_from_url'] logger = std_logging.getLogger(__name__) @@ -88,8 +94,11 @@ def ensure_dir(path): def get_prog(): try: - if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'): + prog = os.path.basename(sys.argv[0]) + if prog in ('__main__.py', '-c'): return "%s -m pip" % sys.executable + else: + return prog except (AttributeError, TypeError, IndexError): pass return 'pip' @@ -178,12 +187,16 @@ def format_size(bytes): def is_installable_dir(path): - """Return True if `path` is a directory containing a setup.py file.""" + """Is path is a directory containing setup.py or pyproject.toml? 
+ """ if not os.path.isdir(path): return False setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): return True + pyproject_toml = os.path.join(path, 'pyproject.toml') + if os.path.isfile(pyproject_toml): + return True return False @@ -296,7 +309,7 @@ def is_local(path): if running_under_virtualenv(): return path.startswith(normalize_path(sys.prefix)) else: - from pip.locations import distutils_scheme + from pip._internal.locations import distutils_scheme if path.startswith(prefix): for local_path in distutils_scheme("").values(): if path.startswith(normalize_path(local_path)): @@ -326,7 +339,7 @@ def dist_in_usersite(dist): def dist_in_site_packages(dist): """ Return True if given Distribution is installed in - distutils.sysconfig.get_python_lib(). + sysconfig.get_python_lib(). """ return normalize_path( dist_location(dist) @@ -356,7 +369,7 @@ def get_installed_distributions(local_only=True, ``skip`` argument is an iterable of lower-case project names to ignore; defaults to stdlib_pkgs - If ``editables`` is False, don't report editables. + If ``include_editables`` is False, don't report editables. If ``editables_only`` is True , only report editables. 
@@ -450,36 +463,6 @@ def dist_location(dist): return dist.location -def get_terminal_size(): - """Returns a tuple (x, y) representing the width(x) and the height(x) - in characters of the terminal window.""" - def ioctl_GWINSZ(fd): - try: - import fcntl - import termios - import struct - cr = struct.unpack( - 'hh', - fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234') - ) - except: - return None - if cr == (0, 0): - return None - return cr - cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - cr = ioctl_GWINSZ(fd) - os.close(fd) - except: - pass - if not cr: - cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80)) - return int(cr[1]), int(cr[0]) - - def current_umask(): """Get the current umask which involves having to set it temporarily.""" mask = os.umask(0) @@ -624,7 +607,7 @@ def unpack_file(filename, location, content_type, link): elif (content_type and content_type.startswith('text/html') and is_svn_page(file_contents(filename))): # We don't really care about this - from pip.vcs.subversion import Subversion + from pip._internal.vcs.subversion import Subversion Subversion('svn+' + link.url).unpack(location) else: # FIXME: handle? @@ -639,21 +622,17 @@ def unpack_file(filename, location, content_type, link): ) -def remove_tracebacks(output): - pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?' 
- r'Syntax(?:Error|Warning): (?:.*)') - output = re.sub(pattern, '', output) - if PY2: - return output - # compileall.compile_dir() prints different messages to stdout - # in Python 3 - return re.sub(r"\*\*\* Error compiling (?:.*)", '', output) - - def call_subprocess(cmd, show_stdout=True, cwd=None, on_returncode='raise', - command_level=std_logging.DEBUG, command_desc=None, - extra_environ=None, spinner=None): + command_desc=None, + extra_environ=None, unset_environ=None, spinner=None): + """ + Args: + unset_environ: an iterable of environment variable names to unset + prior to calling subprocess.Popen(). + """ + if unset_environ is None: + unset_environ = [] # This function's handling of subprocess output is confusing and I # previously broke it terribly, so as penance I will write a long comment # explaining things. @@ -686,21 +665,25 @@ def call_subprocess(cmd, show_stdout=True, cwd=None, part = '"%s"' % part.replace('"', '\\"') cmd_parts.append(part) command_desc = ' '.join(cmd_parts) - logger.log(command_level, "Running command %s", command_desc) + logger.debug("Running command %s", command_desc) env = os.environ.copy() if extra_environ: env.update(extra_environ) + for name in unset_environ: + env.pop(name, None) try: proc = subprocess.Popen( - cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout, - cwd=cwd, env=env) + cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, + stdout=stdout, cwd=cwd, env=env, + ) + proc.stdin.close() except Exception as exc: logger.critical( "Error %s while executing command %s", exc, command_desc, ) raise + all_output = [] if stdout is not None: - all_output = [] while True: line = console_to_str(proc.stdout.readline()) if not line: @@ -714,7 +697,11 @@ def call_subprocess(cmd, show_stdout=True, cwd=None, # Update the spinner if spinner is not None: spinner.spin() - proc.wait() + try: + proc.wait() + finally: + if proc.stdout: + proc.stdout.close() if spinner is not None: if proc.returncode: spinner.finish("error") @@ 
-745,7 +732,7 @@ def call_subprocess(cmd, show_stdout=True, cwd=None, raise ValueError('Invalid value: on_returncode=%s' % repr(on_returncode)) if not show_stdout: - return remove_tracebacks(''.join(all_output)) + return ''.join(all_output) def read_text_file(filename): @@ -856,14 +843,15 @@ class cached_property(object): return value -def get_installed_version(dist_name): +def get_installed_version(dist_name, working_set=None): """Get the installed version of dist_name avoiding pkg_resources cache""" # Create a requirement that we'll look for inside of setuptools. req = pkg_resources.Requirement.parse(dist_name) - # We want to avoid having this cached, so we need to construct a new - # working set each time. - working_set = pkg_resources.WorkingSet() + if working_set is None: + # We want to avoid having this cached, so we need to construct a new + # working set each time. + working_set = pkg_resources.WorkingSet() # Get the installed distribution from our working set dist = working_set.find(req) @@ -876,3 +864,95 @@ def get_installed_version(dist_name): def consume(iterator): """Consume an iterable at C speed.""" deque(iterator, maxlen=0) + + +# Simulates an enum +def enum(*sequential, **named): + enums = dict(zip(sequential, range(len(sequential))), **named) + reverse = {value: key for key, value in enums.items()} + enums['reverse_mapping'] = reverse + return type('Enum', (), enums) + + +def make_vcs_requirement_url(repo_url, rev, egg_project_name, subdir=None): + """ + Return the URL for a VCS requirement. + + Args: + repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+"). + """ + req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name) + if subdir: + req += '&subdirectory={}'.format(subdir) + + return req + + +def split_auth_from_netloc(netloc): + """ + Parse out and remove the auth information from a netloc. + + Returns: (netloc, (username, password)). 
+ """ + if '@' not in netloc: + return netloc, (None, None) + + # Split from the right because that's how urllib.parse.urlsplit() + # behaves if more than one @ is present (which can be checked using + # the password attribute of urlsplit()'s return value). + auth, netloc = netloc.rsplit('@', 1) + if ':' in auth: + # Split from the left because that's how urllib.parse.urlsplit() + # behaves if more than one : is present (which again can be checked + # using the password attribute of the return value) + user_pass = tuple(auth.split(':', 1)) + else: + user_pass = auth, None + + return netloc, user_pass + + +def remove_auth_from_url(url): + # Return a copy of url with 'username:password@' removed. + # username/pass params are passed to subversion through flags + # and are not recognized in the url. + + # parsed url + purl = urllib_parse.urlsplit(url) + netloc, user_pass = split_auth_from_netloc(purl.netloc) + + # stripped url + url_pieces = ( + purl.scheme, netloc, purl.path, purl.query, purl.fragment + ) + surl = urllib_parse.urlunsplit(url_pieces) + return surl + + +def protect_pip_from_modification_on_windows(modifying_pip): + """Protection of pip.exe from modification on Windows + + On Windows, any operation modifying pip should be run as: + python -m pip ... 
+ """ + pip_names = [ + "pip.exe", + "pip{}.exe".format(sys.version_info[0]), + "pip{}.{}.exe".format(*sys.version_info[:2]) + ] + + # See https://github.com/pypa/pip/issues/1299 for more discussion + should_show_use_python_msg = ( + modifying_pip and + WINDOWS and + os.path.basename(sys.argv[0]) in pip_names + ) + + if should_show_use_python_msg: + new_command = [ + sys.executable, "-m", "pip" + ] + sys.argv[1:] + raise CommandError( + 'To modify pip, please run the following command:\n{}' + .format(" ".join(new_command)) + ) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/models.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/models.py new file mode 100644 index 0000000..d5cb80a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/models.py @@ -0,0 +1,40 @@ +"""Utilities for defining models +""" + +import operator + + +class KeyBasedCompareMixin(object): + """Provides comparision capabilities that is based on a key + """ + + def __init__(self, key, defining_class): + self._compare_key = key + self._defining_class = defining_class + + def __hash__(self): + return hash(self._compare_key) + + def __lt__(self, other): + return self._compare(other, operator.__lt__) + + def __le__(self, other): + return self._compare(other, operator.__le__) + + def __gt__(self, other): + return self._compare(other, operator.__gt__) + + def __ge__(self, other): + return self._compare(other, operator.__ge__) + + def __eq__(self, other): + return self._compare(other, operator.__eq__) + + def __ne__(self, other): + return self._compare(other, operator.__ne__) + + def _compare(self, other, method): + if not isinstance(other, self._defining_class): + return NotImplemented + + return method(self._compare_key, other._compare_key) diff --git a/Shared/lib/python3.4/site-packages/pip/utils/outdated.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/outdated.py similarity index 56% rename from 
Shared/lib/python3.4/site-packages/pip/utils/outdated.py rename to Shared/lib/python3.4/site-packages/pip/_internal/utils/outdated.py index 2164cc3..5bfbfe1 100644 --- a/Shared/lib/python3.4/site-packages/pip/utils/outdated.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/outdated.py @@ -6,15 +6,13 @@ import logging import os.path import sys -from pip._vendor import lockfile +from pip._vendor import lockfile, pkg_resources from pip._vendor.packaging import version as packaging_version -from pip.compat import total_seconds, WINDOWS -from pip.models import PyPI -from pip.locations import USER_CACHE_DIR, running_under_virtualenv -from pip.utils import ensure_dir, get_installed_version -from pip.utils.filesystem import check_path_owner - +from pip._internal.index import PackageFinder +from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.filesystem import check_path_owner +from pip._internal.utils.misc import ensure_dir, get_installed_version SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" @@ -22,43 +20,27 @@ SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" logger = logging.getLogger(__name__) -class VirtualenvSelfCheckState(object): - def __init__(self): - self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json") +class SelfCheckState(object): + def __init__(self, cache_dir): + self.state = {} + self.statefile_path = None - # Load the existing state - try: - with open(self.statefile_path) as statefile: - self.state = json.load(statefile) - except (IOError, ValueError): - self.state = {} + # Try to load the existing state + if cache_dir: + self.statefile_path = os.path.join(cache_dir, "selfcheck.json") + try: + with open(self.statefile_path) as statefile: + self.state = json.load(statefile)[sys.prefix] + except (IOError, ValueError, KeyError): + # Explicitly suppressing exceptions, since we don't want to + # error out if the cache file is invalid. 
+ pass def save(self, pypi_version, current_time): - # Attempt to write out our version check file - with open(self.statefile_path, "w") as statefile: - json.dump( - { - "last_check": current_time.strftime(SELFCHECK_DATE_FMT), - "pypi_version": pypi_version, - }, - statefile, - sort_keys=True, - separators=(",", ":") - ) + # If we do not have a path to cache in, don't bother saving. + if not self.statefile_path: + return - -class GlobalSelfCheckState(object): - def __init__(self): - self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json") - - # Load the existing state - try: - with open(self.statefile_path) as statefile: - self.state = json.load(statefile)[sys.prefix] - except (IOError, ValueError, KeyError): - self.state = {} - - def save(self, pypi_version, current_time): # Check to make sure that we own the directory if not check_path_owner(os.path.dirname(self.statefile_path)): return @@ -85,14 +67,21 @@ class GlobalSelfCheckState(object): separators=(",", ":")) -def load_selfcheck_statefile(): - if running_under_virtualenv(): - return VirtualenvSelfCheckState() - else: - return GlobalSelfCheckState() +def was_installed_by_pip(pkg): + """Checks whether pkg was installed by pip + + This is used not to display the upgrade message when pip is in fact + installed by system package manager, such as dnf on Fedora. + """ + try: + dist = pkg_resources.get_distribution(pkg) + return (dist.has_metadata('INSTALLER') and + 'pip' in dist.get_metadata_lines('INSTALLER')) + except pkg_resources.DistributionNotFound: + return False -def pip_version_check(session): +def pip_version_check(session, options): """Check for an update for pip. Limit the frequency of checks to once per week. State is stored either in @@ -100,14 +89,14 @@ def pip_version_check(session): of the pip script path. 
""" installed_version = get_installed_version("pip") - if installed_version is None: + if not installed_version: return pip_version = packaging_version.parse(installed_version) pypi_version = None try: - state = load_selfcheck_statefile() + state = SelfCheckState(cache_dir=options.cache_dir) current_time = datetime.datetime.utcnow() # Determine if we need to refresh the state @@ -116,23 +105,26 @@ def pip_version_check(session): state.state["last_check"], SELFCHECK_DATE_FMT ) - if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60: + if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60: pypi_version = state.state["pypi_version"] # Refresh the version if we need to or just see if we need to warn if pypi_version is None: - resp = session.get( - PyPI.pip_json_url, - headers={"Accept": "application/json"}, + # Lets use PackageFinder to see what the latest pip version is + finder = PackageFinder( + find_links=options.find_links, + index_urls=[options.index_url] + options.extra_index_urls, + allow_all_prereleases=False, # Explicitly set to False + trusted_hosts=options.trusted_hosts, + process_dependency_links=options.process_dependency_links, + session=session, + ) + all_candidates = finder.find_all_candidates("pip") + if not all_candidates: + return + pypi_version = str( + max(all_candidates, key=lambda c: c.version).version ) - resp.raise_for_status() - pypi_version = [ - v for v in sorted( - list(resp.json()["releases"]), - key=packaging_version.parse, - ) - if not packaging_version.parse(v).is_prerelease - ][-1] # save that we've performed a check state.save(pypi_version, current_time) @@ -141,7 +133,8 @@ def pip_version_check(session): # Determine if our pypi_version is older if (pip_version < remote_version and - pip_version.base_version != remote_version.base_version): + pip_version.base_version != remote_version.base_version and + was_installed_by_pip('pip')): # Advise "python -m pip" on Windows to avoid issues # with overwriting pip.exe. 
if WINDOWS: @@ -154,7 +147,6 @@ def pip_version_check(session): "'%s install --upgrade pip' command.", pip_version, pypi_version, pip_cmd ) - except Exception: logger.debug( "There was an error checking the latest version of pip", diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/packaging.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/packaging.py new file mode 100644 index 0000000..c43142f --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/packaging.py @@ -0,0 +1,75 @@ +from __future__ import absolute_import + +import logging +import sys +from email.parser import FeedParser # type: ignore + +from pip._vendor import pkg_resources +from pip._vendor.packaging import specifiers, version + +from pip._internal import exceptions +from pip._internal.utils.misc import display_path + +logger = logging.getLogger(__name__) + + +def check_requires_python(requires_python): + """ + Check if the python version in use match the `requires_python` specifier. + + Returns `True` if the version of python in use matches the requirement. + Returns `False` if the version of python in use does not matches the + requirement. + + Raises an InvalidSpecifier if `requires_python` have an invalid format. 
+ """ + if requires_python is None: + # The package provides no information + return True + requires_python_specifier = specifiers.SpecifierSet(requires_python) + + # We only use major.minor.micro + python_version = version.parse('.'.join(map(str, sys.version_info[:3]))) + return python_version in requires_python_specifier + + +def get_metadata(dist): + if (isinstance(dist, pkg_resources.DistInfoDistribution) and + dist.has_metadata('METADATA')): + metadata = dist.get_metadata('METADATA') + elif dist.has_metadata('PKG-INFO'): + metadata = dist.get_metadata('PKG-INFO') + else: + logger.warning("No metadata found in %s", display_path(dist.location)) + metadata = '' + + feed_parser = FeedParser() + feed_parser.feed(metadata) + return feed_parser.close() + + +def check_dist_requires_python(dist): + pkg_info_dict = get_metadata(dist) + requires_python = pkg_info_dict.get('Requires-Python') + try: + if not check_requires_python(requires_python): + raise exceptions.UnsupportedPythonVersion( + "%s requires Python '%s' but the running Python is %s" % ( + dist.project_name, + requires_python, + '.'.join(map(str, sys.version_info[:3])),) + ) + except specifiers.InvalidSpecifier as e: + logger.warning( + "Package %s has an invalid Requires-Python entry %s - %s", + dist.project_name, requires_python, e, + ) + return + + +def get_installer(dist): + if dist.has_metadata('INSTALLER'): + for line in dist.get_metadata_lines('INSTALLER'): + if line.strip(): + return line.strip() + return '' diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/setuptools_build.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/setuptools_build.py new file mode 100644 index 0000000..03973e9 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/setuptools_build.py @@ -0,0 +1,8 @@ +# Shim to wrap setup.py invocation with setuptools +SETUPTOOLS_SHIM = ( + "import setuptools, tokenize;__file__=%r;" + "f=getattr(tokenize, 'open', open)(__file__);" + 
"code=f.read().replace('\\r\\n', '\\n');" + "f.close();" + "exec(compile(code, __file__, 'exec'))" +) diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/temp_dir.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/temp_dir.py new file mode 100644 index 0000000..edc506b --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/temp_dir.py @@ -0,0 +1,82 @@ +from __future__ import absolute_import + +import logging +import os.path +import tempfile + +from pip._internal.utils.misc import rmtree + +logger = logging.getLogger(__name__) + + +class TempDirectory(object): + """Helper class that owns and cleans up a temporary directory. + + This class can be used as a context manager or as an OO representation of a + temporary directory. + + Attributes: + path + Location to the created temporary directory or None + delete + Whether the directory should be deleted when exiting + (when used as a contextmanager) + + Methods: + create() + Creates a temporary directory and stores its path in the path + attribute. + cleanup() + Deletes the temporary directory and sets path attribute to None + + When used as a context manager, a temporary directory is created on + entering the context and, if the delete attribute is True, on exiting the + context the created directory is deleted. + """ + + def __init__(self, path=None, delete=None, kind="temp"): + super(TempDirectory, self).__init__() + + if path is None and delete is None: + # If we were not given an explicit directory, and we were not given + # an explicit delete option, then we'll default to deleting. 
+ delete = True + + self.path = path + self.delete = delete + self.kind = kind + + def __repr__(self): + return "<{} {!r}>".format(self.__class__.__name__, self.path) + + def __enter__(self): + self.create() + return self + + def __exit__(self, exc, value, tb): + if self.delete: + self.cleanup() + + def create(self): + """Create a temporary directory and store it's path in self.path + """ + if self.path is not None: + logger.debug( + "Skipped creation of temporary directory: {}".format(self.path) + ) + return + # We realpath here because some systems have their default tmpdir + # symlinked to another directory. This tends to confuse build + # scripts, so we canonicalize the path by traversing potential + # symlinks here. + self.path = os.path.realpath( + tempfile.mkdtemp(prefix="pip-{}-".format(self.kind)) + ) + logger.debug("Created temporary directory: {}".format(self.path)) + + def cleanup(self): + """Remove the temporary directory created and reset state + """ + if self.path is not None and os.path.exists(self.path): + rmtree(self.path) + self.path = None diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/utils/typing.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/typing.py new file mode 100644 index 0000000..e085cdf --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/typing.py @@ -0,0 +1,29 @@ +"""For neatly implementing static typing in pip. + +`mypy` - the static type analysis tool we use - uses the `typing` module, which +provides core functionality fundamental to mypy's functioning. + +Generally, `typing` would be imported at runtime and used in that fashion - +it acts as a no-op at runtime and does not have any run-time overhead by +design. + +As it turns out, `typing` is not vendorable - it uses separate sources for +Python 2/Python 3. Thus, this codebase can not expect it to be present. 
+To work around this, mypy allows the typing import to be behind a False-y +optional to prevent it from running at runtime and type-comments can be used +to remove the need for the types to be accessible directly during runtime. + +This module provides the False-y guard in a nicely named fashion so that a +curious maintainer can reach here to read this. + +In pip, all static-typing related imports should be guarded as follows: + + from pip._internal.utils.typing import MYPY_CHECK_RUNNING + + if MYPY_CHECK_RUNNING: + from typing import ... # noqa: F401 + +Ref: https://github.com/python/mypy/issues/3216 +""" + +MYPY_CHECK_RUNNING = False diff --git a/Shared/lib/python3.4/site-packages/pip/utils/ui.py b/Shared/lib/python3.4/site-packages/pip/_internal/utils/ui.py similarity index 80% rename from Shared/lib/python3.4/site-packages/pip/utils/ui.py rename to Shared/lib/python3.4/site-packages/pip/_internal/utils/ui.py index bba73e3..6bab904 100644 --- a/Shared/lib/python3.4/site-packages/pip/utils/ui.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/utils/ui.py @@ -1,22 +1,28 @@ -from __future__ import absolute_import -from __future__ import division +from __future__ import absolute_import, division -import itertools -import sys -from signal import signal, SIGINT, default_int_handler -import time import contextlib +import itertools import logging +import sys +import time +from signal import SIGINT, default_int_handler, signal -from pip.compat import WINDOWS -from pip.utils import format_size -from pip.utils.logging import get_indentation from pip._vendor import six -from pip._vendor.progress.bar import Bar, IncrementalBar -from pip._vendor.progress.helpers import (WritelnMixin, - HIDE_CURSOR, SHOW_CURSOR) +from pip._vendor.progress.bar import ( + Bar, ChargingBar, FillingCirclesBar, FillingSquaresBar, IncrementalBar, + ShadyBar, +) +from pip._vendor.progress.helpers import HIDE_CURSOR, SHOW_CURSOR, WritelnMixin from pip._vendor.progress.spinner import Spinner 
+from pip._internal.utils.compat import WINDOWS +from pip._internal.utils.logging import get_indentation +from pip._internal.utils.misc import format_size +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Any # noqa: F401 + try: from pip._vendor import colorama # Lots of different errors can come from this, including SystemError and @@ -54,7 +60,7 @@ def _select_progress_class(preferred, fallback): return preferred -_BaseBar = _select_progress_class(IncrementalBar, Bar) +_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any class InterruptibleMixin(object): @@ -112,6 +118,20 @@ class InterruptibleMixin(object): self.original_handler(signum, frame) +class SilentBar(Bar): + + def update(self): + pass + + +class BlueEmojiBar(IncrementalBar): + + suffix = "%(percent)d%%" + bar_prefix = " " + bar_suffix = " " + phases = (u"\U0001F539", u"\U0001F537", u"\U0001F535") # type: Any + + class DownloadProgressMixin(object): def __init__(self, *args, **kwargs): @@ -171,13 +191,54 @@ class WindowsMixin(object): self.file.flush = lambda: self.file.wrapped.flush() -class DownloadProgressBar(WindowsMixin, InterruptibleMixin, - DownloadProgressMixin, _BaseBar): +class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, + DownloadProgressMixin): file = sys.stdout message = "%(percent)d%%" suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" +# NOTE: The "type: ignore" comments on the following classes are there to +# work around https://github.com/python/typing/issues/241 + + +class DefaultDownloadProgressBar(BaseDownloadProgressBar, + _BaseBar): # type: ignore + pass + + +class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): # type: ignore + pass + + +class DownloadIncrementalBar(BaseDownloadProgressBar, # type: ignore + IncrementalBar): + pass + + +class DownloadChargingBar(BaseDownloadProgressBar, # type: ignore + ChargingBar): + pass + + +class DownloadShadyBar(BaseDownloadProgressBar, 
ShadyBar): # type: ignore + pass + + +class DownloadFillingSquaresBar(BaseDownloadProgressBar, # type: ignore + FillingSquaresBar): + pass + + +class DownloadFillingCirclesBar(BaseDownloadProgressBar, # type: ignore + FillingCirclesBar): + pass + + +class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, # type: ignore + BlueEmojiBar): + pass + class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin, DownloadProgressMixin, WritelnMixin, Spinner): @@ -205,6 +266,22 @@ class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin, self.writeln(line) +BAR_TYPES = { + "off": (DownloadSilentBar, DownloadSilentBar), + "on": (DefaultDownloadProgressBar, DownloadProgressSpinner), + "ascii": (DownloadIncrementalBar, DownloadProgressSpinner), + "pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner), + "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner) +} + + +def DownloadProgressProvider(progress_bar, max=None): + if max is None or max == 0: + return BAR_TYPES[progress_bar][1]().iter + else: + return BAR_TYPES[progress_bar][0](max=max).iter + + ################################################################ # Generic "something is happening" spinners # diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/vcs/__init__.py b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/__init__.py new file mode 100644 index 0000000..794b35d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/__init__.py @@ -0,0 +1,509 @@ +"""Handles all VCS (version control) support""" +from __future__ import absolute_import + +import errno +import logging +import os +import shutil +import sys + +from pip._vendor.six.moves.urllib import parse as urllib_parse + +from pip._internal.exceptions import BadCommand +from pip._internal.utils.misc import ( + display_path, backup_dir, call_subprocess, rmtree, ask_path_exists, +) +from pip._internal.utils.typing import MYPY_CHECK_RUNNING + +if MYPY_CHECK_RUNNING: + from typing import Dict, Optional, 
Tuple # noqa: F401 + from pip._internal.cli.base_command import Command # noqa: F401 + +__all__ = ['vcs', 'get_src_requirement'] + + +logger = logging.getLogger(__name__) + + +class RevOptions(object): + + """ + Encapsulates a VCS-specific revision to install, along with any VCS + install options. + + Instances of this class should be treated as if immutable. + """ + + def __init__(self, vcs, rev=None, extra_args=None): + """ + Args: + vcs: a VersionControl object. + rev: the name of the revision to install. + extra_args: a list of extra options. + """ + if extra_args is None: + extra_args = [] + + self.extra_args = extra_args + self.rev = rev + self.vcs = vcs + + def __repr__(self): + return ''.format(self.vcs.name, self.rev) + + @property + def arg_rev(self): + if self.rev is None: + return self.vcs.default_arg_rev + + return self.rev + + def to_args(self): + """ + Return the VCS-specific command arguments. + """ + args = [] + rev = self.arg_rev + if rev is not None: + args += self.vcs.get_base_rev_args(rev) + args += self.extra_args + + return args + + def to_display(self): + if not self.rev: + return '' + + return ' (to revision {})'.format(self.rev) + + def make_new(self, rev): + """ + Make a copy of the current instance, but with a new rev. + + Args: + rev: the name of the revision for the new object. 
+ """ + return self.vcs.make_rev_options(rev, extra_args=self.extra_args) + + +class VcsSupport(object): + _registry = {} # type: Dict[str, Command] + schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] + + def __init__(self): + # Register more schemes with urlparse for various version control + # systems + urllib_parse.uses_netloc.extend(self.schemes) + # Python >= 2.7.4, 3.3 doesn't have uses_fragment + if getattr(urllib_parse, 'uses_fragment', None): + urllib_parse.uses_fragment.extend(self.schemes) + super(VcsSupport, self).__init__() + + def __iter__(self): + return self._registry.__iter__() + + @property + def backends(self): + return list(self._registry.values()) + + @property + def dirnames(self): + return [backend.dirname for backend in self.backends] + + @property + def all_schemes(self): + schemes = [] + for backend in self.backends: + schemes.extend(backend.schemes) + return schemes + + def register(self, cls): + if not hasattr(cls, 'name'): + logger.warning('Cannot register VCS %s', cls.__name__) + return + if cls.name not in self._registry: + self._registry[cls.name] = cls + logger.debug('Registered VCS backend: %s', cls.name) + + def unregister(self, cls=None, name=None): + if name in self._registry: + del self._registry[name] + elif cls in self._registry.values(): + del self._registry[cls.name] + else: + logger.warning('Cannot unregister because no class or name given') + + def get_backend_name(self, location): + """ + Return the name of the version control backend if found at given + location, e.g. 
vcs.get_backend_name('/path/to/vcs/checkout') + """ + for vc_type in self._registry.values(): + if vc_type.controls_location(location): + logger.debug('Determine that %s uses VCS: %s', + location, vc_type.name) + return vc_type.name + return None + + def get_backend(self, name): + name = name.lower() + if name in self._registry: + return self._registry[name] + + def get_backend_from_location(self, location): + vc_type = self.get_backend_name(location) + if vc_type: + return self.get_backend(vc_type) + return None + + +vcs = VcsSupport() + + +class VersionControl(object): + name = '' + dirname = '' + # List of supported schemes for this Version Control + schemes = () # type: Tuple[str, ...] + # Iterable of environment variable names to pass to call_subprocess(). + unset_environ = () # type: Tuple[str, ...] + default_arg_rev = None # type: Optional[str] + + def __init__(self, url=None, *args, **kwargs): + self.url = url + super(VersionControl, self).__init__(*args, **kwargs) + + def get_base_rev_args(self, rev): + """ + Return the base revision arguments for a vcs command. + + Args: + rev: the name of a revision to install. Cannot be None. + """ + raise NotImplementedError + + def make_rev_options(self, rev=None, extra_args=None): + """ + Return a RevOptions object. + + Args: + rev: the name of a revision to install. + extra_args: a list of extra options. + """ + return RevOptions(self, rev, extra_args=extra_args) + + def _is_local_repository(self, repo): + """ + posix absolute paths start with os.path.sep, + win32 ones start with drive (like c:\\folder) + """ + drive, tail = os.path.splitdrive(repo) + return repo.startswith(os.path.sep) or drive + + def export(self, location): + """ + Export the repository at the url to the destination location + i.e. 
only download the files, without vcs informations + """ + raise NotImplementedError + + def get_netloc_and_auth(self, netloc, scheme): + """ + Parse the repository URL's netloc, and return the new netloc to use + along with auth information. + + Args: + netloc: the original repository URL netloc. + scheme: the repository URL's scheme without the vcs prefix. + + This is mainly for the Subversion class to override, so that auth + information can be provided via the --username and --password options + instead of through the URL. For other subclasses like Git without + such an option, auth information must stay in the URL. + + Returns: (netloc, (username, password)). + """ + return netloc, (None, None) + + def get_url_rev_and_auth(self, url): + """ + Parse the repository URL to use, and return the URL, revision, + and auth info to use. + + Returns: (url, rev, (username, password)). + """ + scheme, netloc, path, query, frag = urllib_parse.urlsplit(url) + if '+' not in scheme: + raise ValueError( + "Sorry, {!r} is a malformed VCS url. " + "The format is +://, " + "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url) + ) + # Remove the vcs prefix. + scheme = scheme.split('+', 1)[1] + netloc, user_pass = self.get_netloc_and_auth(netloc, scheme) + rev = None + if '@' in path: + path, rev = path.rsplit('@', 1) + url = urllib_parse.urlunsplit((scheme, netloc, path, query, '')) + return url, rev, user_pass + + def make_rev_args(self, username, password): + """ + Return the RevOptions "extra arguments" to use in obtain(). + """ + return [] + + def get_url_rev_options(self, url): + """ + Return the URL and RevOptions object to use in obtain() and in + some cases export(), as a tuple (url, rev_options). 
+ """ + url, rev, user_pass = self.get_url_rev_and_auth(url) + username, password = user_pass + extra_args = self.make_rev_args(username, password) + rev_options = self.make_rev_options(rev, extra_args=extra_args) + + return url, rev_options + + def normalize_url(self, url): + """ + Normalize a URL for comparison by unquoting it and removing any + trailing slash. + """ + return urllib_parse.unquote(url).rstrip('/') + + def compare_urls(self, url1, url2): + """ + Compare two repo URLs for identity, ignoring incidental differences. + """ + return (self.normalize_url(url1) == self.normalize_url(url2)) + + def fetch_new(self, dest, url, rev_options): + """ + Fetch a revision from a repository, in the case that this is the + first fetch from the repository. + + Args: + dest: the directory to fetch the repository to. + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def switch(self, dest, url, rev_options): + """ + Switch the repo at ``dest`` to point to ``URL``. + + Args: + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def update(self, dest, url, rev_options): + """ + Update an already-existing repo to the given ``rev_options``. + + Args: + rev_options: a RevOptions object. + """ + raise NotImplementedError + + def is_commit_id_equal(self, dest, name): + """ + Return whether the id of the current commit equals the given name. + + Args: + dest: the repository directory. + name: a string name. + """ + raise NotImplementedError + + def obtain(self, dest): + """ + Install or update in editable mode the package represented by this + VersionControl object. + + Args: + dest: the repository directory in which to install or update. 
+ """ + url, rev_options = self.get_url_rev_options(self.url) + + if not os.path.exists(dest): + self.fetch_new(dest, url, rev_options) + return + + rev_display = rev_options.to_display() + if self.is_repository_directory(dest): + existing_url = self.get_url(dest) + if self.compare_urls(existing_url, url): + logger.debug( + '%s in %s exists, and has correct URL (%s)', + self.repo_name.title(), + display_path(dest), + url, + ) + if not self.is_commit_id_equal(dest, rev_options.rev): + logger.info( + 'Updating %s %s%s', + display_path(dest), + self.repo_name, + rev_display, + ) + self.update(dest, url, rev_options) + else: + logger.info('Skipping because already up-to-date.') + return + + logger.warning( + '%s %s in %s exists with URL %s', + self.name, + self.repo_name, + display_path(dest), + existing_url, + ) + prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', + ('s', 'i', 'w', 'b')) + else: + logger.warning( + 'Directory %s already exists, and is not a %s %s.', + dest, + self.name, + self.repo_name, + ) + prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b')) + + logger.warning( + 'The plan is to install the %s repository %s', + self.name, + url, + ) + response = ask_path_exists('What to do? %s' % prompt[0], prompt[1]) + + if response == 'a': + sys.exit(-1) + + if response == 'w': + logger.warning('Deleting %s', display_path(dest)) + rmtree(dest) + self.fetch_new(dest, url, rev_options) + return + + if response == 'b': + dest_dir = backup_dir(dest) + logger.warning( + 'Backing up %s to %s', display_path(dest), dest_dir, + ) + shutil.move(dest, dest_dir) + self.fetch_new(dest, url, rev_options) + return + + # Do nothing if the response is "i". 
+ if response == 's': + logger.info( + 'Switching %s %s to %s%s', + self.repo_name, + display_path(dest), + url, + rev_display, + ) + self.switch(dest, url, rev_options) + + def unpack(self, location): + """ + Clean up current location and download the url repository + (and vcs infos) into location + """ + if os.path.exists(location): + rmtree(location) + self.obtain(location) + + def get_src_requirement(self, dist, location): + """ + Return a string representing the requirement needed to + redownload the files currently present in location, something + like: + {repository_url}@{revision}#egg={project_name}-{version_identifier} + """ + raise NotImplementedError + + def get_url(self, location): + """ + Return the url used at location + """ + raise NotImplementedError + + def get_revision(self, location): + """ + Return the current commit id of the files at the given location. + """ + raise NotImplementedError + + def run_command(self, cmd, show_stdout=True, cwd=None, + on_returncode='raise', + command_desc=None, + extra_environ=None, spinner=None): + """ + Run a VCS subcommand + This is simply a wrapper around call_subprocess that adds the VCS + command name, and checks that the VCS is available + """ + cmd = [self.name] + cmd + try: + return call_subprocess(cmd, show_stdout, cwd, + on_returncode, + command_desc, extra_environ, + unset_environ=self.unset_environ, + spinner=spinner) + except OSError as e: + # errno.ENOENT = no such file or directory + # In other words, the VCS executable isn't available + if e.errno == errno.ENOENT: + raise BadCommand( + 'Cannot find command %r - do you have ' + '%r installed and in your ' + 'PATH?' % (self.name, self.name)) + else: + raise # re-raise exception if a different error occurred + + @classmethod + def is_repository_directory(cls, path): + """ + Return whether a directory path is a repository directory. 
+ """ + logger.debug('Checking in %s for %s (%s)...', + path, cls.dirname, cls.name) + return os.path.exists(os.path.join(path, cls.dirname)) + + @classmethod + def controls_location(cls, location): + """ + Check if a location is controlled by the vcs. + It is meant to be overridden to implement smarter detection + mechanisms for specific vcs. + + This can do more than is_repository_directory() alone. For example, + the Git override checks that Git is actually available. + """ + return cls.is_repository_directory(location) + + +def get_src_requirement(dist, location): + version_control = vcs.get_backend_from_location(location) + if version_control: + try: + return version_control().get_src_requirement(dist, + location) + except BadCommand: + logger.warning( + 'cannot determine version of editable source in %s ' + '(%s command not found in path)', + location, + version_control.name, + ) + return dist.as_requirement() + logger.warning( + 'cannot determine version of editable source in %s (is not SVN ' + 'checkout, Git clone, Mercurial clone or Bazaar branch)', + location, + ) + return dist.as_requirement() diff --git a/Shared/lib/python3.4/site-packages/pip/vcs/bazaar.py b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/bazaar.py similarity index 55% rename from Shared/lib/python3.4/site-packages/pip/vcs/bazaar.py rename to Shared/lib/python3.4/site-packages/pip/_internal/vcs/bazaar.py index 0f09584..3cc66c9 100644 --- a/Shared/lib/python3.4/site-packages/pip/vcs/bazaar.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/bazaar.py @@ -2,18 +2,15 @@ from __future__ import absolute_import import logging import os -import tempfile -# TODO: Get this into six.moves.urllib.parse -try: - from urllib import parse as urllib_parse -except ImportError: - import urlparse as urllib_parse - -from pip.utils import rmtree, display_path -from pip.vcs import vcs, VersionControl -from pip.download import path_to_url +from pip._vendor.six.moves.urllib import parse as 
urllib_parse +from pip._internal.download import path_to_url +from pip._internal.utils.misc import ( + display_path, make_vcs_requirement_url, rmtree, +) +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.vcs import VersionControl, vcs logger = logging.getLogger(__name__) @@ -29,56 +26,54 @@ class Bazaar(VersionControl): def __init__(self, url=None, *args, **kwargs): super(Bazaar, self).__init__(url, *args, **kwargs) - # Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical + # This is only needed for python <2.7.5 # Register lp but do not expose as a scheme to support bzr+lp. if getattr(urllib_parse, 'uses_fragment', None): urllib_parse.uses_fragment.extend(['lp']) - urllib_parse.non_hierarchical.extend(['lp']) + + def get_base_rev_args(self, rev): + return ['-r', rev] def export(self, location): """ Export the Bazaar repository at the url to the destination location """ - temp_dir = tempfile.mkdtemp('-export', 'pip-') - self.unpack(temp_dir) + # Remove the location to make sure Bazaar can export it correctly if os.path.exists(location): - # Remove the location to make sure Bazaar can export it correctly rmtree(location) - try: - self.run_command(['export', location], cwd=temp_dir, - show_stdout=False) - finally: - rmtree(temp_dir) + + with TempDirectory(kind="export") as temp_dir: + self.unpack(temp_dir.path) + + self.run_command( + ['export', location], + cwd=temp_dir.path, show_stdout=False, + ) + + def fetch_new(self, dest, url, rev_options): + rev_display = rev_options.to_display() + logger.info( + 'Checking out %s%s to %s', + url, + rev_display, + display_path(dest), + ) + cmd_args = ['branch', '-q'] + rev_options.to_args() + [url, dest] + self.run_command(cmd_args) def switch(self, dest, url, rev_options): self.run_command(['switch', url], cwd=dest) - def update(self, dest, rev_options): - self.run_command(['pull', '-q'] + rev_options, cwd=dest) + def update(self, dest, url, rev_options): + cmd_args = ['pull', '-q'] + 
rev_options.to_args() + self.run_command(cmd_args, cwd=dest) - def obtain(self, dest): - url, rev = self.get_url_rev() - if rev: - rev_options = ['-r', rev] - rev_display = ' (to revision %s)' % rev - else: - rev_options = [] - rev_display = '' - if self.check_destination(dest, url, rev_options, rev_display): - logger.info( - 'Checking out %s%s to %s', - url, - rev_display, - display_path(dest), - ) - self.run_command(['branch', '-q'] + rev_options + [url, dest]) - - def get_url_rev(self): + def get_url_rev_and_auth(self, url): # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it - url, rev = super(Bazaar, self).get_url_rev() + url, rev, user_pass = super(Bazaar, self).get_url_rev_and_auth(url) if url.startswith('ssh://'): url = 'bzr+' + url - return url, rev + return url, rev, user_pass def get_url(self, location): urls = self.run_command(['info'], show_stdout=False, cwd=location) @@ -95,7 +90,8 @@ class Bazaar(VersionControl): def get_revision(self, location): revision = self.run_command( - ['revno'], show_stdout=False, cwd=location) + ['revno'], show_stdout=False, cwd=location, + ) return revision.splitlines()[-1] def get_src_requirement(self, dist, location): @@ -104,11 +100,11 @@ class Bazaar(VersionControl): return None if not repo.lower().startswith('bzr:'): repo = 'bzr+' + repo - egg_project_name = dist.egg_name().split('-', 1)[0] current_rev = self.get_revision(location) - return '%s@%s#egg=%s' % (repo, current_rev, egg_project_name) + egg_project_name = dist.egg_name().split('-', 1)[0] + return make_vcs_requirement_url(repo, current_rev, egg_project_name) - def check_version(self, dest, rev_options): + def is_commit_id_equal(self, dest, name): """Always assume the versions don't match""" return False diff --git a/Shared/lib/python3.4/site-packages/pip/_internal/vcs/git.py b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/git.py new file mode 100644 index 0000000..9778539 --- /dev/null +++ 
b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/git.py @@ -0,0 +1,346 @@ +from __future__ import absolute_import + +import logging +import os.path +import re + +from pip._vendor.packaging.version import parse as parse_version +from pip._vendor.six.moves.urllib import parse as urllib_parse +from pip._vendor.six.moves.urllib import request as urllib_request + +from pip._internal.exceptions import BadCommand +from pip._internal.utils.compat import samefile +from pip._internal.utils.misc import display_path, make_vcs_requirement_url +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.vcs import VersionControl, vcs + +urlsplit = urllib_parse.urlsplit +urlunsplit = urllib_parse.urlunsplit + + +logger = logging.getLogger(__name__) + + +HASH_REGEX = re.compile('[a-fA-F0-9]{40}') + + +def looks_like_hash(sha): + return bool(HASH_REGEX.match(sha)) + + +class Git(VersionControl): + name = 'git' + dirname = '.git' + repo_name = 'clone' + schemes = ( + 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file', + ) + # Prevent the user's environment variables from interfering with pip: + # https://github.com/pypa/pip/issues/1130 + unset_environ = ('GIT_DIR', 'GIT_WORK_TREE') + default_arg_rev = 'HEAD' + + def __init__(self, url=None, *args, **kwargs): + + # Works around an apparent Git bug + # (see https://article.gmane.org/gmane.comp.version-control.git/146500) + if url: + scheme, netloc, path, query, fragment = urlsplit(url) + if scheme.endswith('file'): + initial_slashes = path[:-len(path.lstrip('/'))] + newpath = ( + initial_slashes + + urllib_request.url2pathname(path) + .replace('\\', '/').lstrip('/') + ) + url = urlunsplit((scheme, netloc, newpath, query, fragment)) + after_plus = scheme.find('+') + 1 + url = scheme[:after_plus] + urlunsplit( + (scheme[after_plus:], netloc, newpath, query, fragment), + ) + + super(Git, self).__init__(url, *args, **kwargs) + + def get_base_rev_args(self, rev): + return [rev] + + def 
get_git_version(self): + VERSION_PFX = 'git version ' + version = self.run_command(['version'], show_stdout=False) + if version.startswith(VERSION_PFX): + version = version[len(VERSION_PFX):].split()[0] + else: + version = '' + # get first 3 positions of the git version becasue + # on windows it is x.y.z.windows.t, and this parses as + # LegacyVersion which always smaller than a Version. + version = '.'.join(version.split('.')[:3]) + return parse_version(version) + + def get_branch(self, location): + """ + Return the current branch, or None if HEAD isn't at a branch + (e.g. detached HEAD). + """ + args = ['rev-parse', '--abbrev-ref', 'HEAD'] + output = self.run_command(args, show_stdout=False, cwd=location) + branch = output.strip() + + if branch == 'HEAD': + return None + + return branch + + def export(self, location): + """Export the Git repository at the url to the destination location""" + if not location.endswith('/'): + location = location + '/' + + with TempDirectory(kind="export") as temp_dir: + self.unpack(temp_dir.path) + self.run_command( + ['checkout-index', '-a', '-f', '--prefix', location], + show_stdout=False, cwd=temp_dir.path + ) + + def get_revision_sha(self, dest, rev): + """ + Return (sha_or_none, is_branch), where sha_or_none is a commit hash + if the revision names a remote branch or tag, otherwise None. + + Args: + dest: the repository directory. + rev: the revision name. + """ + # Pass rev to pre-filter the list. + output = self.run_command(['show-ref', rev], cwd=dest, + show_stdout=False, on_returncode='ignore') + refs = {} + for line in output.strip().splitlines(): + try: + sha, ref = line.split() + except ValueError: + # Include the offending line to simplify troubleshooting if + # this error ever occurs. 
+ raise ValueError('unexpected show-ref line: {!r}'.format(line)) + + refs[ref] = sha + + branch_ref = 'refs/remotes/origin/{}'.format(rev) + tag_ref = 'refs/tags/{}'.format(rev) + + sha = refs.get(branch_ref) + if sha is not None: + return (sha, True) + + sha = refs.get(tag_ref) + + return (sha, False) + + def resolve_revision(self, dest, url, rev_options): + """ + Resolve a revision to a new RevOptions object with the SHA1 of the + branch, tag, or ref if found. + + Args: + rev_options: a RevOptions object. + """ + rev = rev_options.arg_rev + sha, is_branch = self.get_revision_sha(dest, rev) + + if sha is not None: + rev_options = rev_options.make_new(sha) + rev_options.branch_name = rev if is_branch else None + + return rev_options + + # Do not show a warning for the common case of something that has + # the form of a Git commit hash. + if not looks_like_hash(rev): + logger.warning( + "Did not find branch or tag '%s', assuming revision or ref.", + rev, + ) + + if not rev.startswith('refs/'): + return rev_options + + # If it looks like a ref, we have to fetch it explicitly. + self.run_command( + ['fetch', '-q', url] + rev_options.to_args(), + cwd=dest, + ) + # Change the revision to the SHA of the ref we fetched + sha = self.get_revision(dest, rev='FETCH_HEAD') + rev_options = rev_options.make_new(sha) + + return rev_options + + def is_commit_id_equal(self, dest, name): + """ + Return whether the current commit hash equals the given name. + + Args: + dest: the repository directory. + name: a string name. + """ + if not name: + # Then avoid an unnecessary subprocess call. + return False + + return self.get_revision(dest) == name + + def fetch_new(self, dest, url, rev_options): + rev_display = rev_options.to_display() + logger.info( + 'Cloning %s%s to %s', url, rev_display, display_path(dest), + ) + self.run_command(['clone', '-q', url, dest]) + + if rev_options.rev: + # Then a specific revision was requested. 
+ rev_options = self.resolve_revision(dest, url, rev_options) + branch_name = getattr(rev_options, 'branch_name', None) + if branch_name is None: + # Only do a checkout if the current commit id doesn't match + # the requested revision. + if not self.is_commit_id_equal(dest, rev_options.rev): + cmd_args = ['checkout', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + elif self.get_branch(dest) != branch_name: + # Then a specific branch was requested, and that branch + # is not yet checked out. + track_branch = 'origin/{}'.format(branch_name) + cmd_args = [ + 'checkout', '-b', branch_name, '--track', track_branch, + ] + self.run_command(cmd_args, cwd=dest) + + #: repo may contain submodules + self.update_submodules(dest) + + def switch(self, dest, url, rev_options): + self.run_command(['config', 'remote.origin.url', url], cwd=dest) + cmd_args = ['checkout', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + + self.update_submodules(dest) + + def update(self, dest, url, rev_options): + # First fetch changes from the default remote + if self.get_git_version() >= parse_version('1.9.0'): + # fetch tags in addition to everything else + self.run_command(['fetch', '-q', '--tags'], cwd=dest) + else: + self.run_command(['fetch', '-q'], cwd=dest) + # Then reset to wanted revision (maybe even origin/master) + rev_options = self.resolve_revision(dest, url, rev_options) + cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + #: update submodules + self.update_submodules(dest) + + def get_url(self, location): + """Return URL of the first remote encountered.""" + remotes = self.run_command( + ['config', '--get-regexp', r'remote\..*\.url'], + show_stdout=False, cwd=location, + ) + remotes = remotes.splitlines() + found_remote = remotes[0] + for remote in remotes: + if remote.startswith('remote.origin.url '): + found_remote = remote + break + url = found_remote.split(' ')[1] + return url.strip() 
+ + def get_revision(self, location, rev=None): + if rev is None: + rev = 'HEAD' + current_rev = self.run_command( + ['rev-parse', rev], show_stdout=False, cwd=location, + ) + return current_rev.strip() + + def _get_subdirectory(self, location): + """Return the relative path of setup.py to the git repo root.""" + # find the repo root + git_dir = self.run_command(['rev-parse', '--git-dir'], + show_stdout=False, cwd=location).strip() + if not os.path.isabs(git_dir): + git_dir = os.path.join(location, git_dir) + root_dir = os.path.join(git_dir, '..') + # find setup.py + orig_location = location + while not os.path.exists(os.path.join(location, 'setup.py')): + last_location = location + location = os.path.dirname(location) + if location == last_location: + # We've traversed up to the root of the filesystem without + # finding setup.py + logger.warning( + "Could not find setup.py for directory %s (tried all " + "parent directories)", + orig_location, + ) + return None + # relative path of setup.py to repo root + if samefile(root_dir, location): + return None + return os.path.relpath(location, root_dir) + + def get_src_requirement(self, dist, location): + repo = self.get_url(location) + if not repo.lower().startswith('git:'): + repo = 'git+' + repo + current_rev = self.get_revision(location) + egg_project_name = dist.egg_name().split('-', 1)[0] + subdir = self._get_subdirectory(location) + req = make_vcs_requirement_url(repo, current_rev, egg_project_name, + subdir=subdir) + + return req + + def get_url_rev_and_auth(self, url): + """ + Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. + That's required because although they use SSH they sometimes don't + work with a ssh:// scheme (e.g. GitHub). But we need a scheme for + parsing. Hence we remove it again afterwards and return it as a stub. 
+ """ + if '://' not in url: + assert 'file:' not in url + url = url.replace('git+', 'git+ssh://') + url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) + url = url.replace('ssh://', '') + else: + url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) + + return url, rev, user_pass + + def update_submodules(self, location): + if not os.path.exists(os.path.join(location, '.gitmodules')): + return + self.run_command( + ['submodule', 'update', '--init', '--recursive', '-q'], + cwd=location, + ) + + @classmethod + def controls_location(cls, location): + if super(Git, cls).controls_location(location): + return True + try: + r = cls().run_command(['rev-parse'], + cwd=location, + show_stdout=False, + on_returncode='ignore') + return not r + except BadCommand: + logger.debug("could not determine if %s is under git control " + "because git is not available", location) + return False + + +vcs.register(Git) diff --git a/Shared/lib/python3.4/site-packages/pip/vcs/mercurial.py b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/mercurial.py similarity index 64% rename from Shared/lib/python3.4/site-packages/pip/vcs/mercurial.py rename to Shared/lib/python3.4/site-packages/pip/_internal/vcs/mercurial.py index 1aa83b9..17cfb67 100644 --- a/Shared/lib/python3.4/site-packages/pip/vcs/mercurial.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/mercurial.py @@ -2,13 +2,13 @@ from __future__ import absolute_import import logging import os -import tempfile -from pip.utils import display_path, rmtree -from pip.vcs import vcs, VersionControl -from pip.download import path_to_url from pip._vendor.six.moves import configparser +from pip._internal.download import path_to_url +from pip._internal.utils.misc import display_path, make_vcs_requirement_url +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.vcs import VersionControl, vcs logger = logging.getLogger(__name__) @@ -19,15 +19,29 @@ class Mercurial(VersionControl): repo_name 
= 'clone' schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http') + def get_base_rev_args(self, rev): + return [rev] + def export(self, location): """Export the Hg repository at the url to the destination location""" - temp_dir = tempfile.mkdtemp('-export', 'pip-') - self.unpack(temp_dir) - try: + with TempDirectory(kind="export") as temp_dir: + self.unpack(temp_dir.path) + self.run_command( - ['archive', location], show_stdout=False, cwd=temp_dir) - finally: - rmtree(temp_dir) + ['archive', location], show_stdout=False, cwd=temp_dir.path + ) + + def fetch_new(self, dest, url, rev_options): + rev_display = rev_options.to_display() + logger.info( + 'Cloning hg %s%s to %s', + url, + rev_display, + display_path(dest), + ) + self.run_command(['clone', '--noupdate', '-q', url, dest]) + cmd_args = ['update', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) def switch(self, dest, url, rev_options): repo_config = os.path.join(dest, self.dirname, 'hgrc') @@ -42,29 +56,13 @@ class Mercurial(VersionControl): 'Could not switch Mercurial repository to %s: %s', url, exc, ) else: - self.run_command(['update', '-q'] + rev_options, cwd=dest) + cmd_args = ['update', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) - def update(self, dest, rev_options): + def update(self, dest, url, rev_options): self.run_command(['pull', '-q'], cwd=dest) - self.run_command(['update', '-q'] + rev_options, cwd=dest) - - def obtain(self, dest): - url, rev = self.get_url_rev() - if rev: - rev_options = [rev] - rev_display = ' (to revision %s)' % rev - else: - rev_options = [] - rev_display = '' - if self.check_destination(dest, url, rev_options, rev_display): - logger.info( - 'Cloning hg %s%s to %s', - url, - rev_display, - display_path(dest), - ) - self.run_command(['clone', '--noupdate', '-q', url, dest]) - self.run_command(['update', '-q'] + rev_options, cwd=dest) + cmd_args = ['update', '-q'] + rev_options.to_args() + self.run_command(cmd_args, 
cwd=dest) def get_url(self, location): url = self.run_command( @@ -90,14 +88,14 @@ class Mercurial(VersionControl): repo = self.get_url(location) if not repo.lower().startswith('hg:'): repo = 'hg+' + repo - egg_project_name = dist.egg_name().split('-', 1)[0] - if not repo: - return None current_rev_hash = self.get_revision_hash(location) - return '%s@%s#egg=%s' % (repo, current_rev_hash, egg_project_name) + egg_project_name = dist.egg_name().split('-', 1)[0] + return make_vcs_requirement_url(repo, current_rev_hash, + egg_project_name) - def check_version(self, dest, rev_options): + def is_commit_id_equal(self, dest, name): """Always assume the versions don't match""" return False + vcs.register(Mercurial) diff --git a/Shared/lib/python3.4/site-packages/pip/vcs/subversion.py b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/subversion.py similarity index 60% rename from Shared/lib/python3.4/site-packages/pip/vcs/subversion.py rename to Shared/lib/python3.4/site-packages/pip/_internal/vcs/subversion.py index aa78fa6..6f7cb5d 100644 --- a/Shared/lib/python3.4/site-packages/pip/vcs/subversion.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/vcs/subversion.py @@ -4,17 +4,15 @@ import logging import os import re -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip.index import Link -from pip.utils import rmtree, display_path -from pip.utils.logging import indent_log -from pip.vcs import vcs, VersionControl +from pip._internal.models.link import Link +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + display_path, make_vcs_requirement_url, rmtree, split_auth_from_netloc, +) +from pip._internal.vcs import VersionControl, vcs _svn_xml_url_re = re.compile('url="([^"]+)"') -_svn_rev_re = re.compile('committed-rev="(\d+)"') -_svn_url_re = re.compile(r'URL: (.+)') -_svn_revision_re = re.compile(r'Revision: (.+)') +_svn_rev_re = re.compile(r'committed-rev="(\d+)"') _svn_info_xml_rev_re = 
re.compile(r'\s*revision="(\d+)"') _svn_info_xml_url_re = re.compile(r'(.*)') @@ -28,69 +26,40 @@ class Subversion(VersionControl): repo_name = 'checkout' schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn') - def get_info(self, location): - """Returns (url, revision), where both are strings""" - assert not location.rstrip('/').endswith(self.dirname), \ - 'Bad directory: %s' % location - output = self.run_command( - ['info', location], - show_stdout=False, - extra_environ={'LANG': 'C'}, - ) - match = _svn_url_re.search(output) - if not match: - logger.warning( - 'Cannot determine URL of svn checkout %s', - display_path(location), - ) - logger.debug('Output that cannot be parsed: \n%s', output) - return None, None - url = match.group(1).strip() - match = _svn_revision_re.search(output) - if not match: - logger.warning( - 'Cannot determine revision of svn checkout %s', - display_path(location), - ) - logger.debug('Output that cannot be parsed: \n%s', output) - return url, None - return url, match.group(1) + def get_base_rev_args(self, rev): + return ['-r', rev] def export(self, location): """Export the svn repository at the url to the destination location""" - url, rev = self.get_url_rev() - rev_options = get_rev_options(url, rev) + url, rev_options = self.get_url_rev_options(self.url) + logger.info('Exporting svn repository %s to %s', url, location) with indent_log(): if os.path.exists(location): # Subversion doesn't like to check out over an existing # directory --force fixes this, but was only added in svn 1.5 rmtree(location) - self.run_command( - ['export'] + rev_options + [url, location], - show_stdout=False) + cmd_args = ['export'] + rev_options.to_args() + [url, location] + self.run_command(cmd_args, show_stdout=False) + + def fetch_new(self, dest, url, rev_options): + rev_display = rev_options.to_display() + logger.info( + 'Checking out %s%s to %s', + url, + rev_display, + display_path(dest), + ) + cmd_args = ['checkout', '-q'] + 
rev_options.to_args() + [url, dest] + self.run_command(cmd_args) def switch(self, dest, url, rev_options): - self.run_command(['switch'] + rev_options + [url, dest]) + cmd_args = ['switch'] + rev_options.to_args() + [url, dest] + self.run_command(cmd_args) - def update(self, dest, rev_options): - self.run_command(['update'] + rev_options + [dest]) - - def obtain(self, dest): - url, rev = self.get_url_rev() - rev_options = get_rev_options(url, rev) - if rev: - rev_display = ' (to revision %s)' % rev - else: - rev_display = '' - if self.check_destination(dest, url, rev_options, rev_display): - logger.info( - 'Checking out %s%s to %s', - url, - rev_display, - display_path(dest), - ) - self.run_command(['checkout', '-q'] + rev_options + [url, dest]) + def update(self, dest, url, rev_options): + cmd_args = ['update'] + rev_options.to_args() + [dest] + self.run_command(cmd_args) def get_location(self, dist, dependency_links): for url in dependency_links: @@ -126,19 +95,41 @@ class Subversion(VersionControl): dirurl, localrev = self._get_svn_url_rev(base) if base == location: - base_url = dirurl + '/' # save the root url - elif not dirurl or not dirurl.startswith(base_url): + base = dirurl + '/' # save the root url + elif not dirurl or not dirurl.startswith(base): dirs[:] = [] continue # not part of the same svn tree, skip it revision = max(revision, localrev) return revision - def get_url_rev(self): + def get_netloc_and_auth(self, netloc, scheme): + """ + This override allows the auth information to be passed to svn via the + --username and --password options instead of via the URL. + """ + if scheme == 'ssh': + # The --username and --password options can't be used for + # svn+ssh URLs, so keep the auth information in the URL. 
+ return super(Subversion, self).get_netloc_and_auth( + netloc, scheme) + + return split_auth_from_netloc(netloc) + + def get_url_rev_and_auth(self, url): # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it - url, rev = super(Subversion, self).get_url_rev() + url, rev, user_pass = super(Subversion, self).get_url_rev_and_auth(url) if url.startswith('ssh://'): url = 'svn+' + url - return url, rev + return url, rev, user_pass + + def make_rev_args(self, username, password): + extra_args = [] + if username: + extra_args += ['--username', username] + if password: + extra_args += ['--password', password] + + return extra_args def get_url(self, location): # In cases where the source is in a subdirectory, not alongside @@ -161,7 +152,7 @@ class Subversion(VersionControl): return self._get_svn_url_rev(location)[0] def _get_svn_url_rev(self, location): - from pip.exceptions import InstallationError + from pip._internal.exceptions import InstallationError entries_path = os.path.join(location, self.dirname, 'entries') if os.path.exists(entries_path): @@ -208,42 +199,15 @@ class Subversion(VersionControl): repo = self.get_url(location) if repo is None: return None + repo = 'svn+' + repo + rev = self.get_revision(location) # FIXME: why not project name? 
egg_project_name = dist.egg_name().split('-', 1)[0] - rev = self.get_revision(location) - return 'svn+%s@%s#egg=%s' % (repo, rev, egg_project_name) + return make_vcs_requirement_url(repo, rev, egg_project_name) - def check_version(self, dest, rev_options): + def is_commit_id_equal(self, dest, name): """Always assume the versions don't match""" return False -def get_rev_options(url, rev): - if rev: - rev_options = ['-r', rev] - else: - rev_options = [] - - r = urllib_parse.urlsplit(url) - if hasattr(r, 'username'): - # >= Python-2.5 - username, password = r.username, r.password - else: - netloc = r[1] - if '@' in netloc: - auth = netloc.split('@')[0] - if ':' in auth: - username, password = auth.split(':', 1) - else: - username, password = auth, None - else: - username, password = None, None - - if username: - rev_options += ['--username', username] - if password: - rev_options += ['--password', password] - return rev_options - - vcs.register(Subversion) diff --git a/Shared/lib/python3.4/site-packages/pip/wheel.py b/Shared/lib/python3.4/site-packages/pip/_internal/wheel.py similarity index 72% rename from Shared/lib/python3.4/site-packages/pip/wheel.py rename to Shared/lib/python3.4/site-packages/pip/_internal/wheel.py index 3e12402..5ce890e 100644 --- a/Shared/lib/python3.4/site-packages/pip/wheel.py +++ b/Shared/lib/python3.4/site-packages/pip/_internal/wheel.py @@ -3,44 +3,44 @@ Support for installing and building the "wheel" binary package format. 
""" from __future__ import absolute_import +import collections import compileall import csv -import errno -import functools import hashlib import logging -import os import os.path import re import shutil import stat import sys -import tempfile import warnings - from base64 import urlsafe_b64encode from email.parser import Parser +from pip._vendor import pkg_resources +from pip._vendor.distlib.scripts import ScriptMaker +from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.six import StringIO -import pip -from pip.compat import expanduser -from pip.download import path_to_url, unpack_url -from pip.exceptions import ( - InstallationError, InvalidWheelFilename, UnsupportedWheel) -from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME -from pip import pep425tags -from pip.utils import ( - call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks, +from pip._internal import pep425tags +from pip._internal.download import path_to_url, unpack_url +from pip._internal.exceptions import ( + InstallationError, InvalidWheelFilename, UnsupportedWheel, ) -from pip.utils.ui import open_spinner -from pip.utils.logging import indent_log -from pip.utils.setuptools_build import SETUPTOOLS_SHIM -from pip._vendor.distlib.scripts import ScriptMaker -from pip._vendor import pkg_resources -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.six.moves import configparser +from pip._internal.locations import ( + PIP_DELETE_MARKER_FILENAME, distutils_scheme, +) +from pip._internal.utils.logging import indent_log +from pip._internal.utils.misc import ( + call_subprocess, captured_stdout, ensure_dir, read_chunks, +) +from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM +from pip._internal.utils.temp_dir import TempDirectory +from pip._internal.utils.typing import MYPY_CHECK_RUNNING +from pip._internal.utils.ui import open_spinner +if MYPY_CHECK_RUNNING: + from typing import Dict, List, Optional # noqa: F401 
wheel_ext = '.whl' @@ -50,107 +50,9 @@ VERSION_COMPATIBLE = (1, 0) logger = logging.getLogger(__name__) -class WheelCache(object): - """A cache of wheels for future installs.""" - - def __init__(self, cache_dir, format_control): - """Create a wheel cache. - - :param cache_dir: The root of the cache. - :param format_control: A pip.index.FormatControl object to limit - binaries being read from the cache. - """ - self._cache_dir = expanduser(cache_dir) if cache_dir else None - self._format_control = format_control - - def cached_wheel(self, link, package_name): - return cached_wheel( - self._cache_dir, link, self._format_control, package_name) - - -def _cache_for_link(cache_dir, link): - """ - Return a directory to store cached wheels in for link. - - Because there are M wheels for any one sdist, we provide a directory - to cache them in, and then consult that directory when looking up - cache hits. - - We only insert things into the cache if they have plausible version - numbers, so that we don't contaminate the cache with things that were not - unique. E.g. ./package might have dozens of installs done for it and build - a version of 0.0...and if we built and cached a wheel, we'd end up using - the same wheel even if the source has been edited. - - :param cache_dir: The cache_dir being used by pip. - :param link: The link of the sdist for which this will cache wheels. - """ - - # We want to generate an url to use as our cache key, we don't want to just - # re-use the URL because it might have other items in the fragment and we - # don't care about those. - key_parts = [link.url_without_fragment] - if link.hash_name is not None and link.hash is not None: - key_parts.append("=".join([link.hash_name, link.hash])) - key_url = "#".join(key_parts) - - # Encode our key url with sha224, we'll use this because it has similar - # security properties to sha256, but with a shorter total output (and thus - # less secure). 
However the differences don't make a lot of difference for - # our use case here. - hashed = hashlib.sha224(key_url.encode()).hexdigest() - - # We want to nest the directories some to prevent having a ton of top level - # directories where we might run out of sub directories on some FS. - parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] - - # Inside of the base location for cached wheels, expand our parts and join - # them all together. - return os.path.join(cache_dir, "wheels", *parts) - - -def cached_wheel(cache_dir, link, format_control, package_name): - if not cache_dir: - return link - if not link: - return link - if link.is_wheel: - return link - if not link.is_artifact: - return link - if not package_name: - return link - canonical_name = canonicalize_name(package_name) - formats = pip.index.fmt_ctl_formats(format_control, canonical_name) - if "binary" not in formats: - return link - root = _cache_for_link(cache_dir, link) - try: - wheel_names = os.listdir(root) - except OSError as e: - if e.errno in (errno.ENOENT, errno.ENOTDIR): - return link - raise - candidates = [] - for wheel_name in wheel_names: - try: - wheel = Wheel(wheel_name) - except InvalidWheelFilename: - continue - if not wheel.supported(): - # Built for a different python/arch/etc - continue - candidates.append((wheel.support_index_min(), wheel_name)) - if not candidates: - return link - candidates.sort() - path = os.path.join(root, candidates[0][1]) - return pip.index.Link(path_to_url(path)) - - -def rehash(path, algo='sha256', blocksize=1 << 20): - """Return (hash, length) for path using hashlib.new(algo)""" - h = hashlib.new(algo) +def rehash(path, blocksize=1 << 20): + """Return (hash, length) for path using hashlib.sha256()""" + h = hashlib.sha256() length = 0 with open(path, 'rb') as f: for block in read_chunks(f, size=blocksize): @@ -189,7 +91,8 @@ def fix_script(path): script.write(rest) return True -dist_info_re = re.compile(r"""^(?P(?P.+?)(-(?P\d.+?))?) 
+ +dist_info_re = re.compile(r"""^(?P(?P.+?)(-(?P.+?))?) \.dist-info$""", re.VERBOSE) @@ -224,21 +127,86 @@ def get_entrypoints(filename): data.write("\n") data.seek(0) - cp = configparser.RawConfigParser() - cp.optionxform = lambda option: option - cp.readfp(data) + # get the entry points and then the script names + entry_points = pkg_resources.EntryPoint.parse_map(data) + console = entry_points.get('console_scripts', {}) + gui = entry_points.get('gui_scripts', {}) - console = {} - gui = {} - if cp.has_section('console_scripts'): - console = dict(cp.items('console_scripts')) - if cp.has_section('gui_scripts'): - gui = dict(cp.items('gui_scripts')) + def _split_ep(s): + """get the string representation of EntryPoint, remove space and split + on '='""" + return str(s).replace(" ", "").split("=") + + # convert the EntryPoint objects into strings with module:function + console = dict(_split_ep(v) for v in console.values()) + gui = dict(_split_ep(v) for v in gui.values()) return console, gui +def message_about_scripts_not_on_PATH(scripts): + # type: (List[str]) -> Optional[str] + """Determine if any scripts are not on PATH and format a warning. + + Returns a warning message if one or more scripts are not on PATH, + otherwise None. + """ + if not scripts: + return None + + # Group scripts by the path they were installed in + grouped_by_dir = collections.defaultdict(set) # type: Dict[str, set] + for destfile in scripts: + parent_dir = os.path.dirname(destfile) + script_name = os.path.basename(destfile) + grouped_by_dir[parent_dir].add(script_name) + + # We don't want to warn for directories that are on PATH. + not_warn_dirs = [ + os.path.normcase(i).rstrip(os.sep) for i in + os.environ.get("PATH", "").split(os.pathsep) + ] + # If an executable sits with sys.executable, we don't warn for it. + # This covers the case of venv invocations without activating the venv. 
+ not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable))) + warn_for = { + parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items() + if os.path.normcase(parent_dir) not in not_warn_dirs + } + if not warn_for: + return None + + # Format a message + msg_lines = [] + for parent_dir, scripts in warn_for.items(): + scripts = sorted(scripts) + if len(scripts) == 1: + start_text = "script {} is".format(scripts[0]) + else: + start_text = "scripts {} are".format( + ", ".join(scripts[:-1]) + " and " + scripts[-1] + ) + + msg_lines.append( + "The {} installed in '{}' which is not on PATH." + .format(start_text, parent_dir) + ) + + last_line_fmt = ( + "Consider adding {} to PATH or, if you prefer " + "to suppress this warning, use --no-warn-script-location." + ) + if len(msg_lines) == 1: + msg_lines.append(last_line_fmt.format("this directory")) + else: + msg_lines.append(last_line_fmt.format("these directories")) + + # Returns the formatted multiline message + return "\n".join(msg_lines) + + def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None, - pycompile=True, scheme=None, isolated=False, prefix=None): + pycompile=True, scheme=None, isolated=False, prefix=None, + warn_script_location=True): """Install a wheel""" if not scheme: @@ -298,10 +266,11 @@ def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None, continue elif (is_base and s.endswith('.dist-info') and - # is self.req.project_name case preserving? - s.lower().startswith( - req.project_name.replace('-', '_').lower())): - assert not info_dir, 'Multiple .dist-info directories' + canonicalize_name(s).startswith( + canonicalize_name(req.name))): + assert not info_dir, ('Multiple .dist-info directories: ' + + destsubdir + ', ' + + ', '.join(info_dir)) info_dir.append(destsubdir) for f in files: # Skip unwanted files @@ -314,6 +283,17 @@ def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None, # uninstalled. 
ensure_dir(destdir) + # copyfile (called below) truncates the destination if it + # exists and then writes the new contents. This is fine in most + # cases, but can cause a segfault if pip has loaded a shared + # object (e.g. from pyopenssl through its vendored urllib3) + # Since the shared object is mmap'd an attempt to call a + # symbol in it will then cause a segfault. Unlinking the file + # allows writing of new contents while allowing the process to + # continue to use the old copy. + if os.path.exists(destfile): + os.unlink(destfile) + # We use copyfile (not move, copy, or copy2) to be extra sure # that we are not moving directories over (copyfile fails for # directories) as well as to ensure that we are not copying @@ -384,7 +364,7 @@ def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None, # Ensure we don't generate any variants for scripts because this is almost # never what somebody wants. # See https://bitbucket.org/pypa/distlib/issue/35/ - maker.variants = set(('', )) + maker.variants = {''} # This is required because otherwise distlib creates scripts that are not # executable. @@ -410,14 +390,14 @@ def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None, } maker._get_script_text = _get_script_text - maker.script_template = """# -*- coding: utf-8 -*- + maker.script_template = r"""# -*- coding: utf-8 -*- import re import sys from %(module)s import %(import_name)s if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit(%(func)s()) """ @@ -438,7 +418,7 @@ if __name__ == '__main__': # Because setuptools and pip are bundled with _ensurepip and virtualenv, # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we # override the versioned entry points in the wheel and generate the - # correct ones. This code is purely a short-term measure until Metadat 2.0 + # correct ones. 
This code is purely a short-term measure until Metadata 2.0 # is available. # # To add the level of hack in this section of code, in order to support @@ -487,9 +467,16 @@ if __name__ == '__main__': # Generate the console and GUI entry points specified in the wheel if len(console) > 0: - generated.extend( - maker.make_multiple(['%s = %s' % kv for kv in console.items()]) + generated_console_scripts = maker.make_multiple( + ['%s = %s' % kv for kv in console.items()] ) + generated.extend(generated_console_scripts) + + if warn_script_location: + msg = message_about_scripts_not_on_PATH(generated_console_scripts) + if msg is not None: + logger.warning(msg) + if len(gui) > 0: generated.extend( maker.make_multiple( @@ -513,53 +500,22 @@ if __name__ == '__main__': with open_for_csv(temp_record, 'w+') as record_out: reader = csv.reader(record_in) writer = csv.writer(record_out) + outrows = [] for row in reader: row[0] = installed.pop(row[0], row[0]) if row[0] in changed: row[1], row[2] = rehash(row[0]) - writer.writerow(row) + outrows.append(tuple(row)) for f in generated: - h, l = rehash(f) - writer.writerow((normpath(f, lib_dir), h, l)) + digest, length = rehash(f) + outrows.append((normpath(f, lib_dir), digest, length)) for f in installed: - writer.writerow((installed[f], '', '')) + outrows.append((installed[f], '', '')) + for row in sorted(outrows): + writer.writerow(row) shutil.move(temp_record, record) -def _unique(fn): - @functools.wraps(fn) - def unique(*args, **kw): - seen = set() - for item in fn(*args, **kw): - if item not in seen: - seen.add(item) - yield item - return unique - - -# TODO: this goes somewhere besides the wheel module -@_unique -def uninstallation_paths(dist): - """ - Yield all the uninstallation paths for dist based on RECORD-without-.pyc - - Yield paths to all the files in RECORD. For each .py file in RECORD, add - the .pyc in the same directory. - - UninstallPathSet.add() takes care of the __pycache__ .pyc. 
- """ - from pip.utils import FakeFile # circular import - r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD'))) - for row in r: - path = os.path.join(dist.location, row[0]) - yield path - if path.endswith('.py'): - dn, fn = os.path.split(path) - base = fn[:-3] - path = os.path.join(dn, base + '.pyc') - yield path - - def wheel_version(source_dir): """ Return the Wheel-Version of an extracted wheel, if possible. @@ -575,7 +531,7 @@ def wheel_version(source_dir): version = wheel_data['Wheel-Version'].strip() version = tuple(map(int, version.split('.'))) return version - except: + except Exception: return False @@ -614,8 +570,8 @@ class Wheel(object): # TODO: maybe move the install code into this class wheel_file_re = re.compile( - r"""^(?P(?P.+?)-(?P\d.*?)) - ((-(?P\d.*?))?-(?P.+?)-(?P.+?)-(?P.+?) + r"""^(?P(?P.+?)-(?P.*?)) + ((-(?P\d[^-]*?))?-(?P.+?)-(?P.+?)-(?P.+?) \.whl|\.dist-info)$""", re.VERBOSE ) @@ -634,15 +590,16 @@ class Wheel(object): # we'll assume "_" means "-" due to wheel naming scheme # (https://github.com/pypa/pip/issues/1150) self.version = wheel_info.group('ver').replace('_', '-') + self.build_tag = wheel_info.group('build') self.pyversions = wheel_info.group('pyver').split('.') self.abis = wheel_info.group('abi').split('.') self.plats = wheel_info.group('plat').split('.') # All the tag combinations from this file - self.file_tags = set( + self.file_tags = { (x, y, z) for x in self.pyversions for y in self.abis for z in self.plats - ) + } def support_index_min(self, tags=None): """ @@ -652,54 +609,66 @@ class Wheel(object): None is the wheel is not supported. 
""" if tags is None: # for mock - tags = pep425tags.supported_tags + tags = pep425tags.get_supported() indexes = [tags.index(c) for c in self.file_tags if c in tags] return min(indexes) if indexes else None def supported(self, tags=None): """Is this wheel supported on this system?""" if tags is None: # for mock - tags = pep425tags.supported_tags + tags = pep425tags.get_supported() return bool(set(tags).intersection(self.file_tags)) class WheelBuilder(object): """Build wheels from a RequirementSet.""" - def __init__(self, requirement_set, finder, build_options=None, - global_options=None): - self.requirement_set = requirement_set + def __init__(self, finder, preparer, wheel_cache, + build_options=None, global_options=None, no_clean=False): self.finder = finder - self._cache_root = requirement_set._wheel_cache._cache_dir - self._wheel_dir = requirement_set.wheel_download_dir + self.preparer = preparer + self.wheel_cache = wheel_cache + + self._wheel_dir = preparer.wheel_download_dir + self.build_options = build_options or [] self.global_options = global_options or [] + self.no_clean = no_clean def _build_one(self, req, output_dir, python_tag=None): """Build one wheel. :return: The filename of the built wheel, or None if the build failed. 
""" - tempd = tempfile.mkdtemp('pip-wheel-') - try: - if self.__build_one(req, tempd, python_tag=python_tag): + # Install build deps into temporary directory (PEP 518) + with req.build_env: + return self._build_one_inside_env(req, output_dir, + python_tag=python_tag) + + def _build_one_inside_env(self, req, output_dir, python_tag=None): + with TempDirectory(kind="wheel") as temp_dir: + if self.__build_one(req, temp_dir.path, python_tag=python_tag): try: - wheel_name = os.listdir(tempd)[0] + wheel_name = os.listdir(temp_dir.path)[0] wheel_path = os.path.join(output_dir, wheel_name) - shutil.move(os.path.join(tempd, wheel_name), wheel_path) + shutil.move( + os.path.join(temp_dir.path, wheel_name), wheel_path + ) logger.info('Stored in directory: %s', output_dir) return wheel_path - except: + except Exception: pass # Ignore return, we can't do anything else useful. self._clean_one(req) return None - finally: - rmtree(tempd) def _base_setup_args(self, req): + # NOTE: Eventually, we'd want to also -S to the flags here, when we're + # isolating. Currently, it breaks Python in virtualenvs, because it + # relies on site.py to find parts of the standard library outside the + # virtualenv. return [ - sys.executable, "-u", '-c', + sys.executable, '-u', '-c', SETUPTOOLS_SHIM % req.setup_py ] + list(self.global_options) @@ -719,7 +688,7 @@ class WheelBuilder(object): call_subprocess(wheel_args, cwd=req.setup_py_dir, show_stdout=False, spinner=spinner) return True - except: + except Exception: spinner.finish("error") logger.error('Failed building wheel for %s', req.name) return False @@ -732,56 +701,58 @@ class WheelBuilder(object): try: call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False) return True - except: + except Exception: logger.error('Failed cleaning build dir for %s', req.name) return False - def build(self, autobuilding=False): + def build(self, requirements, session, autobuilding=False): """Build wheels. 
:param unpack: If True, replace the sdist we built from with the newly built wheel, in preparation for installation. :return: True if all the wheels built correctly. """ - assert self._wheel_dir or (autobuilding and self._cache_root) - # unpack sdists and constructs req set - self.requirement_set.prepare_files(self.finder) + from pip._internal import index + from pip._internal.models.link import Link - reqset = self.requirement_set.requirements.values() + building_is_possible = self._wheel_dir or ( + autobuilding and self.wheel_cache.cache_dir + ) + assert building_is_possible buildset = [] - for req in reqset: + format_control = self.finder.format_control + for req in requirements: if req.constraint: continue if req.is_wheel: if not autobuilding: logger.info( - 'Skipping %s, due to already being wheel.', req.name) - elif req.editable: - if not autobuilding: - logger.info( - 'Skipping bdist_wheel for %s, due to being editable', - req.name) - elif autobuilding and req.link and not req.link.is_artifact: + 'Skipping %s, due to already being wheel.', req.name, + ) + elif autobuilding and req.editable: pass elif autobuilding and not req.source_dir: pass + elif autobuilding and req.link and not req.link.is_artifact: + # VCS checkout. Build wheel just for this run. + buildset.append((req, True)) else: + ephem_cache = False if autobuilding: link = req.link base, ext = link.splitext() - if pip.index.egg_info_matches(base, None, link) is None: - # Doesn't look like a package - don't autobuild a wheel - # because we'll have no way to lookup the result sanely - continue - if "binary" not in pip.index.fmt_ctl_formats( - self.finder.format_control, + if index.egg_info_matches(base, None, link) is None: + # E.g. local directory. Build wheel just for this run. 
+ ephem_cache = True + if "binary" not in format_control.get_allowed_formats( canonicalize_name(req.name)): logger.info( "Skipping bdist_wheel for %s, due to binaries " - "being disabled for it.", req.name) + "being disabled for it.", req.name, + ) continue - buildset.append(req) + buildset.append((req, ephem_cache)) if not buildset: return True @@ -789,20 +760,24 @@ class WheelBuilder(object): # Build the wheels. logger.info( 'Building wheels for collected packages: %s', - ', '.join([req.name for req in buildset]), + ', '.join([req.name for (req, _) in buildset]), ) + _cache = self.wheel_cache # shorter name with indent_log(): build_success, build_failure = [], [] - for req in buildset: + for req, ephem in buildset: python_tag = None if autobuilding: python_tag = pep425tags.implementation_tag - output_dir = _cache_for_link(self._cache_root, req.link) + if ephem: + output_dir = _cache.get_ephem_path_for_link(req.link) + else: + output_dir = _cache.get_path_for_link(req.link) try: ensure_dir(output_dir) except OSError as e: - logger.warn("Building wheel for %s failed: %s", - req.name, e) + logger.warning("Building wheel for %s failed: %s", + req.name, e) build_failure.append(req) continue else: @@ -828,15 +803,16 @@ class WheelBuilder(object): # set the build directory again - name is known from # the work prepare_files did. req.source_dir = req.build_location( - self.requirement_set.build_dir) + self.preparer.build_dir + ) # Update the link for this. 
- req.link = pip.index.Link( - path_to_url(wheel_file)) + req.link = Link(path_to_url(wheel_file)) assert req.link.is_wheel # extract the wheel into the dir unpack_url( req.link, req.source_dir, None, False, - session=self.requirement_set.session) + session=session, + ) else: build_failure.append(req) diff --git a/Shared/lib/python3.4/site-packages/pip/_vendor/__init__.py b/Shared/lib/python3.4/site-packages/pip/_vendor/__init__.py index a822a5b..07db110 100644 --- a/Shared/lib/python3.4/site-packages/pip/_vendor/__init__.py +++ b/Shared/lib/python3.4/site-packages/pip/_vendor/__init__.py @@ -64,16 +64,19 @@ if DEBUNDLED: vendored("cachecontrol") vendored("colorama") vendored("distlib") + vendored("distro") vendored("html5lib") vendored("lockfile") vendored("six") vendored("six.moves") vendored("six.moves.urllib") + vendored("six.moves.urllib.parse") vendored("packaging") vendored("packaging.version") vendored("packaging.specifiers") vendored("pkg_resources") vendored("progress") + vendored("pytoml") vendored("retrying") vendored("requests") vendored("requests.packages") @@ -108,3 +111,4 @@ if DEBUNDLED: vendored("requests.packages.urllib3.util.ssl_") vendored("requests.packages.urllib3.util.timeout") vendored("requests.packages.urllib3.util.url") + vendored("urllib3") diff --git a/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/__init__.py b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/__init__.py new file mode 100644 index 0000000..8beedea --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/__init__.py @@ -0,0 +1,4 @@ +"""Wrappers to build Python packages using PEP 517 hooks +""" + +__version__ = '0.2' diff --git a/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/_in_process.py b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/_in_process.py new file mode 100644 index 0000000..baa14d3 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/_in_process.py @@ -0,0 +1,182 @@ +"""This is invoked in a 
subprocess to call the build backend hooks. + +It expects: +- Command line args: hook_name, control_dir +- Environment variable: PEP517_BUILD_BACKEND=entry.point:spec +- control_dir/input.json: + - {"kwargs": {...}} + +Results: +- control_dir/output.json + - {"return_val": ...} +""" +from glob import glob +from importlib import import_module +import os +from os.path import join as pjoin +import re +import shutil +import sys + +# This is run as a script, not a module, so it can't do a relative import +import compat + +def _build_backend(): + """Find and load the build backend""" + ep = os.environ['PEP517_BUILD_BACKEND'] + mod_path, _, obj_path = ep.partition(':') + obj = import_module(mod_path) + if obj_path: + for path_part in obj_path.split('.'): + obj = getattr(obj, path_part) + return obj + +def get_requires_for_build_wheel(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_wheel + except AttributeError: + return [] + else: + return hook(config_settings) + +def prepare_metadata_for_build_wheel(metadata_directory, config_settings): + """Invoke optional prepare_metadata_for_build_wheel + + Implements a fallback by building a wheel if the hook isn't defined. 
+ """ + backend = _build_backend() + try: + hook = backend.prepare_metadata_for_build_wheel + except AttributeError: + return _get_wheel_metadata_from_wheel(backend, metadata_directory, + config_settings) + else: + return hook(metadata_directory, config_settings) + +WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' + +def _dist_info_files(whl_zip): + """Identify the .dist-info folder inside a wheel ZipFile.""" + res = [] + for path in whl_zip.namelist(): + m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) + if m: + res.append(path) + if res: + return res + raise Exception("No .dist-info folder found in wheel") + +def _get_wheel_metadata_from_wheel(backend, metadata_directory, config_settings): + """Build a wheel and extract the metadata from it. + + Fallback for when the build backend does not define the 'get_wheel_metadata' + hook. + """ + from zipfile import ZipFile + whl_basename = backend.build_wheel(metadata_directory, config_settings) + with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): + pass # Touch marker file + + whl_file = os.path.join(metadata_directory, whl_basename) + with ZipFile(whl_file) as zipf: + dist_info = _dist_info_files(zipf) + zipf.extractall(path=metadata_directory, members=dist_info) + return dist_info[0].split('/')[0] + +def _find_already_built_wheel(metadata_directory): + """Check for a wheel already built during the get_wheel_metadata hook. + """ + if not metadata_directory: + return None + metadata_parent = os.path.dirname(metadata_directory) + if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): + return None + + whl_files = glob(os.path.join(metadata_parent, '*.whl')) + if not whl_files: + print('Found wheel built marker, but no .whl files') + return None + if len(whl_files) > 1: + print('Found multiple .whl files; unspecified behaviour. 
' + 'Will call build_wheel.') + return None + + # Exactly one .whl file + return whl_files[0] + +def build_wheel(wheel_directory, config_settings, metadata_directory=None): + """Invoke the mandatory build_wheel hook. + + If a wheel was already built in the prepare_metadata_for_build_wheel fallback, this + will copy it rather than rebuilding the wheel. + """ + prebuilt_whl = _find_already_built_wheel(metadata_directory) + if prebuilt_whl: + shutil.copy2(prebuilt_whl, wheel_directory) + return os.path.basename(prebuilt_whl) + + return _build_backend().build_wheel(wheel_directory, config_settings, + metadata_directory) + + +def get_requires_for_build_sdist(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_sdist + except AttributeError: + return [] + else: + return hook(config_settings) + +class _DummyException(Exception): + """Nothing should ever raise this exception""" + +class GotUnsupportedOperation(Exception): + """For internal use when backend raises UnsupportedOperation""" + +def build_sdist(sdist_directory, config_settings): + """Invoke the mandatory build_sdist hook.""" + backend = _build_backend() + try: + return backend.build_sdist(sdist_directory, config_settings) + except getattr(backend, 'UnsupportedOperation', _DummyException): + raise GotUnsupportedOperation + +HOOK_NAMES = { + 'get_requires_for_build_wheel', + 'prepare_metadata_for_build_wheel', + 'build_wheel', + 'get_requires_for_build_sdist', + 'build_sdist', +} + +def main(): + if len(sys.argv) < 3: + sys.exit("Needs args: hook_name, control_dir") + hook_name = sys.argv[1] + control_dir = sys.argv[2] + if hook_name not in HOOK_NAMES: + sys.exit("Unknown hook: %s" % hook_name) + hook = globals()[hook_name] + + hook_input = compat.read_json(pjoin(control_dir, 'input.json')) + + json_out = {'unsupported': False, 'return_val': None} + try: + 
json_out['return_val'] = hook(**hook_input['kwargs']) + except GotUnsupportedOperation: + json_out['unsupported'] = True + + compat.write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) + +if __name__ == '__main__': + main() diff --git a/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/check.py b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/check.py new file mode 100644 index 0000000..c65d51c --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/check.py @@ -0,0 +1,194 @@ +"""Check a project and backend by attempting to build using PEP 517 hooks. +""" +import argparse +import logging +import os +from os.path import isfile, join as pjoin +from pip._vendor.pytoml import TomlError, load as toml_load +import shutil +from subprocess import CalledProcessError +import sys +import tarfile +from tempfile import mkdtemp +import zipfile + +from .colorlog import enable_colourful_output +from .envbuild import BuildEnvironment +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + +def check_build_sdist(hooks): + with BuildEnvironment() as env: + try: + env.pip_install(hooks.build_sys_requires) + log.info('Installed static build dependencies') + except CalledProcessError: + log.error('Failed to install static build dependencies') + return False + + try: + reqs = hooks.get_requires_for_build_sdist({}) + log.info('Got build requires: %s', reqs) + except: + log.error('Failure in get_requires_for_build_sdist', exc_info=True) + return False + + try: + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + except CalledProcessError: + log.error('Failed to install dynamic build dependencies') + return False + + td = mkdtemp() + log.info('Trying to build sdist in %s', td) + try: + try: + filename = hooks.build_sdist(td, {}) + log.info('build_sdist returned %r', filename) + except: + log.info('Failure in build_sdist', exc_info=True) + return False + + if not filename.endswith('.tar.gz'): + 
log.error("Filename %s doesn't have .tar.gz extension", filename) + return False + + path = pjoin(td, filename) + if isfile(path): + log.info("Output file %s exists", path) + else: + log.error("Output file %s does not exist", path) + return False + + if tarfile.is_tarfile(path): + log.info("Output file is a tar file") + else: + log.error("Output file is not a tar file") + return False + + finally: + shutil.rmtree(td) + + return True + +def check_build_wheel(hooks): + with BuildEnvironment() as env: + try: + env.pip_install(hooks.build_sys_requires) + log.info('Installed static build dependencies') + except CalledProcessError: + log.error('Failed to install static build dependencies') + return False + + try: + reqs = hooks.get_requires_for_build_wheel({}) + log.info('Got build requires: %s', reqs) + except: + log.error('Failure in get_requires_for_build_sdist', exc_info=True) + return False + + try: + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + except CalledProcessError: + log.error('Failed to install dynamic build dependencies') + return False + + td = mkdtemp() + log.info('Trying to build wheel in %s', td) + try: + try: + filename = hooks.build_wheel(td, {}) + log.info('build_wheel returned %r', filename) + except: + log.info('Failure in build_wheel', exc_info=True) + return False + + if not filename.endswith('.whl'): + log.error("Filename %s doesn't have .whl extension", filename) + return False + + path = pjoin(td, filename) + if isfile(path): + log.info("Output file %s exists", path) + else: + log.error("Output file %s does not exist", path) + return False + + if zipfile.is_zipfile(path): + log.info("Output file is a zip file") + else: + log.error("Output file is not a zip file") + return False + + finally: + shutil.rmtree(td) + + return True + + +def check(source_dir): + pyproject = pjoin(source_dir, 'pyproject.toml') + if isfile(pyproject): + log.info('Found pyproject.toml') + else: + log.error('Missing pyproject.toml') + return 
False + + try: + with open(pyproject) as f: + pyproject_data = toml_load(f) + # Ensure the mandatory data can be loaded + buildsys = pyproject_data['build-system'] + requires = buildsys['requires'] + backend = buildsys['build-backend'] + log.info('Loaded pyproject.toml') + except (TomlError, KeyError): + log.error("Invalid pyproject.toml", exc_info=True) + return False + + hooks = Pep517HookCaller(source_dir, backend) + + sdist_ok = check_build_sdist(hooks) + wheel_ok = check_build_wheel(hooks) + + if not sdist_ok: + log.warning('Sdist checks failed; scroll up to see') + if not wheel_ok: + log.warning('Wheel checks failed') + + return sdist_ok + + +def main(argv=None): + ap = argparse.ArgumentParser() + ap.add_argument('source_dir', + help="A directory containing pyproject.toml") + args = ap.parse_args(argv) + + enable_colourful_output() + + ok = check(args.source_dir) + + if ok: + print(ansi('Checks passed', 'green')) + else: + print(ansi('Checks failed', 'red')) + sys.exit(1) + +ansi_codes = { + 'reset': '\x1b[0m', + 'bold': '\x1b[1m', + 'red': '\x1b[31m', + 'green': '\x1b[32m', +} +def ansi(s, attr): + if os.name != 'nt' and sys.stdout.isatty(): + return ansi_codes[attr] + str(s) + ansi_codes['reset'] + else: + return str(s) + +if __name__ == '__main__': + main() diff --git a/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/colorlog.py b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/colorlog.py new file mode 100644 index 0000000..26cf748 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/colorlog.py @@ -0,0 +1,110 @@ +"""Nicer log formatting with colours. + +Code copied from Tornado, Apache licensed. +""" +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import sys + +try: + import curses +except ImportError: + curses = None + +def _stderr_supports_color(): + color = False + if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): + try: + curses.setupterm() + if curses.tigetnum("colors") > 0: + color = True + except Exception: + pass + return color + +class LogFormatter(logging.Formatter): + """Log formatter with colour support + """ + DEFAULT_COLORS = { + logging.INFO: 2, # Green + logging.WARNING: 3, # Yellow + logging.ERROR: 1, # Red + logging.CRITICAL: 1, + } + + def __init__(self, color=True, datefmt=None): + r""" + :arg bool color: Enables color support. + :arg string fmt: Log message format. + It will be applied to the attributes dict of log records. The + text between ``%(color)s`` and ``%(end_color)s`` will be colored + depending on the level if color support is on. + :arg dict colors: color mappings from logging level to terminal color + code + :arg string datefmt: Datetime format. + Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. + .. versionchanged:: 3.2 + Added ``fmt`` and ``datefmt`` arguments. + """ + logging.Formatter.__init__(self, datefmt=datefmt) + self._colors = {} + if color and _stderr_supports_color(): + # The curses module has some str/bytes confusion in + # python3. Until version 3.2.3, most methods return + # bytes, but only accept strings. In addition, we want to + # output these strings with the logging module, which + # works with unicode strings. 
The explicit calls to + # unicode() below are harmless in python2 but will do the + # right conversion in python 3. + fg_color = (curses.tigetstr("setaf") or + curses.tigetstr("setf") or "") + if (3, 0) < sys.version_info < (3, 2, 3): + fg_color = str(fg_color, "ascii") + + for levelno, code in self.DEFAULT_COLORS.items(): + self._colors[levelno] = str(curses.tparm(fg_color, code), "ascii") + self._normal = str(curses.tigetstr("sgr0"), "ascii") + + scr = curses.initscr() + self.termwidth = scr.getmaxyx()[1] + curses.endwin() + else: + self._normal = '' + # Default width is usually 80, but too wide is worse than too narrow + self.termwidth = 70 + + def formatMessage(self, record): + l = len(record.message) + right_text = '{initial}-{name}'.format(initial=record.levelname[0], + name=record.name) + if l + len(right_text) < self.termwidth: + space = ' ' * (self.termwidth - (l + len(right_text))) + else: + space = ' ' + + if record.levelno in self._colors: + start_color = self._colors[record.levelno] + end_color = self._normal + else: + start_color = end_color = '' + + return record.message + space + start_color + right_text + end_color + +def enable_colourful_output(level=logging.INFO): + handler = logging.StreamHandler() + handler.setFormatter(LogFormatter()) + logging.root.addHandler(handler) + logging.root.setLevel(level) diff --git a/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/compat.py b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/compat.py new file mode 100644 index 0000000..01c66fc --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/compat.py @@ -0,0 +1,23 @@ +"""Handle reading and writing JSON in UTF-8, on Python 3 and 2.""" +import json +import sys + +if sys.version_info[0] >= 3: + # Python 3 + def write_json(obj, path, **kwargs): + with open(path, 'w', encoding='utf-8') as f: + json.dump(obj, f, **kwargs) + + def read_json(path): + with open(path, 'r', encoding='utf-8') as f: + return json.load(f) + +else: + # Python 
2 + def write_json(obj, path, **kwargs): + with open(path, 'wb') as f: + json.dump(obj, f, encoding='utf-8', **kwargs) + + def read_json(path): + with open(path, 'rb') as f: + return json.load(f) diff --git a/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/envbuild.py b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/envbuild.py new file mode 100644 index 0000000..c264f46 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/envbuild.py @@ -0,0 +1,150 @@ +"""Build wheels/sdists by installing build deps to a temporary environment. +""" + +import os +import logging +from pip._vendor import pytoml +import shutil +from subprocess import check_call +import sys +from sysconfig import get_paths +from tempfile import mkdtemp + +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + +def _load_pyproject(source_dir): + with open(os.path.join(source_dir, 'pyproject.toml')) as f: + pyproject_data = pytoml.load(f) + buildsys = pyproject_data['build-system'] + return buildsys['requires'], buildsys['build-backend'] + + +class BuildEnvironment(object): + """Context manager to install build deps in a simple temporary environment + + Based on code I wrote for pip, which is MIT licensed. + """ + # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) + # + # Permission is hereby granted, free of charge, to any person obtaining + # a copy of this software and associated documentation files (the + # "Software"), to deal in the Software without restriction, including + # without limitation the rights to use, copy, modify, merge, publish, + # distribute, sublicense, and/or sell copies of the Software, and to + # permit persons to whom the Software is furnished to do so, subject to + # the following conditions: + # + # The above copyright notice and this permission notice shall be + # included in all copies or substantial portions of the Software. 
+ # + # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + path = None + + def __init__(self, cleanup=True): + self._cleanup = cleanup + + def __enter__(self): + self.path = mkdtemp(prefix='pep517-build-env-') + log.info('Temporary build environment: %s', self.path) + + self.save_path = os.environ.get('PATH', None) + self.save_pythonpath = os.environ.get('PYTHONPATH', None) + + install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' + install_dirs = get_paths(install_scheme, vars={ + 'base': self.path, + 'platbase': self.path, + }) + + scripts = install_dirs['scripts'] + if self.save_path: + os.environ['PATH'] = scripts + os.pathsep + self.save_path + else: + os.environ['PATH'] = scripts + os.pathsep + os.defpath + + if install_dirs['purelib'] == install_dirs['platlib']: + lib_dirs = install_dirs['purelib'] + else: + lib_dirs = install_dirs['purelib'] + os.pathsep + \ + install_dirs['platlib'] + if self.save_pythonpath: + os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ + self.save_pythonpath + else: + os.environ['PYTHONPATH'] = lib_dirs + + return self + + def pip_install(self, reqs): + """Install dependencies into this env by calling pip in a subprocess""" + if not reqs: + return + log.info('Calling pip to install %s', reqs) + check_call([sys.executable, '-m', 'pip', 'install', '--ignore-installed', + '--prefix', self.path] + list(reqs)) + + def __exit__(self, exc_type, exc_val, exc_tb): + if self._cleanup and (self.path is not None) and os.path.isdir(self.path): + shutil.rmtree(self.path) + + if self.save_path is None: + 
os.environ.pop('PATH', None) + else: + os.environ['PATH'] = self.save_path + + if self.save_pythonpath is None: + os.environ.pop('PYTHONPATH', None) + else: + os.environ['PYTHONPATH'] = self.save_pythonpath + +def build_wheel(source_dir, wheel_dir, config_settings=None): + """Build a wheel from a source directory using PEP 517 hooks. + + :param str source_dir: Source directory containing pyproject.toml + :param str wheel_dir: Target directory to create wheel in + :param dict config_settings: Options to pass to build backend + + This is a blocking function which will run pip in a subprocess to install + build requirements. + """ + if config_settings is None: + config_settings = {} + requires, backend = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + reqs = hooks.get_requires_for_build_wheel(config_settings) + env.pip_install(reqs) + return hooks.build_wheel(wheel_dir, config_settings) + + +def build_sdist(source_dir, sdist_dir, config_settings=None): + """Build an sdist from a source directory using PEP 517 hooks. + + :param str source_dir: Source directory containing pyproject.toml + :param str sdist_dir: Target directory to place sdist in + :param dict config_settings: Options to pass to build backend + + This is a blocking function which will run pip in a subprocess to install + build requirements. 
+ """ + if config_settings is None: + config_settings = {} + requires, backend = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + reqs = hooks.get_requires_for_build_sdist(config_settings) + env.pip_install(reqs) + return hooks.build_sdist(sdist_dir, config_settings) diff --git a/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/wrappers.py b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/wrappers.py new file mode 100644 index 0000000..28260f3 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pip/_vendor/pep517/wrappers.py @@ -0,0 +1,134 @@ +from contextlib import contextmanager +import os +from os.path import dirname, abspath, join as pjoin +import shutil +from subprocess import check_call +import sys +from tempfile import mkdtemp + +from . import compat + +_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py') + +@contextmanager +def tempdir(): + td = mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + +class UnsupportedOperation(Exception): + """May be raised by build_sdist if the backend indicates that it can't.""" + +class Pep517HookCaller(object): + """A wrapper around a source directory to be built with a PEP 517 backend. + + source_dir : The path to the source directory, containing pyproject.toml. + backend : The build backend spec, as per PEP 517, from pyproject.toml. + """ + def __init__(self, source_dir, build_backend): + self.source_dir = abspath(source_dir) + self.build_backend = build_backend + + def get_requires_for_build_wheel(self, config_settings=None): + """Identify packages required for building a wheel + + Returns a list of dependency specifications, e.g.: + ["wheel >= 0.25", "setuptools"] + + This does not include requirements specified in pyproject.toml. + It returns the result of calling the equivalently named hook in a + subprocess. 
+ """ + return self._call_hook('get_requires_for_build_wheel', { + 'config_settings': config_settings + }) + + def prepare_metadata_for_build_wheel(self, metadata_directory, config_settings=None): + """Prepare a *.dist-info folder with metadata for this project. + + Returns the name of the newly created folder. + + If the build backend defines a hook with this name, it will be called + in a subprocess. If not, the backend will be asked to build a wheel, + and the dist-info extracted from that. + """ + return self._call_hook('prepare_metadata_for_build_wheel', { + 'metadata_directory': abspath(metadata_directory), + 'config_settings': config_settings, + }) + + def build_wheel(self, wheel_directory, config_settings=None, metadata_directory=None): + """Build a wheel from this project. + + Returns the name of the newly created file. + + In general, this will call the 'build_wheel' hook in the backend. + However, if that was previously called by + 'prepare_metadata_for_build_wheel', and the same metadata_directory is + used, the previously built wheel will be copied to wheel_directory. + """ + if metadata_directory is not None: + metadata_directory = abspath(metadata_directory) + return self._call_hook('build_wheel', { + 'wheel_directory': abspath(wheel_directory), + 'config_settings': config_settings, + 'metadata_directory': metadata_directory, + }) + + def get_requires_for_build_sdist(self, config_settings=None): + """Identify packages required for building a wheel + + Returns a list of dependency specifications, e.g.: + ["setuptools >= 26"] + + This does not include requirements specified in pyproject.toml. + It returns the result of calling the equivalently named hook in a + subprocess. + """ + return self._call_hook('get_requires_for_build_sdist', { + 'config_settings': config_settings + }) + + def build_sdist(self, sdist_directory, config_settings=None): + """Build an sdist from this project. + + Returns the name of the newly created file. 
+ + This calls the 'build_sdist' backend hook in a subprocess. + """ + return self._call_hook('build_sdist', { + 'sdist_directory': abspath(sdist_directory), + 'config_settings': config_settings, + }) + + + def _call_hook(self, hook_name, kwargs): + env = os.environ.copy() + + # On Python 2, pytoml returns Unicode values (which is correct) but the + # environment passed to check_call needs to contain string values. We + # convert here by encoding using ASCII (the backend can only contain + # letters, digits and _, . and : characters, and will be used as a + # Python identifier, so non-ASCII content is wrong on Python 2 in + # any case). + if sys.version_info[0] == 2: + build_backend = self.build_backend.encode('ASCII') + else: + build_backend = self.build_backend + + env['PEP517_BUILD_BACKEND'] = build_backend + with tempdir() as td: + compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'), + indent=2) + + # Run the hook in a subprocess + check_call([sys.executable, _in_proc_script, hook_name, td], + cwd=self.source_dir, env=env) + + data = compat.read_json(pjoin(td, 'output.json')) + if data.get('unsupported'): + raise UnsupportedOperation + return data['return_val'] + diff --git a/Shared/lib/python3.4/site-packages/pip/commands/completion.py b/Shared/lib/python3.4/site-packages/pip/commands/completion.py deleted file mode 100644 index dc80af3..0000000 --- a/Shared/lib/python3.4/site-packages/pip/commands/completion.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import absolute_import - -import sys -from pip.basecommand import Command - -BASE_COMPLETION = """ -# pip %(shell)s completion start%(script)s# pip %(shell)s completion end -""" - -COMPLETION_SCRIPTS = { - 'bash': """ -_pip_completion() -{ - COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ - COMP_CWORD=$COMP_CWORD \\ - PIP_AUTO_COMPLETE=1 $1 ) ) -} -complete -o default -F _pip_completion pip -""", 'zsh': """ -function _pip_completion { - local words cword - read -Ac words - read -cn cword - 
reply=( $( COMP_WORDS="$words[*]" \\ - COMP_CWORD=$(( cword-1 )) \\ - PIP_AUTO_COMPLETE=1 $words[1] ) ) -} -compctl -K _pip_completion pip -"""} - - -class CompletionCommand(Command): - """A helper command to be used for command completion.""" - name = 'completion' - summary = 'A helper command used for command completion' - - def __init__(self, *args, **kw): - super(CompletionCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option( - '--bash', '-b', - action='store_const', - const='bash', - dest='shell', - help='Emit completion code for bash') - cmd_opts.add_option( - '--zsh', '-z', - action='store_const', - const='zsh', - dest='shell', - help='Emit completion code for zsh') - - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - """Prints the completion code of the given shell""" - shells = COMPLETION_SCRIPTS.keys() - shell_options = ['--' + shell for shell in sorted(shells)] - if options.shell in shells: - script = COMPLETION_SCRIPTS.get(options.shell, '') - print(BASE_COMPLETION % {'script': script, 'shell': options.shell}) - else: - sys.stderr.write( - 'ERROR: You must pass %s\n' % ' or '.join(shell_options) - ) diff --git a/Shared/lib/python3.4/site-packages/pip/commands/install.py b/Shared/lib/python3.4/site-packages/pip/commands/install.py deleted file mode 100644 index 13b328f..0000000 --- a/Shared/lib/python3.4/site-packages/pip/commands/install.py +++ /dev/null @@ -1,404 +0,0 @@ -from __future__ import absolute_import - -import logging -import operator -import os -import tempfile -import shutil -import warnings -try: - import wheel -except ImportError: - wheel = None - -from pip.req import RequirementSet -from pip.basecommand import RequirementCommand -from pip.locations import virtualenv_no_global, distutils_scheme -from pip.exceptions import ( - InstallationError, CommandError, PreviousBuildDirError, -) -from pip import cmdoptions -from pip.utils import ensure_dir -from pip.utils.build 
import BuildDirectory -from pip.utils.deprecation import RemovedInPip10Warning -from pip.utils.filesystem import check_path_owner -from pip.wheel import WheelCache, WheelBuilder - -from pip.locations import running_under_virtualenv - -logger = logging.getLogger(__name__) - - -class InstallCommand(RequirementCommand): - """ - Install packages from: - - - PyPI (and other indexes) using requirement specifiers. - - VCS project urls. - - Local project directories. - - Local or remote source archives. - - pip also supports installing from "requirements files", which provide - an easy way to specify a whole environment to be installed. - """ - name = 'install' - - usage = """ - %prog [options] [package-index-options] ... - %prog [options] -r [package-index-options] ... - %prog [options] [-e] ... - %prog [options] [-e] ... - %prog [options] ...""" - - summary = 'Install packages.' - - def __init__(self, *args, **kw): - super(InstallCommand, self).__init__(*args, **kw) - - default_user = True - if running_under_virtualenv(): - default_user = False - if os.geteuid() == 0: - default_user = False - - cmd_opts = self.cmd_opts - - cmd_opts.add_option(cmdoptions.constraints()) - cmd_opts.add_option(cmdoptions.editable()) - cmd_opts.add_option(cmdoptions.requirements()) - cmd_opts.add_option(cmdoptions.build_dir()) - - cmd_opts.add_option( - '-t', '--target', - dest='target_dir', - metavar='dir', - default=None, - help='Install packages into . ' - 'By default this will not replace existing files/folders in ' - '. Use --upgrade to replace existing packages in ' - 'with new versions.' 
- ) - - cmd_opts.add_option( - '-d', '--download', '--download-dir', '--download-directory', - dest='download_dir', - metavar='dir', - default=None, - help=("Download packages into instead of installing them, " - "regardless of what's already installed."), - ) - - cmd_opts.add_option(cmdoptions.src()) - - cmd_opts.add_option( - '-U', '--upgrade', - dest='upgrade', - action='store_true', - help='Upgrade all specified packages to the newest available ' - 'version. This process is recursive regardless of whether ' - 'a dependency is already satisfied.' - ) - - cmd_opts.add_option( - '--force-reinstall', - dest='force_reinstall', - action='store_true', - help='When upgrading, reinstall all packages even if they are ' - 'already up-to-date.') - - cmd_opts.add_option( - '-I', '--ignore-installed', - dest='ignore_installed', - action='store_true', - default=default_user, - help='Ignore the installed packages (reinstalling instead).') - - cmd_opts.add_option(cmdoptions.no_deps()) - - cmd_opts.add_option(cmdoptions.install_options()) - cmd_opts.add_option(cmdoptions.global_options()) - - cmd_opts.add_option( - '--user', - dest='use_user_site', - action='store_true', - default=default_user, - help="Install to the Python user install directory for your " - "platform. Typically ~/.local/, or %APPDATA%\Python on " - "Windows. (See the Python documentation for site.USER_BASE " - "for full details.) On Debian systems, this is the " - "default when running outside of a virtual environment " - "and not as root.") - - cmd_opts.add_option( - '--system', - dest='use_user_site', - action='store_false', - help="Install using the system scheme (overrides --user on " - "Debian systems)") - - cmd_opts.add_option( - '--egg', - dest='as_egg', - action='store_true', - help="Install packages as eggs, not 'flat', like pip normally " - "does. This option is not about installing *from* eggs. 
" - "(WARNING: Because this option overrides pip's normal install" - " logic, requirements files may not behave as expected.)") - - cmd_opts.add_option( - '--root', - dest='root_path', - metavar='dir', - default=None, - help="Install everything relative to this alternate root " - "directory.") - - cmd_opts.add_option( - '--prefix', - dest='prefix_path', - metavar='dir', - default=None, - help="Installation prefix where lib, bin and other top-level " - "folders are placed") - - cmd_opts.add_option( - "--compile", - action="store_true", - dest="compile", - default=True, - help="Compile py files to pyc", - ) - - cmd_opts.add_option( - "--no-compile", - action="store_false", - dest="compile", - help="Do not compile py files to pyc", - ) - - cmd_opts.add_option(cmdoptions.use_wheel()) - cmd_opts.add_option(cmdoptions.no_use_wheel()) - cmd_opts.add_option(cmdoptions.no_binary()) - cmd_opts.add_option(cmdoptions.only_binary()) - cmd_opts.add_option(cmdoptions.pre()) - cmd_opts.add_option(cmdoptions.no_clean()) - cmd_opts.add_option(cmdoptions.require_hashes()) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - cmdoptions.resolve_wheel_no_use_binary(options) - cmdoptions.check_install_build_global(options) - - if options.allow_external: - warnings.warn( - "--allow-external has been deprecated and will be removed in " - "the future. Due to changes in the repository protocol, it no " - "longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_all_external: - warnings.warn( - "--allow-all-external has been deprecated and will be removed " - "in the future. 
Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_unverified: - warnings.warn( - "--allow-unverified has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.download_dir: - warnings.warn( - "pip install --download has been deprecated and will be " - "removed in the future. Pip now has a download command that " - "should be used instead.", - RemovedInPip10Warning, - ) - options.ignore_installed = True - - if options.build_dir: - options.build_dir = os.path.abspath(options.build_dir) - - options.src_dir = os.path.abspath(options.src_dir) - install_options = options.install_options or [] - if options.use_user_site: - if options.prefix_path: - raise CommandError( - "Can not combine '--user' and '--prefix' as they imply " - "different installation locations" - ) - if virtualenv_no_global(): - raise InstallationError( - "Can not perform a '--user' install. User site-packages " - "are not visible in this virtualenv." - ) - install_options.append('--user') - install_options.append('--prefix=') - - temp_target_dir = None - if options.target_dir: - options.ignore_installed = True - temp_target_dir = tempfile.mkdtemp() - options.target_dir = os.path.abspath(options.target_dir) - if (os.path.exists(options.target_dir) and not - os.path.isdir(options.target_dir)): - raise CommandError( - "Target path exists but is not a directory, will not " - "continue." 
- ) - install_options.append('--home=' + temp_target_dir) - - global_options = options.global_options or [] - - with self._build_session(options) as session: - - finder = self._build_package_finder(options, session) - build_delete = (not (options.no_clean or options.build_dir)) - wheel_cache = WheelCache(options.cache_dir, options.format_control) - if options.cache_dir and not check_path_owner(options.cache_dir): - logger.warning( - "The directory '%s' or its parent directory is not owned " - "by the current user and caching wheels has been " - "disabled. check the permissions and owner of that " - "directory. If executing pip with sudo, you may want " - "sudo's -H flag.", - options.cache_dir, - ) - options.cache_dir = None - - with BuildDirectory(options.build_dir, - delete=build_delete) as build_dir: - requirement_set = RequirementSet( - build_dir=build_dir, - src_dir=options.src_dir, - download_dir=options.download_dir, - upgrade=options.upgrade, - as_egg=options.as_egg, - ignore_installed=options.ignore_installed, - ignore_dependencies=options.ignore_dependencies, - force_reinstall=options.force_reinstall, - use_user_site=options.use_user_site, - target_dir=temp_target_dir, - session=session, - pycompile=options.compile, - isolated=options.isolated_mode, - wheel_cache=wheel_cache, - require_hashes=options.require_hashes, - ) - - self.populate_requirement_set( - requirement_set, args, options, finder, session, self.name, - wheel_cache - ) - - if not requirement_set.has_requirements: - return - - try: - if (options.download_dir or not wheel or not - options.cache_dir): - # on -d don't do complex things like building - # wheels, and don't try to build wheels when wheel is - # not installed. - requirement_set.prepare_files(finder) - else: - # build wheels before install. - wb = WheelBuilder( - requirement_set, - finder, - build_options=[], - global_options=[], - ) - # Ignore the result: a failed wheel will be - # installed from the sdist/vcs whatever. 
- wb.build(autobuilding=True) - - if not options.download_dir: - requirement_set.install( - install_options, - global_options, - root=options.root_path, - prefix=options.prefix_path, - ) - reqs = sorted( - requirement_set.successfully_installed, - key=operator.attrgetter('name')) - items = [] - for req in reqs: - item = req.name - try: - if hasattr(req, 'installed_version'): - if req.installed_version: - item += '-' + req.installed_version - except Exception: - pass - items.append(item) - installed = ' '.join(items) - if installed: - logger.info('Successfully installed %s', installed) - else: - downloaded = ' '.join([ - req.name - for req in requirement_set.successfully_downloaded - ]) - if downloaded: - logger.info( - 'Successfully downloaded %s', downloaded - ) - except PreviousBuildDirError: - options.no_clean = True - raise - finally: - # Clean up - if not options.no_clean: - requirement_set.cleanup_files() - - if options.target_dir: - ensure_dir(options.target_dir) - - lib_dir = distutils_scheme('', home=temp_target_dir)['purelib'] - - for item in os.listdir(lib_dir): - target_item_dir = os.path.join(options.target_dir, item) - if os.path.exists(target_item_dir): - if not options.upgrade: - logger.warning( - 'Target directory %s already exists. Specify ' - '--upgrade to force replacement.', - target_item_dir - ) - continue - if os.path.islink(target_item_dir): - logger.warning( - 'Target directory %s already exists and is ' - 'a link. 
Pip will not automatically replace ' - 'links, please remove if replacement is ' - 'desired.', - target_item_dir - ) - continue - if os.path.isdir(target_item_dir): - shutil.rmtree(target_item_dir) - else: - os.remove(target_item_dir) - - shutil.move( - os.path.join(lib_dir, item), - target_item_dir - ) - shutil.rmtree(temp_target_dir) - return requirement_set diff --git a/Shared/lib/python3.4/site-packages/pip/commands/list.py b/Shared/lib/python3.4/site-packages/pip/commands/list.py deleted file mode 100644 index 5346488..0000000 --- a/Shared/lib/python3.4/site-packages/pip/commands/list.py +++ /dev/null @@ -1,209 +0,0 @@ -from __future__ import absolute_import - -import logging -import warnings - -from pip.basecommand import Command -from pip.exceptions import CommandError -from pip.index import PackageFinder -from pip.utils import ( - get_installed_distributions, dist_is_editable) -from pip.utils.deprecation import RemovedInPip10Warning -from pip.cmdoptions import make_option_group, index_group - - -logger = logging.getLogger(__name__) - - -class ListCommand(Command): - """ - List installed packages, including editables. - - Packages are listed in a case-insensitive sorted order. - """ - name = 'list' - usage = """ - %prog [options]""" - summary = 'List installed packages.' 
- - def __init__(self, *args, **kw): - super(ListCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option( - '-o', '--outdated', - action='store_true', - default=False, - help='List outdated packages') - cmd_opts.add_option( - '-u', '--uptodate', - action='store_true', - default=False, - help='List uptodate packages') - cmd_opts.add_option( - '-e', '--editable', - action='store_true', - default=False, - help='List editable projects.') - cmd_opts.add_option( - '-l', '--local', - action='store_true', - default=False, - help=('If in a virtualenv that has global access, do not list ' - 'globally-installed packages.'), - ) - self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', - default=False, - help='Only output packages installed in user-site.') - - cmd_opts.add_option( - '--pre', - action='store_true', - default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), - ) - - index_opts = make_option_group(index_group, self.parser) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def _build_package_finder(self, options, index_urls, session): - """ - Create a package finder appropriate to this list command. - """ - return PackageFinder( - find_links=options.find_links, - index_urls=index_urls, - allow_all_prereleases=options.pre, - trusted_hosts=options.trusted_hosts, - process_dependency_links=options.process_dependency_links, - session=session, - ) - - def run(self, options, args): - if options.allow_external: - warnings.warn( - "--allow-external has been deprecated and will be removed in " - "the future. Due to changes in the repository protocol, it no " - "longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_all_external: - warnings.warn( - "--allow-all-external has been deprecated and will be removed " - "in the future. 
Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_unverified: - warnings.warn( - "--allow-unverified has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - if options.outdated and options.uptodate: - raise CommandError( - "Options --outdated and --uptodate cannot be combined.") - - if options.outdated: - self.run_outdated(options) - elif options.uptodate: - self.run_uptodate(options) - else: - self.run_listing(options) - - def run_outdated(self, options): - for dist, latest_version, typ in sorted( - self.find_packages_latest_versions(options), - key=lambda p: p[0].project_name.lower()): - if latest_version > dist.parsed_version: - logger.info( - '%s - Latest: %s [%s]', - self.output_package(dist), latest_version, typ, - ) - - def find_packages_latest_versions(self, options): - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.info('Ignoring indexes: %s', ','.join(index_urls)) - index_urls = [] - - dependency_links = [] - for dist in get_installed_distributions( - local_only=options.local, - user_only=options.user, - editables_only=options.editable): - if dist.has_metadata('dependency_links.txt'): - dependency_links.extend( - dist.get_metadata_lines('dependency_links.txt'), - ) - - with self._build_session(options) as session: - finder = self._build_package_finder(options, index_urls, session) - finder.add_dependency_links(dependency_links) - - installed_packages = get_installed_distributions( - local_only=options.local, - user_only=options.user, - editables_only=options.editable, - ) - for dist in installed_packages: - typ = 'unknown' - all_candidates = finder.find_all_candidates(dist.key) - if not options.pre: - # Remove prereleases - all_candidates = [candidate for candidate in all_candidates - if not 
candidate.version.is_prerelease] - - if not all_candidates: - continue - best_candidate = max(all_candidates, - key=finder._candidate_sort_key) - remote_version = best_candidate.version - if best_candidate.location.is_wheel: - typ = 'wheel' - else: - typ = 'sdist' - yield dist, remote_version, typ - - def run_listing(self, options): - installed_packages = get_installed_distributions( - local_only=options.local, - user_only=options.user, - editables_only=options.editable, - ) - self.output_package_listing(installed_packages) - - def output_package(self, dist): - if dist_is_editable(dist): - return '%s (%s, %s)' % ( - dist.project_name, - dist.version, - dist.location, - ) - else: - return '%s (%s)' % (dist.project_name, dist.version) - - def output_package_listing(self, installed_packages): - installed_packages = sorted( - installed_packages, - key=lambda dist: dist.project_name.lower(), - ) - for dist in installed_packages: - logger.info(self.output_package(dist)) - - def run_uptodate(self, options): - uptodate = [] - for dist, version, typ in self.find_packages_latest_versions(options): - if dist.parsed_version == version: - uptodate.append(dist) - self.output_package_listing(uptodate) diff --git a/Shared/lib/python3.4/site-packages/pip/compat/__init__.py b/Shared/lib/python3.4/site-packages/pip/compat/__init__.py deleted file mode 100644 index 703852b..0000000 --- a/Shared/lib/python3.4/site-packages/pip/compat/__init__.py +++ /dev/null @@ -1,164 +0,0 @@ -"""Stuff that differs in different Python versions and platform -distributions.""" -from __future__ import absolute_import, division - -import os -import sys - -from pip._vendor.six import text_type - -try: - from logging.config import dictConfig as logging_dictConfig -except ImportError: - from pip.compat.dictconfig import dictConfig as logging_dictConfig - -try: - from collections import OrderedDict -except ImportError: - from pip.compat.ordereddict import OrderedDict - -try: - import ipaddress -except 
ImportError: - try: - from pip._vendor import ipaddress - except ImportError: - import ipaddr as ipaddress - ipaddress.ip_address = ipaddress.IPAddress - ipaddress.ip_network = ipaddress.IPNetwork - - -try: - import sysconfig - - def get_stdlib(): - paths = [ - sysconfig.get_path("stdlib"), - sysconfig.get_path("platstdlib"), - ] - return set(filter(bool, paths)) -except ImportError: - from distutils import sysconfig - - def get_stdlib(): - paths = [ - sysconfig.get_python_lib(standard_lib=True), - sysconfig.get_python_lib(standard_lib=True, plat_specific=True), - ] - return set(filter(bool, paths)) - - -__all__ = [ - "logging_dictConfig", "ipaddress", "uses_pycache", "console_to_str", - "native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", - "OrderedDict", -] - - -if sys.version_info >= (3, 4): - uses_pycache = True - from importlib.util import cache_from_source -else: - import imp - uses_pycache = hasattr(imp, 'cache_from_source') - if uses_pycache: - cache_from_source = imp.cache_from_source - else: - cache_from_source = None - - -if sys.version_info >= (3,): - def console_to_str(s): - try: - return s.decode(sys.__stdout__.encoding) - except UnicodeDecodeError: - return s.decode('utf_8') - - def native_str(s, replace=False): - if isinstance(s, bytes): - return s.decode('utf-8', 'replace' if replace else 'strict') - return s - -else: - def console_to_str(s): - return s - - def native_str(s, replace=False): - # Replace is ignored -- unicode to UTF-8 can't fail - if isinstance(s, text_type): - return s.encode('utf-8') - return s - - -def total_seconds(td): - if hasattr(td, "total_seconds"): - return td.total_seconds() - else: - val = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6 - return val / 10 ** 6 - - -def get_path_uid(path): - """ - Return path's uid. 
- - Does not follow symlinks: - https://github.com/pypa/pip/pull/935#discussion_r5307003 - - Placed this function in compat due to differences on AIX and - Jython, that should eventually go away. - - :raises OSError: When path is a symlink or can't be read. - """ - if hasattr(os, 'O_NOFOLLOW'): - fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) - file_uid = os.fstat(fd).st_uid - os.close(fd) - else: # AIX and Jython - # WARNING: time of check vulnerabity, but best we can do w/o NOFOLLOW - if not os.path.islink(path): - # older versions of Jython don't have `os.fstat` - file_uid = os.stat(path).st_uid - else: - # raise OSError for parity with os.O_NOFOLLOW above - raise OSError( - "%s is a symlink; Will not return uid for symlinks" % path - ) - return file_uid - - -def expanduser(path): - """ - Expand ~ and ~user constructions. - - Includes a workaround for http://bugs.python.org/issue14768 - """ - expanded = os.path.expanduser(path) - if path.startswith('~/') and expanded.startswith('//'): - expanded = expanded[1:] - return expanded - - -# packages in the stdlib that may have installation metadata, but should not be -# considered 'installed'. 
this theoretically could be determined based on -# dist.location (py27:`sysconfig.get_paths()['stdlib']`, -# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may -# make this ineffective, so hard-coding -stdlib_pkgs = ('python', 'wsgiref') -if sys.version_info >= (2, 7): - stdlib_pkgs += ('argparse',) - - -# windows detection, covers cpython and ironpython -WINDOWS = (sys.platform.startswith("win") or - (sys.platform == 'cli' and os.name == 'nt')) - - -def samefile(file1, file2): - """Provide an alternative for os.path.samefile on Windows/Python2""" - if hasattr(os.path, 'samefile'): - return os.path.samefile(file1, file2) - else: - path1 = os.path.normcase(os.path.abspath(file1)) - path2 = os.path.normcase(os.path.abspath(file2)) - return path1 == path2 diff --git a/Shared/lib/python3.4/site-packages/pip/compat/dictconfig.py b/Shared/lib/python3.4/site-packages/pip/compat/dictconfig.py deleted file mode 100644 index ec684aa..0000000 --- a/Shared/lib/python3.4/site-packages/pip/compat/dictconfig.py +++ /dev/null @@ -1,565 +0,0 @@ -# This is a copy of the Python logging.config.dictconfig module, -# reproduced with permission. It is provided here for backwards -# compatibility for Python versions prior to 2.7. -# -# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -from __future__ import absolute_import - -import logging.handlers -import re -import sys -import types - -from pip._vendor import six - -# flake8: noqa - -IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) - - -def valid_ident(s): - m = IDENTIFIER.match(s) - if not m: - raise ValueError('Not a valid Python identifier: %r' % s) - return True - -# -# This function is defined in logging only in recent versions of Python -# -try: - from logging import _checkLevel -except ImportError: - def _checkLevel(level): - if isinstance(level, int): - rv = level - elif str(level) == level: - if level not in logging._levelNames: - raise ValueError('Unknown level: %r' % level) - rv = logging._levelNames[level] - else: - raise TypeError('Level not an integer or a ' - 'valid string: %r' % level) - return rv - -# The ConvertingXXX classes are wrappers around standard Python containers, -# and they serve to convert any suitable values in the container. The -# conversion converts base dicts, lists and tuples to their wrapped -# equivalents, whereas strings which match a conversion format are converted -# appropriately. -# -# Each wrapper should have a configurator attribute holding the actual -# configurator to use for conversion. 
- - -class ConvertingDict(dict): - """A converting dictionary wrapper.""" - - def __getitem__(self, key): - value = dict.__getitem__(self, key) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def get(self, key, default=None): - value = dict.get(self, key, default) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, key, default=None): - value = dict.pop(self, key, default) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - -class ConvertingList(list): - """A converting list wrapper.""" - def __getitem__(self, key): - value = list.__getitem__(self, key) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, idx=-1): - value = list.pop(self, idx) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - return result - - -class ConvertingTuple(tuple): - """A converting tuple wrapper.""" - def __getitem__(self, key): - value = tuple.__getitem__(self, key) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - 
ConvertingTuple): - result.parent = self - result.key = key - return result - - -class BaseConfigurator(object): - """ - The configurator base class which defines some useful defaults. - """ - - CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') - - WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') - DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') - INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') - DIGIT_PATTERN = re.compile(r'^\d+$') - - value_converters = { - 'ext' : 'ext_convert', - 'cfg' : 'cfg_convert', - } - - # We might want to use a different one, e.g. importlib - importer = __import__ - - def __init__(self, config): - self.config = ConvertingDict(config) - self.config.configurator = self - - def resolve(self, s): - """ - Resolve strings to objects using standard import and attribute - syntax. - """ - name = s.split('.') - used = name.pop(0) - try: - found = self.importer(used) - for frag in name: - used += '.' + frag - try: - found = getattr(found, frag) - except AttributeError: - self.importer(used) - found = getattr(found, frag) - return found - except ImportError: - e, tb = sys.exc_info()[1:] - v = ValueError('Cannot resolve %r: %s' % (s, e)) - v.__cause__, v.__traceback__ = e, tb - raise v - - def ext_convert(self, value): - """Default converter for the ext:// protocol.""" - return self.resolve(value) - - def cfg_convert(self, value): - """Default converter for the cfg:// protocol.""" - rest = value - m = self.WORD_PATTERN.match(rest) - if m is None: - raise ValueError("Unable to convert %r" % value) - else: - rest = rest[m.end():] - d = self.config[m.groups()[0]] - # print d, rest - while rest: - m = self.DOT_PATTERN.match(rest) - if m: - d = d[m.groups()[0]] - else: - m = self.INDEX_PATTERN.match(rest) - if m: - idx = m.groups()[0] - if not self.DIGIT_PATTERN.match(idx): - d = d[idx] - else: - try: - n = int(idx) # try as number first (most likely) - d = d[n] - except TypeError: - d = d[idx] - if m: - rest = rest[m.end():] - else: - raise 
ValueError('Unable to convert ' - '%r at %r' % (value, rest)) - # rest should be empty - return d - - def convert(self, value): - """ - Convert values to an appropriate type. dicts, lists and tuples are - replaced by their converting alternatives. Strings are checked to - see if they have a conversion format and are converted if they do. - """ - if not isinstance(value, ConvertingDict) and isinstance(value, dict): - value = ConvertingDict(value) - value.configurator = self - elif not isinstance(value, ConvertingList) and isinstance(value, list): - value = ConvertingList(value) - value.configurator = self - elif not isinstance(value, ConvertingTuple) and\ - isinstance(value, tuple): - value = ConvertingTuple(value) - value.configurator = self - elif isinstance(value, six.string_types): # str for py3k - m = self.CONVERT_PATTERN.match(value) - if m: - d = m.groupdict() - prefix = d['prefix'] - converter = self.value_converters.get(prefix, None) - if converter: - suffix = d['suffix'] - converter = getattr(self, converter) - value = converter(suffix) - return value - - def configure_custom(self, config): - """Configure an object with a user-supplied factory.""" - c = config.pop('()') - if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType: - c = self.resolve(c) - props = config.pop('.', None) - # Check for valid identifiers - kwargs = dict((k, config[k]) for k in config if valid_ident(k)) - result = c(**kwargs) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def as_tuple(self, value): - """Utility function which converts lists to tuples.""" - if isinstance(value, list): - value = tuple(value) - return value - - -class DictConfigurator(BaseConfigurator): - """ - Configure logging using a dictionary-like object to describe the - configuration. 
- """ - - def configure(self): - """Do the configuration.""" - - config = self.config - if 'version' not in config: - raise ValueError("dictionary doesn't specify a version") - if config['version'] != 1: - raise ValueError("Unsupported version: %s" % config['version']) - incremental = config.pop('incremental', False) - EMPTY_DICT = {} - logging._acquireLock() - try: - if incremental: - handlers = config.get('handlers', EMPTY_DICT) - # incremental handler config only if handler name - # ties in to logging._handlers (Python 2.7) - if sys.version_info[:2] == (2, 7): - for name in handlers: - if name not in logging._handlers: - raise ValueError('No handler found with ' - 'name %r' % name) - else: - try: - handler = logging._handlers[name] - handler_config = handlers[name] - level = handler_config.get('level', None) - if level: - handler.setLevel(_checkLevel(level)) - except StandardError as e: - raise ValueError('Unable to configure handler ' - '%r: %s' % (name, e)) - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - try: - self.configure_logger(name, loggers[name], True) - except StandardError as e: - raise ValueError('Unable to configure logger ' - '%r: %s' % (name, e)) - root = config.get('root', None) - if root: - try: - self.configure_root(root, True) - except StandardError as e: - raise ValueError('Unable to configure root ' - 'logger: %s' % e) - else: - disable_existing = config.pop('disable_existing_loggers', True) - - logging._handlers.clear() - del logging._handlerList[:] - - # Do formatters first - they don't refer to anything else - formatters = config.get('formatters', EMPTY_DICT) - for name in formatters: - try: - formatters[name] = self.configure_formatter( - formatters[name]) - except StandardError as e: - raise ValueError('Unable to configure ' - 'formatter %r: %s' % (name, e)) - # Next, do filters - they don't refer to anything else, either - filters = config.get('filters', EMPTY_DICT) - for name in filters: - try: - filters[name] = 
self.configure_filter(filters[name]) - except StandardError as e: - raise ValueError('Unable to configure ' - 'filter %r: %s' % (name, e)) - - # Next, do handlers - they refer to formatters and filters - # As handlers can refer to other handlers, sort the keys - # to allow a deterministic order of configuration - handlers = config.get('handlers', EMPTY_DICT) - for name in sorted(handlers): - try: - handler = self.configure_handler(handlers[name]) - handler.name = name - handlers[name] = handler - except StandardError as e: - raise ValueError('Unable to configure handler ' - '%r: %s' % (name, e)) - # Next, do loggers - they refer to handlers and filters - - # we don't want to lose the existing loggers, - # since other threads may have pointers to them. - # existing is set to contain all existing loggers, - # and as we go through the new configuration we - # remove any which are configured. At the end, - # what's left in existing is the set of loggers - # which were in the previous configuration but - # which are not in the new configuration. - root = logging.root - existing = list(root.manager.loggerDict) - # The list needs to be sorted so that we can - # avoid disabling child loggers of explicitly - # named loggers. With a sorted list it is easier - # to find the child loggers. - existing.sort() - # We'll keep the list of existing loggers - # which are children of named loggers here... - child_loggers = [] - # now set up the new ones... - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - if name in existing: - i = existing.index(name) - prefixed = name + "." 
- pflen = len(prefixed) - num_existing = len(existing) - i = i + 1 # look at the entry after name - while (i < num_existing) and\ - (existing[i][:pflen] == prefixed): - child_loggers.append(existing[i]) - i = i + 1 - existing.remove(name) - try: - self.configure_logger(name, loggers[name]) - except StandardError as e: - raise ValueError('Unable to configure logger ' - '%r: %s' % (name, e)) - - # Disable any old loggers. There's no point deleting - # them as other threads may continue to hold references - # and by disabling them, you stop them doing any logging. - # However, don't disable children of named loggers, as that's - # probably not what was intended by the user. - for log in existing: - logger = root.manager.loggerDict[log] - if log in child_loggers: - logger.level = logging.NOTSET - logger.handlers = [] - logger.propagate = True - elif disable_existing: - logger.disabled = True - - # And finally, do the root logger - root = config.get('root', None) - if root: - try: - self.configure_root(root) - except StandardError as e: - raise ValueError('Unable to configure root ' - 'logger: %s' % e) - finally: - logging._releaseLock() - - def configure_formatter(self, config): - """Configure a formatter from a dictionary.""" - if '()' in config: - factory = config['()'] # for use in exception handler - try: - result = self.configure_custom(config) - except TypeError as te: - if "'format'" not in str(te): - raise - # Name of parameter changed from fmt to format. - # Retry with old name. - # This is so that code can be used with older Python versions - #(e.g. 
by Django) - config['fmt'] = config.pop('format') - config['()'] = factory - result = self.configure_custom(config) - else: - fmt = config.get('format', None) - dfmt = config.get('datefmt', None) - result = logging.Formatter(fmt, dfmt) - return result - - def configure_filter(self, config): - """Configure a filter from a dictionary.""" - if '()' in config: - result = self.configure_custom(config) - else: - name = config.get('name', '') - result = logging.Filter(name) - return result - - def add_filters(self, filterer, filters): - """Add filters to a filterer from a list of names.""" - for f in filters: - try: - filterer.addFilter(self.config['filters'][f]) - except StandardError as e: - raise ValueError('Unable to add filter %r: %s' % (f, e)) - - def configure_handler(self, config): - """Configure a handler from a dictionary.""" - formatter = config.pop('formatter', None) - if formatter: - try: - formatter = self.config['formatters'][formatter] - except StandardError as e: - raise ValueError('Unable to set formatter ' - '%r: %s' % (formatter, e)) - level = config.pop('level', None) - filters = config.pop('filters', None) - if '()' in config: - c = config.pop('()') - if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType: - c = self.resolve(c) - factory = c - else: - klass = self.resolve(config.pop('class')) - # Special case for handler which refers to another handler - if issubclass(klass, logging.handlers.MemoryHandler) and\ - 'target' in config: - try: - config['target'] = self.config['handlers'][config['target']] - except StandardError as e: - raise ValueError('Unable to set target handler ' - '%r: %s' % (config['target'], e)) - elif issubclass(klass, logging.handlers.SMTPHandler) and\ - 'mailhost' in config: - config['mailhost'] = self.as_tuple(config['mailhost']) - elif issubclass(klass, logging.handlers.SysLogHandler) and\ - 'address' in config: - config['address'] = self.as_tuple(config['address']) - factory = klass - 
kwargs = dict((k, config[k]) for k in config if valid_ident(k)) - try: - result = factory(**kwargs) - except TypeError as te: - if "'stream'" not in str(te): - raise - # The argument name changed from strm to stream - # Retry with old name. - # This is so that code can be used with older Python versions - #(e.g. by Django) - kwargs['strm'] = kwargs.pop('stream') - result = factory(**kwargs) - if formatter: - result.setFormatter(formatter) - if level is not None: - result.setLevel(_checkLevel(level)) - if filters: - self.add_filters(result, filters) - return result - - def add_handlers(self, logger, handlers): - """Add handlers to a logger from a list of names.""" - for h in handlers: - try: - logger.addHandler(self.config['handlers'][h]) - except StandardError as e: - raise ValueError('Unable to add handler %r: %s' % (h, e)) - - def common_logger_config(self, logger, config, incremental=False): - """ - Perform configuration which is common to root and non-root loggers. - """ - level = config.get('level', None) - if level is not None: - logger.setLevel(_checkLevel(level)) - if not incremental: - # Remove any existing handlers - for h in logger.handlers[:]: - logger.removeHandler(h) - handlers = config.get('handlers', None) - if handlers: - self.add_handlers(logger, handlers) - filters = config.get('filters', None) - if filters: - self.add_filters(logger, filters) - - def configure_logger(self, name, config, incremental=False): - """Configure a non-root logger from a dictionary.""" - logger = logging.getLogger(name) - self.common_logger_config(logger, config, incremental) - propagate = config.get('propagate', None) - if propagate is not None: - logger.propagate = propagate - - def configure_root(self, config, incremental=False): - """Configure a root logger from a dictionary.""" - root = logging.getLogger() - self.common_logger_config(root, config, incremental) - -dictConfigClass = DictConfigurator - - -def dictConfig(config): - """Configure logging using a 
dictionary.""" - dictConfigClass(config).configure() diff --git a/Shared/lib/python3.4/site-packages/pip/compat/ordereddict.py b/Shared/lib/python3.4/site-packages/pip/compat/ordereddict.py deleted file mode 100644 index 6eb3ba4..0000000 --- a/Shared/lib/python3.4/site-packages/pip/compat/ordereddict.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) 2009 Raymond Hettinger -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation files -# (the "Software"), to deal in the Software without restriction, -# including without limitation the rights to use, copy, modify, merge, -# publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, -# subject to the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. 
- -# flake8: noqa - -from UserDict import DictMixin - -class OrderedDict(dict, DictMixin): - - def __init__(self, *args, **kwds): - if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) - try: - self.__end - except AttributeError: - self.clear() - self.update(*args, **kwds) - - def clear(self): - self.__end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.__map = {} # key --> [key, prev, next] - dict.clear(self) - - def __setitem__(self, key, value): - if key not in self: - end = self.__end - curr = end[1] - curr[2] = end[1] = self.__map[key] = [key, curr, end] - dict.__setitem__(self, key, value) - - def __delitem__(self, key): - dict.__delitem__(self, key) - key, prev, next = self.__map.pop(key) - prev[2] = next - next[1] = prev - - def __iter__(self): - end = self.__end - curr = end[2] - while curr is not end: - yield curr[0] - curr = curr[2] - - def __reversed__(self): - end = self.__end - curr = end[1] - while curr is not end: - yield curr[0] - curr = curr[1] - - def popitem(self, last=True): - if not self: - raise KeyError('dictionary is empty') - if last: - key = reversed(self).next() - else: - key = iter(self).next() - value = self.pop(key) - return key, value - - def __reduce__(self): - items = [[k, self[k]] for k in self] - tmp = self.__map, self.__end - del self.__map, self.__end - inst_dict = vars(self).copy() - self.__map, self.__end = tmp - if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) - - def keys(self): - return list(self) - - setdefault = DictMixin.setdefault - update = DictMixin.update - pop = DictMixin.pop - values = DictMixin.values - items = DictMixin.items - iterkeys = DictMixin.iterkeys - itervalues = DictMixin.itervalues - iteritems = DictMixin.iteritems - - def __repr__(self): - if not self: - return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, self.items()) - - def copy(self): - return 
self.__class__(self) - - @classmethod - def fromkeys(cls, iterable, value=None): - d = cls() - for key in iterable: - d[key] = value - return d - - def __eq__(self, other): - if isinstance(other, OrderedDict): - if len(self) != len(other): - return False - for p, q in zip(self.items(), other.items()): - if p != q: - return False - return True - return dict.__eq__(self, other) - - def __ne__(self, other): - return not self == other diff --git a/Shared/lib/python3.4/site-packages/pip/models/__init__.py b/Shared/lib/python3.4/site-packages/pip/models/__init__.py deleted file mode 100644 index 1d727d7..0000000 --- a/Shared/lib/python3.4/site-packages/pip/models/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from pip.models.index import Index, PyPI - - -__all__ = ["Index", "PyPI"] diff --git a/Shared/lib/python3.4/site-packages/pip/models/index.py b/Shared/lib/python3.4/site-packages/pip/models/index.py deleted file mode 100644 index be99119..0000000 --- a/Shared/lib/python3.4/site-packages/pip/models/index.py +++ /dev/null @@ -1,16 +0,0 @@ -from pip._vendor.six.moves.urllib import parse as urllib_parse - - -class Index(object): - def __init__(self, url): - self.url = url - self.netloc = urllib_parse.urlsplit(url).netloc - self.simple_url = self.url_to_path('simple') - self.pypi_url = self.url_to_path('pypi') - self.pip_json_url = self.url_to_path('pypi/pip/json') - - def url_to_path(self, path): - return urllib_parse.urljoin(self.url, path) - - -PyPI = Index('https://pypi.python.org/') diff --git a/Shared/lib/python3.4/site-packages/pip/operations/freeze.py b/Shared/lib/python3.4/site-packages/pip/operations/freeze.py deleted file mode 100644 index 086922e..0000000 --- a/Shared/lib/python3.4/site-packages/pip/operations/freeze.py +++ /dev/null @@ -1,114 +0,0 @@ -from __future__ import absolute_import - -import logging -import re - -import pip -from pip.req import InstallRequirement -from pip.utils import get_installed_distributions -from pip._vendor import pkg_resources 
-from pip._vendor.packaging.utils import canonicalize_name - - -logger = logging.getLogger(__name__) - - -def freeze( - requirement=None, - find_links=None, local_only=None, user_only=None, skip_regex=None, - default_vcs=None, - isolated=False, - wheel_cache=None, - skip=()): - find_links = find_links or [] - skip_match = None - - if skip_regex: - skip_match = re.compile(skip_regex).search - - dependency_links = [] - - for dist in pkg_resources.working_set: - if dist.has_metadata('dependency_links.txt'): - dependency_links.extend( - dist.get_metadata_lines('dependency_links.txt') - ) - for link in find_links: - if '#egg=' in link: - dependency_links.append(link) - for link in find_links: - yield '-f %s' % link - installations = {} - for dist in get_installed_distributions(local_only=local_only, - skip=(), - user_only=user_only): - req = pip.FrozenRequirement.from_dist( - dist, - dependency_links - ) - installations[req.name] = req - - if requirement: - with open(requirement) as req_file: - for line in req_file: - if (not line.strip() or - line.strip().startswith('#') or - (skip_match and skip_match(line)) or - line.startswith(( - '-r', '--requirement', - '-Z', '--always-unzip', - '-f', '--find-links', - '-i', '--index-url', - '--pre', - '--trusted-host', - '--process-dependency-links', - '--extra-index-url'))): - yield line.rstrip() - continue - - if line.startswith('-e') or line.startswith('--editable'): - if line.startswith('-e'): - line = line[2:].strip() - else: - line = line[len('--editable'):].strip().lstrip('=') - line_req = InstallRequirement.from_editable( - line, - default_vcs=default_vcs, - isolated=isolated, - wheel_cache=wheel_cache, - ) - else: - line_req = InstallRequirement.from_line( - line, - isolated=isolated, - wheel_cache=wheel_cache, - ) - - if not line_req.name: - logger.info( - "Skipping line because it's not clear what it " - "would install: %s", - line.strip(), - ) - logger.info( - " (add #egg=PackageName to the URL to avoid" - " this 
warning)" - ) - elif line_req.name not in installations: - logger.warning( - "Requirement file contains %s, but that package is" - " not installed", - line.strip(), - ) - else: - yield str(installations[line_req.name]).rstrip() - del installations[line_req.name] - - yield( - '## The following requirements were added by ' - 'pip freeze:' - ) - for installation in sorted( - installations.values(), key=lambda x: x.name.lower()): - if canonicalize_name(installation.name) not in skip: - yield str(installation).rstrip() diff --git a/Shared/lib/python3.4/site-packages/pip/req/__init__.py b/Shared/lib/python3.4/site-packages/pip/req/__init__.py deleted file mode 100644 index 00185a4..0000000 --- a/Shared/lib/python3.4/site-packages/pip/req/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from __future__ import absolute_import - -from .req_install import InstallRequirement -from .req_set import RequirementSet, Requirements -from .req_file import parse_requirements - -__all__ = [ - "RequirementSet", "Requirements", "InstallRequirement", - "parse_requirements", -] diff --git a/Shared/lib/python3.4/site-packages/pip/req/req_install.py b/Shared/lib/python3.4/site-packages/pip/req/req_install.py deleted file mode 100644 index caeda76..0000000 --- a/Shared/lib/python3.4/site-packages/pip/req/req_install.py +++ /dev/null @@ -1,1183 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import re -import shutil -import sys -import tempfile -import traceback -import warnings -import zipfile - -from distutils import sysconfig -from distutils.util import change_root -from email.parser import FeedParser - -from pip._vendor import pkg_resources, six -from pip._vendor.distlib.markers import interpret as markers_interpret -from pip._vendor.packaging import specifiers -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.six.moves import configparser - -import pip.wheel - -from pip.compat import native_str, get_stdlib, WINDOWS -from pip.download 
import is_url, url_to_path, path_to_url, is_archive_file -from pip.exceptions import ( - InstallationError, UninstallationError, UnsupportedWheel, -) -from pip.locations import ( - bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user, -) -from pip.utils import ( - display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir, - dist_in_usersite, dist_in_site_packages, egg_link_path, - call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir, - get_installed_version, normalize_path, dist_is_local, -) - -from pip.utils.hashes import Hashes -from pip.utils.deprecation import RemovedInPip9Warning, RemovedInPip10Warning -from pip.utils.logging import indent_log -from pip.utils.setuptools_build import SETUPTOOLS_SHIM -from pip.utils.ui import open_spinner -from pip.req.req_uninstall import UninstallPathSet -from pip.vcs import vcs -from pip.wheel import move_wheel_files, Wheel -from pip._vendor.packaging.version import Version - - -logger = logging.getLogger(__name__) - -operators = specifiers.Specifier._operators.keys() - - -def _strip_extras(path): - m = re.match(r'^(.+)(\[[^\]]+\])$', path) - extras = None - if m: - path_no_extras = m.group(1) - extras = m.group(2) - else: - path_no_extras = path - - return path_no_extras, extras - - -class InstallRequirement(object): - - def __init__(self, req, comes_from, source_dir=None, editable=False, - link=None, as_egg=False, update=True, - pycompile=True, markers=None, isolated=False, options=None, - wheel_cache=None, constraint=False): - self.extras = () - if isinstance(req, six.string_types): - try: - req = pkg_resources.Requirement.parse(req) - except pkg_resources.RequirementParseError: - if os.path.sep in req: - add_msg = "It looks like a path. Does it exist ?" - elif '=' in req and not any(op in req for op in operators): - add_msg = "= is not a valid operator. Did you mean == ?" 
- else: - add_msg = traceback.format_exc() - raise InstallationError( - "Invalid requirement: '%s'\n%s" % (req, add_msg)) - self.extras = req.extras - - self.req = req - self.comes_from = comes_from - self.constraint = constraint - self.source_dir = source_dir - self.editable = editable - - self._wheel_cache = wheel_cache - self.link = self.original_link = link - self.as_egg = as_egg - self.markers = markers - self._egg_info_path = None - # This holds the pkg_resources.Distribution object if this requirement - # is already available: - self.satisfied_by = None - # This hold the pkg_resources.Distribution object if this requirement - # conflicts with another installed distribution: - self.conflicts_with = None - # Temporary build location - self._temp_build_dir = None - # Used to store the global directory where the _temp_build_dir should - # have been created. Cf _correct_build_location method. - self._ideal_build_dir = None - # True if the editable should be updated: - self.update = update - # Set to True after successful installation - self.install_succeeded = None - # UninstallPathSet of uninstalled distribution (for possible rollback) - self.uninstalled = None - # Set True if a legitimate do-nothing-on-uninstall has happened - e.g. - # system site packages, stdlib packages. 
- self.nothing_to_uninstall = False - self.use_user_site = False - self.target_dir = None - self.options = options if options else {} - self.pycompile = pycompile - # Set to True after successful preparation of this requirement - self.prepared = False - - self.isolated = isolated - - @classmethod - def from_editable(cls, editable_req, comes_from=None, default_vcs=None, - isolated=False, options=None, wheel_cache=None, - constraint=False): - from pip.index import Link - - name, url, extras_override = parse_editable( - editable_req, default_vcs) - if url.startswith('file:'): - source_dir = url_to_path(url) - else: - source_dir = None - - res = cls(name, comes_from, source_dir=source_dir, - editable=True, - link=Link(url), - constraint=constraint, - isolated=isolated, - options=options if options else {}, - wheel_cache=wheel_cache) - - if extras_override is not None: - res.extras = extras_override - - return res - - @classmethod - def from_line( - cls, name, comes_from=None, isolated=False, options=None, - wheel_cache=None, constraint=False): - """Creates an InstallRequirement from a name, which might be a - requirement, directory containing 'setup.py', filename, or URL. - """ - from pip.index import Link - - if is_url(name): - marker_sep = '; ' - else: - marker_sep = ';' - if marker_sep in name: - name, markers = name.split(marker_sep, 1) - markers = markers.strip() - if not markers: - markers = None - else: - markers = None - name = name.strip() - req = None - path = os.path.normpath(os.path.abspath(name)) - link = None - extras = None - - if is_url(name): - link = Link(name) - else: - p, extras = _strip_extras(path) - if (os.path.isdir(p) and - (os.path.sep in name or name.startswith('.'))): - - if not is_installable_dir(p): - raise InstallationError( - "Directory %r is not installable. File 'setup.py' " - "not found." 
% name - ) - link = Link(path_to_url(p)) - elif is_archive_file(p): - if not os.path.isfile(p): - logger.warning( - 'Requirement %r looks like a filename, but the ' - 'file does not exist', - name - ) - link = Link(path_to_url(p)) - - # it's a local file, dir, or url - if link: - # Handle relative file URLs - if link.scheme == 'file' and re.search(r'\.\./', link.url): - link = Link( - path_to_url(os.path.normpath(os.path.abspath(link.path)))) - # wheel file - if link.is_wheel: - wheel = Wheel(link.filename) # can raise InvalidWheelFilename - if not wheel.supported(): - raise UnsupportedWheel( - "%s is not a supported wheel on this platform." % - wheel.filename - ) - req = "%s==%s" % (wheel.name, wheel.version) - else: - # set the req to the egg fragment. when it's not there, this - # will become an 'unnamed' requirement - req = link.egg_fragment - - # a requirement specifier - else: - req = name - - options = options if options else {} - res = cls(req, comes_from, link=link, markers=markers, - isolated=isolated, options=options, - wheel_cache=wheel_cache, constraint=constraint) - - if extras: - res.extras = pkg_resources.Requirement.parse('__placeholder__' + - extras).extras - - return res - - def __str__(self): - if self.req: - s = str(self.req) - if self.link: - s += ' from %s' % self.link.url - else: - s = self.link.url if self.link else None - if self.satisfied_by is not None: - s += ' in %s' % display_path(self.satisfied_by.location) - if self.comes_from: - if isinstance(self.comes_from, six.string_types): - comes_from = self.comes_from - else: - comes_from = self.comes_from.from_path() - if comes_from: - s += ' (from %s)' % comes_from - return s - - def __repr__(self): - return '<%s object: %s editable=%r>' % ( - self.__class__.__name__, str(self), self.editable) - - def populate_link(self, finder, upgrade, require_hashes): - """Ensure that if a link can be found for this, that it is found. 
- - Note that self.link may still be None - if Upgrade is False and the - requirement is already installed. - - If require_hashes is True, don't use the wheel cache, because cached - wheels, always built locally, have different hashes than the files - downloaded from the index server and thus throw false hash mismatches. - Furthermore, cached wheels at present have undeterministic contents due - to file modification times. - """ - if self.link is None: - self.link = finder.find_requirement(self, upgrade) - if self._wheel_cache is not None and not require_hashes: - old_link = self.link - self.link = self._wheel_cache.cached_wheel(self.link, self.name) - if old_link != self.link: - logger.debug('Using cached wheel link: %s', self.link) - - @property - def specifier(self): - return self.req.specifier - - @property - def is_pinned(self): - """Return whether I am pinned to an exact version. - - For example, some-package==1.2 is pinned; some-package>1.2 is not. - """ - specifiers = self.specifier - return (len(specifiers) == 1 and - next(iter(specifiers)).operator in ('==', '===')) - - def from_path(self): - if self.req is None: - return None - s = str(self.req) - if self.comes_from: - if isinstance(self.comes_from, six.string_types): - comes_from = self.comes_from - else: - comes_from = self.comes_from.from_path() - if comes_from: - s += '->' + comes_from - return s - - def build_location(self, build_dir): - if self._temp_build_dir is not None: - return self._temp_build_dir - if self.req is None: - # for requirement via a path to a directory: the name of the - # package is not available yet so we create a temp directory - # Once run_egg_info will have run, we'll be able - # to fix it via _correct_build_location - self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-') - self._ideal_build_dir = build_dir - return self._temp_build_dir - if self.editable: - name = self.name.lower() - else: - name = self.name - # FIXME: Is there a better place to create the build_dir? 
(hg and bzr - # need this) - if not os.path.exists(build_dir): - logger.debug('Creating directory %s', build_dir) - _make_build_dir(build_dir) - return os.path.join(build_dir, name) - - def _correct_build_location(self): - """Move self._temp_build_dir to self._ideal_build_dir/self.req.name - - For some requirements (e.g. a path to a directory), the name of the - package is not available until we run egg_info, so the build_location - will return a temporary directory and store the _ideal_build_dir. - - This is only called by self.egg_info_path to fix the temporary build - directory. - """ - if self.source_dir is not None: - return - assert self.req is not None - assert self._temp_build_dir - assert self._ideal_build_dir - old_location = self._temp_build_dir - self._temp_build_dir = None - new_location = self.build_location(self._ideal_build_dir) - if os.path.exists(new_location): - raise InstallationError( - 'A package already exists in %s; please remove it to continue' - % display_path(new_location)) - logger.debug( - 'Moving package %s from %s to new location %s', - self, display_path(old_location), display_path(new_location), - ) - shutil.move(old_location, new_location) - self._temp_build_dir = new_location - self._ideal_build_dir = None - self.source_dir = new_location - self._egg_info_path = None - - @property - def name(self): - if self.req is None: - return None - return native_str(self.req.project_name) - - @property - def setup_py_dir(self): - return os.path.join( - self.source_dir, - self.link and self.link.subdirectory_fragment or '') - - @property - def setup_py(self): - assert self.source_dir, "No source dir for %s" % self - try: - import setuptools # noqa - except ImportError: - if get_installed_version('setuptools') is None: - add_msg = "Please install setuptools." 
- else: - add_msg = traceback.format_exc() - # Setuptools is not available - raise InstallationError( - "Could not import setuptools which is required to " - "install from a source distribution.\n%s" % add_msg - ) - - setup_py = os.path.join(self.setup_py_dir, 'setup.py') - - # Python2 __file__ should not be unicode - if six.PY2 and isinstance(setup_py, six.text_type): - setup_py = setup_py.encode(sys.getfilesystemencoding()) - - return setup_py - - def run_egg_info(self): - assert self.source_dir - if self.name: - logger.debug( - 'Running setup.py (path:%s) egg_info for package %s', - self.setup_py, self.name, - ) - else: - logger.debug( - 'Running setup.py (path:%s) egg_info for package from %s', - self.setup_py, self.link, - ) - - with indent_log(): - script = SETUPTOOLS_SHIM % self.setup_py - base_cmd = [sys.executable, '-c', script] - if self.isolated: - base_cmd += ["--no-user-cfg"] - egg_info_cmd = base_cmd + ['egg_info'] - # We can't put the .egg-info files at the root, because then the - # source code will be mistaken for an installed egg, causing - # problems - if self.editable: - egg_base_option = [] - else: - egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info') - ensure_dir(egg_info_dir) - egg_base_option = ['--egg-base', 'pip-egg-info'] - call_subprocess( - egg_info_cmd + egg_base_option, - cwd=self.setup_py_dir, - show_stdout=False, - command_level=logging.DEBUG, - command_desc='python setup.py egg_info') - - if not self.req: - if isinstance( - pkg_resources.parse_version(self.pkg_info()["Version"]), - Version): - op = "==" - else: - op = "===" - self.req = pkg_resources.Requirement.parse( - "".join([ - self.pkg_info()["Name"], - op, - self.pkg_info()["Version"], - ])) - self._correct_build_location() - else: - metadata_name = canonicalize_name(self.pkg_info()["Name"]) - if canonicalize_name(self.req.project_name) != metadata_name: - logger.warning( - 'Running setup.py (path:%s) egg_info for package %s ' - 'produced metadata for project name 
%s. Fix your ' - '#egg=%s fragments.', - self.setup_py, self.name, metadata_name, self.name - ) - self.req = pkg_resources.Requirement.parse(metadata_name) - - def egg_info_data(self, filename): - if self.satisfied_by is not None: - if not self.satisfied_by.has_metadata(filename): - return None - return self.satisfied_by.get_metadata(filename) - assert self.source_dir - filename = self.egg_info_path(filename) - if not os.path.exists(filename): - return None - data = read_text_file(filename) - return data - - def egg_info_path(self, filename): - if self._egg_info_path is None: - if self.editable: - base = self.source_dir - else: - base = os.path.join(self.setup_py_dir, 'pip-egg-info') - filenames = os.listdir(base) - if self.editable: - filenames = [] - for root, dirs, files in os.walk(base): - for dir in vcs.dirnames: - if dir in dirs: - dirs.remove(dir) - # Iterate over a copy of ``dirs``, since mutating - # a list while iterating over it can cause trouble. - # (See https://github.com/pypa/pip/pull/462.) - for dir in list(dirs): - # Don't search in anything that looks like a virtualenv - # environment - if ( - os.path.exists( - os.path.join(root, dir, 'bin', 'python') - ) or - os.path.exists( - os.path.join( - root, dir, 'Scripts', 'Python.exe' - ) - )): - dirs.remove(dir) - # Also don't search through tests - elif dir == 'test' or dir == 'tests': - dirs.remove(dir) - filenames.extend([os.path.join(root, dir) - for dir in dirs]) - filenames = [f for f in filenames if f.endswith('.egg-info')] - - if not filenames: - raise InstallationError( - 'No files/directories in %s (from %s)' % (base, filename) - ) - assert filenames, \ - "No files/directories in %s (from %s)" % (base, filename) - - # if we have more than one match, we pick the toplevel one. This - # can easily be the case if there is a dist folder which contains - # an extracted tarball for testing purposes. 
- if len(filenames) > 1: - filenames.sort( - key=lambda x: x.count(os.path.sep) + - (os.path.altsep and x.count(os.path.altsep) or 0) - ) - self._egg_info_path = os.path.join(base, filenames[0]) - return os.path.join(self._egg_info_path, filename) - - def pkg_info(self): - p = FeedParser() - data = self.egg_info_data('PKG-INFO') - if not data: - logger.warning( - 'No PKG-INFO file found in %s', - display_path(self.egg_info_path('PKG-INFO')), - ) - p.feed(data or '') - return p.close() - - _requirements_section_re = re.compile(r'\[(.*?)\]') - - @property - def installed_version(self): - return get_installed_version(self.name) - - def assert_source_matches_version(self): - assert self.source_dir - version = self.pkg_info()['version'] - if version not in self.req: - logger.warning( - 'Requested %s, but installing version %s', - self, - self.installed_version, - ) - else: - logger.debug( - 'Source in %s has version %s, which satisfies requirement %s', - display_path(self.source_dir), - version, - self, - ) - - def update_editable(self, obtain=True): - if not self.link: - logger.debug( - "Cannot update repository at %s; repository location is " - "unknown", - self.source_dir, - ) - return - assert self.editable - assert self.source_dir - if self.link.scheme == 'file': - # Static paths don't get updated - return - assert '+' in self.link.url, "bad url: %r" % self.link.url - if not self.update: - return - vc_type, url = self.link.url.split('+', 1) - backend = vcs.get_backend(vc_type) - if backend: - vcs_backend = backend(self.link.url) - if obtain: - vcs_backend.obtain(self.source_dir) - else: - vcs_backend.export(self.source_dir) - else: - assert 0, ( - 'Unexpected version control type (in %s): %s' - % (self.link, vc_type)) - - def uninstall(self, auto_confirm=False): - """ - Uninstall the distribution currently satisfying this requirement. - - Prompts before removing or modifying files unless - ``auto_confirm`` is True. 
- - Refuses to delete or modify files outside of ``sys.prefix`` - - thus uninstallation within a virtual environment can only - modify that virtual environment, even if the virtualenv is - linked to global site-packages. - - """ - if not self.check_if_exists(): - raise UninstallationError( - "Cannot uninstall requirement %s, not installed" % (self.name,) - ) - dist = self.satisfied_by or self.conflicts_with - - dist_path = normalize_path(dist.location) - if not dist_is_local(dist): - logger.info( - "Not uninstalling %s at %s, outside environment %s", - dist.key, - dist_path, - sys.prefix, - ) - self.nothing_to_uninstall = True - return - - if dist_path in get_stdlib(): - logger.info( - "Not uninstalling %s at %s, as it is in the standard library.", - dist.key, - dist_path, - ) - self.nothing_to_uninstall = True - return - - paths_to_remove = UninstallPathSet(dist) - develop_egg_link = egg_link_path(dist) - develop_egg_link_egg_info = '{0}.egg-info'.format( - pkg_resources.to_filename(dist.project_name)) - egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info) - # Special case for distutils installed package - distutils_egg_info = getattr(dist._provider, 'path', None) - - # Uninstall cases order do matter as in the case of 2 installs of the - # same package, pip needs to uninstall the currently detected version - if (egg_info_exists and dist.egg_info.endswith('.egg-info') and - not dist.egg_info.endswith(develop_egg_link_egg_info)): - # if dist.egg_info.endswith(develop_egg_link_egg_info), we - # are in fact in the develop_egg_link case - paths_to_remove.add(dist.egg_info) - if dist.has_metadata('installed-files.txt'): - for installed_file in dist.get_metadata( - 'installed-files.txt').splitlines(): - path = os.path.normpath( - os.path.join(dist.egg_info, installed_file) - ) - paths_to_remove.add(path) - # FIXME: need a test for this elif block - # occurs with --single-version-externally-managed/--record outside - # of pip - elif 
dist.has_metadata('top_level.txt'): - if dist.has_metadata('namespace_packages.txt'): - namespaces = dist.get_metadata('namespace_packages.txt') - else: - namespaces = [] - for top_level_pkg in [ - p for p - in dist.get_metadata('top_level.txt').splitlines() - if p and p not in namespaces]: - path = os.path.join(dist.location, top_level_pkg) - paths_to_remove.add(path) - paths_to_remove.add(path + '.py') - paths_to_remove.add(path + '.pyc') - paths_to_remove.add(path + '.pyo') - - elif distutils_egg_info: - warnings.warn( - "Uninstalling a distutils installed project ({0}) has been " - "deprecated and will be removed in a future version. This is " - "due to the fact that uninstalling a distutils project will " - "only partially uninstall the project.".format(self.name), - RemovedInPip10Warning, - ) - paths_to_remove.add(distutils_egg_info) - - elif dist.location.endswith('.egg'): - # package installed by easy_install - # We cannot match on dist.egg_name because it can slightly vary - # i.e. 
setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg - paths_to_remove.add(dist.location) - easy_install_egg = os.path.split(dist.location)[1] - easy_install_pth = os.path.join(os.path.dirname(dist.location), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) - - elif develop_egg_link: - # develop egg - with open(develop_egg_link, 'r') as fh: - link_pointer = os.path.normcase(fh.readline().strip()) - assert (link_pointer == dist.location), ( - 'Egg-link %s does not match installed location of %s ' - '(at %s)' % (link_pointer, self.name, dist.location) - ) - paths_to_remove.add(develop_egg_link) - easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, dist.location) - - elif egg_info_exists and dist.egg_info.endswith('.dist-info'): - for path in pip.wheel.uninstallation_paths(dist): - paths_to_remove.add(path) - - else: - logger.debug( - 'Not sure how to uninstall: %s - Check: %s', - dist, dist.location) - - # find distutils scripts= scripts - if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): - for script in dist.metadata_listdir('scripts'): - if dist_in_usersite(dist): - bin_dir = bin_user - else: - bin_dir = bin_py - paths_to_remove.add(os.path.join(bin_dir, script)) - if WINDOWS: - paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') - - # find console_scripts - if dist.has_metadata('entry_points.txt'): - if six.PY2: - options = {} - else: - options = {"delimiters": ('=', )} - config = configparser.SafeConfigParser(**options) - config.readfp( - FakeFile(dist.get_metadata_lines('entry_points.txt')) - ) - if config.has_section('console_scripts'): - for name, value in config.items('console_scripts'): - if dist_in_usersite(dist): - bin_dir = bin_user - else: - bin_dir = bin_py - paths_to_remove.add(os.path.join(bin_dir, name)) - if WINDOWS: - paths_to_remove.add( - os.path.join(bin_dir, name) + '.exe' - ) - 
paths_to_remove.add( - os.path.join(bin_dir, name) + '.exe.manifest' - ) - paths_to_remove.add( - os.path.join(bin_dir, name) + '-script.py' - ) - - paths_to_remove.remove(auto_confirm) - self.uninstalled = paths_to_remove - - def rollback_uninstall(self): - if self.uninstalled: - self.uninstalled.rollback() - else: - logger.error( - "Can't rollback %s, nothing uninstalled.", self.name, - ) - - def commit_uninstall(self): - if self.uninstalled: - self.uninstalled.commit() - elif not self.nothing_to_uninstall: - logger.error( - "Can't commit %s, nothing uninstalled.", self.name, - ) - - def archive(self, build_dir): - assert self.source_dir - create_archive = True - archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"]) - archive_path = os.path.join(build_dir, archive_name) - if os.path.exists(archive_path): - response = ask_path_exists( - 'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' % - display_path(archive_path), ('i', 'w', 'b')) - if response == 'i': - create_archive = False - elif response == 'w': - logger.warning('Deleting %s', display_path(archive_path)) - os.remove(archive_path) - elif response == 'b': - dest_file = backup_dir(archive_path) - logger.warning( - 'Backing up %s to %s', - display_path(archive_path), - display_path(dest_file), - ) - shutil.move(archive_path, dest_file) - if create_archive: - zip = zipfile.ZipFile( - archive_path, 'w', zipfile.ZIP_DEFLATED, - allowZip64=True - ) - dir = os.path.normcase(os.path.abspath(self.setup_py_dir)) - for dirpath, dirnames, filenames in os.walk(dir): - if 'pip-egg-info' in dirnames: - dirnames.remove('pip-egg-info') - for dirname in dirnames: - dirname = os.path.join(dirpath, dirname) - name = self._clean_zip_name(dirname, dir) - zipdir = zipfile.ZipInfo(self.name + '/' + name + '/') - zipdir.external_attr = 0x1ED << 16 # 0o755 - zip.writestr(zipdir, '') - for filename in filenames: - if filename == PIP_DELETE_MARKER_FILENAME: - continue - filename = os.path.join(dirpath, filename) - name = 
self._clean_zip_name(filename, dir) - zip.write(filename, self.name + '/' + name) - zip.close() - logger.info('Saved %s', display_path(archive_path)) - - def _clean_zip_name(self, name, prefix): - assert name.startswith(prefix + os.path.sep), ( - "name %r doesn't start with prefix %r" % (name, prefix) - ) - name = name[len(prefix) + 1:] - name = name.replace(os.path.sep, '/') - return name - - def match_markers(self): - if self.markers is not None: - return markers_interpret(self.markers) - else: - return True - - def install(self, install_options, global_options=[], root=None, - prefix=None): - if self.editable: - self.install_editable( - install_options, global_options, prefix=prefix) - return - if self.is_wheel: - version = pip.wheel.wheel_version(self.source_dir) - pip.wheel.check_compatibility(version, self.name) - - self.move_wheel_files(self.source_dir, root=root, prefix=prefix) - self.install_succeeded = True - return - - # Extend the list of global and install options passed on to - # the setup.py call with the ones from the requirements file. - # Options specified in requirements file override those - # specified on the command line, since the last option given - # to setup.py is the one that is used. 
- global_options += self.options.get('global_options', []) - install_options += self.options.get('install_options', []) - - if self.isolated: - global_options = list(global_options) + ["--no-user-cfg"] - - temp_location = tempfile.mkdtemp('-record', 'pip-') - record_filename = os.path.join(temp_location, 'install-record.txt') - try: - install_args = [sys.executable, "-u"] - install_args.append('-c') - install_args.append(SETUPTOOLS_SHIM % self.setup_py) - install_args += list(global_options) + \ - ['install', '--record', record_filename] - - if not self.as_egg: - install_args += ['--single-version-externally-managed'] - - if root is not None: - install_args += ['--root', root] - if prefix is not None: - install_args += ['--prefix', prefix] - - if self.pycompile: - install_args += ["--compile"] - else: - install_args += ["--no-compile"] - - if running_under_virtualenv(): - py_ver_str = 'python' + sysconfig.get_python_version() - install_args += ['--install-headers', - os.path.join(sys.prefix, 'include', 'site', - py_ver_str, self.name)] - msg = 'Running setup.py install for %s' % (self.name,) - with open_spinner(msg) as spinner: - with indent_log(): - call_subprocess( - install_args + install_options, - cwd=self.setup_py_dir, - show_stdout=False, - spinner=spinner, - ) - - if not os.path.exists(record_filename): - logger.debug('Record file %s not found', record_filename) - return - self.install_succeeded = True - if self.as_egg: - # there's no --always-unzip option we can pass to install - # command so we unable to save the installed-files.txt - return - - def prepend_root(path): - if root is None or not os.path.isabs(path): - return path - else: - return change_root(root, path) - - with open(record_filename) as f: - for line in f: - directory = os.path.dirname(line) - if directory.endswith('.egg-info'): - egg_info_dir = prepend_root(directory) - break - else: - logger.warning( - 'Could not find .egg-info directory in install record' - ' for %s', - self, - ) - # 
FIXME: put the record somewhere - # FIXME: should this be an error? - return - new_lines = [] - with open(record_filename) as f: - for line in f: - filename = line.strip() - if os.path.isdir(filename): - filename += os.path.sep - new_lines.append( - os.path.relpath( - prepend_root(filename), egg_info_dir) - ) - inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt') - with open(inst_files_path, 'w') as f: - f.write('\n'.join(new_lines) + '\n') - finally: - if os.path.exists(record_filename): - os.remove(record_filename) - rmtree(temp_location) - - def ensure_has_source_dir(self, parent_dir): - """Ensure that a source_dir is set. - - This will create a temporary build dir if the name of the requirement - isn't known yet. - - :param parent_dir: The ideal pip parent_dir for the source_dir. - Generally src_dir for editables and build_dir for sdists. - :return: self.source_dir - """ - if self.source_dir is None: - self.source_dir = self.build_location(parent_dir) - return self.source_dir - - def remove_temporary_source(self): - """Remove the source files from this requirement, if they are marked - for deletion""" - if self.source_dir and os.path.exists( - os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)): - logger.debug('Removing source in %s', self.source_dir) - rmtree(self.source_dir) - self.source_dir = None - if self._temp_build_dir and os.path.exists(self._temp_build_dir): - rmtree(self._temp_build_dir) - self._temp_build_dir = None - - def install_editable(self, install_options, - global_options=(), prefix=None): - logger.info('Running setup.py develop for %s', self.name) - - if self.isolated: - global_options = list(global_options) + ["--no-user-cfg"] - - if prefix: - prefix_param = ['--prefix={0}'.format(prefix)] - install_options = list(install_options) + prefix_param - - with indent_log(): - # FIXME: should we do --install-headers here too? 
- call_subprocess( - [ - sys.executable, - '-c', - SETUPTOOLS_SHIM % self.setup_py - ] + - list(global_options) + - ['develop', '--no-deps'] + - list(install_options), - - cwd=self.setup_py_dir, - show_stdout=False) - - self.install_succeeded = True - - def check_if_exists(self): - """Find an installed distribution that satisfies or conflicts - with this requirement, and set self.satisfied_by or - self.conflicts_with appropriately. - """ - if self.req is None: - return False - try: - self.satisfied_by = pkg_resources.get_distribution(self.req) - except pkg_resources.DistributionNotFound: - return False - except pkg_resources.VersionConflict: - existing_dist = pkg_resources.get_distribution( - self.req.project_name - ) - if self.use_user_site: - if dist_in_usersite(existing_dist): - self.conflicts_with = existing_dist - elif (running_under_virtualenv() and - dist_in_site_packages(existing_dist)): - raise InstallationError( - "Will not install to the user site because it will " - "lack sys.path precedence to %s in %s" % - (existing_dist.project_name, existing_dist.location) - ) - else: - self.conflicts_with = existing_dist - return True - - @property - def is_wheel(self): - return self.link and self.link.is_wheel - - def move_wheel_files(self, wheeldir, root=None, prefix=None): - move_wheel_files( - self.name, self.req, wheeldir, - user=self.use_user_site, - home=self.target_dir, - root=root, - prefix=prefix, - pycompile=self.pycompile, - isolated=self.isolated, - ) - - def get_dist(self): - """Return a pkg_resources.Distribution built from self.egg_info_path""" - egg_info = self.egg_info_path('').rstrip('/') - base_dir = os.path.dirname(egg_info) - metadata = pkg_resources.PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - return pkg_resources.Distribution( - os.path.dirname(egg_info), - project_name=dist_name, - metadata=metadata) - - @property - def has_hash_options(self): - """Return whether any known-good hashes are 
specified as options. - - These activate --require-hashes mode; hashes specified as part of a - URL do not. - - """ - return bool(self.options.get('hashes', {})) - - def hashes(self, trust_internet=True): - """Return a hash-comparer that considers my option- and URL-based - hashes to be known-good. - - Hashes in URLs--ones embedded in the requirements file, not ones - downloaded from an index server--are almost peers with ones from - flags. They satisfy --require-hashes (whether it was implicitly or - explicitly activated) but do not activate it. md5 and sha224 are not - allowed in flags, which should nudge people toward good algos. We - always OR all hashes together, even ones from URLs. - - :param trust_internet: Whether to trust URL-based (#md5=...) hashes - downloaded from the internet, as by populate_link() - - """ - good_hashes = self.options.get('hashes', {}).copy() - link = self.link if trust_internet else self.original_link - if link and link.hash: - good_hashes.setdefault(link.hash_name, []).append(link.hash) - return Hashes(good_hashes) - - -def _strip_postfix(req): - """ - Strip req postfix ( -dev, 0.2, etc ) - """ - # FIXME: use package_to_requirement? - match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req) - if match: - # Strip off -dev, -0.2, etc. - req = match.group(1) - return req - - -def _build_req_from_url(url): - - parts = [p for p in url.split('#', 1)[0].split('/') if p] - - req = None - if len(parts) > 2 and parts[-2] in ('tags', 'branches', 'tag', 'branch'): - req = parts[-3] - elif len(parts) > 1 and parts[-1] == 'trunk': - req = parts[-2] - if req: - warnings.warn( - 'Sniffing the requirement name from the url is deprecated and ' - 'will be removed in the future. 
Please specify an #egg segment ' - 'instead.', RemovedInPip9Warning, - stacklevel=2) - return req - - -def parse_editable(editable_req, default_vcs=None): - """Parses an editable requirement into: - - a requirement name - - an URL - - extras - - editable options - Accepted requirements: - svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir - .[some_extra] - """ - - from pip.index import Link - - url = editable_req - extras = None - - # If a file path is specified with extras, strip off the extras. - m = re.match(r'^(.+)(\[[^\]]+\])$', url) - if m: - url_no_extras = m.group(1) - extras = m.group(2) - else: - url_no_extras = url - - if os.path.isdir(url_no_extras): - if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): - raise InstallationError( - "Directory %r is not installable. File 'setup.py' not found." % - url_no_extras - ) - # Treating it as code that has already been checked out - url_no_extras = path_to_url(url_no_extras) - - if url_no_extras.lower().startswith('file:'): - package_name = Link(url_no_extras).egg_fragment - if extras: - return ( - package_name, - url_no_extras, - pkg_resources.Requirement.parse( - '__placeholder__' + extras - ).extras, - ) - else: - return package_name, url_no_extras, None - - for version_control in vcs: - if url.lower().startswith('%s:' % version_control): - url = '%s+%s' % (version_control, url) - break - - if '+' not in url: - if default_vcs: - url = default_vcs + '+' + url - else: - raise InstallationError( - '%s should either be a path to a local project or a VCS url ' - 'beginning with svn+, git+, hg+, or bzr+' % - editable_req - ) - - vc_type = url.split('+', 1)[0].lower() - - if not vcs.get_backend(vc_type): - error_message = 'For --editable=%s only ' % editable_req + \ - ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \ - ' is currently supported' - raise InstallationError(error_message) - - package_name = Link(url).egg_fragment - if not package_name: - package_name = 
_build_req_from_url(editable_req) - if not package_name: - raise InstallationError( - '--editable=%s is not the right format; it must have ' - '#egg=Package' % editable_req - ) - return _strip_postfix(package_name), url, None diff --git a/Shared/lib/python3.4/site-packages/pip/req/req_set.py b/Shared/lib/python3.4/site-packages/pip/req/req_set.py deleted file mode 100644 index e7a8d87..0000000 --- a/Shared/lib/python3.4/site-packages/pip/req/req_set.py +++ /dev/null @@ -1,746 +0,0 @@ -from __future__ import absolute_import - -from collections import defaultdict -from itertools import chain -import logging -import os - -from pip._vendor import pkg_resources -from pip._vendor import requests - -from pip.compat import expanduser -from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path, - unpack_url) -from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled, - DistributionNotFound, PreviousBuildDirError, - HashError, HashErrors, HashUnpinned, - DirectoryUrlHashUnsupported, VcsHashUnsupported) -from pip.req.req_install import InstallRequirement -from pip.utils import ( - display_path, dist_in_usersite, ensure_dir, normalize_path) -from pip.utils.hashes import MissingHashes -from pip.utils.logging import indent_log -from pip.vcs import vcs - - -logger = logging.getLogger(__name__) - - -class Requirements(object): - - def __init__(self): - self._keys = [] - self._dict = {} - - def keys(self): - return self._keys - - def values(self): - return [self._dict[key] for key in self._keys] - - def __contains__(self, item): - return item in self._keys - - def __setitem__(self, key, value): - if key not in self._keys: - self._keys.append(key) - self._dict[key] = value - - def __getitem__(self, key): - return self._dict[key] - - def __repr__(self): - values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()] - return 'Requirements({%s})' % ', '.join(values) - - -class DistAbstraction(object): - """Abstracts out the wheel vs non-wheel 
prepare_files logic. - - The requirements for anything installable are as follows: - - we must be able to determine the requirement name - (or we can't correctly handle the non-upgrade case). - - we must be able to generate a list of run-time dependencies - without installing any additional packages (or we would - have to either burn time by doing temporary isolated installs - or alternatively violate pips 'don't start installing unless - all requirements are available' rule - neither of which are - desirable). - - for packages with setup requirements, we must also be able - to determine their requirements without installing additional - packages (for the same reason as run-time dependencies) - - we must be able to create a Distribution object exposing the - above metadata. - """ - - def __init__(self, req_to_install): - self.req_to_install = req_to_install - - def dist(self, finder): - """Return a setuptools Dist object.""" - raise NotImplementedError(self.dist) - - def prep_for_dist(self): - """Ensure that we can get a Dist for this requirement.""" - raise NotImplementedError(self.dist) - - -def make_abstract_dist(req_to_install): - """Factory to make an abstract dist object. - - Preconditions: Either an editable req with a source_dir, or satisfied_by or - a wheel link, or a non-editable req with a source_dir. - - :return: A concrete DistAbstraction. 
- """ - if req_to_install.editable: - return IsSDist(req_to_install) - elif req_to_install.link and req_to_install.link.is_wheel: - return IsWheel(req_to_install) - else: - return IsSDist(req_to_install) - - -class IsWheel(DistAbstraction): - - def dist(self, finder): - return list(pkg_resources.find_distributions( - self.req_to_install.source_dir))[0] - - def prep_for_dist(self): - # FIXME:https://github.com/pypa/pip/issues/1112 - pass - - -class IsSDist(DistAbstraction): - - def dist(self, finder): - dist = self.req_to_install.get_dist() - # FIXME: shouldn't be globally added: - if dist.has_metadata('dependency_links.txt'): - finder.add_dependency_links( - dist.get_metadata_lines('dependency_links.txt') - ) - return dist - - def prep_for_dist(self): - self.req_to_install.run_egg_info() - self.req_to_install.assert_source_matches_version() - - -class Installed(DistAbstraction): - - def dist(self, finder): - return self.req_to_install.satisfied_by - - def prep_for_dist(self): - pass - - -class RequirementSet(object): - - def __init__(self, build_dir, src_dir, download_dir, upgrade=False, - ignore_installed=False, as_egg=False, target_dir=None, - ignore_dependencies=False, force_reinstall=False, - use_user_site=False, session=None, pycompile=True, - isolated=False, wheel_download_dir=None, - wheel_cache=None, require_hashes=False): - """Create a RequirementSet. - - :param wheel_download_dir: Where still-packed .whl files should be - written to. If None they are written to the download_dir parameter. - Separate to download_dir to permit only keeping wheel archives for - pip wheel. - :param download_dir: Where still packed archives should be written to. - If None they are not saved, and are deleted immediately after - unpacking. - :param wheel_cache: The pip wheel cache, for passing to - InstallRequirement. 
- """ - if session is None: - raise TypeError( - "RequirementSet() missing 1 required keyword argument: " - "'session'" - ) - - self.build_dir = build_dir - self.src_dir = src_dir - # XXX: download_dir and wheel_download_dir overlap semantically and may - # be combined if we're willing to have non-wheel archives present in - # the wheelhouse output by 'pip wheel'. - self.download_dir = download_dir - self.upgrade = upgrade - self.ignore_installed = ignore_installed - self.force_reinstall = force_reinstall - self.requirements = Requirements() - # Mapping of alias: real_name - self.requirement_aliases = {} - self.unnamed_requirements = [] - self.ignore_dependencies = ignore_dependencies - self.successfully_downloaded = [] - self.successfully_installed = [] - self.reqs_to_cleanup = [] - self.as_egg = as_egg - self.use_user_site = use_user_site - self.target_dir = target_dir # set from --target option - self.session = session - self.pycompile = pycompile - self.isolated = isolated - if wheel_download_dir: - wheel_download_dir = normalize_path(wheel_download_dir) - self.wheel_download_dir = wheel_download_dir - self._wheel_cache = wheel_cache - self.require_hashes = require_hashes - # Maps from install_req -> dependencies_of_install_req - self._dependencies = defaultdict(list) - - def __str__(self): - reqs = [req for req in self.requirements.values() - if not req.comes_from] - reqs.sort(key=lambda req: req.name.lower()) - return ' '.join([str(req.req) for req in reqs]) - - def __repr__(self): - reqs = [req for req in self.requirements.values()] - reqs.sort(key=lambda req: req.name.lower()) - reqs_str = ', '.join([str(req.req) for req in reqs]) - return ('<%s object; %d requirement(s): %s>' - % (self.__class__.__name__, len(reqs), reqs_str)) - - def add_requirement(self, install_req, parent_req_name=None): - """Add install_req as a requirement to install. - - :param parent_req_name: The name of the requirement that needed this - added. 
The name is used because when multiple unnamed requirements - resolve to the same name, we could otherwise end up with dependency - links that point outside the Requirements set. parent_req must - already be added. Note that None implies that this is a user - supplied requirement, vs an inferred one. - :return: Additional requirements to scan. That is either [] if - the requirement is not applicable, or [install_req] if the - requirement is applicable and has just been added. - """ - name = install_req.name - if not install_req.match_markers(): - logger.warning("Ignoring %s: markers %r don't match your " - "environment", install_req.name, - install_req.markers) - return [] - - install_req.as_egg = self.as_egg - install_req.use_user_site = self.use_user_site - install_req.target_dir = self.target_dir - install_req.pycompile = self.pycompile - if not name: - # url or path requirement w/o an egg fragment - self.unnamed_requirements.append(install_req) - return [install_req] - else: - try: - existing_req = self.get_requirement(name) - except KeyError: - existing_req = None - if (parent_req_name is None and existing_req and not - existing_req.constraint and - existing_req.extras == install_req.extras and not - existing_req.req.specs == install_req.req.specs): - raise InstallationError( - 'Double requirement given: %s (already in %s, name=%r)' - % (install_req, existing_req, name)) - if not existing_req: - # Add requirement - self.requirements[name] = install_req - # FIXME: what about other normalizations? E.g., _ vs. -? - if name.lower() != name: - self.requirement_aliases[name.lower()] = name - result = [install_req] - else: - # Assume there's no need to scan, and that we've already - # encountered this for scanning. 
- result = [] - if not install_req.constraint and existing_req.constraint: - if (install_req.link and not (existing_req.link and - install_req.link.path == existing_req.link.path)): - self.reqs_to_cleanup.append(install_req) - raise InstallationError( - "Could not satisfy constraints for '%s': " - "installation from path or url cannot be " - "constrained to a version" % name) - # If we're now installing a constraint, mark the existing - # object for real installation. - existing_req.constraint = False - existing_req.extras = tuple( - sorted(set(existing_req.extras).union( - set(install_req.extras)))) - logger.debug("Setting %s extras to: %s", - existing_req, existing_req.extras) - # And now we need to scan this. - result = [existing_req] - # Canonicalise to the already-added object for the backref - # check below. - install_req = existing_req - if parent_req_name: - parent_req = self.get_requirement(parent_req_name) - self._dependencies[parent_req].append(install_req) - return result - - def has_requirement(self, project_name): - name = project_name.lower() - if (name in self.requirements and - not self.requirements[name].constraint or - name in self.requirement_aliases and - not self.requirements[self.requirement_aliases[name]].constraint): - return True - return False - - @property - def has_requirements(self): - return list(req for req in self.requirements.values() if not - req.constraint) or self.unnamed_requirements - - @property - def is_download(self): - if self.download_dir: - self.download_dir = expanduser(self.download_dir) - if os.path.exists(self.download_dir): - return True - else: - logger.critical('Could not find download directory') - raise InstallationError( - "Could not find or access download directory '%s'" - % display_path(self.download_dir)) - return False - - def get_requirement(self, project_name): - for name in project_name, project_name.lower(): - if name in self.requirements: - return self.requirements[name] - if name in 
self.requirement_aliases: - return self.requirements[self.requirement_aliases[name]] - raise KeyError("No project with the name %r" % project_name) - - def uninstall(self, auto_confirm=False): - for req in self.requirements.values(): - if req.constraint: - continue - req.uninstall(auto_confirm=auto_confirm) - req.commit_uninstall() - - def prepare_files(self, finder): - """ - Prepare process. Create temp directories, download and/or unpack files. - """ - # make the wheelhouse - if self.wheel_download_dir: - ensure_dir(self.wheel_download_dir) - - # If any top-level requirement has a hash specified, enter - # hash-checking mode, which requires hashes from all. - root_reqs = self.unnamed_requirements + self.requirements.values() - require_hashes = (self.require_hashes or - any(req.has_hash_options for req in root_reqs)) - if require_hashes and self.as_egg: - raise InstallationError( - '--egg is not allowed with --require-hashes mode, since it ' - 'delegates dependency resolution to setuptools and could thus ' - 'result in installation of unhashed packages.') - - # Actually prepare the files, and collect any exceptions. Most hash - # exceptions cannot be checked ahead of time, because - # req.populate_link() needs to be called before we can make decisions - # based on link type. - discovered_reqs = [] - hash_errors = HashErrors() - for req in chain(root_reqs, discovered_reqs): - try: - discovered_reqs.extend(self._prepare_file( - finder, - req, - require_hashes=require_hashes, - ignore_dependencies=self.ignore_dependencies)) - except HashError as exc: - exc.req = req - hash_errors.append(exc) - - if hash_errors: - raise hash_errors - - def _check_skip_installed(self, req_to_install, finder): - """Check if req_to_install should be skipped. - - This will check if the req is installed, and whether we should upgrade - or reinstall it, taking into account all the relevant user options. 
- - After calling this req_to_install will only have satisfied_by set to - None if the req_to_install is to be upgraded/reinstalled etc. Any - other value will be a dist recording the current thing installed that - satisfies the requirement. - - Note that for vcs urls and the like we can't assess skipping in this - routine - we simply identify that we need to pull the thing down, - then later on it is pulled down and introspected to assess upgrade/ - reinstalls etc. - - :return: A text reason for why it was skipped, or None. - """ - # Check whether to upgrade/reinstall this req or not. - req_to_install.check_if_exists() - if req_to_install.satisfied_by: - skip_reason = 'satisfied (use --upgrade to upgrade)' - if self.upgrade: - best_installed = False - # For link based requirements we have to pull the - # tree down and inspect to assess the version #, so - # its handled way down. - if not (self.force_reinstall or req_to_install.link): - try: - finder.find_requirement(req_to_install, self.upgrade) - except BestVersionAlreadyInstalled: - skip_reason = 'up-to-date' - best_installed = True - except DistributionNotFound: - # No distribution found, so we squash the - # error - it will be raised later when we - # re-try later to do the install. - # Why don't we just raise here? - pass - - if not best_installed: - # don't uninstall conflict if user install and - # conflict is not user install - if not (self.use_user_site and not - dist_in_usersite(req_to_install.satisfied_by)): - req_to_install.conflicts_with = \ - req_to_install.satisfied_by - req_to_install.satisfied_by = None - return skip_reason - else: - return None - - def _prepare_file(self, - finder, - req_to_install, - require_hashes=False, - ignore_dependencies=False): - """Prepare a single requirements file. - - :return: A list of additional InstallRequirements to also install. 
- """ - # Tell user what we are doing for this requirement: - # obtain (editable), skipping, processing (local url), collecting - # (remote url or package name) - if req_to_install.constraint or req_to_install.prepared: - return [] - - req_to_install.prepared = True - - # ###################### # - # # print log messages # # - # ###################### # - if req_to_install.editable: - logger.info('Obtaining %s', req_to_install) - else: - # satisfied_by is only evaluated by calling _check_skip_installed, - # so it must be None here. - assert req_to_install.satisfied_by is None - if not self.ignore_installed: - skip_reason = self._check_skip_installed( - req_to_install, finder) - - if req_to_install.satisfied_by: - assert skip_reason is not None, ( - '_check_skip_installed returned None but ' - 'req_to_install.satisfied_by is set to %r' - % (req_to_install.satisfied_by,)) - logger.info( - 'Requirement already %s: %s', skip_reason, - req_to_install) - else: - if (req_to_install.link and - req_to_install.link.scheme == 'file'): - path = url_to_path(req_to_install.link.url) - logger.info('Processing %s', display_path(path)) - else: - logger.info('Collecting %s', req_to_install) - - with indent_log(): - # ################################ # - # # vcs update or unpack archive # # - # ################################ # - if req_to_install.editable: - if require_hashes: - raise InstallationError( - 'The editable requirement %s cannot be installed when ' - 'requiring hashes, because there is no single file to ' - 'hash.' % req_to_install) - req_to_install.ensure_has_source_dir(self.src_dir) - req_to_install.update_editable(not self.is_download) - abstract_dist = make_abstract_dist(req_to_install) - abstract_dist.prep_for_dist() - if self.is_download: - req_to_install.archive(self.download_dir) - elif req_to_install.satisfied_by: - if require_hashes: - logger.debug( - 'Since it is already installed, we are trusting this ' - 'package without checking its hash. 
To ensure a ' - 'completely repeatable environment, install into an ' - 'empty virtualenv.') - abstract_dist = Installed(req_to_install) - else: - # @@ if filesystem packages are not marked - # editable in a req, a non deterministic error - # occurs when the script attempts to unpack the - # build directory - req_to_install.ensure_has_source_dir(self.build_dir) - # If a checkout exists, it's unwise to keep going. version - # inconsistencies are logged later, but do not fail the - # installation. - # FIXME: this won't upgrade when there's an existing - # package unpacked in `req_to_install.source_dir` - if os.path.exists( - os.path.join(req_to_install.source_dir, 'setup.py')): - raise PreviousBuildDirError( - "pip can't proceed with requirements '%s' due to a" - " pre-existing build directory (%s). This is " - "likely due to a previous installation that failed" - ". pip is being responsible and not assuming it " - "can delete this. Please delete it and try again." - % (req_to_install, req_to_install.source_dir) - ) - req_to_install.populate_link( - finder, self.upgrade, require_hashes) - # We can't hit this spot and have populate_link return None. - # req_to_install.satisfied_by is None here (because we're - # guarded) and upgrade has no impact except when satisfied_by - # is not None. - # Then inside find_requirement existing_applicable -> False - # If no new versions are found, DistributionNotFound is raised, - # otherwise a result is guaranteed. - assert req_to_install.link - link = req_to_install.link - - # Now that we have the real link, we can tell what kind of - # requirements we have and raise some more informative errors - # than otherwise. (For example, we can raise VcsHashUnsupported - # for a VCS URL rather than HashMissing.) 
- if require_hashes: - # We could check these first 2 conditions inside - # unpack_url and save repetition of conditions, but then - # we would report less-useful error messages for - # unhashable requirements, complaining that there's no - # hash provided. - if is_vcs_url(link): - raise VcsHashUnsupported() - elif is_file_url(link) and is_dir_url(link): - raise DirectoryUrlHashUnsupported() - if (not req_to_install.original_link and - not req_to_install.is_pinned): - # Unpinned packages are asking for trouble when a new - # version is uploaded. This isn't a security check, but - # it saves users a surprising hash mismatch in the - # future. - # - # file:/// URLs aren't pinnable, so don't complain - # about them not being pinned. - raise HashUnpinned() - hashes = req_to_install.hashes( - trust_internet=not require_hashes) - if require_hashes and not hashes: - # Known-good hashes are missing for this requirement, so - # shim it with a facade object that will provoke hash - # computation and then raise a HashMissing exception - # showing the user what the hash should be. - hashes = MissingHashes() - - try: - download_dir = self.download_dir - # We always delete unpacked sdists after pip ran. - autodelete_unpacked = True - if req_to_install.link.is_wheel \ - and self.wheel_download_dir: - # when doing 'pip wheel` we download wheels to a - # dedicated dir. - download_dir = self.wheel_download_dir - if req_to_install.link.is_wheel: - if download_dir: - # When downloading, we only unpack wheels to get - # metadata. - autodelete_unpacked = True - else: - # When installing a wheel, we use the unpacked - # wheel. 
- autodelete_unpacked = False - unpack_url( - req_to_install.link, req_to_install.source_dir, - download_dir, autodelete_unpacked, - session=self.session, hashes=hashes) - except requests.HTTPError as exc: - logger.critical( - 'Could not install requirement %s because ' - 'of error %s', - req_to_install, - exc, - ) - raise InstallationError( - 'Could not install requirement %s because ' - 'of HTTP error %s for URL %s' % - (req_to_install, exc, req_to_install.link) - ) - abstract_dist = make_abstract_dist(req_to_install) - abstract_dist.prep_for_dist() - if self.is_download: - # Make a .zip of the source_dir we already created. - if req_to_install.link.scheme in vcs.all_schemes: - req_to_install.archive(self.download_dir) - # req_to_install.req is only avail after unpack for URL - # pkgs repeat check_if_exists to uninstall-on-upgrade - # (#14) - if not self.ignore_installed: - req_to_install.check_if_exists() - if req_to_install.satisfied_by: - if self.upgrade or self.ignore_installed: - # don't uninstall conflict if user install and - # conflict is not user install - if not (self.use_user_site and not - dist_in_usersite( - req_to_install.satisfied_by)): - req_to_install.conflicts_with = \ - req_to_install.satisfied_by - req_to_install.satisfied_by = None - else: - logger.info( - 'Requirement already satisfied (use ' - '--upgrade to upgrade): %s', - req_to_install, - ) - - # ###################### # - # # parse dependencies # # - # ###################### # - dist = abstract_dist.dist(finder) - more_reqs = [] - - def add_req(subreq): - sub_install_req = InstallRequirement( - str(subreq), - req_to_install, - isolated=self.isolated, - wheel_cache=self._wheel_cache, - ) - more_reqs.extend(self.add_requirement( - sub_install_req, req_to_install.name)) - - # We add req_to_install before its dependencies, so that we - # can refer to it when adding dependencies. 
- if not self.has_requirement(req_to_install.name): - # 'unnamed' requirements will get added here - self.add_requirement(req_to_install, None) - - if not ignore_dependencies: - if (req_to_install.extras): - logger.debug( - "Installing extra requirements: %r", - ','.join(req_to_install.extras), - ) - missing_requested = sorted( - set(req_to_install.extras) - set(dist.extras) - ) - for missing in missing_requested: - logger.warning( - '%s does not provide the extra \'%s\'', - dist, missing - ) - - available_requested = sorted( - set(dist.extras) & set(req_to_install.extras) - ) - for subreq in dist.requires(available_requested): - add_req(subreq) - - # cleanup tmp src - self.reqs_to_cleanup.append(req_to_install) - - if not req_to_install.editable and not req_to_install.satisfied_by: - # XXX: --no-install leads this to report 'Successfully - # downloaded' for only non-editable reqs, even though we took - # action on them. - self.successfully_downloaded.append(req_to_install) - - return more_reqs - - def cleanup_files(self): - """Clean up files, remove builds.""" - logger.debug('Cleaning up...') - with indent_log(): - for req in self.reqs_to_cleanup: - req.remove_temporary_source() - - def _to_install(self): - """Create the installation order. - - The installation order is topological - requirements are installed - before the requiring thing. We break cycles at an arbitrary point, - and make no other guarantees. - """ - # The current implementation, which we may change at any point - # installs the user specified things in the order given, except when - # dependencies must come earlier to achieve topological order. 
- order = [] - ordered_reqs = set() - - def schedule(req): - if req.satisfied_by or req in ordered_reqs: - return - if req.constraint: - return - ordered_reqs.add(req) - for dep in self._dependencies[req]: - schedule(dep) - order.append(req) - for install_req in self.requirements.values(): - schedule(install_req) - return order - - def install(self, install_options, global_options=(), *args, **kwargs): - """ - Install everything in this set (after having downloaded and unpacked - the packages) - """ - to_install = self._to_install() - - if to_install: - logger.info( - 'Installing collected packages: %s', - ', '.join([req.name for req in to_install]), - ) - - with indent_log(): - for requirement in to_install: - if requirement.conflicts_with: - logger.info( - 'Found existing installation: %s', - requirement.conflicts_with, - ) - with indent_log(): - requirement.uninstall(auto_confirm=True) - try: - requirement.install( - install_options, - global_options, - *args, - **kwargs - ) - except: - # if install did not succeed, rollback previous uninstall - if (requirement.conflicts_with and not - requirement.install_succeeded): - requirement.rollback_uninstall() - raise - else: - if (requirement.conflicts_with and - requirement.install_succeeded): - requirement.commit_uninstall() - requirement.remove_temporary_source() - - self.successfully_installed = to_install diff --git a/Shared/lib/python3.4/site-packages/pip/req/req_uninstall.py b/Shared/lib/python3.4/site-packages/pip/req/req_uninstall.py deleted file mode 100644 index 5248430..0000000 --- a/Shared/lib/python3.4/site-packages/pip/req/req_uninstall.py +++ /dev/null @@ -1,195 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import tempfile - -from pip.compat import uses_pycache, WINDOWS, cache_from_source -from pip.exceptions import UninstallationError -from pip.utils import rmtree, ask, is_local, renames, normalize_path -from pip.utils.logging import indent_log - - -logger = 
logging.getLogger(__name__) - - -class UninstallPathSet(object): - """A set of file paths to be removed in the uninstallation of a - requirement.""" - def __init__(self, dist): - self.paths = set() - self._refuse = set() - self.pth = {} - self.dist = dist - self.save_dir = None - self._moved_paths = [] - - def _permitted(self, path): - """ - Return True if the given path is one we are permitted to - remove/modify, False otherwise. - - """ - return is_local(path) - - def add(self, path): - head, tail = os.path.split(path) - - # we normalize the head to resolve parent directory symlinks, but not - # the tail, since we only want to uninstall symlinks, not their targets - path = os.path.join(normalize_path(head), os.path.normcase(tail)) - - if not os.path.exists(path): - return - if self._permitted(path): - self.paths.add(path) - else: - self._refuse.add(path) - - # __pycache__ files can show up after 'installed-files.txt' is created, - # due to imports - if os.path.splitext(path)[1] == '.py' and uses_pycache: - self.add(cache_from_source(path)) - - def add_pth(self, pth_file, entry): - pth_file = normalize_path(pth_file) - if self._permitted(pth_file): - if pth_file not in self.pth: - self.pth[pth_file] = UninstallPthEntries(pth_file) - self.pth[pth_file].add(entry) - else: - self._refuse.add(pth_file) - - def compact(self, paths): - """Compact a path set to contain the minimal number of paths - necessary to contain all paths in the set. 
If /a/path/ and - /a/path/to/a/file.txt are both in the set, leave only the - shorter path.""" - short_paths = set() - for path in sorted(paths, key=len): - if not any([ - (path.startswith(shortpath) and - path[len(shortpath.rstrip(os.path.sep))] == os.path.sep) - for shortpath in short_paths]): - short_paths.add(path) - return short_paths - - def _stash(self, path): - return os.path.join( - self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep)) - - def remove(self, auto_confirm=False): - """Remove paths in ``self.paths`` with confirmation (unless - ``auto_confirm`` is True).""" - if not self.paths: - logger.info( - "Can't uninstall '%s'. No files were found to uninstall.", - self.dist.project_name, - ) - return - logger.info( - 'Uninstalling %s-%s:', - self.dist.project_name, self.dist.version - ) - - with indent_log(): - paths = sorted(self.compact(self.paths)) - - if auto_confirm: - response = 'y' - else: - for path in paths: - logger.info(path) - response = ask('Proceed (y/n)? 
', ('y', 'n')) - if self._refuse: - logger.info('Not removing or modifying (outside of prefix):') - for path in self.compact(self._refuse): - logger.info(path) - if response == 'y': - self.save_dir = tempfile.mkdtemp(suffix='-uninstall', - prefix='pip-') - for path in paths: - new_path = self._stash(path) - logger.debug('Removing file or directory %s', path) - self._moved_paths.append(path) - renames(path, new_path) - for pth in self.pth.values(): - pth.remove() - logger.info( - 'Successfully uninstalled %s-%s', - self.dist.project_name, self.dist.version - ) - - def rollback(self): - """Rollback the changes previously made by remove().""" - if self.save_dir is None: - logger.error( - "Can't roll back %s; was not uninstalled", - self.dist.project_name, - ) - return False - logger.info('Rolling back uninstall of %s', self.dist.project_name) - for path in self._moved_paths: - tmp_path = self._stash(path) - logger.debug('Replacing %s', path) - renames(tmp_path, path) - for pth in self.pth.values(): - pth.rollback() - - def commit(self): - """Remove temporary save dir: rollback will no longer be possible.""" - if self.save_dir is not None: - rmtree(self.save_dir) - self.save_dir = None - self._moved_paths = [] - - -class UninstallPthEntries(object): - def __init__(self, pth_file): - if not os.path.isfile(pth_file): - raise UninstallationError( - "Cannot remove entries from nonexistent file %s" % pth_file - ) - self.file = pth_file - self.entries = set() - self._saved_lines = None - - def add(self, entry): - entry = os.path.normcase(entry) - # On Windows, os.path.normcase converts the entry to use - # backslashes. This is correct for entries that describe absolute - # paths outside of site-packages, but all the others use forward - # slashes. 
- if WINDOWS and not os.path.splitdrive(entry)[0]: - entry = entry.replace('\\', '/') - self.entries.add(entry) - - def remove(self): - logger.debug('Removing pth entries from %s:', self.file) - with open(self.file, 'rb') as fh: - # windows uses '\r\n' with py3k, but uses '\n' with py2.x - lines = fh.readlines() - self._saved_lines = lines - if any(b'\r\n' in line for line in lines): - endline = '\r\n' - else: - endline = '\n' - for entry in self.entries: - try: - logger.debug('Removing entry: %s', entry) - lines.remove((entry + endline).encode("utf-8")) - except ValueError: - pass - with open(self.file, 'wb') as fh: - fh.writelines(lines) - - def rollback(self): - if self._saved_lines is None: - logger.error( - 'Cannot roll back changes to %s, none were made', self.file - ) - return False - logger.debug('Rolling %s back to previous state', self.file) - with open(self.file, 'wb') as fh: - fh.writelines(self._saved_lines) - return True diff --git a/Shared/lib/python3.4/site-packages/pip/utils/build.py b/Shared/lib/python3.4/site-packages/pip/utils/build.py deleted file mode 100644 index fc65cfa..0000000 --- a/Shared/lib/python3.4/site-packages/pip/utils/build.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import absolute_import - -import os.path -import tempfile - -from pip.utils import rmtree - - -class BuildDirectory(object): - - def __init__(self, name=None, delete=None): - # If we were not given an explicit directory, and we were not given an - # explicit delete option, then we'll default to deleting. - if name is None and delete is None: - delete = True - - if name is None: - # We realpath here because some systems have their default tmpdir - # symlinked to another directory. This tends to confuse build - # scripts, so we canonicalize the path by traversing potential - # symlinks here. 
- name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-")) - # If we were not given an explicit directory, and we were not given - # an explicit delete option, then we'll default to deleting. - if delete is None: - delete = True - - self.name = name - self.delete = delete - - def __repr__(self): - return "<{} {!r}>".format(self.__class__.__name__, self.name) - - def __enter__(self): - return self.name - - def __exit__(self, exc, value, tb): - self.cleanup() - - def cleanup(self): - if self.delete: - rmtree(self.name) diff --git a/Shared/lib/python3.4/site-packages/pip/utils/deprecation.py b/Shared/lib/python3.4/site-packages/pip/utils/deprecation.py deleted file mode 100644 index 2fb1d1e..0000000 --- a/Shared/lib/python3.4/site-packages/pip/utils/deprecation.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -A module that implments tooling to enable easy warnings about deprecations. -""" -from __future__ import absolute_import - -import logging -import warnings - - -class PipDeprecationWarning(Warning): - pass - - -class Pending(object): - pass - - -class RemovedInPip9Warning(PipDeprecationWarning): - pass - - -class RemovedInPip10Warning(PipDeprecationWarning, Pending): - pass - - -class Python26DeprecationWarning(PipDeprecationWarning, Pending): - pass - - -# Warnings <-> Logging Integration - - -_warnings_showwarning = None - - -def _showwarning(message, category, filename, lineno, file=None, line=None): - if file is not None: - if _warnings_showwarning is not None: - _warnings_showwarning( - message, category, filename, lineno, file, line, - ) - else: - if issubclass(category, PipDeprecationWarning): - # We use a specially named logger which will handle all of the - # deprecation messages for pip. - logger = logging.getLogger("pip.deprecations") - - # This is purposely using the % formatter here instead of letting - # the logging module handle the interpolation. This is because we - # want it to appear as if someone typed this entire message out. 
- log_message = "DEPRECATION: %s" % message - - # PipDeprecationWarnings that are Pending still have at least 2 - # versions to go until they are removed so they can just be - # warnings. Otherwise, they will be removed in the very next - # version of pip. We want these to be more obvious so we use the - # ERROR logging level. - if issubclass(category, Pending): - logger.warning(log_message) - else: - logger.error(log_message) - else: - _warnings_showwarning( - message, category, filename, lineno, file, line, - ) - - -def install_warning_logger(): - # Enable our Deprecation Warnings - warnings.simplefilter("default", PipDeprecationWarning, append=True) - - global _warnings_showwarning - - if _warnings_showwarning is None: - _warnings_showwarning = warnings.showwarning - warnings.showwarning = _showwarning diff --git a/Shared/lib/python3.4/site-packages/pip/utils/logging.py b/Shared/lib/python3.4/site-packages/pip/utils/logging.py deleted file mode 100644 index 1c1053a..0000000 --- a/Shared/lib/python3.4/site-packages/pip/utils/logging.py +++ /dev/null @@ -1,130 +0,0 @@ -from __future__ import absolute_import - -import contextlib -import logging -import logging.handlers -import os - -try: - import threading -except ImportError: - import dummy_threading as threading - -from pip.compat import WINDOWS -from pip.utils import ensure_dir - -try: - from pip._vendor import colorama -# Lots of different errors can come from this, including SystemError and -# ImportError. -except Exception: - colorama = None - - -_log_state = threading.local() -_log_state.indentation = 0 - - -@contextlib.contextmanager -def indent_log(num=2): - """ - A context manager which will cause the log output to be indented for any - log messages emitted inside it. 
- """ - _log_state.indentation += num - try: - yield - finally: - _log_state.indentation -= num - - -def get_indentation(): - return getattr(_log_state, 'indentation', 0) - - -class IndentingFormatter(logging.Formatter): - - def format(self, record): - """ - Calls the standard formatter, but will indent all of the log messages - by our current indentation level. - """ - formatted = logging.Formatter.format(self, record) - formatted = "".join([ - (" " * get_indentation()) + line - for line in formatted.splitlines(True) - ]) - return formatted - - -def _color_wrap(*colors): - def wrapped(inp): - return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) - return wrapped - - -class ColorizedStreamHandler(logging.StreamHandler): - - # Don't build up a list of colors if we don't have colorama - if colorama: - COLORS = [ - # This needs to be in order from highest logging level to lowest. - (logging.ERROR, _color_wrap(colorama.Fore.RED)), - (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)), - ] - else: - COLORS = [] - - def __init__(self, stream=None): - logging.StreamHandler.__init__(self, stream) - - if WINDOWS and colorama: - self.stream = colorama.AnsiToWin32(self.stream) - - def should_color(self): - # Don't colorize things if we do not have colorama - if not colorama: - return False - - real_stream = ( - self.stream if not isinstance(self.stream, colorama.AnsiToWin32) - else self.stream.wrapped - ) - - # If the stream is a tty we should color it - if hasattr(real_stream, "isatty") and real_stream.isatty(): - return True - - # If we have an ASNI term we should color it - if os.environ.get("TERM") == "ANSI": - return True - - # If anything else we should not color it - return False - - def format(self, record): - msg = logging.StreamHandler.format(self, record) - - if self.should_color(): - for level, color in self.COLORS: - if record.levelno >= level: - msg = color(msg) - break - - return msg - - -class 
BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): - - def _open(self): - ensure_dir(os.path.dirname(self.baseFilename)) - return logging.handlers.RotatingFileHandler._open(self) - - -class MaxLevelFilter(logging.Filter): - - def __init__(self, level): - self.level = level - - def filter(self, record): - return record.levelno < self.level diff --git a/Shared/lib/python3.4/site-packages/pip/utils/setuptools_build.py b/Shared/lib/python3.4/site-packages/pip/utils/setuptools_build.py deleted file mode 100644 index 4c9095e..0000000 --- a/Shared/lib/python3.4/site-packages/pip/utils/setuptools_build.py +++ /dev/null @@ -1,6 +0,0 @@ -# Shim to wrap setup.py invocation with setuptools -SETUPTOOLS_SHIM = ( - "import setuptools, tokenize;__file__=%r;" - "exec(compile(getattr(tokenize, 'open', open)(__file__).read()" - ".replace('\\r\\n', '\\n'), __file__, 'exec'))" -) diff --git a/Shared/lib/python3.4/site-packages/pip/vcs/__init__.py b/Shared/lib/python3.4/site-packages/pip/vcs/__init__.py deleted file mode 100644 index 9dc1c60..0000000 --- a/Shared/lib/python3.4/site-packages/pip/vcs/__init__.py +++ /dev/null @@ -1,363 +0,0 @@ -"""Handles all VCS (version control) support""" -from __future__ import absolute_import - -import errno -import logging -import os -import shutil - -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip.exceptions import BadCommand -from pip.utils import (display_path, backup_dir, call_subprocess, - rmtree, ask_path_exists) - - -__all__ = ['vcs', 'get_src_requirement'] - - -logger = logging.getLogger(__name__) - - -class VcsSupport(object): - _registry = {} - schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] - - def __init__(self): - # Register more schemes with urlparse for various version control - # systems - urllib_parse.uses_netloc.extend(self.schemes) - # Python >= 2.7.4, 3.3 doesn't have uses_fragment - if getattr(urllib_parse, 'uses_fragment', None): - urllib_parse.uses_fragment.extend(self.schemes) - 
super(VcsSupport, self).__init__() - - def __iter__(self): - return self._registry.__iter__() - - @property - def backends(self): - return list(self._registry.values()) - - @property - def dirnames(self): - return [backend.dirname for backend in self.backends] - - @property - def all_schemes(self): - schemes = [] - for backend in self.backends: - schemes.extend(backend.schemes) - return schemes - - def register(self, cls): - if not hasattr(cls, 'name'): - logger.warning('Cannot register VCS %s', cls.__name__) - return - if cls.name not in self._registry: - self._registry[cls.name] = cls - logger.debug('Registered VCS backend: %s', cls.name) - - def unregister(self, cls=None, name=None): - if name in self._registry: - del self._registry[name] - elif cls in self._registry.values(): - del self._registry[cls.name] - else: - logger.warning('Cannot unregister because no class or name given') - - def get_backend_name(self, location): - """ - Return the name of the version control backend if found at given - location, e.g. 
vcs.get_backend_name('/path/to/vcs/checkout') - """ - for vc_type in self._registry.values(): - if vc_type.controls_location(location): - logger.debug('Determine that %s uses VCS: %s', - location, vc_type.name) - return vc_type.name - return None - - def get_backend(self, name): - name = name.lower() - if name in self._registry: - return self._registry[name] - - def get_backend_from_location(self, location): - vc_type = self.get_backend_name(location) - if vc_type: - return self.get_backend(vc_type) - return None - - -vcs = VcsSupport() - - -class VersionControl(object): - name = '' - dirname = '' - # List of supported schemes for this Version Control - schemes = () - - def __init__(self, url=None, *args, **kwargs): - self.url = url - super(VersionControl, self).__init__(*args, **kwargs) - - def _is_local_repository(self, repo): - """ - posix absolute paths start with os.path.sep, - win32 ones ones start with drive (like c:\\folder) - """ - drive, tail = os.path.splitdrive(repo) - return repo.startswith(os.path.sep) or drive - - # See issue #1083 for why this method was introduced: - # https://github.com/pypa/pip/issues/1083 - def translate_egg_surname(self, surname): - # For example, Django has branches of the form "stable/1.7.x". - return surname.replace('/', '_') - - def export(self, location): - """ - Export the repository at the url to the destination location - i.e. only download the files, without vcs informations - """ - raise NotImplementedError - - def get_url_rev(self): - """ - Returns the correct repository URL and revision by parsing the given - repository URL - """ - error_message = ( - "Sorry, '%s' is a malformed VCS url. " - "The format is +://, " - "e.g. 
svn+http://myrepo/svn/MyApp#egg=MyApp" - ) - assert '+' in self.url, error_message % self.url - url = self.url.split('+', 1)[1] - scheme, netloc, path, query, frag = urllib_parse.urlsplit(url) - rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) - url = urllib_parse.urlunsplit((scheme, netloc, path, query, '')) - return url, rev - - def get_info(self, location): - """ - Returns (url, revision), where both are strings - """ - assert not location.rstrip('/').endswith(self.dirname), \ - 'Bad directory: %s' % location - return self.get_url(location), self.get_revision(location) - - def normalize_url(self, url): - """ - Normalize a URL for comparison by unquoting it and removing any - trailing slash. - """ - return urllib_parse.unquote(url).rstrip('/') - - def compare_urls(self, url1, url2): - """ - Compare two repo URLs for identity, ignoring incidental differences. - """ - return (self.normalize_url(url1) == self.normalize_url(url2)) - - def obtain(self, dest): - """ - Called when installing or updating an editable package, takes the - source path of the checkout. - """ - raise NotImplementedError - - def switch(self, dest, url, rev_options): - """ - Switch the repo at ``dest`` to point to ``URL``. - """ - raise NotImplementedError - - def update(self, dest, rev_options): - """ - Update an already-existing repo to the given ``rev_options``. - """ - raise NotImplementedError - - def check_version(self, dest, rev_options): - """ - Return True if the version is identical to what exists and - doesn't need to be updated. - """ - raise NotImplementedError - - def check_destination(self, dest, url, rev_options, rev_display): - """ - Prepare a location to receive a checkout/clone. - - Return True if the location is ready for (and requires) a - checkout/clone, False otherwise. 
- """ - checkout = True - prompt = False - if os.path.exists(dest): - checkout = False - if os.path.exists(os.path.join(dest, self.dirname)): - existing_url = self.get_url(dest) - if self.compare_urls(existing_url, url): - logger.debug( - '%s in %s exists, and has correct URL (%s)', - self.repo_name.title(), - display_path(dest), - url, - ) - if not self.check_version(dest, rev_options): - logger.info( - 'Updating %s %s%s', - display_path(dest), - self.repo_name, - rev_display, - ) - self.update(dest, rev_options) - else: - logger.info( - 'Skipping because already up-to-date.') - else: - logger.warning( - '%s %s in %s exists with URL %s', - self.name, - self.repo_name, - display_path(dest), - existing_url, - ) - prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', - ('s', 'i', 'w', 'b')) - else: - logger.warning( - 'Directory %s already exists, and is not a %s %s.', - dest, - self.name, - self.repo_name, - ) - prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b')) - if prompt: - logger.warning( - 'The plan is to install the %s repository %s', - self.name, - url, - ) - response = ask_path_exists('What to do? 
%s' % prompt[0], - prompt[1]) - - if response == 's': - logger.info( - 'Switching %s %s to %s%s', - self.repo_name, - display_path(dest), - url, - rev_display, - ) - self.switch(dest, url, rev_options) - elif response == 'i': - # do nothing - pass - elif response == 'w': - logger.warning('Deleting %s', display_path(dest)) - rmtree(dest) - checkout = True - elif response == 'b': - dest_dir = backup_dir(dest) - logger.warning( - 'Backing up %s to %s', display_path(dest), dest_dir, - ) - shutil.move(dest, dest_dir) - checkout = True - return checkout - - def unpack(self, location): - """ - Clean up current location and download the url repository - (and vcs infos) into location - """ - if os.path.exists(location): - rmtree(location) - self.obtain(location) - - def get_src_requirement(self, dist, location): - """ - Return a string representing the requirement needed to - redownload the files currently present in location, something - like: - {repository_url}@{revision}#egg={project_name}-{version_identifier} - """ - raise NotImplementedError - - def get_url(self, location): - """ - Return the url used at location - Used in get_info or check_destination - """ - raise NotImplementedError - - def get_revision(self, location): - """ - Return the current revision of the files at location - Used in get_info - """ - raise NotImplementedError - - def run_command(self, cmd, show_stdout=True, cwd=None, - on_returncode='raise', - command_level=logging.DEBUG, command_desc=None, - extra_environ=None, spinner=None): - """ - Run a VCS subcommand - This is simply a wrapper around call_subprocess that adds the VCS - command name, and checks that the VCS is available - """ - cmd = [self.name] + cmd - try: - return call_subprocess(cmd, show_stdout, cwd, - on_returncode, command_level, - command_desc, extra_environ, - spinner) - except OSError as e: - # errno.ENOENT = no such file or directory - # In other words, the VCS executable isn't available - if e.errno == errno.ENOENT: - raise 
BadCommand('Cannot find command %r' % self.name) - else: - raise # re-raise exception if a different error occurred - - @classmethod - def controls_location(cls, location): - """ - Check if a location is controlled by the vcs. - It is meant to be overridden to implement smarter detection - mechanisms for specific vcs. - """ - logger.debug('Checking in %s for %s (%s)...', - location, cls.dirname, cls.name) - path = os.path.join(location, cls.dirname) - return os.path.exists(path) - - -def get_src_requirement(dist, location): - version_control = vcs.get_backend_from_location(location) - if version_control: - try: - return version_control().get_src_requirement(dist, - location) - except BadCommand: - logger.warning( - 'cannot determine version of editable source in %s ' - '(%s command not found in path)', - location, - version_control.name, - ) - return dist.as_requirement() - logger.warning( - 'cannot determine version of editable source in %s (is not SVN ' - 'checkout, Git clone, Mercurial clone or Bazaar branch)', - location, - ) - return dist.as_requirement() diff --git a/Shared/lib/python3.4/site-packages/pip/vcs/git.py b/Shared/lib/python3.4/site-packages/pip/vcs/git.py deleted file mode 100644 index 24528de..0000000 --- a/Shared/lib/python3.4/site-packages/pip/vcs/git.py +++ /dev/null @@ -1,277 +0,0 @@ -from __future__ import absolute_import - -import logging -import tempfile -import os.path - -from pip.compat import samefile -from pip.exceptions import BadCommand -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib import request as urllib_request - -from pip.utils import display_path, rmtree -from pip.vcs import vcs, VersionControl - - -urlsplit = urllib_parse.urlsplit -urlunsplit = urllib_parse.urlunsplit - - -logger = logging.getLogger(__name__) - - -class Git(VersionControl): - name = 'git' - dirname = '.git' - repo_name = 'clone' - schemes = ( - 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file', 
- ) - - def __init__(self, url=None, *args, **kwargs): - - # Works around an apparent Git bug - # (see http://article.gmane.org/gmane.comp.version-control.git/146500) - if url: - scheme, netloc, path, query, fragment = urlsplit(url) - if scheme.endswith('file'): - initial_slashes = path[:-len(path.lstrip('/'))] - newpath = ( - initial_slashes + - urllib_request.url2pathname(path) - .replace('\\', '/').lstrip('/') - ) - url = urlunsplit((scheme, netloc, newpath, query, fragment)) - after_plus = scheme.find('+') + 1 - url = scheme[:after_plus] + urlunsplit( - (scheme[after_plus:], netloc, newpath, query, fragment), - ) - - super(Git, self).__init__(url, *args, **kwargs) - - def export(self, location): - """Export the Git repository at the url to the destination location""" - temp_dir = tempfile.mkdtemp('-export', 'pip-') - self.unpack(temp_dir) - try: - if not location.endswith('/'): - location = location + '/' - self.run_command( - ['checkout-index', '-a', '-f', '--prefix', location], - show_stdout=False, cwd=temp_dir) - finally: - rmtree(temp_dir) - - def check_rev_options(self, rev, dest, rev_options): - """Check the revision options before checkout to compensate that tags - and branches may need origin/ as a prefix. - Returns the SHA1 of the branch or tag if found. - """ - revisions = self.get_short_refs(dest) - - origin_rev = 'origin/%s' % rev - if origin_rev in revisions: - # remote branch - return [revisions[origin_rev]] - elif rev in revisions: - # a local tag or branch name - return [revisions[rev]] - else: - logger.warning( - "Could not find a tag or branch '%s', assuming commit.", rev, - ) - return rev_options - - def check_version(self, dest, rev_options): - """ - Compare the current sha to the ref. ref may be a branch or tag name, - but current rev will always point to a sha. This means that a branch - or tag will never compare as True. So this ultimately only matches - against exact shas. 
- """ - return self.get_revision(dest).startswith(rev_options[0]) - - def switch(self, dest, url, rev_options): - self.run_command(['config', 'remote.origin.url', url], cwd=dest) - self.run_command(['checkout', '-q'] + rev_options, cwd=dest) - - self.update_submodules(dest) - - def update(self, dest, rev_options): - # First fetch changes from the default remote - self.run_command(['fetch', '-q'], cwd=dest) - # Then reset to wanted revision (maybe even origin/master) - if rev_options: - rev_options = self.check_rev_options( - rev_options[0], dest, rev_options, - ) - self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest) - #: update submodules - self.update_submodules(dest) - - def obtain(self, dest): - url, rev = self.get_url_rev() - if rev: - rev_options = [rev] - rev_display = ' (to %s)' % rev - else: - rev_options = ['origin/master'] - rev_display = '' - if self.check_destination(dest, url, rev_options, rev_display): - logger.info( - 'Cloning %s%s to %s', url, rev_display, display_path(dest), - ) - self.run_command(['clone', '-q', url, dest]) - - if rev: - rev_options = self.check_rev_options(rev, dest, rev_options) - # Only do a checkout if rev_options differs from HEAD - if not self.check_version(dest, rev_options): - self.run_command( - ['checkout', '-q'] + rev_options, - cwd=dest, - ) - #: repo may contain submodules - self.update_submodules(dest) - - def get_url(self, location): - """Return URL of the first remote encountered.""" - remotes = self.run_command( - ['config', '--get-regexp', 'remote\..*\.url'], - show_stdout=False, cwd=location) - first_remote = remotes.splitlines()[0] - url = first_remote.split(' ')[1] - return url.strip() - - def get_revision(self, location): - current_rev = self.run_command( - ['rev-parse', 'HEAD'], show_stdout=False, cwd=location) - return current_rev.strip() - - def get_full_refs(self, location): - """Yields tuples of (commit, ref) for branches and tags""" - output = self.run_command(['show-ref'], - 
show_stdout=False, cwd=location) - for line in output.strip().splitlines(): - commit, ref = line.split(' ', 1) - yield commit.strip(), ref.strip() - - def is_ref_remote(self, ref): - return ref.startswith('refs/remotes/') - - def is_ref_branch(self, ref): - return ref.startswith('refs/heads/') - - def is_ref_tag(self, ref): - return ref.startswith('refs/tags/') - - def is_ref_commit(self, ref): - """A ref is a commit sha if it is not anything else""" - return not any(( - self.is_ref_remote(ref), - self.is_ref_branch(ref), - self.is_ref_tag(ref), - )) - - # Should deprecate `get_refs` since it's ambiguous - def get_refs(self, location): - return self.get_short_refs(location) - - def get_short_refs(self, location): - """Return map of named refs (branches or tags) to commit hashes.""" - rv = {} - for commit, ref in self.get_full_refs(location): - ref_name = None - if self.is_ref_remote(ref): - ref_name = ref[len('refs/remotes/'):] - elif self.is_ref_branch(ref): - ref_name = ref[len('refs/heads/'):] - elif self.is_ref_tag(ref): - ref_name = ref[len('refs/tags/'):] - if ref_name is not None: - rv[ref_name] = commit - return rv - - def _get_subdirectory(self, location): - """Return the relative path of setup.py to the git repo root.""" - # find the repo root - git_dir = self.run_command(['rev-parse', '--git-dir'], - show_stdout=False, cwd=location).strip() - if not os.path.isabs(git_dir): - git_dir = os.path.join(location, git_dir) - root_dir = os.path.join(git_dir, '..') - # find setup.py - orig_location = location - while not os.path.exists(os.path.join(location, 'setup.py')): - last_location = location - location = os.path.dirname(location) - if location == last_location: - # We've traversed up to the root of the filesystem without - # finding setup.py - logger.warning( - "Could not find setup.py for directory %s (tried all " - "parent directories)", - orig_location, - ) - return None - # relative path of setup.py to repo root - if samefile(root_dir, location): - 
return None - return os.path.relpath(location, root_dir) - - def get_src_requirement(self, dist, location): - repo = self.get_url(location) - if not repo.lower().startswith('git:'): - repo = 'git+' + repo - egg_project_name = dist.egg_name().split('-', 1)[0] - if not repo: - return None - current_rev = self.get_revision(location) - req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name) - subdirectory = self._get_subdirectory(location) - if subdirectory: - req += '&subdirectory=' + subdirectory - return req - - def get_url_rev(self): - """ - Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. - That's required because although they use SSH they sometimes doesn't - work with a ssh:// scheme (e.g. Github). But we need a scheme for - parsing. Hence we remove it again afterwards and return it as a stub. - """ - if '://' not in self.url: - assert 'file:' not in self.url - self.url = self.url.replace('git+', 'git+ssh://') - url, rev = super(Git, self).get_url_rev() - url = url.replace('ssh://', '') - else: - url, rev = super(Git, self).get_url_rev() - - return url, rev - - def update_submodules(self, location): - if not os.path.exists(os.path.join(location, '.gitmodules')): - return - self.run_command( - ['submodule', 'update', '--init', '--recursive', '-q'], - cwd=location, - ) - - @classmethod - def controls_location(cls, location): - if super(Git, cls).controls_location(location): - return True - try: - r = cls().run_command(['rev-parse'], - cwd=location, - show_stdout=False, - on_returncode='ignore') - return not r - except BadCommand: - logger.debug("could not determine if %s is under git control " - "because git is not available", location) - return False - - -vcs.register(Git) diff --git a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/AUTHORS.txt b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/AUTHORS.txt new file mode 100644 index 0000000..e845ac7 --- /dev/null +++ 
b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/AUTHORS.txt @@ -0,0 +1,421 @@ +Adam Chainz +Adam Wentz +Adrien Morison +Alan Yee +Aleks Bunin +Alex Gaynor +Alex Grönholm +Alex Morega +Alex Stachowiak +Alexander Shtyrov +Alexandre Conrad +Alli +Anatoly Techtonik +Andrei Geacar +Andrew Gaul +Andrey Bulgakov +Andrés Delfino <34587441+andresdelfino@users.noreply.github.com> +Andrés Delfino +Andy Freeland +Andy Kluger +Anish Tambe +Anrs Hu +Anthony Sottile +Antoine Musso +Anton Ovchinnikov +Anton Patrushev +Antonio Alvarado Hernandez +Antony Lee +Antti Kaihola +Anubhav Patel +Anuj Godase +AQNOUCH Mohammed +AraHaan +Arindam Choudhury +Armin Ronacher +Ashley Manton +Atsushi Odagiri +Avner Cohen +Baptiste Mispelon +Barney Gale +barneygale +Bartek Ogryczak +Bastian Venthur +Ben Darnell +Ben Hoyt +Ben Rosser +Bence Nagy +Benjamin VanEvery +Benoit Pierre +Berker Peksag +Bernardo B. Marques +Bernhard M. Wiedemann +Bogdan Opanchuk +Brad Erickson +Bradley Ayers +Brandon L. Reiss +Brett Randall +Brian Rosner +BrownTruck +Bruno Oliveira +Bruno Renié +Bstrdsmkr +Buck Golemon +burrows +Bussonnier Matthias +c22 +Calvin Smith +Carl Meyer +Carlos Liam +Carol Willing +Carter Thayer +Cass +Chandrasekhar Atina +Chris Brinker +Chris Jerdonek +Chris McDonough +Chris Wolfe +Christian Heimes +Christian Oudard +Christopher Snyder +Clark Boylan +Clay McClure +Cody +Cody Soyland +Colin Watson +Connor Osborn +Cooper Lees +Cooper Ry Lees +Cory Benfield +Cory Wright +Craig Kerstiens +Cristian Sorinel +Curtis Doty +Damian Quiroga +Dan Black +Dan Savilonis +Dan Sully +daniel +Daniel Collins +Daniel Hahler +Daniel Holth +Daniel Jost +Daniel Shaulov +Daniele Procida +Danny Hermes +Dav Clark +Dave Abrahams +David Aguilar +David Black +David Caro +David Evans +David Linke +David Pursehouse +David Tucker +David Wales +Davidovich +derwolfe +Dmitry Gladkov +Domen Kožar +Donald Stufft +Dongweiming +Douglas Thor +DrFeathers +Dustin Ingram +Dwayne Bailey +Ed Morley 
<501702+edmorley@users.noreply.github.com> +Ed Morley +Eli Schwartz +Emil Styrke +Endoh Takanao +enoch +Eric Gillingham +Eric Hanchrow +Eric Hopper +Erik M. Bray +Erik Rose +Ernest W Durbin III +Ernest W. Durbin III +Erwin Janssen +Eugene Vereshchagin +fiber-space +Filip Kokosiński +Florian Briand +Francesco +Francesco Montesano +Gabriel Curio +Gabriel de Perthuis +Garry Polley +gdanielson +Geoffrey Lehée +Geoffrey Sneddon +George Song +Georgi Valkov +Giftlin Rajaiah +gizmoguy1 +gkdoc <40815324+gkdoc@users.noreply.github.com> +GOTO Hayato <3532528+gh640@users.noreply.github.com> +Guilherme Espada +Guy Rozendorn +Hari Charan +Herbert Pfennig +Hsiaoming Yang +Hugo +Hugo Lopes Tavares +hugovk +Hynek Schlawack +Ian Bicking +Ian Cordasco +Ian Lee +Ian Stapleton Cordasco +Ian Wienand +Ian Wienand +Igor Kuzmitshov +Igor Sobreira +Ilya Baryshev +INADA Naoki +Ionel Cristian Mărieș +Ionel Maries Cristian +Jakub Stasiak +Jakub Vysoky +Jakub Wilk +James Cleveland +James Cleveland +James Firth +James Polley +Jan Pokorný +Jannis Leidel +jarondl +Jason R. 
Coombs +Jay Graves +Jean-Christophe Fillion-Robin +Jeff Barber +Jeff Dairiki +Jeremy Stanley +Jeremy Zafran +Jim Garrison +Jivan Amara +John-Scott Atlakson +Jon Banafato +Jon Dufresne +Jon Parise +Jon Wayne Parrott +Jonas Nockert +Jonathan Herbert +Joost Molenaar +Jorge Niedbalski +Joseph Long +Josh Bronson +Josh Hansen +Josh Schneier +Julien Demoor +jwg4 +Jyrki Pulliainen +Kamal Bin Mustafa +kaustav haldar +keanemind +Kelsey Hightower +Kenneth Belitzky +Kenneth Reitz +Kenneth Reitz +Kevin Burke +Kevin Carter +Kevin Frommelt +Kexuan Sun +Kit Randel +kpinc +Kumar McMillan +Kyle Persohn +Laurent Bristiel +Laurie Opperman +Leon Sasson +Lev Givon +Lincoln de Sousa +Lipis +Loren Carvalho +Lucas Cimon +Ludovic Gasc +Luke Macken +Luo Jiebin +luojiebin +luz.paz +Marc Abramowitz +Marc Tamlyn +Marcus Smith +Mariatta +Mark Kohler +Markus Hametner +Masklinn +Matej Stuchlik +Mathew Jennings +Mathieu Bridon +Matt Good +Matt Maker +Matt Robenolt +matthew +Matthew Einhorn +Matthew Gilliard +Matthew Iversen +Matthew Trumbell +Matthew Willson +Matthias Bussonnier +mattip +Maxim Kurnikov +Maxime Rouyrre +memoselyk +Michael +Michael Aquilina +Michael E. Karpeles +Michael Klich +Michael Williamson +michaelpacer +Mickaël Schoentgen +Miguel Araujo Perez +Mihir Singh +Min RK +MinRK +Miro Hrončok +montefra +Monty Taylor +Nate Coraor +Nathaniel J. 
Smith +Nehal J Wani +Nick Coghlan +Nick Stenning +Nikhil Benesch +Nitesh Sharma +Nowell Strite +nvdv +Ofekmeister +Oliver Jeeves +Oliver Tonnhofer +Olivier Girardot +Olivier Grisel +Ollie Rutherfurd +OMOTO Kenji +Oren Held +Oscar Benjamin +Oz N Tiram +Patrick Dubroy +Patrick Jenkins +Patrick Lawson +patricktokeeffe +Paul Kehrer +Paul Moore +Paul Nasrat +Paul Oswald +Paul van der Linden +Paulus Schoutsen +Pawel Jasinski +Pekka Klärck +Peter Waller +Phaneendra Chiruvella +Phil Freo +Phil Pennock +Phil Whelan +Philip Molloy +Philippe Ombredanne +Pi Delport +Pierre-Yves Rofes +pip +Pradyun Gedam +Pratik Mallya +Preston Holmes +Przemek Wrzos +Qiangning Hong +R. David Murray +Rafael Caricio +Ralf Schmitt +Razzi Abuissa +Remi Rampin +Rene Dudfield +Richard Jones +RobberPhex +Robert Collins +Robert McGibbon +Robert T. McGibbon +Roey Berman +Rohan Jain +Rohan Jain +Rohan Jain +Roman Bogorodskiy +Romuald Brunet +Ronny Pfannschmidt +Rory McCann +Ross Brattain +Roy Wellington Ⅳ +Roy Wellington Ⅳ +Ryan Wooden +ryneeverett +Sachi King +Salvatore Rinchiera +schlamar +Scott Kitterman +seanj +Sebastian Schaetz +Segev Finer +Sergey Vasilyev +Seth Woodworth +Shlomi Fish +Simeon Visser +Simon Cross +Simon Pichugin +Sorin Sbarnea +Stavros Korokithakis +Stefan Scherfke +Stephan Erb +stepshal +Steve (Gadget) Barnes +Steve Barnes +Steve Kowalik +Steven Myint +stonebig +Stéphane Bidoul (ACSONE) +Stéphane Bidoul +Stéphane Klein +Takayuki SHIMIZUKAWA +Thijs Triemstra +Thomas Fenzl +Thomas Grainger +Thomas Guettler +Thomas Johansson +Thomas Kluyver +Thomas Smith +Tim D. Smith +Tim Harder +Tim Heap +tim smith +tinruufu +Tom Freudenheim +Tom V +Tomer Chachamu +Tony Zhaocheng Tan +Toshio Kuratomi +Travis Swicegood +Tzu-ping Chung +Valentin Haenel +Victor Stinner +Viktor Szépe +Ville Skyttä +Vinay Sajip +Vincent Philippon +Vitaly Babiy +Vladimir Rutsky +W. 
Trevor King +Wil Tan +Wilfred Hughes +William ML Leslie +Wolfgang Maier +Xavier Fernandez +Xavier Fernandez +xoviat +YAMAMOTO Takashi +Yen Chi Hsuan +Yoval P +Yu Jian +Zearin +Zearin +Zhiping Deng +Zvezdan Petkovic +Łukasz Langa +Семён Марьясин diff --git a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/DESCRIPTION.rst deleted file mode 100644 index e118723..0000000 --- a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,3 +0,0 @@ -UNKNOWN - - diff --git a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/LICENSE.txt b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/LICENSE.txt new file mode 100644 index 0000000..d3379fa --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008-2018 The pip developers (see AUTHORS.txt file) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/METADATA index 7a50487..cf6c930 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/METADATA +++ b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/METADATA @@ -1,4 +1,4 @@ -Metadata-Version: 2.0 +Metadata-Version: 2.1 Name: pkg_resources Version: 0.0.0 Summary: UNKNOWN diff --git a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/RECORD index d7509e8..7c72d6a 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/RECORD +++ b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/RECORD @@ -1,26 +1,38 @@ -pkg_resources/__init__.py,sha256=bucu_98c11mzrGldEJeqxArn14F7ZmURsb-8CaNSbVo,108616 -pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pkg_resources/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 -pkg_resources/_vendor/packaging/__about__.py,sha256=YzdrW-1lWmyCBDyrcNkZbJo4tiDWXpoiqPjfyCYMzIE,1073 -pkg_resources/_vendor/packaging/__init__.py,sha256=2V8n-eEpSgBuXlV8hlMmhU7ZklpsrrusWMZNp2gC4Hs,906 -pkg_resources/_vendor/packaging/_compat.py,sha256=wofog8iYo_zudt_10i6JiXKHDs5GhCuXC09hCuSJiv4,1253 -pkg_resources/_vendor/packaging/_structures.py,sha256=93YvgrEE2HgFp8AdXy0pwCRVnZeutRHO_-puJ7T0cPw,1809 -pkg_resources/_vendor/packaging/specifiers.py,sha256=UV9T01_kKloA8PSeMI3HTYBSJ_4KLs00yLvrlciZ3yU,28079 -pkg_resources/_vendor/packaging/version.py,sha256=dEGrWZJZ6sef1xMxSfDCego2hS3Q86by0hUIFVk-AGc,11949 
-pkg_resources/extern/__init__.py,sha256=azKvXDutMVFe3c641wdiwndjtku92Bl3_iGVAIMKnsM,2461 -pkg_resources-0.0.0.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 -pkg_resources-0.0.0.dist-info/METADATA,sha256=FOYDX6cmnDUkWo-yhqWQYtjKIMZR2IW2G1GFZhA6gUQ,177 -pkg_resources-0.0.0.dist-info/RECORD,, -pkg_resources-0.0.0.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 -pkg_resources-0.0.0.dist-info/metadata.json,sha256=8ZVRFU96pY_wnWouockCkvXw981Y0iDB5nQFFGq8ZiY,221 +pkg_resources-0.0.0.dist-info/AUTHORS.txt,sha256=Pu4WdZapZ2U2wKwWxd830ZxnROCHwmV_TpWoL9dqJ-M,15880 pkg_resources-0.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,, -pkg_resources/_vendor/__pycache__/six.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,, -pkg_resources/extern/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,, +pkg_resources-0.0.0.dist-info/LICENSE.txt,sha256=ORqHhOMZ2uVDFHfUzJvFBPxdcf2eieHIDxzThV9dfPo,1090 +pkg_resources-0.0.0.dist-info/METADATA,sha256=V9_WPOtD1FnuKrTGv6Ique7kAOn2lasvT8W0_iMCCCk,177 +pkg_resources-0.0.0.dist-info/RECORD,, +pkg_resources-0.0.0.dist-info/WHEEL,sha256=_wJFdOYk7i3xxT8ElOkUJvOdOvfNGbR9g-bf6UQT6sU,110 +pkg_resources/__init__.py,sha256=1CH-AzmMwXmdx_7bCm03hV11azPdW64rzVum2ylDE7k,104406 +pkg_resources/__pycache__/__init__.cpython-37.pyc,, +pkg_resources/__pycache__/py31compat.cpython-37.pyc,, +pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+pkg_resources/_vendor/__pycache__/__init__.cpython-37.pyc,, +pkg_resources/_vendor/__pycache__/appdirs.cpython-37.pyc,, +pkg_resources/_vendor/__pycache__/pyparsing.cpython-37.pyc,, +pkg_resources/_vendor/__pycache__/six.cpython-37.pyc,, +pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701 +pkg_resources/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720 +pkg_resources/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513 +pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-37.pyc,, +pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-37.pyc,, +pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-37.pyc,, +pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-37.pyc,, +pkg_resources/_vendor/packaging/__pycache__/markers.cpython-37.pyc,, +pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-37.pyc,, +pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc,, +pkg_resources/_vendor/packaging/__pycache__/utils.cpython-37.pyc,, +pkg_resources/_vendor/packaging/__pycache__/version.cpython-37.pyc,, +pkg_resources/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860 +pkg_resources/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416 +pkg_resources/_vendor/packaging/markers.py,sha256=uEcBBtGvzqltgnArqb9c4RrcInXezDLos14zbBHhWJo,8248 +pkg_resources/_vendor/packaging/requirements.py,sha256=SikL2UynbsT0qtY9ltqngndha_sfo0w6XGFhAhoSoaQ,4355 +pkg_resources/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025 +pkg_resources/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421 +pkg_resources/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556 +pkg_resources/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055 
+pkg_resources/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 +pkg_resources/extern/__init__.py,sha256=cHiEfHuLmm6rs5Ve_ztBfMI7Lr31vss-D4wkqF5xzlI,2498 +pkg_resources/extern/__pycache__/__init__.cpython-37.pyc,, +pkg_resources/py31compat.py,sha256=-WQ0e4c3RG_acdhwC3gLiXhP_lg4G5q7XYkZkQg0gxU,558 diff --git a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/WHEEL index 8b6dd1b..c4bde30 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/WHEEL +++ b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) +Generator: bdist_wheel (0.32.3) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any diff --git a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/metadata.json deleted file mode 100644 index f7d360a..0000000 --- a/Shared/lib/python3.4/site-packages/pkg_resources-0.0.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}}}, "generator": "bdist_wheel (0.29.0)", "metadata_version": "2.0", "name": "pkg_resources", "summary": "UNKNOWN", "version": "0.0.0"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/__init__.py b/Shared/lib/python3.4/site-packages/pkg_resources/__init__.py index d04cd34..7413470 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/__init__.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/__init__.py @@ -1,3 +1,4 @@ +# coding: utf-8 """ Package resource API -------------------- @@ -28,15 +29,16 @@ import warnings import stat import functools import pkgutil -import token -import symbol import operator import platform import collections import plistlib import email.parser +import errno 
import tempfile import textwrap +import itertools +import inspect from pkgutil import get_importer try: @@ -45,6 +47,11 @@ except ImportError: # Python 3.2 compatibility import imp as _imp +try: + FileExistsError +except NameError: + FileExistsError = OSError + from pkg_resources.extern import six from pkg_resources.extern.six.moves import urllib, map, filter @@ -67,27 +74,45 @@ try: except ImportError: importlib_machinery = None -try: - import parser -except ImportError: - pass - +from . import py31compat +from pkg_resources.extern import appdirs from pkg_resources.extern import packaging __import__('pkg_resources.extern.packaging.version') __import__('pkg_resources.extern.packaging.specifiers') +__import__('pkg_resources.extern.packaging.requirements') +__import__('pkg_resources.extern.packaging.markers') -if (3, 0) < sys.version_info < (3, 3): - msg = ( - "Support for Python 3.0-3.2 has been dropped. Future versions " - "will fail here." - ) - warnings.warn(msg) +__metaclass__ = type + + +if (3, 0) < sys.version_info < (3, 4): + raise RuntimeError("Python 3.4 or later is required") + +if six.PY2: + # Those builtin exceptions are only defined in Python 3 + PermissionError = None + NotADirectoryError = None # declare some globals that will be defined later to # satisfy the linters. 
require = None working_set = None +add_activation_listener = None +resources_stream = None +cleanup_resources = None +resource_dir = None +resource_stream = None +set_extraction_path = None +resource_isdir = None +resource_string = None +iter_entry_points = None +resource_listdir = None +resource_filename = None +resource_exists = None +_distribution_finders = None +_namespace_handlers = None +_namespace_packages = None class PEP440Warning(RuntimeWarning): @@ -97,153 +122,53 @@ class PEP440Warning(RuntimeWarning): """ -class _SetuptoolsVersionMixin(object): - - def __hash__(self): - return super(_SetuptoolsVersionMixin, self).__hash__() - - def __lt__(self, other): - if isinstance(other, tuple): - return tuple(self) < other - else: - return super(_SetuptoolsVersionMixin, self).__lt__(other) - - def __le__(self, other): - if isinstance(other, tuple): - return tuple(self) <= other - else: - return super(_SetuptoolsVersionMixin, self).__le__(other) - - def __eq__(self, other): - if isinstance(other, tuple): - return tuple(self) == other - else: - return super(_SetuptoolsVersionMixin, self).__eq__(other) - - def __ge__(self, other): - if isinstance(other, tuple): - return tuple(self) >= other - else: - return super(_SetuptoolsVersionMixin, self).__ge__(other) - - def __gt__(self, other): - if isinstance(other, tuple): - return tuple(self) > other - else: - return super(_SetuptoolsVersionMixin, self).__gt__(other) - - def __ne__(self, other): - if isinstance(other, tuple): - return tuple(self) != other - else: - return super(_SetuptoolsVersionMixin, self).__ne__(other) - - def __getitem__(self, key): - return tuple(self)[key] - - def __iter__(self): - component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE) - replace = { - 'pre': 'c', - 'preview': 'c', - '-': 'final-', - 'rc': 'c', - 'dev': '@', - }.get - - def _parse_version_parts(s): - for part in component_re.split(s): - part = replace(part, part) - if not part or part == '.': - continue - if part[:1] in 
'0123456789': - # pad for numeric comparison - yield part.zfill(8) - else: - yield '*'+part - - # ensure that alpha/beta/candidate are before final - yield '*final' - - def old_parse_version(s): - parts = [] - for part in _parse_version_parts(s.lower()): - if part.startswith('*'): - # remove '-' before a prerelease tag - if part < '*final': - while parts and parts[-1] == '*final-': - parts.pop() - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == '00000000': - parts.pop() - parts.append(part) - return tuple(parts) - - # Warn for use of this function - warnings.warn( - "You have iterated over the result of " - "pkg_resources.parse_version. This is a legacy behavior which is " - "inconsistent with the new version class introduced in setuptools " - "8.0. In most cases, conversion to a tuple is unnecessary. For " - "comparison of versions, sort the Version instances directly. If " - "you have another use case requiring the tuple, please file a " - "bug with the setuptools project describing that need.", - RuntimeWarning, - stacklevel=1, - ) - - for part in old_parse_version(str(self)): - yield part - - -class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version): - pass - - -class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin, - packaging.version.LegacyVersion): - pass - - def parse_version(v): try: - return SetuptoolsVersion(v) + return packaging.version.Version(v) except packaging.version.InvalidVersion: - return SetuptoolsLegacyVersion(v) + return packaging.version.LegacyVersion(v) _state_vars = {} + def _declare_state(vartype, **kw): globals().update(kw) _state_vars.update(dict.fromkeys(kw, vartype)) + def __getstate__(): state = {} g = globals() for k, v in _state_vars.items(): - state[k] = g['_sget_'+v](g[k]) + state[k] = g['_sget_' + v](g[k]) return state + def __setstate__(state): g = globals() for k, v in state.items(): - g['_sset_'+_state_vars[k]](k, g[k], v) + g['_sset_' + _state_vars[k]](k, g[k], v) 
return state + def _sget_dict(val): return val.copy() + def _sset_dict(key, ob, state): ob.clear() ob.update(state) + def _sget_object(val): return val.__getstate__() + def _sset_object(key, ob, state): ob.__setstate__(state) + _sget_none = _sset_none = lambda *args: None @@ -270,9 +195,10 @@ def get_supported_platform(): pass return plat + __all__ = [ # Basic resource access and distribution/entry point discovery - 'require', 'run_script', 'get_provider', 'get_distribution', + 'require', 'run_script', 'get_provider', 'get_distribution', 'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points', 'resource_string', 'resource_stream', 'resource_filename', @@ -316,10 +242,12 @@ __all__ = [ 'run_main', 'AvailableDistributions', ] + class ResolutionError(Exception): """Abstract base for dependency resolution errors""" + def __repr__(self): - return self.__class__.__name__+repr(self.args) + return self.__class__.__name__ + repr(self.args) class VersionConflict(ResolutionError): @@ -396,6 +324,8 @@ class DistributionNotFound(ResolutionError): class UnknownExtra(ResolutionError): """Distribution doesn't have an "extra feature" of the given name""" + + _provider_factories = {} PY_MAJOR = sys.version[:3] @@ -405,6 +335,7 @@ SOURCE_DIST = 1 CHECKOUT_DIST = 0 DEVELOP_DIST = -1 + def register_loader_type(loader_type, provider_factory): """Register `provider_factory` to make providers for `loader_type` @@ -414,6 +345,7 @@ def register_loader_type(loader_type, provider_factory): """ _provider_factories[loader_type] = provider_factory + def get_provider(moduleOrReq): """Return an IResourceProvider for the named module or requirement""" if isinstance(moduleOrReq, Requirement): @@ -426,6 +358,7 @@ def get_provider(moduleOrReq): loader = getattr(module, '__loader__', None) return _find_adapter(_provider_factories, loader)(module) + def _macosx_vers(_cache=[]): if not _cache: version = platform.mac_ver()[0] @@ -441,34 +374,35 @@ def _macosx_vers(_cache=[]): 
_cache.append(version.split('.')) return _cache[0] + def _macosx_arch(machine): return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) + def get_build_platform(): """Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X. """ - try: - # Python 2.7 or >=3.2 - from sysconfig import get_platform - except ImportError: - from distutils.util import get_platform + from sysconfig import get_platform plat = get_platform() if sys.platform == "darwin" and not plat.startswith('macosx-'): try: version = _macosx_vers() machine = os.uname()[4].replace(" ", "_") - return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]), - _macosx_arch(machine)) + return "macosx-%d.%d-%s" % ( + int(version[0]), int(version[1]), + _macosx_arch(machine), + ) except ValueError: # if someone is running a non-Mac darwin system, this will fall # through to the default implementation pass return plat + macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") # XXX backward compat @@ -482,7 +416,7 @@ def compatible_platforms(provided, required): XXX Needs compatibility checks for Linux and other unixy OSes. 
""" - if provided is None or required is None or provided==required: + if provided is None or required is None or provided == required: # easy case return True @@ -529,9 +463,11 @@ def run_script(dist_spec, script_name): ns['__name__'] = name require(dist_spec)[0].run_script(script_name, ns) + # backward compatibility run_main = run_script + def get_distribution(dist): """Return a current distribution object for a Requirement or string""" if isinstance(dist, six.string_types): @@ -542,21 +478,23 @@ def get_distribution(dist): raise TypeError("Expected string, Requirement, or Distribution", dist) return dist + def load_entry_point(dist, group, name): """Return `name` entry point of `group` for `dist` or raise ImportError""" return get_distribution(dist).load_entry_point(group, name) + def get_entry_map(dist, group=None): """Return the entry point map for `group`, or the full entry map""" return get_distribution(dist).get_entry_map(group) + def get_entry_info(dist, group, name): """Return the EntryPoint object for `group`+`name`, or ``None``""" return get_distribution(dist).get_entry_info(group, name) class IMetadataProvider: - def has_metadata(name): """Does the package's distribution contain the named metadata?""" @@ -607,7 +545,7 @@ class IResourceProvider(IMetadataProvider): """List of resource names in the directory (like ``os.listdir()``)""" -class WorkingSet(object): +class WorkingSet: """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): @@ -707,13 +645,12 @@ class WorkingSet(object): distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). 
""" - for dist in self: - entries = dist.get_entry_map(group) - if name is None: - for ep in entries.values(): - yield ep - elif name in entries: - yield entries[name] + return ( + entry + for dist in self + for entry in dist.get_entry_map(group).values() + if name is None or name == entry.name + ) def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" @@ -737,7 +674,7 @@ class WorkingSet(object): for key in self.entry_keys[item]: if key not in seen: - seen[key]=1 + seen[key] = 1 yield self.by_key[key] def add(self, dist, entry=None, insert=True, replace=False): @@ -757,8 +694,8 @@ class WorkingSet(object): if entry is None: entry = dist.location - keys = self.entry_keys.setdefault(entry,[]) - keys2 = self.entry_keys.setdefault(dist.location,[]) + keys = self.entry_keys.setdefault(entry, []) + keys2 = self.entry_keys.setdefault(dist.location, []) if not replace and dist.key in self.by_key: # ignore hidden distros return @@ -771,7 +708,7 @@ class WorkingSet(object): self._added_new(dist) def resolve(self, requirements, env=None, installer=None, - replace_conflicting=False): + replace_conflicting=False, extras=None): """List all distributions needed to (recursively) meet `requirements` `requirements` must be a sequence of ``Requirement`` objects. `env`, @@ -782,11 +719,18 @@ class WorkingSet(object): already-installed distribution; it should return a ``Distribution`` or ``None``. - Unless `replace_conflicting=True`, raises a VersionConflict exception if + Unless `replace_conflicting=True`, raises a VersionConflict exception + if any requirements are found on the path that have the correct name but the wrong version. Otherwise, if an `installer` is supplied it will be invoked to obtain the correct version of the requirement and activate it. + + `extras` is a list of the extras to be used with these requirements. 
+ This is important because extra requirements may look like `my_req; + extra = "my_extra"`, which would otherwise be interpreted as a purely + optional requirement. Instead, we want to be able to assert that these + requirements are truly required. """ # set up the stack @@ -797,6 +741,8 @@ class WorkingSet(object): best = {} to_activate = [] + req_extras = _ReqExtras() + # Mapping of requirement to set of distributions that required it; # useful for reporting info about conflicts. required_by = collections.defaultdict(set) @@ -807,6 +753,10 @@ class WorkingSet(object): if req in processed: # Ignore cyclic or redundant dependencies continue + + if not req_extras.markers_pass(req, extras): + continue + dist = best.get(req.key) if dist is None: # Find the best distribution and add it to the map @@ -822,7 +772,10 @@ class WorkingSet(object): # distribution env = Environment([]) ws = WorkingSet([]) - dist = best[req.key] = env.best_match(req, ws, installer) + dist = best[req.key] = env.best_match( + req, ws, installer, + replace_conflicting=replace_conflicting + ) if dist is None: requirers = required_by.get(req, None) raise DistributionNotFound(req, requirers) @@ -839,14 +792,15 @@ class WorkingSet(object): # Register the new requirements needed by req for new_requirement in new_requirements: required_by[new_requirement].add(req.project_name) + req_extras[new_requirement] = req.extras processed[req] = True # return list of distros to activate return to_activate - def find_plugins(self, plugin_env, full_env=None, installer=None, - fallback=True): + def find_plugins( + self, plugin_env, full_env=None, installer=None, fallback=True): """Find all activatable distributions in `plugin_env` Example usage:: @@ -945,11 +899,17 @@ class WorkingSet(object): return needed - def subscribe(self, callback): - """Invoke `callback` for all distributions (including existing ones)""" + def subscribe(self, callback, existing=True): + """Invoke `callback` for all distributions + + If 
`existing=True` (default), + call on all existing ones, as well. + """ if callback in self.callbacks: return self.callbacks.append(callback) + if not existing: + return for dist in self: callback(dist) @@ -971,10 +931,31 @@ class WorkingSet(object): self.callbacks = callbacks[:] -class Environment(object): +class _ReqExtras(dict): + """ + Map each requirement to the extras that demanded it. + """ + + def markers_pass(self, req, extras=None): + """ + Evaluate markers for req against each extra that + demanded it. + + Return False if the req has a marker and fails + evaluation. Otherwise, return True. + """ + extra_evals = ( + req.marker.evaluate({'extra': extra}) + for extra in self.get(req, ()) + (extras or (None,)) + ) + return not req.marker or any(extra_evals) + + +class Environment: """Searchable snapshot of distributions on a search path""" - def __init__(self, search_path=None, platform=get_supported_platform(), + def __init__( + self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR): """Snapshot distributions available on a search path @@ -985,7 +966,7 @@ class Environment(object): `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an - optional string naming the desired version of Python (e.g. ``'3.3'``); + optional string naming the desired version of Python (e.g. ``'3.6'``); it defaults to the current version. You may explicitly set `platform` (and/or `python`) to ``None`` if you @@ -1004,9 +985,12 @@ class Environment(object): requirements specified when this environment was created, or False is returned. 
""" - return (self.python is None or dist.py_version is None - or dist.py_version==self.python) \ - and compatible_platforms(dist.platform, self.platform) + py_compat = ( + self.python is None + or dist.py_version is None + or dist.py_version == self.python + ) + return py_compat and compatible_platforms(dist.platform, self.platform) def remove(self, dist): """Remove `dist` from the environment""" @@ -1047,7 +1031,8 @@ class Environment(object): dists.append(dist) dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - def best_match(self, req, working_set, installer=None): + def best_match( + self, req, working_set, installer=None, replace_conflicting=False): """Find distribution best matching `req` and usable on `working_set` This calls the ``find(req)`` method of the `working_set` to see if a @@ -1060,7 +1045,12 @@ class Environment(object): calling the environment's ``obtain(req, installer)`` method will be returned. """ - dist = working_set.find(req) + try: + dist = working_set.find(req) + except VersionConflict: + if not replace_conflicting: + raise + dist = None if dist is not None: return dist for dist in self[req.key]: @@ -1177,8 +1167,8 @@ class ResourceManager: tmpl = textwrap.dedent(""" Can't extract file(s) to egg cache - The following error occurred while trying to extract file(s) to the Python egg - cache: + The following error occurred while trying to extract file(s) + to the Python egg cache: {old_exc} @@ -1186,9 +1176,9 @@ class ResourceManager: {cache_path} - Perhaps your account does not have write access to this directory? You can - change the cache directory by setting the PYTHON_EGG_CACHE environment - variable to point to an accessible directory. + Perhaps your account does not have write access to this directory? + You can change the cache directory by setting the PYTHON_EGG_CACHE + environment variable to point to an accessible directory. 
""").lstrip() err = ExtractionError(tmpl.format(**locals())) err.manager = self @@ -1210,10 +1200,10 @@ class ResourceManager: extract, as it tracks the generated names for possible cleanup later. """ extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name+'-tmp', *names) + target_path = os.path.join(extract_path, archive_name + '-tmp', *names) try: _bypass_ensure_directory(target_path) - except: + except Exception: self.extraction_error() self._warn_unsafe_extraction_path(extract_path) @@ -1238,11 +1228,13 @@ class ResourceManager: return mode = os.stat(path).st_mode if mode & stat.S_IWOTH or mode & stat.S_IWGRP: - msg = ("%s is writable by group/others and vulnerable to attack " + msg = ( + "%s is writable by group/others and vulnerable to attack " "when " "used with get_resource_filename. Consider a more secure " "location (set with .set_extraction_path or the " - "PYTHON_EGG_CACHE environment variable)." % path) + "PYTHON_EGG_CACHE environment variable)." % path + ) warnings.warn(msg, UserWarning) def postprocess(self, tempname, filename): @@ -1304,49 +1296,18 @@ class ResourceManager: """ # XXX + def get_default_cache(): - """Determine the default cache location - - This returns the ``PYTHON_EGG_CACHE`` environment variable, if set. - Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the - "Application Data" directory. On all other systems, it's "~/.python-eggs". """ - try: - return os.environ['PYTHON_EGG_CACHE'] - except KeyError: - pass + Return the ``PYTHON_EGG_CACHE`` environment variable + or a platform-relevant user cache dir for an app + named "Python-Eggs". + """ + return ( + os.environ.get('PYTHON_EGG_CACHE') + or appdirs.user_cache_dir(appname='Python-Eggs') + ) - if os.name!='nt': - return os.path.expanduser('~/.python-eggs') - - # XXX this may be locale-specific! 
- app_data = 'Application Data' - app_homes = [ - # best option, should be locale-safe - (('APPDATA',), None), - (('USERPROFILE',), app_data), - (('HOMEDRIVE','HOMEPATH'), app_data), - (('HOMEPATH',), app_data), - (('HOME',), None), - # 95/98/ME - (('WINDIR',), app_data), - ] - - for keys, subdir in app_homes: - dirname = '' - for key in keys: - if key in os.environ: - dirname = os.path.join(dirname, os.environ[key]) - else: - break - else: - if subdir: - dirname = os.path.join(dirname, subdir) - return os.path.join(dirname, 'Python-Eggs') - else: - raise RuntimeError( - "Please set the PYTHON_EGG_CACHE enviroment variable" - ) def safe_name(name): """Convert an arbitrary string to a standard distribution name @@ -1364,7 +1325,7 @@ def safe_version(version): # normalize the version return str(packaging.version.Version(version)) except packaging.version.InvalidVersion: - version = version.replace(' ','.') + version = version.replace(' ', '.') return re.sub('[^A-Za-z0-9.]+', '-', version) @@ -1374,7 +1335,7 @@ def safe_extra(extra): Any runs of non-alphanumeric characters are replaced with a single '_', and the result is always lowercased. """ - return re.sub('[^A-Za-z0-9.]+', '_', extra).lower() + return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() def to_filename(name): @@ -1382,205 +1343,37 @@ def to_filename(name): Any '-' characters are currently replaced with '_'. 
""" - return name.replace('-','_') + return name.replace('-', '_') -class MarkerEvaluation(object): - values = { - 'os_name': lambda: os.name, - 'sys_platform': lambda: sys.platform, - 'python_full_version': platform.python_version, - 'python_version': lambda: platform.python_version()[:3], - 'platform_version': platform.version, - 'platform_machine': platform.machine, - 'platform_python_implementation': platform.python_implementation, - 'python_implementation': platform.python_implementation, - } +def invalid_marker(text): + """ + Validate text as a PEP 508 environment marker; return an exception + if invalid or False otherwise. + """ + try: + evaluate_marker(text) + except SyntaxError as e: + e.filename = None + e.lineno = None + return e + return False - @classmethod - def is_invalid_marker(cls, text): - """ - Validate text as a PEP 426 environment marker; return an exception - if invalid or False otherwise. - """ - try: - cls.evaluate_marker(text) - except SyntaxError as e: - return cls.normalize_exception(e) - return False - @staticmethod - def normalize_exception(exc): - """ - Given a SyntaxError from a marker evaluation, normalize the error - message: - - Remove indications of filename and line number. - - Replace platform-specific error messages with standard error - messages. - """ - subs = { - 'unexpected EOF while parsing': 'invalid syntax', - 'parenthesis is never closed': 'invalid syntax', - } - exc.filename = None - exc.lineno = None - exc.msg = subs.get(exc.msg, exc.msg) - return exc +def evaluate_marker(text, extra=None): + """ + Evaluate a PEP 508 environment marker. + Return a boolean indicating the marker result in this environment. + Raise SyntaxError if marker is invalid. - @classmethod - def and_test(cls, nodelist): - # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! 
- items = [ - cls.interpret(nodelist[i]) - for i in range(1, len(nodelist), 2) - ] - return functools.reduce(operator.and_, items) + This implementation uses the 'pyparsing' module. + """ + try: + marker = packaging.markers.Marker(text) + return marker.evaluate() + except packaging.markers.InvalidMarker as e: + raise SyntaxError(e) - @classmethod - def test(cls, nodelist): - # MUST NOT short-circuit evaluation, or invalid syntax can be skipped! - items = [ - cls.interpret(nodelist[i]) - for i in range(1, len(nodelist), 2) - ] - return functools.reduce(operator.or_, items) - - @classmethod - def atom(cls, nodelist): - t = nodelist[1][0] - if t == token.LPAR: - if nodelist[2][0] == token.RPAR: - raise SyntaxError("Empty parentheses") - return cls.interpret(nodelist[2]) - msg = "Language feature not supported in environment markers" - raise SyntaxError(msg) - - @classmethod - def comparison(cls, nodelist): - if len(nodelist) > 4: - msg = "Chained comparison not allowed in environment markers" - raise SyntaxError(msg) - comp = nodelist[2][1] - cop = comp[1] - if comp[0] == token.NAME: - if len(nodelist[2]) == 3: - if cop == 'not': - cop = 'not in' - else: - cop = 'is not' - try: - cop = cls.get_op(cop) - except KeyError: - msg = repr(cop) + " operator not allowed in environment markers" - raise SyntaxError(msg) - return cop(cls.evaluate(nodelist[1]), cls.evaluate(nodelist[3])) - - @classmethod - def get_op(cls, op): - ops = { - symbol.test: cls.test, - symbol.and_test: cls.and_test, - symbol.atom: cls.atom, - symbol.comparison: cls.comparison, - 'not in': lambda x, y: x not in y, - 'in': lambda x, y: x in y, - '==': operator.eq, - '!=': operator.ne, - '<': operator.lt, - '>': operator.gt, - '<=': operator.le, - '>=': operator.ge, - } - if hasattr(symbol, 'or_test'): - ops[symbol.or_test] = cls.test - return ops[op] - - @classmethod - def evaluate_marker(cls, text, extra=None): - """ - Evaluate a PEP 426 environment marker on CPython 2.4+. 
- Return a boolean indicating the marker result in this environment. - Raise SyntaxError if marker is invalid. - - This implementation uses the 'parser' module, which is not implemented - on - Jython and has been superseded by the 'ast' module in Python 2.6 and - later. - """ - return cls.interpret(parser.expr(text).totuple(1)[1]) - - @staticmethod - def _translate_metadata2(env): - """ - Markerlib implements Metadata 1.2 (PEP 345) environment markers. - Translate the variables to Metadata 2.0 (PEP 426). - """ - return dict( - (key.replace('.', '_'), value) - for key, value in env.items() - ) - - @classmethod - def _markerlib_evaluate(cls, text): - """ - Evaluate a PEP 426 environment marker using markerlib. - Return a boolean indicating the marker result in this environment. - Raise SyntaxError if marker is invalid. - """ - import _markerlib - - env = cls._translate_metadata2(_markerlib.default_environment()) - try: - result = _markerlib.interpret(text, env) - except NameError as e: - raise SyntaxError(e.args[0]) - return result - - if 'parser' not in globals(): - # Fall back to less-complete _markerlib implementation if 'parser' module - # is not available. 
- evaluate_marker = _markerlib_evaluate - - @classmethod - def interpret(cls, nodelist): - while len(nodelist)==2: nodelist = nodelist[1] - try: - op = cls.get_op(nodelist[0]) - except KeyError: - raise SyntaxError("Comparison or logical expression expected") - return op(nodelist) - - @classmethod - def evaluate(cls, nodelist): - while len(nodelist)==2: nodelist = nodelist[1] - kind = nodelist[0] - name = nodelist[1] - if kind==token.NAME: - try: - op = cls.values[name] - except KeyError: - raise SyntaxError("Unknown name %r" % name) - return op() - if kind==token.STRING: - s = nodelist[1] - if not cls._safe_string(s): - raise SyntaxError( - "Only plain strings allowed in environment markers") - return s[1:-1] - msg = "Language feature not supported in environment markers" - raise SyntaxError(msg) - - @staticmethod - def _safe_string(cand): - return ( - cand[:1] in "'\"" and - not cand.startswith('"""') and - not cand.startswith("'''") and - '\\' not in cand - ) - -invalid_marker = MarkerEvaluation.is_invalid_marker -evaluate_marker = MarkerEvaluation.evaluate_marker class NullProvider: """Try to implement resources and metadata for arbitrary PEP 302 loaders""" @@ -1608,16 +1401,11 @@ class NullProvider: def has_metadata(self, name): return self.egg_info and self._has(self._fn(self.egg_info, name)) - if sys.version_info <= (3,): - def get_metadata(self, name): - if not self.egg_info: - return "" - return self._get(self._fn(self.egg_info, name)) - else: - def get_metadata(self, name): - if not self.egg_info: - return "" - return self._get(self._fn(self.egg_info, name)).decode("utf-8") + def get_metadata(self, name): + if not self.egg_info: + return "" + value = self._get(self._fn(self.egg_info, name)) + return value.decode('utf-8') if six.PY3 else value def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) @@ -1637,9 +1425,12 @@ class NullProvider: return [] def run_script(self, script_name, namespace): - script = 'scripts/'+script_name + 
script = 'scripts/' + script_name if not self.has_metadata(script): - raise ResolutionError("No script named %r" % script_name) + raise ResolutionError( + "Script {script!r} not found in metadata at {self.egg_info!r}" + .format(**locals()), + ) script_text = self.get_metadata(script).replace('\r\n', '\n') script_text = script_text.replace('\r', '\n') script_filename = self._fn(self.egg_info, script) @@ -1653,7 +1444,7 @@ class NullProvider: cache[script_filename] = ( len(script_text), 0, script_text.split('\n'), script_filename ) - script_code = compile(script_text, script_filename,'exec') + script_code = compile(script_text, script_filename, 'exec') exec(script_code, namespace, namespace) def _has(self, path): @@ -1683,6 +1474,7 @@ class NullProvider: "Can't perform this operation for loaders without 'get_data()'" ) + register_loader_type(object, NullProvider) @@ -1698,8 +1490,8 @@ class EggProvider(NullProvider): # of multiple eggs; that's why we use module_path instead of .archive path = self.module_path old = None - while path!=old: - if _is_unpacked_egg(path): + while path != old: + if _is_egg_path(path): self.egg_name = os.path.basename(path) self.egg_info = os.path.join(path, 'EGG-INFO') self.egg_root = path @@ -1707,6 +1499,7 @@ class EggProvider(NullProvider): old = path path, base = os.path.split(path) + class DefaultProvider(EggProvider): """Provides access to package resources in the filesystem""" @@ -1728,9 +1521,11 @@ class DefaultProvider(EggProvider): @classmethod def _register(cls): - loader_cls = getattr(importlib_machinery, 'SourceFileLoader', - type(None)) - register_loader_type(loader_cls, cls) + loader_names = 'SourceFileLoader', 'SourcelessFileLoader', + for name in loader_names: + loader_cls = getattr(importlib_machinery, name, type(None)) + register_loader_type(loader_cls, cls) + DefaultProvider._register() @@ -1738,14 +1533,20 @@ DefaultProvider._register() class EmptyProvider(NullProvider): """Provider that returns nothing for all 
requests""" - _isdir = _has = lambda self, path: False - _get = lambda self, path: '' - _listdir = lambda self, path: [] module_path = None + _isdir = _has = lambda self, path: False + + def _get(self, path): + return '' + + def _listdir(self, path): + return [] + def __init__(self): pass + empty_provider = EmptyProvider() @@ -1763,7 +1564,7 @@ class ZipManifests(dict): Use a platform-specific path separator (os.sep) for the path keys for compatibility with pypy on Windows. """ - with ContextualZipFile(path) as zfile: + with zipfile.ZipFile(path) as zfile: items = ( ( name.replace('/', os.sep), @@ -1796,26 +1597,6 @@ class MemoizedZipManifests(ZipManifests): return self[path].manifest -class ContextualZipFile(zipfile.ZipFile): - """ - Supplement ZipFile class to support context manager for Python 2.6 - """ - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def __new__(cls, *args, **kwargs): - """ - Construct a ZipFile or ContextualZipFile as appropriate - """ - if hasattr(zipfile.ZipFile, '__exit__'): - return zipfile.ZipFile(*args, **kwargs) - return super(ContextualZipFile, cls).__new__(cls) - - class ZipProvider(EggProvider): """Resource support for zips and eggs""" @@ -1824,11 +1605,14 @@ class ZipProvider(EggProvider): def __init__(self, module): EggProvider.__init__(self, module) - self.zip_pre = self.loader.archive+os.sep + self.zip_pre = self.loader.archive + os.sep def _zipinfo_name(self, fspath): # Convert a virtual filename (full path to file) into a zipfile subpath # usable with the zipimport directory cache for our target archive + fspath = fspath.rstrip(os.sep) + if fspath == self.loader.archive: + return '' if fspath.startswith(self.zip_pre): return fspath[len(self.zip_pre):] raise AssertionError( @@ -1838,9 +1622,9 @@ class ZipProvider(EggProvider): def _parts(self, zip_path): # Convert a zipfile subpath into an egg-relative path part list. 
# pseudo-fs path - fspath = self.zip_pre+zip_path - if fspath.startswith(self.egg_root+os.sep): - return fspath[len(self.egg_root)+1:].split(os.sep) + fspath = self.zip_pre + zip_path + if fspath.startswith(self.egg_root + os.sep): + return fspath[len(self.egg_root) + 1:].split(os.sep) raise AssertionError( "%s is not a subpath of %s" % (fspath, self.egg_root) ) @@ -1895,7 +1679,10 @@ class ZipProvider(EggProvider): if self._is_current(real_path, zip_path): return real_path - outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path)) + outf, tmpnam = _mkstemp( + ".$extract", + dir=os.path.dirname(real_path), + ) os.write(outf, self.loader.get_data(zip_path)) os.close(outf) utime(tmpnam, (timestamp, timestamp)) @@ -1911,7 +1698,7 @@ class ZipProvider(EggProvider): # so proceed. return real_path # Windows, del old file and retry - elif os.name=='nt': + elif os.name == 'nt': unlink(real_path) rename(tmpnam, real_path) return real_path @@ -1931,7 +1718,7 @@ class ZipProvider(EggProvider): if not os.path.isfile(file_path): return False stat = os.stat(file_path) - if stat.st_size!=size or stat.st_mtime!=timestamp: + if stat.st_size != size or stat.st_mtime != timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) @@ -1981,6 +1768,7 @@ class ZipProvider(EggProvider): def _resource_to_zip(self, resource_name): return self._zipinfo_name(self._fn(self.module_path, resource_name)) + register_loader_type(zipimport.zipimporter, ZipProvider) @@ -2000,14 +1788,24 @@ class FileMetadata(EmptyProvider): self.path = path def has_metadata(self, name): - return name=='PKG-INFO' and os.path.isfile(self.path) + return name == 'PKG-INFO' and os.path.isfile(self.path) def get_metadata(self, name): - if name=='PKG-INFO': - with io.open(self.path, encoding='utf-8') as f: - metadata = f.read() - return metadata - raise KeyError("No metadata except PKG-INFO is available") + if name != 'PKG-INFO': + raise KeyError("No metadata except 
PKG-INFO is available") + + with io.open(self.path, encoding='utf-8', errors="replace") as f: + metadata = f.read() + self._warn_on_replacement(metadata) + return metadata + + def _warn_on_replacement(self, metadata): + # Python 2.7 compat for: replacement_char = '�' + replacement_char = b'\xef\xbf\xbd'.decode('utf-8') + if replacement_char in metadata: + tmpl = "{self.path} could not be properly decoded in UTF-8" + msg = tmpl.format(**locals()) + warnings.warn(msg) def get_metadata_lines(self, name): return yield_lines(self.get_metadata(name)) @@ -2044,7 +1842,7 @@ class EggMetadata(ZipProvider): def __init__(self, importer): """Create a metadata provider from a zipimporter""" - self.zip_pre = importer.archive+os.sep + self.zip_pre = importer.archive + os.sep self.loader = importer if importer.prefix: self.module_path = os.path.join(importer.archive, importer.prefix) @@ -2052,7 +1850,9 @@ class EggMetadata(ZipProvider): self.module_path = importer.archive self._setup_prefix() -_declare_state('dict', _distribution_finders = {}) + +_declare_state('dict', _distribution_finders={}) + def register_finder(importer_type, distribution_finder): """Register `distribution_finder` to find distributions in sys.path items @@ -2070,6 +1870,7 @@ def find_distributions(path_item, only=False): finder = _find_adapter(_distribution_finders, importer) return finder(importer, path_item, only) + def find_eggs_in_zip(importer, path_item, only=False): """ Find eggs in zip files; possibly multiple nested eggs. 
@@ -2085,57 +1886,181 @@ def find_eggs_in_zip(importer, path_item, only=False): # don't yield nested distros return for subitem in metadata.resource_listdir('/'): - if _is_unpacked_egg(subitem): + if _is_egg_path(subitem): subpath = os.path.join(path_item, subitem) - for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath): + dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) + for dist in dists: yield dist + elif subitem.lower().endswith('.dist-info'): + subpath = os.path.join(path_item, subitem) + submeta = EggMetadata(zipimport.zipimporter(subpath)) + submeta.egg_info = subpath + yield Distribution.from_location(path_item, subitem, submeta) + register_finder(zipimport.zipimporter, find_eggs_in_zip) + def find_nothing(importer, path_item, only=False): return () + + register_finder(object, find_nothing) + +def _by_version_descending(names): + """ + Given a list of filenames, return them in descending order + by version number. + + >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' + >>> _by_version_descending(names) + ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] + >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' + >>> _by_version_descending(names) + ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] + >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' + >>> _by_version_descending(names) + ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] + """ + def _by_version(name): + """ + Parse each component of the filename + """ + name, ext = os.path.splitext(name) + parts = itertools.chain(name.split('-'), [ext]) + return [packaging.version.parse(part) for part in parts] + + return sorted(names, key=_by_version, reverse=True) + + def find_on_path(importer, path_item, only=False): """Yield distributions accessible on a sys.path directory""" path_item = _normalize_cached(path_item) - if os.path.isdir(path_item) and os.access(path_item, os.R_OK): - if _is_unpacked_egg(path_item): - yield 
Distribution.from_filename( - path_item, metadata=PathMetadata( - path_item, os.path.join(path_item,'EGG-INFO') - ) + if _is_unpacked_egg(path_item): + yield Distribution.from_filename( + path_item, metadata=PathMetadata( + path_item, os.path.join(path_item, 'EGG-INFO') ) - else: - # scan for .egg and .egg-info in directory - for entry in os.listdir(path_item): - lower = entry.lower() - if lower.endswith('.egg-info') or lower.endswith('.dist-info'): - fullpath = os.path.join(path_item, entry) - if os.path.isdir(fullpath): - # egg-info directory, allow getting metadata - metadata = PathMetadata(path_item, fullpath) - else: - metadata = FileMetadata(fullpath) - yield Distribution.from_location( - path_item, entry, metadata, precedence=DEVELOP_DIST - ) - elif not only and _is_unpacked_egg(entry): - dists = find_distributions(os.path.join(path_item, entry)) - for dist in dists: - yield dist - elif not only and lower.endswith('.egg-link'): - with open(os.path.join(path_item, entry)) as entry_file: - entry_lines = entry_file.readlines() - for line in entry_lines: - if not line.strip(): - continue - path = os.path.join(path_item, line.rstrip()) - dists = find_distributions(path) - for item in dists: - yield item - break + ) + return + + entries = safe_listdir(path_item) + + # for performance, before sorting by version, + # screen entries for only those that will yield + # distributions + filtered = ( + entry + for entry in entries + if dist_factory(path_item, entry, only) + ) + + # scan for .egg and .egg-info in directory + path_item_entries = _by_version_descending(filtered) + for entry in path_item_entries: + fullpath = os.path.join(path_item, entry) + factory = dist_factory(path_item, entry, only) + for dist in factory(fullpath): + yield dist + + +def dist_factory(path_item, entry, only): + """ + Return a dist_factory for a path_item and entry + """ + lower = entry.lower() + is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info'))) + return ( + 
distributions_from_metadata + if is_meta else + find_distributions + if not only and _is_egg_path(entry) else + resolve_egg_link + if not only and lower.endswith('.egg-link') else + NoDists() + ) + + +class NoDists: + """ + >>> bool(NoDists()) + False + + >>> list(NoDists()('anything')) + [] + """ + def __bool__(self): + return False + if six.PY2: + __nonzero__ = __bool__ + + def __call__(self, fullpath): + return iter(()) + + +def safe_listdir(path): + """ + Attempt to list contents of path, but suppress some exceptions. + """ + try: + return os.listdir(path) + except (PermissionError, NotADirectoryError): + pass + except OSError as e: + # Ignore the directory if does not exist, not a directory or + # permission denied + ignorable = ( + e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT) + # Python 2 on Windows needs to be handled this way :( + or getattr(e, "winerror", None) == 267 + ) + if not ignorable: + raise + return () + + +def distributions_from_metadata(path): + root = os.path.dirname(path) + if os.path.isdir(path): + if len(os.listdir(path)) == 0: + # empty metadata dir; skip + return + metadata = PathMetadata(root, path) + else: + metadata = FileMetadata(path) + entry = os.path.basename(path) + yield Distribution.from_location( + root, entry, metadata, precedence=DEVELOP_DIST, + ) + + +def non_empty_lines(path): + """ + Yield non-empty lines from file at path + """ + with open(path) as f: + for line in f: + line = line.strip() + if line: + yield line + + +def resolve_egg_link(path): + """ + Given a path to an .egg-link, resolve distributions + present in the referenced path. 
+ """ + referenced_paths = non_empty_lines(path) + resolved_paths = ( + os.path.join(os.path.dirname(path), ref) + for ref in referenced_paths + ) + dist_groups = map(find_distributions, resolved_paths) + return next(dist_groups, ()) + + register_finder(pkgutil.ImpImporter, find_on_path) if hasattr(importlib_machinery, 'FileFinder'): @@ -2162,13 +2087,19 @@ def register_namespace_handler(importer_type, namespace_handler): """ _namespace_handlers[importer_type] = namespace_handler + def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None - loader = importer.find_module(packageName) + + # capture warnings due to #1111 + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + loader = importer.find_module(packageName) + if loader is None: return None module = sys.modules.get(packageName) @@ -2176,7 +2107,7 @@ def _handle_ns(packageName, path_item): module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) - elif not hasattr(module,'__path__'): + elif not hasattr(module, '__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) @@ -2194,16 +2125,32 @@ def _rebuild_mod_path(orig_path, package_name, module): corresponding to their sys.path order """ sys_path = [_normalize_cached(p) for p in sys.path] - def position_in_sys_path(p): + + def safe_sys_path_index(entry): + """ + Workaround for #520 and #513. 
+ """ + try: + return sys_path.index(entry) + except ValueError: + return float('inf') + + def position_in_sys_path(path): """ Return the ordinal of the path based on its position in sys.path """ - parts = p.split(os.sep) - parts = parts[:-(package_name.count('.') + 1)] - return sys_path.index(_normalize_cached(os.sep.join(parts))) + path_parts = path.split(os.sep) + module_parts = package_name.count('.') + 1 + parts = path_parts[:-module_parts] + return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) - orig_path.sort(key=position_in_sys_path) - module.__path__[:] = [_normalize_cached(p) for p in orig_path] + new_path = sorted(orig_path, key=position_in_sys_path) + new_path = [_normalize_cached(p) for p in new_path] + + if isinstance(module.__path__, list): + module.__path__[:] = new_path + else: + module.__path__ = new_path def declare_namespace(packageName): @@ -2214,9 +2161,10 @@ def declare_namespace(packageName): if packageName in _namespace_packages: return - path, parent = sys.path, None - if '.' 
in packageName: - parent = '.'.join(packageName.split('.')[:-1]) + path = sys.path + parent, _, _ = packageName.rpartition('.') + + if parent: declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) @@ -2227,8 +2175,8 @@ def declare_namespace(packageName): # Track what packages are namespaces, so when new path items are added, # they can be updated - _namespace_packages.setdefault(parent,[]).append(packageName) - _namespace_packages.setdefault(packageName,[]) + _namespace_packages.setdefault(parent or None, []).append(packageName) + _namespace_packages.setdefault(packageName, []) for path_item in path: # Ensure all the parent's path items are reflected in the child, @@ -2238,29 +2186,32 @@ def declare_namespace(packageName): finally: _imp.release_lock() + def fixup_namespace_packages(path_item, parent=None): """Ensure that previously-declared namespace packages include path_item""" _imp.acquire_lock() try: - for package in _namespace_packages.get(parent,()): + for package in _namespace_packages.get(parent, ()): subpath = _handle_ns(package, path_item) if subpath: fixup_namespace_packages(subpath, package) finally: _imp.release_lock() + def file_ns_handler(importer, path_item, packageName, module): """Compute an ns-package subpath for a filesystem or zipfile importer""" subpath = os.path.join(path_item, packageName.split('.')[-1]) normalized = _normalize_cached(subpath) for item in module.__path__: - if _normalize_cached(item)==normalized: + if _normalize_cached(item) == normalized: break else: # Only return the path if it's not already there return subpath + register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) register_namespace_handler(zipimport.zipimporter, file_ns_handler) @@ -2271,12 +2222,25 @@ if hasattr(importlib_machinery, 'FileFinder'): def null_ns_handler(importer, path_item, packageName, module): return None + register_namespace_handler(object, null_ns_handler) def normalize_path(filename): """Normalize a file/dir 
name for comparison purposes""" - return os.path.normcase(os.path.realpath(filename)) + return os.path.normcase(os.path.realpath(_cygwin_patch(filename))) + + +def _cygwin_patch(filename): # pragma: nocover + """ + Contrary to POSIX 2008, on Cygwin, getcwd (3) contains + symlink components. Using + os.path.abspath() works around this limitation. A fix in os.getcwd() + would probably better, in Cygwin even more so, except + that this seems to be by design... + """ + return os.path.abspath(filename) if sys.platform == 'cygwin' else filename + def _normalize_cached(filename, _cache={}): try: @@ -2285,14 +2249,24 @@ def _normalize_cached(filename, _cache={}): _cache[filename] = result = normalize_path(filename) return result + +def _is_egg_path(path): + """ + Determine if given path appears to be an egg. + """ + return path.lower().endswith('.egg') + + def _is_unpacked_egg(path): """ Determine if given path appears to be an unpacked egg. """ return ( - path.lower().endswith('.egg') + _is_egg_path(path) and + os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) ) + def _set_parent_ns(packageName): parts = packageName.split('.') name = parts.pop() @@ -2314,18 +2288,7 @@ def yield_lines(strs): for s in yield_lines(ss): yield s -# whitespace and comment -LINE_END = re.compile(r"\s*(#.*)?$").match -# line continuation -CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match -# Distribution or extra -DISTRO = re.compile(r"\s*((\w|[-.])+)").match -# ver. 
info -VERSION = re.compile(r"\s*(<=?|>=?|===?|!=|~=)\s*((\w|[-.*_!+])+)").match -# comma between items -COMMA = re.compile(r"\s*,").match -OBRACKET = re.compile(r"\s*\[").match -CBRACKET = re.compile(r"\s*\]").match + MODULE = re.compile(r"\w+(\.\w+)*$").match EGG_NAME = re.compile( r""" @@ -2341,7 +2304,7 @@ EGG_NAME = re.compile( ).match -class EntryPoint(object): +class EntryPoint: """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): @@ -2350,7 +2313,7 @@ class EntryPoint(object): self.name = name self.module_name = module_name self.attrs = tuple(attrs) - self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras + self.extras = tuple(extras) self.dist = dist def __str__(self): @@ -2392,8 +2355,14 @@ class EntryPoint(object): def require(self, env=None, installer=None): if self.extras and not self.dist: raise UnknownExtra("Can't require() without a distribution", self) + + # Get the requirements for this entry point with all its extras and + # then resolve them. We have to pass `extras` along when resolving so + # that the working set knows what extras we want. Otherwise, for + # dist-info distributions, the working set will assume that the + # requirements for that extra are purely optional and skip over them. reqs = self.dist.requires(self.extras) - items = working_set.resolve(reqs, env, installer) + items = working_set.resolve(reqs, env, installer, extras=self.extras) list(map(working_set.add, items)) pattern = re.compile( @@ -2444,7 +2413,7 @@ class EntryPoint(object): ep = cls.parse(line, dist) if ep.name in this: raise ValueError("Duplicate entry point", group, ep.name) - this[ep.name]=ep + this[ep.name] = ep return this @classmethod @@ -2481,18 +2450,20 @@ def _version_from_file(lines): Given an iterable of lines from a Metadata file, return the value of the Version field, if present, or None otherwise. 
""" - is_version_line = lambda line: line.lower().startswith('version:') + def is_version_line(line): + return line.lower().startswith('version:') version_lines = filter(is_version_line, lines) line = next(iter(version_lines), '') _, _, value = line.partition(':') return safe_version(value.strip()) or None -class Distribution(object): +class Distribution: """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' - def __init__(self, location=None, metadata=None, project_name=None, + def __init__( + self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST): self.project_name = safe_name(project_name or 'Unknown') @@ -2506,7 +2477,7 @@ class Distribution(object): @classmethod def from_location(cls, location, basename, metadata=None, **kw): - project_name, version, py_version, platform = [None]*4 + project_name, version, py_version, platform = [None] * 4 basename, ext = os.path.splitext(basename) if ext.lower() in _distributionImpl: cls = _distributionImpl[ext.lower()] @@ -2617,23 +2588,44 @@ class Distribution(object): @property def _dep_map(self): + """ + A map of extra to its list of (direct) requirements + for this distribution, including the null extra. 
+ """ try: return self.__dep_map except AttributeError: - dm = self.__dep_map = {None: []} - for name in 'requires.txt', 'depends.txt': - for extra, reqs in split_sections(self._get_metadata(name)): - if extra: - if ':' in extra: - extra, marker = extra.split(':', 1) - if invalid_marker(marker): - # XXX warn - reqs=[] - elif not evaluate_marker(marker): - reqs=[] - extra = safe_extra(extra) or None - dm.setdefault(extra,[]).extend(parse_requirements(reqs)) - return dm + self.__dep_map = self._filter_extras(self._build_dep_map()) + return self.__dep_map + + @staticmethod + def _filter_extras(dm): + """ + Given a mapping of extras to dependencies, strip off + environment markers and filter out any dependencies + not matching the markers. + """ + for extra in list(filter(None, dm)): + new_extra = extra + reqs = dm.pop(extra) + new_extra, _, marker = extra.partition(':') + fails_marker = marker and ( + invalid_marker(marker) + or not evaluate_marker(marker) + ) + if fails_marker: + reqs = [] + new_extra = safe_extra(new_extra) or None + + dm.setdefault(new_extra, []).extend(reqs) + return dm + + def _build_dep_map(self): + dm = {} + for name in 'requires.txt', 'depends.txt': + for extra, reqs in split_sections(self._get_metadata(name)): + dm.setdefault(extra, []).extend(parse_requirements(reqs)) + return dm def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" @@ -2654,11 +2646,11 @@ class Distribution(object): for line in self.get_metadata_lines(name): yield line - def activate(self, path=None): + def activate(self, path=None, replace=False): """Ensure distribution is importable on `path` (default=sys.path)""" if path is None: path = sys.path - self.insert_on(path, replace=True) + self.insert_on(path, replace=replace) if path is sys.path: fixup_namespace_packages(self.location) for pkg in self._get_metadata('namespace_packages.txt'): @@ -2696,6 +2688,19 @@ class Distribution(object): raise AttributeError(attr) return 
getattr(self._provider, attr) + def __dir__(self): + return list( + set(super(Distribution, self).__dir__()) + | set( + attr for attr in self._provider.__dir__() + if not attr.startswith('_') + ) + ) + + if not hasattr(object, '__dir__'): + # python 2.7 not supported + del __dir__ + @classmethod def from_filename(cls, filename, metadata=None, **kw): return cls.from_location( @@ -2728,7 +2733,7 @@ class Distribution(object): self._get_metadata('entry_points.txt'), self ) if group is not None: - return ep_map.get(group,{}) + return ep_map.get(group, {}) return ep_map def get_entry_info(self, group, name): @@ -2736,7 +2741,24 @@ class Distribution(object): return self.get_entry_map(group).get(name) def insert_on(self, path, loc=None, replace=False): - """Insert self.location in path before its nearest parent directory""" + """Ensure self.location is on path + + If replace=False (default): + - If location is already in path anywhere, do nothing. + - Else: + - If it's an egg and its parent directory is on path, + insert just ahead of the parent. + - Else: add to the end of path. + If replace=True: + - If location is already on path anywhere (not eggs) + or higher priority than its parent (eggs) + do nothing. + - Else: + - If it's an egg and its parent directory is on path, + insert just ahead of the parent, + removing any lower-priority entries. + - Else: add it to the front of path. 
+ """ loc = loc or self.location if not loc: @@ -2744,13 +2766,21 @@ class Distribution(object): nloc = _normalize_cached(loc) bdir = os.path.dirname(nloc) - npath= [(p and _normalize_cached(p) or p) for p in path] + npath = [(p and _normalize_cached(p) or p) for p in path] for p, item in enumerate(npath): if item == nloc: - break + if replace: + break + else: + # don't modify path (even removing duplicates) if + # found and not replace + return elif item == bdir and self.precedence == EGG_DIST: # if it's an .egg, give it precedence over its directory + # UNLESS it's already been added to sys.path and replace=False + if (not replace) and nloc in npath[p:]: + return if path is sys.path: self.check_version_conflict() path.insert(p, loc) @@ -2768,7 +2798,7 @@ class Distribution(object): # p is the spot where we found or inserted loc; now remove duplicates while True: try: - np = npath.index(nloc, p+1) + np = npath.index(nloc, p + 1) except ValueError: break else: @@ -2808,7 +2838,7 @@ class Distribution(object): return False return True - def clone(self,**kw): + def clone(self, **kw): """Copy this distribution, substituting in any changed keyword args""" names = 'project_name version py_version platform location precedence' for attr in names.split(): @@ -2822,7 +2852,6 @@ class Distribution(object): class EggInfoDistribution(Distribution): - def _reload_version(self): """ Packages installed by distutils (e.g. numpy or scipy), @@ -2842,7 +2871,10 @@ class EggInfoDistribution(Distribution): class DistInfoDistribution(Distribution): - """Wrap an actual or potential sys.path entry w/metadata, .dist-info style""" + """ + Wrap an actual or potential sys.path entry + w/metadata, .dist-info style. 
+ """ PKG_INFO = 'METADATA' EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") @@ -2864,42 +2896,26 @@ class DistInfoDistribution(Distribution): self.__dep_map = self._compute_dependencies() return self.__dep_map - def _preparse_requirement(self, requires_dist): - """Convert 'Foobar (1); baz' to ('Foobar ==1', 'baz') - Split environment marker, add == prefix to version specifiers as - necessary, and remove parenthesis. - """ - parts = requires_dist.split(';', 1) + [''] - distvers = parts[0].strip() - mark = parts[1].strip() - distvers = re.sub(self.EQEQ, r"\1==\2\3", distvers) - distvers = distvers.replace('(', '').replace(')', '') - return (distvers, mark) - def _compute_dependencies(self): """Recompute this distribution's dependencies.""" - from _markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: - distvers, mark = self._preparse_requirement(req) - parsed = next(parse_requirements(distvers)) - parsed.marker_fn = compile_marker(mark) - reqs.append(parsed) + reqs.extend(parse_requirements(req)) def reqs_for_extra(extra): for req in reqs: - if req.marker_fn(override={'extra':extra}): + if not req.marker or req.marker.evaluate({'extra': extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: - extra = safe_extra(extra.strip()) - dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) + s_extra = safe_extra(extra.strip()) + dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm @@ -2908,10 +2924,10 @@ _distributionImpl = { '.egg': Distribution, '.egg-info': EggInfoDistribution, '.dist-info': DistInfoDistribution, - } +} -def issue_warning(*args,**kw): +def issue_warning(*args, **kw): level = 1 g = globals() try: @@ -2937,85 +2953,41 @@ def parse_requirements(strs): # create a steppable iterator, so we can handle 
\-continuations lines = iter(yield_lines(strs)) - def scan_list(ITEM, TERMINATOR, line, p, groups, item_name): - - items = [] - - while not TERMINATOR(line, p): - if CONTINUE(line, p): - try: - line = next(lines) - p = 0 - except StopIteration: - msg = "\\ must not appear on the last nonblank line" - raise RequirementParseError(msg) - - match = ITEM(line, p) - if not match: - msg = "Expected " + item_name + " in" - raise RequirementParseError(msg, line, "at", line[p:]) - - items.append(match.group(*groups)) - p = match.end() - - match = COMMA(line, p) - if match: - # skip the comma - p = match.end() - elif not TERMINATOR(line, p): - msg = "Expected ',' or end-of-list in" - raise RequirementParseError(msg, line, "at", line[p:]) - - match = TERMINATOR(line, p) - # skip the terminator, if any - if match: - p = match.end() - return line, p, items - for line in lines: - match = DISTRO(line) - if not match: - raise RequirementParseError("Missing distribution spec", line) - project_name = match.group(1) - p = match.end() - extras = [] - - match = OBRACKET(line, p) - if match: - p = match.end() - line, p, extras = scan_list( - DISTRO, CBRACKET, line, p, (1,), "'extra' name" - ) - - line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2), - "version spec") - specs = [(op, val) for op, val in specs] - yield Requirement(project_name, specs, extras) + # Drop comments -- a hash without a space may be in a URL. + if ' #' in line: + line = line[:line.find(' #')] + # If there is a line continuation, drop it, and append the next line. 
+ if line.endswith('\\'): + line = line[:-2].strip() + try: + line += next(lines) + except StopIteration: + return + yield Requirement(line) -class Requirement: - def __init__(self, project_name, specs, extras): +class Requirement(packaging.requirements.Requirement): + def __init__(self, requirement_string): """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - self.unsafe_name, project_name = project_name, safe_name(project_name) + try: + super(Requirement, self).__init__(requirement_string) + except packaging.requirements.InvalidRequirement as e: + raise RequirementParseError(str(e)) + self.unsafe_name = self.name + project_name = safe_name(self.name) self.project_name, self.key = project_name, project_name.lower() - self.specifier = packaging.specifiers.SpecifierSet( - ",".join(["".join([x, y]) for x, y in specs]) - ) - self.specs = specs - self.extras = tuple(map(safe_extra, extras)) + self.specs = [ + (spec.operator, spec.version) for spec in self.specifier] + self.extras = tuple(map(safe_extra, self.extras)) self.hashCmp = ( self.key, self.specifier, frozenset(self.extras), + str(self.marker) if self.marker else None, ) self.__hash = hash(self.hashCmp) - def __str__(self): - extras = ','.join(self.extras) - if extras: - extras = '[%s]' % extras - return '%s%s%s' % (self.project_name, extras, self.specifier) - def __eq__(self, other): return ( isinstance(other, Requirement) and @@ -3040,7 +3012,8 @@ class Requirement: def __hash__(self): return self.__hash - def __repr__(self): return "Requirement.parse(%r)" % str(self) + def __repr__(self): + return "Requirement.parse(%r)" % str(self) @staticmethod def parse(s): @@ -3048,16 +3021,20 @@ class Requirement: return req -def _get_mro(cls): - """Get an mro for a type or classic class""" - if not isinstance(cls, type): - class cls(cls, object): pass - return cls.__mro__[1:] - return cls.__mro__ +def _always_object(classes): + """ + Ensure object appears in the mro even + for old-style classes. 
+ """ + if object not in classes: + return classes + (object,) + return classes + def _find_adapter(registry, ob): """Return an adapter factory for `ob` from `registry`""" - for t in _get_mro(getattr(ob, '__class__', type(ob))): + types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) + for t in types: if t in registry: return registry[t] @@ -3065,8 +3042,7 @@ def _find_adapter(registry, ob): def ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) + py31compat.makedirs(dirname, exist_ok=True) def _bypass_ensure_directory(path): @@ -3076,7 +3052,10 @@ def _bypass_ensure_directory(path): dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) - mkdir(dirname, 0o755) + try: + mkdir(dirname, 0o755) + except FileExistsError: + pass def split_sections(s): @@ -3104,12 +3083,13 @@ def split_sections(s): # wrap up last segment yield section, content -def _mkstemp(*args,**kw): + +def _mkstemp(*args, **kw): old_open = os.open try: # temporarily bypass sandboxing os.open = os_open - return tempfile.mkstemp(*args,**kw) + return tempfile.mkstemp(*args, **kw) finally: # and then put it back os.open = old_open @@ -3133,9 +3113,11 @@ def _initialize(g=globals()): "Set up global resource manager (deliberately not state-saved)" manager = ResourceManager() g['_manager'] = manager - for name in dir(manager): - if not name.startswith('_'): - g[name] = getattr(manager, name) + g.update( + (name, getattr(manager, name)) + for name in dir(manager) + if not name.startswith('_') + ) @_call_aside @@ -3160,11 +3142,19 @@ def _initialize_master_working_set(): run_script = working_set.run_script # backward compatibility run_main = run_script - # Activate all distributions already on sys.path, and ensure that - # all distributions added to the working set in the future (e.g. 
by - # calling ``require()``) will get activated as well. - add_activation_listener(lambda dist: dist.activate()) - working_set.entries=[] + # Activate all distributions already on sys.path with replace=False and + # ensure that all distributions added to the working set in the future + # (e.g. by calling ``require()``) will get activated as well, + # with higher priority (replace=True). + tuple( + dist.activate(replace=False) + for dist in working_set + ) + add_activation_listener( + lambda dist: dist.activate(replace=True), + existing=False, + ) + working_set.entries = [] # match order list(map(working_set.add_entry, sys.path)) globals().update(locals()) diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/appdirs.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/appdirs.py new file mode 100644 index 0000000..ae67001 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/appdirs.py @@ -0,0 +1,608 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2010 ActiveState Software Inc. +# Copyright (c) 2013 Eddy Petrișor + +"""Utilities for determining application-specific dirs. + +See for details and usage. +""" +# Dev Notes: +# - MSDN on where to store app data files: +# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 +# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html +# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html + +__version_info__ = (1, 4, 3) +__version__ = '.'.join(map(str, __version_info__)) + + +import sys +import os + +PY3 = sys.version_info[0] == 3 + +if PY3: + unicode = str + +if sys.platform.startswith('java'): + import platform + os_name = platform.java_ver()[3][0] + if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. + system = 'win32' + elif os_name.startswith('Mac'): # "Mac OS X", etc. 
+ system = 'darwin' + else: # "Linux", "SunOS", "FreeBSD", etc. + # Setting this to "linux2" is not ideal, but only Windows or Mac + # are actually checked for and the rest of the module expects + # *sys.platform* style strings. + system = 'linux2' +else: + system = sys.platform + + + +def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + + for a discussion of issues. + + Typical user data directories are: + Mac OS X: ~/Library/Application Support/ + Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined + Win XP (not roaming): C:\Documents and Settings\\Application Data\\ + Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ + Win 7 (not roaming): C:\Users\\AppData\Local\\ + Win 7 (roaming): C:\Users\\AppData\Roaming\\ + + For Unix, we follow the XDG spec and support $XDG_DATA_HOME. + That means, by default "~/.local/share/". 
+ """ + if system == "win32": + if appauthor is None: + appauthor = appname + const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" + path = os.path.normpath(_get_win_folder(const)) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + elif system == 'darwin': + path = os.path.expanduser('~/Library/Application Support/') + if appname: + path = os.path.join(path, appname) + else: + path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): + r"""Return full path to the user-shared data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "multipath" is an optional parameter only applicable to *nix + which indicates that the entire list of data dirs should be + returned. By default, the first item from XDG_DATA_DIRS is + returned, or '/usr/local/share/', + if XDG_DATA_DIRS is not set + + Typical site data directories are: + Mac OS X: /Library/Application Support/ + Unix: /usr/local/share/ or /usr/share/ + Win XP: C:\Documents and Settings\All Users\Application Data\\ + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) + Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. 
+ + For Unix, this is using the $XDG_DATA_DIRS[0] default. + + WARNING: Do not use this on Windows. See the Vista-Fail note above for why. + """ + if system == "win32": + if appauthor is None: + appauthor = appname + path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + elif system == 'darwin': + path = os.path.expanduser('/Library/Application Support') + if appname: + path = os.path.join(path, appname) + else: + # XDG default for $XDG_DATA_DIRS + # only first, if multipath is False + path = os.getenv('XDG_DATA_DIRS', + os.pathsep.join(['/usr/local/share', '/usr/share'])) + pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] + if appname: + if version: + appname = os.path.join(appname, version) + pathlist = [os.sep.join([x, appname]) for x in pathlist] + + if multipath: + path = os.pathsep.join(pathlist) + else: + path = pathlist[0] + return path + + if appname and version: + path = os.path.join(path, version) + return path + + +def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific config dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. 
That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. See + + for a discussion of issues. + + Typical user config directories are: + Mac OS X: same as user_data_dir + Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined + Win *: same as user_data_dir + + For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. + That means, by default "~/.config/". + """ + if system in ["win32", "darwin"]: + path = user_data_dir(appname, appauthor, None, roaming) + else: + path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): + r"""Return full path to the user-shared data dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "multipath" is an optional parameter only applicable to *nix + which indicates that the entire list of config dirs should be + returned. By default, the first item from XDG_CONFIG_DIRS is + returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set + + Typical site config directories are: + Mac OS X: same as site_data_dir + Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in + $XDG_CONFIG_DIRS + Win *: same as site_data_dir + Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) 
+ + For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False + + WARNING: Do not use this on Windows. See the Vista-Fail note above for why. + """ + if system in ["win32", "darwin"]: + path = site_data_dir(appname, appauthor) + if appname and version: + path = os.path.join(path, version) + else: + # XDG default for $XDG_CONFIG_DIRS + # only first, if multipath is False + path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') + pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] + if appname: + if version: + appname = os.path.join(appname, version) + pathlist = [os.sep.join([x, appname]) for x in pathlist] + + if multipath: + path = os.pathsep.join(pathlist) + else: + path = pathlist[0] + return path + + +def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): + r"""Return full path to the user-specific cache dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "opinion" (boolean) can be False to disable the appending of + "Cache" to the base app data dir for Windows. See + discussion below. + + Typical user cache directories are: + Mac OS X: ~/Library/Caches/ + Unix: ~/.cache/ (XDG default) + Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache + Vista: C:\Users\\AppData\Local\\\Cache + + On Windows the only suggestion in the MSDN docs is that local settings go in + the `CSIDL_LOCAL_APPDATA` directory. 
This is identical to the non-roaming + app data dir (the default returned by `user_data_dir` above). Apps typically + put cache data somewhere *under* the given dir here. Some examples: + ...\Mozilla\Firefox\Profiles\\Cache + ...\Acme\SuperApp\Cache\1.0 + OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. + This can be disabled with the `opinion=False` option. + """ + if system == "win32": + if appauthor is None: + appauthor = appname + path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) + if appname: + if appauthor is not False: + path = os.path.join(path, appauthor, appname) + else: + path = os.path.join(path, appname) + if opinion: + path = os.path.join(path, "Cache") + elif system == 'darwin': + path = os.path.expanduser('~/Library/Caches') + if appname: + path = os.path.join(path, appname) + else: + path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): + r"""Return full path to the user-specific state dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "roaming" (boolean, default False) can be set True to use the Windows + roaming appdata directory. That means that for users on a Windows + network setup for roaming profiles, this user data will be + sync'd on login. 
See + + for a discussion of issues. + + Typical user state directories are: + Mac OS X: same as user_data_dir + Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined + Win *: same as user_data_dir + + For Unix, we follow this Debian proposal + to extend the XDG spec and support $XDG_STATE_HOME. + + That means, by default "~/.local/state/". + """ + if system in ["win32", "darwin"]: + path = user_data_dir(appname, appauthor, None, roaming) + else: + path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) + if appname: + path = os.path.join(path, appname) + if appname and version: + path = os.path.join(path, version) + return path + + +def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): + r"""Return full path to the user-specific log dir for this application. + + "appname" is the name of application. + If None, just the system directory is returned. + "appauthor" (only used on Windows) is the name of the + appauthor or distributing body for this application. Typically + it is the owning company name. This falls back to appname. You may + pass False to disable it. + "version" is an optional version path element to append to the + path. You might want to use this if you want multiple versions + of your app to be able to run independently. If used, this + would typically be ".". + Only applied when appname is present. + "opinion" (boolean) can be False to disable the appending of + "Logs" to the base app data dir for Windows, and "log" to the + base cache dir for Unix. See discussion below. + + Typical user log directories are: + Mac OS X: ~/Library/Logs/ + Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined + Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs + Vista: C:\Users\\AppData\Local\\\Logs + + On Windows the only suggestion in the MSDN docs is that local settings + go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in + examples of what some windows apps use for a logs dir.) 
+ + OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` + value for Windows and appends "log" to the user cache dir for Unix. + This can be disabled with the `opinion=False` option. + """ + if system == "darwin": + path = os.path.join( + os.path.expanduser('~/Library/Logs'), + appname) + elif system == "win32": + path = user_data_dir(appname, appauthor, version) + version = False + if opinion: + path = os.path.join(path, "Logs") + else: + path = user_cache_dir(appname, appauthor, version) + version = False + if opinion: + path = os.path.join(path, "log") + if appname and version: + path = os.path.join(path, version) + return path + + +class AppDirs(object): + """Convenience wrapper for getting application dirs.""" + def __init__(self, appname=None, appauthor=None, version=None, + roaming=False, multipath=False): + self.appname = appname + self.appauthor = appauthor + self.version = version + self.roaming = roaming + self.multipath = multipath + + @property + def user_data_dir(self): + return user_data_dir(self.appname, self.appauthor, + version=self.version, roaming=self.roaming) + + @property + def site_data_dir(self): + return site_data_dir(self.appname, self.appauthor, + version=self.version, multipath=self.multipath) + + @property + def user_config_dir(self): + return user_config_dir(self.appname, self.appauthor, + version=self.version, roaming=self.roaming) + + @property + def site_config_dir(self): + return site_config_dir(self.appname, self.appauthor, + version=self.version, multipath=self.multipath) + + @property + def user_cache_dir(self): + return user_cache_dir(self.appname, self.appauthor, + version=self.version) + + @property + def user_state_dir(self): + return user_state_dir(self.appname, self.appauthor, + version=self.version) + + @property + def user_log_dir(self): + return user_log_dir(self.appname, self.appauthor, + version=self.version) + + +#---- internal support stuff + +def _get_win_folder_from_registry(csidl_name): + """This 
is a fallback technique at best. I'm not sure if using the + registry for this guarantees us the correct answer for all CSIDL_* + names. + """ + if PY3: + import winreg as _winreg + else: + import _winreg + + shell_folder_name = { + "CSIDL_APPDATA": "AppData", + "CSIDL_COMMON_APPDATA": "Common AppData", + "CSIDL_LOCAL_APPDATA": "Local AppData", + }[csidl_name] + + key = _winreg.OpenKey( + _winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" + ) + dir, type = _winreg.QueryValueEx(key, shell_folder_name) + return dir + + +def _get_win_folder_with_pywin32(csidl_name): + from win32com.shell import shellcon, shell + dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) + # Try to make this a unicode path because SHGetFolderPath does + # not return unicode strings when there is unicode data in the + # path. + try: + dir = unicode(dir) + + # Downgrade to short path name if have highbit chars. See + # . + has_high_char = False + for c in dir: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + try: + import win32api + dir = win32api.GetShortPathName(dir) + except ImportError: + pass + except UnicodeError: + pass + return dir + + +def _get_win_folder_with_ctypes(csidl_name): + import ctypes + + csidl_const = { + "CSIDL_APPDATA": 26, + "CSIDL_COMMON_APPDATA": 35, + "CSIDL_LOCAL_APPDATA": 28, + }[csidl_name] + + buf = ctypes.create_unicode_buffer(1024) + ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) + + # Downgrade to short path name if have highbit chars. See + # . 
+ has_high_char = False + for c in buf: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf2 = ctypes.create_unicode_buffer(1024) + if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): + buf = buf2 + + return buf.value + +def _get_win_folder_with_jna(csidl_name): + import array + from com.sun import jna + from com.sun.jna.platform import win32 + + buf_size = win32.WinDef.MAX_PATH * 2 + buf = array.zeros('c', buf_size) + shell = win32.Shell32.INSTANCE + shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) + dir = jna.Native.toString(buf.tostring()).rstrip("\0") + + # Downgrade to short path name if have highbit chars. See + # . + has_high_char = False + for c in dir: + if ord(c) > 255: + has_high_char = True + break + if has_high_char: + buf = array.zeros('c', buf_size) + kernel = win32.Kernel32.INSTANCE + if kernel.GetShortPathName(dir, buf, buf_size): + dir = jna.Native.toString(buf.tostring()).rstrip("\0") + + return dir + +if system == "win32": + try: + import win32com.shell + _get_win_folder = _get_win_folder_with_pywin32 + except ImportError: + try: + from ctypes import windll + _get_win_folder = _get_win_folder_with_ctypes + except ImportError: + try: + import com.sun.jna + _get_win_folder = _get_win_folder_with_jna + except ImportError: + _get_win_folder = _get_win_folder_from_registry + + +#---- self test code + +if __name__ == "__main__": + appname = "MyApp" + appauthor = "MyCompany" + + props = ("user_data_dir", + "user_config_dir", + "user_cache_dir", + "user_state_dir", + "user_log_dir", + "site_data_dir", + "site_config_dir") + + print("-- app dirs %s --" % __version__) + + print("-- app dirs (with optional 'version')") + dirs = AppDirs(appname, appauthor, version="1.0") + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (without optional 'version')") + dirs = AppDirs(appname, appauthor) + for prop in props: + 
print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (without optional 'appauthor')") + dirs = AppDirs(appname) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) + + print("\n-- app dirs (with disabled 'appauthor')") + dirs = AppDirs(appname, appauthor=False) + for prop in props: + print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__about__.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__about__.py index eadb794..95d330e 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__about__.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__about__.py @@ -1,16 +1,6 @@ -# Copyright 2014 Donald Stufft -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
from __future__ import absolute_import, division, print_function __all__ = [ @@ -22,10 +12,10 @@ __title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "15.3" +__version__ = "16.8" -__author__ = "Donald Stufft" +__author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" -__license__ = "Apache License, Version 2.0" -__copyright__ = "Copyright 2014 %s" % __author__ +__license__ = "BSD or Apache License, Version 2.0" +__copyright__ = "Copyright 2014-2016 %s" % __author__ diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__init__.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__init__.py index c39a8ea..5ee6220 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__init__.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/__init__.py @@ -1,16 +1,6 @@ -# Copyright 2014 Donald Stufft -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
from __future__ import absolute_import, division, print_function from .__about__ import ( diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_compat.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_compat.py index 5c396ce..210bb80 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_compat.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_compat.py @@ -1,16 +1,6 @@ -# Copyright 2014 Donald Stufft -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. from __future__ import absolute_import, division, print_function import sys diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_structures.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_structures.py index 0ae9bb5..ccc2786 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_structures.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_structures.py @@ -1,16 +1,6 @@ -# Copyright 2014 Donald Stufft -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. from __future__ import absolute_import, division, print_function diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/markers.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/markers.py index 9e90601..892e578 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/markers.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/markers.py @@ -52,13 +52,26 @@ class Node(object): def __repr__(self): return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) + def serialize(self): + raise NotImplementedError + class Variable(Node): - pass + + def serialize(self): + return str(self) class Value(Node): - pass + + def serialize(self): + return '"{0}"'.format(self) + + +class Op(Node): + + def serialize(self): + return str(self) VARIABLE = ( @@ -73,9 +86,23 @@ VARIABLE = ( L("python_version") | L("sys_platform") | L("os_name") | + L("os.name") | # PEP-345 + L("sys.platform") | # PEP-345 + L("platform.version") | # PEP-345 + L("platform.machine") | # PEP-345 + L("platform.python_implementation") | # PEP-345 + L("python_implementation") | # undocumented setuptools legacy L("extra") ) -VARIABLE.setParseAction(lambda s, l, t: Variable(t[0])) +ALIASES = { + 'os.name': 'os_name', + 'sys.platform': 'sys_platform', + 'platform.version': 'platform_version', + 'platform.machine': 
'platform_machine', + 'platform.python_implementation': 'platform_python_implementation', + 'python_implementation': 'platform_python_implementation' +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) VERSION_CMP = ( L("===") | @@ -89,6 +116,7 @@ VERSION_CMP = ( ) MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) MARKER_VALUE = QuotedString("'") | QuotedString('"') MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) @@ -135,7 +163,7 @@ def _format_marker(marker, first=True): else: return "(" + " ".join(inner) + ")" elif isinstance(marker, tuple): - return '{0} {1} "{2}"'.format(*marker) + return " ".join([m.serialize() for m in marker]) else: return marker @@ -154,13 +182,13 @@ _operators = { def _eval_op(lhs, op, rhs): try: - spec = Specifier("".join([op, rhs])) + spec = Specifier("".join([op.serialize(), rhs])) except InvalidSpecifier: pass else: return spec.contains(lhs) - oper = _operators.get(op) + oper = _operators.get(op.serialize()) if oper is None: raise UndefinedComparison( "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/specifiers.py index 891664f..7f5a76c 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/specifiers.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/specifiers.py @@ -1,16 +1,6 @@ -# Copyright 2014 Donald Stufft -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. from __future__ import absolute_import, division, print_function import abc @@ -204,8 +194,8 @@ class _IndividualSpecifier(BaseSpecifier): # If our version is a prerelease, and we were not set to allow # prereleases, then we'll store it for later incase nothing # else matches this specifier. - if (parsed_version.is_prerelease - and not (prereleases or self.prereleases)): + if (parsed_version.is_prerelease and not + (prereleases or self.prereleases)): found_prereleases.append(version) # Either this is not a prerelease, or we should have been # accepting prereleases from the begining. @@ -223,23 +213,23 @@ class _IndividualSpecifier(BaseSpecifier): class LegacySpecifier(_IndividualSpecifier): - _regex = re.compile( + _regex_str = ( r""" - ^ - \s* (?P(==|!=|<=|>=|<|>)) \s* (?P - [^\s]* # We just match everything, except for whitespace since this - # is a "legacy" specifier and the version string can be just - # about anything. + [^,;\s)]* # Since this is a "legacy" specifier, and the version + # string can be just about anything, we match everything + # except for whitespace, a semi-colon for marker support, + # a closing paren since versions can be enclosed in + # them, and a comma since it's a version separator. 
) - \s* - $ - """, - re.VERBOSE | re.IGNORECASE, + """ ) + _regex = re.compile( + r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + _operators = { "==": "equal", "!=": "not_equal", @@ -284,10 +274,8 @@ def _require_version_compare(fn): class Specifier(_IndividualSpecifier): - _regex = re.compile( + _regex_str = ( r""" - ^ - \s* (?P(~=|==|!=|<=|>=|<|>|===)) (?P (?: @@ -378,12 +366,12 @@ class Specifier(_IndividualSpecifier): (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release ) ) - \s* - $ - """, - re.VERBOSE | re.IGNORECASE, + """ ) + _regex = re.compile( + r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + _operators = { "~=": "compatible", "==": "equal", @@ -409,8 +397,8 @@ class Specifier(_IndividualSpecifier): prefix = ".".join( list( itertools.takewhile( - lambda x: (not x.startswith("post") - and not x.startswith("dev")), + lambda x: (not x.startswith("post") and not + x.startswith("dev")), _version_split(spec), ) )[:-1] @@ -419,13 +407,15 @@ class Specifier(_IndividualSpecifier): # Add the prefix notation to the end of our string prefix += ".*" - return (self._get_operator(">=")(prospective, spec) - and self._get_operator("==")(prospective, prefix)) + return (self._get_operator(">=")(prospective, spec) and + self._get_operator("==")(prospective, prefix)) @_require_version_compare def _compare_equal(self, prospective, spec): # We need special logic to handle prefix matching if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + prospective = Version(prospective.public) # Split the spec out by dots, and pretend that there is an implicit # dot in between a release segment and a pre-release segment. 
spec = _version_split(spec[:-2]) # Remove the trailing .* @@ -577,8 +567,8 @@ def _pad_version(left, right): right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) # Get the rest of our versions - left_split.append(left[len(left_split):]) - right_split.append(left[len(right_split):]) + left_split.append(left[len(left_split[0]):]) + right_split.append(right[len(right_split[0]):]) # Insert our padding left_split.insert( diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/version.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/version.py index 4ba574b..83b5ee8 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/version.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/version.py @@ -1,16 +1,6 @@ -# Copyright 2014 Donald Stufft -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
from __future__ import absolute_import, division, print_function import collections diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/pyparsing.py b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/pyparsing.py index 3e02dbe..cf75e1e 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/pyparsing.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/_vendor/pyparsing.py @@ -1,6 +1,6 @@ # module pyparsing.py # -# Copyright (c) 2003-2015 Paul T. McGuire +# Copyright (c) 2003-2018 Paul T. McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -25,21 +25,25 @@ __doc__ = \ """ pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= The pyparsing module is an alternative approach to creating and executing simple grammars, vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you don't need to learn a new syntax for defining grammars or matching expressions - the parsing module provides a library of classes that you use to construct the grammar directly in Python. -Here is a program to parse "Hello, World!" (or any greeting of the form C{", !"}):: +Here is a program to parse "Hello, World!" (or any greeting of the form +C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements +(L{'+'} operator gives L{And} expressions, strings are auto-converted to +L{Literal} expressions):: from pyparsing import Word, alphas # define grammar of a greeting - greet = Word( alphas ) + "," + Word( alphas ) + "!" + greet = Word(alphas) + "," + Word(alphas) + "!" hello = "Hello, World!" 
- print (hello, "->", greet.parseString( hello )) + print (hello, "->", greet.parseString(hello)) The program outputs the following:: @@ -48,17 +52,30 @@ The program outputs the following:: The Python representation of the grammar is quite readable, owing to the self-explanatory class names, and the use of '+', '|' and '^' operators. -The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an +The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an object with named attributes. The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - quoted strings - embedded comments + + +Getting Started - +----------------- +Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing +classes inherit from. 
Use the docstrings for examples of how to: + - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes + - construct character word-group expressions using the L{Word} class + - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes + - use L{'+'}, L{'|'}, L{'^'}, and L{'&'} operators to combine simple expressions into more complex ones + - associate names with your parsed results using L{ParserElement.setResultsName} + - find some helpful expression short-cuts like L{delimitedList} and L{oneOf} + - find more useful common expressions in the L{pyparsing_common} namespace class """ -__version__ = "2.0.6" -__versionTime__ = "9 Nov 2015 19:03" +__version__ = "2.2.1" +__versionTime__ = "18 Sep 2018 00:49 UTC" __author__ = "Paul McGuire " import string @@ -70,8 +87,31 @@ import re import sre_constants import collections import pprint -import functools -import itertools +import traceback +import types +from datetime import datetime + +try: + from _thread import RLock +except ImportError: + from threading import RLock + +try: + # Python 3 + from collections.abc import Iterable + from collections.abc import MutableMapping +except ImportError: + # Python 2.7 + from collections import Iterable + from collections import MutableMapping + +try: + from collections import OrderedDict as _OrderedDict +except ImportError: + try: + from ordereddict import OrderedDict as _OrderedDict + except ImportError: + _OrderedDict = None #~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) @@ -81,21 +121,23 @@ __all__ = [ 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
'Upcase', +'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', -'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno', +'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', +'CloseMatch', 'tokenMap', 'pyparsing_common', ] -PY_3 = sys.version.startswith('3') +system_version = tuple(sys.version_info)[:3] +PY_3 = system_version[0] == 3 if PY_3: _MAX_INT = sys.maxsize basestring = str @@ -123,18 +165,11 @@ else: return str(obj) except UnicodeEncodeError: - # The Python docs (http://docs.python.org/ref/customization.html#l2h-182) - # state that "The return value must be a string object". However, does a - # unicode object (being a subclass of basestring) count as a "string - # object"? - # If so, then return a unicode object: - return unicode(obj) - # Else encode it... but how? There are many choices... :) - # Replace unprintables with escape codes? - #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors') - # Replace unprintables with question marks? 
- #return unicode(obj).encode(sys.getdefaultencoding(), 'replace') - # ... + # Else encode it + ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') + xmlcharref = Regex(r'&#\d+;') + xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) + return xmlcharref.transformString(ret) # build list of single arg builtins, tolerant of Python version, that can be used as parse actions singleArgBuiltins = [] @@ -160,7 +195,7 @@ def _xml_escape(data): class _Constants(object): pass -alphas = string.ascii_lowercase + string.ascii_uppercase +alphas = string.ascii_uppercase + string.ascii_lowercase nums = "0123456789" hexnums = nums + "ABCDEFabcdef" alphanums = alphas + nums @@ -180,6 +215,15 @@ class ParseBaseException(Exception): self.msg = msg self.pstr = pstr self.parserElement = elem + self.args = (pstr, loc, msg) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) def __getattr__( self, aname ): """supported attributes by name are: @@ -212,15 +256,26 @@ class ParseBaseException(Exception): markerString, line_str[line_column:])) return line_str.strip() def __dir__(self): - return "loc msg pstr parserElement lineno col line " \ - "markInputline __str__ __repr__".split() + return "lineno col line".split() + dir(type(self)) class ParseException(ParseBaseException): - """exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text + """ + Exception thrown when parse expressions don't match class; + supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of 
the exception text + - line - returns the line containing the exception text + + Example:: + try: + Word(nums).setName("integer").parseString("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.col)) + + prints:: + Expected integer (at char 0), (line:1, col:1) + column: 1 """ pass @@ -230,12 +285,10 @@ class ParseFatalException(ParseBaseException): pass class ParseSyntaxException(ParseFatalException): - """just like C{L{ParseFatalException}}, but thrown internally when an - C{L{ErrorStop}} ('-' operator) indicates that parsing is to stop immediately because - an unbacktrackable syntax error has been found""" - def __init__(self, pe): - super(ParseSyntaxException, self).__init__( - pe.pstr, pe.loc, pe.msg, pe.parserElement) + """just like L{ParseFatalException}, but thrown internally when an + L{ErrorStop} ('-' operator) indicates that parsing is to stop + immediately because an unbacktrackable syntax error has been found""" + pass #~ class ReparseException(ParseBaseException): #~ """Experimental class - parse actions can raise this exception to cause @@ -251,7 +304,7 @@ class ParseSyntaxException(ParseFatalException): #~ self.reparseLoc = restartLoc class RecursiveGrammarException(Exception): - """exception thrown by C{validate()} if the grammar could be improperly recursive""" + """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" def __init__( self, parseElementList ): self.parseElementTrace = parseElementList @@ -264,17 +317,50 @@ class _ParseResultsWithOffset(object): def __getitem__(self,i): return self.tup[i] def __repr__(self): - return repr(self.tup) + return repr(self.tup[0]) def setOffset(self,i): self.tup = (self.tup[0],i) class ParseResults(object): - """Structured parse results, to provide multiple means of access to the parsed data: + """ + Structured parse results, to provide multiple means of access to the parsed data: - as a list (C{len(results)}) - by list index 
(C{results[0], results[1]}, etc.) - - by attribute (C{results.}) - """ - def __new__(cls, toklist, name=None, asList=True, modal=True ): + - by attribute (C{results.} - see L{ParserElement.setResultsName}) + + Example:: + integer = Word(nums) + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + # equivalent form: + # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + # parseString returns a ParseResults object + result = date_str.parseString("1999/12/31") + + def test(s, fn=repr): + print("%s -> %s" % (s, fn(eval(s)))) + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + prints:: + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: 31 + - month: 12 + - year: 1999 + """ + def __new__(cls, toklist=None, name=None, asList=True, modal=True ): if isinstance(toklist, cls): return toklist retobj = object.__new__(cls) @@ -283,12 +369,16 @@ class ParseResults(object): # Performance tuning: we construct a *lot* of these, so keep this # constructor as small and fast as possible - def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ): + def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): if self.__doinit: self.__doinit = False self.__name = None self.__parent = None self.__accumNames = {} + self.__asList = asList + self.__modal = modal + if toklist is None: + toklist = [] if isinstance(toklist, list): self.__toklist = toklist[:] elif isinstance(toklist, _generatorType): @@ -331,7 +421,7 @@ class ParseResults(object): if isinstance(v,_ParseResultsWithOffset): self.__tokdict[k] = 
self.__tokdict.get(k,list()) + [v] sub = v[0] - elif isinstance(k,int): + elif isinstance(k,(int,slice)): self.__toklist[k] = v sub = v else: @@ -354,11 +444,6 @@ class ParseResults(object): removed = list(range(*i.indices(mylen))) removed.reverse() # fixup indices in token dictionary - #~ for name in self.__tokdict: - #~ occurrences = self.__tokdict[name] - #~ for j in removed: - #~ for k, (value, position) in enumerate(occurrences): - #~ occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) for name,occurrences in self.__tokdict.items(): for j in removed: for k, (value, position) in enumerate(occurrences): @@ -370,39 +455,52 @@ class ParseResults(object): return k in self.__tokdict def __len__( self ): return len( self.__toklist ) - def __bool__(self): return len( self.__toklist ) > 0 + def __bool__(self): return ( not not self.__toklist ) __nonzero__ = __bool__ def __iter__( self ): return iter( self.__toklist ) def __reversed__( self ): return iter( self.__toklist[::-1] ) - def iterkeys( self ): - """Returns all named result keys.""" + def _iterkeys( self ): if hasattr(self.__tokdict, "iterkeys"): return self.__tokdict.iterkeys() else: return iter(self.__tokdict) - def itervalues( self ): - """Returns all named result values.""" - return (self[k] for k in self.iterkeys()) + def _itervalues( self ): + return (self[k] for k in self._iterkeys()) - def iteritems( self ): - return ((k, self[k]) for k in self.iterkeys()) + def _iteritems( self ): + return ((k, self[k]) for k in self._iterkeys()) if PY_3: - keys = iterkeys - values = itervalues - items = iteritems + keys = _iterkeys + """Returns an iterator of all named result keys (Python 3.x only).""" + + values = _itervalues + """Returns an iterator of all named result values (Python 3.x only).""" + + items = _iteritems + """Returns an iterator of all named result key-value tuples (Python 3.x only).""" + else: + iterkeys = _iterkeys + """Returns an iterator of all named result keys (Python 2.x 
only).""" + + itervalues = _itervalues + """Returns an iterator of all named result values (Python 2.x only).""" + + iteritems = _iteritems + """Returns an iterator of all named result key-value tuples (Python 2.x only).""" + def keys( self ): - """Returns all named result keys.""" + """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.iterkeys()) def values( self ): - """Returns all named result values.""" + """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" return list(self.itervalues()) def items( self ): - """Returns all named result keys and values as a list of tuples.""" + """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" return list(self.iteritems()) def haskeys( self ): @@ -411,14 +509,39 @@ class ParseResults(object): return bool(self.__tokdict) def pop( self, *args, **kwargs): - """Removes and returns item at specified index (default=last). - Supports both list and dict semantics for pop(). If passed no - argument or an integer argument, it will use list semantics - and pop tokens from the list of parsed tokens. If passed a - non-integer argument (most likely a string), it will use dict - semantics and pop the corresponding value from any defined - results names. A second default return value argument is - supported, just as in dict.pop().""" + """ + Removes and returns item at specified index (default=C{last}). + Supports both C{list} and C{dict} semantics for C{pop()}. If passed no + argument or an integer argument, it will use C{list} semantics + and pop tokens from the list of parsed tokens. If passed a + non-integer argument (most likely a string), it will use C{dict} + semantics and pop the corresponding value from any defined + results names. A second default return value argument is + supported, just as in C{dict.pop()}. 
+ + Example:: + def remove_first(tokens): + tokens.pop(0) + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + OneOrMore(Word(nums)) + print(patt.parseString("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.addParseAction(remove_LABEL) + print(patt.parseString("AAB 123 321").dump()) + prints:: + ['AAB', '123', '321'] + - LABEL: AAB + + ['AAB', '123', '321'] + """ if not args: args = [-1] for k,v in kwargs.items(): @@ -438,39 +561,83 @@ class ParseResults(object): return defaultvalue def get(self, key, defaultValue=None): - """Returns named result matching the given key, or if there is no - such name, then returns the given C{defaultValue} or C{None} if no - C{defaultValue} is specified.""" + """ + Returns named result matching the given key, or if there is no + such name, then returns the given C{defaultValue} or C{None} if no + C{defaultValue} is specified. + + Similar to C{dict.get()}. + + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ if key in self: return self[key] else: return defaultValue def insert( self, index, insStr ): - """Inserts new element at location index in the list of parsed tokens.""" + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to C{list.insert()}. 
+ + Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] + """ self.__toklist.insert(index, insStr) # fixup indices in token dictionary - #~ for name in self.__tokdict: - #~ occurrences = self.__tokdict[name] - #~ for k, (value, position) in enumerate(occurrences): - #~ occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) for name,occurrences in self.__tokdict.items(): for k, (value, position) in enumerate(occurrences): occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) def append( self, item ): - """Add single element to end of ParseResults list of elements.""" + """ + Add single element to end of ParseResults list of elements. + + Example:: + print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] + """ self.__toklist.append(item) def extend( self, itemseq ): - """Add sequence of elements to end of ParseResults list of elements.""" + """ + Add sequence of elements to end of ParseResults list of elements. 
+ + Example:: + patt = OneOrMore(Word(alphas)) + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ if isinstance(itemseq, ParseResults): self += itemseq else: self.__toklist.extend(itemseq) def clear( self ): - """Clear all elements and results names.""" + """ + Clear all elements and results names. + """ del self.__toklist[:] self.__tokdict.clear() @@ -511,7 +678,11 @@ class ParseResults(object): def __radd__(self, other): if isinstance(other,int) and other == 0: + # useful for merging many ParseResults using sum() builtin return self.copy() + else: + # this may raise a TypeError - so be it + return other + self def __repr__( self ): return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) @@ -531,18 +702,60 @@ class ParseResults(object): return out def asList( self ): - """Returns the parse results as a nested list of matching tokens, all converted to strings.""" + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. + + Example:: + patt = OneOrMore(Word(alphas)) + result = patt.parseString("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] + + # Use asList() to create an actual list + result_list = result.asList() + print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] + """ return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] def asDict( self ): - """Returns the named parse results as dictionary.""" + """ + Returns the named parse results as a nested dictionary. 
+ + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.asDict() + print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ if PY_3: - return dict( self.items() ) + item_fn = self.items else: - return dict( self.iteritems() ) + item_fn = self.iteritems + + def toItem(obj): + if isinstance(obj, ParseResults): + if obj.haskeys(): + return obj.asDict() + else: + return [toItem(v) for v in obj] + else: + return obj + + return dict((k,toItem(v)) for k,v in item_fn()) def copy( self ): - """Returns a new copy of a C{ParseResults} object.""" + """ + Returns a new copy of a C{ParseResults} object. + """ ret = ParseResults( self.__toklist ) ret.__tokdict = self.__tokdict.copy() ret.__parent = self.__parent @@ -551,7 +764,9 @@ class ParseResults(object): return ret def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.""" + """ + (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. + """ nl = "\n" out = [] namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() @@ -617,7 +832,27 @@ class ParseResults(object): return None def getName(self): - """Returns the results name for this token expression.""" + r""" + Returns the results name for this token expression. 
Useful when several + different expressions might match at a particular location. + + Example:: + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = OneOrMore(user_data) + + result = user_info.parseString("22 111-22-3333 #221B") + for item in result: + print(item.getName(), ':', item[0]) + prints:: + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ if self.__name: return self.__name elif self.__parent: @@ -628,45 +863,77 @@ class ParseResults(object): return None elif (len(self) == 1 and len(self.__tokdict) == 1 and - self.__tokdict.values()[0][0][1] in (0,-1)): - return self.__tokdict.keys()[0] + next(iter(self.__tokdict.values()))[0][1] in (0,-1)): + return next(iter(self.__tokdict.keys())) else: return None - def dump(self,indent='',depth=0): - """Diagnostic method for listing out the contents of a C{ParseResults}. - Accepts an optional C{indent} argument so that this string can be embedded - in a nested display of other data.""" + def dump(self, indent='', depth=0, full=True): + """ + Diagnostic method for listing out the contents of a C{ParseResults}. + Accepts an optional C{indent} argument so that this string can be embedded + in a nested display of other data. 
+ + Example:: + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parseString('12/31/1999') + print(result.dump()) + prints:: + ['12', '/', '31', '/', '1999'] + - day: 1999 + - month: 31 + - year: 12 + """ out = [] NL = '\n' out.append( indent+_ustr(self.asList()) ) - if self.haskeys(): - items = sorted(self.items()) - for k,v in items: - if out: - out.append(NL) - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): - if v: - out.append( v.dump(indent,depth+1) ) + if full: + if self.haskeys(): + items = sorted((str(k), v) for k,v in self.items()) + for k,v in items: + if out: + out.append(NL) + out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) + if isinstance(v,ParseResults): + if v: + out.append( v.dump(indent,depth+1) ) + else: + out.append(_ustr(v)) else: - out.append(_ustr(v)) - else: - out.append(_ustr(v)) - elif any(isinstance(vv,ParseResults) for vv in self): - v = self - for i,vv in enumerate(v): - if isinstance(vv,ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) - else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) + out.append(repr(v)) + elif any(isinstance(vv,ParseResults) for vv in self): + v = self + for i,vv in enumerate(v): + if isinstance(vv,ParseResults): + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) + else: + out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) return "".join(out) def pprint(self, *args, **kwargs): - """Pretty-printer for parsed results as a list, using the C{pprint} module. - Accepts additional positional or keyword args as defined for the - C{pprint.pprint} method. 
(U{http://docs.python.org/3/library/pprint.html#pprint.pprint})""" + """ + Pretty-printer for parsed results as a list, using the C{pprint} module. + Accepts additional positional or keyword args as defined for the + C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) + + Example:: + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimitedList(term))) + result = func.parseString("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + prints:: + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ pprint.pprint(self.asList(), *args, **kwargs) # add support for pickle protocol @@ -690,10 +957,13 @@ class ParseResults(object): else: self.__parent = None - def __dir__(self): - return dir(super(ParseResults,self)) + list(self.keys()) + def __getnewargs__(self): + return self.__toklist, self.__name, self.__asList, self.__modal -collections.MutableMapping.register(ParseResults) + def __dir__(self): + return (dir(type(self)) + list(self.keys())) + +MutableMapping.register(ParseResults) def col (loc,strg): """Returns current column within a string, counting newlines as line separators. @@ -706,7 +976,7 @@ def col (loc,strg): positions within the parsed string. 
""" s = strg - return 1 if loc= (3,5): + def extract_stack(limit=0): + # special handling for Python 3.5.0 - extra deep call stack by 1 + offset = -3 if system_version == (3,5,0) else -2 + frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] + return [frame_summary[:2]] + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = frames[-1] + return [frame_summary[:2]] + else: + extract_stack = traceback.extract_stack + extract_tb = traceback.extract_tb + + # synthesize what would be returned by traceback.extract_stack at the call to + # user's parse action 'func', so that we don't incur call penalty at parse time + + LINE_DIFF = 6 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! + this_line = extract_stack(limit=2)[-1] + pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) + def wrapper(*args): while 1: try: @@ -778,12 +1073,33 @@ def _trim_arity(func, maxargs=2): foundArity[0] = True return ret except TypeError: - if limit[0] <= maxargs and not foundArity[0]: + # re-raise TypeErrors if they did not come from our arity testing + if foundArity[0]: + raise + else: + try: + tb = sys.exc_info()[-1] + if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: + raise + finally: + del tb + + if limit[0] <= maxargs: limit[0] += 1 continue raise + + # copy func name to wrapper for sensible debug output + func_name = "" + try: + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + wrapper.__name__ = func_name + return wrapper - + class ParserElement(object): """Abstract base level parser element class.""" DEFAULT_WHITE_CHARS = " \n\t\r" @@ -791,7 +1107,16 @@ class ParserElement(object): @staticmethod def setDefaultWhitespaceChars( chars ): - """Overrides the default whitespace chars + r""" + Overrides the default whitespace chars + + 
Example:: + # default whitespace chars are space, and newline + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.setDefaultWhitespaceChars(" \t") + OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] """ ParserElement.DEFAULT_WHITE_CHARS = chars @@ -799,8 +1124,22 @@ class ParserElement(object): def inlineLiteralsUsing(cls): """ Set class to be used for inclusion of string literals into a parser. + + Example:: + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inlineLiteralsUsing(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] """ - ParserElement.literalStringClass = cls + ParserElement._literalStringClass = cls def __init__( self, savelist=False ): self.parseAction = list() @@ -826,8 +1165,21 @@ class ParserElement(object): self.callDuringTry = False def copy( self ): - """Make a copy of this C{ParserElement}. Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element.""" + """ + Make a copy of this C{ParserElement}. Useful for defining different parse actions + for the same parsing pattern, using copies of the original parse element. 
+ + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") + integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + + print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) + prints:: + [5120, 100, 655360, 268435456] + Equivalent form of C{expr.copy()} is just C{expr()}:: + integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") + """ cpy = copy.copy( self ) cpy.parseAction = self.parseAction[:] cpy.ignoreExprs = self.ignoreExprs[:] @@ -836,7 +1188,13 @@ class ParserElement(object): return cpy def setName( self, name ): - """Define name for this expression, for use in debugging.""" + """ + Define name for this expression, makes debugging and exception messages clearer. + + Example:: + Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) + Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ self.name = name self.errmsg = "Expected " + self.name if hasattr(self,"exception"): @@ -844,15 +1202,24 @@ class ParserElement(object): return self def setResultsName( self, name, listAllMatches=False ): - """Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original C{ParserElement} object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - C{expr("name")} in place of C{expr.setResultsName("name")} - - see L{I{__call__}<__call__>}. + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. 
+ NOTE: this returns a *copy* of the original C{ParserElement} object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + C{expr("name")} in place of C{expr.setResultsName("name")} - + see L{I{__call__}<__call__>}. + + Example:: + date_str = (integer.setResultsName("year") + '/' + + integer.setResultsName("month") + '/' + + integer.setResultsName("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") """ newself = self.copy() if name.endswith("*"): @@ -881,42 +1248,76 @@ class ParserElement(object): return self def setParseAction( self, *fns, **kwargs ): - """Define action to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, - C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. + """ + Define one or more actions to perform when successfully matching parse element definition. + Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, + C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object + If the functions in fns modify the tokens, they can return them as the return + value from fn, and the modified list of tokens will replace the original. 
+ Otherwise, fn does not need to return any value. - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{parseString}} for more information - on parsing strings containing C{}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ + Optional keyword arguments: + - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{parseString}} for more information + on parsing strings containing C{}s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + + Example:: + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + # use parse action to convert to ints at parse time + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + date_str = integer + '/' + integer + '/' + integer + + # note that integer fields are now ints, not strings + date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] + """ self.parseAction = list(map(_trim_arity, list(fns))) self.callDuringTry = kwargs.get("callDuringTry", False) return self def addParseAction( self, *fns, **kwargs ): - """Add parse action to expression's list of parse actions. See L{I{setParseAction}}.""" + """ + Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}}. + + See examples in L{I{copy}}. 
+ """ self.parseAction += list(map(_trim_arity, list(fns))) self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self def addCondition(self, *fns, **kwargs): """Add a boolean predicate function to expression's list of parse actions. See - L{I{setParseAction}}. Optional keyword argument C{message} can - be used to define a custom message to be used in the raised exception.""" - msg = kwargs.get("message") or "failed user-defined condition" + L{I{setParseAction}} for function call signatures. Unlike C{setParseAction}, + functions passed to C{addCondition} need to return boolean success/fail of the condition. + + Optional keyword arguments: + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException + + Example:: + integer = Word(nums).setParseAction(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) + """ + msg = kwargs.get("message", "failed user-defined condition") + exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException for fn in fns: def pa(s,l,t): if not bool(_trim_arity(fn)(s,l,t)): - raise ParseException(s,l,msg) - return t + raise exc_type(s,l,msg) self.parseAction.append(pa) self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) return self @@ -996,7 +1397,7 @@ class ParserElement(object): else: preloc = loc tokensStart = preloc - if self.mayIndexError or loc >= len(instring): + if self.mayIndexError or preloc >= len(instring): try: loc,tokens = self.parseImpl( instring, preloc, doActions ) except IndexError: @@ -1030,7 +1431,6 @@ class ParserElement(object): 
self.resultsName, asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), modal=self.modalResults ) - if debugging: #~ print ("Matched",self,"->",retTokens.asList()) if (self.debugActions[1] ): @@ -1043,43 +1443,147 @@ class ParserElement(object): return self._parse( instring, loc, doActions=False )[0] except ParseFatalException: raise ParseException( instring, loc, self.errmsg, self) + + def canParseNext(self, instring, loc): + try: + self.tryParse(instring, loc) + except (ParseException, IndexError): + return False + else: + return True + + class _UnboundedCache(object): + def __init__(self): + cache = {} + self.not_in_cache = not_in_cache = object() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + if _OrderedDict is not None: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = _OrderedDict() + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(cache) > size: + try: + cache.popitem(False) + except KeyError: + pass + + def clear(self): + cache.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + else: + class _FifoCache(object): + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + + cache = {} + key_fifo = collections.deque([], size) + + def get(self, key): + return cache.get(key, not_in_cache) + + def set(self, key, value): + cache[key] = value + while len(key_fifo) > size: + 
cache.pop(key_fifo.popleft(), None) + key_fifo.append(key) + + def clear(self): + cache.clear() + key_fifo.clear() + + def cache_len(self): + return len(cache) + + self.get = types.MethodType(get, self) + self.set = types.MethodType(set, self) + self.clear = types.MethodType(clear, self) + self.__len__ = types.MethodType(cache_len, self) + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] # this method gets repeatedly called during backtracking with the same arguments - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - lookup = (self,instring,loc,callPreParse,doActions) - if lookup in ParserElement._exprArgCache: - value = ParserElement._exprArgCache[ lookup ] - if isinstance(value, Exception): - raise value - return (value[0],value[1].copy()) - else: - try: - value = self._parseNoCache( instring, loc, doActions, callPreParse ) - ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy()) - return value - except ParseBaseException as pe: - pe.__traceback__ = None - ParserElement._exprArgCache[ lookup ] = pe - raise + HIT, MISS = 0, 1 + lookup = (self, instring, loc, callPreParse, doActions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, doActions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy())) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if 
isinstance(value, Exception): + raise value + return (value[0], value[1].copy()) _parse = _parseNoCache - # argument cache for optimizing repeated calls when backtracking through recursive expressions - _exprArgCache = {} @staticmethod def resetCache(): - ParserElement._exprArgCache.clear() + ParserElement.packrat_cache.clear() + ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) _packratEnabled = False @staticmethod - def enablePackrat(): + def enablePackrat(cache_size_limit=128): """Enables "packrat" parsing, which adds memoizing to the parsing logic. Repeated parse attempts at the same string location (which happens often in many complex grammars) can immediately return a cached value, instead of re-executing parsing/validating code. Memoizing is done of both valid results and parsing exceptions. - + + Parameters: + - cache_size_limit - (default=C{128}) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + This speedup may break existing programs that use parse actions that have side-effects. For this reason, packrat parsing is disabled when you first import pyparsing. To activate the packrat feature, your @@ -1088,32 +1592,45 @@ class ParserElement(object): C{enablePackrat} before calling C{psyco.full()}. If you do not do this, Python will crash. For best results, call C{enablePackrat()} immediately after importing pyparsing. 
+ + Example:: + import pyparsing + pyparsing.ParserElement.enablePackrat() """ if not ParserElement._packratEnabled: ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = ParserElement._UnboundedCache() + else: + ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) ParserElement._parse = ParserElement._parseCache def parseString( self, instring, parseAll=False ): - """Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. + """ + Execute the parse expression with the given string. + This is the main interface to the client code, once the complete + expression has been built. - If you want the grammar to require that the entire input string be - successfully parsed, then set C{parseAll} to True (equivalent to ending - the grammar with C{L{StringEnd()}}). + If you want the grammar to require that the entire input string be + successfully parsed, then set C{parseAll} to True (equivalent to ending + the grammar with C{L{StringEnd()}}). - Note: C{parseString} implicitly calls C{expandtabs()} on the input string, - in order to report proper column numbers in parse actions. - If the input string contains tabs and - the grammar uses parse actions that use the C{loc} argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - calling C{parseWithTabs} on your grammar before calling C{parseString} - (see L{I{parseWithTabs}}) - - define your parse action using the full C{(s,loc,toks)} signature, and - reference the input string using the parse action's C{s} argument - - explictly expand the tabs in your input string before calling - C{parseString} + Note: C{parseString} implicitly calls C{expandtabs()} on the input string, + in order to report proper column numbers in parse actions. 
+ If the input string contains tabs and + the grammar uses parse actions that use the C{loc} argument to index into the + string being parsed, you can ensure you have a consistent view of the input + string by: + - calling C{parseWithTabs} on your grammar before calling C{parseString} + (see L{I{parseWithTabs}}) + - define your parse action using the full C{(s,loc,toks)} signature, and + reference the input string using the parse action's C{s} argument + - explictly expand the tabs in your input string before calling + C{parseString} + + Example:: + Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] + Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text """ ParserElement.resetCache() if not self.streamlined: @@ -1139,14 +1656,35 @@ class ParserElement(object): return tokens def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): - """Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - C{maxMatches} argument, to clip scanning after 'n' matches are found. If - C{overlap} is specified, then overlapping matches will be reported. + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + C{maxMatches} argument, to clip scanning after 'n' matches are found. If + C{overlap} is specified, then overlapping matches will be reported. - Note that the start and end locations are reported relative to the string - being parsed. See L{I{parseString}} for more information on parsing - strings with embedded tabs.""" + Note that the start and end locations are reported relative to the string + being parsed. See L{I{parseString}} for more information on parsing + strings with embedded tabs. 
+ + Example:: + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens,start,end in Word(alphas).scanString(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ if not self.streamlined: self.streamline() for e in self.ignoreExprs: @@ -1189,12 +1727,22 @@ class ParserElement(object): raise exc def transformString( self, instring ): - """Extension to C{L{scanString}}, to modify matching text with modified tokens that may - be returned from a parse action. To use C{transformString}, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking C{transformString()} on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. C{transformString()} returns the resulting transformed string.""" + """ + Extension to C{L{scanString}}, to modify matching text with modified tokens that may + be returned from a parse action. To use C{transformString}, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking C{transformString()} on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. C{transformString()} returns the resulting transformed string. + + Example:: + wd = Word(alphas) + wd.setParseAction(lambda toks: toks[0].title()) + + print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) + Prints:: + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. 
+ """ out = [] lastE = 0 # force preservation of s, to minimize unwanted transformation of string, and to @@ -1222,9 +1770,22 @@ class ParserElement(object): raise exc def searchString( self, instring, maxMatches=_MAX_INT ): - """Another extension to C{L{scanString}}, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - C{maxMatches} argument, to clip searching after 'n' matches are found. + """ + Another extension to C{L{scanString}}, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + C{maxMatches} argument, to clip searching after 'n' matches are found. + + Example:: + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))) + prints:: + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] """ try: return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) @@ -1235,10 +1796,42 @@ class ParserElement(object): # catch and re-raise exception from here, clears out pyparsing internal stack trace raise exc + def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): + """ + Generator method to split a string using the given expression as a separator. + May be called with optional C{maxsplit} argument, to limit the number of splits; + and the optional C{includeSeparators} argument (default=C{False}), if the separating + matching text should be included in the split results. 
+ + Example:: + punc = oneOf(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + prints:: + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + splits = 0 + last = 0 + for t,s,e in self.scanString(instring, maxMatches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + def __add__(self, other ): - """Implementation of + operator - returns C{L{And}}""" + """ + Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement + converts them to L{Literal}s by default. + + Example:: + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print (hello, "->", greet.parseString(hello)) + Prints:: + Hello, World! -> ['Hello', ',', 'World', '!'] + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1246,9 +1839,11 @@ class ParserElement(object): return And( [ self, other ] ) def __radd__(self, other ): - """Implementation of + operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of + operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1256,19 +1851,23 @@ class ParserElement(object): return other + self def __sub__(self, other): - """Implementation of - operator, returns C{L{And}} with error stop""" + """ + Implementation of - operator, returns C{L{And}} with error stop + """ if isinstance( other, basestring ): - other = 
ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) return None - return And( [ self, And._ErrorStop(), other ] ) + return self + And._ErrorStop() + other def __rsub__(self, other ): - """Implementation of - operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of - operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1276,24 +1875,24 @@ class ParserElement(object): return other - self def __mul__(self,other): - """Implementation of * operator, allows use of C{expr * 3} in place of - C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer - tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples - may also include C{None} as in: - - C{expr*(n,None)} or C{expr*(n,)} is equivalent + """ + Implementation of * operator, allows use of C{expr * 3} in place of + C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer + tuple, similar to C{{min,max}} multipliers in regular expressions. 
Tuples + may also include C{None} as in: + - C{expr*(n,None)} or C{expr*(n,)} is equivalent to C{expr*n + L{ZeroOrMore}(expr)} (read as "at least n instances of C{expr}") - - C{expr*(None,n)} is equivalent to C{expr*(0,n)} + - C{expr*(None,n)} is equivalent to C{expr*(0,n)} (read as "0 to n instances of C{expr}") - - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} - - Note that C{expr*(None,n)} does not raise an exception if - more than n exprs exist in the input stream; that is, - C{expr*(None,n)} does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - C{expr*(None,n) + ~expr} + - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} + - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} + Note that C{expr*(None,n)} does not raise an exception if + more than n exprs exist in the input stream; that is, + C{expr*(None,n)} does not enforce a maximum number of expr + occurrences. 
If this behavior is desired, then write + C{expr*(None,n) + ~expr} """ if isinstance(other,int): minElements, optElements = other,0 @@ -1347,9 +1946,11 @@ class ParserElement(object): return self.__mul__(other) def __or__(self, other ): - """Implementation of | operator - returns C{L{MatchFirst}}""" + """ + Implementation of | operator - returns C{L{MatchFirst}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1357,9 +1958,11 @@ class ParserElement(object): return MatchFirst( [ self, other ] ) def __ror__(self, other ): - """Implementation of | operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of | operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1367,9 +1970,11 @@ class ParserElement(object): return other | self def __xor__(self, other ): - """Implementation of ^ operator - returns C{L{Or}}""" + """ + Implementation of ^ operator - returns C{L{Or}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1377,9 +1982,11 @@ class ParserElement(object): return Or( [ self, other ] ) def __rxor__(self, other ): - """Implementation of ^ operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of ^ operator 
when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1387,9 +1994,11 @@ class ParserElement(object): return other ^ self def __and__(self, other ): - """Implementation of & operator - returns C{L{Each}}""" + """ + Implementation of & operator - returns C{L{Each}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1397,9 +2006,11 @@ class ParserElement(object): return Each( [ self, other ] ) def __rand__(self, other ): - """Implementation of & operator when left operand is not a C{L{ParserElement}}""" + """ + Implementation of & operator when left operand is not a C{L{ParserElement}} + """ if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) if not isinstance( other, ParserElement ): warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), SyntaxWarning, stacklevel=2) @@ -1407,41 +2018,49 @@ class ParserElement(object): return other & self def __invert__( self ): - """Implementation of ~ operator - returns C{L{NotAny}}""" + """ + Implementation of ~ operator - returns C{L{NotAny}} + """ return NotAny( self ) def __call__(self, name=None): - """Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}:: - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - could be written as:: - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") - - If C{name} is 
given with a trailing C{'*'} character, then C{listAllMatches} will be - passed as C{True}. + """ + Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. + + If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be + passed as C{True}. - If C{name} is omitted, same as calling C{L{copy}}. - """ + If C{name} is omitted, same as calling C{L{copy}}. + + Example:: + # these are equivalent + userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") + userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + """ if name is not None: return self.setResultsName(name) else: return self.copy() def suppress( self ): - """Suppresses the output of this C{ParserElement}; useful to keep punctuation from - cluttering up returned output. + """ + Suppresses the output of this C{ParserElement}; useful to keep punctuation from + cluttering up returned output. """ return Suppress( self ) def leaveWhitespace( self ): - """Disables the skipping of whitespace before matching the characters in the - C{ParserElement}'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. + """ + Disables the skipping of whitespace before matching the characters in the + C{ParserElement}'s defined pattern. This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. """ self.skipWhitespace = False return self def setWhitespaceChars( self, chars ): - """Overrides the default whitespace chars + """ + Overrides the default whitespace chars """ self.skipWhitespace = True self.whiteChars = chars @@ -1449,26 +2068,41 @@ class ParserElement(object): return self def parseWithTabs( self ): - """Overrides default behavior to expand C{}s to spaces before parsing the input string. 
- Must be called before C{parseString} when the input grammar contains elements that - match C{} characters.""" + """ + Overrides default behavior to expand C{}s to spaces before parsing the input string. + Must be called before C{parseString} when the input grammar contains elements that + match C{} characters. + """ self.keepTabs = True return self def ignore( self, other ): - """Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + + Example:: + patt = OneOrMore(Word(alphas)) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] + + patt.ignore(cStyleComment) + patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] + """ + if isinstance(other, basestring): + other = Suppress(other) + if isinstance( other, Suppress ): if other not in self.ignoreExprs: - self.ignoreExprs.append( other.copy() ) + self.ignoreExprs.append(other) else: self.ignoreExprs.append( Suppress( other.copy() ) ) return self def setDebugActions( self, startAction, successAction, exceptionAction ): - """Enable display of debugging messages while doing pattern matching.""" + """ + Enable display of debugging messages while doing pattern matching. + """ self.debugActions = (startAction or _defaultStartDebugAction, successAction or _defaultSuccessDebugAction, exceptionAction or _defaultExceptionDebugAction) @@ -1476,8 +2110,40 @@ class ParserElement(object): return self def setDebug( self, flag=True ): - """Enable display of debugging messages while doing pattern matching. - Set C{flag} to True to enable, False to disable.""" + """ + Enable display of debugging messages while doing pattern matching. + Set C{flag} to True to enable, False to disable. 
+ + Example:: + wd = Word(alphas).setName("alphaword") + integer = Word(nums).setName("numword") + term = wd | integer + + # turn on debugging for wd + wd.setDebug() + + OneOrMore(term).parseString("abc 123 xyz 890") + + prints:: + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions - custom debug actions can be + specified using L{setDebugActions}. Prior to attempting + to match the C{wd} expression, the debugging message C{"Match at loc (,)"} + is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} + message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. + """ if flag: self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) else: @@ -1499,20 +2165,22 @@ class ParserElement(object): pass def validate( self, validateTrace=[] ): - """Check defined expressions for valid structure, check for infinite recursive definitions.""" + """ + Check defined expressions for valid structure, check for infinite recursive definitions. + """ self.checkRecursion( [] ) def parseFile( self, file_or_filename, parseAll=False ): - """Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. 
+ """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. """ try: file_contents = file_or_filename.read() except AttributeError: - f = open(file_or_filename, "r") - file_contents = f.read() - f.close() + with open(file_or_filename, "r") as f: + file_contents = f.read() try: return self.parseString(file_contents, parseAll) except ParseBaseException as exc: @@ -1524,13 +2192,9 @@ class ParserElement(object): def __eq__(self,other): if isinstance(other, ParserElement): - return self is other or self.__dict__ == other.__dict__ + return self is other or vars(self) == vars(other) elif isinstance(other, basestring): - try: - self.parseString(_ustr(other), parseAll=True) - return True - except ParseBaseException: - return False + return self.matches(other) else: return super(ParserElement,self)==other @@ -1546,40 +2210,169 @@ class ParserElement(object): def __rne__(self,other): return not (self == other) - def runTests(self, tests, parseAll=False): - """Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. + def matches(self, testString, parseAll=True): + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. 
- Parameters: - - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default=False) - flag to pass to C{L{parseString}} when running tests + Parameters: + - testString - to test against this expression for a match + - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests + + Example:: + expr = Word(nums) + assert expr.matches("100") + """ + try: + self.parseString(_ustr(testString), parseAll=parseAll) + return True + except ParseBaseException: + return False + + def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False): + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. + + Parameters: + - tests - a list of separate test strings, or a multiline string of test strings + - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests + - comment - (default=C{'#'}) - expression for indicating embedded comments in the test + string; pass None to disable comment filtering + - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; + if False, only dump nested list + - printResults - (default=C{True}) prints test output to stdout + - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing + + Returns: a (success, results) tuple, where success indicates that all tests succeeded + (or failed if C{failureTests} is True), and the results contain a list of lines of each + test's output + + Example:: + number_expr = pyparsing_common.number.copy() + + result = number_expr.runTests(''' + # unsigned integer + 100 + # negative integer + -100 + # float with scientific notation + 6.02e23 + # integer with scientific notation + 1e-12 + ''') + print("Success" if result[0] else "Failed!") + + result = 
number_expr.runTests(''' + # stray character + 100Z + # missing leading digit before '.' + -.100 + # too many '.' + 3.14.159 + ''', failureTests=True) + print("Success" if result[0] else "Failed!") + prints:: + # unsigned integer + 100 + [100] + + # negative integer + -100 + [-100] + + # float with scientific notation + 6.02e23 + [6.02e+23] + + # integer with scientific notation + 1e-12 + [1e-12] + + Success + + # stray character + 100Z + ^ + FAIL: Expected end of text (at char 3), (line:1, col:4) + + # missing leading digit before '.' + -.100 + ^ + FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) + + # too many '.' + 3.14.159 + ^ + FAIL: Expected end of text (at char 4), (line:1, col:5) + + Success + + Each test string must be on a single line. If you want to test a string that spans multiple + lines, create a test like this:: + + expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") + + (Note that this is a raw string literal, you must include the leading 'r'.) 
""" if isinstance(tests, basestring): - tests = map(str.strip, tests.splitlines()) + tests = list(map(str.strip, tests.rstrip().splitlines())) + if isinstance(comment, basestring): + comment = Literal(comment) + allResults = [] + comments = [] + success = True for t in tests: - out = [t] + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append(t) + continue + if not t: + continue + out = ['\n'.join(comments), t] + comments = [] try: - out.append(self.parseString(t, parseAll=parseAll).dump()) - except ParseException as pe: + t = t.replace(r'\n','\n') + result = self.parseString(t, parseAll=parseAll) + out.append(result.dump(full=fullDump)) + success = success and not failureTests + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" if '\n' in t: out.append(line(pe.loc, t)) - out.append(' '*(col(pe.loc,t)-1) + '^') + out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) else: - out.append(' '*pe.loc + '^') - out.append(str(pe)) - out.append('') - print('\n'.join(out)) + out.append(' '*pe.loc + '^' + fatal) + out.append("FAIL: " + str(pe)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: " + str(exc)) + success = success and failureTests + result = exc + + if printResults: + if fullDump: + out.append('') + print('\n'.join(out)) + + allResults.append((t, result)) + + return success, allResults class Token(ParserElement): - """Abstract C{ParserElement} subclass, for defining atomic matching patterns.""" + """ + Abstract C{ParserElement} subclass, for defining atomic matching patterns. + """ def __init__( self ): super(Token,self).__init__( savelist=False ) class Empty(Token): - """An empty token, will always match.""" + """ + An empty token, will always match. 
+ """ def __init__( self ): super(Empty,self).__init__() self.name = "Empty" @@ -1588,7 +2381,9 @@ class Empty(Token): class NoMatch(Token): - """A token that will never match.""" + """ + A token that will never match. + """ def __init__( self ): super(NoMatch,self).__init__() self.name = "NoMatch" @@ -1601,7 +2396,19 @@ class NoMatch(Token): class Literal(Token): - """Token to exactly match a specified string.""" + """ + Token to exactly match a specified string. + + Example:: + Literal('blah').parseString('blah') # -> ['blah'] + Literal('blah').parseString('blahfooblah') # -> ['blah'] + Literal('blah').parseString('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use L{CaselessLiteral}. + + For keyword matching (force word break before and after the matched string), + use L{Keyword} or L{CaselessKeyword}. + """ def __init__( self, matchString ): super(Literal,self).__init__() self.match = matchString @@ -1627,22 +2434,31 @@ class Literal(Token): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) _L = Literal -ParserElement.literalStringClass = Literal +ParserElement._literalStringClass = Literal class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with C{L{Literal}}:: - Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}. - Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} - Accepts two optional constructor arguments in addition to the keyword string: - C{identChars} is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive - matching, default is C{False}. + """ + Token to exactly match a specified string as a keyword, that is, it must be + immediately followed by a non-keyword character. 
Compare with C{L{Literal}}: + - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. + - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} + Accepts two optional constructor arguments in addition to the keyword string: + - C{identChars} is a string of characters that would be valid identifier characters, + defaulting to all alphanumerics + "_" and "$" + - C{caseless} allows case-insensitive matching, default is C{False}. + + Example:: + Keyword("start").parseString("start") # -> ['start'] + Keyword("start").parseString("starting") # -> Exception + + For case-insensitive matching, use L{CaselessKeyword}. """ DEFAULT_KEYWORD_CHARS = alphanums+"_$" - def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ): + def __init__( self, matchString, identChars=None, caseless=False ): super(Keyword,self).__init__() + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS self.match = matchString self.matchLen = len(matchString) try: @@ -1686,9 +2502,15 @@ class Keyword(Token): Keyword.DEFAULT_KEYWORD_CHARS = chars class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for L{CaselessKeyword}.) 
""" def __init__( self, matchString ): super(CaselessLiteral,self).__init__( matchString.upper() ) @@ -1703,7 +2525,15 @@ class CaselessLiteral(Literal): raise ParseException(instring, loc, self.errmsg, self) class CaselessKeyword(Keyword): - def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ): + """ + Caseless version of L{Keyword}. + + Example:: + OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] + + (Contrast with example for L{CaselessLiteral}.) + """ + def __init__( self, matchString, identChars=None ): super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) def parseImpl( self, instring, loc, doActions=True ): @@ -1712,17 +2542,113 @@ class CaselessKeyword(Keyword): return loc+self.matchLen, self.match raise ParseException(instring, loc, self.errmsg, self) +class CloseMatch(Token): + """ + A variation on L{Literal} which matches "close" matches, that is, + strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters: + - C{match_string} - string to be matched + - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match + + The results from a successful parse will contain the matched text from the input string and the following named results: + - C{mismatches} - a list of the positions within the match_string where mismatches were found + - C{original} - the original match_string used to compare against the input string + + If C{mismatches} is an empty list, then the match was an exact match. 
+ + Example:: + patt = CloseMatch("ATCATCGAATGGA") + patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) + patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + def __init__(self, match_string, maxMismatches=1): + super(CloseMatch,self).__init__() + self.name = match_string + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) + self.mayIndexError = False + self.mayReturnEmpty = False + + def parseImpl( self, instring, loc, doActions=True ): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): + src,mat = s_m + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = match_stringloc + 1 + results = ParseResults([instring[start:loc]]) + results['original'] = self.match_string + results['mismatches'] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + class Word(Token): - """Token for matching words composed of allowed character sets. 
- Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. An optional - C{exclude} parameter can list characters that might be found in - the input C{bodyChars} string; useful to define a word of all printables - except for one or two characters, for instance. + """ + Token for matching words composed of allowed character sets. + Defined with string containing all allowed initial characters, + an optional string containing allowed body characters (if omitted, + defaults to the initial character set), and an optional minimum, + maximum, and/or exact length. The default value for C{min} is 1 (a + minimum value < 1 is not valid); the default values for C{max} and C{exact} + are 0, meaning no maximum or exact length restriction. An optional + C{excludeChars} parameter can list characters that might be found in + the input C{bodyChars} string; useful to define a word of all printables + except for one or two characters, for instance. + + L{srange} is useful for defining custom character set strings for defining + C{Word} expressions, using range notation from regular expression character sets. + + A common mistake is to use C{Word} to match a specific literal string, as in + C{Word("Address")}. Remember that C{Word} uses the string argument to define + I{sets} of matchable characters. This expression would match "Add", "AAA", + "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. + To match an exact literal string, use L{Literal} or L{Keyword}. 
+ + pyparsing includes helper strings for building Words: + - L{alphas} + - L{nums} + - L{alphanums} + - L{hexnums} + - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) + - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - L{printables} (any non-whitespace character) + + Example:: + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums+'-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, excludeChars=",") """ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): super(Word,self).__init__() @@ -1775,7 +2701,7 @@ class Word(Token): self.reString = r"\b"+self.reString+r"\b" try: self.re = re.compile( self.reString ) - except: + except Exception: self.re = None def parseImpl( self, instring, loc, doActions=True ): @@ -1816,7 +2742,7 @@ class Word(Token): def __str__( self ): try: return super(Word,self).__str__() - except: + except Exception: pass @@ -1837,8 +2763,17 @@ class Word(Token): class Regex(Token): - """Token for matching strings that match a given regular expression. - Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. + r""" + Token for matching strings that match a given regular expression. + Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. + If the given regex contains named groups (defined using C{(?P...)}), these will be preserved as + named parse results. 
+ + Example:: + realnum = Regex(r"[+-]?\d+\.\d*") + date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') + # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") """ compiledREtype = type(re.compile("[A-Z]")) def __init__( self, pattern, flags=0): @@ -1846,7 +2781,7 @@ class Regex(Token): super(Regex,self).__init__() if isinstance(pattern, basestring): - if len(pattern) == 0: + if not pattern: warnings.warn("null string passed to Regex; use Empty() instead", SyntaxWarning, stacklevel=2) @@ -1891,7 +2826,7 @@ class Regex(Token): def __str__( self ): try: return super(Regex,self).__str__() - except: + except Exception: pass if self.strRepr is None: @@ -1901,23 +2836,36 @@ class Regex(Token): class QuotedString(Token): - """Token for matching strings that are delimited by quoting characters. + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + - quoteChar - string of one or more characters defining the quote delimiting string + - escChar - character to escape quotes, typically backslash (default=C{None}) + - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) + - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) + - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) + - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) + - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) 
to actual whitespace (default=C{True}) + + Example:: + qs = QuotedString('"') + print(qs.searchString('lsjdf "This is the quote" sldjf')) + complex_qs = QuotedString('{{', endQuoteChar='}}') + print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) + sql_qs = QuotedString('"', escQuote='""') + print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + prints:: + [['This is the quote']] + [['This is the "quote"']] + [['This is the quote with "embedded" quotes']] """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None): - """ - Defined with the following parameters: - - quoteChar - string of one or more characters defining the quote delimiting string - - escChar - character to escape quotes, typically backslash (default=None) - - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None) - - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - """ + def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): super(QuotedString,self).__init__() # remove white space from quote chars - wont work anyway quoteChar = quoteChar.strip() - if len(quoteChar) == 0: + if not quoteChar: warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) raise SyntaxError() @@ -1925,7 +2873,7 @@ class QuotedString(Token): endQuoteChar = quoteChar else: endQuoteChar = endQuoteChar.strip() - if len(endQuoteChar) == 0: + if not endQuoteChar: warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) raise 
SyntaxError() @@ -1937,6 +2885,7 @@ class QuotedString(Token): self.escChar = escChar self.escQuote = escQuote self.unquoteResults = unquoteResults + self.convertWhitespaceEscapes = convertWhitespaceEscapes if multiline: self.flags = re.MULTILINE | re.DOTALL @@ -1990,9 +2939,20 @@ class QuotedString(Token): ret = ret[self.quoteCharLen:-self.endQuoteCharLen] if isinstance(ret,basestring): + # replace escaped whitespace + if '\\' in ret and self.convertWhitespaceEscapes: + ws_map = { + r'\t' : '\t', + r'\n' : '\n', + r'\f' : '\f', + r'\r' : '\r', + } + for wslit,wschar in ws_map.items(): + ret = ret.replace(wslit, wschar) + # replace escaped characters if self.escChar: - ret = re.sub(self.escCharReplacePattern,"\g<1>",ret) + ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) # replace escaped quotes if self.escQuote: @@ -2003,7 +2963,7 @@ class QuotedString(Token): def __str__( self ): try: return super(QuotedString,self).__str__() - except: + except Exception: pass if self.strRepr is None: @@ -2013,11 +2973,20 @@ class QuotedString(Token): class CharsNotIn(Token): - """Token for matching words composed of characters *not* in a given set. - Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. + """ + Token for matching words composed of characters I{not} in a given set (will + include whitespace in matched characters if not listed in the provided exclusion set - see example). + Defined with string containing all disallowed characters, and an optional + minimum, maximum, and/or exact length. The default value for C{min} is 1 (a + minimum value < 1 is not valid); the default values for C{max} and C{exact} + are 0, meaning no maximum or exact length restriction. 
+ + Example:: + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) + prints:: + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] """ def __init__( self, notChars, min=1, max=0, exact=0 ): super(CharsNotIn,self).__init__() @@ -2063,7 +3032,7 @@ class CharsNotIn(Token): def __str__( self ): try: return super(CharsNotIn, self).__str__() - except: + except Exception: pass if self.strRepr is None: @@ -2075,11 +3044,13 @@ class CharsNotIn(Token): return self.strRepr class White(Token): - """Special matching class for matching whitespace. Normally, whitespace is ignored - by pyparsing grammars. This class is included when some whitespace structures - are significant. Define with a string containing the whitespace characters to be - matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, - as defined for the C{L{Word}} class.""" + """ + Special matching class for matching whitespace. Normally, whitespace is ignored + by pyparsing grammars. This class is included when some whitespace structures + are significant. Define with a string containing the whitespace characters to be + matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments, + as defined for the C{L{Word}} class. + """ whiteStrs = { " " : "", "\t": "", @@ -2131,7 +3102,9 @@ class _PositionToken(Token): self.mayIndexError = False class GoToColumn(_PositionToken): - """Token to advance to a specific column of input text; useful for tabular report scraping.""" + """ + Token to advance to a specific column of input text; useful for tabular report scraping. 
+ """ def __init__( self, colno ): super(GoToColumn,self).__init__() self.col = colno @@ -2153,28 +3126,41 @@ class GoToColumn(_PositionToken): ret = instring[ loc: newloc ] return newloc, ret + class LineStart(_PositionToken): - """Matches if current position is at the beginning of a line within the parse string""" + """ + Matches if current position is at the beginning of a line within the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + restOfLine).searchString(test): + print(t) + + Prints:: + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ def __init__( self ): super(LineStart,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) self.errmsg = "Expected start of line" - def preParse( self, instring, loc ): - preloc = super(LineStart,self).preParse(instring,loc) - if instring[preloc] == "\n": - loc += 1 - return loc - def parseImpl( self, instring, loc, doActions=True ): - if not( loc==0 or - (loc == self.preParse( instring, 0 )) or - (instring[loc-1] == "\n") ): #col(loc, instring) != 1: - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) class LineEnd(_PositionToken): - """Matches if current position is at the end of a line within the parse string""" + """ + Matches if current position is at the end of a line within the parse string + """ def __init__( self ): super(LineEnd,self).__init__() self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) @@ -2192,7 +3178,9 @@ class LineEnd(_PositionToken): raise ParseException(instring, loc, self.errmsg, self) class StringStart(_PositionToken): - """Matches if current position is at the beginning of the parse string""" + """ + Matches if current position is at the beginning of the parse string 
+ """ def __init__( self ): super(StringStart,self).__init__() self.errmsg = "Expected start of text" @@ -2205,7 +3193,9 @@ class StringStart(_PositionToken): return loc, [] class StringEnd(_PositionToken): - """Matches if current position is at the end of the parse string""" + """ + Matches if current position is at the end of the parse string + """ def __init__( self ): super(StringEnd,self).__init__() self.errmsg = "Expected end of text" @@ -2221,11 +3211,12 @@ class StringEnd(_PositionToken): raise ParseException(instring, loc, self.errmsg, self) class WordStart(_PositionToken): - """Matches if the current position is at the beginning of a Word, and - is not preceded by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of - the string being parsed, or at the beginning of a line. + """ + Matches if the current position is at the beginning of a Word, and + is not preceded by any character in a given set of C{wordChars} + (default=C{printables}). To emulate the C{\b} behavior of regular expressions, + use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of + the string being parsed, or at the beginning of a line. """ def __init__(self, wordChars = printables): super(WordStart,self).__init__() @@ -2240,11 +3231,12 @@ class WordStart(_PositionToken): return loc, [] class WordEnd(_PositionToken): - """Matches if the current position is at the end of a Word, and - is not followed by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of - the string being parsed, or at the end of a line. + """ + Matches if the current position is at the end of a Word, and + is not followed by any character in a given set of C{wordChars} + (default=C{printables}). 
To emulate the C{\b} behavior of regular expressions, + use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of + the string being parsed, or at the end of a line. """ def __init__(self, wordChars = printables): super(WordEnd,self).__init__() @@ -2262,18 +3254,21 @@ class WordEnd(_PositionToken): class ParseExpression(ParserElement): - """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.""" + """ + Abstract subclass of ParserElement, for combining and post-processing parsed tokens. + """ def __init__( self, exprs, savelist = False ): super(ParseExpression,self).__init__(savelist) if isinstance( exprs, _generatorType ): exprs = list(exprs) if isinstance( exprs, basestring ): - self.exprs = [ Literal( exprs ) ] - elif isinstance( exprs, collections.Sequence ): + self.exprs = [ ParserElement._literalStringClass( exprs ) ] + elif isinstance( exprs, Iterable ): + exprs = list(exprs) # if sequence of strings provided, wrap with Literal if all(isinstance(expr, basestring) for expr in exprs): - exprs = map(Literal, exprs) + exprs = map(ParserElement._literalStringClass, exprs) self.exprs = list(exprs) else: try: @@ -2314,7 +3309,7 @@ class ParseExpression(ParserElement): def __str__( self ): try: return super(ParseExpression,self).__str__() - except: + except Exception: pass if self.strRepr is None: @@ -2351,7 +3346,7 @@ class ParseExpression(ParserElement): self.mayReturnEmpty |= other.mayReturnEmpty self.mayIndexError |= other.mayIndexError - self.errmsg = "Expected " + str(self) + self.errmsg = "Expected " + _ustr(self) return self @@ -2371,9 +3366,19 @@ class ParseExpression(ParserElement): return ret class And(ParseExpression): - """Requires all given C{ParseExpression}s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the C{'+'} operator. + """ + Requires all given C{ParseExpression}s to be found in the given order. + Expressions may be separated by whitespace. 
+ May be constructed using the C{'+'} operator. + May also be constructed using the C{'-'} operator, which will suppress backtracking. + + Example:: + integer = Word(nums) + name_expr = OneOrMore(Word(alphas)) + + expr = And([integer("id"),name_expr("name"),integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") """ class _ErrorStop(Empty): @@ -2405,9 +3410,9 @@ class And(ParseExpression): raise except ParseBaseException as pe: pe.__traceback__ = None - raise ParseSyntaxException(pe) + raise ParseSyntaxException._from_exception(pe) except IndexError: - raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) ) + raise ParseSyntaxException(instring, len(instring), self.errmsg, self) else: loc, exprtokens = e._parse( instring, loc, doActions ) if exprtokens or exprtokens.haskeys(): @@ -2416,7 +3421,7 @@ class And(ParseExpression): def __iadd__(self, other ): if isinstance( other, basestring ): - other = Literal( other ) + other = ParserElement._literalStringClass( other ) return self.append( other ) #And( [ self, other ] ) def checkRecursion( self, parseElementList ): @@ -2437,9 +3442,18 @@ class And(ParseExpression): class Or(ParseExpression): - """Requires that at least one C{ParseExpression} is found. - If two expressions match, the expression that matches the longest string will be used. - May be constructed using the C{'^'} operator. + """ + Requires that at least one C{ParseExpression} is found. + If two expressions match, the expression that matches the longest string will be used. + May be constructed using the C{'^'} operator. + + Example:: + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) + print(number.searchString("123 3.1416 789")) + prints:: + [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): super(Or,self).__init__(exprs, savelist) @@ -2488,7 +3502,7 @@ class Or(ParseExpression): def __ixor__(self, other ): if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) return self.append( other ) #Or( [ self, other ] ) def __str__( self ): @@ -2507,9 +3521,21 @@ class Or(ParseExpression): class MatchFirst(ParseExpression): - """Requires that at least one C{ParseExpression} is found. - If two expressions match, the first one listed is the one that will match. - May be constructed using the C{'|'} operator. + """ + Requires that at least one C{ParseExpression} is found. + If two expressions match, the first one listed is the one that will match. + May be constructed using the C{'|'} operator. + + Example:: + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums) + print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] """ def __init__( self, exprs, savelist = False ): super(MatchFirst,self).__init__(exprs, savelist) @@ -2544,7 +3570,7 @@ class MatchFirst(ParseExpression): def __ior__(self, other ): if isinstance( other, basestring ): - other = ParserElement.literalStringClass( other ) + other = ParserElement._literalStringClass( other ) return self.append( other ) #MatchFirst( [ self, other ] ) def __str__( self ): @@ -2563,9 +3589,58 @@ class MatchFirst(ParseExpression): class Each(ParseExpression): - """Requires all given C{ParseExpression}s to be found, but in any order. 
- Expressions may be separated by whitespace. - May be constructed using the C{'&'} operator. + """ + Requires all given C{ParseExpression}s to be found, but in any order. + Expressions may be separated by whitespace. + May be constructed using the C{'&'} operator. + + Example:: + color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) + + shape_spec.runTests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + prints:: + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 """ def __init__( self, exprs, savelist = True ): super(Each,self).__init__(exprs, savelist) @@ -2619,17 +3694,7 @@ class Each(ParseExpression): loc,results = e._parse(instring,loc,doActions) resultlist.append(results) - finalResults = ParseResults([]) - for r in 
resultlist: - dups = {} - for k in r.keys(): - if k in finalResults: - tmp = ParseResults(finalResults[k]) - tmp += ParseResults(r[k]) - dups[k] = tmp - finalResults += ParseResults(r) - for k,v in dups.items(): - finalResults[k] = v + finalResults = sum(resultlist, ParseResults([])) return loc, finalResults def __str__( self ): @@ -2648,11 +3713,16 @@ class Each(ParseExpression): class ParseElementEnhance(ParserElement): - """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.""" + """ + Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. + """ def __init__( self, expr, savelist=False ): super(ParseElementEnhance,self).__init__(savelist) if isinstance( expr, basestring ): - expr = Literal(expr) + if issubclass(ParserElement._literalStringClass, Token): + expr = ParserElement._literalStringClass(expr) + else: + expr = ParserElement._literalStringClass(Literal(expr)) self.expr = expr self.strRepr = None if expr is not None: @@ -2711,7 +3781,7 @@ class ParseElementEnhance(ParserElement): def __str__( self ): try: return super(ParseElementEnhance,self).__str__() - except: + except Exception: pass if self.strRepr is None and self.expr is not None: @@ -2720,10 +3790,22 @@ class ParseElementEnhance(ParserElement): class FollowedBy(ParseElementEnhance): - """Lookahead matching of the given parse expression. C{FollowedBy} - does *not* advance the parsing position within the input string, it only + """ + Lookahead matching of the given parse expression. C{FollowedBy} + does I{not} advance the parsing position within the input string, it only verifies that the specified parse expression matches at the current - position. C{FollowedBy} always returns a null token list.""" + position. C{FollowedBy} always returns a null token list. 
+ + Example:: + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + + OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() + prints:: + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ def __init__( self, expr ): super(FollowedBy,self).__init__(expr) self.mayReturnEmpty = True @@ -2734,11 +3816,16 @@ class FollowedBy(ParseElementEnhance): class NotAny(ParseElementEnhance): - """Lookahead to disallow matching with the given parse expression. C{NotAny} - does *not* advance the parsing position within the input string, it only - verifies that the specified parse expression does *not* match at the current - position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny} - always returns a null token list. May be constructed using the '~' operator.""" + """ + Lookahead to disallow matching with the given parse expression. C{NotAny} + does I{not} advance the parsing position within the input string, it only + verifies that the specified parse expression does I{not} match at the current + position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny} + always returns a null token list. May be constructed using the '~' operator. 
+ + Example:: + + """ def __init__( self, expr ): super(NotAny,self).__init__(expr) #~ self.leaveWhitespace() @@ -2747,11 +3834,7 @@ class NotAny(ParseElementEnhance): self.errmsg = "Found unwanted token, "+_ustr(self.expr) def parseImpl( self, instring, loc, doActions=True ): - try: - self.expr.tryParse( instring, loc ) - except (ParseException,IndexError): - pass - else: + if self.expr.canParseNext(instring, loc): raise ParseException(instring, loc, self.errmsg, self) return loc, [] @@ -2764,65 +3847,69 @@ class NotAny(ParseElementEnhance): return self.strRepr - -class ZeroOrMore(ParseElementEnhance): - """Optional repetition of zero or more of the given expression.""" - def __init__( self, expr ): - super(ZeroOrMore,self).__init__(expr) - self.mayReturnEmpty = True +class _MultipleMatch(ParseElementEnhance): + def __init__( self, expr, stopOn=None): + super(_MultipleMatch, self).__init__(expr) + self.saveAsList = True + ender = stopOn + if isinstance(ender, basestring): + ender = ParserElement._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None def parseImpl( self, instring, loc, doActions=True ): - tokens = [] + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.tryParse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) + hasIgnoreExprs = (not not self.ignoreExprs) while 1: + if check_ender: + try_not_ender(instring, loc) if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) + preloc = self_skip_ignorables( instring, loc ) else: preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, 
doActions ) + loc, tmptokens = self_expr_parse( instring, preloc, doActions ) if tmptokens or tmptokens.haskeys(): tokens += tmptokens except (ParseException,IndexError): pass return loc, tokens + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. + + Parameters: + - expr - expression that must match one or more times + - stopOn - (default=C{None}) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) - def __str__( self ): - if hasattr(self,"name"): - return self.name + Example:: + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." + text = "shape: SQUARE posn: upper left color: BLACK" + OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - return self.strRepr - - def setResultsName( self, name, listAllMatches=False ): - ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret - - -class OneOrMore(ParseElementEnhance): - """Repetition of one or more of the given expression.""" - def parseImpl( self, instring, loc, doActions=True ): - # must be at least one - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - try: - hasIgnoreExprs = ( len(self.ignoreExprs) > 0 ) - while 1: - if hasIgnoreExprs: - preloc = self._skipIgnorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self.expr._parse( instring, preloc, doActions ) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens + # use stopOn attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, 
stopOn=label).setParseAction(' '.join)) + OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parseString(text).pprint() + """ def __str__( self ): if hasattr(self,"name"): @@ -2833,10 +3920,36 @@ class OneOrMore(ParseElementEnhance): return self.strRepr - def setResultsName( self, name, listAllMatches=False ): - ret = super(OneOrMore,self).setResultsName(name,listAllMatches) - ret.saveAsList = True - return ret +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - stopOn - (default=C{None}) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example: similar to L{OneOrMore} + """ + def __init__( self, expr, stopOn=None): + super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) + self.mayReturnEmpty = True + + def parseImpl( self, instring, loc, doActions=True ): + try: + return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) + except (ParseException,IndexError): + return loc, [] + + def __str__( self ): + if hasattr(self,"name"): + return self.name + + if self.strRepr is None: + self.strRepr = "[" + _ustr(self.expr) + "]..." + + return self.strRepr class _NullToken(object): def __bool__(self): @@ -2847,12 +3960,43 @@ class _NullToken(object): _optionalNotMatched = _NullToken() class Optional(ParseElementEnhance): - """Optional matching of the given expression. - A default return string can also be specified, if the optional expression - is not found. + """ + Optional matching of the given expression. + + Parameters: + - expr - expression that must match zero or more times + - default (optional) - value to be returned if the optional expression is not found. 
+ + Example:: + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) + zip.runTests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + prints:: + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) """ def __init__( self, expr, default=_optionalNotMatched ): super(Optional,self).__init__( expr, savelist=False ) + self.saveAsList = self.expr.saveAsList self.defaultValue = default self.mayReturnEmpty = True @@ -2879,13 +4023,60 @@ class Optional(ParseElementEnhance): return self.strRepr - class SkipTo(ParseElementEnhance): - """Token for skipping over all undefined text until the matched expression is found. - If C{include} is set to true, the matched expression is also parsed (the skipped text - and matched expression are returned as a 2-element list). The C{ignore} - argument is used to define grammars (typically quoted strings and comments) that - might contain false matches. + """ + Token for skipping over all undefined text until the matched expression is found. + + Parameters: + - expr - target expression marking the end of the data to be skipped + - include - (default=C{False}) if True, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element list). 
+ - ignore - (default=C{None}) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - failOn - (default=C{None}) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the SkipTo is not a match + + Example:: + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quotedString) + string_data.setParseAction(tokenMap(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.searchString(report): + print tkt.dump() + prints:: + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: 6 + - desc: Intermittent system crash + - issue_num: 101 + - sev: Critical + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: 14 + - desc: Spelling error on Login ('log|n') + - issue_num: 94 + - sev: Cosmetic + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: 47 + - desc: System slow when running too many reports + - issue_num: 79 + - sev: Minor """ def __init__( self, other, include=False, ignore=None, failOn=None ): super( SkipTo, self ).__init__( other ) @@ -2894,77 +4085,85 @@ class SkipTo(ParseElementEnhance): self.mayIndexError = False 
self.includeMatch = include self.asList = False - if failOn is not None and isinstance(failOn, basestring): - self.failOn = Literal(failOn) + if isinstance(failOn, basestring): + self.failOn = ParserElement._literalStringClass(failOn) else: self.failOn = failOn self.errmsg = "No match found for "+_ustr(self.expr) def parseImpl( self, instring, loc, doActions=True ): - startLoc = loc + startloc = loc instrlen = len(instring) expr = self.expr - failParse = False - while loc <= instrlen: - try: - if self.failOn: + expr_parse = self.expr._parse + self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None + self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if self_ignoreExpr_tryParse is not None: + # advance past ignore expressions + while 1: try: - self.failOn.tryParse(instring, loc) + tmploc = self_ignoreExpr_tryParse(instring, tmploc) except ParseBaseException: - pass - else: - failParse = True - raise ParseException(instring, loc, "Found expression " + str(self.failOn)) - failParse = False - if self.ignoreExpr is not None: - while 1: - try: - loc = self.ignoreExpr.tryParse(instring,loc) - # print("found ignoreExpr, advance to", loc) - except ParseBaseException: - break - expr._parse( instring, loc, doActions=False, callPreParse=False ) - skipText = instring[startLoc:loc] - if self.includeMatch: - loc,mat = expr._parse(instring,loc,doActions,callPreParse=False) - if mat: - skipRes = ParseResults( skipText ) - skipRes += mat - return loc, [ skipRes ] - else: - return loc, [ skipText ] - else: - return loc, [ skipText ] - except (ParseException,IndexError): - if failParse: - raise - else: - loc += 1 - raise ParseException(instring, loc, self.errmsg, self) + break + + try: + expr_parse(instring, tmploc, 
doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return values + loc = tmploc + skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) + skipresult += mat + + return loc, skipresult class Forward(ParseElementEnhance): - """Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. - Note: take care when assigning to C{Forward} not to overlook precedence of operators. - Specifically, '|' has a lower precedence than '<<', so that:: - fwdExpr << a | b | c - will actually be evaluated as:: - (fwdExpr << a) | b | c - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the C{Forward}:: - fwdExpr << (a | b | c) - Converting to use the '<<=' operator instead will avoid this problem. + Note: take care when assigning to C{Forward} not to overlook precedence of operators. + Specifically, '|' has a lower precedence than '<<', so that:: + fwdExpr << a | b | c + will actually be evaluated as:: + (fwdExpr << a) | b | c + thereby leaving b and c out as parseable alternatives. 
It is recommended that you + explicitly group the values inserted into the C{Forward}:: + fwdExpr << (a | b | c) + Converting to use the '<<=' operator instead will avoid this problem. + + See L{ParseResults.pprint} for an example of a recursive parser created using + C{Forward}. """ def __init__( self, other=None ): super(Forward,self).__init__( other, savelist=False ) def __lshift__( self, other ): if isinstance( other, basestring ): - other = ParserElement.literalStringClass(other) + other = ParserElement._literalStringClass(other) self.expr = other - self.mayReturnEmpty = other.mayReturnEmpty self.strRepr = None self.mayIndexError = self.expr.mayIndexError self.mayReturnEmpty = self.expr.mayReturnEmpty @@ -2998,7 +4197,9 @@ class Forward(ParseElementEnhance): def __str__( self ): if hasattr(self,"name"): return self.name + return self.__class__.__name__ + ": ..." + # stubbed out for now - creates awful memory and perf issues self._revertClass = self.__class__ self.__class__ = _ForwardNoRecurse try: @@ -3023,26 +4224,29 @@ class _ForwardNoRecurse(Forward): return "..." class TokenConverter(ParseElementEnhance): - """Abstract subclass of C{ParseExpression}, for converting parsed results.""" + """ + Abstract subclass of C{ParseExpression}, for converting parsed results. + """ def __init__( self, expr, savelist=False ): super(TokenConverter,self).__init__( expr )#, savelist ) self.saveAsList = False -class Upcase(TokenConverter): - """Converter to upper case all matching tokens.""" - def __init__(self, *args): - super(Upcase,self).__init__(*args) - warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead", - DeprecationWarning,stacklevel=2) - - def postParse( self, instring, loc, tokenlist ): - return list(map( str.upper, tokenlist )) - - class Combine(TokenConverter): - """Converter to concatenate all matching tokens to a single string. 
- By default, the matching patterns must also be contiguous in the input string; - this can be disabled by specifying C{'adjacent=False'} in the constructor. + """ + Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the input string; + this can be disabled by specifying C{'adjacent=False'} in the constructor. + + Example:: + real = Word(nums) + '.' + Word(nums) + print(real.parseString('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parseString('3. 1416')) # -> ['3', '.', '1416'] + + real = Combine(Word(nums) + '.' + Word(nums)) + print(real.parseString('3.1416')) # -> ['3.1416'] + # no match when there are internal spaces + print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) """ def __init__( self, expr, joinString="", adjacent=True ): super(Combine,self).__init__( expr ) @@ -3072,7 +4276,19 @@ class Combine(TokenConverter): return retToks class Group(TokenConverter): - """Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.""" + """ + Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. + + Example:: + ident = Word(alphas) + num = Word(nums) + term = ident | num + func = ident + Optional(delimitedList(term)) + print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] + + func = ident + Group(Optional(delimitedList(term))) + print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] + """ def __init__( self, expr ): super(Group,self).__init__( expr ) self.saveAsList = True @@ -3081,9 +4297,40 @@ class Group(TokenConverter): return [ tokenlist ] class Dict(TokenConverter): - """Converter to return a repetitive expression as a list, but also as a dictionary. - Each element can also be referenced using the first token in the expression as its key. 
- Useful for tabular report scraping when the first column can be used as a item key. + """ + Converter to return a repetitive expression as a list, but also as a dictionary. + Each element can also be referenced using the first token in the expression as its key. + Useful for tabular report scraping when the first column can be used as a item key. + + Example:: + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + + # print attributes as plain groups + print(OneOrMore(attr_expr).parseString(text).dump()) + + # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names + result = Dict(OneOrMore(Group(attr_expr))).parseString(text) + print(result.dump()) + + # access named fields as dict entries, or output as dict + print(result['shape']) + print(result.asDict()) + prints:: + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} + See more examples at L{ParseResults} of accessing fields by results name. """ def __init__( self, expr ): super(Dict,self).__init__( expr ) @@ -3115,7 +4362,24 @@ class Dict(TokenConverter): class Suppress(TokenConverter): - """Converter for ignoring the results of a parsed expression.""" + """ + Converter for ignoring the results of a parsed expression. 
+ + Example:: + source = "a, b, c,d" + wd = Word(alphas) + wd_list1 = wd + ZeroOrMore(',' + wd) + print(wd_list1.parseString(source)) + + # often, delimiters that are useful during parsing are just in the + # way afterward - use Suppress to keep them out of the parsed output + wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) + print(wd_list2.parseString(source)) + prints:: + ['a', ',', 'b', ',', 'c', ',', 'd'] + ['a', 'b', 'c', 'd'] + (See also L{delimitedList}.) + """ def postParse( self, instring, loc, tokenlist ): return [] @@ -3124,7 +4388,9 @@ class Suppress(TokenConverter): class OnlyOnce(object): - """Wrapper for parse actions, to ensure they are only called once.""" + """ + Wrapper for parse actions, to ensure they are only called once. + """ def __init__(self, methodCall): self.callable = _trim_arity(methodCall) self.called = False @@ -3138,20 +4404,39 @@ class OnlyOnce(object): self.called = False def traceParseAction(f): - """Decorator for debugging parse actions.""" + """ + Decorator for debugging parse actions. + + When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} + When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. + + Example:: + wd = Word(alphas) + + @traceParseAction + def remove_duplicate_chars(tokens): + return ''.join(sorted(set(''.join(tokens)))) + + wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) + print(wds.parseString("slkdjs sld sldd sdlf sdljf")) + prints:: + >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) + <3: thisFunc = paArgs[0].__class__.__name__ + '.' 
+ thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) ) + sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) try: ret = f(*paArgs) except Exception as exc: sys.stderr.write( "< ['aa', 'bb', 'cc'] + delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] """ dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." if combine: @@ -3177,11 +4467,22 @@ def delimitedList( expr, delim=",", combine=False ): return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) def countedArray( expr, intExpr=None ): - """Helper to define a counted list of expressions. - This helper defines a pattern of the form:: - integer expr expr expr... - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. + """ + Helper to define a counted list of expressions. + This helper defines a pattern of the form:: + integer expr expr expr... + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. + + If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. 
+ + Example:: + countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) + countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] """ arrayExpr = Forward() def countFieldParseAction(s,l,t): @@ -3194,7 +4495,7 @@ def countedArray( expr, intExpr=None ): intExpr = intExpr.copy() intExpr.setName("arrayLen") intExpr.addParseAction(countFieldParseAction, callDuringTry=True) - return ( intExpr + arrayExpr ) + return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') def _flatten(L): ret = [] @@ -3206,16 +4507,17 @@ def _flatten(L): return ret def matchPreviousLiteral(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks - for a 'repeat' of a previous expression. For example:: - first = Word(nums) - second = matchPreviousLiteral(first) - matchExpr = first + ":" + second - will match C{"1:1"}, but not C{"1:2"}. Because this matches a - previous literal, will also match the leading C{"1:1"} in C{"1:10"}. - If this is not desired, use C{matchPreviousExpr}. - Do *not* use with packrat parsing enabled. + """ + Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks + for a 'repeat' of a previous expression. For example:: + first = Word(nums) + second = matchPreviousLiteral(first) + matchExpr = first + ":" + second + will match C{"1:1"}, but not C{"1:2"}. Because this matches a + previous literal, will also match the leading C{"1:1"} in C{"1:10"}. + If this is not desired, use C{matchPreviousExpr}. + Do I{not} use with packrat parsing enabled. 
""" rep = Forward() def copyTokenToRepeater(s,l,t): @@ -3225,24 +4527,26 @@ def matchPreviousLiteral(expr): else: # flatten t tokens tflat = _flatten(t.asList()) - rep << And( [ Literal(tt) for tt in tflat ] ) + rep << And(Literal(tt) for tt in tflat) else: rep << Empty() expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) return rep def matchPreviousExpr(expr): - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks - for a 'repeat' of a previous expression. For example:: - first = Word(nums) - second = matchPreviousExpr(first) - matchExpr = first + ":" + second - will match C{"1:1"}, but not C{"1:2"}. Because this matches by - expressions, will *not* match the leading C{"1:1"} in C{"1:10"}; - the expressions are evaluated first, and then compared, so - C{"1"} is compared with C{"10"}. - Do *not* use with packrat parsing enabled. + """ + Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks + for a 'repeat' of a previous expression. For example:: + first = Word(nums) + second = matchPreviousExpr(first) + matchExpr = first + ":" + second + will match C{"1:1"}, but not C{"1:2"}. Because this matches by + expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; + the expressions are evaluated first, and then compared, so + C{"1"} is compared with C{"10"}. + Do I{not} use with packrat parsing enabled. 
""" rep = Forward() e2 = expr.copy() @@ -3255,6 +4559,7 @@ def matchPreviousExpr(expr): raise ParseException("",0,"") rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) expr.addParseAction(copyTokenToRepeater, callDuringTry=True) + rep.setName('(prev) ' + _ustr(expr)) return rep def _escapeRegexRangeChars(s): @@ -3266,16 +4571,27 @@ def _escapeRegexRangeChars(s): return _ustr(s) def oneOf( strs, caseless=False, useRegex=True ): - """Helper to quickly define a set of alternative Literals, and makes sure to do - longest-first testing when there is a conflict, regardless of the input order, - but returns a C{L{MatchFirst}} for best performance. + """ + Helper to quickly define a set of alternative Literals, and makes sure to do + longest-first testing when there is a conflict, regardless of the input order, + but returns a C{L{MatchFirst}} for best performance. - Parameters: - - strs - a string of space-delimited literals, or a list of string literals - - caseless - (default=False) - treat all literals as caseless - - useRegex - (default=True) - as an optimization, will generate a Regex + Parameters: + - strs - a string of space-delimited literals, or a collection of string literals + - caseless - (default=C{False}) - treat all literals as caseless + - useRegex - (default=C{True}) - as an optimization, will generate a Regex object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or if creating a C{Regex} raises an exception) + + Example:: + comp_oper = oneOf("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) + prints:: + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] """ if caseless: isequal = ( lambda a,b: a.upper() == b.upper() ) @@ -3289,12 +4605,10 @@ def oneOf( strs, caseless=False, useRegex=True ): symbols = [] if isinstance(strs,basestring): symbols = 
strs.split() - elif isinstance(strs, collections.Sequence): - symbols = list(strs[:]) - elif isinstance(strs, _generatorType): + elif isinstance(strs, Iterable): symbols = list(strs) else: - warnings.warn("Invalid argument to oneOf, expected string or list", + warnings.warn("Invalid argument to oneOf, expected string or iterable", SyntaxWarning, stacklevel=2) if not symbols: return NoMatch() @@ -3318,41 +4632,76 @@ def oneOf( strs, caseless=False, useRegex=True ): #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) try: if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ) + return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) else: - return Regex( "|".join(re.escape(sym) for sym in symbols) ) - except: + return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) + except Exception: warnings.warn("Exception creating Regex for oneOf, building MatchFirst", SyntaxWarning, stacklevel=2) # last resort, just use MatchFirst - return MatchFirst( [ parseElementClass(sym) for sym in symbols ] ) + return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) def dictOf( key, value ): - """Helper to easily and clearly define a dictionary by specifying the respective patterns - for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens - in the proper order. The key pattern can include delimiting markers or punctuation, - as long as they are suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the C{Dict} results can include named token - fields. + """ + Helper to easily and clearly define a dictionary by specifying the respective patterns + for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens + in the proper order. 
The key pattern can include delimiting markers or punctuation, + as long as they are suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the C{Dict} results can include named token + fields. + + Example:: + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) + print(OneOrMore(attr_expr).parseString(text).dump()) + + attr_label = label + attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) + + # similar to Dict, but simpler call format + result = dictOf(attr_label, attr_value).parseString(text) + print(result.dump()) + print(result['shape']) + print(result.shape) # object attribute access works too + print(result.asDict()) + prints:: + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: light blue + - posn: upper left + - shape: SQUARE + - texture: burlap + SQUARE + SQUARE + {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} """ return Dict( ZeroOrMore( Group ( key + value ) ) ) def originalTextFor(expr, asString=True): - """Helper to return the original, untokenized text for a given expression. Useful to - restore the parsed fields of an HTML start tag into the raw tag text itself, or to - revert separate tokens with intervening whitespace back to the original matching - input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not - require the inspect module to chase up the call stack. By default, returns a - string containing the original parsed text. + """ + Helper to return the original, untokenized text for a given expression. Useful to + restore the parsed fields of an HTML start tag into the raw tag text itself, or to + revert separate tokens with intervening whitespace back to the original matching + input text. 
By default, returns astring containing the original parsed text. - If the optional C{asString} argument is passed as C{False}, then the return value is a - C{L{ParseResults}} containing any results names that were originally matched, and a - single token containing the original matched text from the input string. So if - the expression passed to C{L{originalTextFor}} contains expressions with defined - results names, you must set C{asString} to C{False} if you want to preserve those - results name values.""" + If the optional C{asString} argument is passed as C{False}, then the return value is a + C{L{ParseResults}} containing any results names that were originally matched, and a + single token containing the original matched text from the input string. So if + the expression passed to C{L{originalTextFor}} contains expressions with defined + results names, you must set C{asString} to C{False} if you want to preserve those + results name values. + + Example:: + src = "this is test bold text normal text " + for tag in ("b","i"): + opener,closer = makeHTMLTags(tag) + patt = originalTextFor(opener + SkipTo(closer) + closer) + print(patt.searchString(src)[0]) + prints:: + [' bold text '] + ['text'] + """ locMarker = Empty().setParseAction(lambda s,loc,t: loc) endlocMarker = locMarker.copy() endlocMarker.callPreparse = False @@ -3361,27 +4710,37 @@ def originalTextFor(expr, asString=True): extractText = lambda s,l,t: s[t._original_start:t._original_end] else: def extractText(s,l,t): - del t[:] - t.insert(0, s[t._original_start:t._original_end]) - del t["_original_start"] - del t["_original_end"] + t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] matchExpr.setParseAction(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs return matchExpr def ungroup(expr): - """Helper to undo pyparsing's default grouping of And expressions, even - if all but one are non-empty.""" + """ + Helper to undo pyparsing's default grouping of And expressions, even + if all but one 
are non-empty. + """ return TokenConverter(expr).setParseAction(lambda t:t[0]) def locatedExpr(expr): - """Helper to decorate a returned token with its starting and ending locations in the input string. - This helper adds the following results names: - - locn_start = location where matched expression begins - - locn_end = location where matched expression ends - - value = the actual parsed results + """ + Helper to decorate a returned token with its starting and ending locations in the input string. + This helper adds the following results names: + - locn_start = location where matched expression begins + - locn_end = location where matched expression ends + - value = the actual parsed results - Be careful if the input text contains C{} characters, you may want to call - C{L{ParserElement.parseWithTabs}} + Be careful if the input text contains C{} characters, you may want to call + C{L{ParserElement.parseWithTabs}} + + Example:: + wd = Word(alphas) + for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): + print(match) + prints:: + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] """ locator = Empty().setParseAction(lambda s,l,t: l) return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) @@ -3397,36 +4756,38 @@ stringEnd = StringEnd().setName("stringEnd") _escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) _escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) _escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE) +_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) _charRange = Group(_singleChar + Suppress("-") + _singleChar) _reBracketExpr = Literal("[") + 
Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" def srange(s): - r"""Helper to easily define string ranges for use in Word construction. Borrows - syntax from regexp '[]' string range definitions:: - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - The input string must be enclosed in []'s, and the returned string is the expanded - character set joined into a single string. - The values enclosed in the []'s may be:: - a single character - an escaped character with a leading backslash (such as \- or \]) - an escaped hex character with a leading '\x' (\x21, which is a '!' character) - (\0x## is also supported for backwards compatibility) - an escaped octal character with a leading '\0' (\041, which is a '!' character) - a range of any of the above, separated by a dash ('a-z', etc.) - any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.) + r""" + Helper to easily define string ranges for use in Word construction. Borrows + syntax from regexp '[]' string range definitions:: + srange("[0-9]") -> "0123456789" + srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" + srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" + The input string must be enclosed in []'s, and the returned string is the expanded + character set joined into a single string. + The values enclosed in the []'s may be: + - a single character + - an escaped character with a leading backslash (such as C{\-} or C{\]}) + - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) + (C{\0x##} is also supported for backwards compatibility) + - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) + - a range of any of the above, separated by a dash (C{'a-z'}, etc.) + - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) 
""" _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) try: return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) - except: + except Exception: return "" def matchOnlyAtCol(n): - """Helper method for defining parse actions that require matching at a specific - column in the input text. + """ + Helper method for defining parse actions that require matching at a specific + column in the input text. """ def verifyCol(strg,locn,toks): if col(locn,strg) != n: @@ -3434,57 +4795,83 @@ def matchOnlyAtCol(n): return verifyCol def replaceWith(replStr): - """Helper method for common parse actions that simply return a literal value. Especially - useful when used with C{L{transformString}()}. """ - #def _replFunc(*args): - # return [replStr] - #return _replFunc - return functools.partial(next, itertools.repeat([replStr])) + Helper method for common parse actions that simply return a literal value. Especially + useful when used with C{L{transformString}()}. + + Example:: + num = Word(nums).setParseAction(lambda toks: int(toks[0])) + na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) + term = na | num + + OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] + """ + return lambda s,l,t: [replStr] def removeQuotes(s,l,t): - """Helper parse action for removing quotation marks from parsed quoted strings. - To use, add this parse action to quoted string using:: - quotedString.setParseAction( removeQuotes ) + """ + Helper parse action for removing quotation marks from parsed quoted strings. 
+ + Example:: + # by default, quotation marks are included in parsed results + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] + + # use removeQuotes to strip quotation marks from parsed results + quotedString.setParseAction(removeQuotes) + quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] """ return t[0][1:-1] -def upcaseTokens(s,l,t): - """Helper parse action to convert tokens to upper case.""" - return [ tt.upper() for tt in map(_ustr,t) ] +def tokenMap(func, *args): + """ + Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional + args are passed, they are forwarded to the given function as additional arguments after + the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the + parsed data to an integer using base 16. -def downcaseTokens(s,l,t): - """Helper parse action to convert tokens to lower case.""" - return [ tt.lower() for tt in map(_ustr,t) ] + Example (compare the last to example in L{ParserElement.transformString}:: + hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) + hex_ints.runTests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).setParseAction(tokenMap(str.upper)) + OneOrMore(upperword).runTests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).setParseAction(tokenMap(str.title)) + OneOrMore(wd).setParseAction(' '.join).runTests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + prints:: + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + def pa(s,l,t): + return [func(tokn, *args) for tokn 
in t] -def keepOriginalText(s,startLoc,t): - """DEPRECATED - use new helper method C{L{originalTextFor}}. - Helper parse action to preserve original parsed text, - overriding any nested parse actions.""" try: - endloc = getTokensEndLoc() - except ParseException: - raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action") - del t[:] - t += ParseResults(s[startLoc:endloc]) - return t + func_name = getattr(func, '__name__', + getattr(func, '__class__').__name__) + except Exception: + func_name = str(func) + pa.__name__ = func_name -def getTokensEndLoc(): - """Method to be called from within a parse action to determine the end - location of the parsed tokens.""" - import inspect - fstack = inspect.stack() - try: - # search up the stack (through intervening argument normalizers) for correct calling routine - for f in fstack[2:]: - if f[3] == "_parseNoCache": - endloc = f[0].f_locals["loc"] - return endloc - else: - raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action") - finally: - del fstack + return pa +upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) +"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}""" + +downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) +"""(Deprecated) Helper parse action to convert tokens to lower case. 
Deprecated in favor of L{pyparsing_common.downcaseTokens}""" + def _makeTags(tagStr, xml): """Internal helper to construct opening and closing tag expressions, given a tag name""" if isinstance(tagStr,basestring): @@ -3508,40 +4895,90 @@ def _makeTags(tagStr, xml): Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") closeTag = Combine(_L("") - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % tagStr) + openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) + closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % resname) openTag.tag = resname closeTag.tag = resname return openTag, closeTag def makeHTMLTags(tagStr): - """Helper to construct opening and closing tag expressions for HTML, given a tag name""" + """ + Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches + tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. + + Example:: + text = 'More info at the pyparsing wiki page' + # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple + a,a_end = makeHTMLTags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.searchString(text): + # attributes in the tag (like "href" shown here) are also accessible as named results + print(link.link_text, '->', link.href) + prints:: + pyparsing -> http://pyparsing.wikispaces.com + """ return _makeTags( tagStr, False ) def makeXMLTags(tagStr): - """Helper to construct opening and closing tag expressions for XML, given a tag name""" + """ + Helper to construct opening and closing tag expressions for XML, given a tag name. 
Matches + tags only in the given upper/lower case. + + Example: similar to L{makeHTMLTags} + """ return _makeTags( tagStr, True ) def withAttribute(*args,**attrDict): - """Helper to create a validating parse action to be used with start tags created - with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag - with a required attribute value, to avoid false matches on common tags such as - C{} or C{
    }. + """ + Helper to create a validating parse action to be used with start tags created + with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag + with a required attribute value, to avoid false matches on common tags such as + C{} or C{
    }. - Call C{withAttribute} with a series of attribute names and values. Specify the list - of filter attributes names and values as: - - keyword arguments, as in C{(align="right")}, or - - as an explicit dict with C{**} operator, when an attribute name is also a Python + Call C{withAttribute} with a series of attribute names and values. Specify the list + of filter attributes names and values as: + - keyword arguments, as in C{(align="right")}, or + - as an explicit dict with C{**} operator, when an attribute name is also a Python reserved word, as in C{**{"class":"Customer", "align":"right"}} - - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) - For attribute names with a namespace prefix, you must use the second form. Attribute - names are matched insensitive to upper/lower case. + - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) + For attribute names with a namespace prefix, you must use the second form. Attribute + names are matched insensitive to upper/lower case. - If just testing for C{class} (with or without a namespace), use C{L{withClass}}. + If just testing for C{class} (with or without a namespace), use C{L{withClass}}. - To verify that the attribute exists, but without specifying a value, pass - C{withAttribute.ANY_VALUE} as the value. - """ + To verify that the attribute exists, but without specifying a value, pass + C{withAttribute.ANY_VALUE} as the value. + + Example:: + html = ''' +
    + Some text +
    1 4 0 1 0
    +
    1,3 2,3 1,1
    +
    this has no type
    +
    + + ''' + div,div_end = makeHTMLTags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().setParseAction(withAttribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ if args: attrs = args[:] else: @@ -3558,9 +4995,37 @@ def withAttribute(*args,**attrDict): withAttribute.ANY_VALUE = object() def withClass(classname, namespace=''): - """Simplified version of C{L{withAttribute}} when matching on a div class - made - difficult because C{class} is a reserved word in Python. - """ + """ + Simplified version of C{L{withAttribute}} when matching on a div class - made + difficult because C{class} is a reserved word in Python. + + Example:: + html = ''' +
    + Some text +
    1 4 0 1 0
    +
    1,3 2,3 1,1
    +
    this <div> has no class
    +
    + + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ classattr = "%s:class" % namespace if namespace else "class" return withAttribute(**{classattr : classname}) @@ -3569,40 +5034,76 @@ opAssoc.LEFT = object() opAssoc.RIGHT = object() def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary or - binary, left- or right-associative. Parse actions can also be attached - to operator expressions. + """ + Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary or + binary, left- or right-associative. Parse actions can also be attached + to operator expressions. The generated parser will also recognize the use + of parentheses to override operator precedences (see example below). + + Note: if you define a deep operator list, you may see performance issues + when using infixNotation. See L{ParserElement.enablePackrat} for a + mechanism to potentially improve your parser performance. 
- Parameters: - - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the - expression grammar; each tuple is of the form - (opExpr, numTerms, rightLeftAssoc, parseAction), where: - - opExpr is the pyparsing expression for the operator; - may also be a string, which will be converted to a Literal; - if numTerms is 3, opExpr is a tuple of two expressions, for the - two operators separating the 3 terms - - numTerms is the number of terms for this operator (must - be 1, 2, or 3) - - rightLeftAssoc is the indicator whether the operator is - right or left associative, using the pyparsing-defined - constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - - parseAction is the parse action to be associated with - expressions matching this operator expression (the - parse action tuple member may be omitted) - - lpar - expression for matching left-parentheses (default=Suppress('(')) - - rpar - expression for matching right-parentheses (default=Suppress(')')) + Parameters: + - baseExpr - expression representing the most basic element for the nested + - opList - list of tuples, one for each operator precedence level in the + expression grammar; each tuple is of the form + (opExpr, numTerms, rightLeftAssoc, parseAction), where: + - opExpr is the pyparsing expression for the operator; + may also be a string, which will be converted to a Literal; + if numTerms is 3, opExpr is a tuple of two expressions, for the + two operators separating the 3 terms + - numTerms is the number of terms for this operator (must + be 1, 2, or 3) + - rightLeftAssoc is the indicator whether the operator is + right or left associative, using the pyparsing-defined + constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. 
+ - parseAction is the parse action to be associated with + expressions matching this operator expression (the + parse action tuple member may be omitted); if the parse action + is passed a tuple or list of functions, this is equivalent to + calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) + - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) + - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) + + Example:: + # simple example of four-function arithmetic with ints and variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infixNotation(integer | varname, + [ + ('-', 1, opAssoc.RIGHT), + (oneOf('* /'), 2, opAssoc.LEFT), + (oneOf('+ -'), 2, opAssoc.LEFT), + ]) + + arith_expr.runTests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', fullDump=False) + prints:: + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] """ ret = Forward() lastExpr = baseExpr | ( lpar + ret + rpar ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr if arity == 3: if opExpr is None or len(opExpr) != 2: raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") opExpr1, opExpr2 = opExpr - thisExpr = Forward()#.setName("expr%d" % i) + thisExpr = Forward().setName(termName) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) @@ -3635,38 +5136,81 @@ def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): else: raise ValueError("operator must indicate right or left associativity") if pa: - matchExpr.setParseAction( pa ) - thisExpr <<= ( matchExpr | lastExpr ) + if isinstance(pa, (tuple, list)): + matchExpr.setParseAction(*pa) + else: + matchExpr.setParseAction(pa) + thisExpr <<= ( 
matchExpr.setName(termName) | lastExpr ) lastExpr = thisExpr ret <<= lastExpr return ret -operatorPrecedence = infixNotation -dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes") -sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes") -quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()) +operatorPrecedence = infixNotation +"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" + +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """Helper method for defining nested lists enclosed in opening and closing - delimiters ("(" and ")" are the default). + """ + Helper method for defining nested lists enclosed in opening and closing + delimiters ("(" and ")" are the default). 
- Parameters: - - opener - opening character for a nested list (default="("); can also be a pyparsing expression - - closer - closing character for a nested list (default=")"); can also be a pyparsing expression - - content - expression for items within the nested lists (default=None) - - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) + Parameters: + - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression + - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression + - content - expression for items within the nested lists (default=C{None}) + - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) - If an expression is not provided for the content argument, the nested - expression will capture all whitespace-delimited content between delimiters - as a list of separate values. + If an expression is not provided for the content argument, the nested + expression will capture all whitespace-delimited content between delimiters + as a list of separate values. - Use the C{ignoreExpr} argument to define expressions that may contain - opening or closing characters that should not be treated as opening - or closing characters for nesting, such as quotedString or a comment - expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. - The default is L{quotedString}, but if no expressions are to be ignored, - then pass C{None} for this argument. + Use the C{ignoreExpr} argument to define expressions that may contain + opening or closing characters that should not be treated as opening + or closing characters for nesting, such as quotedString or a comment + expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. + The default is L{quotedString}, but if no expressions are to be ignored, + then pass C{None} for this argument. 
+ + Example:: + data_type = oneOf("void int short long char float double") + decl_data_type = Combine(data_type + Optional(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR,RPAR = map(Suppress, "()") + + code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(cStyleComment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.searchString(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + prints:: + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] """ if opener == closer: raise ValueError("opening and closing strings cannot be the same") @@ -3697,23 +5241,86 @@ def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.cop ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) else: ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) + ret.setName('nested %s%s expression' % (opener,closer)) return ret def indentedBlock(blockStatementExpr, indentStack, indent=True): - """Helper method for defining space-delimited indentation blocks, such as - those used to define block statements in Python source code. + """ + Helper method for defining space-delimited indentation blocks, such as + those used to define block statements in Python source code. 
- Parameters: - - blockStatementExpr - expression defining syntax of statement that + Parameters: + - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - - indentStack - list created by caller to manage indentation stack + - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond the + - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements - (default=True) + (default=C{True}) - A valid block must contain at least one C{blockStatement}. + A valid block must contain at least one C{blockStatement}. + + Example:: + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group( funcDecl + func_body ) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << ( funcDef | assignment | identifier ) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + prints:: + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', 
')'], ':', [['pass']]]]]]] """ def checkPeerIndent(s,l,t): if l >= len(s): return @@ -3738,9 +5345,9 @@ def indentedBlock(blockStatementExpr, indentStack, indent=True): indentStack.pop() NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) - INDENT = Empty() + Empty().setParseAction(checkSubIndent) - PEER = Empty().setParseAction(checkPeerIndent) - UNDENT = Empty().setParseAction(checkUnindent) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') if indent: smExpr = Group( Optional(NL) + #~ FollowedBy(blockStatementExpr) + @@ -3749,57 +5356,387 @@ def indentedBlock(blockStatementExpr, indentStack, indent=True): smExpr = Group( Optional(NL) + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr + return smExpr.setName('indented block') alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:")) -commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline() -_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "')) -replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None +anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = 
Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment") +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form C{/* ... */}" -htmlComment = Regex(r"") -restOfLine = Regex(r".*").leaveWhitespace() -dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment") -cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?").setName("HTML comment") +"Comment of the form C{}" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form C{// ... (to end of line)}" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" javaStyleComment = cppStyleComment +"Same as C{L{cppStyleComment}}" + pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form C{# ... (to end of line)}" + _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional( Word(" \t") + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. 
+ This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """ + Here are some common low-level expressions that may be useful in jump-starting parser development: + - numeric forms (L{integers}, L{reals}, L{scientific notation}) + - common L{programming identifiers} + - network addresses (L{MAC}, L{IPv4}, L{IPv6}) + - ISO8601 L{dates} and L{datetime} + - L{UUID} + - L{comma-separated list} + Parse actions: + - C{L{convertToInteger}} + - C{L{convertToFloat}} + - C{L{convertToDate}} + - C{L{convertToDatetime}} + - C{L{stripHTMLTags}} + - C{L{upcaseTokens}} + - C{L{downcaseTokens}} + + Example:: + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + prints:: + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + 
+ -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.addParseAction(lambda t: t[0]/t[-1]) + + mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.addParseAction(sum) + + real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + """expression that parses a floating point number and returns a float""" + + sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + """expression that parses a floating point number with optional scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).streamline() + """any numeric expression, returns the corresponding 
Python type""" + + fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) + """any int or real number, returned as float""" + + identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") + "IPv4 address (C{0.0.0.0 - 255.255.255.255})" + + _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") + ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) + + Example:: + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + prints:: + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """ + Helper to create a parse action for converting parsed datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) + + Example:: + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + prints:: + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") + "ISO8601 date (C{yyyy-mm-dd})" + + iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + 
""" + Parse action to remove HTML tags from web page HTML source + + Example:: + # strip HTML links from normal text + text = 'More info at the
    pyparsing wiki page' + td,td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + + print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) + + _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + + Optional( White(" \t") ) ) ).streamline().setName("commaItem") + comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) + """Parse action to convert tokens to upper case.""" + + downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) + """Parse action to convert tokens to lower case.""" if __name__ == "__main__": - selectToken = CaselessLiteral( "select" ) - fromToken = CaselessLiteral( "from" ) + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") - ident = Word( alphas, alphanums + "_$" ) - columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - columnNameList = Group( delimitedList( columnName ) ).setName("columns") - tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens ) - tableNameList = Group( delimitedList( tableName ) ).setName("tables") - simpleSQL = ( selectToken + \ - ( '*' | columnNameList ).setResultsName( "columns" ) + \ - fromToken + \ - tableNameList.setResultsName( "tables" ) ) + ident = Word(alphas, alphanums + "_$") - simpleSQL.runTests("""\ - SELECT * from XYZZY, ABC - select * from SYS.XYZZY - Select A from Sys.dual - Select AA,BB,CC from Sys.dual - Select A, B, C from Sys.dual - Select A, B, C from Sys.dual - Xelect A, B, C from Sys.dual - Select A, B, C frox Sys.dual - Select - Select 
^^^ frox Sys.dual - Select A, B, C from Sys.dual, Table2""") + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec = ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" + 12345678-1234-5678-1234-567812345678 + """) diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/extern/__init__.py b/Shared/lib/python3.4/site-packages/pkg_resources/extern/__init__.py index 317f4b8..c1eb9e9 100644 --- a/Shared/lib/python3.4/site-packages/pkg_resources/extern/__init__.py +++ b/Shared/lib/python3.4/site-packages/pkg_resources/extern/__init__.py @@ -6,6 +6,7 @@ class VendorImporter: A PEP 302 meta path importer for finding optionally-vendored 
or otherwise naturally-installed packages from root_name. """ + def __init__(self, root_name, vendored_names=(), vendor_pkg=None): self.root_name = root_name self.vendored_names = set(vendored_names) @@ -47,7 +48,7 @@ class VendorImporter: # on later Python versions to cause relative imports # in the vendor package to resolve the same modules # as those going through this importer. - if sys.version_info > (3, 3): + if prefix and sys.version_info > (3, 3): del sys.modules[extant] return mod except ImportError: @@ -67,5 +68,6 @@ class VendorImporter: if self not in sys.meta_path: sys.meta_path.append(self) -names = 'packaging', 'six' + +names = 'packaging', 'pyparsing', 'six', 'appdirs' VendorImporter(__name__, names).install() diff --git a/Shared/lib/python3.4/site-packages/pkg_resources/py31compat.py b/Shared/lib/python3.4/site-packages/pkg_resources/py31compat.py new file mode 100644 index 0000000..a381c42 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/pkg_resources/py31compat.py @@ -0,0 +1,23 @@ +import os +import errno +import sys + +from .extern import six + + +def _makedirs_31(path, exist_ok=False): + try: + os.makedirs(path) + except OSError as exc: + if not exist_ok or exc.errno != errno.EEXIST: + raise + + +# rely on compatibility behavior until mode considerations +# and exists_ok considerations are disentangled. +# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663 +needs_makedirs = ( + six.PY2 or + (3, 4) <= sys.version_info < (3, 4, 1) +) +makedirs = _makedirs_31 if needs_makedirs else os.makedirs diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/DESCRIPTION.rst deleted file mode 100644 index e92fcda..0000000 --- a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,133 +0,0 @@ -Parse, validate and reformat standard numbers and codes. 
- -This library offers functions for parsing, validating and reformatting -standard numbers and codes in various formats. - -Currently this package supports the following formats: - -* al.nipt: NIPT (Numri i Identifikimit për Personin e Tatueshëm, Albanian VAT number) -* ar.cuit: CUIT (Código Único de Identificación Tributaria, Argentinian tax number) -* at.businessid: Austrian Company Register Numbers -* at.uid: UID (Umsatzsteuer-Identifikationsnummer, Austrian VAT number) -* be.vat: BTW, TVA, NWSt (Belgian VAT number) -* bg.egn: EGN (ЕГН, Единен граждански номер, Bulgarian personal identity codes) -* bg.pnf: PNF (ЛНЧ, Личен номер на чужденец, Bulgarian number of a foreigner) -* bg.vat: VAT (Идентификационен номер по ДДС, Bulgarian VAT number) -* br.cnpj: CNPJ (Cadastro Nacional da Pessoa Jurídica, Brazillian company identifier) -* br.cpf: CPF (Cadastro de Pessoas Físicas, Brazillian national identifier) -* ch.ssn: Swiss social security number ("Sozialversicherungsnummer") -* ch.uid: UID (Unternehmens-Identifikationsnummer, Swiss business identifier) -* ch.vat: VAT, MWST, TVA, IVA, TPV (Mehrwertsteuernummer, the Swiss VAT number) -* cl.rut: RUT (Rol Único Tributario, Chilean national tax number) -* cn.ric: RIC No. (Chinese Resident Identity Card Number) -* co.nit: NIT (Número De Identificación Tributaria, Colombian identity code) -* cusip: CUSIP number (financial security identification number) -* cy.vat: Αριθμός Εγγραφής Φ.Π.Α. (Cypriot VAT number) -* cz.dic: DIČ (Daňové identifikační číslo, Czech VAT number) -* cz.rc: RČ (Rodné číslo, the Czech birth number) -* de.vat: Ust ID Nr. 
(Umsatzsteur Identifikationnummer, German VAT number) -* de.wkn: Wertpapierkennnummer (German securities identification code) -* dk.cpr: CPR (personnummer, the Danish citizen number) -* dk.cvr: CVR (Momsregistreringsnummer, Danish VAT number) -* do.cedula: Cedula (Dominican Republic national identification number) -* do.rnc: RNC (Registro Nacional del Contribuyente, Dominican Republic tax number) -* ean: EAN (International Article Number) -* ec.ci: CI (Cédula de identidad, Ecuadorian personal identity code) -* ec.ruc: RUC (Registro Único de Contribuyentes, Ecuadorian company tax number) -* ee.ik: Isikukood (Estonian Personcal ID number) -* ee.kmkr: KMKR (Käibemaksukohuslase, Estonian VAT number) -* es.cif: CIF (Certificado de Identificación Fiscal, Spanish company tax number) -* es.dni: DNI (Documento nacional de identidad, Spanish personal identity codes) -* es.nie: NIE (Número de Identificación de Extranjeros, Spanish foreigner number) -* es.nif: NIF (Número de Identificación Fiscal, Spanish VAT number) -* eu.at_02: SEPA Identifier of the Creditor (AT-02) -* eu.vat: VAT (European Union VAT number) -* fi.alv: ALV nro (Arvonlisäveronumero, Finnish VAT number) -* fi.associationid: Finnish Association Identifier -* fi.hetu: HETU (Henkilötunnus, Finnish personal identity code) -* fi.ytunnus: Y-tunnus (Finnish business identifier) -* fr.siren: SIREN (a French company identification number) -* fr.tva: n° TVA (taxe sur la valeur ajoutée, French VAT number) -* gb.sedol: SEDOL number (Stock Exchange Daily Official List number) -* gb.vat: VAT (United Kingdom (and Isle of Man) VAT registration number) -* gr.vat: FPA, ΦΠΑ, ΑΦΜ (Αριθμός Φορολογικού Μητρώου, the Greek VAT number) -* grid: GRid (Global Release Identifier) -* hr.oib: OIB (Osobni identifikacijski broj, Croatian identification number) -* hu.anum: ANUM (Közösségi adószám, Hungarian VAT number) -* iban: IBAN (International Bank Account Number) -* ie.pps: PPS No (Personal Public Service Number, Irish personal number) 
-* ie.vat: VAT (Irish VAT number) -* imei: IMEI (International Mobile Equipment Identity) -* imo: IMO number (International Maritime Organization number) -* imsi: IMSI (International Mobile Subscriber Identity) -* is_.kennitala: Kennitala (Icelandic personal and organisation identity code) -* is_.vsk: VSK number (Virðisaukaskattsnúmer, Icelandic VAT number) -* isan: ISAN (International Standard Audiovisual Number) -* isbn: ISBN (International Standard Book Number) -* isil: ISIL (International Standard Identifier for Libraries) -* isin: ISIN (International Securities Identification Number) -* ismn: ISMN (International Standard Music Number) -* iso6346: ISO 6346 (International standard for container identification) -* iso9362: ISO 9362 (Business identifier codes) -* issn: ISSN (International Standard Serial Number) -* it.codicefiscale: Codice Fiscale (Italian tax code for individuals) -* it.iva: Partita IVA (Italian VAT number) -* lt.pvm: PVM (Pridėtinės vertės mokestis mokėtojo kodas, Lithuanian VAT number) -* lu.tva: TVA (taxe sur la valeur ajoutée, Luxembourgian VAT number) -* lv.pvn: PVN (Pievienotās vērtības nodokļa, Latvian VAT number) -* meid: MEID (Mobile Equipment Identifier) -* mt.vat: VAT (Maltese VAT number) -* mx.rfc: RFC (Registro Federal de Contribuyentes, Mexican tax number) -* my.nric: NRIC No. 
(Malaysian National Registration Identity Card Number) -* nl.brin: Brin number (Dutch number for schools) -* nl.bsn: BSN (Burgerservicenummer, Dutch national identification number) -* nl.btw: BTW-nummer (Omzetbelastingnummer, the Dutch VAT number) -* nl.onderwijsnummer: Onderwijsnummer (Dutch student school number) -* nl.postcode: Postcode (Dutch postal code) -* no.mva: MVA (Merverdiavgift, Norwegian VAT number) -* no.orgnr: Orgnr (Organisasjonsnummer, Norwegian organisation number) -* pl.nip: NIP (Numer Identyfikacji Podatkowej, Polish VAT number) -* pl.pesel: PESEL (Polish national identification number) -* pl.regon: REGON (Rejestr Gospodarki Narodowej, Polish register of economic units) -* pt.nif: NIF (Número de identificação fiscal, Portuguese VAT number) -* ro.cf: CF (Cod de înregistrare în scopuri de TVA, Romanian VAT number) -* ro.cnp: CNP (Cod Numeric Personal, Romanian Numerical Personal Code) -* ru.inn: ИНН (Идентификационный номер налогоплательщика, Russian tax identifier) -* se.orgnr: Orgnr (Organisationsnummer, Swedish company number) -* se.vat: VAT (Moms, Mervärdesskatt, Swedish VAT number) -* si.ddv: ID za DDV (Davčna številka, Slovenian VAT number) -* sk.dph: IČ DPH (IČ pre daň z pridanej hodnoty, Slovak VAT number) -* sk.rc: RČ (Rodné číslo, the Slovak birth number) -* sm.coe: COE (Codice operatore economico, San Marino national tax number) -* us.atin: ATIN (U.S. Adoption Taxpayer Identification Number) -* us.ein: EIN (U.S. Employer Identification Number) -* us.itin: ITIN (U.S. Individual Taxpayer Identification Number) -* us.ptin: PTIN (U.S. Preparer Tax Identification Number) -* us.rtn: RTN (Routing transport number) -* us.ssn: SSN (U.S. Social Security Number) -* us.tin: TIN (U.S. 
Taxpayer Identification Number) - -Furthermore a number of generic check digit algorithms are available: - -* iso7064.mod_11_10: The ISO 7064 Mod 11, 10 algorithm -* iso7064.mod_11_2: The ISO 7064 Mod 11, 2 algorithm -* iso7064.mod_37_2: The ISO 7064 Mod 37, 2 algorithm -* iso7064.mod_37_36: The ISO 7064 Mod 37, 36 algorithm -* iso7064.mod_97_10: The ISO 7064 Mod 97, 10 algorithm -* luhn: The Luhn and Luhn mod N algorithms -* verhoeff: The Verhoeff algorithm - -All modules implement a common interface: - ->>> from stdnum import isbn ->>> isbn.validate('978-9024538270') -'9789024538270' ->>> isbn.validate('978-9024538271') -Traceback (most recent call last): - ... -InvalidChecksum: ... - -Apart from the validate() function, modules generally provide extra -parsing, validation, formatting or conversion functions. - - diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/METADATA b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/METADATA deleted file mode 100644 index bc21e13..0000000 --- a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/METADATA +++ /dev/null @@ -1,164 +0,0 @@ -Metadata-Version: 2.0 -Name: python-stdnum -Version: 1.2 -Summary: Python module to handle standardized numbers and codes -Home-page: http://arthurdejong.org/python-stdnum/ -Author: Arthur de Jong -Author-email: arthur@arthurdejong.org -License: LGPL -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: Intended Audience :: Financial and Insurance Industry -Classifier: Intended Audience :: Information Technology -Classifier: Intended Audience :: 
Telecommunications Industry -Classifier: License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+) -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Topic :: Office/Business :: Financial -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: General -Provides-Extra: VIES -Requires-Dist: suds; extra == 'VIES' -Provides-Extra: VIES-ALT -Requires-Dist: PySimpleSOAP; extra == 'VIES-ALT' - -Parse, validate and reformat standard numbers and codes. - -This library offers functions for parsing, validating and reformatting -standard numbers and codes in various formats. 
- -Currently this package supports the following formats: - -* al.nipt: NIPT (Numri i Identifikimit për Personin e Tatueshëm, Albanian VAT number) -* ar.cuit: CUIT (Código Único de Identificación Tributaria, Argentinian tax number) -* at.businessid: Austrian Company Register Numbers -* at.uid: UID (Umsatzsteuer-Identifikationsnummer, Austrian VAT number) -* be.vat: BTW, TVA, NWSt (Belgian VAT number) -* bg.egn: EGN (ЕГН, Единен граждански номер, Bulgarian personal identity codes) -* bg.pnf: PNF (ЛНЧ, Личен номер на чужденец, Bulgarian number of a foreigner) -* bg.vat: VAT (Идентификационен номер по ДДС, Bulgarian VAT number) -* br.cnpj: CNPJ (Cadastro Nacional da Pessoa Jurídica, Brazillian company identifier) -* br.cpf: CPF (Cadastro de Pessoas Físicas, Brazillian national identifier) -* ch.ssn: Swiss social security number ("Sozialversicherungsnummer") -* ch.uid: UID (Unternehmens-Identifikationsnummer, Swiss business identifier) -* ch.vat: VAT, MWST, TVA, IVA, TPV (Mehrwertsteuernummer, the Swiss VAT number) -* cl.rut: RUT (Rol Único Tributario, Chilean national tax number) -* cn.ric: RIC No. (Chinese Resident Identity Card Number) -* co.nit: NIT (Número De Identificación Tributaria, Colombian identity code) -* cusip: CUSIP number (financial security identification number) -* cy.vat: Αριθμός Εγγραφής Φ.Π.Α. (Cypriot VAT number) -* cz.dic: DIČ (Daňové identifikační číslo, Czech VAT number) -* cz.rc: RČ (Rodné číslo, the Czech birth number) -* de.vat: Ust ID Nr. 
(Umsatzsteur Identifikationnummer, German VAT number) -* de.wkn: Wertpapierkennnummer (German securities identification code) -* dk.cpr: CPR (personnummer, the Danish citizen number) -* dk.cvr: CVR (Momsregistreringsnummer, Danish VAT number) -* do.cedula: Cedula (Dominican Republic national identification number) -* do.rnc: RNC (Registro Nacional del Contribuyente, Dominican Republic tax number) -* ean: EAN (International Article Number) -* ec.ci: CI (Cédula de identidad, Ecuadorian personal identity code) -* ec.ruc: RUC (Registro Único de Contribuyentes, Ecuadorian company tax number) -* ee.ik: Isikukood (Estonian Personcal ID number) -* ee.kmkr: KMKR (Käibemaksukohuslase, Estonian VAT number) -* es.cif: CIF (Certificado de Identificación Fiscal, Spanish company tax number) -* es.dni: DNI (Documento nacional de identidad, Spanish personal identity codes) -* es.nie: NIE (Número de Identificación de Extranjeros, Spanish foreigner number) -* es.nif: NIF (Número de Identificación Fiscal, Spanish VAT number) -* eu.at_02: SEPA Identifier of the Creditor (AT-02) -* eu.vat: VAT (European Union VAT number) -* fi.alv: ALV nro (Arvonlisäveronumero, Finnish VAT number) -* fi.associationid: Finnish Association Identifier -* fi.hetu: HETU (Henkilötunnus, Finnish personal identity code) -* fi.ytunnus: Y-tunnus (Finnish business identifier) -* fr.siren: SIREN (a French company identification number) -* fr.tva: n° TVA (taxe sur la valeur ajoutée, French VAT number) -* gb.sedol: SEDOL number (Stock Exchange Daily Official List number) -* gb.vat: VAT (United Kingdom (and Isle of Man) VAT registration number) -* gr.vat: FPA, ΦΠΑ, ΑΦΜ (Αριθμός Φορολογικού Μητρώου, the Greek VAT number) -* grid: GRid (Global Release Identifier) -* hr.oib: OIB (Osobni identifikacijski broj, Croatian identification number) -* hu.anum: ANUM (Közösségi adószám, Hungarian VAT number) -* iban: IBAN (International Bank Account Number) -* ie.pps: PPS No (Personal Public Service Number, Irish personal number) 
-* ie.vat: VAT (Irish VAT number) -* imei: IMEI (International Mobile Equipment Identity) -* imo: IMO number (International Maritime Organization number) -* imsi: IMSI (International Mobile Subscriber Identity) -* is_.kennitala: Kennitala (Icelandic personal and organisation identity code) -* is_.vsk: VSK number (Virðisaukaskattsnúmer, Icelandic VAT number) -* isan: ISAN (International Standard Audiovisual Number) -* isbn: ISBN (International Standard Book Number) -* isil: ISIL (International Standard Identifier for Libraries) -* isin: ISIN (International Securities Identification Number) -* ismn: ISMN (International Standard Music Number) -* iso6346: ISO 6346 (International standard for container identification) -* iso9362: ISO 9362 (Business identifier codes) -* issn: ISSN (International Standard Serial Number) -* it.codicefiscale: Codice Fiscale (Italian tax code for individuals) -* it.iva: Partita IVA (Italian VAT number) -* lt.pvm: PVM (Pridėtinės vertės mokestis mokėtojo kodas, Lithuanian VAT number) -* lu.tva: TVA (taxe sur la valeur ajoutée, Luxembourgian VAT number) -* lv.pvn: PVN (Pievienotās vērtības nodokļa, Latvian VAT number) -* meid: MEID (Mobile Equipment Identifier) -* mt.vat: VAT (Maltese VAT number) -* mx.rfc: RFC (Registro Federal de Contribuyentes, Mexican tax number) -* my.nric: NRIC No. 
(Malaysian National Registration Identity Card Number) -* nl.brin: Brin number (Dutch number for schools) -* nl.bsn: BSN (Burgerservicenummer, Dutch national identification number) -* nl.btw: BTW-nummer (Omzetbelastingnummer, the Dutch VAT number) -* nl.onderwijsnummer: Onderwijsnummer (Dutch student school number) -* nl.postcode: Postcode (Dutch postal code) -* no.mva: MVA (Merverdiavgift, Norwegian VAT number) -* no.orgnr: Orgnr (Organisasjonsnummer, Norwegian organisation number) -* pl.nip: NIP (Numer Identyfikacji Podatkowej, Polish VAT number) -* pl.pesel: PESEL (Polish national identification number) -* pl.regon: REGON (Rejestr Gospodarki Narodowej, Polish register of economic units) -* pt.nif: NIF (Número de identificação fiscal, Portuguese VAT number) -* ro.cf: CF (Cod de înregistrare în scopuri de TVA, Romanian VAT number) -* ro.cnp: CNP (Cod Numeric Personal, Romanian Numerical Personal Code) -* ru.inn: ИНН (Идентификационный номер налогоплательщика, Russian tax identifier) -* se.orgnr: Orgnr (Organisationsnummer, Swedish company number) -* se.vat: VAT (Moms, Mervärdesskatt, Swedish VAT number) -* si.ddv: ID za DDV (Davčna številka, Slovenian VAT number) -* sk.dph: IČ DPH (IČ pre daň z pridanej hodnoty, Slovak VAT number) -* sk.rc: RČ (Rodné číslo, the Slovak birth number) -* sm.coe: COE (Codice operatore economico, San Marino national tax number) -* us.atin: ATIN (U.S. Adoption Taxpayer Identification Number) -* us.ein: EIN (U.S. Employer Identification Number) -* us.itin: ITIN (U.S. Individual Taxpayer Identification Number) -* us.ptin: PTIN (U.S. Preparer Tax Identification Number) -* us.rtn: RTN (Routing transport number) -* us.ssn: SSN (U.S. Social Security Number) -* us.tin: TIN (U.S. 
Taxpayer Identification Number) - -Furthermore a number of generic check digit algorithms are available: - -* iso7064.mod_11_10: The ISO 7064 Mod 11, 10 algorithm -* iso7064.mod_11_2: The ISO 7064 Mod 11, 2 algorithm -* iso7064.mod_37_2: The ISO 7064 Mod 37, 2 algorithm -* iso7064.mod_37_36: The ISO 7064 Mod 37, 36 algorithm -* iso7064.mod_97_10: The ISO 7064 Mod 97, 10 algorithm -* luhn: The Luhn and Luhn mod N algorithms -* verhoeff: The Verhoeff algorithm - -All modules implement a common interface: - ->>> from stdnum import isbn ->>> isbn.validate('978-9024538270') -'9789024538270' ->>> isbn.validate('978-9024538271') -Traceback (most recent call last): - ... -InvalidChecksum: ... - -Apart from the validate() function, modules generally provide extra -parsing, validation, formatting or conversion functions. - - diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/RECORD b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/RECORD deleted file mode 100644 index eaea5ab..0000000 --- a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/RECORD +++ /dev/null @@ -1,331 +0,0 @@ -python_stdnum-1.2.dist-info/DESCRIPTION.rst,sha256=1_jOMXuuFhuMbyBA6lJqaAd4wCwojFzhOvRfQpjQJj8,7653 -python_stdnum-1.2.dist-info/METADATA,sha256=6z1fYXRagUi1XuW4bodh9gBPb5y4NejPHiFx2wxURmE,8983 -python_stdnum-1.2.dist-info/RECORD,, -python_stdnum-1.2.dist-info/WHEEL,sha256=lCqt3ViRAf9c8mCs6o7ffkwROUdYSy8_YHn5f_rulB4,93 -python_stdnum-1.2.dist-info/metadata.json,sha256=QHi_oG7qwxWuUlIeg87_ThlOp6d-wWmzX_JipMItmMs,1392 -python_stdnum-1.2.dist-info/pbr.json,sha256=6PKRbBc1UyjNHWRoESNJAVJCyM-9w2s2xAcK6B9blb0,46 -python_stdnum-1.2.dist-info/top_level.txt,sha256=2TbxjF-YBqLmFtkp0c2tNeXjolXDpXEHdX05q9nqOKs,7 -stdnum/__init__.py,sha256=yxF0i6liCAXsBDkI25jcu98Op0C0chlIBPK3sbchduE,8534 -stdnum/cusip.py,sha256=gZIA8vpv7jQNx1hZof5igD4d1nzXciYh48ghbElJZXM,2867 -stdnum/ean.py,sha256=H9tdKzU84UYzpguIPuCyLWDq2J3Otqe3MRjJM8jW-00,2451 
-stdnum/exceptions.py,sha256=2qGPJjy5_9O00ziFitIPSP23bATnLZuY53FPXRRV10o,2181 -stdnum/grid.py,sha256=MYYgM0WAixXOibvbYDHDAAzWItSa_DBWzMYCnQ-8l3g,2480 -stdnum/iban.dat,sha256=wcoq7o4wSWvsIxpayC1Tyg-v0U3fMvr3jcOWFFACYxk,3035 -stdnum/iban.py,sha256=9T1ckhwbwOCfgNrI368e-PwU-j0akx-dW7cOMRY5KRk,3897 -stdnum/imei.py,sha256=QT7N5tIAFqyq2a0vd30e49DDrNm9Min5C5fH_HmJV8c,3473 -stdnum/imo.py,sha256=jnRYo-LJYCE_wWteTlVYYcPmGgdQnszbQkgojXs17g4,2724 -stdnum/imsi.dat,sha256=KGBOD5ZiN-uF1XVKTuYBrKq3Ub-CmHe7vS-ur9xgTdY,266577 -stdnum/imsi.py,sha256=r_xCKuCkM6oLSguezTistGLuc9e092o89nxDASFGzIM,3005 -stdnum/isan.py,sha256=YmN758udNrDoz9Geqkc7qYEgG2caQe9qUgmxaw9CHQ8,6101 -stdnum/isbn.dat,sha256=hMp51zfVPj9CxbcAn6nRi80TuEW3RJ1ln-RapwxyiBw,14899 -stdnum/isbn.py,sha256=oqPbMxHV_JQbdhGhKVNAVjVWqARWFK85amneNSUcMuw,6435 -stdnum/isil.dat,sha256=pXBl8VZfAJPpXnpxHsTe6U7G00GO6Sgz2KP46-j_lXI,3868 -stdnum/isil.py,sha256=LPswbvQx_hnuKoRJsWvzfESvV2eqFu86Z2rAQVantDQ,2967 -stdnum/isin.py,sha256=gegs5RYR94V-bHWLOa4MdZFJckh4aM2Z0yW20zAfRsk,5220 -stdnum/ismn.py,sha256=WYxIO14iSTjFcs-mKv6DDDhktEvX5nB1wqFRk-IZNpc,4334 -stdnum/iso6346.py,sha256=WVup4H-zjCe2sgBfx1yaAj3IrdhYJzKM7egK2PyJncs,2906 -stdnum/iso9362.py,sha256=6kbCEgLJ7Cpei6bAdP5CJOjDb8GrxmcfDmhtT7PMFpw,2569 -stdnum/issn.py,sha256=zrRePanLgn1qGdcoj8pBF5kOtEt7OK0n0qtmLz1hkhU,2958 -stdnum/luhn.py,sha256=oaPRDic_cFZU9F8F5Nw6_2jeHevSTg4SZNoxAs_Asz8,2738 -stdnum/meid.py,sha256=BxGY7q0tqw2KL8J1EAPOcaFoyFK3NgV_MG2NpV_pOp0,6698 -stdnum/numdb.py,sha256=CXOOsPsF6R4btwc5iK_Ed_RC03IkIfb7o-b504EHVVY,6568 -stdnum/util.py,sha256=rT451Sa8fAme_B3R9K0cfhBzXFhFyV3sS4HQTPQ63bw,6790 -stdnum/verhoeff.py,sha256=gHhzyB7AfaXWtU4JFri9TYzMhZltnjFBVbT8_Eri-zk,3161 -stdnum/al/__init__.py,sha256=TASq1rmMOQTaRrCA1Gvw9VUuc7JlT2Dx3koC5rF1_Cc,931 -stdnum/al/nipt.py,sha256=ws4u3aoRX_wsabpmr2ubT7YlM_YbOMTjRFNMG-w9xSg,2417 -stdnum/ar/__init__.py,sha256=xkKMGPjkxYN5hfMVFn0iASY6Mmg2yVIq1A40Bx3ZFVE,937 -stdnum/ar/cuit.py,sha256=VTk00lNLMElbBtfXQdKWeZWFM146pg0s00-PrOvFnzk,2510 
-stdnum/at/__init__.py,sha256=r_GHNGID_RfYlsoJQgPBg90QlS98qDUx2pIolrL54c0,930 -stdnum/at/businessid.py,sha256=467zAw8ptrUyKqOMV2y4ypvgE6Ebi-KmP1Ir5hLX7ks,2268 -stdnum/at/uid.py,sha256=nkEJmgivd6XyIlw1kfK1-qMyVbLNnHvLFUvcgQVl2WA,2455 -stdnum/be/__init__.py,sha256=AFnBXZ3krHNDYawByU4WBfle4KeP9BhJnTCLUdftBwg,868 -stdnum/be/vat.py,sha256=ihZDZVWy-GCngY8GfTomoe8Yt9oPBUB-wsbOACHc07A,2332 -stdnum/bg/__init__.py,sha256=bxXttweQIC5Ihb3-ff8-CqDn4R5CqGT3Fv2sE2FoP6A,872 -stdnum/bg/egn.py,sha256=DbPo9yTqFyp85NkGvfXwVbtQIx6GDHhi9zqrY2y_LF0,3405 -stdnum/bg/pnf.py,sha256=SOSlYJJ6YV821LIwVnT1h2r-Qqg5aYHK0oB8hm-UTwk,2594 -stdnum/bg/vat.py,sha256=iDjrww2kO5zbqH49dGED-DOgDAtmrx-IAUIZ4voekRk,3372 -stdnum/br/__init__.py,sha256=OQFc2gni_td_MKgfozV0MP8WkA01SWIgz1b5oPXYW8c,874 -stdnum/br/cnpj.py,sha256=miOstiZ-DiSTu1np7bGzc5_oG_52W0SHrnpbswnTHjM,2910 -stdnum/br/cpf.py,sha256=Lu1iF9-Z5ndH_RDDKzPjuOfUDNPSpP2AUePgrYqifbY,2650 -stdnum/ch/__init__.py,sha256=ajzUrF-bUK6t_nzy-pa3bVpiRwsXfzF3Zy5gjBqOwV4,866 -stdnum/ch/ssn.py,sha256=RN6iemQZsY8NEMey7X5kGnUMRjm-V9qe_jC9v6z3rdI,2269 -stdnum/ch/uid.py,sha256=OiY9aDLUG3ZXJVQL8YMf68nWYY_saUashtVzFb5HfmU,3006 -stdnum/ch/vat.py,sha256=2k3sgPPedUY_YS3vNfVd7WZjgZgGEbz1uX1k84RANT4,2682 -stdnum/cl/__init__.py,sha256=k4gQTKZ2H1mELvi4eHc3rX8xsZW03sikEK7SWlnv8KY,969 -stdnum/cl/rut.py,sha256=pfLV8yRPYjLTG6JMvqylQVl2Vin7YWy_DULjlX96Aso,2931 -stdnum/cn/__init__.py,sha256=w_trIsTMjk98Ylbb69p9Z6GxePO89adNQNOLAajAprw,877 -stdnum/cn/loc.dat,sha256=aNkZlxJwb6ZQnnpEwklfaQuYdqg1Lidx_e5fc0EveAQ,251803 -stdnum/cn/ric.py,sha256=DmToYCk2CRPmU2JPuG2ARUlqv4eDBHBrjUluTsRGU9E,3614 -stdnum/co/__init__.py,sha256=fl_6cHpeWB_myZG8VFP16a0_mkOR981-wPi-chNb0Ko,973 -stdnum/co/nit.py,sha256=Xn-izpqX737fGYAB94Q15D9c9yTOayiyXJnvgYp68Mw,2705 -stdnum/cy/__init__.py,sha256=aaNIXKyro60dmmIN8Ml7LHKMVNWNGz6eZ9TFI3TwwxI,868 -stdnum/cy/vat.py,sha256=n6Ya3yNvq1AmgXsXorZuxoW7MaJ5xhxSByfs4GVXpmc,2746 -stdnum/cz/__init__.py,sha256=Jf1GqLSBb-ETmYdWbiW6f1JjR2_rGnm7768EO0hIHl4,924 
-stdnum/cz/dic.py,sha256=VYW_2pFYvVnkG-JtaASQCYx8YJvn3VW0SlEyEHY8wmQ,3546 -stdnum/cz/rc.py,sha256=ZypdgkDkTUmk7qkUTXsbQnZnMNX5wDPjXAqkiw6uiQY,3847 -stdnum/de/__init__.py,sha256=d1I_7iH0wjW55WHn7Jef4R1yFNmp5CO0V8PpsZTFNTA,866 -stdnum/de/vat.py,sha256=qbnl_3wYwI-tqWvo3EQN-5mZss1ZDCIlbWrZD34leHo,2168 -stdnum/de/wkn.py,sha256=rgYeYJBtHpRj3cMMc7BqgRfHiQ11NIQZT73KVY84XqE,2375 -stdnum/dk/__init__.py,sha256=hMFmM1es-poRnd5ejcInk5KEpgsHt6b3WB6dcHUfU5A,926 -stdnum/dk/cpr.py,sha256=jG7fcHh19zgLwKXMhdujAYPwR_j5WlCtSf2XXMPm61E,3521 -stdnum/dk/cvr.py,sha256=8doGPaRS8OotDfHtrQJN47O_I8zwJg9MgvAfd0aqWUc,2282 -stdnum/do/__init__.py,sha256=ooRbXhR5CDuoUaQMQJqKm1YA7Mq-u-EyuvH6Djj265E,924 -stdnum/do/cedula.py,sha256=Be1MvZ_QT9tW6Cu8qDTeGQWxRxViag_gBO447IKNgrk,7985 -stdnum/do/rnc.py,sha256=F9FTGo9csyhV_5Wwh92tWfuhIH2FJFobb86b3sDYh2Q,2477 -stdnum/ec/__init__.py,sha256=pmQS8bJHDkk64JitleopUeKSyJtxgfajs2uo1BqMlLA,935 -stdnum/ec/ci.py,sha256=bcJVKLB0eZRN3PnI69iPjCNaDtA7obmVDYlUo4PYHdw,2580 -stdnum/ec/ruc.py,sha256=CkpjTEcPZBQOCKVATMA8Ku0vBBSUGEDKFwXe8zSy_2o,3181 -stdnum/ee/__init__.py,sha256=0ZsDmLgf7PQYcEoALfRvAxdgP5sWYhk62fklJkxRHPE,931 -stdnum/ee/ik.py,sha256=B46kSLTlY_Xbkeguw3KmTwwslZhb9x3dxVFn6cXajt0,3442 -stdnum/ee/kmkr.py,sha256=ubMUz9Lqkw3EX3zuQ6-NQ0jpcrBjZnAHZSY-Z9uWurw,2242 -stdnum/es/__init__.py,sha256=V874ddYCnxTE7IWvffSDU0rsGdnC8tArK0rwVNHSZV8,928 -stdnum/es/cif.py,sha256=GR7IB_BuVnv5Mcsw0bsEgj4dCM2jQ8_P37tf0kLsiAU,3738 -stdnum/es/dni.py,sha256=s1JeSd9s4pzGOZ3wBfQGFf26kx4jPKAT9gYGTyLd8dw,2539 -stdnum/es/nie.py,sha256=UykIg7vk6o9Z1berfex1AM-zGSmfNxxyefBdyiXt6ck,2425 -stdnum/es/nif.py,sha256=tcHNLd7X3twamyBRhUV-AQ7t708B3n7_DsI3LcIaFcs,2767 -stdnum/eu/__init__.py,sha256=Eyl7TknedZQYKk4gpxdMpZU8zG2Oz3O0WKBhB6K7I9s,882 -stdnum/eu/at_02.py,sha256=oa4aGXm9lDeqajlD1OjeS2QhEgiE3637Gln9tse12dg,2733 -stdnum/eu/vat.py,sha256=vrBv7nhnpkKSWWkMth2CXMd6wWLZY6DaYn_ixskNHeQ,5969 -stdnum/fi/__init__.py,sha256=97WbsoWuneYA1HX7AVtb084QWdwapqsPtUbOhQGqg6Q,1013 
-stdnum/fi/alv.py,sha256=3JE42x4UbX_ObnISulyZpelAbf6JZmO6olvMGnHSGnw,2257 -stdnum/fi/associationid.py,sha256=gIL3VcSYXkpZKmulIheWxzZTv-Zpz49KMtHV09Q19bo,2983 -stdnum/fi/hetu.py,sha256=r3wUsXsdA1EfSU6S6D9gynIjzhsrncSMGzhjMlHYLbE,3658 -stdnum/fi/ytunnus.py,sha256=2iIEqY6269TOJelP9GAuIgFixEKDvaeXv5Vy5yKs71g,2060 -stdnum/fr/__init__.py,sha256=0LjB9kIrxQvHDQzsrtcgDFW_QeL3TkiWW1xP_x1VZ_w,926 -stdnum/fr/siren.py,sha256=Qb8Ytju6wxGNdJ5AV0x7BY_mEZxeE4ZN1eNl6GzV2L0,2733 -stdnum/fr/tva.py,sha256=YyzJFN7LtyUWNboY0kIRyPZJwAEw10vIxi9OEbMnv5o,3364 -stdnum/gb/__init__.py,sha256=ZJ6CGiJ8FWTvgjeO-ZySQfdQvJ6IDI2x-NIeR8ZbjB0,882 -stdnum/gb/sedol.py,sha256=Z-4KkGLCHGIgR02-P_4V0yUs-BXTxz-b8A8ToLYCjiM,2819 -stdnum/gb/vat.py,sha256=uvtBTZ6WABxpVWCJju5xtg7ES-5ldo72PaUcH7i0CaY,4329 -stdnum/gr/__init__.py,sha256=N-2f6pGt7Q0weSyb6hzGWwgqUUjYZfPxnCO5sDJneu4,864 -stdnum/gr/vat.py,sha256=klDGUe9zvyO1D4tlQmCj_iBMHI0OB-e9a8WBhlkEFzc,2540 -stdnum/hr/__init__.py,sha256=TVWlF4AKNSNlJRYA59HwYZXV_KTe7LmXuWPjwibQ0uE,930 -stdnum/hr/oib.py,sha256=Jp8dRQJc1eaI3kksNNA4bRfIGgxbvXennk9sTgMigG4,2294 -stdnum/hu/__init__.py,sha256=eoIHV4_kuEfmgYyhqvX_KwN6QYjWK2yIaRZRS8TDQE8,933 -stdnum/hu/anum.py,sha256=1XhAnlWhwSefa7tq4BKSQt6eOfVeVd1eRoJhjYDjMiw,2391 -stdnum/ie/__init__.py,sha256=qRZkaTMOl9PxzcpdegS7QA9osO_J1Hs-NTMz1VTcEPs,864 -stdnum/ie/pps.py,sha256=GUGIeMHAb5tzBmaYPoCKWu-dehHPR-mHS_aF73K82Tw,3190 -stdnum/ie/vat.py,sha256=d2pOR0YUgzNpeUpvFs9hWbyIpvuSRRh4ZlYp4HxnpTs,3063 -stdnum/is_/__init__.py,sha256=M3aBBHbuCvU8nluAgDDDEZ-DscUtI5ph9tHfVatpQaI,934 -stdnum/is_/kennitala.py,sha256=vskt-8un8cCVXjx_tUH_ibFrFdog9oNpbIjNSgj1oQo,3673 -stdnum/is_/vsk.py,sha256=7J3U20qzvM18rTsFSRkx92pmZyZWpwxN3bMFZLVsgYg,1980 -stdnum/iso7064/__init__.py,sha256=SEyBlYRMp0ONFnKtwZunDJfCMh7mBWMfulnmQDc45AE,1363 -stdnum/iso7064/mod_11_10.py,sha256=rxrV_iNC23BZb8WpEDVF7d_zQ6j7EuUCz6orAXGfxP4,2129 -stdnum/iso7064/mod_11_2.py,sha256=GcasgSuH326MRDDFGSQSKyIG79jrlwidCqBtXp5rXT8,2157 
-stdnum/iso7064/mod_37_2.py,sha256=5PTXqmUD9_GkGZt7F4itXzXb0kKzEo8-yOMT_c9HMzY,2549 -stdnum/iso7064/mod_37_36.py,sha256=zLUt_U_-mbybSfzc3-jUlhDiatJ5mPmpXVdbb4OgPlo,2603 -stdnum/iso7064/mod_97_10.py,sha256=N3l4LXos97YF7Mg-I088FLe80iklgjfgJy5Ypx_WmHI,2120 -stdnum/it/__init__.py,sha256=J9pKHC-zHv3zkbrNBkRFjxvE7jddZWnl71qsN-cmu2U,928 -stdnum/it/codicefiscale.py,sha256=4WapF77itvpPWZP3dO0a5M4rh6kWjtr0xNCievSJLUs,5304 -stdnum/it/iva.py,sha256=drbvZ13EHgmA0eZXPMzddXHT54A0R0DSZ6CBV79yuT8,2527 -stdnum/lt/__init__.py,sha256=GieIRf4oaWX0H_IdsqTj8Vt2F36futkLGKFOQa7FDt8,934 -stdnum/lt/pvm.py,sha256=8orIx3s64UoCyfTZjt6EkClC0DE3R-sSkJF-zyWslT0,3070 -stdnum/lu/__init__.py,sha256=lL6AyAN9e4C4L925ORCBsyMtLSmS0qHmzKooR_WQUJ8,940 -stdnum/lu/tva.py,sha256=MriEGmr6b9W57vCooY67rEPmdtptHMMuxFUgoPA8Qg8,2391 -stdnum/lv/__init__.py,sha256=A_aZTbMPPRruo3agBwm0z0ndVUq_7PKPYiSKdqFa_xo,928 -stdnum/lv/pvn.py,sha256=UOqtEzYtKwUcXeam-OTUY_ejBRPaGHdNvv-mZLzinBs,3945 -stdnum/mt/__init__.py,sha256=rLVwjU64pROIUOfHvNWvV4sLSrfLuYXmrQB-HhZJ-ME,868 -stdnum/mt/vat.py,sha256=_eCLoTN3LKQgeAqSKNx54_xO6sg_WTmQ_Bz5YEy-pwE,2269 -stdnum/mx/__init__.py,sha256=Ly1sWF58JC4_TefHmCP4qSHkSagc1OuVY_-nuJEbNtA,928 -stdnum/mx/rfc.py,sha256=i0bYkZ-QQPNolIjqsns6DuCpF9lH7OiP2BmT0_5zirg,5274 -stdnum/my/__init__.py,sha256=8I7UPXtTDgbydzOtTnfFOkeznBZetfjT4T1reaV7XIk,872 -stdnum/my/bp.dat,sha256=mnUtw5ZOtYFGi0ZnT6eEotu7I3bx1Nhunhn-d08HoNY,8039 -stdnum/my/nric.py,sha256=XirP8AABRcVe1qYtRdmKIyDaDJD_Lua8KCx6BfsVLfY,3695 -stdnum/nl/__init__.py,sha256=VP1TcxS2yQpBFKMElgO80M9jDy7HxnUFvDZNohFwagg,969 -stdnum/nl/brin.py,sha256=3Ys_voHB5e0WeGhPhfIYis-cntG3Gqmw8WBjaFFNaQM,2653 -stdnum/nl/bsn.py,sha256=2m3SN7jk6doPsXoba_oufMcVa-V-Hiy4WHRnRrPfWu4,2697 -stdnum/nl/btw.py,sha256=JLY-mBRr5faJfmqHIvmHcEfOdwVYANH3C3dL65shefE,2330 -stdnum/nl/onderwijsnummer.py,sha256=Tg7_ndGACoNPfmDwDBg9kplCKM0yOwgP3ku6KYZXSUQ,2379 -stdnum/nl/postcode.py,sha256=P1RS79K6JHrzUPDd3FIWLPDBC1-3zD76o6LIZjdDO4s,2348 
-stdnum/no/__init__.py,sha256=AXtJKR9rW9dS8Kf9FiTltl6K9NU_3-NkEcFPZd7DadQ,933 -stdnum/no/mva.py,sha256=AsSk5bgtxfsVHUciOPgufP-zeT78nl47Do8yS08pyPE,2335 -stdnum/no/orgnr.py,sha256=16UaXSImtko99SZhAqI0QKp73GKY0AfHFuTGxqzF45E,2498 -stdnum/pl/__init__.py,sha256=XoO7LHFdmrh_SXd7reHvl71Vm7N09h-xrEWrLr4_zn8,926 -stdnum/pl/nip.py,sha256=tK_3JYqzlR7kPUMrdWz6oQ2F-7F8eEgq65aFck2edSM,2543 -stdnum/pl/pesel.py,sha256=pbskhtJROxLKSbrb9FsKU3Ce-3rQGJt0Tx-F8wuuX20,3681 -stdnum/pl/regon.py,sha256=cnS4gg-wfCE_L6_6dVmoABgS14yG80yM2Xmrrq66bQU,3216 -stdnum/pt/__init__.py,sha256=eoFj6h8iammwVStEjhDIHt3zQizg37srmdOyAmL7JvI,934 -stdnum/pt/nif.py,sha256=F85S4bqUIyDQZ-ZP6xWcdwXrvtk0zf7TZAvpmhsBWsA,2504 -stdnum/ro/__init__.py,sha256=MfNYNCocLg17IPxbsYZDxyvV3VL6jfULVlss73zUH7U,929 -stdnum/ro/cf.py,sha256=4qZajq7V_3JAwSkg6fXRGJhNIgLqIGvOZ4fEqDCiB4M,2740 -stdnum/ro/cnp.py,sha256=dYT4JTxs5O5uAwj_q6-WBdQkeqtS-BzKG_Tf7tDvXUU,3541 -stdnum/ru/__init__.py,sha256=dZ2jl-7tnVVV-cADmcZQFWtTcx8UMuWRtS8dOoO5Ggs,868 -stdnum/ru/inn.py,sha256=SbLQosWNAF3HATURXTco3EoMmgxrsQA9rH6I7OLrqzE,3155 -stdnum/se/__init__.py,sha256=MITUEyOTAgpUcEdMOelio7Lk27-CBUdQONSOEZgXS-A,868 -stdnum/se/orgnr.py,sha256=G_1MC_xRQiAOozjLVwYv9ki-84s-wo1Vk1TSlWVZFBY,2405 -stdnum/se/vat.py,sha256=3PVixnYuvGrxZL9PUpY5ZUfyN-N834hJP1iB7IZowYU,2207 -stdnum/si/__init__.py,sha256=bc9BCtt1y1srV_0p8p6kyDJBLhJp-uzDmq3vMm_jFkw,932 -stdnum/si/ddv.py,sha256=PUsxxLj6II6LiQ5yQXOM6nvTZq_hbVJew5E98pmaR4s,2574 -stdnum/sk/__init__.py,sha256=n_7mHUne9UbhAfJ3-Y11wBvtMDIWYpiNGhYx4nUdIkE,926 -stdnum/sk/dph.py,sha256=ylD6zjMSDmXxKklx07hq-q8dlppoM_fIe-8GzS20aRM,2537 -stdnum/sk/rc.py,sha256=Rsh6bhd7tXnwX3dmbB57WecYZpkg47hFQ7dthYtjslY,1903 -stdnum/sm/__init__.py,sha256=zmDH7EDPCwI_57v5LHg-o-kzzuTeFJLrkDjm4Rcmbxk,934 -stdnum/sm/coe.py,sha256=-g4m2bk9xFsmSxn6hGMyyQHIVr4RKR6BSM4B7AyrqCA,2586 -stdnum/us/__init__.py,sha256=PpQA2ZV75PbafUYxNV7fnZvgS4lkmfZde1_NIHndKa8,880 -stdnum/us/atin.py,sha256=H1Noy8_EOYzUVFpEjM7f2KSSNCKGMVvQJhBNOpGyGOs,2489 
-stdnum/us/ein.dat,sha256=410wGA0zInHLRclhAfnNqH2HK86CT5g8nEdNm7AhbfA,646 -stdnum/us/ein.py,sha256=YBSKtMHcgfE3RMDxgBicT8xBsCFPQq8OMdela94HkZw,2948 -stdnum/us/itin.py,sha256=6NozHaOb2cK9ilwugH6EUMOTsNOjUohnLI5JkTQfx-c,3118 -stdnum/us/ptin.py,sha256=Yo_a0l4lxEc4Kr5xm_Brc9u45nj_AZim6NOp0GN4wP4,2169 -stdnum/us/rtn.py,sha256=cQ05B3awWjdG3mPV2mvyoGZFO52Xui-JSIewO5_wsn0,2650 -stdnum/us/ssn.py,sha256=Gcd_5holopKVWMsqwAs4av1txPP6GscXbo0R2MBv9n8,3080 -stdnum/us/tin.py,sha256=VnlwP8ZOFIr1hzRuC3Zu_JAIA0CCtQXnrAFXiEYivVk,3015 -python_stdnum-1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -stdnum/bg/__pycache__/egn.cpython-34.pyc,, -stdnum/eu/__pycache__/vat.cpython-34.pyc,, -stdnum/bg/__pycache__/__init__.cpython-34.pyc,, -stdnum/co/__pycache__/__init__.cpython-34.pyc,, -stdnum/de/__pycache__/__init__.cpython-34.pyc,, -stdnum/gb/__pycache__/vat.cpython-34.pyc,, -stdnum/no/__pycache__/__init__.cpython-34.pyc,, -stdnum/__pycache__/iso6346.cpython-34.pyc,, -stdnum/fi/__pycache__/alv.cpython-34.pyc,, -stdnum/de/__pycache__/wkn.cpython-34.pyc,, -stdnum/br/__pycache__/cnpj.cpython-34.pyc,, -stdnum/it/__pycache__/codicefiscale.cpython-34.pyc,, -stdnum/mt/__pycache__/vat.cpython-34.pyc,, -stdnum/iso7064/__pycache__/__init__.cpython-34.pyc,, -stdnum/ec/__pycache__/ci.cpython-34.pyc,, -stdnum/cl/__pycache__/__init__.cpython-34.pyc,, -stdnum/sk/__pycache__/dph.cpython-34.pyc,, -stdnum/is_/__pycache__/vsk.cpython-34.pyc,, -stdnum/pl/__pycache__/regon.cpython-34.pyc,, -stdnum/hr/__pycache__/__init__.cpython-34.pyc,, -stdnum/at/__pycache__/uid.cpython-34.pyc,, -stdnum/mt/__pycache__/__init__.cpython-34.pyc,, -stdnum/be/__pycache__/__init__.cpython-34.pyc,, -stdnum/fr/__pycache__/__init__.cpython-34.pyc,, -stdnum/__pycache__/ean.cpython-34.pyc,, -stdnum/iso7064/__pycache__/mod_37_2.cpython-34.pyc,, -stdnum/bg/__pycache__/vat.cpython-34.pyc,, -stdnum/ar/__pycache__/__init__.cpython-34.pyc,, -stdnum/__pycache__/isil.cpython-34.pyc,, 
-stdnum/cn/__pycache__/ric.cpython-34.pyc,, -stdnum/gr/__pycache__/__init__.cpython-34.pyc,, -stdnum/br/__pycache__/cpf.cpython-34.pyc,, -stdnum/dk/__pycache__/__init__.cpython-34.pyc,, -stdnum/co/__pycache__/nit.cpython-34.pyc,, -stdnum/fr/__pycache__/siren.cpython-34.pyc,, -stdnum/mx/__pycache__/__init__.cpython-34.pyc,, -stdnum/at/__pycache__/businessid.cpython-34.pyc,, -stdnum/us/__pycache__/ssn.cpython-34.pyc,, -stdnum/fi/__pycache__/hetu.cpython-34.pyc,, -stdnum/es/__pycache__/nif.cpython-34.pyc,, -stdnum/at/__pycache__/__init__.cpython-34.pyc,, -stdnum/us/__pycache__/itin.cpython-34.pyc,, -stdnum/lv/__pycache__/__init__.cpython-34.pyc,, -stdnum/cy/__pycache__/__init__.cpython-34.pyc,, -stdnum/do/__pycache__/rnc.cpython-34.pyc,, -stdnum/hu/__pycache__/anum.cpython-34.pyc,, -stdnum/nl/__pycache__/__init__.cpython-34.pyc,, -stdnum/__pycache__/isan.cpython-34.pyc,, -stdnum/cl/__pycache__/rut.cpython-34.pyc,, -stdnum/ar/__pycache__/cuit.cpython-34.pyc,, -stdnum/__pycache__/numdb.cpython-34.pyc,, -stdnum/us/__pycache__/tin.cpython-34.pyc,, -stdnum/fi/__pycache__/associationid.cpython-34.pyc,, -stdnum/al/__pycache__/__init__.cpython-34.pyc,, -stdnum/be/__pycache__/vat.cpython-34.pyc,, -stdnum/__pycache__/exceptions.cpython-34.pyc,, -stdnum/es/__pycache__/cif.cpython-34.pyc,, -stdnum/lu/__pycache__/tva.cpython-34.pyc,, -stdnum/es/__pycache__/__init__.cpython-34.pyc,, -stdnum/__pycache__/isin.cpython-34.pyc,, -stdnum/ch/__pycache__/uid.cpython-34.pyc,, -stdnum/hu/__pycache__/__init__.cpython-34.pyc,, -stdnum/__pycache__/iso9362.cpython-34.pyc,, -stdnum/it/__pycache__/iva.cpython-34.pyc,, -stdnum/lv/__pycache__/pvn.cpython-34.pyc,, -stdnum/__pycache__/__init__.cpython-34.pyc,, -stdnum/it/__pycache__/__init__.cpython-34.pyc,, -stdnum/ie/__pycache__/vat.cpython-34.pyc,, -stdnum/ro/__pycache__/__init__.cpython-34.pyc,, -stdnum/do/__pycache__/cedula.cpython-34.pyc,, -stdnum/gb/__pycache__/sedol.cpython-34.pyc,, -stdnum/my/__pycache__/__init__.cpython-34.pyc,, 
-stdnum/es/__pycache__/nie.cpython-34.pyc,, -stdnum/ee/__pycache__/ik.cpython-34.pyc,, -stdnum/iso7064/__pycache__/mod_37_36.cpython-34.pyc,, -stdnum/__pycache__/verhoeff.cpython-34.pyc,, -stdnum/__pycache__/issn.cpython-34.pyc,, -stdnum/ru/__pycache__/inn.cpython-34.pyc,, -stdnum/sk/__pycache__/__init__.cpython-34.pyc,, -stdnum/us/__pycache__/__init__.cpython-34.pyc,, -stdnum/iso7064/__pycache__/mod_97_10.cpython-34.pyc,, -stdnum/is_/__pycache__/kennitala.cpython-34.pyc,, -stdnum/__pycache__/meid.cpython-34.pyc,, -stdnum/no/__pycache__/orgnr.cpython-34.pyc,, -stdnum/__pycache__/imei.cpython-34.pyc,, -stdnum/nl/__pycache__/btw.cpython-34.pyc,, -stdnum/ec/__pycache__/__init__.cpython-34.pyc,, -stdnum/pt/__pycache__/nif.cpython-34.pyc,, -stdnum/sm/__pycache__/coe.cpython-34.pyc,, -stdnum/se/__pycache__/__init__.cpython-34.pyc,, -stdnum/eu/__pycache__/at_02.cpython-34.pyc,, -stdnum/us/__pycache__/atin.cpython-34.pyc,, -stdnum/al/__pycache__/nipt.cpython-34.pyc,, -stdnum/us/__pycache__/rtn.cpython-34.pyc,, -stdnum/se/__pycache__/orgnr.cpython-34.pyc,, -stdnum/__pycache__/ismn.cpython-34.pyc,, -stdnum/bg/__pycache__/pnf.cpython-34.pyc,, -stdnum/fr/__pycache__/tva.cpython-34.pyc,, -stdnum/ie/__pycache__/pps.cpython-34.pyc,, -stdnum/br/__pycache__/__init__.cpython-34.pyc,, -stdnum/gb/__pycache__/__init__.cpython-34.pyc,, -stdnum/do/__pycache__/__init__.cpython-34.pyc,, -stdnum/dk/__pycache__/cvr.cpython-34.pyc,, -stdnum/__pycache__/grid.cpython-34.pyc,, -stdnum/ro/__pycache__/cf.cpython-34.pyc,, -stdnum/__pycache__/luhn.cpython-34.pyc,, -stdnum/ch/__pycache__/__init__.cpython-34.pyc,, -stdnum/nl/__pycache__/bsn.cpython-34.pyc,, -stdnum/se/__pycache__/vat.cpython-34.pyc,, -stdnum/__pycache__/iban.cpython-34.pyc,, -stdnum/fi/__pycache__/__init__.cpython-34.pyc,, -stdnum/__pycache__/cusip.cpython-34.pyc,, -stdnum/__pycache__/imo.cpython-34.pyc,, -stdnum/ru/__pycache__/__init__.cpython-34.pyc,, -stdnum/de/__pycache__/vat.cpython-34.pyc,, 
-stdnum/iso7064/__pycache__/mod_11_2.cpython-34.pyc,, -stdnum/pl/__pycache__/__init__.cpython-34.pyc,, -stdnum/nl/__pycache__/onderwijsnummer.cpython-34.pyc,, -stdnum/dk/__pycache__/cpr.cpython-34.pyc,, -stdnum/us/__pycache__/ptin.cpython-34.pyc,, -stdnum/nl/__pycache__/postcode.cpython-34.pyc,, -stdnum/lu/__pycache__/__init__.cpython-34.pyc,, -stdnum/ee/__pycache__/__init__.cpython-34.pyc,, -stdnum/cz/__pycache__/rc.cpython-34.pyc,, -stdnum/is_/__pycache__/__init__.cpython-34.pyc,, -stdnum/nl/__pycache__/brin.cpython-34.pyc,, -stdnum/my/__pycache__/nric.cpython-34.pyc,, -stdnum/cy/__pycache__/vat.cpython-34.pyc,, -stdnum/lt/__pycache__/pvm.cpython-34.pyc,, -stdnum/us/__pycache__/ein.cpython-34.pyc,, -stdnum/cz/__pycache__/__init__.cpython-34.pyc,, -stdnum/__pycache__/isbn.cpython-34.pyc,, -stdnum/ro/__pycache__/cnp.cpython-34.pyc,, -stdnum/cz/__pycache__/dic.cpython-34.pyc,, -stdnum/si/__pycache__/ddv.cpython-34.pyc,, -stdnum/hr/__pycache__/oib.cpython-34.pyc,, -stdnum/mx/__pycache__/rfc.cpython-34.pyc,, -stdnum/iso7064/__pycache__/mod_11_10.cpython-34.pyc,, -stdnum/ie/__pycache__/__init__.cpython-34.pyc,, -stdnum/sk/__pycache__/rc.cpython-34.pyc,, -stdnum/gr/__pycache__/vat.cpython-34.pyc,, -stdnum/pl/__pycache__/nip.cpython-34.pyc,, -stdnum/pt/__pycache__/__init__.cpython-34.pyc,, -stdnum/ch/__pycache__/ssn.cpython-34.pyc,, -stdnum/es/__pycache__/dni.cpython-34.pyc,, -stdnum/__pycache__/imsi.cpython-34.pyc,, -stdnum/__pycache__/util.cpython-34.pyc,, -stdnum/eu/__pycache__/__init__.cpython-34.pyc,, -stdnum/lt/__pycache__/__init__.cpython-34.pyc,, -stdnum/sm/__pycache__/__init__.cpython-34.pyc,, -stdnum/si/__pycache__/__init__.cpython-34.pyc,, -stdnum/fi/__pycache__/ytunnus.cpython-34.pyc,, -stdnum/cn/__pycache__/__init__.cpython-34.pyc,, -stdnum/pl/__pycache__/pesel.cpython-34.pyc,, -stdnum/no/__pycache__/mva.cpython-34.pyc,, -stdnum/ec/__pycache__/ruc.cpython-34.pyc,, -stdnum/ch/__pycache__/vat.cpython-34.pyc,, -stdnum/ee/__pycache__/kmkr.cpython-34.pyc,, diff 
--git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/WHEEL deleted file mode 100644 index 6d9801a..0000000 --- a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: cp34-none-any - diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/metadata.json deleted file mode 100644 index 29f66b0..0000000 --- a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Financial and Insurance Industry", "Intended Audience :: Information Technology", "Intended Audience :: Telecommunications Industry", "License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Office/Business :: Financial", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: General"], "extensions": {"python.details": {"contacts": [{"email": "arthur@arthurdejong.org", "name": "Arthur de Jong", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://arthurdejong.org/python-stdnum/"}}}, "extras": ["VIES", "VIES-ALT"], "generator": "bdist_wheel (0.29.0)", "license": "LGPL", "metadata_version": "2.0", "name": "python-stdnum", "run_requires": [{"extra": "VIES-ALT", "requires": ["PySimpleSOAP"]}, 
{"extra": "VIES", "requires": ["suds"]}], "summary": "Python module to handle standardized numbers and codes", "version": "1.2"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/PKG-INFO new file mode 100644 index 0000000..0fcbd15 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/PKG-INFO @@ -0,0 +1,160 @@ +Metadata-Version: 2.1 +Name: python-stdnum +Version: 1.2 +Summary: Python module to handle standardized numbers and codes +Home-page: http://arthurdejong.org/python-stdnum/ +Author: Arthur de Jong +Author-email: arthur@arthurdejong.org +License: LGPL +Description: Parse, validate and reformat standard numbers and codes. + + This library offers functions for parsing, validating and reformatting + standard numbers and codes in various formats. + + Currently this package supports the following formats: + + * al.nipt: NIPT (Numri i Identifikimit për Personin e Tatueshëm, Albanian VAT number) + * ar.cuit: CUIT (Código Único de Identificación Tributaria, Argentinian tax number) + * at.businessid: Austrian Company Register Numbers + * at.uid: UID (Umsatzsteuer-Identifikationsnummer, Austrian VAT number) + * be.vat: BTW, TVA, NWSt (Belgian VAT number) + * bg.egn: EGN (ЕГН, Единен граждански номер, Bulgarian personal identity codes) + * bg.pnf: PNF (ЛНЧ, Личен номер на чужденец, Bulgarian number of a foreigner) + * bg.vat: VAT (Идентификационен номер по ДДС, Bulgarian VAT number) + * br.cnpj: CNPJ (Cadastro Nacional da Pessoa Jurídica, Brazillian company identifier) + * br.cpf: CPF (Cadastro de Pessoas Físicas, Brazillian national identifier) + * ch.ssn: Swiss social security number ("Sozialversicherungsnummer") + * ch.uid: UID (Unternehmens-Identifikationsnummer, Swiss business identifier) + * ch.vat: VAT, MWST, TVA, IVA, TPV (Mehrwertsteuernummer, the Swiss VAT number) + * cl.rut: RUT (Rol Único Tributario, 
Chilean national tax number) + * cn.ric: RIC No. (Chinese Resident Identity Card Number) + * co.nit: NIT (Número De Identificación Tributaria, Colombian identity code) + * cusip: CUSIP number (financial security identification number) + * cy.vat: Αριθμός Εγγραφής Φ.Π.Α. (Cypriot VAT number) + * cz.dic: DIČ (Daňové identifikační číslo, Czech VAT number) + * cz.rc: RČ (Rodné číslo, the Czech birth number) + * de.vat: Ust ID Nr. (Umsatzsteur Identifikationnummer, German VAT number) + * de.wkn: Wertpapierkennnummer (German securities identification code) + * dk.cpr: CPR (personnummer, the Danish citizen number) + * dk.cvr: CVR (Momsregistreringsnummer, Danish VAT number) + * do.cedula: Cedula (Dominican Republic national identification number) + * do.rnc: RNC (Registro Nacional del Contribuyente, Dominican Republic tax number) + * ean: EAN (International Article Number) + * ec.ci: CI (Cédula de identidad, Ecuadorian personal identity code) + * ec.ruc: RUC (Registro Único de Contribuyentes, Ecuadorian company tax number) + * ee.ik: Isikukood (Estonian Personcal ID number) + * ee.kmkr: KMKR (Käibemaksukohuslase, Estonian VAT number) + * es.cif: CIF (Certificado de Identificación Fiscal, Spanish company tax number) + * es.dni: DNI (Documento nacional de identidad, Spanish personal identity codes) + * es.nie: NIE (Número de Identificación de Extranjeros, Spanish foreigner number) + * es.nif: NIF (Número de Identificación Fiscal, Spanish VAT number) + * eu.at_02: SEPA Identifier of the Creditor (AT-02) + * eu.vat: VAT (European Union VAT number) + * fi.alv: ALV nro (Arvonlisäveronumero, Finnish VAT number) + * fi.associationid: Finnish Association Identifier + * fi.hetu: HETU (Henkilötunnus, Finnish personal identity code) + * fi.ytunnus: Y-tunnus (Finnish business identifier) + * fr.siren: SIREN (a French company identification number) + * fr.tva: n° TVA (taxe sur la valeur ajoutée, French VAT number) + * gb.sedol: SEDOL number (Stock Exchange Daily Official List number) + 
* gb.vat: VAT (United Kingdom (and Isle of Man) VAT registration number) + * gr.vat: FPA, ΦΠΑ, ΑΦΜ (Αριθμός Φορολογικού Μητρώου, the Greek VAT number) + * grid: GRid (Global Release Identifier) + * hr.oib: OIB (Osobni identifikacijski broj, Croatian identification number) + * hu.anum: ANUM (Közösségi adószám, Hungarian VAT number) + * iban: IBAN (International Bank Account Number) + * ie.pps: PPS No (Personal Public Service Number, Irish personal number) + * ie.vat: VAT (Irish VAT number) + * imei: IMEI (International Mobile Equipment Identity) + * imo: IMO number (International Maritime Organization number) + * imsi: IMSI (International Mobile Subscriber Identity) + * is_.kennitala: Kennitala (Icelandic personal and organisation identity code) + * is_.vsk: VSK number (Virðisaukaskattsnúmer, Icelandic VAT number) + * isan: ISAN (International Standard Audiovisual Number) + * isbn: ISBN (International Standard Book Number) + * isil: ISIL (International Standard Identifier for Libraries) + * isin: ISIN (International Securities Identification Number) + * ismn: ISMN (International Standard Music Number) + * iso6346: ISO 6346 (International standard for container identification) + * iso9362: ISO 9362 (Business identifier codes) + * issn: ISSN (International Standard Serial Number) + * it.codicefiscale: Codice Fiscale (Italian tax code for individuals) + * it.iva: Partita IVA (Italian VAT number) + * lt.pvm: PVM (Pridėtinės vertės mokestis mokėtojo kodas, Lithuanian VAT number) + * lu.tva: TVA (taxe sur la valeur ajoutée, Luxembourgian VAT number) + * lv.pvn: PVN (Pievienotās vērtības nodokļa, Latvian VAT number) + * meid: MEID (Mobile Equipment Identifier) + * mt.vat: VAT (Maltese VAT number) + * mx.rfc: RFC (Registro Federal de Contribuyentes, Mexican tax number) + * my.nric: NRIC No. 
(Malaysian National Registration Identity Card Number) + * nl.brin: Brin number (Dutch number for schools) + * nl.bsn: BSN (Burgerservicenummer, Dutch national identification number) + * nl.btw: BTW-nummer (Omzetbelastingnummer, the Dutch VAT number) + * nl.onderwijsnummer: Onderwijsnummer (Dutch student school number) + * nl.postcode: Postcode (Dutch postal code) + * no.mva: MVA (Merverdiavgift, Norwegian VAT number) + * no.orgnr: Orgnr (Organisasjonsnummer, Norwegian organisation number) + * pl.nip: NIP (Numer Identyfikacji Podatkowej, Polish VAT number) + * pl.pesel: PESEL (Polish national identification number) + * pl.regon: REGON (Rejestr Gospodarki Narodowej, Polish register of economic units) + * pt.nif: NIF (Número de identificação fiscal, Portuguese VAT number) + * ro.cf: CF (Cod de înregistrare în scopuri de TVA, Romanian VAT number) + * ro.cnp: CNP (Cod Numeric Personal, Romanian Numerical Personal Code) + * ru.inn: ИНН (Идентификационный номер налогоплательщика, Russian tax identifier) + * se.orgnr: Orgnr (Organisationsnummer, Swedish company number) + * se.vat: VAT (Moms, Mervärdesskatt, Swedish VAT number) + * si.ddv: ID za DDV (Davčna številka, Slovenian VAT number) + * sk.dph: IČ DPH (IČ pre daň z pridanej hodnoty, Slovak VAT number) + * sk.rc: RČ (Rodné číslo, the Slovak birth number) + * sm.coe: COE (Codice operatore economico, San Marino national tax number) + * us.atin: ATIN (U.S. Adoption Taxpayer Identification Number) + * us.ein: EIN (U.S. Employer Identification Number) + * us.itin: ITIN (U.S. Individual Taxpayer Identification Number) + * us.ptin: PTIN (U.S. Preparer Tax Identification Number) + * us.rtn: RTN (Routing transport number) + * us.ssn: SSN (U.S. Social Security Number) + * us.tin: TIN (U.S. 
Taxpayer Identification Number) + + Furthermore a number of generic check digit algorithms are available: + + * iso7064.mod_11_10: The ISO 7064 Mod 11, 10 algorithm + * iso7064.mod_11_2: The ISO 7064 Mod 11, 2 algorithm + * iso7064.mod_37_2: The ISO 7064 Mod 37, 2 algorithm + * iso7064.mod_37_36: The ISO 7064 Mod 37, 36 algorithm + * iso7064.mod_97_10: The ISO 7064 Mod 97, 10 algorithm + * luhn: The Luhn and Luhn mod N algorithms + * verhoeff: The Verhoeff algorithm + + All modules implement a common interface: + + >>> from stdnum import isbn + >>> isbn.validate('978-9024538270') + '9789024538270' + >>> isbn.validate('978-9024538271') + Traceback (most recent call last): + ... + InvalidChecksum: ... + + Apart from the validate() function, modules generally provide extra + parsing, validation, formatting or conversion functions. + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Financial and Insurance Industry +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: Telecommunications Industry +Classifier: License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Topic :: Office/Business :: Financial +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: General +Provides-Extra: VIES +Provides-Extra: VIES-ALT diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/SOURCES.txt 
b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/SOURCES.txt new file mode 100644 index 0000000..da8d355 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/SOURCES.txt @@ -0,0 +1,337 @@ +COPYING +ChangeLog +MANIFEST.in +NEWS +README +getcnloc.py +getiban.py +getimsi.py +getisbn.py +getisil.py +getmybp.py +getnumlist.py +setup.cfg +setup.py +docs/conf.py +docs/index.rst +docs/stdnum.al.nipt.rst +docs/stdnum.ar.cuit.rst +docs/stdnum.at.businessid.rst +docs/stdnum.at.uid.rst +docs/stdnum.be.vat.rst +docs/stdnum.bg.egn.rst +docs/stdnum.bg.pnf.rst +docs/stdnum.bg.vat.rst +docs/stdnum.br.cnpj.rst +docs/stdnum.br.cpf.rst +docs/stdnum.ch.ssn.rst +docs/stdnum.ch.uid.rst +docs/stdnum.ch.vat.rst +docs/stdnum.cl.rut.rst +docs/stdnum.cn.ric.rst +docs/stdnum.co.nit.rst +docs/stdnum.cusip.rst +docs/stdnum.cy.vat.rst +docs/stdnum.cz.dic.rst +docs/stdnum.cz.rc.rst +docs/stdnum.de.vat.rst +docs/stdnum.de.wkn.rst +docs/stdnum.dk.cpr.rst +docs/stdnum.dk.cvr.rst +docs/stdnum.do.cedula.rst +docs/stdnum.do.rnc.rst +docs/stdnum.ean.rst +docs/stdnum.ec.ci.rst +docs/stdnum.ec.ruc.rst +docs/stdnum.ee.ik.rst +docs/stdnum.ee.kmkr.rst +docs/stdnum.es.cif.rst +docs/stdnum.es.dni.rst +docs/stdnum.es.nie.rst +docs/stdnum.es.nif.rst +docs/stdnum.eu.at_02.rst +docs/stdnum.eu.vat.rst +docs/stdnum.exceptions.rst +docs/stdnum.fi.alv.rst +docs/stdnum.fi.associationid.rst +docs/stdnum.fi.hetu.rst +docs/stdnum.fi.ytunnus.rst +docs/stdnum.fr.siren.rst +docs/stdnum.fr.tva.rst +docs/stdnum.gb.sedol.rst +docs/stdnum.gb.vat.rst +docs/stdnum.gr.vat.rst +docs/stdnum.grid.rst +docs/stdnum.hr.oib.rst +docs/stdnum.hu.anum.rst +docs/stdnum.iban.rst +docs/stdnum.ie.pps.rst +docs/stdnum.ie.vat.rst +docs/stdnum.imei.rst +docs/stdnum.imo.rst +docs/stdnum.imsi.rst +docs/stdnum.is_.kennitala.rst +docs/stdnum.is_.vsk.rst +docs/stdnum.isan.rst +docs/stdnum.isbn.rst +docs/stdnum.isil.rst +docs/stdnum.isin.rst +docs/stdnum.ismn.rst +docs/stdnum.iso6346.rst +docs/stdnum.iso7064.rst 
+docs/stdnum.iso9362.rst +docs/stdnum.issn.rst +docs/stdnum.it.codicefiscale.rst +docs/stdnum.it.iva.rst +docs/stdnum.lt.pvm.rst +docs/stdnum.lu.tva.rst +docs/stdnum.luhn.rst +docs/stdnum.lv.pvn.rst +docs/stdnum.meid.rst +docs/stdnum.mt.vat.rst +docs/stdnum.mx.rfc.rst +docs/stdnum.my.nric.rst +docs/stdnum.nl.brin.rst +docs/stdnum.nl.bsn.rst +docs/stdnum.nl.btw.rst +docs/stdnum.nl.onderwijsnummer.rst +docs/stdnum.nl.postcode.rst +docs/stdnum.no.mva.rst +docs/stdnum.no.orgnr.rst +docs/stdnum.pl.nip.rst +docs/stdnum.pl.pesel.rst +docs/stdnum.pl.regon.rst +docs/stdnum.pt.nif.rst +docs/stdnum.ro.cf.rst +docs/stdnum.ro.cnp.rst +docs/stdnum.ru.inn.rst +docs/stdnum.se.orgnr.rst +docs/stdnum.se.vat.rst +docs/stdnum.si.ddv.rst +docs/stdnum.sk.dph.rst +docs/stdnum.sk.rc.rst +docs/stdnum.sm.coe.rst +docs/stdnum.us.atin.rst +docs/stdnum.us.ein.rst +docs/stdnum.us.itin.rst +docs/stdnum.us.ptin.rst +docs/stdnum.us.rtn.rst +docs/stdnum.us.ssn.rst +docs/stdnum.us.tin.rst +docs/stdnum.verhoeff.rst +docs/_templates/autosummary/module.rst +python_stdnum.egg-info/PKG-INFO +python_stdnum.egg-info/SOURCES.txt +python_stdnum.egg-info/dependency_links.txt +python_stdnum.egg-info/pbr.json +python_stdnum.egg-info/requires.txt +python_stdnum.egg-info/top_level.txt +stdnum/__init__.py +stdnum/cusip.py +stdnum/ean.py +stdnum/exceptions.py +stdnum/grid.py +stdnum/iban.dat +stdnum/iban.py +stdnum/imei.py +stdnum/imo.py +stdnum/imsi.dat +stdnum/imsi.py +stdnum/isan.py +stdnum/isbn.dat +stdnum/isbn.py +stdnum/isil.dat +stdnum/isil.py +stdnum/isin.py +stdnum/ismn.py +stdnum/iso6346.py +stdnum/iso9362.py +stdnum/issn.py +stdnum/luhn.py +stdnum/meid.py +stdnum/numdb.py +stdnum/util.py +stdnum/verhoeff.py +stdnum/al/__init__.py +stdnum/al/nipt.py +stdnum/ar/__init__.py +stdnum/ar/cuit.py +stdnum/at/__init__.py +stdnum/at/businessid.py +stdnum/at/uid.py +stdnum/be/__init__.py +stdnum/be/vat.py +stdnum/bg/__init__.py +stdnum/bg/egn.py +stdnum/bg/pnf.py +stdnum/bg/vat.py +stdnum/br/__init__.py 
+stdnum/br/cnpj.py +stdnum/br/cpf.py +stdnum/ch/__init__.py +stdnum/ch/ssn.py +stdnum/ch/uid.py +stdnum/ch/vat.py +stdnum/cl/__init__.py +stdnum/cl/rut.py +stdnum/cn/__init__.py +stdnum/cn/loc.dat +stdnum/cn/ric.py +stdnum/co/__init__.py +stdnum/co/nit.py +stdnum/cy/__init__.py +stdnum/cy/vat.py +stdnum/cz/__init__.py +stdnum/cz/dic.py +stdnum/cz/rc.py +stdnum/de/__init__.py +stdnum/de/vat.py +stdnum/de/wkn.py +stdnum/dk/__init__.py +stdnum/dk/cpr.py +stdnum/dk/cvr.py +stdnum/do/__init__.py +stdnum/do/cedula.py +stdnum/do/rnc.py +stdnum/ec/__init__.py +stdnum/ec/ci.py +stdnum/ec/ruc.py +stdnum/ee/__init__.py +stdnum/ee/ik.py +stdnum/ee/kmkr.py +stdnum/es/__init__.py +stdnum/es/cif.py +stdnum/es/dni.py +stdnum/es/nie.py +stdnum/es/nif.py +stdnum/eu/__init__.py +stdnum/eu/at_02.py +stdnum/eu/vat.py +stdnum/fi/__init__.py +stdnum/fi/alv.py +stdnum/fi/associationid.py +stdnum/fi/hetu.py +stdnum/fi/ytunnus.py +stdnum/fr/__init__.py +stdnum/fr/siren.py +stdnum/fr/tva.py +stdnum/gb/__init__.py +stdnum/gb/sedol.py +stdnum/gb/vat.py +stdnum/gr/__init__.py +stdnum/gr/vat.py +stdnum/hr/__init__.py +stdnum/hr/oib.py +stdnum/hu/__init__.py +stdnum/hu/anum.py +stdnum/ie/__init__.py +stdnum/ie/pps.py +stdnum/ie/vat.py +stdnum/is_/__init__.py +stdnum/is_/kennitala.py +stdnum/is_/vsk.py +stdnum/iso7064/__init__.py +stdnum/iso7064/mod_11_10.py +stdnum/iso7064/mod_11_2.py +stdnum/iso7064/mod_37_2.py +stdnum/iso7064/mod_37_36.py +stdnum/iso7064/mod_97_10.py +stdnum/it/__init__.py +stdnum/it/codicefiscale.py +stdnum/it/iva.py +stdnum/lt/__init__.py +stdnum/lt/pvm.py +stdnum/lu/__init__.py +stdnum/lu/tva.py +stdnum/lv/__init__.py +stdnum/lv/pvn.py +stdnum/mt/__init__.py +stdnum/mt/vat.py +stdnum/mx/__init__.py +stdnum/mx/rfc.py +stdnum/my/__init__.py +stdnum/my/bp.dat +stdnum/my/nric.py +stdnum/nl/__init__.py +stdnum/nl/brin.py +stdnum/nl/bsn.py +stdnum/nl/btw.py +stdnum/nl/onderwijsnummer.py +stdnum/nl/postcode.py +stdnum/no/__init__.py +stdnum/no/mva.py +stdnum/no/orgnr.py 
+stdnum/pl/__init__.py +stdnum/pl/nip.py +stdnum/pl/pesel.py +stdnum/pl/regon.py +stdnum/pt/__init__.py +stdnum/pt/nif.py +stdnum/ro/__init__.py +stdnum/ro/cf.py +stdnum/ro/cnp.py +stdnum/ru/__init__.py +stdnum/ru/inn.py +stdnum/se/__init__.py +stdnum/se/orgnr.py +stdnum/se/vat.py +stdnum/si/__init__.py +stdnum/si/ddv.py +stdnum/sk/__init__.py +stdnum/sk/dph.py +stdnum/sk/rc.py +stdnum/sm/__init__.py +stdnum/sm/coe.py +stdnum/us/__init__.py +stdnum/us/atin.py +stdnum/us/ein.dat +stdnum/us/ein.py +stdnum/us/itin.py +stdnum/us/ptin.py +stdnum/us/rtn.py +stdnum/us/ssn.py +stdnum/us/tin.py +tests/numdb-test.dat +tests/test_al_nipt.doctest +tests/test_ar_cuit.doctest +tests/test_asx.doctest +tests/test_bg_vat.doctest +tests/test_br_cnpj.doctest +tests/test_br_cpf.doctest +tests/test_ch_uid.doctest +tests/test_ch_vat.doctest +tests/test_cl_rut.doctest +tests/test_cn_ric.doctest +tests/test_co_nit.doctest +tests/test_cusip.doctest +tests/test_de_wkn.doctest +tests/test_do_cedula.doctest +tests/test_do_rnc.doctest +tests/test_ean.doctest +tests/test_ec_ci.doctest +tests/test_ec_ruc.doctest +tests/test_ee_ik.doctest +tests/test_eu_vat.doctest +tests/test_fi_hetu.doctest +tests/test_gb_sedol.doctest +tests/test_gb_vat.doctest +tests/test_iban.doctest +tests/test_imei.doctest +tests/test_is_kennitala.doctest +tests/test_isan.doctest +tests/test_isbn.doctest +tests/test_isin.doctest +tests/test_ismn.doctest +tests/test_iso6346.doctest +tests/test_iso7064.doctest +tests/test_it_codicefiscale.doctest +tests/test_luhn.doctest +tests/test_markit_red6.doctest +tests/test_meid.doctest +tests/test_mx_rfc.doctest +tests/test_my_nric.doctest +tests/test_no_mva.doctest +tests/test_pl_regon.doctest +tests/test_robustness.doctest +tests/test_ru_inn.doctest +tests/test_verhoeff.doctest \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/dependency_links.txt 
b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/installed-files.txt new file mode 100644 index 0000000..f2e27b3 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/installed-files.txt @@ -0,0 +1,329 @@ +../stdnum/__init__.py +../stdnum/__pycache__/__init__.cpython-37.pyc +../stdnum/__pycache__/cusip.cpython-37.pyc +../stdnum/__pycache__/ean.cpython-37.pyc +../stdnum/__pycache__/exceptions.cpython-37.pyc +../stdnum/__pycache__/grid.cpython-37.pyc +../stdnum/__pycache__/iban.cpython-37.pyc +../stdnum/__pycache__/imei.cpython-37.pyc +../stdnum/__pycache__/imo.cpython-37.pyc +../stdnum/__pycache__/imsi.cpython-37.pyc +../stdnum/__pycache__/isan.cpython-37.pyc +../stdnum/__pycache__/isbn.cpython-37.pyc +../stdnum/__pycache__/isil.cpython-37.pyc +../stdnum/__pycache__/isin.cpython-37.pyc +../stdnum/__pycache__/ismn.cpython-37.pyc +../stdnum/__pycache__/iso6346.cpython-37.pyc +../stdnum/__pycache__/iso9362.cpython-37.pyc +../stdnum/__pycache__/issn.cpython-37.pyc +../stdnum/__pycache__/luhn.cpython-37.pyc +../stdnum/__pycache__/meid.cpython-37.pyc +../stdnum/__pycache__/numdb.cpython-37.pyc +../stdnum/__pycache__/util.cpython-37.pyc +../stdnum/__pycache__/verhoeff.cpython-37.pyc +../stdnum/al/__init__.py +../stdnum/al/__pycache__/__init__.cpython-37.pyc +../stdnum/al/__pycache__/nipt.cpython-37.pyc +../stdnum/al/nipt.py +../stdnum/ar/__init__.py +../stdnum/ar/__pycache__/__init__.cpython-37.pyc +../stdnum/ar/__pycache__/cuit.cpython-37.pyc +../stdnum/ar/cuit.py +../stdnum/at/__init__.py +../stdnum/at/__pycache__/__init__.cpython-37.pyc +../stdnum/at/__pycache__/businessid.cpython-37.pyc 
+../stdnum/at/__pycache__/uid.cpython-37.pyc +../stdnum/at/businessid.py +../stdnum/at/uid.py +../stdnum/be/__init__.py +../stdnum/be/__pycache__/__init__.cpython-37.pyc +../stdnum/be/__pycache__/vat.cpython-37.pyc +../stdnum/be/vat.py +../stdnum/bg/__init__.py +../stdnum/bg/__pycache__/__init__.cpython-37.pyc +../stdnum/bg/__pycache__/egn.cpython-37.pyc +../stdnum/bg/__pycache__/pnf.cpython-37.pyc +../stdnum/bg/__pycache__/vat.cpython-37.pyc +../stdnum/bg/egn.py +../stdnum/bg/pnf.py +../stdnum/bg/vat.py +../stdnum/br/__init__.py +../stdnum/br/__pycache__/__init__.cpython-37.pyc +../stdnum/br/__pycache__/cnpj.cpython-37.pyc +../stdnum/br/__pycache__/cpf.cpython-37.pyc +../stdnum/br/cnpj.py +../stdnum/br/cpf.py +../stdnum/ch/__init__.py +../stdnum/ch/__pycache__/__init__.cpython-37.pyc +../stdnum/ch/__pycache__/ssn.cpython-37.pyc +../stdnum/ch/__pycache__/uid.cpython-37.pyc +../stdnum/ch/__pycache__/vat.cpython-37.pyc +../stdnum/ch/ssn.py +../stdnum/ch/uid.py +../stdnum/ch/vat.py +../stdnum/cl/__init__.py +../stdnum/cl/__pycache__/__init__.cpython-37.pyc +../stdnum/cl/__pycache__/rut.cpython-37.pyc +../stdnum/cl/rut.py +../stdnum/cn/__init__.py +../stdnum/cn/__pycache__/__init__.cpython-37.pyc +../stdnum/cn/__pycache__/ric.cpython-37.pyc +../stdnum/cn/loc.dat +../stdnum/cn/ric.py +../stdnum/co/__init__.py +../stdnum/co/__pycache__/__init__.cpython-37.pyc +../stdnum/co/__pycache__/nit.cpython-37.pyc +../stdnum/co/nit.py +../stdnum/cusip.py +../stdnum/cy/__init__.py +../stdnum/cy/__pycache__/__init__.cpython-37.pyc +../stdnum/cy/__pycache__/vat.cpython-37.pyc +../stdnum/cy/vat.py +../stdnum/cz/__init__.py +../stdnum/cz/__pycache__/__init__.cpython-37.pyc +../stdnum/cz/__pycache__/dic.cpython-37.pyc +../stdnum/cz/__pycache__/rc.cpython-37.pyc +../stdnum/cz/dic.py +../stdnum/cz/rc.py +../stdnum/de/__init__.py +../stdnum/de/__pycache__/__init__.cpython-37.pyc +../stdnum/de/__pycache__/vat.cpython-37.pyc +../stdnum/de/__pycache__/wkn.cpython-37.pyc +../stdnum/de/vat.py 
+../stdnum/de/wkn.py +../stdnum/dk/__init__.py +../stdnum/dk/__pycache__/__init__.cpython-37.pyc +../stdnum/dk/__pycache__/cpr.cpython-37.pyc +../stdnum/dk/__pycache__/cvr.cpython-37.pyc +../stdnum/dk/cpr.py +../stdnum/dk/cvr.py +../stdnum/do/__init__.py +../stdnum/do/__pycache__/__init__.cpython-37.pyc +../stdnum/do/__pycache__/cedula.cpython-37.pyc +../stdnum/do/__pycache__/rnc.cpython-37.pyc +../stdnum/do/cedula.py +../stdnum/do/rnc.py +../stdnum/ean.py +../stdnum/ec/__init__.py +../stdnum/ec/__pycache__/__init__.cpython-37.pyc +../stdnum/ec/__pycache__/ci.cpython-37.pyc +../stdnum/ec/__pycache__/ruc.cpython-37.pyc +../stdnum/ec/ci.py +../stdnum/ec/ruc.py +../stdnum/ee/__init__.py +../stdnum/ee/__pycache__/__init__.cpython-37.pyc +../stdnum/ee/__pycache__/ik.cpython-37.pyc +../stdnum/ee/__pycache__/kmkr.cpython-37.pyc +../stdnum/ee/ik.py +../stdnum/ee/kmkr.py +../stdnum/es/__init__.py +../stdnum/es/__pycache__/__init__.cpython-37.pyc +../stdnum/es/__pycache__/cif.cpython-37.pyc +../stdnum/es/__pycache__/dni.cpython-37.pyc +../stdnum/es/__pycache__/nie.cpython-37.pyc +../stdnum/es/__pycache__/nif.cpython-37.pyc +../stdnum/es/cif.py +../stdnum/es/dni.py +../stdnum/es/nie.py +../stdnum/es/nif.py +../stdnum/eu/__init__.py +../stdnum/eu/__pycache__/__init__.cpython-37.pyc +../stdnum/eu/__pycache__/at_02.cpython-37.pyc +../stdnum/eu/__pycache__/vat.cpython-37.pyc +../stdnum/eu/at_02.py +../stdnum/eu/vat.py +../stdnum/exceptions.py +../stdnum/fi/__init__.py +../stdnum/fi/__pycache__/__init__.cpython-37.pyc +../stdnum/fi/__pycache__/alv.cpython-37.pyc +../stdnum/fi/__pycache__/associationid.cpython-37.pyc +../stdnum/fi/__pycache__/hetu.cpython-37.pyc +../stdnum/fi/__pycache__/ytunnus.cpython-37.pyc +../stdnum/fi/alv.py +../stdnum/fi/associationid.py +../stdnum/fi/hetu.py +../stdnum/fi/ytunnus.py +../stdnum/fr/__init__.py +../stdnum/fr/__pycache__/__init__.cpython-37.pyc +../stdnum/fr/__pycache__/siren.cpython-37.pyc +../stdnum/fr/__pycache__/tva.cpython-37.pyc 
+../stdnum/fr/siren.py +../stdnum/fr/tva.py +../stdnum/gb/__init__.py +../stdnum/gb/__pycache__/__init__.cpython-37.pyc +../stdnum/gb/__pycache__/sedol.cpython-37.pyc +../stdnum/gb/__pycache__/vat.cpython-37.pyc +../stdnum/gb/sedol.py +../stdnum/gb/vat.py +../stdnum/gr/__init__.py +../stdnum/gr/__pycache__/__init__.cpython-37.pyc +../stdnum/gr/__pycache__/vat.cpython-37.pyc +../stdnum/gr/vat.py +../stdnum/grid.py +../stdnum/hr/__init__.py +../stdnum/hr/__pycache__/__init__.cpython-37.pyc +../stdnum/hr/__pycache__/oib.cpython-37.pyc +../stdnum/hr/oib.py +../stdnum/hu/__init__.py +../stdnum/hu/__pycache__/__init__.cpython-37.pyc +../stdnum/hu/__pycache__/anum.cpython-37.pyc +../stdnum/hu/anum.py +../stdnum/iban.dat +../stdnum/iban.py +../stdnum/ie/__init__.py +../stdnum/ie/__pycache__/__init__.cpython-37.pyc +../stdnum/ie/__pycache__/pps.cpython-37.pyc +../stdnum/ie/__pycache__/vat.cpython-37.pyc +../stdnum/ie/pps.py +../stdnum/ie/vat.py +../stdnum/imei.py +../stdnum/imo.py +../stdnum/imsi.dat +../stdnum/imsi.py +../stdnum/is_/__init__.py +../stdnum/is_/__pycache__/__init__.cpython-37.pyc +../stdnum/is_/__pycache__/kennitala.cpython-37.pyc +../stdnum/is_/__pycache__/vsk.cpython-37.pyc +../stdnum/is_/kennitala.py +../stdnum/is_/vsk.py +../stdnum/isan.py +../stdnum/isbn.dat +../stdnum/isbn.py +../stdnum/isil.dat +../stdnum/isil.py +../stdnum/isin.py +../stdnum/ismn.py +../stdnum/iso6346.py +../stdnum/iso7064/__init__.py +../stdnum/iso7064/__pycache__/__init__.cpython-37.pyc +../stdnum/iso7064/__pycache__/mod_11_10.cpython-37.pyc +../stdnum/iso7064/__pycache__/mod_11_2.cpython-37.pyc +../stdnum/iso7064/__pycache__/mod_37_2.cpython-37.pyc +../stdnum/iso7064/__pycache__/mod_37_36.cpython-37.pyc +../stdnum/iso7064/__pycache__/mod_97_10.cpython-37.pyc +../stdnum/iso7064/mod_11_10.py +../stdnum/iso7064/mod_11_2.py +../stdnum/iso7064/mod_37_2.py +../stdnum/iso7064/mod_37_36.py +../stdnum/iso7064/mod_97_10.py +../stdnum/iso9362.py +../stdnum/issn.py +../stdnum/it/__init__.py 
+../stdnum/it/__pycache__/__init__.cpython-37.pyc +../stdnum/it/__pycache__/codicefiscale.cpython-37.pyc +../stdnum/it/__pycache__/iva.cpython-37.pyc +../stdnum/it/codicefiscale.py +../stdnum/it/iva.py +../stdnum/lt/__init__.py +../stdnum/lt/__pycache__/__init__.cpython-37.pyc +../stdnum/lt/__pycache__/pvm.cpython-37.pyc +../stdnum/lt/pvm.py +../stdnum/lu/__init__.py +../stdnum/lu/__pycache__/__init__.cpython-37.pyc +../stdnum/lu/__pycache__/tva.cpython-37.pyc +../stdnum/lu/tva.py +../stdnum/luhn.py +../stdnum/lv/__init__.py +../stdnum/lv/__pycache__/__init__.cpython-37.pyc +../stdnum/lv/__pycache__/pvn.cpython-37.pyc +../stdnum/lv/pvn.py +../stdnum/meid.py +../stdnum/mt/__init__.py +../stdnum/mt/__pycache__/__init__.cpython-37.pyc +../stdnum/mt/__pycache__/vat.cpython-37.pyc +../stdnum/mt/vat.py +../stdnum/mx/__init__.py +../stdnum/mx/__pycache__/__init__.cpython-37.pyc +../stdnum/mx/__pycache__/rfc.cpython-37.pyc +../stdnum/mx/rfc.py +../stdnum/my/__init__.py +../stdnum/my/__pycache__/__init__.cpython-37.pyc +../stdnum/my/__pycache__/nric.cpython-37.pyc +../stdnum/my/bp.dat +../stdnum/my/nric.py +../stdnum/nl/__init__.py +../stdnum/nl/__pycache__/__init__.cpython-37.pyc +../stdnum/nl/__pycache__/brin.cpython-37.pyc +../stdnum/nl/__pycache__/bsn.cpython-37.pyc +../stdnum/nl/__pycache__/btw.cpython-37.pyc +../stdnum/nl/__pycache__/onderwijsnummer.cpython-37.pyc +../stdnum/nl/__pycache__/postcode.cpython-37.pyc +../stdnum/nl/brin.py +../stdnum/nl/bsn.py +../stdnum/nl/btw.py +../stdnum/nl/onderwijsnummer.py +../stdnum/nl/postcode.py +../stdnum/no/__init__.py +../stdnum/no/__pycache__/__init__.cpython-37.pyc +../stdnum/no/__pycache__/mva.cpython-37.pyc +../stdnum/no/__pycache__/orgnr.cpython-37.pyc +../stdnum/no/mva.py +../stdnum/no/orgnr.py +../stdnum/numdb.py +../stdnum/pl/__init__.py +../stdnum/pl/__pycache__/__init__.cpython-37.pyc +../stdnum/pl/__pycache__/nip.cpython-37.pyc +../stdnum/pl/__pycache__/pesel.cpython-37.pyc 
+../stdnum/pl/__pycache__/regon.cpython-37.pyc +../stdnum/pl/nip.py +../stdnum/pl/pesel.py +../stdnum/pl/regon.py +../stdnum/pt/__init__.py +../stdnum/pt/__pycache__/__init__.cpython-37.pyc +../stdnum/pt/__pycache__/nif.cpython-37.pyc +../stdnum/pt/nif.py +../stdnum/ro/__init__.py +../stdnum/ro/__pycache__/__init__.cpython-37.pyc +../stdnum/ro/__pycache__/cf.cpython-37.pyc +../stdnum/ro/__pycache__/cnp.cpython-37.pyc +../stdnum/ro/cf.py +../stdnum/ro/cnp.py +../stdnum/ru/__init__.py +../stdnum/ru/__pycache__/__init__.cpython-37.pyc +../stdnum/ru/__pycache__/inn.cpython-37.pyc +../stdnum/ru/inn.py +../stdnum/se/__init__.py +../stdnum/se/__pycache__/__init__.cpython-37.pyc +../stdnum/se/__pycache__/orgnr.cpython-37.pyc +../stdnum/se/__pycache__/vat.cpython-37.pyc +../stdnum/se/orgnr.py +../stdnum/se/vat.py +../stdnum/si/__init__.py +../stdnum/si/__pycache__/__init__.cpython-37.pyc +../stdnum/si/__pycache__/ddv.cpython-37.pyc +../stdnum/si/ddv.py +../stdnum/sk/__init__.py +../stdnum/sk/__pycache__/__init__.cpython-37.pyc +../stdnum/sk/__pycache__/dph.cpython-37.pyc +../stdnum/sk/__pycache__/rc.cpython-37.pyc +../stdnum/sk/dph.py +../stdnum/sk/rc.py +../stdnum/sm/__init__.py +../stdnum/sm/__pycache__/__init__.cpython-37.pyc +../stdnum/sm/__pycache__/coe.cpython-37.pyc +../stdnum/sm/coe.py +../stdnum/us/__init__.py +../stdnum/us/__pycache__/__init__.cpython-37.pyc +../stdnum/us/__pycache__/atin.cpython-37.pyc +../stdnum/us/__pycache__/ein.cpython-37.pyc +../stdnum/us/__pycache__/itin.cpython-37.pyc +../stdnum/us/__pycache__/ptin.cpython-37.pyc +../stdnum/us/__pycache__/rtn.cpython-37.pyc +../stdnum/us/__pycache__/ssn.cpython-37.pyc +../stdnum/us/__pycache__/tin.cpython-37.pyc +../stdnum/us/atin.py +../stdnum/us/ein.dat +../stdnum/us/ein.py +../stdnum/us/itin.py +../stdnum/us/ptin.py +../stdnum/us/rtn.py +../stdnum/us/ssn.py +../stdnum/us/tin.py +../stdnum/util.py +../stdnum/verhoeff.py +PKG-INFO +SOURCES.txt +dependency_links.txt +pbr.json +requires.txt +top_level.txt 
diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/pbr.json b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/pbr.json similarity index 100% rename from Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/pbr.json rename to Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/pbr.json diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/requires.txt b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/requires.txt new file mode 100644 index 0000000..00934a5 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/requires.txt @@ -0,0 +1,6 @@ + +[VIES] +suds + +[VIES-ALT] +PySimpleSOAP diff --git a/Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/python_stdnum-1.2.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/python_stdnum-1.2.egg-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/requests-2.9.1.dist-info/RECORD b/Shared/lib/python3.4/site-packages/requests-2.9.1.dist-info/RECORD index 42a4740..e3aaf81 100644 --- a/Shared/lib/python3.4/site-packages/requests-2.9.1.dist-info/RECORD +++ b/Shared/lib/python3.4/site-packages/requests-2.9.1.dist-info/RECORD @@ -1,4 +1,25 @@ +requests-2.9.1.dist-info/DESCRIPTION.rst,sha256=5yc0ht0QIOBN24R7BGATc4y1dqDvu-cV6WeJy87bSao,35694 +requests-2.9.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +requests-2.9.1.dist-info/METADATA,sha256=X4AK0UL_4kVubFjws2WmILKnVErRWCWIWr7qz__H5Jg,36581 +requests-2.9.1.dist-info/RECORD,, +requests-2.9.1.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 +requests-2.9.1.dist-info/metadata.json,sha256=f32h7EChWrcN3CPpMcwIcLZQ3tTK4bm9us9nknaQneU,955 
+requests-2.9.1.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9 requests/__init__.py,sha256=GzCmm6OIsjKVbUEMAxbtw4iBIcFj8GRsRi8GX84y6OY,2007 +requests/__pycache__/__init__.cpython-37.pyc,, +requests/__pycache__/adapters.cpython-37.pyc,, +requests/__pycache__/api.cpython-37.pyc,, +requests/__pycache__/auth.cpython-37.pyc,, +requests/__pycache__/certs.cpython-37.pyc,, +requests/__pycache__/compat.cpython-37.pyc,, +requests/__pycache__/cookies.cpython-37.pyc,, +requests/__pycache__/exceptions.cpython-37.pyc,, +requests/__pycache__/hooks.cpython-37.pyc,, +requests/__pycache__/models.cpython-37.pyc,, +requests/__pycache__/sessions.cpython-37.pyc,, +requests/__pycache__/status_codes.cpython-37.pyc,, +requests/__pycache__/structures.cpython-37.pyc,, +requests/__pycache__/utils.cpython-37.pyc,, requests/adapters.py,sha256=RqmOfpR96Lfv-GqSa8QEYIn8ZgfKb05nECh94rTQoJQ,17495 requests/api.py,sha256=mZZtHywR0qme1BStj7fKAkHdpg_3FMdDawBsvWV4eh0,5419 requests/auth.py,sha256=iez9OrPDCyE1zFxJMLL8MNw7CLj3id77gJkwNH4OlbU,7550 @@ -9,12 +30,47 @@ requests/cookies.py,sha256=mrrSrRYhxytuLDKrI5cyH5NL4zvpA373YvO7Ant9rxc,17387 requests/exceptions.py,sha256=lyzK5I-zkNCN9zfYGJgkDMvtt3akjw0QUq4q8pYI4wA,2776 requests/hooks.py,sha256=jSdmZjB5oRJ6xmKM3VtqkYkq8oFTwx6gqG9AaUWpAlw,767 requests/models.py,sha256=QQKNAR4bXpt0q2RwKTAraQlyrFJrZN__lvI0bpjPJvw,29277 -requests/sessions.py,sha256=PB_4RAr5Mr-CKStD5haAgDVJl7wBQEuShzDAP0JG-Ho,24544 -requests/status_codes.py,sha256=2RTAbhP2u3h-26-iyV0SuN1R0LknUTvga3RXwftdLtc,3280 -requests/structures.py,sha256=i3yMaaDbl4_gNJKdcK3kDmeSLoo0r59XEIWoc_qtNyo,2977 -requests/utils.py,sha256=pH5DwoyZZjwzDW_2OLrm0asDs2b_9mjzafwfm5SfzLQ,21845 requests/packages/__init__.py,sha256=CVheqNRcXIkAi5037RhxeqbAqd0QhrK1o9R9kS2xvuI,1384 +requests/packages/__pycache__/__init__.cpython-37.pyc,, requests/packages/chardet/__init__.py,sha256=XuTKCYOR7JwsoHxqZTYH86LVyMDbDI3s1s0W_qoGEBM,1295 +requests/packages/chardet/__pycache__/__init__.cpython-37.pyc,, 
+requests/packages/chardet/__pycache__/big5freq.cpython-37.pyc,, +requests/packages/chardet/__pycache__/big5prober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/chardetect.cpython-37.pyc,, +requests/packages/chardet/__pycache__/chardistribution.cpython-37.pyc,, +requests/packages/chardet/__pycache__/charsetgroupprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/charsetprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/codingstatemachine.cpython-37.pyc,, +requests/packages/chardet/__pycache__/compat.cpython-37.pyc,, +requests/packages/chardet/__pycache__/constants.cpython-37.pyc,, +requests/packages/chardet/__pycache__/cp949prober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/escprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/escsm.cpython-37.pyc,, +requests/packages/chardet/__pycache__/eucjpprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/euckrfreq.cpython-37.pyc,, +requests/packages/chardet/__pycache__/euckrprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/euctwfreq.cpython-37.pyc,, +requests/packages/chardet/__pycache__/euctwprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/gb2312freq.cpython-37.pyc,, +requests/packages/chardet/__pycache__/gb2312prober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/hebrewprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/jisfreq.cpython-37.pyc,, +requests/packages/chardet/__pycache__/jpcntx.cpython-37.pyc,, +requests/packages/chardet/__pycache__/langbulgarianmodel.cpython-37.pyc,, +requests/packages/chardet/__pycache__/langcyrillicmodel.cpython-37.pyc,, +requests/packages/chardet/__pycache__/langgreekmodel.cpython-37.pyc,, +requests/packages/chardet/__pycache__/langhebrewmodel.cpython-37.pyc,, +requests/packages/chardet/__pycache__/langhungarianmodel.cpython-37.pyc,, +requests/packages/chardet/__pycache__/langthaimodel.cpython-37.pyc,, +requests/packages/chardet/__pycache__/latin1prober.cpython-37.pyc,, 
+requests/packages/chardet/__pycache__/mbcharsetprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/mbcsgroupprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/mbcssm.cpython-37.pyc,, +requests/packages/chardet/__pycache__/sbcharsetprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/sbcsgroupprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/sjisprober.cpython-37.pyc,, +requests/packages/chardet/__pycache__/universaldetector.cpython-37.pyc,, +requests/packages/chardet/__pycache__/utf8prober.cpython-37.pyc,, requests/packages/chardet/big5freq.py,sha256=D8oTdz-GM7Jg8TsaWJDm65vM_OLHC3xub6qUJ3rOgsQ,82594 requests/packages/chardet/big5prober.py,sha256=XX96C--6WKYW36mL-z7pJSAtc169Z8ZImByCP4pEN9A,1684 requests/packages/chardet/chardetect.py,sha256=f4299UZG6uWd3i3r_N0OdrFj2sA9JFI54PAmDLAFmWA,2504 @@ -53,25 +109,52 @@ requests/packages/chardet/sjisprober.py,sha256=UYOmiMDzttYIkSDoOB08UEagivJpUXz4t requests/packages/chardet/universaldetector.py,sha256=h-E2x6XSCzlNjycYWG0Fe4Cf1SGdaIzUNu2HCphpMZA,6840 requests/packages/chardet/utf8prober.py,sha256=7tdNZGrJY7jZUBD483GGMkiP0Tx8Fp-cGvWHoAsilHg,2652 requests/packages/urllib3/__init__.py,sha256=416Z27e3riIyAXRLwsOS5dMrU2EQB361eLxksZFWnrU,2648 +requests/packages/urllib3/__pycache__/__init__.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/_collections.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/connection.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/connectionpool.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/exceptions.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/fields.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/filepost.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/poolmanager.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/request.cpython-37.pyc,, +requests/packages/urllib3/__pycache__/response.cpython-37.pyc,, 
requests/packages/urllib3/_collections.py,sha256=8G9PhO4XdkNDQATNL1uy86tSlH3EvIJHXebiOJnfFok,10542 requests/packages/urllib3/connection.py,sha256=XREoqqZh54Lgag5CLdVlC27bwCpOq0aYrMmNEMtSJWk,10286 requests/packages/urllib3/connectionpool.py,sha256=Zzn84qmJhMaSWXqDjhA1WBzt9E_wg3XXi0fsJ80aVPE,31221 -requests/packages/urllib3/exceptions.py,sha256=O_rlqjhV5PJFr28ZFW4Y0kgf_Q_l84sRx7ufgBsVEG8,5440 -requests/packages/urllib3/fields.py,sha256=WVUvPfSzNBniw9zKVDoLl9y5ko2qKBjbzkH-bTQMSgQ,5872 -requests/packages/urllib3/filepost.py,sha256=NvLlFsdt8ih_Q4S2ekQF3CJG0nOXs32YI-G04_AdT2g,2320 -requests/packages/urllib3/poolmanager.py,sha256=3KaeL_tJpPPRmnSZY9x8qHcf9SNT8QucwPUNBnqCUx4,9470 -requests/packages/urllib3/request.py,sha256=jET7OvA3FSjxABBRGhCyMdPvM9XuJA6df9gRhkJiJiY,5988 -requests/packages/urllib3/response.py,sha256=6Bs5LNzhW1YEEd6stBFJtruDVFMlWNxo0MFPmtJhvDU,18103 requests/packages/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +requests/packages/urllib3/contrib/__pycache__/__init__.cpython-37.pyc,, +requests/packages/urllib3/contrib/__pycache__/appengine.cpython-37.pyc,, +requests/packages/urllib3/contrib/__pycache__/ntlmpool.cpython-37.pyc,, +requests/packages/urllib3/contrib/__pycache__/pyopenssl.cpython-37.pyc,, requests/packages/urllib3/contrib/appengine.py,sha256=ewIhezCrySj0r1SCTm8MtpnlE6EFJpEQ-AZhNjXe6dE,7531 requests/packages/urllib3/contrib/ntlmpool.py,sha256=EDJwycyalpMD89DiGF5pFNCZOGTBQBZDtLN8oOAialc,4546 requests/packages/urllib3/contrib/pyopenssl.py,sha256=JBL3GO8YucHXkdpU7uxUGd9UgShsIhAU8oCMJDOo47s,10094 +requests/packages/urllib3/exceptions.py,sha256=O_rlqjhV5PJFr28ZFW4Y0kgf_Q_l84sRx7ufgBsVEG8,5440 +requests/packages/urllib3/fields.py,sha256=WVUvPfSzNBniw9zKVDoLl9y5ko2qKBjbzkH-bTQMSgQ,5872 +requests/packages/urllib3/filepost.py,sha256=NvLlFsdt8ih_Q4S2ekQF3CJG0nOXs32YI-G04_AdT2g,2320 requests/packages/urllib3/packages/__init__.py,sha256=nlChrGzkjCkmhCX9HrF_qHPUgosfsPQkVIJxiiLhk9g,109 
+requests/packages/urllib3/packages/__pycache__/__init__.cpython-37.pyc,, +requests/packages/urllib3/packages/__pycache__/ordered_dict.cpython-37.pyc,, +requests/packages/urllib3/packages/__pycache__/six.cpython-37.pyc,, requests/packages/urllib3/packages/ordered_dict.py,sha256=VQaPONfhVMsb8B63Xg7ZOydJqIE_jzeMhVN3Pec6ogw,8935 requests/packages/urllib3/packages/six.py,sha256=U-rO-WBrFS8PxHeamSl6okKCjqPF18NhiZb0qPZ67XM,11628 requests/packages/urllib3/packages/ssl_match_hostname/__init__.py,sha256=cOWMIn1orgJoA35p6pSzO_-Dc6iOX9Dhl6D2sL9b_2o,460 +requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-37.pyc,, +requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-37.pyc,, requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py,sha256=fK28k37hL7-D79v9iM2fHgNK9Q1Pw0M7qVRL4rkfFjQ,3778 +requests/packages/urllib3/poolmanager.py,sha256=3KaeL_tJpPPRmnSZY9x8qHcf9SNT8QucwPUNBnqCUx4,9470 +requests/packages/urllib3/request.py,sha256=jET7OvA3FSjxABBRGhCyMdPvM9XuJA6df9gRhkJiJiY,5988 +requests/packages/urllib3/response.py,sha256=6Bs5LNzhW1YEEd6stBFJtruDVFMlWNxo0MFPmtJhvDU,18103 requests/packages/urllib3/util/__init__.py,sha256=7LnyUDyddbD9VVmsbPP0ckT2paVTmgLPs5E_BUoHVu8,854 +requests/packages/urllib3/util/__pycache__/__init__.cpython-37.pyc,, +requests/packages/urllib3/util/__pycache__/connection.cpython-37.pyc,, +requests/packages/urllib3/util/__pycache__/request.cpython-37.pyc,, +requests/packages/urllib3/util/__pycache__/response.cpython-37.pyc,, +requests/packages/urllib3/util/__pycache__/retry.cpython-37.pyc,, +requests/packages/urllib3/util/__pycache__/ssl_.cpython-37.pyc,, +requests/packages/urllib3/util/__pycache__/timeout.cpython-37.pyc,, +requests/packages/urllib3/util/__pycache__/url.cpython-37.pyc,, requests/packages/urllib3/util/connection.py,sha256=6PvDBlK_6QDLHzEDT-uEMhqKcDoSuRO43Vtb4IXfkzQ,3380 
requests/packages/urllib3/util/request.py,sha256=ZMDewRK-mjlK72szGIIjzYnLIn-zPP0WgJUMjKeZ6Tg,2128 requests/packages/urllib3/util/response.py,sha256=-vdS4K9hXQAkcvFLgnZqNmah_yyH7NZ2I6gRaKX2EwU,2167 @@ -79,90 +162,7 @@ requests/packages/urllib3/util/retry.py,sha256=Q4IdYYD3JwjNvc49r7tGsZt7jB6nVkRBo requests/packages/urllib3/util/ssl_.py,sha256=Rq7M8Y04fwHjA9EkewYXE2SE5ZK7UGZhnhZ5JledWh0,11401 requests/packages/urllib3/util/timeout.py,sha256=ioAIYptFyBG7eU_r8_ZmO45hpj1dJE6WCvrGR9dNFjs,9596 requests/packages/urllib3/util/url.py,sha256=EcX4ZfmgKWcqM4sY9FlC-yN4y_snuURPV0TpUPHNjnc,5879 -requests-2.9.1.dist-info/DESCRIPTION.rst,sha256=5yc0ht0QIOBN24R7BGATc4y1dqDvu-cV6WeJy87bSao,35694 -requests-2.9.1.dist-info/METADATA,sha256=X4AK0UL_4kVubFjws2WmILKnVErRWCWIWr7qz__H5Jg,36581 -requests-2.9.1.dist-info/RECORD,, -requests-2.9.1.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 -requests-2.9.1.dist-info/metadata.json,sha256=f32h7EChWrcN3CPpMcwIcLZQ3tTK4bm9us9nknaQneU,955 -requests-2.9.1.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9 -requests-2.9.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -requests/packages/__pycache__/__init__.cpython-34.pyc,, -requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-34.pyc,, -requests/packages/urllib3/contrib/__pycache__/pyopenssl.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/poolmanager.cpython-34.pyc,, -requests/packages/chardet/__pycache__/charsetprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/universaldetector.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/ssl_.cpython-34.pyc,, -requests/packages/chardet/__pycache__/codingstatemachine.cpython-34.pyc,, -requests/__pycache__/__init__.cpython-34.pyc,, -requests/packages/chardet/__pycache__/euckrprober.cpython-34.pyc,, -requests/__pycache__/auth.cpython-34.pyc,, -requests/packages/urllib3/packages/__pycache__/ordered_dict.cpython-34.pyc,, 
-requests/packages/urllib3/util/__pycache__/__init__.cpython-34.pyc,, -requests/__pycache__/cookies.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/fields.cpython-34.pyc,, -requests/packages/chardet/__pycache__/__init__.cpython-34.pyc,, -requests/__pycache__/structures.cpython-34.pyc,, -requests/packages/chardet/__pycache__/mbcsgroupprober.cpython-34.pyc,, -requests/packages/urllib3/packages/__pycache__/__init__.cpython-34.pyc,, -requests/packages/chardet/__pycache__/chardistribution.cpython-34.pyc,, -requests/__pycache__/certs.cpython-34.pyc,, -requests/packages/chardet/__pycache__/sbcsgroupprober.cpython-34.pyc,, -requests/packages/urllib3/contrib/__pycache__/__init__.cpython-34.pyc,, -requests/packages/chardet/__pycache__/charsetgroupprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/cp949prober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/escsm.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/connectionpool.cpython-34.pyc,, -requests/packages/chardet/__pycache__/chardetect.cpython-34.pyc,, -requests/packages/chardet/__pycache__/compat.cpython-34.pyc,, -requests/packages/chardet/__pycache__/jpcntx.cpython-34.pyc,, -requests/packages/chardet/__pycache__/utf8prober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langgreekmodel.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/exceptions.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/response.cpython-34.pyc,, -requests/__pycache__/api.cpython-34.pyc,, -requests/packages/chardet/__pycache__/eucjpprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/mbcharsetprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/sbcharsetprober.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/request.cpython-34.pyc,, -requests/__pycache__/utils.cpython-34.pyc,, -requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/retry.cpython-34.pyc,, 
-requests/packages/chardet/__pycache__/langthaimodel.cpython-34.pyc,, -requests/packages/chardet/__pycache__/big5prober.cpython-34.pyc,, -requests/__pycache__/hooks.cpython-34.pyc,, -requests/packages/chardet/__pycache__/euckrfreq.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langbulgarianmodel.cpython-34.pyc,, -requests/packages/chardet/__pycache__/euctwfreq.cpython-34.pyc,, -requests/__pycache__/adapters.cpython-34.pyc,, -requests/packages/urllib3/contrib/__pycache__/appengine.cpython-34.pyc,, -requests/__pycache__/exceptions.cpython-34.pyc,, -requests/packages/urllib3/packages/__pycache__/six.cpython-34.pyc,, -requests/__pycache__/sessions.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/url.cpython-34.pyc,, -requests/__pycache__/status_codes.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langcyrillicmodel.cpython-34.pyc,, -requests/packages/chardet/__pycache__/gb2312freq.cpython-34.pyc,, -requests/__pycache__/models.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/timeout.cpython-34.pyc,, -requests/packages/chardet/__pycache__/mbcssm.cpython-34.pyc,, -requests/__pycache__/compat.cpython-34.pyc,, -requests/packages/chardet/__pycache__/sjisprober.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/connection.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langhungarianmodel.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/_collections.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/connection.cpython-34.pyc,, -requests/packages/chardet/__pycache__/big5freq.cpython-34.pyc,, -requests/packages/chardet/__pycache__/constants.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langhebrewmodel.cpython-34.pyc,, -requests/packages/chardet/__pycache__/escprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/hebrewprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/euctwprober.cpython-34.pyc,, -requests/packages/urllib3/contrib/__pycache__/ntlmpool.cpython-34.pyc,, 
-requests/packages/chardet/__pycache__/jisfreq.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/__init__.cpython-34.pyc,, -requests/packages/chardet/__pycache__/latin1prober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/gb2312prober.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/response.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/filepost.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/request.cpython-34.pyc,, +requests/sessions.py,sha256=PB_4RAr5Mr-CKStD5haAgDVJl7wBQEuShzDAP0JG-Ho,24544 +requests/status_codes.py,sha256=2RTAbhP2u3h-26-iyV0SuN1R0LknUTvga3RXwftdLtc,3280 +requests/structures.py,sha256=i3yMaaDbl4_gNJKdcK3kDmeSLoo0r59XEIWoc_qtNyo,2977 +requests/utils.py,sha256=pH5DwoyZZjwzDW_2OLrm0asDs2b_9mjzafwfm5SfzLQ,21845 diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/DESCRIPTION.rst deleted file mode 100644 index c6b6a1c..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,238 +0,0 @@ -=============================== -Installing and Using Setuptools -=============================== - -.. contents:: **Table of Contents** - - -`Change History `_. - -------------------------- -Installation Instructions -------------------------- - -The recommended way to bootstrap setuptools on any system is to download -`ez_setup.py`_ and run it using the target Python environment. Different -operating systems have different recommended techniques to accomplish this -basic routine, so below are some examples to get you started. - -Setuptools requires Python 2.6 or later. To install setuptools -on Python 2.4 or Python 2.5, use the `bootstrap script for Setuptools 1.x -`_. - -The link provided to ez_setup.py is a bookmark to bootstrap script for the -latest known stable release. - -.. 
_ez_setup.py: https://bootstrap.pypa.io/ez_setup.py - -Windows (Powershell 3 or later) -=============================== - -For best results, uninstall previous versions FIRST (see `Uninstalling`_). - -Using Windows 8 (which includes PowerShell 3) or earlier versions of Windows -with PowerShell 3 installed, it's possible to install with one simple -Powershell command. Start up Powershell and paste this command:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | python - - -You must start the Powershell with Administrative privileges or you may choose -to install a user-local installation:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | python - --user - -If you have Python 3.3 or later, you can use the ``py`` command to install to -different Python versions. For example, to install to Python 3.3 if you have -Python 2.7 installed:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | py -3 - - -The recommended way to install setuptools on Windows is to download -`ez_setup.py`_ and run it. The script will download the appropriate -distribution file and install it for you. - -Once installation is complete, you will find an ``easy_install`` program in -your Python ``Scripts`` subdirectory. For simple invocation and best results, -add this directory to your ``PATH`` environment variable, if it is not already -present. If you did a user-local install, the ``Scripts`` subdirectory is -``$env:APPDATA\Python\Scripts``. - - -Windows (simplified) -==================== - -For Windows without PowerShell 3 or for installation without a command-line, -download `ez_setup.py`_ using your preferred web browser or other technique -and "run" that file. - - -Unix (wget) -=========== - -Most Linux distributions come with wget. - -Download `ez_setup.py`_ and run it using the target Python version. 
The script -will download the appropriate version and install it for you:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | python - -Note that you will may need to invoke the command with superuser privileges to -install to the system Python:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | sudo python - -Alternatively, Setuptools may be installed to a user-local path:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | python - --user - -Note that on some older systems (noted on Debian 6 and CentOS 5 installations), -`wget` may refuse to download `ez_setup.py`, complaining that the certificate common name `*.c.ssl.fastly.net` -does not match the host name `bootstrap.pypa.io`. In addition, the `ez_setup.py` script may then encounter similar problems using -`wget` internally to download `setuptools-x.y.zip`, complaining that the certificate common name of `www.python.org` does not match the -host name `pypi.python.org`. Those are known issues, related to a bug in the older versions of `wget` -(see `Issue 59 `_). If you happen to encounter them, -install Setuptools as follows:: - - > wget --no-check-certificate https://bootstrap.pypa.io/ez_setup.py - > python ez_setup.py --insecure - - -Unix including Mac OS X (curl) -============================== - -If your system has curl installed, follow the ``wget`` instructions but -replace ``wget`` with ``curl`` and ``-O`` with ``-o``. For example:: - - > curl https://bootstrap.pypa.io/ez_setup.py -o - | python - - -Advanced Installation -===================== - -For more advanced installation options, such as installing to custom -locations or prefixes, download and extract the source -tarball from `Setuptools on PyPI `_ -and run setup.py with any supported distutils and Setuptools options. 
-For example:: - - setuptools-x.x$ python setup.py install --prefix=/opt/setuptools - -Use ``--help`` to get a full options list, but we recommend consulting -the `EasyInstall manual`_ for detailed instructions, especially `the section -on custom installation locations`_. - -.. _EasyInstall manual: https://pythonhosted.org/setuptools/EasyInstall -.. _the section on custom installation locations: https://pythonhosted.org/setuptools/EasyInstall#custom-installation-locations - - -Downloads -========= - -All setuptools downloads can be found at `the project's home page in the Python -Package Index`_. Scroll to the very bottom of the page to find the links. - -.. _the project's home page in the Python Package Index: https://pypi.python.org/pypi/setuptools - -In addition to the PyPI downloads, the development version of ``setuptools`` -is available from the `Bitbucket repo`_, and in-development versions of the -`0.6 branch`_ are available as well. - -.. _Bitbucket repo: https://bitbucket.org/pypa/setuptools/get/default.tar.gz#egg=setuptools-dev -.. _0.6 branch: http://svn.python.org/projects/sandbox/branches/setuptools-0.6/#egg=setuptools-dev06 - -Uninstalling -============ - -On Windows, if Setuptools was installed using an ``.exe`` or ``.msi`` -installer, simply use the uninstall feature of "Add/Remove Programs" in the -Control Panel. - -Otherwise, to uninstall Setuptools or Distribute, regardless of the Python -version, delete all ``setuptools*`` and ``distribute*`` files and -directories from your system's ``site-packages`` directory -(and any other ``sys.path`` directories) FIRST. - -If you are upgrading or otherwise plan to re-install Setuptools or Distribute, -nothing further needs to be done. If you want to completely remove Setuptools, -you may also want to remove the 'easy_install' and 'easy_install-x.x' scripts -and associated executables installed to the Python scripts directory. 
- --------------------------------- -Using Setuptools and EasyInstall --------------------------------- - -Here are some of the available manuals, tutorials, and other resources for -learning about Setuptools, Python Eggs, and EasyInstall: - -* `The EasyInstall user's guide and reference manual`_ -* `The setuptools Developer's Guide`_ -* `The pkg_resources API reference`_ -* `The Internal Structure of Python Eggs`_ - -Questions, comments, and bug reports should be directed to the `distutils-sig -mailing list`_. If you have written (or know of) any tutorials, documentation, -plug-ins, or other resources for setuptools users, please let us know about -them there, so this reference list can be updated. If you have working, -*tested* patches to correct problems or add features, you may submit them to -the `setuptools bug tracker`_. - -.. _setuptools bug tracker: https://bitbucket.org/pypa/setuptools/issues -.. _The Internal Structure of Python Eggs: https://pythonhosted.org/setuptools/formats.html -.. _The setuptools Developer's Guide: https://pythonhosted.org/setuptools/setuptools.html -.. _The pkg_resources API reference: https://pythonhosted.org/setuptools/pkg_resources.html -.. _The EasyInstall user's guide and reference manual: https://pythonhosted.org/setuptools/easy_install.html -.. _distutils-sig mailing list: http://mail.python.org/pipermail/distutils-sig/ - - -------- -Credits -------- - -* The original design for the ``.egg`` format and the ``pkg_resources`` API was - co-created by Phillip Eby and Bob Ippolito. Bob also implemented the first - version of ``pkg_resources``, and supplied the OS X operating system version - compatibility algorithm. - -* Ian Bicking implemented many early "creature comfort" features of - easy_install, including support for downloading via Sourceforge and - Subversion repositories. 
Ian's comments on the Web-SIG about WSGI - application deployment also inspired the concept of "entry points" in eggs, - and he has given talks at PyCon and elsewhere to inform and educate the - community about eggs and setuptools. - -* Jim Fulton contributed time and effort to build automated tests of various - aspects of ``easy_install``, and supplied the doctests for the command-line - ``.exe`` wrappers on Windows. - -* Phillip J. Eby is the seminal author of setuptools, and - first proposed the idea of an importable binary distribution format for - Python application plug-ins. - -* Significant parts of the implementation of setuptools were funded by the Open - Source Applications Foundation, to provide a plug-in infrastructure for the - Chandler PIM application. In addition, many OSAF staffers (such as Mike - "Code Bear" Taylor) contributed their time and stress as guinea pigs for the - use of eggs and setuptools, even before eggs were "cool". (Thanks, guys!) - -* Tarek Ziadé is the principal author of the Distribute fork, which - re-invigorated the community on the project, encouraged renewed innovation, - and addressed many defects. - -* Since the merge with Distribute, Jason R. Coombs is the - maintainer of setuptools. The project is maintained in coordination with - the Python Packaging Authority (PyPA) and the larger Python community. - -.. _files: - - ---------------- -Code of Conduct ---------------- - -Everyone interacting in the setuptools project's codebases, issue trackers, -chat rooms, and mailing lists is expected to follow the -`PyPA Code of Conduct`_. - -.. 
_PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - - diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/METADATA b/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/METADATA deleted file mode 100644 index e901218..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/METADATA +++ /dev/null @@ -1,267 +0,0 @@ -Metadata-Version: 2.0 -Name: setuptools -Version: 20.2.2 -Summary: Easily download, build, install, upgrade, and uninstall Python packages -Home-page: https://bitbucket.org/pypa/setuptools -Author: Python Packaging Authority -Author-email: distutils-sig@python.org -License: UNKNOWN -Keywords: CPAN PyPI distutils eggs package management -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: System :: Archiving :: Packaging -Classifier: Topic :: System :: Systems Administration -Classifier: Topic :: Utilities -Provides-Extra: certs -Requires-Dist: certifi (==2015.11.20); extra == 'certs' -Provides-Extra: ssl -Requires-Dist: wincertstore (==0.2); sys_platform=='win32' and extra == 'ssl' - 
-=============================== -Installing and Using Setuptools -=============================== - -.. contents:: **Table of Contents** - - -`Change History `_. - -------------------------- -Installation Instructions -------------------------- - -The recommended way to bootstrap setuptools on any system is to download -`ez_setup.py`_ and run it using the target Python environment. Different -operating systems have different recommended techniques to accomplish this -basic routine, so below are some examples to get you started. - -Setuptools requires Python 2.6 or later. To install setuptools -on Python 2.4 or Python 2.5, use the `bootstrap script for Setuptools 1.x -`_. - -The link provided to ez_setup.py is a bookmark to bootstrap script for the -latest known stable release. - -.. _ez_setup.py: https://bootstrap.pypa.io/ez_setup.py - -Windows (Powershell 3 or later) -=============================== - -For best results, uninstall previous versions FIRST (see `Uninstalling`_). - -Using Windows 8 (which includes PowerShell 3) or earlier versions of Windows -with PowerShell 3 installed, it's possible to install with one simple -Powershell command. Start up Powershell and paste this command:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | python - - -You must start the Powershell with Administrative privileges or you may choose -to install a user-local installation:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | python - --user - -If you have Python 3.3 or later, you can use the ``py`` command to install to -different Python versions. For example, to install to Python 3.3 if you have -Python 2.7 installed:: - - > (Invoke-WebRequest https://bootstrap.pypa.io/ez_setup.py).Content | py -3 - - -The recommended way to install setuptools on Windows is to download -`ez_setup.py`_ and run it. The script will download the appropriate -distribution file and install it for you. 
- -Once installation is complete, you will find an ``easy_install`` program in -your Python ``Scripts`` subdirectory. For simple invocation and best results, -add this directory to your ``PATH`` environment variable, if it is not already -present. If you did a user-local install, the ``Scripts`` subdirectory is -``$env:APPDATA\Python\Scripts``. - - -Windows (simplified) -==================== - -For Windows without PowerShell 3 or for installation without a command-line, -download `ez_setup.py`_ using your preferred web browser or other technique -and "run" that file. - - -Unix (wget) -=========== - -Most Linux distributions come with wget. - -Download `ez_setup.py`_ and run it using the target Python version. The script -will download the appropriate version and install it for you:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | python - -Note that you will may need to invoke the command with superuser privileges to -install to the system Python:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | sudo python - -Alternatively, Setuptools may be installed to a user-local path:: - - > wget https://bootstrap.pypa.io/ez_setup.py -O - | python - --user - -Note that on some older systems (noted on Debian 6 and CentOS 5 installations), -`wget` may refuse to download `ez_setup.py`, complaining that the certificate common name `*.c.ssl.fastly.net` -does not match the host name `bootstrap.pypa.io`. In addition, the `ez_setup.py` script may then encounter similar problems using -`wget` internally to download `setuptools-x.y.zip`, complaining that the certificate common name of `www.python.org` does not match the -host name `pypi.python.org`. Those are known issues, related to a bug in the older versions of `wget` -(see `Issue 59 `_). 
If you happen to encounter them, -install Setuptools as follows:: - - > wget --no-check-certificate https://bootstrap.pypa.io/ez_setup.py - > python ez_setup.py --insecure - - -Unix including Mac OS X (curl) -============================== - -If your system has curl installed, follow the ``wget`` instructions but -replace ``wget`` with ``curl`` and ``-O`` with ``-o``. For example:: - - > curl https://bootstrap.pypa.io/ez_setup.py -o - | python - - -Advanced Installation -===================== - -For more advanced installation options, such as installing to custom -locations or prefixes, download and extract the source -tarball from `Setuptools on PyPI `_ -and run setup.py with any supported distutils and Setuptools options. -For example:: - - setuptools-x.x$ python setup.py install --prefix=/opt/setuptools - -Use ``--help`` to get a full options list, but we recommend consulting -the `EasyInstall manual`_ for detailed instructions, especially `the section -on custom installation locations`_. - -.. _EasyInstall manual: https://pythonhosted.org/setuptools/EasyInstall -.. _the section on custom installation locations: https://pythonhosted.org/setuptools/EasyInstall#custom-installation-locations - - -Downloads -========= - -All setuptools downloads can be found at `the project's home page in the Python -Package Index`_. Scroll to the very bottom of the page to find the links. - -.. _the project's home page in the Python Package Index: https://pypi.python.org/pypi/setuptools - -In addition to the PyPI downloads, the development version of ``setuptools`` -is available from the `Bitbucket repo`_, and in-development versions of the -`0.6 branch`_ are available as well. - -.. _Bitbucket repo: https://bitbucket.org/pypa/setuptools/get/default.tar.gz#egg=setuptools-dev -.. 
_0.6 branch: http://svn.python.org/projects/sandbox/branches/setuptools-0.6/#egg=setuptools-dev06 - -Uninstalling -============ - -On Windows, if Setuptools was installed using an ``.exe`` or ``.msi`` -installer, simply use the uninstall feature of "Add/Remove Programs" in the -Control Panel. - -Otherwise, to uninstall Setuptools or Distribute, regardless of the Python -version, delete all ``setuptools*`` and ``distribute*`` files and -directories from your system's ``site-packages`` directory -(and any other ``sys.path`` directories) FIRST. - -If you are upgrading or otherwise plan to re-install Setuptools or Distribute, -nothing further needs to be done. If you want to completely remove Setuptools, -you may also want to remove the 'easy_install' and 'easy_install-x.x' scripts -and associated executables installed to the Python scripts directory. - --------------------------------- -Using Setuptools and EasyInstall --------------------------------- - -Here are some of the available manuals, tutorials, and other resources for -learning about Setuptools, Python Eggs, and EasyInstall: - -* `The EasyInstall user's guide and reference manual`_ -* `The setuptools Developer's Guide`_ -* `The pkg_resources API reference`_ -* `The Internal Structure of Python Eggs`_ - -Questions, comments, and bug reports should be directed to the `distutils-sig -mailing list`_. If you have written (or know of) any tutorials, documentation, -plug-ins, or other resources for setuptools users, please let us know about -them there, so this reference list can be updated. If you have working, -*tested* patches to correct problems or add features, you may submit them to -the `setuptools bug tracker`_. - -.. _setuptools bug tracker: https://bitbucket.org/pypa/setuptools/issues -.. _The Internal Structure of Python Eggs: https://pythonhosted.org/setuptools/formats.html -.. _The setuptools Developer's Guide: https://pythonhosted.org/setuptools/setuptools.html -.. 
_The pkg_resources API reference: https://pythonhosted.org/setuptools/pkg_resources.html -.. _The EasyInstall user's guide and reference manual: https://pythonhosted.org/setuptools/easy_install.html -.. _distutils-sig mailing list: http://mail.python.org/pipermail/distutils-sig/ - - -------- -Credits -------- - -* The original design for the ``.egg`` format and the ``pkg_resources`` API was - co-created by Phillip Eby and Bob Ippolito. Bob also implemented the first - version of ``pkg_resources``, and supplied the OS X operating system version - compatibility algorithm. - -* Ian Bicking implemented many early "creature comfort" features of - easy_install, including support for downloading via Sourceforge and - Subversion repositories. Ian's comments on the Web-SIG about WSGI - application deployment also inspired the concept of "entry points" in eggs, - and he has given talks at PyCon and elsewhere to inform and educate the - community about eggs and setuptools. - -* Jim Fulton contributed time and effort to build automated tests of various - aspects of ``easy_install``, and supplied the doctests for the command-line - ``.exe`` wrappers on Windows. - -* Phillip J. Eby is the seminal author of setuptools, and - first proposed the idea of an importable binary distribution format for - Python application plug-ins. - -* Significant parts of the implementation of setuptools were funded by the Open - Source Applications Foundation, to provide a plug-in infrastructure for the - Chandler PIM application. In addition, many OSAF staffers (such as Mike - "Code Bear" Taylor) contributed their time and stress as guinea pigs for the - use of eggs and setuptools, even before eggs were "cool". (Thanks, guys!) - -* Tarek Ziadé is the principal author of the Distribute fork, which - re-invigorated the community on the project, encouraged renewed innovation, - and addressed many defects. - -* Since the merge with Distribute, Jason R. Coombs is the - maintainer of setuptools. 
The project is maintained in coordination with - the Python Packaging Authority (PyPA) and the larger Python community. - -.. _files: - - ---------------- -Code of Conduct ---------------- - -Everyone interacting in the setuptools project's codebases, issue trackers, -chat rooms, and mailing lists is expected to follow the -`PyPA Code of Conduct`_. - -.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - - diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/RECORD b/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/RECORD deleted file mode 100644 index f4a1729..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/RECORD +++ /dev/null @@ -1,137 +0,0 @@ -easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126 -pkg_resources/__init__.py,sha256=bwxm1Fn4zVXphbGYtK6sddFL-iMdlwIX7A2pOlN0tVk,100876 -pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pkg_resources/_vendor/pyparsing.py,sha256=ic8qmDPiq8Li-Y0PeZcI56rEyMqevKNBK6hr6FbyVBc,160425 -pkg_resources/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 -pkg_resources/_vendor/packaging/__about__.py,sha256=AEwkfVSNgMMAAugtYao7b7wah9XryokeoXBuIw4h6d8,720 -pkg_resources/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513 -pkg_resources/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860 -pkg_resources/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416 -pkg_resources/_vendor/packaging/markers.py,sha256=0Z2in1kNfYn93n9uJj0hNEmu-sJpEQpa_qAbxpYXdS4,7359 -pkg_resources/_vendor/packaging/requirements.py,sha256=SikL2UynbsT0qtY9ltqngndha_sfo0w6XGFhAhoSoaQ,4355 -pkg_resources/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025 -pkg_resources/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421 
-pkg_resources/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556 -pkg_resources/extern/__init__.py,sha256=rMBTxKimjNg8plSH94cB-y52pKO0zmM-AkFL30lZGfY,2474 -setuptools/__init__.py,sha256=WEGb6BRGN2dz3eJTbNRUfInUAhb6_OZJyYAndPGJm6w,5440 -setuptools/archive_util.py,sha256=N30WE5ZQjkytzhAodAXw4FkK-9J5AP1ChrClHnZthOA,6609 -setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 -setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752 -setuptools/cli-arm-32.exe,sha256=0pFNIi2SmY2gdY91Y4LRhj1wuBsnv5cG1fus3iBJv40,69120 -setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 -setuptools/depends.py,sha256=WyJIhjIX7D5-JpGSnMAPHEoDcVPQxaO0405keTQT6jM,6418 -setuptools/dist.py,sha256=txOleyyt2xCSTkUjCGW4MYZB8a1xsbC8MulDhSnoivQ,35701 -setuptools/extension.py,sha256=YvsyGHWVWzhNOXMHU239FR14wxw2WwdMLLzWsRP6_IY,1694 -setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 -setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264 -setuptools/gui-arm-32.exe,sha256=R5gRWLkY7wvO_CVGxoi7LZVTv0h-DKsKScy6fkbp4XI,69120 -setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 -setuptools/launch.py,sha256=hP3qZxDNu5Hf9C-VAkEP4IC_YYfR1XfxMTj6EguxxCg,730 -setuptools/lib2to3_ex.py,sha256=6jPF9sJuHiz0cyg4cwIBLl2VMAxcl3GYSZwWAOuJplU,1998 -setuptools/msvc9_support.py,sha256=fo2vjb-dna1SEuHezQCTuelCo6XFBv5cqaI56ABJ1vw,2187 -setuptools/package_index.py,sha256=T6tZGPHApup6Gl3kz1sCLtY7kmMUXLBKweSAORYS2Qc,39490 -setuptools/py26compat.py,sha256=1Vvuf-hj5bTM3OAXv6vgJQImulne12ann053caOgikU,481 -setuptools/py27compat.py,sha256=CGj-jZcFgHUkrEdLvArkxHj96tAaMbG2-yJtUVU7QVI,306 -setuptools/py31compat.py,sha256=cqYSVBd2pxvKl75185z40htfEr6EKC29KvSBiSoqHOA,1636 -setuptools/sandbox.py,sha256=tuMRu_8R0_w6Qer9VqDiOTqKy1qr_GjHi-2QAg7TMz0,14210 -setuptools/script (dev).tmpl,sha256=f7MR17dTkzaqkCMSVseyOCMVrPVSMdmTQsaB8cZzfuI,201 
-setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138 -setuptools/site-patch.py,sha256=K-0-cAx36mX_PG-qPZwosG9ZLCliRjquKQ4nHiJvvzg,2389 -setuptools/ssl_support.py,sha256=tAFeeyFPVle_GgarPkNrdfnCJgP9PyN_QYGXTgypoyc,8119 -setuptools/unicode_utils.py,sha256=8zVyrL_MFc6P5AmErs21rr7z-3N1pZ_NkOcDC7BPElU,995 -setuptools/utils.py,sha256=08Z7mt-9mvrx-XvmS5EyKoRn2lxNTlgFsUwBU3Eq9JQ,293 -setuptools/version.py,sha256=vRyfcfc7GHZS-3JCgX02OcTSUwmEyvPJpQQF4XGxTmc,23 -setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714 -setuptools/command/__init__.py,sha256=1AM3hv_zCixE7kTXA-onWfK_2KF8GC8fUw3WSxzi5Fg,564 -setuptools/command/alias.py,sha256=KjpE0sz_SDIHv3fpZcIQK-sCkJz-SrC6Gmug6b9Nkc8,2426 -setuptools/command/bdist_egg.py,sha256=Km4CsGbevhvej6kKEfvTYxfkPoQijUyXmImNifrO4Tg,17184 -setuptools/command/bdist_rpm.py,sha256=B7l0TnzCGb-0nLlm6rS00jWLkojASwVmdhW2w5Qz_Ak,1508 -setuptools/command/bdist_wininst.py,sha256=_6dz3lpB1tY200LxKPLM7qgwTCceOMgaWFF-jW2-pm0,637 -setuptools/command/build_ext.py,sha256=pkQ8xp3YPVGGLkGv-SvfxC_GqFpboph1AFEoMFOgQMo,11964 -setuptools/command/build_py.py,sha256=HvJ88JuougDccaowYlfMV12kYtd0GLahg2DR2vQRqL4,7983 -setuptools/command/develop.py,sha256=VxSYbpM2jQqtRBn5klIjPVBo3sWKNZMlSbHHiRLUlZo,7383 -setuptools/command/easy_install.py,sha256=H2aThxQAtB-WKu52Hsc8ePfJcbILMQPq060CgUtvDtw,86035 -setuptools/command/egg_info.py,sha256=0_8eI8hgLAlGt8Xk5kiodY_d9lxG6_RSescJISKBJgA,16890 -setuptools/command/install.py,sha256=QwaFiZRU3ytIHoPh8uJ9EqV3Fu9C4ca4B7UGAo95tws,4685 -setuptools/command/install_egg_info.py,sha256=8J_cH4VbOJv-9Wey8Ijw5SnNI7YS_CA2IKYX569mP5Q,4035 -setuptools/command/install_lib.py,sha256=rWcysInRJHVzgneY41EKW3kW3-dR2q2CvyPzul5ASAk,3839 -setuptools/command/install_scripts.py,sha256=vX2JC6v7l090N7CrTfihWBklNbPvfNKAY2LRtukM9XE,2231 -setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628 
-setuptools/command/register.py,sha256=bHlMm1qmBbSdahTOT8w6UhA-EgeQIz7p6cD-qOauaiI,270 -setuptools/command/rotate.py,sha256=QGZS2t4CmBl7t79KQijNCjRMU50lu3nRhu4FXWB5LIE,2038 -setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658 -setuptools/command/sdist.py,sha256=kQetnPMw6ao3nurWGJZgS4HkOH4AknzMOSvqbVA6jGA,7050 -setuptools/command/setopt.py,sha256=cygJaJWJmiVhR0e_Uh_0_fWyCxMJIqK-Bu6K0LyYUtU,5086 -setuptools/command/test.py,sha256=N2f5RwxkjwU3YQzFYHtzHr636-pdX9XJDuPg5Y92kSo,6888 -setuptools/command/upload.py,sha256=OjAryq4ZoARZiaTN_MpuG1X8Pu9CJNCKmmbMg-gab5I,649 -setuptools/command/upload_docs.py,sha256=htXpASci5gKP0RIrGZRRmbll7RnTRuwvKWZkYsBlDMM,6815 -setuptools/extern/__init__.py,sha256=mTrrj4yLMdFeEwwnqKnSuvZM5RM-HPZ1iXLgaYDlB9o,132 -setuptools-20.2.2.dist-info/DESCRIPTION.rst,sha256=MDsJej8DPV2OKpAKpu74g-2xksRd-uGTeZn4W7D1dnI,9940 -setuptools-20.2.2.dist-info/METADATA,sha256=iyHIJhxBur1zKQdezmdxh356iMZBDPWC-HnpBxSJkjA,11173 -setuptools-20.2.2.dist-info/RECORD,, -setuptools-20.2.2.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 -setuptools-20.2.2.dist-info/dependency_links.txt,sha256=oUNXJEArClXFiSSvfFwUKY8TYjeIXhuFfCpXn5K0DCE,226 -setuptools-20.2.2.dist-info/entry_points.txt,sha256=revbaRBbkZ2b1B-hZlAXo_18J9GjdYHgA4DoW8wdTOU,2835 -setuptools-20.2.2.dist-info/metadata.json,sha256=NJw3rJiskXQqc4F9J19zjdRFEpI28APWvf149lL509g,4636 -setuptools-20.2.2.dist-info/top_level.txt,sha256=2HUXVVwA4Pff1xgTFr3GsTXXKaPaO6vlG6oNJ_4u4Tg,38 -setuptools-20.2.2.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 -../../../bin/easy_install,sha256=4bXVXBoSo_A1XK3Ga5UMkOREdCSnh8FZIYqtJVSWCa4,298 -../../../bin/easy_install-3.4,sha256=4bXVXBoSo_A1XK3Ga5UMkOREdCSnh8FZIYqtJVSWCa4,298 -setuptools-20.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -setuptools/__pycache__/py27compat.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-34.pyc,, 
-setuptools/__pycache__/sandbox.cpython-34.pyc,, -pkg_resources/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,, -setuptools/__pycache__/version.cpython-34.pyc,, -setuptools/__pycache__/depends.cpython-34.pyc,, -setuptools/__pycache__/windows_support.cpython-34.pyc,, -pkg_resources/_vendor/__pycache__/six.cpython-34.pyc,, -setuptools/command/__pycache__/install_egg_info.cpython-34.pyc,, -setuptools/command/__pycache__/egg_info.cpython-34.pyc,, -pkg_resources/extern/__pycache__/__init__.cpython-34.pyc,, -setuptools/__pycache__/py26compat.cpython-34.pyc,, -setuptools/command/__pycache__/rotate.cpython-34.pyc,, -setuptools/command/__pycache__/install_scripts.cpython-34.pyc,, -setuptools/command/__pycache__/alias.cpython-34.pyc,, -setuptools/__pycache__/unicode_utils.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/utils.cpython-34.pyc,, -setuptools/command/__pycache__/install_lib.cpython-34.pyc,, -setuptools/__pycache__/package_index.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,, -setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,, -pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,, -setuptools/command/__pycache__/setopt.cpython-34.pyc,, -__pycache__/easy_install.cpython-34.pyc,, -setuptools/command/__pycache__/build_py.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,, -setuptools/command/__pycache__/upload_docs.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,, -setuptools/command/__pycache__/sdist.cpython-34.pyc,, -setuptools/__pycache__/dist.cpython-34.pyc,, -setuptools/__pycache__/archive_util.cpython-34.pyc,, -setuptools/command/__pycache__/register.cpython-34.pyc,, -setuptools/__pycache__/extension.cpython-34.pyc,, 
-setuptools/command/__pycache__/test.cpython-34.pyc,, -setuptools/command/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,, -setuptools/command/__pycache__/develop.cpython-34.pyc,, -setuptools/extern/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,, -setuptools/__pycache__/launch.cpython-34.pyc,, -setuptools/command/__pycache__/build_ext.cpython-34.pyc,, -setuptools/__pycache__/utils.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/markers.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,, -setuptools/__pycache__/site-patch.cpython-34.pyc,, -setuptools/command/__pycache__/saveopts.cpython-34.pyc,, -pkg_resources/_vendor/__pycache__/pyparsing.cpython-34.pyc,, -setuptools/command/__pycache__/install.cpython-34.pyc,, -setuptools/command/__pycache__/easy_install.cpython-34.pyc,, -setuptools/__pycache__/msvc9_support.cpython-34.pyc,, -setuptools/__pycache__/__init__.cpython-34.pyc,, -setuptools/__pycache__/ssl_support.cpython-34.pyc,, -setuptools/__pycache__/py31compat.cpython-34.pyc,, -setuptools/command/__pycache__/upload.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/dependency_links.txt deleted file mode 100644 index 47d1e81..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/dependency_links.txt +++ /dev/null @@ -1,2 +0,0 @@ -https://pypi.python.org/packages/source/c/certifi/certifi-2015.11.20.tar.gz#md5=25134646672c695c1ff1593c2dd75d08 -https://pypi.python.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2 diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/entry_points.txt b/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/entry_points.txt deleted file mode 100644 index 
924fed8..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/entry_points.txt +++ /dev/null @@ -1,62 +0,0 @@ -[console_scripts] -easy_install = setuptools.command.easy_install:main -easy_install-3.5 = setuptools.command.easy_install:main - -[distutils.commands] -alias = setuptools.command.alias:alias -bdist_egg = setuptools.command.bdist_egg:bdist_egg -bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm -bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst -build_ext = setuptools.command.build_ext:build_ext -build_py = setuptools.command.build_py:build_py -develop = setuptools.command.develop:develop -easy_install = setuptools.command.easy_install:easy_install -egg_info = setuptools.command.egg_info:egg_info -install = setuptools.command.install:install -install_egg_info = setuptools.command.install_egg_info:install_egg_info -install_lib = setuptools.command.install_lib:install_lib -install_scripts = setuptools.command.install_scripts:install_scripts -register = setuptools.command.register:register -rotate = setuptools.command.rotate:rotate -saveopts = setuptools.command.saveopts:saveopts -sdist = setuptools.command.sdist:sdist -setopt = setuptools.command.setopt:setopt -test = setuptools.command.test:test -upload = setuptools.command.upload:upload -upload_docs = setuptools.command.upload_docs:upload_docs - -[distutils.setup_keywords] -convert_2to3_doctests = setuptools.dist:assert_string_list -dependency_links = setuptools.dist:assert_string_list -eager_resources = setuptools.dist:assert_string_list -entry_points = setuptools.dist:check_entry_points -exclude_package_data = setuptools.dist:check_package_data -extras_require = setuptools.dist:check_extras -include_package_data = setuptools.dist:assert_bool -install_requires = setuptools.dist:check_requirements -namespace_packages = setuptools.dist:check_nsp -package_data = setuptools.dist:check_package_data -packages = setuptools.dist:check_packages -setup_requires = 
setuptools.dist:check_requirements -test_loader = setuptools.dist:check_importable -test_runner = setuptools.dist:check_importable -test_suite = setuptools.dist:check_test_suite -tests_require = setuptools.dist:check_requirements -use_2to3 = setuptools.dist:assert_bool -use_2to3_exclude_fixers = setuptools.dist:assert_string_list -use_2to3_fixers = setuptools.dist:assert_string_list -zip_safe = setuptools.dist:assert_bool - -[egg_info.writers] -PKG-INFO = setuptools.command.egg_info:write_pkg_info -dependency_links.txt = setuptools.command.egg_info:overwrite_arg -depends.txt = setuptools.command.egg_info:warn_depends_obsolete -eager_resources.txt = setuptools.command.egg_info:overwrite_arg -entry_points.txt = setuptools.command.egg_info:write_entries -namespace_packages.txt = setuptools.command.egg_info:overwrite_arg -requires.txt = setuptools.command.egg_info:write_requirements -top_level.txt = setuptools.command.egg_info:write_toplevel_names - -[setuptools.installation] -eggsecutable = setuptools.command.easy_install:bootstrap - diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/metadata.json deleted file mode 100644 index 9f390ea..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"generator": "bdist_wheel (0.26.0)", "summary": "Easily download, build, install, upgrade, and uninstall Python packages", "classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: 
System :: Archiving :: Packaging", "Topic :: System :: Systems Administration", "Topic :: Utilities"], "extensions": {"python.details": {"project_urls": {"Home": "https://bitbucket.org/pypa/setuptools"}, "contacts": [{"email": "distutils-sig@python.org", "name": "Python Packaging Authority", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}, "python.exports": {"console_scripts": {"easy_install": "setuptools.command.easy_install:main", "easy_install-3.5": "setuptools.command.easy_install:main"}, "distutils.commands": {"alias": "setuptools.command.alias:alias", "bdist_egg": "setuptools.command.bdist_egg:bdist_egg", "bdist_rpm": "setuptools.command.bdist_rpm:bdist_rpm", "bdist_wininst": "setuptools.command.bdist_wininst:bdist_wininst", "build_ext": "setuptools.command.build_ext:build_ext", "build_py": "setuptools.command.build_py:build_py", "develop": "setuptools.command.develop:develop", "easy_install": "setuptools.command.easy_install:easy_install", "egg_info": "setuptools.command.egg_info:egg_info", "install": "setuptools.command.install:install", "install_egg_info": "setuptools.command.install_egg_info:install_egg_info", "install_lib": "setuptools.command.install_lib:install_lib", "install_scripts": "setuptools.command.install_scripts:install_scripts", "register": "setuptools.command.register:register", "rotate": "setuptools.command.rotate:rotate", "saveopts": "setuptools.command.saveopts:saveopts", "sdist": "setuptools.command.sdist:sdist", "setopt": "setuptools.command.setopt:setopt", "test": "setuptools.command.test:test", "upload": "setuptools.command.upload:upload", "upload_docs": "setuptools.command.upload_docs:upload_docs"}, "distutils.setup_keywords": {"convert_2to3_doctests": "setuptools.dist:assert_string_list", "dependency_links": "setuptools.dist:assert_string_list", "eager_resources": "setuptools.dist:assert_string_list", "entry_points": "setuptools.dist:check_entry_points", "exclude_package_data": 
"setuptools.dist:check_package_data", "extras_require": "setuptools.dist:check_extras", "include_package_data": "setuptools.dist:assert_bool", "install_requires": "setuptools.dist:check_requirements", "namespace_packages": "setuptools.dist:check_nsp", "package_data": "setuptools.dist:check_package_data", "packages": "setuptools.dist:check_packages", "setup_requires": "setuptools.dist:check_requirements", "test_loader": "setuptools.dist:check_importable", "test_runner": "setuptools.dist:check_importable", "test_suite": "setuptools.dist:check_test_suite", "tests_require": "setuptools.dist:check_requirements", "use_2to3": "setuptools.dist:assert_bool", "use_2to3_exclude_fixers": "setuptools.dist:assert_string_list", "use_2to3_fixers": "setuptools.dist:assert_string_list", "zip_safe": "setuptools.dist:assert_bool"}, "egg_info.writers": {"PKG-INFO": "setuptools.command.egg_info:write_pkg_info", "dependency_links.txt": "setuptools.command.egg_info:overwrite_arg", "depends.txt": "setuptools.command.egg_info:warn_depends_obsolete", "eager_resources.txt": "setuptools.command.egg_info:overwrite_arg", "entry_points.txt": "setuptools.command.egg_info:write_entries", "namespace_packages.txt": "setuptools.command.egg_info:overwrite_arg", "requires.txt": "setuptools.command.egg_info:write_requirements", "top_level.txt": "setuptools.command.egg_info:write_toplevel_names"}, "setuptools.installation": {"eggsecutable": "setuptools.command.easy_install:bootstrap"}}, "python.commands": {"wrap_console": {"easy_install": "setuptools.command.easy_install:main", "easy_install-3.5": "setuptools.command.easy_install:main"}}}, "keywords": ["CPAN", "PyPI", "distutils", "eggs", "package", "management"], "metadata_version": "2.0", "name": "setuptools", "extras": ["certs", "ssl"], "run_requires": [{"requires": ["certifi (==2015.11.20)"], "extra": "certs"}, {"requires": ["wincertstore (==0.2)"], "extra": "ssl", "environment": "sys_platform=='win32'"}], "version": "20.2.2", "test_requires": 
[{"requires": ["pytest (>=2.8)", "setuptools[ssl]"]}]} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/AUTHORS.txt b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/AUTHORS.txt new file mode 100644 index 0000000..e845ac7 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/AUTHORS.txt @@ -0,0 +1,421 @@ +Adam Chainz +Adam Wentz +Adrien Morison +Alan Yee +Aleks Bunin +Alex Gaynor +Alex Grönholm +Alex Morega +Alex Stachowiak +Alexander Shtyrov +Alexandre Conrad +Alli +Anatoly Techtonik +Andrei Geacar +Andrew Gaul +Andrey Bulgakov +Andrés Delfino <34587441+andresdelfino@users.noreply.github.com> +Andrés Delfino +Andy Freeland +Andy Kluger +Anish Tambe +Anrs Hu +Anthony Sottile +Antoine Musso +Anton Ovchinnikov +Anton Patrushev +Antonio Alvarado Hernandez +Antony Lee +Antti Kaihola +Anubhav Patel +Anuj Godase +AQNOUCH Mohammed +AraHaan +Arindam Choudhury +Armin Ronacher +Ashley Manton +Atsushi Odagiri +Avner Cohen +Baptiste Mispelon +Barney Gale +barneygale +Bartek Ogryczak +Bastian Venthur +Ben Darnell +Ben Hoyt +Ben Rosser +Bence Nagy +Benjamin VanEvery +Benoit Pierre +Berker Peksag +Bernardo B. Marques +Bernhard M. Wiedemann +Bogdan Opanchuk +Brad Erickson +Bradley Ayers +Brandon L. 
Reiss +Brett Randall +Brian Rosner +BrownTruck +Bruno Oliveira +Bruno Renié +Bstrdsmkr +Buck Golemon +burrows +Bussonnier Matthias +c22 +Calvin Smith +Carl Meyer +Carlos Liam +Carol Willing +Carter Thayer +Cass +Chandrasekhar Atina +Chris Brinker +Chris Jerdonek +Chris McDonough +Chris Wolfe +Christian Heimes +Christian Oudard +Christopher Snyder +Clark Boylan +Clay McClure +Cody +Cody Soyland +Colin Watson +Connor Osborn +Cooper Lees +Cooper Ry Lees +Cory Benfield +Cory Wright +Craig Kerstiens +Cristian Sorinel +Curtis Doty +Damian Quiroga +Dan Black +Dan Savilonis +Dan Sully +daniel +Daniel Collins +Daniel Hahler +Daniel Holth +Daniel Jost +Daniel Shaulov +Daniele Procida +Danny Hermes +Dav Clark +Dave Abrahams +David Aguilar +David Black +David Caro +David Evans +David Linke +David Pursehouse +David Tucker +David Wales +Davidovich +derwolfe +Dmitry Gladkov +Domen Kožar +Donald Stufft +Dongweiming +Douglas Thor +DrFeathers +Dustin Ingram +Dwayne Bailey +Ed Morley <501702+edmorley@users.noreply.github.com> +Ed Morley +Eli Schwartz +Emil Styrke +Endoh Takanao +enoch +Eric Gillingham +Eric Hanchrow +Eric Hopper +Erik M. Bray +Erik Rose +Ernest W Durbin III +Ernest W. 
Durbin III +Erwin Janssen +Eugene Vereshchagin +fiber-space +Filip Kokosiński +Florian Briand +Francesco +Francesco Montesano +Gabriel Curio +Gabriel de Perthuis +Garry Polley +gdanielson +Geoffrey Lehée +Geoffrey Sneddon +George Song +Georgi Valkov +Giftlin Rajaiah +gizmoguy1 +gkdoc <40815324+gkdoc@users.noreply.github.com> +GOTO Hayato <3532528+gh640@users.noreply.github.com> +Guilherme Espada +Guy Rozendorn +Hari Charan +Herbert Pfennig +Hsiaoming Yang +Hugo +Hugo Lopes Tavares +hugovk +Hynek Schlawack +Ian Bicking +Ian Cordasco +Ian Lee +Ian Stapleton Cordasco +Ian Wienand +Ian Wienand +Igor Kuzmitshov +Igor Sobreira +Ilya Baryshev +INADA Naoki +Ionel Cristian Mărieș +Ionel Maries Cristian +Jakub Stasiak +Jakub Vysoky +Jakub Wilk +James Cleveland +James Cleveland +James Firth +James Polley +Jan Pokorný +Jannis Leidel +jarondl +Jason R. Coombs +Jay Graves +Jean-Christophe Fillion-Robin +Jeff Barber +Jeff Dairiki +Jeremy Stanley +Jeremy Zafran +Jim Garrison +Jivan Amara +John-Scott Atlakson +Jon Banafato +Jon Dufresne +Jon Parise +Jon Wayne Parrott +Jonas Nockert +Jonathan Herbert +Joost Molenaar +Jorge Niedbalski +Joseph Long +Josh Bronson +Josh Hansen +Josh Schneier +Julien Demoor +jwg4 +Jyrki Pulliainen +Kamal Bin Mustafa +kaustav haldar +keanemind +Kelsey Hightower +Kenneth Belitzky +Kenneth Reitz +Kenneth Reitz +Kevin Burke +Kevin Carter +Kevin Frommelt +Kexuan Sun +Kit Randel +kpinc +Kumar McMillan +Kyle Persohn +Laurent Bristiel +Laurie Opperman +Leon Sasson +Lev Givon +Lincoln de Sousa +Lipis +Loren Carvalho +Lucas Cimon +Ludovic Gasc +Luke Macken +Luo Jiebin +luojiebin +luz.paz +Marc Abramowitz +Marc Tamlyn +Marcus Smith +Mariatta +Mark Kohler +Markus Hametner +Masklinn +Matej Stuchlik +Mathew Jennings +Mathieu Bridon +Matt Good +Matt Maker +Matt Robenolt +matthew +Matthew Einhorn +Matthew Gilliard +Matthew Iversen +Matthew Trumbell +Matthew Willson +Matthias Bussonnier +mattip +Maxim Kurnikov +Maxime Rouyrre +memoselyk +Michael +Michael Aquilina 
+Michael E. Karpeles +Michael Klich +Michael Williamson +michaelpacer +Mickaël Schoentgen +Miguel Araujo Perez +Mihir Singh +Min RK +MinRK +Miro Hrončok +montefra +Monty Taylor +Nate Coraor +Nathaniel J. Smith +Nehal J Wani +Nick Coghlan +Nick Stenning +Nikhil Benesch +Nitesh Sharma +Nowell Strite +nvdv +Ofekmeister +Oliver Jeeves +Oliver Tonnhofer +Olivier Girardot +Olivier Grisel +Ollie Rutherfurd +OMOTO Kenji +Oren Held +Oscar Benjamin +Oz N Tiram +Patrick Dubroy +Patrick Jenkins +Patrick Lawson +patricktokeeffe +Paul Kehrer +Paul Moore +Paul Nasrat +Paul Oswald +Paul van der Linden +Paulus Schoutsen +Pawel Jasinski +Pekka Klärck +Peter Waller +Phaneendra Chiruvella +Phil Freo +Phil Pennock +Phil Whelan +Philip Molloy +Philippe Ombredanne +Pi Delport +Pierre-Yves Rofes +pip +Pradyun Gedam +Pratik Mallya +Preston Holmes +Przemek Wrzos +Qiangning Hong +R. David Murray +Rafael Caricio +Ralf Schmitt +Razzi Abuissa +Remi Rampin +Rene Dudfield +Richard Jones +RobberPhex +Robert Collins +Robert McGibbon +Robert T. McGibbon +Roey Berman +Rohan Jain +Rohan Jain +Rohan Jain +Roman Bogorodskiy +Romuald Brunet +Ronny Pfannschmidt +Rory McCann +Ross Brattain +Roy Wellington Ⅳ +Roy Wellington Ⅳ +Ryan Wooden +ryneeverett +Sachi King +Salvatore Rinchiera +schlamar +Scott Kitterman +seanj +Sebastian Schaetz +Segev Finer +Sergey Vasilyev +Seth Woodworth +Shlomi Fish +Simeon Visser +Simon Cross +Simon Pichugin +Sorin Sbarnea +Stavros Korokithakis +Stefan Scherfke +Stephan Erb +stepshal +Steve (Gadget) Barnes +Steve Barnes +Steve Kowalik +Steven Myint +stonebig +Stéphane Bidoul (ACSONE) +Stéphane Bidoul +Stéphane Klein +Takayuki SHIMIZUKAWA +Thijs Triemstra +Thomas Fenzl +Thomas Grainger +Thomas Guettler +Thomas Johansson +Thomas Kluyver +Thomas Smith +Tim D. 
Smith +Tim Harder +Tim Heap +tim smith +tinruufu +Tom Freudenheim +Tom V +Tomer Chachamu +Tony Zhaocheng Tan +Toshio Kuratomi +Travis Swicegood +Tzu-ping Chung +Valentin Haenel +Victor Stinner +Viktor Szépe +Ville Skyttä +Vinay Sajip +Vincent Philippon +Vitaly Babiy +Vladimir Rutsky +W. Trevor King +Wil Tan +Wilfred Hughes +William ML Leslie +Wolfgang Maier +Xavier Fernandez +Xavier Fernandez +xoviat +YAMAMOTO Takashi +Yen Chi Hsuan +Yoval P +Yu Jian +Zearin +Zearin +Zhiping Deng +Zvezdan Petkovic +Łukasz Langa +Семён Марьясин diff --git a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/INSTALLER similarity index 100% rename from Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/INSTALLER rename to Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/INSTALLER diff --git a/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/LICENSE.txt b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/LICENSE.txt new file mode 100644 index 0000000..d3379fa --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008-2018 The pip developers (see AUTHORS.txt file) + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/METADATA new file mode 100644 index 0000000..84ad80f --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/METADATA @@ -0,0 +1,72 @@ +Metadata-Version: 2.1 +Name: setuptools +Version: 40.5.0 +Summary: Easily download, build, install, upgrade, and uninstall Python packages +Home-page: https://github.com/pypa/setuptools +Author: Python Packaging Authority +Author-email: distutils-sig@python.org +License: UNKNOWN +Project-URL: Documentation, https://setuptools.readthedocs.io/ +Keywords: CPAN PyPI distutils eggs package management +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Archiving :: Packaging +Classifier: Topic :: System :: Systems Administration +Classifier: Topic :: Utilities +Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* 
+Description-Content-Type: text/x-rst; charset=UTF-8 + +.. image:: https://img.shields.io/pypi/v/setuptools.svg + :target: https://pypi.org/project/setuptools + +.. image:: https://readthedocs.org/projects/setuptools/badge/?version=latest + :target: https://setuptools.readthedocs.io + +.. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20build%20%40%20Travis%20CI + :target: https://travis-ci.org/pypa/setuptools + +.. image:: https://img.shields.io/appveyor/ci/pypa/setuptools/master.svg?label=Windows%20build%20%40%20Appveyor + :target: https://ci.appveyor.com/project/pypa/setuptools/branch/master + +.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg + :target: https://codecov.io/gh/pypa/setuptools + +.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg + +.. image:: https://tidelift.com/badges/github/pypa/setuptools + :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme + +See the `Installation Instructions +`_ in the Python Packaging +User's Guide for instructions on installing, upgrading, and uninstalling +Setuptools. + +The project is `maintained at GitHub `_ +by the `Setuptools Developers +`_. + +Questions and comments should be directed to the `distutils-sig +mailing list `_. +Bug reports and especially tested patches may be +submitted directly to the `bug tracker +`_. + + +Code of Conduct +--------------- + +Everyone interacting in the setuptools project's codebases, issue trackers, +chat rooms, and mailing lists is expected to follow the +`PyPA Code of Conduct `_. 
+ + diff --git a/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/RECORD new file mode 100644 index 0000000..603d0ff --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/RECORD @@ -0,0 +1,155 @@ +../../../bin/easy_install,sha256=1--cje1uGdQFCtUMyyqfrC9R_NYElXGshzvqpAB0LrM,298 +../../../bin/easy_install-3.7,sha256=1--cje1uGdQFCtUMyyqfrC9R_NYElXGshzvqpAB0LrM,298 +__pycache__/easy_install.cpython-37.pyc,, +easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126 +setuptools-40.5.0.dist-info/AUTHORS.txt,sha256=Pu4WdZapZ2U2wKwWxd830ZxnROCHwmV_TpWoL9dqJ-M,15880 +setuptools-40.5.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +setuptools-40.5.0.dist-info/LICENSE.txt,sha256=ORqHhOMZ2uVDFHfUzJvFBPxdcf2eieHIDxzThV9dfPo,1090 +setuptools-40.5.0.dist-info/METADATA,sha256=x8fnpPGZbNTSJjrYCx5Fj9YRrWWdArxr20vscZ6_S-o,3034 +setuptools-40.5.0.dist-info/RECORD,, +setuptools-40.5.0.dist-info/WHEEL,sha256=_wJFdOYk7i3xxT8ElOkUJvOdOvfNGbR9g-bf6UQT6sU,110 +setuptools-40.5.0.dist-info/dependency_links.txt,sha256=HlkCFkoK5TbZ5EMLbLKYhLcY_E31kBWD8TqW2EgmatQ,239 +setuptools-40.5.0.dist-info/entry_points.txt,sha256=s4ibTr5_v_-uWueemgrdzLUIL_ageOMqsgCAKZDkY2E,2934 +setuptools-40.5.0.dist-info/top_level.txt,sha256=2HUXVVwA4Pff1xgTFr3GsTXXKaPaO6vlG6oNJ_4u4Tg,38 +setuptools-40.5.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +setuptools/__init__.py,sha256=dsZD3T-_2htjtVAELRWeu83BFxjGaTFB0h3IO7PGi3U,5878 +setuptools/__pycache__/__init__.cpython-37.pyc,, +setuptools/__pycache__/archive_util.cpython-37.pyc,, +setuptools/__pycache__/build_meta.cpython-37.pyc,, +setuptools/__pycache__/config.cpython-37.pyc,, +setuptools/__pycache__/dep_util.cpython-37.pyc,, +setuptools/__pycache__/depends.cpython-37.pyc,, +setuptools/__pycache__/dist.cpython-37.pyc,, +setuptools/__pycache__/extension.cpython-37.pyc,, 
+setuptools/__pycache__/glibc.cpython-37.pyc,, +setuptools/__pycache__/glob.cpython-37.pyc,, +setuptools/__pycache__/launch.cpython-37.pyc,, +setuptools/__pycache__/lib2to3_ex.cpython-37.pyc,, +setuptools/__pycache__/monkey.cpython-37.pyc,, +setuptools/__pycache__/msvc.cpython-37.pyc,, +setuptools/__pycache__/namespaces.cpython-37.pyc,, +setuptools/__pycache__/package_index.cpython-37.pyc,, +setuptools/__pycache__/pep425tags.cpython-37.pyc,, +setuptools/__pycache__/py27compat.cpython-37.pyc,, +setuptools/__pycache__/py31compat.cpython-37.pyc,, +setuptools/__pycache__/py33compat.cpython-37.pyc,, +setuptools/__pycache__/py36compat.cpython-37.pyc,, +setuptools/__pycache__/sandbox.cpython-37.pyc,, +setuptools/__pycache__/site-patch.cpython-37.pyc,, +setuptools/__pycache__/ssl_support.cpython-37.pyc,, +setuptools/__pycache__/unicode_utils.cpython-37.pyc,, +setuptools/__pycache__/version.cpython-37.pyc,, +setuptools/__pycache__/wheel.cpython-37.pyc,, +setuptools/__pycache__/windows_support.cpython-37.pyc,, +setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +setuptools/_vendor/__pycache__/__init__.cpython-37.pyc,, +setuptools/_vendor/__pycache__/pyparsing.cpython-37.pyc,, +setuptools/_vendor/__pycache__/six.cpython-37.pyc,, +setuptools/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720 +setuptools/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513 +setuptools/_vendor/packaging/__pycache__/__about__.cpython-37.pyc,, +setuptools/_vendor/packaging/__pycache__/__init__.cpython-37.pyc,, +setuptools/_vendor/packaging/__pycache__/_compat.cpython-37.pyc,, +setuptools/_vendor/packaging/__pycache__/_structures.cpython-37.pyc,, +setuptools/_vendor/packaging/__pycache__/markers.cpython-37.pyc,, +setuptools/_vendor/packaging/__pycache__/requirements.cpython-37.pyc,, +setuptools/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc,, 
+setuptools/_vendor/packaging/__pycache__/utils.cpython-37.pyc,, +setuptools/_vendor/packaging/__pycache__/version.cpython-37.pyc,, +setuptools/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860 +setuptools/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416 +setuptools/_vendor/packaging/markers.py,sha256=Gvpk9EY20yKaMTiKgQZ8yFEEpodqVgVYtfekoic1Yts,8239 +setuptools/_vendor/packaging/requirements.py,sha256=t44M2HVWtr8phIz2OhnILzuGT3rTATaovctV1dpnVIg,4343 +setuptools/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025 +setuptools/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421 +setuptools/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556 +setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055 +setuptools/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 +setuptools/archive_util.py,sha256=kw8Ib_lKjCcnPKNbS7h8HztRVK0d5RacU3r_KRdVnmM,6592 +setuptools/build_meta.py,sha256=qg4RfvgZF1uZPuO1VMioG8JRhNMp5fHrwgpgkYpnzc8,6021 +setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 +setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752 +setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 +setuptools/command/__init__.py,sha256=NWzJ0A1BEengZpVeqUyWLNm2bk4P3F4iL5QUErHy7kA,594 +setuptools/command/__pycache__/__init__.cpython-37.pyc,, +setuptools/command/__pycache__/alias.cpython-37.pyc,, +setuptools/command/__pycache__/bdist_egg.cpython-37.pyc,, +setuptools/command/__pycache__/bdist_rpm.cpython-37.pyc,, +setuptools/command/__pycache__/bdist_wininst.cpython-37.pyc,, +setuptools/command/__pycache__/build_clib.cpython-37.pyc,, +setuptools/command/__pycache__/build_ext.cpython-37.pyc,, +setuptools/command/__pycache__/build_py.cpython-37.pyc,, 
+setuptools/command/__pycache__/develop.cpython-37.pyc,, +setuptools/command/__pycache__/dist_info.cpython-37.pyc,, +setuptools/command/__pycache__/easy_install.cpython-37.pyc,, +setuptools/command/__pycache__/egg_info.cpython-37.pyc,, +setuptools/command/__pycache__/install.cpython-37.pyc,, +setuptools/command/__pycache__/install_egg_info.cpython-37.pyc,, +setuptools/command/__pycache__/install_lib.cpython-37.pyc,, +setuptools/command/__pycache__/install_scripts.cpython-37.pyc,, +setuptools/command/__pycache__/py36compat.cpython-37.pyc,, +setuptools/command/__pycache__/register.cpython-37.pyc,, +setuptools/command/__pycache__/rotate.cpython-37.pyc,, +setuptools/command/__pycache__/saveopts.cpython-37.pyc,, +setuptools/command/__pycache__/sdist.cpython-37.pyc,, +setuptools/command/__pycache__/setopt.cpython-37.pyc,, +setuptools/command/__pycache__/test.cpython-37.pyc,, +setuptools/command/__pycache__/upload.cpython-37.pyc,, +setuptools/command/__pycache__/upload_docs.cpython-37.pyc,, +setuptools/command/alias.py,sha256=KjpE0sz_SDIHv3fpZcIQK-sCkJz-SrC6Gmug6b9Nkc8,2426 +setuptools/command/bdist_egg.py,sha256=be-IBpr1zhS9i6GjKANJgzkbH3ChImdWY7S-j0r2BK8,18167 +setuptools/command/bdist_rpm.py,sha256=B7l0TnzCGb-0nLlm6rS00jWLkojASwVmdhW2w5Qz_Ak,1508 +setuptools/command/bdist_wininst.py,sha256=_6dz3lpB1tY200LxKPLM7qgwTCceOMgaWFF-jW2-pm0,637 +setuptools/command/build_clib.py,sha256=bQ9aBr-5ZSO-9fGsGsDLz0mnnFteHUZnftVLkhvHDq0,4484 +setuptools/command/build_ext.py,sha256=81CTgsqjBjNl_HOgCJ1lQ5vv1NIM3RBpcoVGpqT4N1M,12897 +setuptools/command/build_py.py,sha256=yWyYaaS9F3o9JbIczn064A5g1C5_UiKRDxGaTqYbtLE,9596 +setuptools/command/develop.py,sha256=Sl1iMOORbAnp5BqiXmyMBD0uuvEnhSfOCqbxIPRiJPc,8060 +setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960 +setuptools/command/easy_install.py,sha256=qVo2Ju2TLg6gIu48SrM3tG8fHXFLtsMcQMu9-hAz-y8,89333 +setuptools/command/egg_info.py,sha256=HCc6PW4SrjaWtxy_YbXw34YwTcNdqUpv6n7QjL4qHgk,25093 
+setuptools/command/install.py,sha256=a0EZpL_A866KEdhicTGbuyD_TYl1sykfzdrri-zazT4,4683 +setuptools/command/install_egg_info.py,sha256=4zq_Ad3jE-EffParuyDEnvxU6efB-Xhrzdr8aB6Ln_8,3195 +setuptools/command/install_lib.py,sha256=n2iLR8f1MlYeGHtV2oFxDpUiL-wyLaQgwSAFX-YIEv4,5012 +setuptools/command/install_scripts.py,sha256=UD0rEZ6861mTYhIdzcsqKnUl8PozocXWl9VBQ1VTWnc,2439 +setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628 +setuptools/command/py36compat.py,sha256=SzjZcOxF7zdFUT47Zv2n7AM3H8koDys_0OpS-n9gIfc,4986 +setuptools/command/register.py,sha256=LO3MvYKPE8dN1m-KkrBRHC68ZFoPvA_vI8Xgp7vv6zI,534 +setuptools/command/rotate.py,sha256=co5C1EkI7P0GGT6Tqz-T2SIj2LBJTZXYELpmao6d4KQ,2164 +setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658 +setuptools/command/sdist.py,sha256=obDTe2BmWt2PlnFPZZh7e0LWvemEsbCCO9MzhrTZjm8,6711 +setuptools/command/setopt.py,sha256=NTWDyx-gjDF-txf4dO577s7LOzHVoKR0Mq33rFxaRr8,5085 +setuptools/command/test.py,sha256=fSl5OsZWSmFR3QJRvyy2OxbcYkuIkPvykWNOhFvAcUA,9228 +setuptools/command/upload.py,sha256=unktlo8fqx8yXU7F5hKkshNhQVG1tTIN3ObD9ERD0KE,1493 +setuptools/command/upload_docs.py,sha256=oXiGplM_cUKLwE4CWWw98RzCufAu8tBhMC97GegFcms,7311 +setuptools/config.py,sha256=tqFgKh3PYAIqkNgmotUSQHBTylRHJoh7mt8w0g82ax0,18695 +setuptools/dep_util.py,sha256=fgixvC1R7sH3r13ktyf7N0FALoqEXL1cBarmNpSEoWg,935 +setuptools/depends.py,sha256=hC8QIDcM3VDpRXvRVA6OfL9AaQfxvhxHcN_w6sAyNq8,5837 +setuptools/dist.py,sha256=lN_1YtfOsPg6hLVaOCDCPOlgTSoIL1FRu5jCNJuXmSg,42621 +setuptools/extension.py,sha256=uc6nHI-MxwmNCNPbUiBnybSyqhpJqjbhvOQ-emdvt_E,1729 +setuptools/extern/__init__.py,sha256=TxeNKFMSfBMzBpBDiHx8Dh3RzsdVmvWaXhtZ03DZMs0,2499 +setuptools/extern/__pycache__/__init__.cpython-37.pyc,, +setuptools/glibc.py,sha256=X64VvGPL2AbURKwYRsWJOXXGAYOiF_v2qixeTkAULuU,3146 +setuptools/glob.py,sha256=o75cHrOxYsvn854thSxE0x9k8JrKDuhP_rRXlVB00Q4,5084 
+setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 +setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264 +setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 +setuptools/launch.py,sha256=sd7ejwhBocCDx_wG9rIs0OaZ8HtmmFU8ZC6IR_S0Lvg,787 +setuptools/lib2to3_ex.py,sha256=t5e12hbR2pi9V4ezWDTB4JM-AISUnGOkmcnYHek3xjg,2013 +setuptools/monkey.py,sha256=_WJYLhz9FhwvpF5dDQKjcsiXmOvH0tb51ut5RdD5i4c,5204 +setuptools/msvc.py,sha256=uuRFaZzjJt5Fv3ZmyKUUuLtjx12_8G9RILigGec4irI,40838 +setuptools/namespaces.py,sha256=F0Nrbv8KCT2OrO7rwa03om4N4GZKAlnce-rr-cgDQa8,3199 +setuptools/package_index.py,sha256=yeifZQhJVRwPSaQmRrVPxbXRy-1lF5KdTFV8NAb3YcE,40342 +setuptools/pep425tags.py,sha256=bSGwlybcIpssx9kAv_hqAUJzfEpXSzYRp2u-nDYPdbk,10862 +setuptools/py27compat.py,sha256=3mwxRMDk5Q5O1rSXOERbQDXhFqwDJhhUitfMW_qpUCo,536 +setuptools/py31compat.py,sha256=REvrUBibUHgqI9S-ww0C9bhU-n8PyaQ8Slr1_NRxaaE,820 +setuptools/py33compat.py,sha256=OubjldHJH1KGE1CKt1kRU-Q55keftHT3ea1YoL0ZSco,1195 +setuptools/py36compat.py,sha256=VUDWxmu5rt4QHlGTRtAFu6W5jvfL6WBjeDAzeoBy0OM,2891 +setuptools/sandbox.py,sha256=9UbwfEL5QY436oMI1LtFWohhoZ-UzwHvGyZjUH_qhkw,14276 +setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218 +setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138 +setuptools/site-patch.py,sha256=OumkIHMuoSenRSW1382kKWI1VAwxNE86E5W8iDd34FY,2302 +setuptools/ssl_support.py,sha256=YBDJsCZjSp62CWjxmSkke9kn9rhHHj25Cus6zhJRW3c,8492 +setuptools/unicode_utils.py,sha256=NOiZ_5hD72A6w-4wVj8awHFM3n51Kmw1Ic_vx15XFqw,996 +setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144 +setuptools/wheel.py,sha256=A8hKSqHWZ5KM0-VP_DtptxpMxVF9pQwjWZcHGklxq2o,8102 +setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714 diff --git a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/WHEEL 
b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/WHEEL similarity index 70% rename from Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/WHEEL rename to Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/WHEEL index 9dff69d..c4bde30 100644 --- a/Shared/lib/python3.4/site-packages/backports_abc-0.4.dist-info/WHEEL +++ b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.24.0) +Generator: bdist_wheel (0.32.3) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any diff --git a/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/dependency_links.txt new file mode 100644 index 0000000..e87d021 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/dependency_links.txt @@ -0,0 +1,2 @@ +https://files.pythonhosted.org/packages/source/c/certifi/certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d +https://files.pythonhosted.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2 diff --git a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/entry_points.txt b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/entry_points.txt similarity index 94% rename from Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/entry_points.txt rename to Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/entry_points.txt index 5270e4a..b429e52 100644 --- a/Shared/lib/python3.4/site-packages/_markerlib-0.0.0.dist-info/entry_points.txt +++ b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/entry_points.txt @@ -6,9 +6,11 @@ alias = setuptools.command.alias:alias bdist_egg = setuptools.command.bdist_egg:bdist_egg bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst +build_clib = 
setuptools.command.build_clib:build_clib build_ext = setuptools.command.build_ext:build_ext build_py = setuptools.command.build_py:build_py develop = setuptools.command.develop:develop +dist_info = setuptools.command.dist_info:dist_info easy_install = setuptools.command.easy_install:easy_install egg_info = setuptools.command.egg_info:egg_info install = setuptools.command.install:install @@ -36,6 +38,7 @@ install_requires = setuptools.dist:check_requirements namespace_packages = setuptools.dist:check_nsp package_data = setuptools.dist:check_package_data packages = setuptools.dist:check_packages +python_requires = setuptools.dist:check_specifier setup_requires = setuptools.dist:check_requirements test_loader = setuptools.dist:check_importable test_runner = setuptools.dist:check_importable diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/zip-safe b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools-40.5.0.dist-info/zip-safe @@ -0,0 +1 @@ + diff --git a/Shared/lib/python3.4/site-packages/setuptools/__init__.py b/Shared/lib/python3.4/site-packages/setuptools/__init__.py index 67b57e4..54309b5 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/__init__.py +++ b/Shared/lib/python3.4/site-packages/setuptools/__init__.py @@ -1,25 +1,33 @@ """Extensions to the 'distutils' for large or complex distributions""" import os +import sys import functools import distutils.core import distutils.filelist -from distutils.core import Command as _Command from 
distutils.util import convert_path from fnmatch import fnmatchcase -from setuptools.extern.six.moves import filterfalse, map +from setuptools.extern.six import PY3 +from setuptools.extern.six.moves import filter, map import setuptools.version from setuptools.extension import Extension -from setuptools.dist import Distribution, Feature, _get_unpatched +from setuptools.dist import Distribution, Feature from setuptools.depends import Require +from . import monkey + +__metaclass__ = type + __all__ = [ 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require', 'find_packages' ] +if PY3: + __all__.append('find_namespace_packages') + __version__ = setuptools.version.__version__ bootstrap_install_from = None @@ -31,13 +39,19 @@ run_2to3_on_doctests = True lib2to3_fixer_packages = ['lib2to3.fixes'] -class PackageFinder(object): +class PackageFinder: + """ + Generate a list of all Python packages found within a directory + """ + @classmethod def find(cls, where='.', exclude=(), include=('*',)): """Return a list all Python packages found within directory 'where' - 'where' should be supplied as a "cross-platform" (i.e. URL-style) - path; it will be converted to the appropriate local path syntax. + 'where' is the root directory which will be searched for packages. It + should be supplied as a "cross-platform" (i.e. URL-style) path; it will + be converted to the appropriate local path syntax. + 'exclude' is a sequence of package names to exclude; '*' can be used as a wildcard in the names, such that 'foo.*' will exclude all subpackages of 'foo' (but not 'foo' itself). @@ -46,78 +60,90 @@ class PackageFinder(object): specified, only the named packages will be included. If it's not specified, all found packages will be included. 'include' can contain shell style wildcard patterns just like 'exclude'. + """ - The list of included packages is built up first and then any - explicitly excluded packages are removed from it. 
- """ - out = cls._find_packages_iter(convert_path(where)) - out = cls.require_parents(out) - includes = cls._build_filter(*include) - excludes = cls._build_filter('ez_setup', '*__pycache__', *exclude) - out = filter(includes, out) - out = filterfalse(excludes, out) - return list(out) - - @staticmethod - def require_parents(packages): - """ - Exclude any apparent package that apparently doesn't include its - parent. - - For example, exclude 'foo.bar' if 'foo' is not present. - """ - found = [] - for pkg in packages: - base, sep, child = pkg.rpartition('.') - if base and base not in found: - continue - found.append(pkg) - yield pkg - - @staticmethod - def _candidate_dirs(base_path): - """ - Return all dirs in base_path that might be packages. - """ - has_dot = lambda name: '.' in name - for root, dirs, files in os.walk(base_path, followlinks=True): - # Exclude directories that contain a period, as they cannot be - # packages. Mutate the list to avoid traversal. - dirs[:] = filterfalse(has_dot, dirs) - for dir in dirs: - yield os.path.relpath(os.path.join(root, dir), base_path) + return list(cls._find_packages_iter( + convert_path(where), + cls._build_filter('ez_setup', '*__pycache__', *exclude), + cls._build_filter(*include))) @classmethod - def _find_packages_iter(cls, base_path): - candidates = cls._candidate_dirs(base_path) - return ( - path.replace(os.path.sep, '.') - for path in candidates - if cls._looks_like_package(os.path.join(base_path, path)) - ) + def _find_packages_iter(cls, where, exclude, include): + """ + All the packages found in 'where' that pass the 'include' filter, but + not the 'exclude' filter. + """ + for root, dirs, files in os.walk(where, followlinks=True): + # Copy dirs to iterate over it, then empty dirs. 
+ all_dirs = dirs[:] + dirs[:] = [] + + for dir in all_dirs: + full_path = os.path.join(root, dir) + rel_path = os.path.relpath(full_path, where) + package = rel_path.replace(os.path.sep, '.') + + # Skip directory trees that are not valid packages + if ('.' in dir or not cls._looks_like_package(full_path)): + continue + + # Should this package be included? + if include(package) and not exclude(package): + yield package + + # Keep searching subdirectories, as there may be more packages + # down there, even if the parent was excluded. + dirs.append(dir) @staticmethod def _looks_like_package(path): + """Does a directory look like a package?""" return os.path.isfile(os.path.join(path, '__init__.py')) @staticmethod def _build_filter(*patterns): """ Given a list of patterns, return a callable that will be true only if - the input matches one of the patterns. + the input matches at least one of the patterns. """ return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns) + class PEP420PackageFinder(PackageFinder): @staticmethod def _looks_like_package(path): return True + find_packages = PackageFinder.find -setup = distutils.core.setup +if PY3: + find_namespace_packages = PEP420PackageFinder.find + + +def _install_setup_requires(attrs): + # Note: do not use `setuptools.Distribution` directly, as + # our PEP 517 backend patch `distutils.core.Distribution`. + dist = distutils.core.Distribution(dict( + (k, v) for k, v in attrs.items() + if k in ('dependency_links', 'setup_requires') + )) + # Honor setup.cfg's options. + dist.parse_config_files(ignore_option_errors=True) + if dist.setup_requires: + dist.fetch_build_eggs(dist.setup_requires) + + +def setup(**attrs): + # Make sure we have any requirements needed to interpret 'attrs'. 
+ _install_setup_requires(attrs) + return distutils.core.setup(**attrs) + +setup.__doc__ = distutils.core.setup.__doc__ + + +_Command = monkey.get_unpatched(distutils.core.Command) -_Command = _get_unpatched(_Command) class Command(_Command): __doc__ = _Command.__doc__ @@ -137,9 +163,6 @@ class Command(_Command): vars(cmd).update(kw) return cmd -# we can't patch distutils.cmd, alas -distutils.core.Command = Command - def _find_all_simple(path): """ @@ -165,5 +188,4 @@ def findall(dir=os.curdir): return list(files) -# fix findall bug in distutils (http://bugs.python.org/issue12885) -distutils.filelist.findall = findall +monkey.patch_all() diff --git a/Shared/lib/python3.4/site-packages/wheel/test/headers.dist/header.h b/Shared/lib/python3.4/site-packages/setuptools/_vendor/__init__.py similarity index 100% rename from Shared/lib/python3.4/site-packages/wheel/test/headers.dist/header.h rename to Shared/lib/python3.4/site-packages/setuptools/_vendor/__init__.py diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/__about__.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/__about__.py new file mode 100644 index 0000000..95d330e --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/__about__.py @@ -0,0 +1,21 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+from __future__ import absolute_import, division, print_function + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "16.8" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD or Apache License, Version 2.0" +__copyright__ = "Copyright 2014-2016 %s" % __author__ diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/__init__.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/__init__.py new file mode 100644 index 0000000..5ee6220 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/__init__.py @@ -0,0 +1,14 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +from .__about__ import ( + __author__, __copyright__, __email__, __license__, __summary__, __title__, + __uri__, __version__ +) + +__all__ = [ + "__title__", "__summary__", "__uri__", "__version__", "__author__", + "__email__", "__license__", "__copyright__", +] diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/_compat.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/_compat.py new file mode 100644 index 0000000..210bb80 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/_compat.py @@ -0,0 +1,30 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+from __future__ import absolute_import, division, print_function + +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +# flake8: noqa + +if PY3: + string_types = str, +else: + string_types = basestring, + + +def with_metaclass(meta, *bases): + """ + Create a base class with a metaclass. + """ + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(meta): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/_structures.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/_structures.py new file mode 100644 index 0000000..ccc2786 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/_structures.py @@ -0,0 +1,68 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+from __future__ import absolute_import, division, print_function + + +class Infinity(object): + + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + +Infinity = Infinity() + + +class NegativeInfinity(object): + + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + +NegativeInfinity = NegativeInfinity() diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/markers.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/markers.py new file mode 100644 index 0000000..031332a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/markers.py @@ -0,0 +1,301 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+from __future__ import absolute_import, division, print_function + +import operator +import os +import platform +import sys + +from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd +from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString +from setuptools.extern.pyparsing import Literal as L # noqa + +from ._compat import string_types +from .specifiers import Specifier, InvalidSpecifier + + +__all__ = [ + "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", + "Marker", "default_environment", +] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. + """ + + +class Node(object): + + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + def __repr__(self): + return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) + + def serialize(self): + raise NotImplementedError + + +class Variable(Node): + + def serialize(self): + return str(self) + + +class Value(Node): + + def serialize(self): + return '"{0}"'.format(self) + + +class Op(Node): + + def serialize(self): + return str(self) + + +VARIABLE = ( + L("implementation_version") | + L("platform_python_implementation") | + L("implementation_name") | + L("python_full_version") | + L("platform_release") | + L("platform_version") | + L("platform_machine") | + L("platform_system") | + L("python_version") | + L("sys_platform") | + L("os_name") | + L("os.name") | # PEP-345 + L("sys.platform") | # PEP-345 + L("platform.version") | # PEP-345 + L("platform.machine") | # PEP-345 + L("platform.python_implementation") | # PEP-345 + L("python_implementation") | # undocumented setuptools 
legacy + L("extra") +) +ALIASES = { + 'os.name': 'os_name', + 'sys.platform': 'sys_platform', + 'platform.version': 'platform_version', + 'platform.machine': 'platform_machine', + 'platform.python_implementation': 'platform_python_implementation', + 'python_implementation': 'platform_python_implementation' +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | + L("==") | + L(">=") | + L("<=") | + L("!=") | + L("~=") | + L(">") | + L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results): + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker(marker, first=True): + assert isinstance(marker, (list, tuple, string_types)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. 
+ if (isinstance(marker, list) and len(marker) == 1 and + isinstance(marker[0], (list, tuple))): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs, op, rhs): + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs) + + oper = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison( + "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) + ) + + return oper(lhs, rhs) + + +_undefined = object() + + +def _get_env(environment, name): + value = environment.get(name, _undefined) + + if value is _undefined: + raise UndefinedEnvironmentName( + "{0!r} does not exist in evaluation environment.".format(name) + ) + + return value + + +def _evaluate_markers(markers, environment): + groups = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, string_types)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + lhs_value = _get_env(environment, lhs.value) + rhs_value = rhs.value + else: + lhs_value = lhs.value + rhs_value = _get_env(environment, rhs.value) + + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info): + version = 
'{0.major}.{0.minor}.{0.micro}'.format(info) + kind = info.releaselevel + if kind != 'final': + version += kind[0] + str(info.serial) + return version + + +def default_environment(): + if hasattr(sys, 'implementation'): + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + else: + iver = '0' + implementation_name = '' + + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": platform.python_version()[:3], + "sys_platform": sys.platform, + } + + +class Marker(object): + + def __init__(self, marker): + try: + self._markers = _coerce_parse_result(MARKER.parseString(marker)) + except ParseException as e: + err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( + marker, marker[e.loc:e.loc + 8]) + raise InvalidMarker(err_str) + + def __str__(self): + return _format_marker(self._markers) + + def __repr__(self): + return "".format(str(self)) + + def evaluate(self, environment=None): + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. 
+ """ + current_environment = default_environment() + if environment is not None: + current_environment.update(environment) + + return _evaluate_markers(self._markers, current_environment) diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/requirements.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/requirements.py new file mode 100644 index 0000000..5b49341 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/requirements.py @@ -0,0 +1,127 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import string +import re + +from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException +from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine +from setuptools.extern.pyparsing import Literal as L # noqa +from setuptools.extern.six.moves.urllib import parse as urlparse + +from .markers import MARKER_EXPR, Marker +from .specifiers import LegacySpecifier, Specifier, SpecifierSet + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. 
+ """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r'[^ ]+')("url") +URL = (AT + URI) + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), + joinString=",", adjacent=False)("_raw_spec") +_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '') + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start:t._original_end]) +) +MARKER_SEPERATOR = SEMICOLON +MARKER = MARKER_SEPERATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = \ + NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd + + +class Requirement(object): + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? 
+ # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? + + def __init__(self, requirement_string): + try: + req = REQUIREMENT.parseString(requirement_string) + except ParseException as e: + raise InvalidRequirement( + "Invalid requirement, parse error at \"{0!r}\"".format( + requirement_string[e.loc:e.loc + 8])) + + self.name = req.name + if req.url: + parsed_url = urlparse.urlparse(req.url) + if not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc): + raise InvalidRequirement("Invalid URL given") + self.url = req.url + else: + self.url = None + self.extras = set(req.extras.asList() if req.extras else []) + self.specifier = SpecifierSet(req.specifier) + self.marker = req.marker if req.marker else None + + def __str__(self): + parts = [self.name] + + if self.extras: + parts.append("[{0}]".format(",".join(sorted(self.extras)))) + + if self.specifier: + parts.append(str(self.specifier)) + + if self.url: + parts.append("@ {0}".format(self.url)) + + if self.marker: + parts.append("; {0}".format(self.marker)) + + return "".join(parts) + + def __repr__(self): + return "".format(str(self)) diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/specifiers.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/specifiers.py new file mode 100644 index 0000000..7f5a76c --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/specifiers.py @@ -0,0 +1,774 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+from __future__ import absolute_import, division, print_function + +import abc +import functools +import itertools +import re + +from ._compat import string_types, with_metaclass +from .version import Version, LegacyVersion, parse + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): + + @abc.abstractmethod + def __str__(self): + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self): + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. + """ + + @abc.abstractmethod + def __ne__(self, other): + """ + Returns a boolean representing whether or not the two Specifier like + objects are not equal. + """ + + @abc.abstractproperty + def prereleases(self): + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @prereleases.setter + def prereleases(self, value): + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @abc.abstractmethod + def contains(self, item, prereleases=None): + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter(self, iterable, prereleases=None): + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. 
+ """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators = {} + + def __init__(self, spec="", prereleases=None): + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) + + self._spec = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "<{0}({1!r}{2})>".format( + self.__class__.__name__, + str(self), + pre, + ) + + def __str__(self): + return "{0}{1}".format(*self._spec) + + def __hash__(self): + return hash(self._spec) + + def __eq__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec == other._spec + + def __ne__(self, other): + if isinstance(other, string_types): + try: + other = self.__class__(other) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._spec != other._spec + + def _get_operator(self, op): + return getattr(self, "_compare_{0}".format(self._operators[op])) + + def _coerce_version(self, version): + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self): + return self._spec[0] + + @property + def version(self): + return self._spec[1] + + @property + def prereleases(self): + return self._prereleases + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Determine if prereleases are to be allowed or not. 
+ if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. + return self._get_operator(self.operator)(item, self.version) + + def filter(self, iterable, prereleases=None): + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later incase nothing + # else matches this specifier. + if (parsed_version.is_prerelease and not + (prereleases or self.prereleases)): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the begining. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. 
+ if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +class LegacySpecifier(_IndividualSpecifier): + + _regex_str = ( + r""" + (?P(==|!=|<=|>=|<|>)) + \s* + (?P + [^,;\s)]* # Since this is a "legacy" specifier, and the version + # string can be just about anything, we match everything + # except for whitespace, a semi-colon for marker support, + # a closing paren since versions can be enclosed in + # them, and a comma since it's a version separator. + ) + """ + ) + + _regex = re.compile( + r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + } + + def _coerce_version(self, version): + if not isinstance(version, LegacyVersion): + version = LegacyVersion(str(version)) + return version + + def _compare_equal(self, prospective, spec): + return prospective == self._coerce_version(spec) + + def _compare_not_equal(self, prospective, spec): + return prospective != self._coerce_version(spec) + + def _compare_less_than_equal(self, prospective, spec): + return prospective <= self._coerce_version(spec) + + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= self._coerce_version(spec) + + def _compare_less_than(self, prospective, spec): + return prospective < self._coerce_version(spec) + + def _compare_greater_than(self, prospective, spec): + return prospective > self._coerce_version(spec) + + +def _require_version_compare(fn): + @functools.wraps(fn) + def wrapped(self, prospective, spec): + if not isinstance(prospective, Version): + return False + return fn(self, prospective, spec) + return wrapped + + +class Specifier(_IndividualSpecifier): + + _regex_str = ( + r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. 
+ # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s]* # We just match everything, except for whitespace + # since we are only testing for strict identity. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + + # You cannot use a wild card and a dev or local version + # together so group them with a | and make them optional. + (?: + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + | + \.\* # Wild card syntax of .* + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + @_require_version_compare + def _compare_compatible(self, prospective, spec): + # Compatible releases have an equivalent combination of >= and ==. 
That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore post and dev releases and we want to treat the pre-release as + # it's own separate segment. + prefix = ".".join( + list( + itertools.takewhile( + lambda x: (not x.startswith("post") and not + x.startswith("dev")), + _version_split(spec), + ) + )[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return (self._get_operator(">=")(prospective, spec) and + self._get_operator("==")(prospective, prefix)) + + @_require_version_compare + def _compare_equal(self, prospective, spec): + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + prospective = Version(prospective.public) + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. + spec = _version_split(spec[:-2]) # Remove the trailing .* + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + prospective = _version_split(str(prospective)) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + prospective = prospective[:len(spec)] + + # Pad out our two sides with zeros so that they both equal the same + # length. + spec, prospective = _pad_version(spec, prospective) + else: + # Convert our spec string into a Version + spec = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. 
+ if not spec.local: + prospective = Version(prospective.public) + + return prospective == spec + + @_require_version_compare + def _compare_not_equal(self, prospective, spec): + return not self._compare_equal(prospective, spec) + + @_require_version_compare + def _compare_less_than_equal(self, prospective, spec): + return prospective <= Version(spec) + + @_require_version_compare + def _compare_greater_than_equal(self, prospective, spec): + return prospective >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective, spec): + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. 
+ if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is techincally greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective, spec): + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self): + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. 
+ if parse(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version): + result = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _pad_version(left, right): + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]):]) + right_split.append(right[len(right_split[0]):]) + + # Insert our padding + left_split.insert( + 1, + ["0"] * max(0, len(right_split[0]) - len(left_split[0])), + ) + right_split.insert( + 1, + ["0"] * max(0, len(left_split[0]) - len(right_split[0])), + ) + + return ( + list(itertools.chain(*left_split)), + list(itertools.chain(*right_split)), + ) + + +class SpecifierSet(BaseSpecifier): + + def __init__(self, specifiers="", prereleases=None): + # Split on , to break each indidivual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. + specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier and falling back to a LegacySpecifier. + parsed = set() + for specifier in specifiers: + try: + parsed.add(Specifier(specifier)) + except InvalidSpecifier: + parsed.add(LegacySpecifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. 
+ self._prereleases = prereleases + + def __repr__(self): + pre = ( + ", prereleases={0!r}".format(self.prereleases) + if self._prereleases is not None + else "" + ) + + return "".format(str(self), pre) + + def __str__(self): + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self): + return hash(self._specs) + + def __and__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." + ) + + return specifier + + def __eq__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __ne__(self, other): + if isinstance(other, string_types): + other = SpecifierSet(other) + elif isinstance(other, _IndividualSpecifier): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs != other._specs + + def __len__(self): + return len(self._specs) + + def __iter__(self): + return iter(self._specs) + + @property + def prereleases(self): + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. 
+ if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value): + self._prereleases = value + + def __contains__(self, item): + return self.contains(item) + + def contains(self, item, prereleases=None): + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. 
+ return all( + s.contains(item, prereleases=prereleases) + for s in self._specs + ) + + def filter(self, iterable, prereleases=None): + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. + else: + filtered = [] + found_prereleases = [] + + for item in iterable: + # Ensure that we some kind of Version class for this item. 
+ if not isinstance(item, (LegacyVersion, Version)): + parsed_version = parse(item) + else: + parsed_version = item + + # Filter out any item which is parsed as a LegacyVersion + if isinstance(parsed_version, LegacyVersion): + continue + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return found_prereleases + + return filtered diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/utils.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/utils.py new file mode 100644 index 0000000..942387c --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/utils.py @@ -0,0 +1,14 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. +from __future__ import absolute_import, division, print_function + +import re + + +_canonicalize_regex = re.compile(r"[-_.]+") + + +def canonicalize_name(name): + # This is taken from PEP 503. + return _canonicalize_regex.sub("-", name).lower() diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/version.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/version.py new file mode 100644 index 0000000..83b5ee8 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/packaging/version.py @@ -0,0 +1,393 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+from __future__ import absolute_import, division, print_function + +import collections +import itertools +import re + +from ._structures import Infinity + + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" +] + + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion(object): + + def __hash__(self): + return hash(self._key) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ne__(self, other): + return self._compare(other, lambda s, o: s != o) + + def _compare(self, other, method): + if not isinstance(other, _BaseVersion): + return NotImplemented + + return method(self._key, other._key) + + +class LegacyVersion(_BaseVersion): + + def __init__(self, version): + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + def __str__(self): + return self._version + + def __repr__(self): + return "".format(repr(str(self))) + + @property + def public(self): + return self._version + + @property + def base_version(self): + return self._version + + @property + def local(self): + return None + + @property + def is_prerelease(self): + return False + + @property + 
def is_postrelease(self): + return False + + +_legacy_version_component_re = re.compile( + r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, +) + +_legacy_version_replacement_map = { + "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", +} + + +def _parse_version_parts(s): + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version): + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + parts = tuple(parts) + + return epoch, parts + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                              # pre-release
    +            [-_\.]?
    +            (?P(a|b|c|rc|alpha|beta|pre|preview))
    +            [-_\.]?
    +            (?P[0-9]+)?
    +        )?
    +        (?P                                         # post release
    +            (?:-(?P[0-9]+))
    +            |
    +            (?:
    +                [-_\.]?
    +                (?Ppost|rev|r)
    +                [-_\.]?
    +                (?P[0-9]+)?
    +            )
    +        )?
    +        (?P                                          # dev release
    +            [-_\.]?
    +            (?Pdev)
    +            [-_\.]?
    +            (?P[0-9]+)?
    +        )?
    +    )
    +    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
    +"""
    +
    +
    +class Version(_BaseVersion):
    +
    +    _regex = re.compile(
    +        r"^\s*" + VERSION_PATTERN + r"\s*$",
    +        re.VERBOSE | re.IGNORECASE,
    +    )
    +
    +    def __init__(self, version):
    +        # Validate the version and parse it into pieces
    +        match = self._regex.search(version)
    +        if not match:
    +            raise InvalidVersion("Invalid version: '{0}'".format(version))
    +
    +        # Store the parsed out pieces of the version
    +        self._version = _Version(
    +            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
    +            release=tuple(int(i) for i in match.group("release").split(".")),
    +            pre=_parse_letter_version(
    +                match.group("pre_l"),
    +                match.group("pre_n"),
    +            ),
    +            post=_parse_letter_version(
    +                match.group("post_l"),
    +                match.group("post_n1") or match.group("post_n2"),
    +            ),
    +            dev=_parse_letter_version(
    +                match.group("dev_l"),
    +                match.group("dev_n"),
    +            ),
    +            local=_parse_local_version(match.group("local")),
    +        )
    +
    +        # Generate a key which will be used for sorting
    +        self._key = _cmpkey(
    +            self._version.epoch,
    +            self._version.release,
    +            self._version.pre,
    +            self._version.post,
    +            self._version.dev,
    +            self._version.local,
    +        )
    +
    +    def __repr__(self):
    +        return "".format(repr(str(self)))
    +
    +    def __str__(self):
    +        parts = []
    +
    +        # Epoch
    +        if self._version.epoch != 0:
    +            parts.append("{0}!".format(self._version.epoch))
    +
    +        # Release segment
    +        parts.append(".".join(str(x) for x in self._version.release))
    +
    +        # Pre-release
    +        if self._version.pre is not None:
    +            parts.append("".join(str(x) for x in self._version.pre))
    +
    +        # Post-release
    +        if self._version.post is not None:
    +            parts.append(".post{0}".format(self._version.post[1]))
    +
    +        # Development release
    +        if self._version.dev is not None:
    +            parts.append(".dev{0}".format(self._version.dev[1]))
    +
    +        # Local version segment
    +        if self._version.local is not None:
    +            parts.append(
    +                "+{0}".format(".".join(str(x) for x in self._version.local))
    +            )
    +
    +        return "".join(parts)
    +
    +    @property
    +    def public(self):
    +        return str(self).split("+", 1)[0]
    +
    +    @property
    +    def base_version(self):
    +        parts = []
    +
    +        # Epoch
    +        if self._version.epoch != 0:
    +            parts.append("{0}!".format(self._version.epoch))
    +
    +        # Release segment
    +        parts.append(".".join(str(x) for x in self._version.release))
    +
    +        return "".join(parts)
    +
    +    @property
    +    def local(self):
    +        version_string = str(self)
    +        if "+" in version_string:
    +            return version_string.split("+", 1)[1]
    +
    +    @property
    +    def is_prerelease(self):
    +        return bool(self._version.dev or self._version.pre)
    +
    +    @property
    +    def is_postrelease(self):
    +        return bool(self._version.post)
    +
    +
    +def _parse_letter_version(letter, number):
    +    if letter:
    +        # We consider there to be an implicit 0 in a pre-release if there is
    +        # not a numeral associated with it.
    +        if number is None:
    +            number = 0
    +
    +        # We normalize any letters to their lower case form
    +        letter = letter.lower()
    +
    +        # We consider some words to be alternate spellings of other words and
    +        # in those cases we want to normalize the spellings to our preferred
    +        # spelling.
    +        if letter == "alpha":
    +            letter = "a"
    +        elif letter == "beta":
    +            letter = "b"
    +        elif letter in ["c", "pre", "preview"]:
    +            letter = "rc"
    +        elif letter in ["rev", "r"]:
    +            letter = "post"
    +
    +        return letter, int(number)
    +    if not letter and number:
    +        # We assume if we are given a number, but we are not given a letter
    +        # then this is using the implicit post release syntax (e.g. 1.0-1)
    +        letter = "post"
    +
    +        return letter, int(number)
    +
    +
    +_local_version_seperators = re.compile(r"[\._-]")
    +
    +
    +def _parse_local_version(local):
    +    """
    +    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    +    """
    +    if local is not None:
    +        return tuple(
    +            part.lower() if not part.isdigit() else int(part)
    +            for part in _local_version_seperators.split(local)
    +        )
    +
    +
    +def _cmpkey(epoch, release, pre, post, dev, local):
    +    # When we compare a release version, we want to compare it with all of the
    +    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    +    # leading zeros until we come to something non zero, then take the rest
    +    # re-reverse it back into the correct order and make it a tuple and use
    +    # that for our sorting key.
    +    release = tuple(
    +        reversed(list(
    +            itertools.dropwhile(
    +                lambda x: x == 0,
    +                reversed(release),
    +            )
    +        ))
    +    )
    +
    +    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    +    # We'll do this by abusing the pre segment, but we _only_ want to do this
    +    # if there is not a pre or a post segment. If we have one of those then
    +    # the normal sorting rules will handle this case correctly.
    +    if pre is None and post is None and dev is not None:
    +        pre = -Infinity
    +    # Versions without a pre-release (except as noted above) should sort after
    +    # those with one.
    +    elif pre is None:
    +        pre = Infinity
    +
    +    # Versions without a post segment should sort before those with one.
    +    if post is None:
    +        post = -Infinity
    +
    +    # Versions without a development segment should sort after those with one.
    +    if dev is None:
    +        dev = Infinity
    +
    +    if local is None:
    +        # Versions without a local segment should sort before those with one.
    +        local = -Infinity
    +    else:
    +        # Versions with a local segment need that segment parsed to implement
    +        # the sorting rules in PEP440.
    +        # - Alpha numeric segments sort before numeric segments
    +        # - Alpha numeric segments sort lexicographically
    +        # - Numeric segments sort numerically
    +        # - Shorter versions sort before longer versions when the prefixes
    +        #   match exactly
    +        local = tuple(
    +            (i, "") if isinstance(i, int) else (-Infinity, i)
    +            for i in local
    +        )
    +
    +    return epoch, release, pre, post, dev, local
    diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/pyparsing.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/pyparsing.py
    new file mode 100644
    index 0000000..cf75e1e
    --- /dev/null
    +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/pyparsing.py
    @@ -0,0 +1,5742 @@
    +# module pyparsing.py
    +#
    +# Copyright (c) 2003-2018  Paul T. McGuire
    +#
    +# Permission is hereby granted, free of charge, to any person obtaining
    +# a copy of this software and associated documentation files (the
    +# "Software"), to deal in the Software without restriction, including
    +# without limitation the rights to use, copy, modify, merge, publish,
    +# distribute, sublicense, and/or sell copies of the Software, and to
    +# permit persons to whom the Software is furnished to do so, subject to
    +# the following conditions:
    +#
    +# The above copyright notice and this permission notice shall be
    +# included in all copies or substantial portions of the Software.
    +#
    +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
    +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
    +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    +#
    +
    +__doc__ = \
    +"""
    +pyparsing module - Classes and methods to define and execute parsing grammars
    +=============================================================================
    +
    +The pyparsing module is an alternative approach to creating and executing simple grammars,
    +vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
    +don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
    +provides a library of classes that you use to construct the grammar directly in Python.
    +
    +Here is a program to parse "Hello, World!" (or any greeting of the form 
    +C{", !"}), built up using L{Word}, L{Literal}, and L{And} elements 
    +(L{'+'} operator gives L{And} expressions, strings are auto-converted to
    +L{Literal} expressions)::
    +
    +    from pyparsing import Word, alphas
    +
    +    # define grammar of a greeting
    +    greet = Word(alphas) + "," + Word(alphas) + "!"
    +
    +    hello = "Hello, World!"
    +    print (hello, "->", greet.parseString(hello))
    +
    +The program outputs the following::
    +
    +    Hello, World! -> ['Hello', ',', 'World', '!']
    +
    +The Python representation of the grammar is quite readable, owing to the self-explanatory
    +class names, and the use of '+', '|' and '^' operators.
    +
    +The L{ParseResults} object returned from L{ParserElement.parseString} can be accessed as a nested list, a dictionary, or an
    +object with named attributes.
    +
    +The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
    + - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
    + - quoted strings
    + - embedded comments
    +
    +
    +Getting Started -
    +-----------------
    +Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
    +classes inherit from. Use the docstrings for examples of how to:
    + - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
    + - construct character word-group expressions using the L{Word} class
    + - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
    + - use L{'+'}, L{'|'}, L{'^'}, and L{'&'} operators to combine simple expressions into more complex ones
    + - associate names with your parsed results using L{ParserElement.setResultsName}
    + - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
    + - find more useful common expressions in the L{pyparsing_common} namespace class
    +"""
    +
    +__version__ = "2.2.1"
    +__versionTime__ = "18 Sep 2018 00:49 UTC"
    +__author__ = "Paul McGuire "
    +
    +import string
    +from weakref import ref as wkref
    +import copy
    +import sys
    +import warnings
    +import re
    +import sre_constants
    +import collections
    +import pprint
    +import traceback
    +import types
    +from datetime import datetime
    +
    +try:
    +    from _thread import RLock
    +except ImportError:
    +    from threading import RLock
    +
    +try:
    +    # Python 3
    +    from collections.abc import Iterable
    +    from collections.abc import MutableMapping
    +except ImportError:
    +    # Python 2.7
    +    from collections import Iterable
    +    from collections import MutableMapping
    +
    +try:
    +    from collections import OrderedDict as _OrderedDict
    +except ImportError:
    +    try:
    +        from ordereddict import OrderedDict as _OrderedDict
    +    except ImportError:
    +        _OrderedDict = None
    +
    +#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
    +
    +__all__ = [
    +'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
    +'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
    +'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
    +'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
    +'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
    +'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
    +'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
    +'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
    +'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
    +'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
    +'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
    +'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
    +'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
    +'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
    +'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
    +'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
    +'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
    +'CloseMatch', 'tokenMap', 'pyparsing_common',
    +]
    +
    +system_version = tuple(sys.version_info)[:3]
    +PY_3 = system_version[0] == 3
    +if PY_3:
    +    _MAX_INT = sys.maxsize
    +    basestring = str
    +    unichr = chr
    +    _ustr = str
    +
    +    # build list of single arg builtins, that can be used as parse actions
    +    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
    +
    +else:
    +    _MAX_INT = sys.maxint
    +    range = xrange
    +
    +    def _ustr(obj):
    +        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
    +           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
    +           then < returns the unicode object | encodes it with the default encoding | ... >.
    +        """
    +        if isinstance(obj,unicode):
    +            return obj
    +
    +        try:
    +            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
    +            # it won't break any existing code.
    +            return str(obj)
    +
    +        except UnicodeEncodeError:
    +            # Else encode it
    +            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
    +            xmlcharref = Regex(r'&#\d+;')
    +            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
    +            return xmlcharref.transformString(ret)
    +
    +    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
    +    singleArgBuiltins = []
    +    import __builtin__
    +    for fname in "sum len sorted reversed list tuple set any all min max".split():
    +        try:
    +            singleArgBuiltins.append(getattr(__builtin__,fname))
    +        except AttributeError:
    +            continue
    +            
    +_generatorType = type((y for y in range(1)))
    + 
    +def _xml_escape(data):
    +    """Escape &, <, >, ", ', etc. in a string of data."""
    +
    +    # ampersand must be replaced first
    +    from_symbols = '&><"\''
    +    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
    +    for from_,to_ in zip(from_symbols, to_symbols):
    +        data = data.replace(from_, to_)
    +    return data
    +
    +class _Constants(object):
    +    pass
    +
    +alphas     = string.ascii_uppercase + string.ascii_lowercase
    +nums       = "0123456789"
    +hexnums    = nums + "ABCDEFabcdef"
    +alphanums  = alphas + nums
    +_bslash    = chr(92)
    +printables = "".join(c for c in string.printable if c not in string.whitespace)
    +
    +class ParseBaseException(Exception):
    +    """base exception class for all parsing runtime exceptions"""
    +    # Performance tuning: we construct a *lot* of these, so keep this
    +    # constructor as small and fast as possible
    +    def __init__( self, pstr, loc=0, msg=None, elem=None ):
    +        self.loc = loc
    +        if msg is None:
    +            self.msg = pstr
    +            self.pstr = ""
    +        else:
    +            self.msg = msg
    +            self.pstr = pstr
    +        self.parserElement = elem
    +        self.args = (pstr, loc, msg)
    +
    +    @classmethod
    +    def _from_exception(cls, pe):
    +        """
    +        internal factory method to simplify creating one type of ParseException 
    +        from another - avoids having __init__ signature conflicts among subclasses
    +        """
    +        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
    +
    +    def __getattr__( self, aname ):
    +        """supported attributes by name are:
    +            - lineno - returns the line number of the exception text
    +            - col - returns the column number of the exception text
    +            - line - returns the line containing the exception text
    +        """
    +        if( aname == "lineno" ):
    +            return lineno( self.loc, self.pstr )
    +        elif( aname in ("col", "column") ):
    +            return col( self.loc, self.pstr )
    +        elif( aname == "line" ):
    +            return line( self.loc, self.pstr )
    +        else:
    +            raise AttributeError(aname)
    +
    +    def __str__( self ):
    +        return "%s (at char %d), (line:%d, col:%d)" % \
    +                ( self.msg, self.loc, self.lineno, self.column )
    +    def __repr__( self ):
    +        return _ustr(self)
    +    def markInputline( self, markerString = ">!<" ):
    +        """Extracts the exception line from the input string, and marks
    +           the location of the exception with a special symbol.
    +        """
    +        line_str = self.line
    +        line_column = self.column - 1
    +        if markerString:
    +            line_str = "".join((line_str[:line_column],
    +                                markerString, line_str[line_column:]))
    +        return line_str.strip()
    +    def __dir__(self):
    +        return "lineno col line".split() + dir(type(self))
    +
    +class ParseException(ParseBaseException):
    +    """
    +    Exception thrown when parse expressions don't match class;
    +    supported attributes by name are:
    +     - lineno - returns the line number of the exception text
    +     - col - returns the column number of the exception text
    +     - line - returns the line containing the exception text
    +        
    +    Example::
    +        try:
    +            Word(nums).setName("integer").parseString("ABC")
    +        except ParseException as pe:
    +            print(pe)
    +            print("column: {}".format(pe.col))
    +            
    +    prints::
    +       Expected integer (at char 0), (line:1, col:1)
    +        column: 1
    +    """
    +    pass
    +
    +class ParseFatalException(ParseBaseException):
    +    """user-throwable exception thrown when inconsistent parse content
    +       is found; stops all parsing immediately"""
    +    pass
    +
    +class ParseSyntaxException(ParseFatalException):
    +    """just like L{ParseFatalException}, but thrown internally when an
    +       L{ErrorStop} ('-' operator) indicates that parsing is to stop 
    +       immediately because an unbacktrackable syntax error has been found"""
    +    pass
    +
    +#~ class ReparseException(ParseBaseException):
    +    #~ """Experimental class - parse actions can raise this exception to cause
    +       #~ pyparsing to reparse the input string:
    +        #~ - with a modified input string, and/or
    +        #~ - with a modified start location
    +       #~ Set the values of the ReparseException in the constructor, and raise the
    +       #~ exception in a parse action to cause pyparsing to use the new string/location.
    +       #~ Setting the values as None causes no change to be made.
    +       #~ """
    +    #~ def __init_( self, newstring, restartLoc ):
    +        #~ self.newParseText = newstring
    +        #~ self.reparseLoc = restartLoc
    +
    +class RecursiveGrammarException(Exception):
    +    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
    +    def __init__( self, parseElementList ):
    +        self.parseElementTrace = parseElementList
    +
    +    def __str__( self ):
    +        return "RecursiveGrammarException: %s" % self.parseElementTrace
    +
    +class _ParseResultsWithOffset(object):
    +    def __init__(self,p1,p2):
    +        self.tup = (p1,p2)
    +    def __getitem__(self,i):
    +        return self.tup[i]
    +    def __repr__(self):
    +        return repr(self.tup[0])
    +    def setOffset(self,i):
    +        self.tup = (self.tup[0],i)
    +
    +class ParseResults(object):
    +    """
    +    Structured parse results, to provide multiple means of access to the parsed data:
    +       - as a list (C{len(results)})
    +       - by list index (C{results[0], results[1]}, etc.)
    +       - by attribute (C{results.} - see L{ParserElement.setResultsName})
    +
    +    Example::
    +        integer = Word(nums)
    +        date_str = (integer.setResultsName("year") + '/' 
    +                        + integer.setResultsName("month") + '/' 
    +                        + integer.setResultsName("day"))
    +        # equivalent form:
    +        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
    +
    +        # parseString returns a ParseResults object
    +        result = date_str.parseString("1999/12/31")
    +
    +        def test(s, fn=repr):
    +            print("%s -> %s" % (s, fn(eval(s))))
    +        test("list(result)")
    +        test("result[0]")
    +        test("result['month']")
    +        test("result.day")
    +        test("'month' in result")
    +        test("'minutes' in result")
    +        test("result.dump()", str)
    +    prints::
    +        list(result) -> ['1999', '/', '12', '/', '31']
    +        result[0] -> '1999'
    +        result['month'] -> '12'
    +        result.day -> '31'
    +        'month' in result -> True
    +        'minutes' in result -> False
    +        result.dump() -> ['1999', '/', '12', '/', '31']
    +        - day: 31
    +        - month: 12
    +        - year: 1999
    +    """
    +    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
    +        if isinstance(toklist, cls):
    +            return toklist
    +        retobj = object.__new__(cls)
    +        retobj.__doinit = True
    +        return retobj
    +
    +    # Performance tuning: we construct a *lot* of these, so keep this
    +    # constructor as small and fast as possible
    +    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
    +        if self.__doinit:
    +            self.__doinit = False
    +            self.__name = None
    +            self.__parent = None
    +            self.__accumNames = {}
    +            self.__asList = asList
    +            self.__modal = modal
    +            if toklist is None:
    +                toklist = []
    +            if isinstance(toklist, list):
    +                self.__toklist = toklist[:]
    +            elif isinstance(toklist, _generatorType):
    +                self.__toklist = list(toklist)
    +            else:
    +                self.__toklist = [toklist]
    +            self.__tokdict = dict()
    +
    +        if name is not None and name:
    +            if not modal:
    +                self.__accumNames[name] = 0
    +            if isinstance(name,int):
    +                name = _ustr(name) # will always return a str, but use _ustr for consistency
    +            self.__name = name
    +            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
    +                if isinstance(toklist,basestring):
    +                    toklist = [ toklist ]
    +                if asList:
    +                    if isinstance(toklist,ParseResults):
    +                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
    +                    else:
    +                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
    +                    self[name].__name = name
    +                else:
    +                    try:
    +                        self[name] = toklist[0]
    +                    except (KeyError,TypeError,IndexError):
    +                        self[name] = toklist
    +
    +    def __getitem__( self, i ):
    +        if isinstance( i, (int,slice) ):
    +            return self.__toklist[i]
    +        else:
    +            if i not in self.__accumNames:
    +                return self.__tokdict[i][-1][0]
    +            else:
    +                return ParseResults([ v[0] for v in self.__tokdict[i] ])
    +
    +    def __setitem__( self, k, v, isinstance=isinstance ):
    +        if isinstance(v,_ParseResultsWithOffset):
    +            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
    +            sub = v[0]
    +        elif isinstance(k,(int,slice)):
    +            self.__toklist[k] = v
    +            sub = v
    +        else:
    +            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
    +            sub = v
    +        if isinstance(sub,ParseResults):
    +            sub.__parent = wkref(self)
    +
    +    def __delitem__( self, i ):
    +        if isinstance(i,(int,slice)):
    +            mylen = len( self.__toklist )
    +            del self.__toklist[i]
    +
    +            # convert int to slice
    +            if isinstance(i, int):
    +                if i < 0:
    +                    i += mylen
    +                i = slice(i, i+1)
    +            # get removed indices
    +            removed = list(range(*i.indices(mylen)))
    +            removed.reverse()
    +            # fixup indices in token dictionary
    +            for name,occurrences in self.__tokdict.items():
    +                for j in removed:
    +                    for k, (value, position) in enumerate(occurrences):
    +                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
    +        else:
    +            del self.__tokdict[i]
    +
    +    def __contains__( self, k ):
    +        return k in self.__tokdict
    +
    +    def __len__( self ): return len( self.__toklist )
    +    def __bool__(self): return ( not not self.__toklist )
    +    __nonzero__ = __bool__
    +    def __iter__( self ): return iter( self.__toklist )
    +    def __reversed__( self ): return iter( self.__toklist[::-1] )
    +    def _iterkeys( self ):
    +        if hasattr(self.__tokdict, "iterkeys"):
    +            return self.__tokdict.iterkeys()
    +        else:
    +            return iter(self.__tokdict)
    +
    +    def _itervalues( self ):
    +        return (self[k] for k in self._iterkeys())
    +            
    +    def _iteritems( self ):
    +        return ((k, self[k]) for k in self._iterkeys())
    +
    +    if PY_3:
    +        keys = _iterkeys       
    +        """Returns an iterator of all named result keys (Python 3.x only)."""
    +
    +        values = _itervalues
    +        """Returns an iterator of all named result values (Python 3.x only)."""
    +
    +        items = _iteritems
    +        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
    +
    +    else:
    +        iterkeys = _iterkeys
    +        """Returns an iterator of all named result keys (Python 2.x only)."""
    +
    +        itervalues = _itervalues
    +        """Returns an iterator of all named result values (Python 2.x only)."""
    +
    +        iteritems = _iteritems
    +        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
    +
    +        def keys( self ):
    +            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
    +            return list(self.iterkeys())
    +
    +        def values( self ):
    +            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
    +            return list(self.itervalues())
    +                
    +        def items( self ):
    +            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
    +            return list(self.iteritems())
    +
    +    def haskeys( self ):
    +        """Since keys() returns an iterator, this method is helpful in bypassing
    +           code that looks for the existence of any defined results names."""
    +        return bool(self.__tokdict)
    +        
    def pop( self, *args, **kwargs):
        """
        Remove and return an item, with both C{list} and C{dict} flavors.

        Called with no argument or an integer argument, acts like
        C{list.pop} on the token list (default removes the last token).
        Called with a non-integer key (typically a string), acts like
        C{dict.pop} on the named results; a second positional value, or the
        C{default} keyword, is returned when the key is absent, just as in
        C{dict.pop()}.  Note that popping a results name does not remove
        the corresponding value from the list form of the results.
        """
        if not args:
            args = [-1]
        # the only keyword accepted is 'default', mirroring dict.pop()
        for kwname, kwvalue in kwargs.items():
            if kwname != 'default':
                raise TypeError("pop() got an unexpected keyword argument '%s'" % kwname)
            args = (args[0], kwvalue)
        index = args[0]
        # list semantics for ints; dict semantics otherwise, falling back to
        # the supplied default when the name is absent
        if isinstance(index, int) or len(args) == 1 or index in self:
            value = self[index]
            del self[index]
            return value
        return args[1]
    +
    def get(self, key, defaultValue=None):
        """
        Return the named result for C{key}, like C{dict.get()}.

        When no such results name is defined, returns C{defaultValue}
        (C{None} unless a different default is supplied).
        """
        if key not in self:
            return defaultValue
        return self[key]
    +
    +    def insert( self, index, insStr ):
    +        """
    +        Inserts new element at location index in the list of parsed tokens.
    +        
    +        Similar to C{list.insert()}.
    +
    +        Example::
    +            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
    +
    +            # use a parse action to insert the parse location in the front of the parsed results
    +            def insert_locn(locn, tokens):
    +                tokens.insert(0, locn)
    +            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
    +        """
    +        self.__toklist.insert(index, insStr)
    +        # fixup indices in token dictionary
    +        for name,occurrences in self.__tokdict.items():
    +            for k, (value, position) in enumerate(occurrences):
    +                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
    +
    +    def append( self, item ):
    +        """
    +        Add single element to end of ParseResults list of elements.
    +
    +        Example::
    +            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
    +            
    +            # use a parse action to compute the sum of the parsed integers, and add it to the end
    +            def append_sum(tokens):
    +                tokens.append(sum(map(int, tokens)))
    +            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
    +        """
    +        self.__toklist.append(item)
    +
    +    def extend( self, itemseq ):
    +        """
    +        Add sequence of elements to end of ParseResults list of elements.
    +
    +        Example::
    +            patt = OneOrMore(Word(alphas))
    +            
    +            # use a parse action to append the reverse of the matched strings, to make a palindrome
    +            def make_palindrome(tokens):
    +                tokens.extend(reversed([t[::-1] for t in tokens]))
    +                return ''.join(tokens)
    +            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
    +        """
    +        if isinstance(itemseq, ParseResults):
    +            self += itemseq
    +        else:
    +            self.__toklist.extend(itemseq)
    +
    +    def clear( self ):
    +        """
    +        Clear all elements and results names.
    +        """
    +        del self.__toklist[:]
    +        self.__tokdict.clear()
    +
    def __getattr__( self, name ):
        """
        Attribute-style access to named results: C{res.year} is equivalent
        to C{res["year"]}, returning C{""} when no such results name exists.
        """
        try:
            return self[name]
        except KeyError:
            return ""
        # NOTE: removed an unreachable block that followed the return/except
        # above - both paths already exit the method, so the old fallback
        # lookup into self.__tokdict could never execute.
    +
    def __add__( self, other ):
        """Return a new ParseResults combining this one with C{other}."""
        combined = self.copy()
        combined += other
        return combined
    +
    def __iadd__( self, other ):
        # In-place merge: other's tokens are concatenated onto this list and
        # other's named results are re-anchored relative to this list's
        # current length.
        if other.__tokdict:
            offset = len(self.__toklist)
            # negative positions mark "whole-result" names; re-anchor those at
            # the splice point, everything else just shifts by the old length
            addoffset = lambda a: offset if a<0 else a+offset
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    # adopt nested results via weakref so getName() can walk up
                    v[0].__parent = wkref(self)
            
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self
    +
    def __radd__(self, other):
        """
        Support C{sum()} over many ParseResults by treating integer 0 as
        the additive identity (sum's default start value).
        """
        if not (isinstance(other, int) and other == 0):
            # delegate to other's addition - a TypeError here is intentional
            return other + self
        return self.copy()
    +        
    +    def __repr__( self ):
    +        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
    +
    +    def __str__( self ):
    +        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
    +
    +    def _asStringList( self, sep='' ):
    +        out = []
    +        for item in self.__toklist:
    +            if out and sep:
    +                out.append(sep)
    +            if isinstance( item, ParseResults ):
    +                out += item._asStringList()
    +            else:
    +                out.append( _ustr(item) )
    +        return out
    +
    +    def asList( self ):
    +        """
    +        Returns the parse results as a nested list of matching tokens, all converted to strings.
    +
    +        Example::
    +            patt = OneOrMore(Word(alphas))
    +            result = patt.parseString("sldkj lsdkj sldkj")
    +            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
    +            print(type(result), result) # ->  ['sldkj', 'lsdkj', 'sldkj']
    +            
    +            # Use asList() to create an actual list
    +            result_list = result.asList()
    +            print(type(result_list), result_list) # ->  ['sldkj', 'lsdkj', 'sldkj']
    +        """
    +        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
    +
    def asDict( self ):
        """
        Return the named parse results as a nested plain C{dict} (useful
        e.g. for C{json.dumps}).  Nested C{ParseResults} that carry names
        become sub-dicts; unnamed ones become plain lists.
        """
        item_fn = self.items if PY_3 else self.iteritems

        def toItem(obj):
            if not isinstance(obj, ParseResults):
                return obj
            if obj.haskeys():
                return obj.asDict()
            return [toItem(v) for v in obj]

        return dict((k, toItem(v)) for k, v in item_fn())
    +
    def copy( self ):
        """
        Return a new shallow copy of this C{ParseResults} (token list,
        results names, parent link, and accumulation flags).
        """
        dup = ParseResults( self.__toklist )
        dup.__tokdict = self.__tokdict.copy()
        dup.__parent = self.__parent
        dup.__accumNames.update( self.__accumNames )
        dup.__name = self.__name
        return dup
    +
    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """
        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.

        @param doctag: tag name for the outermost element (defaults to this
            result's own name, or "ITEM")
        @param namedItemsOnly: if True, emit only tokens that have results names
        @param indent: current indentation prefix
        @param formatted: if False, suppress all newlines/indentation
        """
        nl = "\n"
        out = []
        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist)
        nextLevelIndent = indent + "  "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        for i,res in enumerate(self.__toklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                # FIX: restore the closing tags - the "</" sequences had been
                # stripped from this copy of the file, producing unclosed XML
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                                xmlBodyText,
                                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)
    +
    +    def __lookup(self,sub):
    +        for k,vlist in self.__tokdict.items():
    +            for v,loc in vlist:
    +                if sub is v:
    +                    return k
    +        return None
    +
    +    def getName(self):
    +        r"""
    +        Returns the results name for this token expression. Useful when several 
    +        different expressions might match at a particular location.
    +
    +        Example::
    +            integer = Word(nums)
    +            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
    +            house_number_expr = Suppress('#') + Word(nums, alphanums)
    +            user_data = (Group(house_number_expr)("house_number") 
    +                        | Group(ssn_expr)("ssn")
    +                        | Group(integer)("age"))
    +            user_info = OneOrMore(user_data)
    +            
    +            result = user_info.parseString("22 111-22-3333 #221B")
    +            for item in result:
    +                print(item.getName(), ':', item[0])
    +        prints::
    +            age : 22
    +            ssn : 111-22-3333
    +            house_number : 221B
    +        """
    +        if self.__name:
    +            return self.__name
    +        elif self.__parent:
    +            par = self.__parent()
    +            if par:
    +                return par.__lookup(self)
    +            else:
    +                return None
    +        elif (len(self) == 1 and
    +               len(self.__tokdict) == 1 and
    +               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
    +            return next(iter(self.__tokdict.keys()))
    +        else:
    +            return None
    +
    def dump(self, indent='', depth=0, full=True):
        """
        Diagnostic method for listing out the contents of a C{ParseResults}.
        Accepts an optional C{indent} argument so that this string can be embedded
        in a nested display of other data.

        @param indent: string prefixed to every emitted line
        @param depth: current nesting level, controls per-level indentation
        @param full: if False, show only the flat token list, omitting
            named results and nested sublists

        Example::
            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
            
            result = date_str.parseString('12/31/1999')
            print(result.dump())
        prints::
            ['12', '/', '31', '/', '1999']
            - day: 1999
            - month: 31
            - year: 12
        """
        out = []
        NL = '\n'
        # first line: the flat token list
        out.append( indent+_ustr(self.asList()) )
        if full:
            if self.haskeys():
                # named results: one "- name: value" line each, sorted by name
                items = sorted((str(k), v) for k,v in self.items())
                for k,v in items:
                    if out:
                        out.append(NL)
                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
                    if isinstance(v,ParseResults):
                        if v:
                            # non-empty sub-results: recurse one level deeper
                            out.append( v.dump(indent,depth+1) )
                        else:
                            out.append(_ustr(v))
                    else:
                        out.append(repr(v))
            elif any(isinstance(vv,ParseResults) for vv in self):
                # no named results, but nested sublists: emit "[i]:" entries
                v = self
                for i,vv in enumerate(v):
                    if isinstance(vv,ParseResults):
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
                    else:
                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
            
        return "".join(out)
    +
    +    def pprint(self, *args, **kwargs):
    +        """
    +        Pretty-printer for parsed results as a list, using the C{pprint} module.
    +        Accepts additional positional or keyword args as defined for the 
    +        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
    +
    +        Example::
    +            ident = Word(alphas, alphanums)
    +            num = Word(nums)
    +            func = Forward()
    +            term = ident | num | Group('(' + func + ')')
    +            func <<= ident + Group(Optional(delimitedList(term)))
    +            result = func.parseString("fna a,b,(fnb c,d,200),100")
    +            result.pprint(width=40)
    +        prints::
    +            ['fna',
    +             ['a',
    +              'b',
    +              ['(', 'fnb', ['c', 'd', '200'], ')'],
    +              '100']]
    +        """
    +        pprint.pprint(self.asList(), *args, **kwargs)
    +
    +    # add support for pickle protocol
    +    def __getstate__(self):
    +        return ( self.__toklist,
    +                 ( self.__tokdict.copy(),
    +                   self.__parent is not None and self.__parent() or None,
    +                   self.__accumNames,
    +                   self.__name ) )
    +
    +    def __setstate__(self,state):
    +        self.__toklist = state[0]
    +        (self.__tokdict,
    +         par,
    +         inAccumNames,
    +         self.__name) = state[1]
    +        self.__accumNames = {}
    +        self.__accumNames.update(inAccumNames)
    +        if par is not None:
    +            self.__parent = wkref(par)
    +        else:
    +            self.__parent = None
    +
    +    def __getnewargs__(self):
    +        return self.__toklist, self.__name, self.__asList, self.__modal
    +
    def __dir__(self):
        """Augment standard attribute listing with this result's names."""
        return dir(type(self)) + list(self.keys())
    +
# ParseResults walks and quacks like a mapping; register it as a virtual
# subclass so isinstance(x, MutableMapping) checks succeed without inheritance.
MutableMapping.register(ParseResults)
    +
    +def col (loc,strg):
    +    """Returns current column within a string, counting newlines as line separators.
    +   The first column is number 1.
    +
    +   Note: the default parsing behavior is to expand tabs in the input string
    +   before starting the parsing process.  See L{I{ParserElement.parseString}} for more information
    +   on parsing strings containing C{}s, and suggested methods to maintain a
    +   consistent view of the parsed string, the parse location, and line and column
    +   positions within the parsed string.
    +   """
    +    s = strg
    +    return 1 if 0} for more information
    +   on parsing strings containing C{}s, and suggested methods to maintain a
    +   consistent view of the parsed string, the parse location, and line and column
    +   positions within the parsed string.
    +   """
    +    return strg.count("\n",0,loc) + 1
    +
    +def line( loc, strg ):
    +    """Returns the line of text containing loc within a string, counting newlines as line separators.
    +       """
    +    lastCR = strg.rfind("\n", 0, loc)
    +    nextCR = strg.find("\n", loc)
    +    if nextCR >= 0:
    +        return strg[lastCR+1:nextCR]
    +    else:
    +        return strg[lastCR+1:]
    +
    +def _defaultStartDebugAction( instring, loc, expr ):
    +    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
    +
    +def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    +    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
    +
    +def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    +    print ("Exception raised:" + _ustr(exc))
    +
    +def nullDebugAction(*args):
    +    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    +    pass
    +
    +# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
    +#~ 'decorator to trim function calls to match the arity of the target'
    +#~ def _trim_arity(func, maxargs=3):
    +    #~ if func in singleArgBuiltins:
    +        #~ return lambda s,l,t: func(t)
    +    #~ limit = 0
    +    #~ foundArity = False
    +    #~ def wrapper(*args):
    +        #~ nonlocal limit,foundArity
    +        #~ while 1:
    +            #~ try:
    +                #~ ret = func(*args[limit:])
    +                #~ foundArity = True
    +                #~ return ret
    +            #~ except TypeError:
    +                #~ if limit == maxargs or foundArity:
    +                    #~ raise
    +                #~ limit += 1
    +                #~ continue
    +    #~ return wrapper
    +
    +# this version is Python 2.x-3.x cross-compatible
    +'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
    """Decorator to trim function calls to match the arity of the target.

    Wraps a parse-action callable so it may be declared with fewer
    parameters than the full (s, loc, toks) signature: leading arguments
    are dropped one at a time (up to maxargs) until a call to func stops
    raising TypeError.  Traceback inspection distinguishes our own
    arity-probe TypeError from one raised inside the user's function.
    """
    if func in singleArgBuiltins:
        # builtins like int/len accept exactly one argument - pass tokens only
        return lambda s,l,t: func(t)
    # one-element lists act as mutable cells (no 'nonlocal' in Python 2)
    limit = [0]
    foundArity = [False]
    
    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
    if system_version[:2] >= (3,5):
        def extract_stack(limit=0):
            # special handling for Python 3.5.0 - extra deep call stack by 1
            offset = -3 if system_version == (3,5,0) else -2
            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
            return [frame_summary[:2]]
        def extract_tb(tb, limit=0):
            frames = traceback.extract_tb(tb, limit=limit)
            frame_summary = frames[-1]
            return [frame_summary[:2]]
    else:
        extract_stack = traceback.extract_stack
        extract_tb = traceback.extract_tb
    
    # synthesize what would be returned by traceback.extract_stack at the call to 
    # user's parse action 'func', so that we don't incur call penalty at parse time
    
    LINE_DIFF = 6
    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
    this_line = extract_stack(limit=2)[-1]
    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)

    def wrapper(*args):
        while 1:
            try:
                ret = func(*args[limit[0]:])
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
                            raise
                    finally:
                        del tb

                if limit[0] <= maxargs:
                    limit[0] += 1
                    continue
                raise

    # copy func name to wrapper for sensible debug output
    func_name = ""
    try:
        func_name = getattr(func, '__name__', 
                            getattr(func, '__class__').__name__)
    except Exception:
        func_name = str(func)
    wrapper.__name__ = func_name

    return wrapper
    +
    +class ParserElement(object):
    +    """Abstract base level parser element class."""
    +    DEFAULT_WHITE_CHARS = " \n\t\r"
    +    verbose_stacktrace = False
    +
    +    @staticmethod
    +    def setDefaultWhitespaceChars( chars ):
    +        r"""
    +        Overrides the default whitespace chars
    +
    +        Example::
    +            # default whitespace chars are space,  and newline
    +            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
    +            
    +            # change to just treat newline as significant
    +            ParserElement.setDefaultWhitespaceChars(" \t")
    +            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
    +        """
    +        ParserElement.DEFAULT_WHITE_CHARS = chars
    +
    +    @staticmethod
    +    def inlineLiteralsUsing(cls):
    +        """
    +        Set class to be used for inclusion of string literals into a parser.
    +        
    +        Example::
    +            # default literal class used is Literal
    +            integer = Word(nums)
    +            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
    +
    +            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
    +
    +
    +            # change to Suppress
    +            ParserElement.inlineLiteralsUsing(Suppress)
    +            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
    +
    +            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
    +        """
    +        ParserElement._literalStringClass = cls
    +
    +    def __init__( self, savelist=False ):
    +        self.parseAction = list()
    +        self.failAction = None
    +        #~ self.name = ""  # don't define self.name, let subclasses try/except upcall
    +        self.strRepr = None
    +        self.resultsName = None
    +        self.saveAsList = savelist
    +        self.skipWhitespace = True
    +        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
    +        self.copyDefaultWhiteChars = True
    +        self.mayReturnEmpty = False # used when checking for left-recursion
    +        self.keepTabs = False
    +        self.ignoreExprs = list()
    +        self.debug = False
    +        self.streamlined = False
    +        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
    +        self.errmsg = ""
    +        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
    +        self.debugActions = ( None, None, None ) #custom debug actions
    +        self.re = None
    +        self.callPreparse = True # used to avoid redundant calls to preParse
    +        self.callDuringTry = False
    +
    +    def copy( self ):
    +        """
    +        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
    +        for the same parsing pattern, using copies of the original parse element.
    +        
    +        Example::
    +            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
    +            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
    +            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
    +            
    +            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
    +        prints::
    +            [5120, 100, 655360, 268435456]
    +        Equivalent form of C{expr.copy()} is just C{expr()}::
    +            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
    +        """
    +        cpy = copy.copy( self )
    +        cpy.parseAction = self.parseAction[:]
    +        cpy.ignoreExprs = self.ignoreExprs[:]
    +        if self.copyDefaultWhiteChars:
    +            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
    +        return cpy
    +
    +    def setName( self, name ):
    +        """
    +        Define name for this expression, makes debugging and exception messages clearer.
    +        
    +        Example::
    +            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
    +            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
    +        """
    +        self.name = name
    +        self.errmsg = "Expected " + self.name
    +        if hasattr(self,"exception"):
    +            self.exception.msg = self.errmsg
    +        return self
    +
    +    def setResultsName( self, name, listAllMatches=False ):
    +        """
    +        Define name for referencing matching tokens as a nested attribute
    +        of the returned parse results.
    +        NOTE: this returns a *copy* of the original C{ParserElement} object;
    +        this is so that the client can define a basic element, such as an
    +        integer, and reference it in multiple places with different names.
    +
    +        You can also set results names using the abbreviated syntax,
    +        C{expr("name")} in place of C{expr.setResultsName("name")} - 
    +        see L{I{__call__}<__call__>}.
    +
    +        Example::
    +            date_str = (integer.setResultsName("year") + '/' 
    +                        + integer.setResultsName("month") + '/' 
    +                        + integer.setResultsName("day"))
    +
    +            # equivalent form:
    +            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
    +        """
    +        newself = self.copy()
    +        if name.endswith("*"):
    +            name = name[:-1]
    +            listAllMatches=True
    +        newself.resultsName = name
    +        newself.modalResults = not listAllMatches
    +        return newself
    +
    +    def setBreak(self,breakFlag = True):
    +        """Method to invoke the Python pdb debugger when this element is
    +           about to be parsed. Set C{breakFlag} to True to enable, False to
    +           disable.
    +        """
    +        if breakFlag:
    +            _parseMethod = self._parse
    +            def breaker(instring, loc, doActions=True, callPreParse=True):
    +                import pdb
    +                pdb.set_trace()
    +                return _parseMethod( instring, loc, doActions, callPreParse )
    +            breaker._originalParseMethod = _parseMethod
    +            self._parse = breaker
    +        else:
    +            if hasattr(self._parse,"_originalParseMethod"):
    +                self._parse = self._parse._originalParseMethod
    +        return self
    +
    def setParseAction( self, *fns, **kwargs ):
        """
        Replace this element's parse actions with C{fns}, run on a
        successful match.  Each action is a callable taking 0-3 arguments,
        invoked as C{fn(s,loc,toks)}, C{fn(loc,toks)}, C{fn(toks)}, or
        C{fn()}, where:
         - s   = the original string being parsed
         - loc = the location of the matching substring
         - toks = the matched tokens, as a C{L{ParseResults}} object
        An action may return a replacement token list; returning nothing
        keeps the tokens unchanged.

        Optional keyword arguments:
         - callDuringTry = (default=C{False}) also run the action during
           lookaheads and alternate testing

        Note: by default tabs are expanded in the input string before
        parsing begins; see L{I{parseString}} for how to keep string
        locations consistent when the input contains C{<TAB>}s.
        """
        self.parseAction = [_trim_arity(fn) for fn in fns]
        self.callDuringTry = kwargs.get("callDuringTry", False)
        return self
    +
    +    def addParseAction( self, *fns, **kwargs ):
    +        """
    +        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}}.
    +        
    +        See examples in L{I{copy}}.
    +        """
    +        # append (rather than replace) arity-trimmed wrappers; callDuringTry is
    +        # sticky - once any registered action requests it, it stays enabled
    +        self.parseAction += list(map(_trim_arity, list(fns)))
    +        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
    +        return self
    +
    +    def addCondition(self, *fns, **kwargs):
    +        """Add a boolean predicate function to expression's list of parse actions. See 
    +        L{I{setParseAction}} for function call signatures. Unlike C{setParseAction}, 
    +        functions passed to C{addCondition} need to return boolean success/fail of the condition.
    +
    +        Optional keyword arguments:
    +         - message = define a custom message to be used in the raised exception
    +         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
    +         
    +        Example::
    +            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
    +            year_int = integer.copy()
    +            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
    +            date_str = year_int + '/' + integer + '/' + integer
    +
    +            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
    +        """
    +        msg = kwargs.get("message", "failed user-defined condition")
    +        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
    +        for fn in fns:
    +            # NOTE(review): 'fn' is captured late-bound by the closure below, so if
    +            # more than one fn is passed in a single call, every 'pa' wrapper will
    +            # evaluate the *last* fn; binding it as a default arg (fn=fn) would fix this
    +            def pa(s,l,t):
    +                if not bool(_trim_arity(fn)(s,l,t)):
    +                    raise exc_type(s,l,msg)
    +            self.parseAction.append(pa)
    +        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
    +        return self
    +
    +    def setFailAction( self, fn ):
    +        """Define action to perform if parsing fails at this expression.
    +           Fail acton fn is a callable function that takes the arguments
    +           C{fn(s,loc,expr,err)} where:
    +            - s = string being parsed
    +            - loc = location where expression match was attempted and failed
    +            - expr = the parse expression that failed
    +            - err = the exception thrown
    +           The function returns no value.  It may throw C{L{ParseFatalException}}
    +           if it is desired to stop parsing immediately."""
    +        # invoked from _parseNoCache when a ParseBaseException escapes parseImpl
    +        self.failAction = fn
    +        return self
    +
    +    def _skipIgnorables( self, instring, loc ):
    +        # Repeatedly consume any of self.ignoreExprs at the current location,
    +        # looping until a full pass over the ignore expressions makes no further
    +        # progress; returns the advanced location.
    +        exprsFound = True
    +        while exprsFound:
    +            exprsFound = False
    +            for e in self.ignoreExprs:
    +                try:
    +                    while 1:
    +                        loc,dummy = e._parse( instring, loc )
    +                        exprsFound = True
    +                except ParseException:
    +                    pass
    +        return loc
    +
    +    def preParse( self, instring, loc ):
    +        # Pre-match housekeeping: skip ignorable expressions (comments etc.),
    +        # then skip leading whitespace characters if enabled for this element.
    +        if self.ignoreExprs:
    +            loc = self._skipIgnorables( instring, loc )
    +
    +        if self.skipWhitespace:
    +            wt = self.whiteChars
    +            instrlen = len(instring)
    +            while loc < instrlen and instring[loc] in wt:
    +                loc += 1
    +
    +        return loc
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # default implementation: match nothing, consume nothing - subclasses override
    +        return loc, []
    +
    +    def postParse( self, instring, loc, tokenlist ):
    +        # hook for subclasses to massage the matched tokens; default is pass-through
    +        return tokenlist
    +
    +    #~ @profile
    +    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
    +        # Core uncached parse driver: optional debug/fail-action bookkeeping,
    +        # preParse (whitespace/ignorables), parseImpl, postParse, then any
    +        # registered parse actions; returns (new loc, ParseResults).
    +        debugging = ( self.debug ) #and doActions )
    +
    +        if debugging or self.failAction:
    +            # slow path: emit debug callbacks and/or run the fail action on error
    +            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
    +            if (self.debugActions[0] ):
    +                self.debugActions[0]( instring, loc, self )
    +            if callPreParse and self.callPreparse:
    +                preloc = self.preParse( instring, loc )
    +            else:
    +                preloc = loc
    +            tokensStart = preloc
    +            try:
    +                try:
    +                    loc,tokens = self.parseImpl( instring, preloc, doActions )
    +                except IndexError:
    +                    # normalize an index overrun into a ParseException at end-of-string
    +                    raise ParseException( instring, len(instring), self.errmsg, self )
    +            except ParseBaseException as err:
    +                #~ print ("Exception raised:", err)
    +                if self.debugActions[2]:
    +                    self.debugActions[2]( instring, tokensStart, self, err )
    +                if self.failAction:
    +                    self.failAction( instring, tokensStart, self, err )
    +                raise
    +        else:
    +            if callPreParse and self.callPreparse:
    +                preloc = self.preParse( instring, loc )
    +            else:
    +                preloc = loc
    +            tokensStart = preloc
    +            # only pay for the try/except IndexError guard when this expression
    +            # may index past the end of instring
    +            if self.mayIndexError or preloc >= len(instring):
    +                try:
    +                    loc,tokens = self.parseImpl( instring, preloc, doActions )
    +                except IndexError:
    +                    raise ParseException( instring, len(instring), self.errmsg, self )
    +            else:
    +                loc,tokens = self.parseImpl( instring, preloc, doActions )
    +
    +        tokens = self.postParse( instring, loc, tokens )
    +
    +        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
    +        if self.parseAction and (doActions or self.callDuringTry):
    +            if debugging:
    +                try:
    +                    for fn in self.parseAction:
    +                        tokens = fn( instring, tokensStart, retTokens )
    +                        if tokens is not None:
    +                            # a parse action may return replacement tokens
    +                            retTokens = ParseResults( tokens,
    +                                                      self.resultsName,
    +                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
    +                                                      modal=self.modalResults )
    +                except ParseBaseException as err:
    +                    #~ print "Exception raised in user parse action:", err
    +                    if (self.debugActions[2] ):
    +                        self.debugActions[2]( instring, tokensStart, self, err )
    +                    raise
    +            else:
    +                for fn in self.parseAction:
    +                    tokens = fn( instring, tokensStart, retTokens )
    +                    if tokens is not None:
    +                        retTokens = ParseResults( tokens,
    +                                                  self.resultsName,
    +                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
    +                                                  modal=self.modalResults )
    +        if debugging:
    +            #~ print ("Matched",self,"->",retTokens.asList())
    +            if (self.debugActions[1] ):
    +                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
    +
    +        return loc, retTokens
    +
    +    def tryParse( self, instring, loc ):
    +        # lookahead probe: parse without running actions, returning only the end
    +        # location; fatal exceptions are downgraded so callers can backtrack
    +        try:
    +            return self._parse( instring, loc, doActions=False )[0]
    +        except ParseFatalException:
    +            raise ParseException( instring, loc, self.errmsg, self)
    +    
    +    def canParseNext(self, instring, loc):
    +        # boolean form of tryParse: True if this expression matches at loc
    +        try:
    +            self.tryParse(instring, loc)
    +        except (ParseException, IndexError):
    +            return False
    +        else:
    +            return True
    +
    +    class _UnboundedCache(object):
    +        # Unbounded memo used for packrat parsing when no size limit is given.
    +        # The backing dict lives in the __init__ closure; get/set/clear are
    +        # bound onto the instance via types.MethodType.
    +        def __init__(self):
    +            cache = {}
    +            # unique sentinel distinguishing "missing" from a cached None
    +            self.not_in_cache = not_in_cache = object()
    +
    +            def get(self, key):
    +                return cache.get(key, not_in_cache)
    +
    +            def set(self, key, value):
    +                cache[key] = value
    +
    +            def clear(self):
    +                cache.clear()
    +                
    +            def cache_len(self):
    +                return len(cache)
    +
    +            self.get = types.MethodType(get, self)
    +            self.set = types.MethodType(set, self)
    +            self.clear = types.MethodType(clear, self)
    +            # NOTE(review): len() looks up __len__ on the type, not the instance,
    +            # so this instance-bound __len__ is not reachable via len(obj)
    +            self.__len__ = types.MethodType(cache_len, self)
    +
    +    if _OrderedDict is not None:
    +        class _FifoCache(object):
    +            # Bounded FIFO memo for packrat parsing, built on OrderedDict:
    +            # popitem(False) evicts the oldest entry once 'size' is exceeded.
    +            def __init__(self, size):
    +                # unique sentinel distinguishing "missing" from a cached None
    +                self.not_in_cache = not_in_cache = object()
    +
    +                cache = _OrderedDict()
    +
    +                def get(self, key):
    +                    return cache.get(key, not_in_cache)
    +
    +                def set(self, key, value):
    +                    cache[key] = value
    +                    while len(cache) > size:
    +                        try:
    +                            cache.popitem(False)
    +                        except KeyError:
    +                            pass
    +
    +                def clear(self):
    +                    cache.clear()
    +
    +                def cache_len(self):
    +                    return len(cache)
    +
    +                self.get = types.MethodType(get, self)
    +                self.set = types.MethodType(set, self)
    +                self.clear = types.MethodType(clear, self)
    +                self.__len__ = types.MethodType(cache_len, self)
    +
    +    else:
    +        class _FifoCache(object):
    +            # Fallback for Pythons without OrderedDict: a plain dict plus a
    +            # deque recording key insertion order for FIFO eviction.
    +            def __init__(self, size):
    +                self.not_in_cache = not_in_cache = object()
    +
    +                cache = {}
    +                key_fifo = collections.deque([], size)
    +
    +                def get(self, key):
    +                    return cache.get(key, not_in_cache)
    +
    +                def set(self, key, value):
    +                    cache[key] = value
    +                    # NOTE(review): key_fifo was built with maxlen=size, so
    +                    # len(key_fifo) can never exceed size and this loop never runs;
    +                    # when the full deque silently drops its oldest key on append,
    +                    # the matching dict entry is never removed, so 'cache' can
    +                    # grow beyond 'size'
    +                    while len(key_fifo) > size:
    +                        cache.pop(key_fifo.popleft(), None)
    +                    key_fifo.append(key)
    +
    +                def clear(self):
    +                    cache.clear()
    +                    key_fifo.clear()
    +
    +                def cache_len(self):
    +                    return len(cache)
    +
    +                self.get = types.MethodType(get, self)
    +                self.set = types.MethodType(set, self)
    +                self.clear = types.MethodType(clear, self)
    +                self.__len__ = types.MethodType(cache_len, self)
    +
    +    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    +    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
    +    # lock serializing access to the shared class-level cache across threads
    +    packrat_cache_lock = RLock()
    +    # [hits, misses] counters, indexed by HIT/MISS in _parseCache
    +    packrat_cache_stats = [0, 0]
    +
    +    # this method gets repeatedly called during backtracking with the same arguments -
    +    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    +    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
    +        # Packrat-memoized parse: look up (expr, input, loc, flags) in the shared
    +        # class-level cache; on miss, delegate to _parseNoCache and memoize either
    +        # the (loc, tokens) result or the exception (sans traceback).
    +        HIT, MISS = 0, 1
    +        lookup = (self, instring, loc, callPreParse, doActions)
    +        with ParserElement.packrat_cache_lock:
    +            cache = ParserElement.packrat_cache
    +            value = cache.get(lookup)
    +            if value is cache.not_in_cache:
    +                ParserElement.packrat_cache_stats[MISS] += 1
    +                try:
    +                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
    +                except ParseBaseException as pe:
    +                    # cache a copy of the exception, without the traceback
    +                    cache.set(lookup, pe.__class__(*pe.args))
    +                    raise
    +                else:
    +                    # store a copy of the tokens so later mutation by callers
    +                    # cannot corrupt the cached value
    +                    cache.set(lookup, (value[0], value[1].copy()))
    +                    return value
    +            else:
    +                ParserElement.packrat_cache_stats[HIT] += 1
    +                if isinstance(value, Exception):
    +                    raise value
    +                return (value[0], value[1].copy())
    +
    +    # by default parsing goes through the uncached path; enablePackrat()
    +    # rebinds this to _parseCache
    +    _parse = _parseNoCache
    +
    +    @staticmethod
    +    def resetCache():
    +        # empty the packrat cache and zero the hit/miss counters
    +        ParserElement.packrat_cache.clear()
    +        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
    +
    +    _packratEnabled = False
    +    @staticmethod
    +    def enablePackrat(cache_size_limit=128):
    +        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
    +           Repeated parse attempts at the same string location (which happens
    +           often in many complex grammars) can immediately return a cached value,
    +           instead of re-executing parsing/validating code.  Memoizing is done of
    +           both valid results and parsing exceptions.
    +           
    +           Parameters:
    +            - cache_size_limit - (default=C{128}) - if an integer value is provided
    +              will limit the size of the packrat cache; if None is passed, then
    +              the cache size will be unbounded; if 0 is passed, the cache will
    +              be effectively disabled.
    +            
    +           This speedup may break existing programs that use parse actions that
    +           have side-effects.  For this reason, packrat parsing is disabled when
    +           you first import pyparsing.  To activate the packrat feature, your
    +           program must call the class method C{ParserElement.enablePackrat()}.  If
    +           your program uses C{psyco} to "compile as you go", you must call
    +           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
    +           Python will crash.  For best results, call C{enablePackrat()} immediately
    +           after importing pyparsing.
    +           
    +           Example::
    +               import pyparsing
    +               pyparsing.ParserElement.enablePackrat()
    +        """
    +        # one-shot switch: install the chosen cache implementation and rebind
    +        # the class-level _parse entry point to the memoized version
    +        if not ParserElement._packratEnabled:
    +            ParserElement._packratEnabled = True
    +            if cache_size_limit is None:
    +                ParserElement.packrat_cache = ParserElement._UnboundedCache()
    +            else:
    +                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
    +            ParserElement._parse = ParserElement._parseCache
    +
    +    def parseString( self, instring, parseAll=False ):
    +        """
    +        Execute the parse expression with the given string.
    +        This is the main interface to the client code, once the complete
    +        expression has been built.
    +
    +        If you want the grammar to require that the entire input string be
    +        successfully parsed, then set C{parseAll} to True (equivalent to ending
    +        the grammar with C{L{StringEnd()}}).
    +
    +        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
    +        in order to report proper column numbers in parse actions.
    +        If the input string contains tabs and
    +        the grammar uses parse actions that use the C{loc} argument to index into the
    +        string being parsed, you can ensure you have a consistent view of the input
    +        string by:
    +         - calling C{parseWithTabs} on your grammar before calling C{parseString}
    +           (see L{I{parseWithTabs}})
    +         - define your parse action using the full C{(s,loc,toks)} signature, and
    +           reference the input string using the parse action's C{s} argument
    +         - explictly expand the tabs in your input string before calling
    +           C{parseString}
    +        
    +        Example::
    +            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
    +            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
    +        """
    +        # the packrat cache keys on the input string, so it must be cleared
    +        # before each top-level parse
    +        ParserElement.resetCache()
    +        if not self.streamlined:
    +            self.streamline()
    +            #~ self.saveAsList = True
    +        for e in self.ignoreExprs:
    +            e.streamline()
    +        if not self.keepTabs:
    +            instring = instring.expandtabs()
    +        try:
    +            loc, tokens = self._parse( instring, 0 )
    +            if parseAll:
    +                # require the remainder of the input to be empty (whitespace ok)
    +                loc = self.preParse( instring, loc )
    +                se = Empty() + StringEnd()
    +                se._parse( instring, loc )
    +        except ParseBaseException as exc:
    +            if ParserElement.verbose_stacktrace:
    +                raise
    +            else:
    +                # catch and re-raise exception from here, clears out pyparsing internal stack trace
    +                raise exc
    +        else:
    +            return tokens
    +
    +    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
    +        """
    +        Scan the input string for expression matches.  Each match will return the
    +        matching tokens, start location, and end location.  May be called with optional
    +        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
    +        C{overlap} is specified, then overlapping matches will be reported.
    +
    +        Note that the start and end locations are reported relative to the string
    +        being parsed.  See L{I{parseString}} for more information on parsing
    +        strings with embedded tabs.
    +
    +        Example::
    +            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
    +            print(source)
    +            for tokens,start,end in Word(alphas).scanString(source):
    +                print(' '*start + '^'*(end-start))
    +                print(' '*start + tokens[0])
    +        
    +        prints::
    +        
    +            sldjf123lsdjjkf345sldkjf879lkjsfd987
    +            ^^^^^
    +            sldjf
    +                    ^^^^^^^
    +                    lsdjjkf
    +                              ^^^^^^
    +                              sldkjf
    +                                       ^^^^^^
    +                                       lkjsfd
    +        """
    +        if not self.streamlined:
    +            self.streamline()
    +        for e in self.ignoreExprs:
    +            e.streamline()
    +
    +        if not self.keepTabs:
    +            instring = _ustr(instring).expandtabs()
    +        instrlen = len(instring)
    +        loc = 0
    +        # hoist bound-method lookups out of the scan loop
    +        preparseFn = self.preParse
    +        parseFn = self._parse
    +        ParserElement.resetCache()
    +        matches = 0
    +        try:
    +            while loc <= instrlen and matches < maxMatches:
    +                try:
    +                    preloc = preparseFn( instring, loc )
    +                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
    +                except ParseException:
    +                    # no match here - slide one character past the preparse point
    +                    loc = preloc+1
    +                else:
    +                    if nextLoc > loc:
    +                        matches += 1
    +                        yield tokens, preloc, nextLoc
    +                        if overlap:
    +                            # NOTE(review): 'nextloc' (preparse of current loc) is
    +                            # tested, but 'nextLoc' (end of this match) is assigned -
    +                            # the similar names look error-prone; confirm against
    +                            # upstream pyparsing before changing
    +                            nextloc = preparseFn( instring, loc )
    +                            if nextloc > loc:
    +                                loc = nextLoc
    +                            else:
    +                                loc += 1
    +                        else:
    +                            loc = nextLoc
    +                    else:
    +                        # zero-width match - advance to avoid an infinite loop
    +                        loc = preloc+1
    +        except ParseBaseException as exc:
    +            if ParserElement.verbose_stacktrace:
    +                raise
    +            else:
    +                # catch and re-raise exception from here, clears out pyparsing internal stack trace
    +                raise exc
    +
    +    def transformString( self, instring ):
    +        """
    +        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
    +        be returned from a parse action.  To use C{transformString}, define a grammar and
    +        attach a parse action to it that modifies the returned token list.
    +        Invoking C{transformString()} on a target string will then scan for matches,
    +        and replace the matched text patterns according to the logic in the parse
    +        action.  C{transformString()} returns the resulting transformed string.
    +        
    +        Example::
    +            wd = Word(alphas)
    +            wd.setParseAction(lambda toks: toks[0].title())
    +            
    +            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
    +        Prints::
    +            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
    +        """
    +        out = []
    +        lastE = 0
    +        # force preservation of s, to minimize unwanted transformation of string, and to
    +        # keep string locs straight between transformString and scanString
    +        self.keepTabs = True
    +        try:
    +            # stitch together: unmatched text between matches + (possibly
    +            # action-modified) tokens for each match + trailing remainder
    +            for t,s,e in self.scanString( instring ):
    +                out.append( instring[lastE:s] )
    +                if t:
    +                    if isinstance(t,ParseResults):
    +                        out += t.asList()
    +                    elif isinstance(t,list):
    +                        out += t
    +                    else:
    +                        out.append(t)
    +                lastE = e
    +            out.append(instring[lastE:])
    +            # drop empty fragments, flatten nested lists, and stringify
    +            out = [o for o in out if o]
    +            return "".join(map(_ustr,_flatten(out)))
    +        except ParseBaseException as exc:
    +            if ParserElement.verbose_stacktrace:
    +                raise
    +            else:
    +                # catch and re-raise exception from here, clears out pyparsing internal stack trace
    +                raise exc
    +
    +    def searchString( self, instring, maxMatches=_MAX_INT ):
    +        """
    +        Another extension to C{L{scanString}}, simplifying the access to the tokens found
    +        to match the given parse expression.  May be called with optional
    +        C{maxMatches} argument, to clip searching after 'n' matches are found.
    +        
    +        Example::
    +            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
    +            cap_word = Word(alphas.upper(), alphas.lower())
    +            
    +            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
    +
    +            # the sum() builtin can be used to merge results into a single ParseResults object
    +            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
    +        prints::
    +            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
    +            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
    +        """
    +        try:
    +            # collect only the token lists, discarding start/end locations
    +            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
    +        except ParseBaseException as exc:
    +            if ParserElement.verbose_stacktrace:
    +                raise
    +            else:
    +                # catch and re-raise exception from here, clears out pyparsing internal stack trace
    +                raise exc
    +
    +    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
    +        """
    +        Generator method to split a string using the given expression as a separator.
    +        May be called with optional C{maxsplit} argument, to limit the number of splits;
    +        and the optional C{includeSeparators} argument (default=C{False}), if the separating
    +        matching text should be included in the split results.
    +        
    +        Example::        
    +            punc = oneOf(list(".,;:/-!?"))
    +            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
    +        prints::
    +            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
    +        """
    +        # 'splits' is currently unused; maxMatches on scanString does the limiting
    +        splits = 0
    +        last = 0
    +        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
    +            # yield the text between the previous separator and this one
    +            yield instring[last:s]
    +            if includeSeparators:
    +                yield t[0]
    +            last = e
    +        # trailing remainder after the final separator
    +        yield instring[last:]
    +
    +    def __add__(self, other ):
    +        """
    +        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
    +        converts them to L{Literal}s by default.
    +        
    +        Example::
    +            greet = Word(alphas) + "," + Word(alphas) + "!"
    +            hello = "Hello, World!"
    +            print (hello, "->", greet.parseString(hello))
    +        Prints::
    +            Hello, World! -> ['Hello', ',', 'World', '!']
    +        """
    +        # promote plain strings via the configurable literal class
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        return And( [ self, other ] )
    +
    +    def __radd__(self, other ):
    +        """
    +        Implementation of + operator when left operand is not a C{L{ParserElement}}
    +        """
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        # delegate to __add__ with the promoted operand on the left
    +        return other + self
    +
    +    def __sub__(self, other):
    +        """
    +        Implementation of - operator, returns C{L{And}} with error stop
    +        """
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        # the _ErrorStop marker makes a failure after this point non-backtrackable
    +        return self + And._ErrorStop() + other
    +
    +    def __rsub__(self, other ):
    +        """
    +        Implementation of - operator when left operand is not a C{L{ParserElement}}
    +        """
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        # delegate to __sub__ with the promoted operand on the left
    +        return other - self
    +
    +    def __mul__(self,other):
    +        """
    +        Implementation of * operator, allows use of C{expr * 3} in place of
    +        C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer
    +        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
    +        may also include C{None} as in:
    +         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
    +              to C{expr*n + L{ZeroOrMore}(expr)}
    +              (read as "at least n instances of C{expr}")
    +         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
    +              (read as "0 to n instances of C{expr}")
    +         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
    +         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
    +
    +        Note that C{expr*(None,n)} does not raise an exception if
    +        more than n exprs exist in the input stream; that is,
    +        C{expr*(None,n)} does not enforce a maximum number of expr
    +        occurrences.  If this behavior is desired, then write
    +        C{expr*(None,n) + ~expr}
    +        """
    +        if isinstance(other,int):
    +            minElements, optElements = other,0
    +        elif isinstance(other,tuple):
    +            # pad short tuples with None and normalize a None minimum to 0
    +            other = (other + (None, None))[:2]
    +            if other[0] is None:
    +                other = (0, other[1])
    +            if isinstance(other[0],int) and other[1] is None:
    +                if other[0] == 0:
    +                    return ZeroOrMore(self)
    +                if other[0] == 1:
    +                    return OneOrMore(self)
    +                else:
    +                    return self*other[0] + ZeroOrMore(self)
    +            elif isinstance(other[0],int) and isinstance(other[1],int):
    +                # optElements = number of optional repetitions beyond the minimum
    +                minElements, optElements = other
    +                optElements -= minElements
    +            else:
    +                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
    +        else:
    +            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
    +
    +        if minElements < 0:
    +            raise ValueError("cannot multiply ParserElement by negative value")
    +        if optElements < 0:
    +            raise ValueError("second tuple value must be greater or equal to first tuple value")
    +        if minElements == optElements == 0:
    +            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
    +
    +        if (optElements):
    +            # build nested Optionals so the optional tail matches greedily
    +            def makeOptionalList(n):
    +                if n>1:
    +                    return Optional(self + makeOptionalList(n-1))
    +                else:
    +                    return Optional(self)
    +            if minElements:
    +                if minElements == 1:
    +                    ret = self + makeOptionalList(optElements)
    +                else:
    +                    ret = And([self]*minElements) + makeOptionalList(optElements)
    +            else:
    +                ret = makeOptionalList(optElements)
    +        else:
    +            if minElements == 1:
    +                ret = self
    +            else:
    +                ret = And([self]*minElements)
    +        return ret
    +
    +    def __rmul__(self, other):
    +        # n * expr behaves identically to expr * n
    +        return self.__mul__(other)
    +
    +    def __or__(self, other ):
    +        """
    +        Implementation of | operator - returns C{L{MatchFirst}}
    +        """
    +        # promote plain strings via the configurable literal class
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        return MatchFirst( [ self, other ] )
    +
    +    def __ror__(self, other ):
    +        """
    +        Implementation of | operator when left operand is not a C{L{ParserElement}}
    +        """
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        # delegate to the promoted left operand's own | implementation
    +        return other | self
    +
    +    def __xor__(self, other ):
    +        """
    +        Implementation of ^ operator - returns C{L{Or}}
    +        """
    +        # promote a plain string operand to a parser element before combining
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        return Or( [ self, other ] )
    +
    +    def __rxor__(self, other ):
    +        """
    +        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
    +        """
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        # delegate to the promoted left operand's own ^ implementation
    +        return other ^ self
    +
    +    def __and__(self, other ):
    +        """
    +        Implementation of & operator - returns C{L{Each}}
    +        """
    +        # promote a plain string operand to a parser element before combining
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        return Each( [ self, other ] )
    +
    +    def __rand__(self, other ):
    +        """
    +        Implementation of & operator when left operand is not a C{L{ParserElement}}
    +        """
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        if not isinstance( other, ParserElement ):
    +            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
    +                    SyntaxWarning, stacklevel=2)
    +            return None
    +        # delegate to the promoted left operand's own & implementation
    +        return other & self
    +
    +    def __invert__( self ):
    +        """
    +        Implementation of ~ operator - returns C{L{NotAny}}
    +        """
    +        # ~ is unary, so no reflected (__r...__) counterpart is needed
    +        return NotAny( self )
    +
    +    def __call__(self, name=None):
    +        """
    +        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
    +        
    +        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
    +        passed as C{True}.
    +           
    +        If C{name} is omitted, same as calling C{L{copy}}.
    +
    +        Example::
    +            # these are equivalent
    +            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
    +            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
    +        """
    +        if name is not None:
    +            # the trailing-'*' convention is handled inside setResultsName
    +            return self.setResultsName(name)
    +        else:
    +            return self.copy()
    +
    +    def suppress( self ):
    +        """
    +        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
    +        cluttering up returned output.  Returns a C{Suppress} wrapper around this element.
    +        """
    +        return Suppress( self )
    +
    +    def leaveWhitespace( self ):
    +        """
    +        Disables the skipping of whitespace before matching the characters in the
    +        C{ParserElement}'s defined pattern.  This is normally only used internally by
    +        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
    +        Returns C{self} to allow call chaining.
    +        """
    +        self.skipWhitespace = False
    +        return self
    +
    +    def setWhitespaceChars( self, chars ):
    +        """
    +        Overrides the default whitespace chars.  Also re-enables whitespace
    +        skipping and marks this element as no longer inheriting the default
    +        whitespace set.  Returns C{self} to allow call chaining.
    +        """
    +        self.skipWhitespace = True
    +        self.whiteChars = chars
    +        self.copyDefaultWhiteChars = False
    +        return self
    +
    +    def parseWithTabs( self ):
    +        """
    +        Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
    +        Must be called before C{parseString} when the input grammar contains elements that
    +        match C{<TAB>} characters.  Returns C{self} to allow call chaining.
    +        """
    +        self.keepTabs = True
    +        return self
    +
    +    def ignore( self, other ):
    +        """
    +        Define expression to be ignored (e.g., comments) while doing pattern
    +        matching; may be called repeatedly, to define multiple comment or other
    +        ignorable patterns.  Returns C{self} to allow call chaining.
    +        
    +        Example::
    +            patt = OneOrMore(Word(alphas))
    +            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
    +            
    +            patt.ignore(cStyleComment)
    +            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
    +        """
    +        # plain strings are wrapped so the ignored text produces no output
    +        if isinstance(other, basestring):
    +            other = Suppress(other)
    +
    +        if isinstance( other, Suppress ):
    +            # avoid registering the same Suppress expression twice
    +            if other not in self.ignoreExprs:
    +                self.ignoreExprs.append(other)
    +        else:
    +            # non-Suppress expressions are copied, then suppressed
    +            self.ignoreExprs.append( Suppress( other.copy() ) )
    +        return self
    +
    +    def setDebugActions( self, startAction, successAction, exceptionAction ):
    +        """
    +        Enable display of debugging messages while doing pattern matching.
    +        Each of the three actions may be C{None}, in which case the module
    +        default debug action is used in its place.  Returns C{self}.
    +        """
    +        self.debugActions = (startAction or _defaultStartDebugAction,
    +                             successAction or _defaultSuccessDebugAction,
    +                             exceptionAction or _defaultExceptionDebugAction)
    +        self.debug = True
    +        return self
    +
    +    def setDebug( self, flag=True ):
    +        """
    +        Enable display of debugging messages while doing pattern matching.
    +        Set C{flag} to True to enable, False to disable.
    +
    +        Example::
    +            wd = Word(alphas).setName("alphaword")
    +            integer = Word(nums).setName("numword")
    +            term = wd | integer
    +            
    +            # turn on debugging for wd
    +            wd.setDebug()
    +
    +            OneOrMore(term).parseString("abc 123 xyz 890")
    +        
    +        prints::
    +            Match alphaword at loc 0(1,1)
    +            Matched alphaword -> ['abc']
    +            Match alphaword at loc 3(1,4)
    +            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
    +            Match alphaword at loc 7(1,8)
    +            Matched alphaword -> ['xyz']
    +            Match alphaword at loc 11(1,12)
    +            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
    +            Match alphaword at loc 15(1,16)
    +            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
    +
    +        The output shown is that produced by the default debug actions - custom debug actions can be
    +        specified using L{setDebugActions}. Prior to attempting
    +        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
    +        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
    +        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
    +        which makes debugging and exception messages easier to understand - for instance, the default
    +        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
    +        """
    +        if flag:
    +            # enabling installs the three module-default debug actions
    +            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
    +        else:
    +            # disabling only clears the flag; any custom actions are left in place
    +            self.debug = False
    +        return self
    +
    +    def __str__( self ):
    +        # the element's display name doubles as its string form
    +        return self.name
    +
    +    def __repr__( self ):
    +        # reuse the unicode-safe string form for repr
    +        return _ustr(self)
    +
    +    def streamline( self ):
    +        # mark as streamlined and drop any cached string representation
    +        self.streamlined = True
    +        self.strRepr = None
    +        return self
    +
    +    def checkRecursion( self, parseElementList ):
    +        # base implementation: nothing to check; subclasses that contain
    +        # sub-expressions are expected to override this
    +        pass
    +
    +    def validate( self, validateTrace=[] ):
    +        """
    +        Check defined expressions for valid structure, check for infinite recursive definitions.
    +        """
    +        # delegates to checkRecursion with a fresh trace list
    +        self.checkRecursion( [] )
    +
    +    def parseFile( self, file_or_filename, parseAll=False ):
    +        """
    +        Execute the parse expression on the given file or filename.
    +        If a filename is specified (instead of a file object),
    +        the entire file is opened, read, and closed before parsing.
    +        """
    +        try:
    +            # EAFP: try it as a file-like object first ...
    +            file_contents = file_or_filename.read()
    +        except AttributeError:
    +            # ... and fall back to treating it as a path to open
    +            with open(file_or_filename, "r") as f:
    +                file_contents = f.read()
    +        try:
    +            return self.parseString(file_contents, parseAll)
    +        except ParseBaseException as exc:
    +            if ParserElement.verbose_stacktrace:
    +                raise
    +            else:
    +                # catch and re-raise exception from here, clears out pyparsing internal stack trace
    +                raise exc
    +
    +    def __eq__(self,other):
    +        """
    +        Two ParserElements are equal if identical or if their instance state
    +        matches; comparing against a string tests whether this element matches it.
    +        """
    +        if isinstance(other, ParserElement):
    +            return self is other or vars(self) == vars(other)
    +        elif isinstance(other, basestring):
    +            # expr == "some string" is shorthand for expr.matches("some string")
    +            return self.matches(other)
    +        else:
    +            # NOTE(review): this compares the super() proxy object itself against
    +            # 'other' (it does not delegate to a base-class __eq__), so it is
    +            # effectively always False -- confirm before relying on this branch
    +            return super(ParserElement,self)==other
    +
    +    def __ne__(self,other):
    +        # defined explicitly for Python 2, which does not derive != from ==
    +        return not (self == other)
    +
    +    def __hash__(self):
    +        # identity-based hash; NOTE(review): __eq__ can also report equality for
    +        # distinct instances with identical state, which those hashes won't reflect
    +        return hash(id(self))
    +
    +    def __req__(self,other):
    +        # reflected equality; delegates to __eq__
    +        return self == other
    +
    +    def __rne__(self,other):
    +        # reflected inequality; delegates to __eq__
    +        return not (self == other)
    +
    +    def matches(self, testString, parseAll=True):
    +        """
    +        Method for quick testing of a parser against a test string. Good for simple 
    +        inline microtests of sub expressions while building up larger parser.
    +           
    +        Parameters:
    +         - testString - to test against this expression for a match
    +         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
    +            
    +        Example::
    +            expr = Word(nums)
    +            assert expr.matches("100")
    +        """
    +        try:
    +            self.parseString(_ustr(testString), parseAll=parseAll)
    +            return True
    +        except ParseBaseException:
    +            # any parse failure simply means "no match"
    +            return False
    +                
    +    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
    +        """
    +        Execute the parse expression on a series of test strings, showing each
    +        test, the parsed results or where the parse failed. Quick and easy way to
    +        run a parse expression against a list of sample strings.
    +           
    +        Parameters:
    +         - tests - a list of separate test strings, or a multiline string of test strings
    +         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
    +         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
    +              string; pass None to disable comment filtering
    +         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
    +              if False, only dump nested list
    +         - printResults - (default=C{True}) prints test output to stdout
    +         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
    +
    +        Returns: a (success, results) tuple, where success indicates that all tests succeeded
    +        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
    +        test's output
    +        
    +        Example::
    +            number_expr = pyparsing_common.number.copy()
    +
    +            result = number_expr.runTests('''
    +                # unsigned integer
    +                100
    +                # negative integer
    +                -100
    +                # float with scientific notation
    +                6.02e23
    +                # integer with scientific notation
    +                1e-12
    +                ''')
    +            print("Success" if result[0] else "Failed!")
    +
    +            result = number_expr.runTests('''
    +                # stray character
    +                100Z
    +                # missing leading digit before '.'
    +                -.100
    +                # too many '.'
    +                3.14.159
    +                ''', failureTests=True)
    +            print("Success" if result[0] else "Failed!")
    +        prints::
    +            # unsigned integer
    +            100
    +            [100]
    +
    +            # negative integer
    +            -100
    +            [-100]
    +
    +            # float with scientific notation
    +            6.02e23
    +            [6.02e+23]
    +
    +            # integer with scientific notation
    +            1e-12
    +            [1e-12]
    +
    +            Success
    +            
    +            # stray character
    +            100Z
    +               ^
    +            FAIL: Expected end of text (at char 3), (line:1, col:4)
    +
    +            # missing leading digit before '.'
    +            -.100
    +            ^
    +            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
    +
    +            # too many '.'
    +            3.14.159
    +                ^
    +            FAIL: Expected end of text (at char 4), (line:1, col:5)
    +
    +            Success
    +
    +        Each test string must be on a single line. If you want to test a string that spans multiple
    +        lines, create a test like this::
    +
    +            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
    +        
    +        (Note that this is a raw string literal, you must include the leading 'r'.)
    +        """
    +        # a multiline string becomes one stripped test per line
    +        if isinstance(tests, basestring):
    +            tests = list(map(str.strip, tests.rstrip().splitlines()))
    +        if isinstance(comment, basestring):
    +            comment = Literal(comment)
    +        allResults = []
    +        comments = []
    +        success = True
    +        for t in tests:
    +            # a line is a comment if it matches the comment expr, or if it is a
    +            # blank line while comments are pending ('and' binds tighter than 'or')
    +            if comment is not None and comment.matches(t, False) or comments and not t:
    +                comments.append(t)
    +                continue
    +            if not t:
    +                continue
    +            out = ['\n'.join(comments), t]
    +            comments = []
    +            try:
    +                # turn literal backslash-n sequences into real newlines
    +                t = t.replace(r'\n','\n')
    +                result = self.parseString(t, parseAll=parseAll)
    +                out.append(result.dump(full=fullDump))
    +                # a parse success counts as overall success only in non-failure mode
    +                success = success and not failureTests
    +            except ParseBaseException as pe:
    +                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
    +                if '\n' in t:
    +                    out.append(line(pe.loc, t))
    +                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
    +                else:
    +                    out.append(' '*pe.loc + '^' + fatal)
    +                out.append("FAIL: " + str(pe))
    +                success = success and failureTests
    +                result = pe
    +            except Exception as exc:
    +                out.append("FAIL-EXCEPTION: " + str(exc))
    +                success = success and failureTests
    +                result = exc
    +
    +            if printResults:
    +                if fullDump:
    +                    out.append('')
    +                print('\n'.join(out))
    +
    +            allResults.append((t, result))
    +        
    +        return success, allResults
    +
    +        
    +class Token(ParserElement):
    +    """
    +    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
    +    """
    +    def __init__( self ):
    +        # tokens never need to save their match lists
    +        super(Token,self).__init__( savelist=False )
    +
    +
    +class Empty(Token):
    +    """
    +    An empty token, will always match.
    +    """
    +    def __init__( self ):
    +        super(Empty,self).__init__()
    +        self.name = "Empty"
    +        # matches zero characters, so it can neither fail nor index past the end
    +        self.mayReturnEmpty = True
    +        self.mayIndexError = False
    +
    +
    +class NoMatch(Token):
    +    """
    +    A token that will never match.
    +    """
    +    def __init__( self ):
    +        super(NoMatch,self).__init__()
    +        self.name = "NoMatch"
    +        self.mayReturnEmpty = True
    +        self.mayIndexError = False
    +        self.errmsg = "Unmatchable token"
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # unconditionally fails, by design
    +        raise ParseException(instring, loc, self.errmsg, self)
    +
    +
    +class Literal(Token):
    +    """
    +    Token to exactly match a specified string.
    +    
    +    Example::
    +        Literal('blah').parseString('blah')  # -> ['blah']
    +        Literal('blah').parseString('blahfooblah')  # -> ['blah']
    +        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
    +    
    +    For case-insensitive matching, use L{CaselessLiteral}.
    +    
    +    For keyword matching (force word break before and after the matched string),
    +    use L{Keyword} or L{CaselessKeyword}.
    +    """
    +    def __init__( self, matchString ):
    +        super(Literal,self).__init__()
    +        self.match = matchString
    +        self.matchLen = len(matchString)
    +        try:
    +            self.firstMatchChar = matchString[0]
    +        except IndexError:
    +            # empty match string: degrade this instance to Empty, which always matches
    +            warnings.warn("null string passed to Literal; use Empty() instead",
    +                            SyntaxWarning, stacklevel=2)
    +            self.__class__ = Empty
    +        self.name = '"%s"' % _ustr(self.match)
    +        self.errmsg = "Expected " + self.name
    +        self.mayReturnEmpty = False
    +        self.mayIndexError = False
    +
    +    # Performance tuning: this routine gets called a *lot*
    +    # if this is a single character match string  and the first character matches,
    +    # short-circuit as quickly as possible, and avoid calling startswith
    +    #~ @profile
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        if (instring[loc] == self.firstMatchChar and
    +            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
    +            return loc+self.matchLen, self.match
    +        raise ParseException(instring, loc, self.errmsg, self)
    +_L = Literal  # short alias for Literal
    +# class used by ParserElement's operators to promote plain strings to elements
    +ParserElement._literalStringClass = Literal
    +
    +class Keyword(Token):
    +    """
    +    Token to exactly match a specified string as a keyword, that is, it must be
    +    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
    +     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
    +     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    +    Accepts two optional constructor arguments in addition to the keyword string:
    +     - C{identChars} is a string of characters that would be valid identifier characters,
    +          defaulting to all alphanumerics + "_" and "$"
    +     - C{caseless} allows case-insensitive matching, default is C{False}.
    +       
    +    Example::
    +        Keyword("start").parseString("start")  # -> ['start']
    +        Keyword("start").parseString("starting")  # -> Exception
    +
    +    For case-insensitive matching, use L{CaselessKeyword}.
    +    """
    +    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    +
    +    def __init__( self, matchString, identChars=None, caseless=False ):
    +        super(Keyword,self).__init__()
    +        if identChars is None:
    +            identChars = Keyword.DEFAULT_KEYWORD_CHARS
    +        self.match = matchString
    +        self.matchLen = len(matchString)
    +        try:
    +            self.firstMatchChar = matchString[0]
    +        except IndexError:
    +            # NOTE(review): unlike Literal, this path leaves firstMatchChar unset
    +            # (and does not degrade to Empty), so parseImpl on an empty keyword
    +            # would raise AttributeError -- confirm this is intended
    +            warnings.warn("null string passed to Keyword; use Empty() instead",
    +                            SyntaxWarning, stacklevel=2)
    +        self.name = '"%s"' % self.match
    +        self.errmsg = "Expected " + self.name
    +        self.mayReturnEmpty = False
    +        self.mayIndexError = False
    +        self.caseless = caseless
    +        if caseless:
    +            # pre-uppercase both sides so matching compares upper vs upper
    +            self.caselessmatch = matchString.upper()
    +            identChars = identChars.upper()
    +        self.identChars = set(identChars)
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        if self.caseless:
    +            # match text, then require non-identifier chars (or string bounds)
    +            # immediately after and before the keyword
    +            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
    +                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
    +                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
    +                return loc+self.matchLen, self.match
    +        else:
    +            if (instring[loc] == self.firstMatchChar and
    +                (self.matchLen==1 or instring.startswith(self.match,loc)) and
    +                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
    +                (loc == 0 or instring[loc-1] not in self.identChars) ):
    +                return loc+self.matchLen, self.match
    +        raise ParseException(instring, loc, self.errmsg, self)
    +
    +    def copy(self):
    +        # copies revert to the class-default identifier characters
    +        c = super(Keyword,self).copy()
    +        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
    +        return c
    +
    +    @staticmethod
    +    def setDefaultKeywordChars( chars ):
    +        """Overrides the default Keyword chars
    +        """
    +        Keyword.DEFAULT_KEYWORD_CHARS = chars
    +
    +class CaselessLiteral(Literal):
    +    """
    +    Token to match a specified string, ignoring case of letters.
    +    Note: the matched results will always be in the case of the given
    +    match string, NOT the case of the input text.
    +
    +    Example::
    +        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
    +        
    +    (Contrast with example for L{CaselessKeyword}.)
    +    """
    +    def __init__( self, matchString ):
    +        # store the match string upper-cased; comparisons upper-case the input
    +        super(CaselessLiteral,self).__init__( matchString.upper() )
    +        # Preserve the defining literal.
    +        self.returnString = matchString
    +        self.name = "'%s'" % self.returnString
    +        self.errmsg = "Expected " + self.name
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        if instring[ loc:loc+self.matchLen ].upper() == self.match:
    +            # return the original-case defining string, not the input text
    +            return loc+self.matchLen, self.returnString
    +        raise ParseException(instring, loc, self.errmsg, self)
    +
    +class CaselessKeyword(Keyword):
    +    """
    +    Caseless version of L{Keyword}.
    +
    +    Example::
    +        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
    +        
    +    (Contrast with example for L{CaselessLiteral}.)
    +    """
    +    def __init__( self, matchString, identChars=None ):
    +        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # NOTE(review): unlike Keyword.parseImpl, this override checks only the
    +        # character *after* the keyword, not the one before it -- confirm the
    +        # missing leading-boundary check is intentional
    +        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
    +             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
    +            return loc+self.matchLen, self.match
    +        raise ParseException(instring, loc, self.errmsg, self)
    +
    +class CloseMatch(Token):
    +    """
    +    A variation on L{Literal} which matches "close" matches, that is, 
    +    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
    +     - C{match_string} - string to be matched
    +     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
    +    
    +    The results from a successful parse will contain the matched text from the input string and the following named results:
    +     - C{mismatches} - a list of the positions within the match_string where mismatches were found
    +     - C{original} - the original match_string used to compare against the input string
    +    
    +    If C{mismatches} is an empty list, then the match was an exact match.
    +    
    +    Example::
    +        patt = CloseMatch("ATCATCGAATGGA")
    +        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
    +        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
    +
    +        # exact match
    +        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
    +
    +        # close match allowing up to 2 mismatches
    +        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
    +        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
    +    """
    +    def __init__(self, match_string, maxMismatches=1):
    +        super(CloseMatch,self).__init__()
    +        self.name = match_string
    +        self.match_string = match_string
    +        self.maxMismatches = maxMismatches
    +        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
    +        self.mayIndexError = False
    +        self.mayReturnEmpty = False
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        start = loc
    +        instrlen = len(instring)
    +        maxloc = start + len(self.match_string)
    +
    +        if maxloc <= instrlen:
    +            match_string = self.match_string
    +            match_stringloc = 0
    +            mismatches = []
    +            maxMismatches = self.maxMismatches
    +
    +            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
    +                src,mat = s_m
    +                if src != mat:
    +                    mismatches.append(match_stringloc)
    +                    if len(mismatches) > maxMismatches:
    +                        break
    +            else:
    +                loc = match_stringloc + 1
    +                results = ParseResults([instring[start:loc]])
    +                results['original'] = self.match_string
    +                results['mismatches'] = mismatches
    +                return loc, results
    +
    +        raise ParseException(instring, loc, self.errmsg, self)
    +
    +
    +class Word(Token):
    +    """
    +    Token for matching words composed of allowed character sets.
    +    Defined with string containing all allowed initial characters,
    +    an optional string containing allowed body characters (if omitted,
    +    defaults to the initial character set), and an optional minimum,
    +    maximum, and/or exact length.  The default value for C{min} is 1 (a
    +    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    +    are 0, meaning no maximum or exact length restriction. An optional
    +    C{excludeChars} parameter can list characters that might be found in 
    +    the input C{bodyChars} string; useful to define a word of all printables
    +    except for one or two characters, for instance.
    +    
    +    L{srange} is useful for defining custom character set strings for defining 
    +    C{Word} expressions, using range notation from regular expression character sets.
    +    
    +    A common mistake is to use C{Word} to match a specific literal string, as in 
    +    C{Word("Address")}. Remember that C{Word} uses the string argument to define
    +    I{sets} of matchable characters. This expression would match "Add", "AAA",
    +    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
    +    To match an exact literal string, use L{Literal} or L{Keyword}.
    +
    +    pyparsing includes helper strings for building Words:
    +     - L{alphas}
    +     - L{nums}
    +     - L{alphanums}
    +     - L{hexnums}
    +     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
    +     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
    +     - L{printables} (any non-whitespace character)
    +
    +    Example::
    +        # a word composed of digits
    +        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
    +        
    +        # a word with a leading capital, and zero or more lowercase
    +        capital_word = Word(alphas.upper(), alphas.lower())
    +
    +        # hostnames are alphanumeric, with leading alpha, and '-'
    +        hostname = Word(alphas, alphanums+'-')
    +        
    +        # roman numeral (not a strict parser, accepts invalid mix of characters)
    +        roman = Word("IVXLCDM")
    +        
    +        # any string of non-whitespace characters, except for ','
    +        csv_value = Word(printables, excludeChars=",")
    +    """
    +    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
    +        # Build a Word token: initChars are legal in the first position,
    +        # bodyChars (default: initChars) in all following positions.
    +        # excludeChars is subtracted from both sets; min/max/exact constrain
    +        # the matched length; asKeyword requires non-word chars on both sides.
    +        super(Word,self).__init__()
    +        if excludeChars:
    +            initChars = ''.join(c for c in initChars if c not in excludeChars)
    +            if bodyChars:
    +                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
    +        self.initCharsOrig = initChars
    +        self.initChars = set(initChars)
    +        if bodyChars :
    +            self.bodyCharsOrig = bodyChars
    +            self.bodyChars = set(bodyChars)
    +        else:
    +            # no separate body set - body characters are the same as initial ones
    +            self.bodyCharsOrig = initChars
    +            self.bodyChars = set(initChars)
    +
    +        self.maxSpecified = max > 0
    +
    +        if min < 1:
    +            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
    +
    +        self.minLen = min
    +
    +        if max > 0:
    +            self.maxLen = max
    +        else:
    +            # max == 0 means "no maximum"
    +            self.maxLen = _MAX_INT
    +
    +        if exact > 0:
    +            # exact overrides both min and max
    +            self.maxLen = exact
    +            self.minLen = exact
    +
    +        self.name = _ustr(self)
    +        self.errmsg = "Expected " + self.name
    +        self.mayIndexError = False
    +        self.asKeyword = asKeyword
    +
    +        # Fast path: for unconstrained-length words whose char sets contain no
    +        # space, precompile an equivalent regular expression; on any compile
    +        # failure fall back (self.re = None) to the char-scanning parseImpl.
    +        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
    +            if self.bodyCharsOrig == self.initCharsOrig:
    +                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
    +            elif len(self.initCharsOrig) == 1:
    +                self.reString = "%s[%s]*" % \
    +                                      (re.escape(self.initCharsOrig),
    +                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
    +            else:
    +                self.reString = "[%s][%s]*" % \
    +                                      (_escapeRegexRangeChars(self.initCharsOrig),
    +                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
    +            if self.asKeyword:
    +                self.reString = r"\b"+self.reString+r"\b"
    +            try:
    +                self.re = re.compile( self.reString )
    +            except Exception:
    +                self.re = None
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        if self.re:
    +            result = self.re.match(instring,loc)
    +            if not result:
    +                raise ParseException(instring, loc, self.errmsg, self)
    +
    +            loc = result.end()
    +            return loc, result.group()
    +
    +        if not(instring[ loc ] in self.initChars):
    +            raise ParseException(instring, loc, self.errmsg, self)
    +
    +        start = loc
    +        loc += 1
    +        instrlen = len(instring)
    +        bodychars = self.bodyChars
    +        maxloc = start + self.maxLen
    +        maxloc = min( maxloc, instrlen )
    +        while loc < maxloc and instring[loc] in bodychars:
    +            loc += 1
    +
    +        throwException = False
    +        if loc - start < self.minLen:
    +            throwException = True
    +        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
    +            throwException = True
    +        if self.asKeyword:
    +            if (start>0 and instring[start-1] in bodychars) or (loc4:
    +                    return s[:4]+"..."
    +                else:
    +                    return s
    +
    +            if ( self.initCharsOrig != self.bodyCharsOrig ):
    +                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
    +            else:
    +                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
    +
    +        return self.strRepr
    +
    +
    +class Regex(Token):
    +    r"""
    +    Token for matching strings that match a given regular expression.
    +    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    +    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as 
    +    named parse results.
    +
    +    Example::
    +        realnum = Regex(r"[+-]?\d+\.\d*")
    +        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
    +        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
    +        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
    +    """
    +    # type object of a compiled regex, for the isinstance test in __init__
    +    # (re.Pattern is not available on the Pythons this file targets)
    +    compiledREtype = type(re.compile("[A-Z]"))
    +    def __init__( self, pattern, flags=0):
    +        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
    +        super(Regex,self).__init__()
    +
    +        if isinstance(pattern, basestring):
    +            if not pattern:
    +                warnings.warn("null string passed to Regex; use Empty() instead",
    +                        SyntaxWarning, stacklevel=2)
    +
    +            self.pattern = pattern
    +            self.flags = flags
    +
    +            try:
    +                self.re = re.compile(self.pattern, self.flags)
    +                self.reString = self.pattern
    +            except sre_constants.error:
    +                # warn with the offending pattern, then let the error propagate
    +                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
    +                    SyntaxWarning, stacklevel=2)
    +                raise
    +
    +        elif isinstance(pattern, Regex.compiledREtype):
    +            # accept an already-compiled regex object as-is
    +            self.re = pattern
    +            self.pattern = \
    +            self.reString = str(pattern)
    +            self.flags = flags
    +            
    +        else:
    +            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
    +
    +        self.name = _ustr(self)
    +        self.errmsg = "Expected " + self.name
    +        self.mayIndexError = False
    +        self.mayReturnEmpty = True
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        result = self.re.match(instring,loc)
    +        if not result:
    +            raise ParseException(instring, loc, self.errmsg, self)
    +
    +        loc = result.end()
    +        d = result.groupdict()
    +        ret = ParseResults(result.group())
    +        if d:
    +            # expose named regex groups as named parse results
    +            for k in d:
    +                ret[k] = d[k]
    +        return loc,ret
    +
    +    def __str__( self ):
    +        try:
    +            return super(Regex,self).__str__()
    +        except Exception:
    +            pass
    +
    +        # fall back to a generated representation when no name is set
    +        if self.strRepr is None:
    +            self.strRepr = "Re:(%s)" % repr(self.pattern)
    +
    +        return self.strRepr
    +
    +
    +class QuotedString(Token):
    +    r"""
    +    Token for matching strings that are delimited by quoting characters.
    +    
    +    Defined with the following parameters:
    +        - quoteChar - string of one or more characters defining the quote delimiting string
    +        - escChar - character to escape quotes, typically backslash (default=C{None})
    +        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
    +        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
    +        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
    +        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
    +        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
    +
    +    Example::
    +        qs = QuotedString('"')
    +        print(qs.searchString('lsjdf "This is the quote" sldjf'))
    +        complex_qs = QuotedString('{{', endQuoteChar='}}')
    +        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
    +        sql_qs = QuotedString('"', escQuote='""')
    +        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
    +    prints::
    +        [['This is the quote']]
    +        [['This is the "quote"']]
    +        [['This is the quote with "embedded" quotes']]
    +    """
    +    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
    +        super(QuotedString,self).__init__()
    +
    +        # remove white space from quote chars - wont work anyway
    +        quoteChar = quoteChar.strip()
    +        if not quoteChar:
    +            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
    +            raise SyntaxError()
    +
    +        if endQuoteChar is None:
    +            endQuoteChar = quoteChar
    +        else:
    +            endQuoteChar = endQuoteChar.strip()
    +            if not endQuoteChar:
    +                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
    +                raise SyntaxError()
    +
    +        self.quoteChar = quoteChar
    +        self.quoteCharLen = len(quoteChar)
    +        self.firstQuoteChar = quoteChar[0]
    +        self.endQuoteChar = endQuoteChar
    +        self.endQuoteCharLen = len(endQuoteChar)
    +        self.escChar = escChar
    +        self.escQuote = escQuote
    +        self.unquoteResults = unquoteResults
    +        self.convertWhitespaceEscapes = convertWhitespaceEscapes
    +
    +        # Build a regex for the whole quoted string: opening quote, then an
    +        # alternation of "ordinary" body characters (plus, below, partial end
    +        # quotes, escaped quotes, and escape sequences), then the end quote.
    +        if multiline:
    +            self.flags = re.MULTILINE | re.DOTALL
    +            self.pattern = r'%s(?:[^%s%s]' % \
    +                ( re.escape(self.quoteChar),
    +                  _escapeRegexRangeChars(self.endQuoteChar[0]),
    +                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
    +        else:
    +            self.flags = 0
    +            self.pattern = r'%s(?:[^%s\n\r%s]' % \
    +                ( re.escape(self.quoteChar),
    +                  _escapeRegexRangeChars(self.endQuoteChar[0]),
    +                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
    +        # multi-character end quote: allow proper prefixes of it inside the body
    +        if len(self.endQuoteChar) > 1:
    +            self.pattern += (
    +                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
    +                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
    +                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
    +                )
    +        if escQuote:
    +            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
    +        if escChar:
    +            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
    +            # used in parseImpl to strip the escape character from the result
    +            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
    +        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
    +
    +        try:
    +            self.re = re.compile(self.pattern, self.flags)
    +            self.reString = self.pattern
    +        except sre_constants.error:
    +            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
    +                SyntaxWarning, stacklevel=2)
    +            raise
    +
    +        self.name = _ustr(self)
    +        self.errmsg = "Expected " + self.name
    +        self.mayIndexError = False
    +        self.mayReturnEmpty = True
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # cheap single-character test before running the full regex
    +        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
    +        if not result:
    +            raise ParseException(instring, loc, self.errmsg, self)
    +
    +        loc = result.end()
    +        ret = result.group()
    +
    +        if self.unquoteResults:
    +
    +            # strip off quotes
    +            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
    +
    +            if isinstance(ret,basestring):
    +                # replace escaped whitespace
    +                if '\\' in ret and self.convertWhitespaceEscapes:
    +                    ws_map = {
    +                        r'\t' : '\t',
    +                        r'\n' : '\n',
    +                        r'\f' : '\f',
    +                        r'\r' : '\r',
    +                    }
    +                    for wslit,wschar in ws_map.items():
    +                        ret = ret.replace(wslit, wschar)
    +
    +                # replace escaped characters
    +                if self.escChar:
    +                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
    +
    +                # replace escaped quotes
    +                if self.escQuote:
    +                    ret = ret.replace(self.escQuote, self.endQuoteChar)
    +
    +        return loc, ret
    +
    +    def __str__( self ):
    +        try:
    +            return super(QuotedString,self).__str__()
    +        except Exception:
    +            pass
    +
    +        if self.strRepr is None:
    +            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
    +
    +        return self.strRepr
    +
    +
    +class CharsNotIn(Token):
    +    """
    +    Token for matching words composed of characters I{not} in a given set (will
    +    include whitespace in matched characters if not listed in the provided exclusion set - see example).
    +    Defined with string containing all disallowed characters, and an optional
    +    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    +    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    +    are 0, meaning no maximum or exact length restriction.
    +
    +    Example::
    +        # define a comma-separated-value as anything that is not a ','
    +        csv_value = CharsNotIn(',')
    +        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
    +    prints::
    +        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
    +    """
    +    def __init__( self, notChars, min=1, max=0, exact=0 ):
    +        super(CharsNotIn,self).__init__()
    +        # whitespace is matchable here, so it must not be skipped
    +        self.skipWhitespace = False
    +        self.notChars = notChars
    +
    +        if min < 1:
    +            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
    +
    +        self.minLen = min
    +
    +        if max > 0:
    +            self.maxLen = max
    +        else:
    +            # max == 0 means "no maximum"
    +            self.maxLen = _MAX_INT
    +
    +        if exact > 0:
    +            self.maxLen = exact
    +            self.minLen = exact
    +
    +        self.name = _ustr(self)
    +        self.errmsg = "Expected " + self.name
    +        # NOTE(review): min is forced >= 1 above, so minLen can never be 0 here
    +        self.mayReturnEmpty = ( self.minLen == 0 )
    +        self.mayIndexError = False
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        if instring[loc] in self.notChars:
    +            raise ParseException(instring, loc, self.errmsg, self)
    +
    +        start = loc
    +        loc += 1
    +        notchars = self.notChars
    +        # scan forward while characters stay outside the excluded set
    +        maxlen = min( start+self.maxLen, len(instring) )
    +        while loc < maxlen and \
    +              (instring[loc] not in notchars):
    +            loc += 1
    +
    +        if loc - start < self.minLen:
    +            raise ParseException(instring, loc, self.errmsg, self)
    +
    +        return loc, instring[start:loc]
    +
    +    def __str__( self ):
    +        try:
    +            return super(CharsNotIn, self).__str__()
    +        except Exception:
    +            pass
    +
    +        if self.strRepr is None:
    +            # abbreviate long exclusion sets in the repr
    +            if len(self.notChars) > 4:
    +                self.strRepr = "!W:(%s...)" % self.notChars[:4]
    +            else:
    +                self.strRepr = "!W:(%s)" % self.notChars
    +
    +        return self.strRepr
    +
    +class White(Token):
    +    """
    +    Special matching class for matching whitespace.  Normally, whitespace is ignored
    +    by pyparsing grammars.  This class is included when some whitespace structures
    +    are significant.  Define with a string containing the whitespace characters to be
    +    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    +    as defined for the C{L{Word}} class.
    +    """
    +    whiteStrs = {
    +        " " : "",
    +        "\t": "",
    +        "\n": "",
    +        "\r": "",
    +        "\f": "",
    +        }
    +    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
    +        super(White,self).__init__()
    +        self.matchWhite = ws
    +        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
    +        #~ self.leaveWhitespace()
    +        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
    +        self.mayReturnEmpty = True
    +        self.errmsg = "Expected " + self.name
    +
    +        self.minLen = min
    +
    +        if max > 0:
    +            self.maxLen = max
    +        else:
    +            self.maxLen = _MAX_INT
    +
    +        if exact > 0:
    +            self.maxLen = exact
    +            self.minLen = exact
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        if not(instring[ loc ] in self.matchWhite):
    +            raise ParseException(instring, loc, self.errmsg, self)
    +        start = loc
    +        loc += 1
    +        maxloc = start + self.maxLen
    +        maxloc = min( maxloc, len(instring) )
    +        while loc < maxloc and instring[loc] in self.matchWhite:
    +            loc += 1
    +
    +        if loc - start < self.minLen:
    +            raise ParseException(instring, loc, self.errmsg, self)
    +
    +        return loc, instring[start:loc]
    +
    +
    +class _PositionToken(Token):
    +    # Abstract base for zero-width matchers that assert a position in the
    +    # input (line/word/column boundaries) rather than consume characters.
    +    def __init__( self ):
    +        super(_PositionToken,self).__init__()
    +        self.name=self.__class__.__name__
    +        self.mayReturnEmpty = True
    +        self.mayIndexError = False
    +
    +class GoToColumn(_PositionToken):
    +    """
    +    Token to advance to a specific column of input text; useful for tabular report scraping.
    +    """
    +    def __init__( self, colno ):
    +        # colno: target column (columns are 1-based, per the col() helper)
    +        super(GoToColumn,self).__init__()
    +        self.col = colno
    +
    +    def preParse( self, instring, loc ):
    +        # advance over whitespace (and ignorables) until the target column
    +        if col(loc,instring) != self.col:
    +            instrlen = len(instring)
    +            if self.ignoreExprs:
    +                loc = self._skipIgnorables( instring, loc )
    +            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
    +                loc += 1
    +        return loc
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        thiscol = col( loc, instring )
    +        if thiscol > self.col:
    +            # already past the target column - cannot back up
    +            raise ParseException( instring, loc, "Text not in expected column", self )
    +        newloc = loc + self.col - thiscol
    +        # the skipped-over text is returned as the matched token
    +        ret = instring[ loc: newloc ]
    +        return newloc, ret
    +
    +
    +class LineStart(_PositionToken):
    +    """
    +    Matches if current position is at the beginning of a line within the parse string
    +    
    +    Example::
    +    
    +        test = '''\
    +        AAA this line
    +        AAA and this line
    +          AAA but not this one
    +        B AAA and definitely not this one
    +        '''
    +
    +        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
    +            print(t)
    +    
    +    Prints::
    +        ['AAA', ' this line']
    +        ['AAA', ' and this line']    
    +
    +    """
    +    def __init__( self ):
    +        super(LineStart,self).__init__()
    +        self.errmsg = "Expected start of line"
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # col() is 1-based; column 1 means loc sits at the start of a line
    +        if col(loc, instring) == 1:
    +            return loc, []
    +        raise ParseException(instring, loc, self.errmsg, self)
    +
    +class LineEnd(_PositionToken):
    +    """
    +    Matches if current position is at the end of a line within the parse string
    +    """
    +    def __init__( self ):
    +        super(LineEnd,self).__init__()
    +        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
    +        self.errmsg = "Expected end of line"
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        if loc len(instring):
    +            return loc, []
    +        else:
    +            raise ParseException(instring, loc, self.errmsg, self)
    +
    +class WordStart(_PositionToken):
    +    """
    +    Matches if the current position is at the beginning of a Word, and
    +    is not preceded by any character in a given set of C{wordChars}
    +    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    +    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    +    the string being parsed, or at the beginning of a line.
    +    """
    +    def __init__(self, wordChars = printables):
    +        # wordChars: the set of characters that constitute a "word"
    +        super(WordStart,self).__init__()
    +        self.wordChars = set(wordChars)
    +        self.errmsg = "Not at the start of a word"
    +
    +    def parseImpl(self, instring, loc, doActions=True ):
    +        # position 0 is always a word start; elsewhere require a non-word
    +        # char immediately before loc and a word char at loc
    +        if loc != 0:
    +            if (instring[loc-1] in self.wordChars or
    +                instring[loc] not in self.wordChars):
    +                raise ParseException(instring, loc, self.errmsg, self)
    +        return loc, []
    +
    +class WordEnd(_PositionToken):
    +    """
    +    Matches if the current position is at the end of a Word, and
    +    is not followed by any character in a given set of C{wordChars}
    +    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    +    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    +    the string being parsed, or at the end of a line.
    +    """
    +    def __init__(self, wordChars = printables):
    +        # wordChars: the set of characters that constitute a "word"
    +        super(WordEnd,self).__init__()
    +        self.wordChars = set(wordChars)
    +        # whitespace must not be skipped, or we would move past the word end
    +        self.skipWhitespace = False
    +        self.errmsg = "Not at the end of a word"
    +
    +    def parseImpl(self, instring, loc, doActions=True ):
    +        instrlen = len(instring)
    +        if instrlen>0 and loc maxExcLoc:
    +                    maxException = err
    +                    maxExcLoc = err.loc
    +            except IndexError:
    +                if len(instring) > maxExcLoc:
    +                    maxException = ParseException(instring,len(instring),e.errmsg,self)
    +                    maxExcLoc = len(instring)
    +            else:
    +                # save match among all matches, to retry longest to shortest
    +                matches.append((loc2, e))
    +
    +        if matches:
    +            matches.sort(key=lambda x: -x[0])
    +            for _,e in matches:
    +                try:
    +                    return e._parse( instring, loc, doActions )
    +                except ParseException as err:
    +                    err.__traceback__ = None
    +                    if err.loc > maxExcLoc:
    +                        maxException = err
    +                        maxExcLoc = err.loc
    +
    +        if maxException is not None:
    +            maxException.msg = self.errmsg
    +            raise maxException
    +        else:
    +            raise ParseException(instring, loc, "no defined alternatives to match", self)
    +
    +
    +    def __ixor__(self, other ):
    +        # support augmented '^=': append instead of nesting a new Or
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        return self.append( other ) #Or( [ self, other ] )
    +
    +    def __str__( self ):
    +        if hasattr(self,"name"):
    +            return self.name
    +
    +        # generated representation: alternatives joined with the '^' operator
    +        if self.strRepr is None:
    +            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
    +
    +        return self.strRepr
    +
    +    def checkRecursion( self, parseElementList ):
    +        # recurse into each alternative, extending the path used to detect
    +        # grammar recursion
    +        subRecCheckList = parseElementList[:] + [ self ]
    +        for e in self.exprs:
    +            e.checkRecursion( subRecCheckList )
    +
    +
    +class MatchFirst(ParseExpression):
    +    """
    +    Requires that at least one C{ParseExpression} is found.
    +    If two expressions match, the first one listed is the one that will match.
    +    May be constructed using the C{'|'} operator.
    +
    +    Example::
    +        # construct MatchFirst using '|' operator
    +        
    +        # watch the order of expressions to match
    +        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
    +        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
    +
    +        # put more selective expression first
    +        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
    +        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
    +    """
    +    def __init__( self, exprs, savelist = False ):
    +        super(MatchFirst,self).__init__(exprs, savelist)
    +        if self.exprs:
    +            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
    +        else:
    +            self.mayReturnEmpty = True
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        maxExcLoc = -1
    +        maxException = None
    +        # first alternative to parse wins; failures record the furthest location
    +        for e in self.exprs:
    +            try:
    +                ret = e._parse( instring, loc, doActions )
    +                return ret
    +            except ParseException as err:
    +                if err.loc > maxExcLoc:
    +                    maxException = err
    +                    maxExcLoc = err.loc
    +            except IndexError:
    +                if len(instring) > maxExcLoc:
    +                    maxException = ParseException(instring,len(instring),e.errmsg,self)
    +                    maxExcLoc = len(instring)
    +
    +        # only got here if no expression matched, raise exception for match that made it the furthest
    +        # (this is the for-loop's "else"; it always runs because a success returns above)
    +        else:
    +            if maxException is not None:
    +                maxException.msg = self.errmsg
    +                raise maxException
    +            else:
    +                raise ParseException(instring, loc, "no defined alternatives to match", self)
    +
    +    def __ior__(self, other ):
    +        # support augmented '|=': append instead of nesting a new MatchFirst
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass( other )
    +        return self.append( other ) #MatchFirst( [ self, other ] )
    +
    +    def __str__( self ):
    +        if hasattr(self,"name"):
    +            return self.name
    +
    +        # generated representation: alternatives joined with the '|' operator
    +        if self.strRepr is None:
    +            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
    +
    +        return self.strRepr
    +
    +    def checkRecursion( self, parseElementList ):
    +        subRecCheckList = parseElementList[:] + [ self ]
    +        for e in self.exprs:
    +            e.checkRecursion( subRecCheckList )
    +
    +
    +class Each(ParseExpression):
    +    """
    +    Requires all given C{ParseExpression}s to be found, but in any order.
    +    Expressions may be separated by whitespace.
    +    May be constructed using the C{'&'} operator.
    +
    +    Example::
    +        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
    +        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
    +        integer = Word(nums)
    +        shape_attr = "shape:" + shape_type("shape")
    +        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
    +        color_attr = "color:" + color("color")
    +        size_attr = "size:" + integer("size")
    +
    +        # use Each (using operator '&') to accept attributes in any order 
    +        # (shape and posn are required, color and size are optional)
    +        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
    +
    +        shape_spec.runTests('''
    +            shape: SQUARE color: BLACK posn: 100, 120
    +            shape: CIRCLE size: 50 color: BLUE posn: 50,80
    +            color:GREEN size:20 shape:TRIANGLE posn:20,40
    +            '''
    +            )
    +    prints::
    +        shape: SQUARE color: BLACK posn: 100, 120
    +        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
    +        - color: BLACK
    +        - posn: ['100', ',', '120']
    +          - x: 100
    +          - y: 120
    +        - shape: SQUARE
    +
    +
    +        shape: CIRCLE size: 50 color: BLUE posn: 50,80
    +        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
    +        - color: BLUE
    +        - posn: ['50', ',', '80']
    +          - x: 50
    +          - y: 80
    +        - shape: CIRCLE
    +        - size: 50
    +
    +
    +        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
    +        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
    +        - color: GREEN
    +        - posn: ['20', ',', '40']
    +          - x: 20
    +          - y: 40
    +        - shape: TRIANGLE
    +        - size: 20
    +    """
    +    def __init__( self, exprs, savelist = True ):
    +        super(Each,self).__init__(exprs, savelist)
    +        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
    +        self.skipWhitespace = True
    +        self.initExprGroups = True
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # lazily partition sub-expressions by kind on the first parse
    +        if self.initExprGroups:
    +            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
    +            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
    +            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
    +            self.optionals = opt1 + opt2
    +            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
    +            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
    +            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
    +            self.required += self.multirequired
    +            self.initExprGroups = False
    +        tmpLoc = loc
    +        tmpReqd = self.required[:]
    +        tmpOpt  = self.optionals[:]
    +        matchOrder = []
    +
    +        # repeatedly sweep all still-pending expressions; stop when a whole
    +        # pass matches nothing
    +        keepMatching = True
    +        while keepMatching:
    +            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
    +            failed = []
    +            for e in tmpExprs:
    +                try:
    +                    tmpLoc = e.tryParse( instring, tmpLoc )
    +                except ParseException:
    +                    failed.append(e)
    +                else:
    +                    matchOrder.append(self.opt1map.get(id(e),e))
    +                    if e in tmpReqd:
    +                        tmpReqd.remove(e)
    +                    elif e in tmpOpt:
    +                        tmpOpt.remove(e)
    +            if len(failed) == len(tmpExprs):
    +                keepMatching = False
    +
    +        if tmpReqd:
    +            missing = ", ".join(_ustr(e) for e in tmpReqd)
    +            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
    +
    +        # add any unmatched Optionals, in case they have default values defined
    +        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
    +
    +        # re-parse in the discovered order, this time collecting results
    +        resultlist = []
    +        for e in matchOrder:
    +            loc,results = e._parse(instring,loc,doActions)
    +            resultlist.append(results)
    +
    +        finalResults = sum(resultlist, ParseResults([]))
    +        return loc, finalResults
    +
    +    def __str__( self ):
    +        if hasattr(self,"name"):
    +            return self.name
    +
    +        # generated representation: sub-expressions joined with the '&' operator
    +        if self.strRepr is None:
    +            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
    +
    +        return self.strRepr
    +
    +    def checkRecursion( self, parseElementList ):
    +        subRecCheckList = parseElementList[:] + [ self ]
    +        for e in self.exprs:
    +            e.checkRecursion( subRecCheckList )
    +
    +
    +class ParseElementEnhance(ParserElement):
    +    """
    +    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    +    Wraps a single contained expression (C{self.expr}) and delegates parsing to it.
    +    """
    +    def __init__( self, expr, savelist=False ):
    +        super(ParseElementEnhance,self).__init__(savelist)
    +        # promote a bare string to the configured literal class
    +        if isinstance( expr, basestring ):
    +            if issubclass(ParserElement._literalStringClass, Token):
    +                expr = ParserElement._literalStringClass(expr)
    +            else:
    +                expr = ParserElement._literalStringClass(Literal(expr))
    +        self.expr = expr
    +        self.strRepr = None
    +        if expr is not None:
    +            # inherit parsing characteristics from the wrapped expression
    +            self.mayIndexError = expr.mayIndexError
    +            self.mayReturnEmpty = expr.mayReturnEmpty
    +            self.setWhitespaceChars( expr.whiteChars )
    +            self.skipWhitespace = expr.skipWhitespace
    +            self.saveAsList = expr.saveAsList
    +            self.callPreparse = expr.callPreparse
    +            self.ignoreExprs.extend(expr.ignoreExprs)
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # delegate matching to the contained expression
    +        if self.expr is not None:
    +            return self.expr._parse( instring, loc, doActions, callPreParse=False )
    +        else:
    +            raise ParseException("",loc,self.errmsg,self)
    +
    +    def leaveWhitespace( self ):
    +        self.skipWhitespace = False
    +        # NOTE(review): expr is copied *before* the None check below -- this
    +        # would raise AttributeError if expr were None; confirm intended
    +        self.expr = self.expr.copy()
    +        if self.expr is not None:
    +            self.expr.leaveWhitespace()
    +        return self
    +
    +    def ignore( self, other ):
    +        # propagate the ignore expression to the contained expression as well
    +        if isinstance( other, Suppress ):
    +            if other not in self.ignoreExprs:
    +                super( ParseElementEnhance, self).ignore( other )
    +                if self.expr is not None:
    +                    self.expr.ignore( self.ignoreExprs[-1] )
    +        else:
    +            super( ParseElementEnhance, self).ignore( other )
    +            if self.expr is not None:
    +                self.expr.ignore( self.ignoreExprs[-1] )
    +        return self
    +
    +    def streamline( self ):
    +        super(ParseElementEnhance,self).streamline()
    +        if self.expr is not None:
    +            self.expr.streamline()
    +        return self
    +
    +    def checkRecursion( self, parseElementList ):
    +        # an element appearing twice on the expansion path is a left-recursion cycle
    +        if self in parseElementList:
    +            raise RecursiveGrammarException( parseElementList+[self] )
    +        subRecCheckList = parseElementList[:] + [ self ]
    +        if self.expr is not None:
    +            self.expr.checkRecursion( subRecCheckList )
    +
    +    def validate( self, validateTrace=[] ):
    +        tmp = validateTrace[:]+[self]
    +        if self.expr is not None:
    +            self.expr.validate(tmp)
    +        self.checkRecursion( [] )
    +
    +    def __str__( self ):
    +        try:
    +            return super(ParseElementEnhance,self).__str__()
    +        except Exception:
    +            pass
    +
    +        # fall back to "ClassName:(contained expr)" form
    +        if self.strRepr is None and self.expr is not None:
    +            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
    +        return self.strRepr
    +
    +
    +class FollowedBy(ParseElementEnhance):
    +    """
    +    Lookahead matching of the given parse expression.  C{FollowedBy}
    +    does I{not} advance the parsing position within the input string, it only
    +    verifies that the specified parse expression matches at the current
    +    position.  C{FollowedBy} always returns a null token list.
    +
    +    Example::
    +        # use FollowedBy to match a label only if it is followed by a ':'
    +        data_word = Word(alphas)
    +        label = data_word + FollowedBy(':')
    +        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
    +        
    +        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
    +    prints::
    +        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
    +    """
    +    def __init__( self, expr ):
    +        super(FollowedBy,self).__init__(expr)
    +        self.mayReturnEmpty = True
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # tryParse raises ParseException on failure; on success the
    +        # position is left unchanged and no tokens are produced
    +        self.expr.tryParse( instring, loc )
    +        return loc, []
    +
    +
    +class NotAny(ParseElementEnhance):
    +    """
    +    Lookahead to disallow matching with the given parse expression.  C{NotAny}
    +    does I{not} advance the parsing position within the input string, it only
    +    verifies that the specified parse expression does I{not} match at the current
    +    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
    +    always returns a null token list.  May be constructed using the '~' operator.
    +
    +    Example::
    +        # take care not to mistake a keyword for an identifier
    +        keyword = Keyword("if") | Keyword("else")
    +        ident = ~keyword + Word(alphas)
    +    """
    +    def __init__( self, expr ):
    +        super(NotAny,self).__init__(expr)
    +        #~ self.leaveWhitespace()
    +        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
    +        self.mayReturnEmpty = True
    +        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # inverted lookahead: succeed only when the wrapped expression fails
    +        if self.expr.canParseNext(instring, loc):
    +            raise ParseException(instring, loc, self.errmsg, self)
    +        return loc, []
    +
    +    def __str__( self ):
    +        if hasattr(self,"name"):
    +            return self.name
    +
    +        if self.strRepr is None:
    +            self.strRepr = "~{" + _ustr(self.expr) + "}"
    +
    +        return self.strRepr
    +
    +class _MultipleMatch(ParseElementEnhance):
    +    """
    +    Common base for L{OneOrMore} and L{ZeroOrMore}: repeatedly matches the
    +    contained expression, optionally stopping when a C{stopOn} sentinel
    +    expression would match.
    +    """
    +    def __init__( self, expr, stopOn=None):
    +        super(_MultipleMatch, self).__init__(expr)
    +        self.saveAsList = True
    +        ender = stopOn
    +        if isinstance(ender, basestring):
    +            ender = ParserElement._literalStringClass(ender)
    +        # ~ender is a NotAny lookahead; None means "no sentinel configured"
    +        self.not_ender = ~ender if ender is not None else None
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # bind hot-path methods to locals for speed inside the match loop
    +        self_expr_parse = self.expr._parse
    +        self_skip_ignorables = self._skipIgnorables
    +        check_ender = self.not_ender is not None
    +        if check_ender:
    +            try_not_ender = self.not_ender.tryParse
    +        
    +        # must be at least one (but first see if we are the stopOn sentinel;
    +        # if so, fail)
    +        if check_ender:
    +            try_not_ender(instring, loc)
    +        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
    +        try:
    +            hasIgnoreExprs = (not not self.ignoreExprs)
    +            while 1:
    +                if check_ender:
    +                    try_not_ender(instring, loc)
    +                if hasIgnoreExprs:
    +                    preloc = self_skip_ignorables( instring, loc )
    +                else:
    +                    preloc = loc
    +                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
    +                # only accumulate non-empty results (or results carrying keys)
    +                if tmptokens or tmptokens.haskeys():
    +                    tokens += tmptokens
    +        except (ParseException,IndexError):
    +            # first failure ends the repetition; what matched so far is kept
    +            pass
    +
    +        return loc, tokens
    +        
    +class OneOrMore(_MultipleMatch):
    +    """
    +    Repetition of one or more of the given expression.
    +    
    +    Parameters:
    +     - expr - expression that must match one or more times
    +     - stopOn - (default=C{None}) - expression for a terminating sentinel
    +          (only required if the sentinel would ordinarily match the repetition 
    +          expression)          
    +
    +    Example::
    +        data_word = Word(alphas)
    +        label = data_word + FollowedBy(':')
    +        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
    +
    +        text = "shape: SQUARE posn: upper left color: BLACK"
    +        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
    +
    +        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
    +        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
    +        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
    +        
    +        # could also be written as
    +        (attr_expr * (1,)).parseString(text).pprint()
    +    """
    +
    +    # parsing behavior is entirely inherited from _MultipleMatch;
    +    # only the string representation differs
    +    def __str__( self ):
    +        if hasattr(self,"name"):
    +            return self.name
    +
    +        if self.strRepr is None:
    +            self.strRepr = "{" + _ustr(self.expr) + "}..."
    +
    +        return self.strRepr
    +
    +class ZeroOrMore(_MultipleMatch):
    +    """
    +    Optional repetition of zero or more of the given expression.
    +    
    +    Parameters:
    +     - expr - expression that must match zero or more times
    +     - stopOn - (default=C{None}) - expression for a terminating sentinel
    +          (only required if the sentinel would ordinarily match the repetition 
    +          expression)          
    +
    +    Example: similar to L{OneOrMore}
    +    """
    +    def __init__( self, expr, stopOn=None):
    +        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
    +        self.mayReturnEmpty = True
    +        
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        # unlike OneOrMore, a failure of the first match is not an error --
    +        # zero matches yields an empty token list at the original location
    +        try:
    +            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
    +        except (ParseException,IndexError):
    +            return loc, []
    +
    +    def __str__( self ):
    +        if hasattr(self,"name"):
    +            return self.name
    +
    +        if self.strRepr is None:
    +            self.strRepr = "[" + _ustr(self.expr) + "]..."
    +
    +        return self.strRepr
    +
    +class _NullToken(object):
    +    """Always-falsy, empty-string placeholder used as a 'no value' sentinel."""
    +    def __bool__(self):
    +        return False
    +    __nonzero__ = __bool__  # Python 2 spelling of __bool__
    +    def __str__(self):
    +        return ""
    +
    +# module-level sentinel distinguishing "no default supplied" from a default of None
    +_optionalNotMatched = _NullToken()
    +class Optional(ParseElementEnhance):
    +    """
    +    Optional matching of the given expression.
    +
    +    Parameters:
    +     - expr - expression that must match zero or more times
    +     - default (optional) - value to be returned if the optional expression is not found.
    +
    +    Example::
    +        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
    +        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
    +        zip.runTests('''
    +            # traditional ZIP code
    +            12345
    +            
    +            # ZIP+4 form
    +            12101-0001
    +            
    +            # invalid ZIP
    +            98765-
    +            ''')
    +    prints::
    +        # traditional ZIP code
    +        12345
    +        ['12345']
    +
    +        # ZIP+4 form
    +        12101-0001
    +        ['12101-0001']
    +
    +        # invalid ZIP
    +        98765-
    +             ^
    +        FAIL: Expected end of text (at char 5), (line:1, col:6)
    +    """
    +    def __init__( self, expr, default=_optionalNotMatched ):
    +        super(Optional,self).__init__( expr, savelist=False )
    +        self.saveAsList = self.expr.saveAsList
    +        self.defaultValue = default
    +        self.mayReturnEmpty = True
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        try:
    +            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
    +        except (ParseException,IndexError):
    +            # no match: substitute the default value if one was supplied
    +            if self.defaultValue is not _optionalNotMatched:
    +                if self.expr.resultsName:
    +                    # preserve the wrapped expression's results name on the default
    +                    tokens = ParseResults([ self.defaultValue ])
    +                    tokens[self.expr.resultsName] = self.defaultValue
    +                else:
    +                    tokens = [ self.defaultValue ]
    +            else:
    +                tokens = []
    +        return loc, tokens
    +
    +    def __str__( self ):
    +        if hasattr(self,"name"):
    +            return self.name
    +
    +        if self.strRepr is None:
    +            self.strRepr = "[" + _ustr(self.expr) + "]"
    +
    +        return self.strRepr
    +
    +class SkipTo(ParseElementEnhance):
    +    """
    +    Token for skipping over all undefined text until the matched expression is found.
    +
    +    Parameters:
    +     - expr - target expression marking the end of the data to be skipped
    +     - include - (default=C{False}) if True, the target expression is also parsed 
    +          (the skipped text and target expression are returned as a 2-element list).
    +     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
    +          comments) that might contain false matches to the target expression
    +     - failOn - (default=C{None}) define expressions that are not allowed to be 
    +          included in the skipped test; if found before the target expression is found, 
    +          the SkipTo is not a match
    +
    +    Example::
    +        report = '''
    +            Outstanding Issues Report - 1 Jan 2000
    +
    +               # | Severity | Description                               |  Days Open
    +            -----+----------+-------------------------------------------+-----------
    +             101 | Critical | Intermittent system crash                 |          6
    +              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
    +              79 | Minor    | System slow when running too many reports |         47
    +            '''
    +        integer = Word(nums)
    +        SEP = Suppress('|')
    +        # use SkipTo to simply match everything up until the next SEP
    +        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
    +        # - parse action will call token.strip() for each matched token, i.e., the description body
    +        string_data = SkipTo(SEP, ignore=quotedString)
    +        string_data.setParseAction(tokenMap(str.strip))
    +        ticket_expr = (integer("issue_num") + SEP 
    +                      + string_data("sev") + SEP 
    +                      + string_data("desc") + SEP 
    +                      + integer("days_open"))
    +        
    +        for tkt in ticket_expr.searchString(report):
    +            print tkt.dump()
    +    prints::
    +        ['101', 'Critical', 'Intermittent system crash', '6']
    +        - days_open: 6
    +        - desc: Intermittent system crash
    +        - issue_num: 101
    +        - sev: Critical
    +        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
    +        - days_open: 14
    +        - desc: Spelling error on Login ('log|n')
    +        - issue_num: 94
    +        - sev: Cosmetic
    +        ['79', 'Minor', 'System slow when running too many reports', '47']
    +        - days_open: 47
    +        - desc: System slow when running too many reports
    +        - issue_num: 79
    +        - sev: Minor
    +    """
    +    def __init__( self, other, include=False, ignore=None, failOn=None ):
    +        super( SkipTo, self ).__init__( other )
    +        self.ignoreExpr = ignore
    +        self.mayReturnEmpty = True
    +        self.mayIndexError = False
    +        self.includeMatch = include
    +        self.asList = False
    +        # promote a bare string failOn to the configured literal class
    +        if isinstance(failOn, basestring):
    +            self.failOn = ParserElement._literalStringClass(failOn)
    +        else:
    +            self.failOn = failOn
    +        self.errmsg = "No match found for "+_ustr(self.expr)
    +
    +    def parseImpl( self, instring, loc, doActions=True ):
    +        startloc = loc
    +        instrlen = len(instring)
    +        expr = self.expr
    +        # bind hot-path methods to locals for speed in the scan loop
    +        expr_parse = self.expr._parse
    +        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
    +        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
    +        
    +        # scan forward one character at a time until the target expression matches
    +        tmploc = loc
    +        while tmploc <= instrlen:
    +            if self_failOn_canParseNext is not None:
    +                # break if failOn expression matches
    +                if self_failOn_canParseNext(instring, tmploc):
    +                    break
    +                    
    +            if self_ignoreExpr_tryParse is not None:
    +                # advance past ignore expressions
    +                while 1:
    +                    try:
    +                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
    +                    except ParseBaseException:
    +                        break
    +            
    +            try:
    +                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
    +            except (ParseException, IndexError):
    +                # no match, advance loc in string
    +                tmploc += 1
    +            else:
    +                # matched skipto expr, done
    +                break
    +
    +        else:
    +            # ran off the end of the input string without matching skipto expr, fail
    +            raise ParseException(instring, loc, self.errmsg, self)
    +
    +        # build up return values
    +        loc = tmploc
    +        skiptext = instring[startloc:loc]
    +        skipresult = ParseResults(skiptext)
    +        
    +        if self.includeMatch:
    +            # re-parse the target for real this time (doActions honored)
    +            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
    +            skipresult += mat
    +
    +        return loc, skipresult
    +
    +class Forward(ParseElementEnhance):
    +    """
    +    Forward declaration of an expression to be defined later -
    +    used for recursive grammars, such as algebraic infix notation.
    +    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
    +
    +    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    +    Specifically, '|' has a lower precedence than '<<', so that::
    +        fwdExpr << a | b | c
    +    will actually be evaluated as::
    +        (fwdExpr << a) | b | c
    +    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    +    explicitly group the values inserted into the C{Forward}::
    +        fwdExpr << (a | b | c)
    +    Converting to use the '<<=' operator instead will avoid this problem.
    +
    +    See L{ParseResults.pprint} for an example of a recursive parser created using
    +    C{Forward}.
    +    """
    +    def __init__( self, other=None ):
    +        super(Forward,self).__init__( other, savelist=False )
    +
    +    def __lshift__( self, other ):
    +        # 'fwd << expr' binds the deferred expression and copies its
    +        # parsing characteristics onto this placeholder
    +        if isinstance( other, basestring ):
    +            other = ParserElement._literalStringClass(other)
    +        self.expr = other
    +        self.strRepr = None
    +        self.mayIndexError = self.expr.mayIndexError
    +        self.mayReturnEmpty = self.expr.mayReturnEmpty
    +        self.setWhitespaceChars( self.expr.whiteChars )
    +        self.skipWhitespace = self.expr.skipWhitespace
    +        self.saveAsList = self.expr.saveAsList
    +        self.ignoreExprs.extend(self.expr.ignoreExprs)
    +        return self
    +        
    +    def __ilshift__(self, other):
    +        # 'fwd <<= expr' delegates to __lshift__
    +        return self << other
    +    
    +    def leaveWhitespace( self ):
    +        self.skipWhitespace = False
    +        return self
    +
    +    def streamline( self ):
    +        if not self.streamlined:
    +            self.streamlined = True
    +            if self.expr is not None:
    +                self.expr.streamline()
    +        return self
    +
    +    def validate( self, validateTrace=[] ):
    +        # guard against infinite recursion through self-referential grammars
    +        if self not in validateTrace:
    +            tmp = validateTrace[:]+[self]
    +            if self.expr is not None:
    +                self.expr.validate(tmp)
    +        self.checkRecursion([])
    +
    +    def __str__( self ):
    +        if hasattr(self,"name"):
    +            return self.name
    +        return self.__class__.__name__ + ": ..."
    +
    +        # stubbed out for now - creates awful memory and perf issues
    +        # NOTE: everything below the return above is intentionally dead code,
    +        # kept from the fuller implementation for reference
    +        self._revertClass = self.__class__
    +        self.__class__ = _ForwardNoRecurse
    +        try:
    +            if self.expr is not None:
    +                retString = _ustr(self.expr)
    +            else:
    +                retString = "None"
    +        finally:
    +            self.__class__ = self._revertClass
    +        return self.__class__.__name__ + ": " + retString
    +
    +    def copy(self):
    +        if self.expr is not None:
    +            return super(Forward,self).copy()
    +        else:
    +            # an undefined Forward is copied as a new Forward that refers back
    +            # to this one, so a later '<<' on the original takes effect in both
    +            ret = Forward()
    +            ret <<= self
    +            return ret
    +
    +class _ForwardNoRecurse(Forward):
    +    """Temporary stand-in class that stringifies as '...' to break __str__ recursion."""
    +    def __str__( self ):
    +        return "..."
    +
    +class TokenConverter(ParseElementEnhance):
    +    """
    +    Abstract subclass of C{ParseExpression}, for converting parsed results.
    +    Subclasses override C{postParse} to transform the matched token list.
    +    """
    +    def __init__( self, expr, savelist=False ):
    +        super(TokenConverter,self).__init__( expr )#, savelist )
    +        self.saveAsList = False
    +
    +class Combine(TokenConverter):
    +    """
    +    Converter to concatenate all matching tokens to a single string.
    +    By default, the matching patterns must also be contiguous in the input string;
    +    this can be disabled by specifying C{'adjacent=False'} in the constructor.
    +
    +    Example::
    +        real = Word(nums) + '.' + Word(nums)
    +        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
    +        # will also erroneously match the following
    +        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
    +
    +        real = Combine(Word(nums) + '.' + Word(nums))
    +        print(real.parseString('3.1416')) # -> ['3.1416']
    +        # no match when there are internal spaces
    +        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
    +    """
    +    def __init__( self, expr, joinString="", adjacent=True ):
    +        super(Combine,self).__init__( expr )
    +        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
    +        if adjacent:
    +            self.leaveWhitespace()
    +        self.adjacent = adjacent
    +        self.skipWhitespace = True
    +        self.joinString = joinString
    +        self.callPreparse = True
    +
    +    def ignore( self, other ):
    +        # when adjacent, bypass ParseElementEnhance.ignore so the ignore
    +        # expression is not propagated into the contained expression
    +        if self.adjacent:
    +            ParserElement.ignore(self, other)
    +        else:
    +            super( Combine, self).ignore( other )
    +        return self
    +
    +    def postParse( self, instring, loc, tokenlist ):
    +        # replace the matched tokens with a single joined string
    +        retToks = tokenlist.copy()
    +        del retToks[:]
    +        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
    +
    +        if self.resultsName and retToks.haskeys():
    +            return [ retToks ]
    +        else:
    +            return retToks
    +
    +class Group(TokenConverter):
    +    """
    +    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
    +
    +    Example::
    +        ident = Word(alphas)
    +        num = Word(nums)
    +        term = ident | num
    +        func = ident + Optional(delimitedList(term))
    +        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
    +
    +        func = ident + Group(Optional(delimitedList(term)))
    +        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
    +    """
    +    def __init__( self, expr ):
    +        super(Group,self).__init__( expr )
    +        self.saveAsList = True
    +
    +    def postParse( self, instring, loc, tokenlist ):
    +        # nest the matched tokens one level deeper
    +        return [ tokenlist ]
    +
    +class Dict(TokenConverter):
    +    """
    +    Converter to return a repetitive expression as a list, but also as a dictionary.
    +    Each element can also be referenced using the first token in the expression as its key.
    +    Useful for tabular report scraping when the first column can be used as a item key.
    +
    +    Example::
    +        data_word = Word(alphas)
    +        label = data_word + FollowedBy(':')
    +        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
    +
    +        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
    +        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
    +        
    +        # print attributes as plain groups
    +        print(OneOrMore(attr_expr).parseString(text).dump())
    +        
    +        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
    +        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
    +        print(result.dump())
    +        
    +        # access named fields as dict entries, or output as dict
    +        print(result['shape'])        
    +        print(result.asDict())
    +    prints::
    +        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
    +
    +        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
    +        - color: light blue
    +        - posn: upper left
    +        - shape: SQUARE
    +        - texture: burlap
    +        SQUARE
    +        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
    +    See more examples at L{ParseResults} of accessing fields by results name.
    +    """
    +    def __init__( self, expr ):
    +        super(Dict,self).__init__( expr )
    +        self.saveAsList = True
    +
    +    def postParse( self, instring, loc, tokenlist ):
    +        # for each sub-token group, use its first token as the dict key
    +        for i,tok in enumerate(tokenlist):
    +            if len(tok) == 0:
    +                continue
    +            ikey = tok[0]
    +            if isinstance(ikey,int):
    +                # integer keys are converted to their string form
    +                ikey = _ustr(tok[0]).strip()
    +            if len(tok)==1:
    +                # key with no value -> empty-string value
    +                tokenlist[ikey] = _ParseResultsWithOffset("",i)
    +            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
    +                # simple key/value pair
    +                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
    +            else:
    +                # multi-token value: store everything after the key
    +                dictvalue = tok.copy() #ParseResults(i)
    +                del dictvalue[0]
    +                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
    +                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
    +                else:
    +                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
    +
    +        if self.resultsName:
    +            return [ tokenlist ]
    +        else:
    +            return tokenlist
    +
    +
    +class Suppress(TokenConverter):
    +    """
    +    Converter for ignoring the results of a parsed expression.
    +
    +    Example::
    +        source = "a, b, c,d"
    +        wd = Word(alphas)
    +        wd_list1 = wd + ZeroOrMore(',' + wd)
    +        print(wd_list1.parseString(source))
    +
    +        # often, delimiters that are useful during parsing are just in the
    +        # way afterward - use Suppress to keep them out of the parsed output
    +        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
    +        print(wd_list2.parseString(source))
    +    prints::
    +        ['a', ',', 'b', ',', 'c', ',', 'd']
    +        ['a', 'b', 'c', 'd']
    +    (See also L{delimitedList}.)
    +    """
    +    def postParse( self, instring, loc, tokenlist ):
    +        # discard whatever was matched
    +        return []
    +
    +    def suppress( self ):
    +        # already suppressed - suppressing again is a no-op
    +        return self
    +
    +
    +class OnlyOnce(object):
    +    """
    +    Wrapper for parse actions, to ensure they are only called once.
    +    A second invocation raises ParseException; call C{reset()} to re-arm.
    +    """
    +    def __init__(self, methodCall):
    +        # normalize the callable's signature to the standard (s,l,t) form
    +        self.callable = _trim_arity(methodCall)
    +        self.called = False
    +    def __call__(self,s,l,t):
    +        if not self.called:
    +            results = self.callable(s,l,t)
    +            self.called = True
    +            return results
    +        raise ParseException(s,l,"")
    +    def reset(self):
    +        # allow the wrapped parse action to fire again
    +        self.called = False
    +
    +def traceParseAction(f):
    +    """
    +    Decorator for debugging parse actions. 
    +    
    +    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
    +    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
    +
    +    Example::
    +        wd = Word(alphas)
    +
    +        @traceParseAction
    +        def remove_duplicate_chars(tokens):
    +            return ''.join(sorted(set(''.join(tokens))))
    +
    +        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
    +        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
    +    prints::
    +        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
    +        <3:
    +            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
    +        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
    +        try:
    +            ret = f(*paArgs)
    +        except Exception as exc:
    +            sys.stderr.write( "< ['aa', 'bb', 'cc']
    +        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
    +    """
    +    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
    +    if combine:
    +        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
    +    else:
    +        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
    +
    +def countedArray( expr, intExpr=None ):
    +    """
    +    Helper to define a counted list of expressions.
    +    This helper defines a pattern of the form::
    +        integer expr expr expr...
    +    where the leading integer tells how many expr expressions follow.
    +    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    +    
    +    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
    +
    +    Example::
    +        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
    +
    +        # in this parser, the leading integer value is given in binary,
    +        # '10' indicating that 2 values are in the array
    +        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
    +        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
    +    """
    +    # Forward placeholder, re-bound at parse time once the count is known
    +    arrayExpr = Forward()
    +    def countFieldParseAction(s,l,t):
    +        n = t[0]
    +        # n copies of expr (or the empty expression when n == 0)
    +        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
    +        return []
    +    if intExpr is None:
    +        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    +    else:
    +        # copy so the caller's expression is not mutated below
    +        intExpr = intExpr.copy()
    +    intExpr.setName("arrayLen")
    +    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    +    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
    +
    +def _flatten(L):
    +    ret = []
    +    for i in L:
    +        if isinstance(i,list):
    +            ret.extend(_flatten(i))
    +        else:
    +            ret.append(i)
    +    return ret
    +
    +def matchPreviousLiteral(expr):
    +    """
    +    Helper to define an expression that is indirectly defined from
    +    the tokens matched in a previous expression, that is, it looks
    +    for a 'repeat' of a previous expression.  For example::
    +        first = Word(nums)
    +        second = matchPreviousLiteral(first)
    +        matchExpr = first + ":" + second
    +    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    +    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    +    If this is not desired, use C{matchPreviousExpr}.
    +    Do I{not} use with packrat parsing enabled.
    +    """
    +    rep = Forward()
    +    def copyTokenToRepeater(s,l,t):
    +        if t:
    +            if len(t) == 1:
    +                rep << t[0]
    +            else:
    +                # flatten t tokens
    +                tflat = _flatten(t.asList())
    +                rep << And(Literal(tt) for tt in tflat)
    +        else:
    +            rep << Empty()
    +    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    +    rep.setName('(prev) ' + _ustr(expr))
    +    return rep
    +
def matchPreviousExpr(expr):
    """
    Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do I{not} use with packrat parsing enabled.
    """
    rep = Forward()
    # rep re-parses with a *copy* of expr, then a parse action (installed
    # below, at match time) compares its tokens to the earlier match
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s,l,t):
        # runs when the original expr matches: capture its flattened tokens
        # in a closure ...
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            # ... and reject any later match whose tokens differ
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                raise ParseException("",0,"")
        # setParseAction (not add) so each new match replaces the checker
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
    +
    +def _escapeRegexRangeChars(s):
    +    #~  escape these chars: ^-]
    +    for c in r"\^-]":
    +        s = s.replace(c,_bslash+c)
    +    s = s.replace("\n",r"\n")
    +    s = s.replace("\t",r"\t")
    +    return _ustr(s)
    +
def oneOf( strs, caseless=False, useRegex=True ):
    """
    Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.

    Parameters:
     - strs - a string of space-delimited literals, or a collection of string literals
     - caseless - (default=C{False}) - treat all literals as caseless
     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
          if creating a C{Regex} raises an exception)

    Example::
        comp_oper = oneOf("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
    prints::
        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # comparison strategies: exact-equality test, and "a is a prefix of b"
    # (i.e. a would mask b if tried first)
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal

    symbols = []
    if isinstance(strs,basestring):
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        warnings.warn("Invalid argument to oneOf, expected string or iterable",
                SyntaxWarning, stacklevel=2)
    if not symbols:
        return NoMatch()

    # Dedup and reorder in place so longer symbols are tried before any
    # shorter symbol that is a prefix of them: for each position i, scan the
    # rest; drop exact duplicates, and hoist any symbol masked by cur to
    # position i.  The for/else advances i only when no change was made.
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1

    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            # if every symbol is a single character, a character class is
            # cheaper than an alternation
            if len(symbols)==len("".join(symbols)):
                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
            else:
                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
        except Exception:
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)


    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
    +
    +def dictOf( key, value ):
    +    """
    +    Helper to easily and clearly define a dictionary by specifying the respective patterns
    +    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
    +    in the proper order.  The key pattern can include delimiting markers or punctuation,
    +    as long as they are suppressed, thereby leaving the significant key text.  The value
    +    pattern can include named results, so that the C{Dict} results can include named token
    +    fields.
    +
    +    Example::
    +        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
    +        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
    +        print(OneOrMore(attr_expr).parseString(text).dump())
    +        
    +        attr_label = label
    +        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
    +
    +        # similar to Dict, but simpler call format
    +        result = dictOf(attr_label, attr_value).parseString(text)
    +        print(result.dump())
    +        print(result['shape'])
    +        print(result.shape)  # object attribute access works too
    +        print(result.asDict())
    +    prints::
    +        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
    +        - color: light blue
    +        - posn: upper left
    +        - shape: SQUARE
    +        - texture: burlap
    +        SQUARE
    +        SQUARE
    +        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    +    """
    +    return Dict( ZeroOrMore( Group ( key + value ) ) )
    +
    +def originalTextFor(expr, asString=True):
    +    """
    +    Helper to return the original, untokenized text for a given expression.  Useful to
    +    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    +    revert separate tokens with intervening whitespace back to the original matching
    +    input text. By default, returns astring containing the original parsed text.  
    +       
    +    If the optional C{asString} argument is passed as C{False}, then the return value is a 
    +    C{L{ParseResults}} containing any results names that were originally matched, and a 
    +    single token containing the original matched text from the input string.  So if 
    +    the expression passed to C{L{originalTextFor}} contains expressions with defined
    +    results names, you must set C{asString} to C{False} if you want to preserve those
    +    results name values.
    +
    +    Example::
    +        src = "this is test  bold text  normal text "
    +        for tag in ("b","i"):
    +            opener,closer = makeHTMLTags(tag)
    +            patt = originalTextFor(opener + SkipTo(closer) + closer)
    +            print(patt.searchString(src)[0])
    +    prints::
    +        [' bold text ']
    +        ['text']
    +    """
    +    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    +    endlocMarker = locMarker.copy()
    +    endlocMarker.callPreparse = False
    +    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    +    if asString:
    +        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    +    else:
    +        def extractText(s,l,t):
    +            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
    +    matchExpr.setParseAction(extractText)
    +    matchExpr.ignoreExprs = expr.ignoreExprs
    +    return matchExpr
    +
    +def ungroup(expr): 
    +    """
    +    Helper to undo pyparsing's default grouping of And expressions, even
    +    if all but one are non-empty.
    +    """
    +    return TokenConverter(expr).setParseAction(lambda t:t[0])
    +
    +def locatedExpr(expr):
    +    """
    +    Helper to decorate a returned token with its starting and ending locations in the input string.
    +    This helper adds the following results names:
    +     - locn_start = location where matched expression begins
    +     - locn_end = location where matched expression ends
    +     - value = the actual parsed results
    +
    +    Be careful if the input text contains C{} characters, you may want to call
    +    C{L{ParserElement.parseWithTabs}}
    +
    +    Example::
    +        wd = Word(alphas)
    +        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
    +            print(match)
    +    prints::
    +        [[0, 'ljsdf', 5]]
    +        [[8, 'lksdjjf', 15]]
    +        [[18, 'lkkjj', 23]]
    +    """
    +    locator = Empty().setParseAction(lambda s,l,t: l)
    +    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
    +
    +
# convenience constants for positional expressions
empty       = Empty().setName("empty")
lineStart   = LineStart().setName("lineStart")
lineEnd     = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd   = StringEnd().setName("stringEnd")

# building blocks for the '[...]' parser used by srange() below:
# backslash-escaped punctuation (exactly 2 chars) -> the bare character
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
# '\x##'/'\0x##' hex escape -> the corresponding character; lstrip(r'\0x')
# also strips leading zero digits, which cannot change the hex value
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
# '\0##' octal escape -> the corresponding character
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
# one character spec: an escape form, or any literal char except '\' and ']'
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
# 'a-z' style range: two single-char specs joined by '-'
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
# full bracket expression, with optional leading '^' captured as "negate"
# and the char/range list captured as "body"
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
    +
    +def srange(s):
    +    r"""
    +    Helper to easily define string ranges for use in Word construction.  Borrows
    +    syntax from regexp '[]' string range definitions::
    +        srange("[0-9]")   -> "0123456789"
    +        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
    +        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    +    The input string must be enclosed in []'s, and the returned string is the expanded
    +    character set joined into a single string.
    +    The values enclosed in the []'s may be:
    +     - a single character
    +     - an escaped character with a leading backslash (such as C{\-} or C{\]})
    +     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
    +         (C{\0x##} is also supported for backwards compatibility) 
    +     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
    +     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
    +     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
    +    """
    +    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
    +    try:
    +        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
    +    except Exception:
    +        return ""
    +
    +def matchOnlyAtCol(n):
    +    """
    +    Helper method for defining parse actions that require matching at a specific
    +    column in the input text.
    +    """
    +    def verifyCol(strg,locn,toks):
    +        if col(locn,strg) != n:
    +            raise ParseException(strg,locn,"matched token not at column %d" % n)
    +    return verifyCol
    +
    +def replaceWith(replStr):
    +    """
    +    Helper method for common parse actions that simply return a literal value.  Especially
    +    useful when used with C{L{transformString}()}.
    +
    +    Example::
    +        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
    +        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
    +        term = na | num
    +        
    +        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
    +    """
    +    return lambda s,l,t: [replStr]
    +
    +def removeQuotes(s,l,t):
    +    """
    +    Helper parse action for removing quotation marks from parsed quoted strings.
    +
    +    Example::
    +        # by default, quotation marks are included in parsed results
    +        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
    +
    +        # use removeQuotes to strip quotation marks from parsed results
    +        quotedString.setParseAction(removeQuotes)
    +        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
    +    """
    +    return t[0][1:-1]
    +
    +def tokenMap(func, *args):
    +    """
    +    Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional 
    +    args are passed, they are forwarded to the given function as additional arguments after
    +    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
    +    parsed data to an integer using base 16.
    +
    +    Example (compare the last to example in L{ParserElement.transformString}::
    +        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
    +        hex_ints.runTests('''
    +            00 11 22 aa FF 0a 0d 1a
    +            ''')
    +        
    +        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
    +        OneOrMore(upperword).runTests('''
    +            my kingdom for a horse
    +            ''')
    +
    +        wd = Word(alphas).setParseAction(tokenMap(str.title))
    +        OneOrMore(wd).setParseAction(' '.join).runTests('''
    +            now is the winter of our discontent made glorious summer by this sun of york
    +            ''')
    +    prints::
    +        00 11 22 aa FF 0a 0d 1a
    +        [0, 17, 34, 170, 255, 10, 13, 26]
    +
    +        my kingdom for a horse
    +        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
    +
    +        now is the winter of our discontent made glorious summer by this sun of york
    +        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
    +    """
    +    def pa(s,l,t):
    +        return [func(tokn, *args) for tokn in t]
    +
    +    try:
    +        func_name = getattr(func, '__name__', 
    +                            getattr(func, '__class__').__name__)
    +    except Exception:
    +        func_name = str(func)
    +    pa.__name__ = func_name
    +
    +    return pa
    +
# stringify each token via _ustr before casing, so non-string tokens work too
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""

downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
    +    
    +def _makeTags(tagStr, xml):
    +    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    +    if isinstance(tagStr,basestring):
    +        resname = tagStr
    +        tagStr = Keyword(tagStr, caseless=not xml)
    +    else:
    +        resname = tagStr.name
    +
    +    tagAttrName = Word(alphas,alphanums+"_-:")
    +    if (xml):
    +        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
    +        openTag = Suppress("<") + tagStr("tag") + \
    +                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
    +                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    +    else:
    +        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
    +        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
    +        openTag = Suppress("<") + tagStr("tag") + \
    +                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
    +                Optional( Suppress("=") + tagAttrValue ) ))) + \
    +                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    +    closeTag = Combine(_L("")
    +
    +    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
    +    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("" % resname)
    +    openTag.tag = resname
    +    closeTag.tag = resname
    +    return openTag, closeTag
    +
    +def makeHTMLTags(tagStr):
    +    """
    +    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
    +    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
    +
    +    Example::
    +        text = 'More info at the pyparsing wiki page'
    +        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
    +        a,a_end = makeHTMLTags("A")
    +        link_expr = a + SkipTo(a_end)("link_text") + a_end
    +        
    +        for link in link_expr.searchString(text):
    +            # attributes in the  tag (like "href" shown here) are also accessible as named results
    +            print(link.link_text, '->', link.href)
    +    prints::
    +        pyparsing -> http://pyparsing.wikispaces.com
    +    """
    +    return _makeTags( tagStr, False )
    +
    +def makeXMLTags(tagStr):
    +    """
    +    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
    +    tags only in the given upper/lower case.
    +
    +    Example: similar to L{makeHTMLTags}
    +    """
    +    return _makeTags( tagStr, True )
    +
    +def withAttribute(*args,**attrDict):
    +    """
    +    Helper to create a validating parse action to be used with start tags created
    +    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    +    with a required attribute value, to avoid false matches on common tags such as
    +    C{} or C{
    }. + + Call C{withAttribute} with a series of attribute names and values. Specify the list + of filter attributes names and values as: + - keyword arguments, as in C{(align="right")}, or + - as an explicit dict with C{**} operator, when an attribute name is also a Python + reserved word, as in C{**{"class":"Customer", "align":"right"}} + - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) + For attribute names with a namespace prefix, you must use the second form. Attribute + names are matched insensitive to upper/lower case. + + If just testing for C{class} (with or without a namespace), use C{L{withClass}}. + + To verify that the attribute exists, but without specifying a value, pass + C{withAttribute.ANY_VALUE} as the value. + + Example:: + html = ''' +
    + Some text +
    1 4 0 1 0
    +
    1,3 2,3 1,1
    +
    this has no type
    +
    + + ''' + div,div_end = makeHTMLTags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().setParseAction(withAttribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attrDict.items() + attrs = [(k,v) for k,v in attrs] + def pa(s,l,tokens): + for attrName,attrValue in attrs: + if attrName not in tokens: + raise ParseException(s,l,"no matching attribute " + attrName) + if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % + (attrName, tokens[attrName], attrValue)) + return pa +withAttribute.ANY_VALUE = object() + +def withClass(classname, namespace=''): + """ + Simplified version of C{L{withAttribute}} when matching on a div class - made + difficult because C{class} is a reserved word in Python. + + Example:: + html = ''' +
    + Some text +
    1 4 0 1 0
    +
    1,3 2,3 1,1
    +
    this <div> has no class
    +
    + + ''' + div,div_end = makeHTMLTags("div") + div_grid = div().setParseAction(withClass("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.searchString(html): + print(grid_header.body) + + div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.searchString(html): + print(div_header.body) + prints:: + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "%s:class" % namespace if namespace else "class" + return withAttribute(**{classattr : classname}) + +opAssoc = _Constants() +opAssoc.LEFT = object() +opAssoc.RIGHT = object() + +def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): + """ + Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary or + binary, left- or right-associative. Parse actions can also be attached + to operator expressions. The generated parser will also recognize the use + of parentheses to override operator precedences (see example below). + + Note: if you define a deep operator list, you may see performance issues + when using infixNotation. See L{ParserElement.enablePackrat} for a + mechanism to potentially improve your parser performance. 
+ + Parameters: + - baseExpr - expression representing the most basic element for the nested + - opList - list of tuples, one for each operator precedence level in the + expression grammar; each tuple is of the form + (opExpr, numTerms, rightLeftAssoc, parseAction), where: + - opExpr is the pyparsing expression for the operator; + may also be a string, which will be converted to a Literal; + if numTerms is 3, opExpr is a tuple of two expressions, for the + two operators separating the 3 terms + - numTerms is the number of terms for this operator (must + be 1, 2, or 3) + - rightLeftAssoc is the indicator whether the operator is + right or left associative, using the pyparsing-defined + constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. + - parseAction is the parse action to be associated with + expressions matching this operator expression (the + parse action tuple member may be omitted); if the parse action + is passed a tuple or list of functions, this is equivalent to + calling C{setParseAction(*fn)} (L{ParserElement.setParseAction}) + - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) + - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) + + Example:: + # simple example of four-function arithmetic with ints and variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infixNotation(integer | varname, + [ + ('-', 1, opAssoc.RIGHT), + (oneOf('* /'), 2, opAssoc.LEFT), + (oneOf('+ -'), 2, opAssoc.LEFT), + ]) + + arith_expr.runTests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', fullDump=False) + prints:: + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + ret = Forward() + lastExpr = baseExpr | ( lpar + ret + rpar ) + for i,operDef in enumerate(opList): + opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] + termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr + if arity == 3: + 
if opExpr is None or len(opExpr) != 2: + raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") + opExpr1, opExpr2 = opExpr + thisExpr = Forward().setName(termName) + if rightLeftAssoc == opAssoc.LEFT: + if arity == 1: + matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + else: + matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ + Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + elif rightLeftAssoc == opAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Optional): + opExpr = Optional(opExpr) + matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + elif arity == 2: + if opExpr is not None: + matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + else: + matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) + elif arity == 3: + matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ + Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) + else: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + else: + raise ValueError("operator must indicate right or left associativity") + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.setParseAction(*pa) + else: + matchExpr.setParseAction(pa) + thisExpr <<= ( matchExpr.setName(termName) | lastExpr ) + lastExpr = thisExpr + ret <<= lastExpr + return ret + +operatorPrecedence = infixNotation +"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future 
release.""" + +dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") +sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") +quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") +unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") + +def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): + """ + Helper method for defining nested lists enclosed in opening and closing + delimiters ("(" and ")" are the default). + + Parameters: + - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression + - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression + - content - expression for items within the nested lists (default=C{None}) + - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) + + If an expression is not provided for the content argument, the nested + expression will capture all whitespace-delimited content between delimiters + as a list of separate values. + + Use the C{ignoreExpr} argument to define expressions that may contain + opening or closing characters that should not be treated as opening + or closing characters for nesting, such as quotedString or a comment + expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. + The default is L{quotedString}, but if no expressions are to be ignored, + then pass C{None} for this argument. 
+ + Example:: + data_type = oneOf("void int short long char float double") + decl_data_type = Combine(data_type + Optional(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR,RPAR = map(Suppress, "()") + + code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Optional(delimitedList(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(cStyleComment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.searchString(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + prints:: + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener,basestring) and isinstance(closer,basestring): + if len(opener) == 1 and len(closer)==1: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS + ).setParseAction(lambda t:t[0].strip())) + else: + if ignoreExpr is not None: + content = (Combine(OneOrMore(~ignoreExpr + + ~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) + ).setParseAction(lambda t:t[0].strip())) + else: + raise ValueError("opening and closing arguments must be strings if 
no content expression is given") + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) + else: + ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) + ret.setName('nested %s%s expression' % (opener,closer)) + return ret + +def indentedBlock(blockStatementExpr, indentStack, indent=True): + """ + Helper method for defining space-delimited indentation blocks, such as + those used to define block statements in Python source code. + + Parameters: + - blockStatementExpr - expression defining syntax of statement that + is repeated within the indented block + - indentStack - list created by caller to manage indentation stack + (multiple statementWithIndentedBlock expressions within a single grammar + should share a common indentStack) + - indent - boolean indicating whether block must be indented beyond the + the current level; set to False for block of left-most statements + (default=C{True}) + + A valid block must contain at least one C{blockStatement}. 
+ + Example:: + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group( funcDecl + func_body ) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << ( funcDef | assignment | identifier ) + + module_body = OneOrMore(stmt) + + parseTree = module_body.parseString(data) + parseTree.pprint() + prints:: + [['def', + 'A', + ['(', 'z', ')'], + ':', + [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], + 'B', + ['def', + 'BB', + ['(', 'a', 'b', 'c', ')'], + ':', + [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], + 'C', + 'D', + ['def', + 'spam', + ['(', 'x', 'y', ')'], + ':', + [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] + """ + def checkPeerIndent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if curCol != indentStack[-1]: + if curCol > indentStack[-1]: + raise ParseFatalException(s,l,"illegal nesting") + raise ParseException(s,l,"not a peer entry") + + def checkSubIndent(s,l,t): + curCol = col(l,s) + if curCol > indentStack[-1]: + indentStack.append( curCol ) + else: + raise ParseException(s,l,"not a subentry") + + def checkUnindent(s,l,t): + if l >= len(s): return + curCol = col(l,s) + if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): + raise ParseException(s,l,"not an unindent") + indentStack.pop() + + NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) + INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') + PEER = 
Empty().setParseAction(checkPeerIndent).setName('') + UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') + if indent: + smExpr = Group( Optional(NL) + + #~ FollowedBy(blockStatementExpr) + + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) + else: + smExpr = Group( Optional(NL) + + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) + blockStatementExpr.ignore(_bslash + LineEnd()) + return smExpr.setName('indented block') + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) +_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) +commonHTMLEntity = Regex('&(?P' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") +def replaceHTMLEntity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + +# it's easy to get these comment structures wrong - they're very common, so may as well make them available +cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") +"Comment of the form C{/* ... */}" + +htmlComment = Regex(r"").setName("HTML comment") +"Comment of the form C{}" + +restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") +dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") +"Comment of the form C{// ... (to end of line)}" + +cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") +"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" + +javaStyleComment = cppStyleComment +"Same as C{L{cppStyleComment}}" + +pythonStyleComment = Regex(r"#.*").setName("Python style comment") +"Comment of the form C{# ... 
(to end of line)}" + +_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + + Optional( Word(" \t") + + ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") +commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") +"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. + This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """ + Here are some common low-level expressions that may be useful in jump-starting parser development: + - numeric forms (L{integers}, L{reals}, L{scientific notation}) + - common L{programming identifiers} + - network addresses (L{MAC}, L{IPv4}, L{IPv6}) + - ISO8601 L{dates} and L{datetime} + - L{UUID} + - L{comma-separated list} + Parse actions: + - C{L{convertToInteger}} + - C{L{convertToFloat}} + - C{L{convertToDate}} + - C{L{convertToDatetime}} + - C{L{stripHTMLTags}} + - C{L{upcaseTokens}} + - C{L{downcaseTokens}} + + Example:: + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + prints:: + # any int or real number, returned as the appropriate type + 100 + 
[100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convertToInteger = tokenMap(int) + """ + Parse action for converting parsed integers to Python int + """ + + convertToFloat = tokenMap(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).setName("integer").setParseAction(convertToInteger) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.addParseAction(lambda t: t[0]/t[-1]) + + mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.addParseAction(sum) + + real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) + """expression that parses a floating point number and returns a float""" + + 
sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) + """expression that parses a floating point number with optional scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) + """any int or real number, returned as float""" + + identifier = Word(alphas+'_', alphanums+'_').setName("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") + "IPv4 address (C{0.0.0.0 - 255.255.255.255})" + + _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") + _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") + _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") + _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") + ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convertToDate(fmt="%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) + + Example:: + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + prints:: + [datetime.date(1999, 12, 31)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt).date() + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + @staticmethod + def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): + """ + Helper to create a parse action for converting parsed datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) + + Example:: + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + prints:: + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + def cvt_fn(s,l,t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + return cvt_fn + + iso8601_date = Regex(r'(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?').setName("ISO8601 date") + "ISO8601 date (C{yyyy-mm-dd})" + + iso8601_datetime = Regex(r'(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") + "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" + + uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") + "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" + + _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() + @staticmethod + def stripHTMLTags(s, l, tokens): + 
""" + Parse action to remove HTML tags from web page HTML source + + Example:: + # strip HTML links from normal text + text = 'More info at the
    pyparsing wiki page' + td,td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + + print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' + """ + return pyparsing_common._html_stripper.transformString(tokens[0]) + + _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') + + Optional( White(" \t") ) ) ).streamline().setName("commaItem") + comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) + """Parse action to convert tokens to upper case.""" + + downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) + """Parse action to convert tokens to lower case.""" + + +if __name__ == "__main__": + + selectToken = CaselessLiteral("select") + fromToken = CaselessLiteral("from") + + ident = Word(alphas, alphanums + "_$") + + columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + columnNameList = Group(delimitedList(columnName)).setName("columns") + columnSpec = ('*' | columnNameList) + + tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) + tableNameList = Group(delimitedList(tableName)).setName("tables") + + simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") + + # demo runTests method, including embedded comments in test string + simpleSQL.runTests(""" + # '*' as column list and dotted table name + select * from SYS.XYZZY + + # caseless match on "SELECT", and casts back to "select" + SELECT * from XYZZY, ABC + + # list of column names, and mixed case SELECT keyword + Select AA,BB,CC from Sys.dual + + # multiple tables + Select A, B, C from Sys.dual, Table2 + + # invalid 
SELECT keyword - should fail + Xelect A, B, C from Sys.dual + + # incomplete command - should fail + Select + + # invalid column name - should fail + Select ^^^ frox Sys.dual + + """) + + pyparsing_common.number.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + # any int or real number, returned as float + pyparsing_common.fnumber.runTests(""" + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + """) + + pyparsing_common.hex_integer.runTests(""" + 100 + FF + """) + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(""" + 12345678-1234-5678-1234-567812345678 + """) diff --git a/Shared/lib/python3.4/site-packages/setuptools/_vendor/six.py b/Shared/lib/python3.4/site-packages/setuptools/_vendor/six.py new file mode 100644 index 0000000..190c023 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/_vendor/six.py @@ -0,0 +1,868 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2015 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.10.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. 
+ delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + 
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", 
"ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." 
+ attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", 
"moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + 
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = 
_urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + 
_meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, 
**kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = 
getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + if from_value is None: + raise value + raise value from from_value +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + raise value from from_value +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
+ if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(meta): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. 
Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/Shared/lib/python3.4/site-packages/setuptools/archive_util.py b/Shared/lib/python3.4/site-packages/setuptools/archive_util.py index b3c9fa5..8143604 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/archive_util.py +++ b/Shared/lib/python3.4/site-packages/setuptools/archive_util.py @@ -1,24 +1,26 @@ """Utilities for extracting common archive formats""" - -__all__ = [ - "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", - "UnrecognizedFormat", "extraction_drivers", "unpack_directory", -] - import zipfile import tarfile import os import shutil import posixpath import contextlib -from pkg_resources import ensure_directory, ContextualZipFile from distutils.errors import DistutilsError +from pkg_resources import ensure_directory + +__all__ = [ + "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter", + "UnrecognizedFormat", "extraction_drivers", "unpack_directory", +] + + class UnrecognizedFormat(DistutilsError): """Couldn't recognize the archive type""" -def default_filter(src,dst): + +def default_filter(src, dst): """The default progress/filter callback; returns True for all files""" return dst @@ -96,7 +98,7 @@ def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): if not zipfile.is_zipfile(filename): raise UnrecognizedFormat("%s is not a zip file" % (filename,)) - with ContextualZipFile(filename) as z: + with zipfile.ZipFile(filename) as z: for info in z.infolist(): name = info.filename @@ -167,4 +169,5 @@ def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): pass return True 
+ extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile diff --git a/Shared/lib/python3.4/site-packages/setuptools/build_meta.py b/Shared/lib/python3.4/site-packages/setuptools/build_meta.py new file mode 100644 index 0000000..0067a7a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/build_meta.py @@ -0,0 +1,182 @@ +"""A PEP 517 interface to setuptools + +Previously, when a user or a command line tool (let's call it a "frontend") +needed to make a request of setuptools to take a certain action, for +example, generating a list of installation requirements, the frontend would +would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line. + +PEP 517 defines a different method of interfacing with setuptools. Rather +than calling "setup.py" directly, the frontend should: + + 1. Set the current directory to the directory with a setup.py file + 2. Import this module into a safe python interpreter (one in which + setuptools can potentially set global variables or crash hard). + 3. Call one of the functions defined in PEP 517. + +What each function does is defined in PEP 517. However, here is a "casual" +definition of the functions (this definition should not be relied on for +bug reports or API stability): + + - `build_wheel`: build a wheel in the folder and return the basename + - `get_requires_for_build_wheel`: get the `setup_requires` to build + - `prepare_metadata_for_build_wheel`: get the `install_requires` + - `build_sdist`: build an sdist in the folder and return the basename + - `get_requires_for_build_sdist`: get the `setup_requires` to build + +Again, this is not a formal definition! Just a "taste" of the module. 
+""" + +import os +import sys +import tokenize +import shutil +import contextlib + +import setuptools +import distutils + + +class SetupRequirementsError(BaseException): + def __init__(self, specifiers): + self.specifiers = specifiers + + +class Distribution(setuptools.dist.Distribution): + def fetch_build_eggs(self, specifiers): + raise SetupRequirementsError(specifiers) + + @classmethod + @contextlib.contextmanager + def patch(cls): + """ + Replace + distutils.dist.Distribution with this class + for the duration of this context. + """ + orig = distutils.core.Distribution + distutils.core.Distribution = cls + try: + yield + finally: + distutils.core.Distribution = orig + + +def _to_str(s): + """ + Convert a filename to a string (on Python 2, explicitly + a byte string, not Unicode) as distutils checks for the + exact type str. + """ + if sys.version_info[0] == 2 and not isinstance(s, str): + # Assume it's Unicode, as that's what the PEP says + # should be provided. + return s.encode(sys.getfilesystemencoding()) + return s + + +def _run_setup(setup_script='setup.py'): + # Note that we can reuse our build directory between calls + # Correctness comes first, then optimization later + __file__ = setup_script + __name__ = '__main__' + f = getattr(tokenize, 'open', open)(__file__) + code = f.read().replace('\\r\\n', '\\n') + f.close() + exec(compile(code, __file__, 'exec'), locals()) + + +def _fix_config(config_settings): + config_settings = config_settings or {} + config_settings.setdefault('--global-option', []) + return config_settings + + +def _get_build_requires(config_settings, requirements): + config_settings = _fix_config(config_settings) + + sys.argv = sys.argv[:1] + ['egg_info'] + \ + config_settings["--global-option"] + try: + with Distribution.patch(): + _run_setup() + except SetupRequirementsError as e: + requirements += e.specifiers + + return requirements + + +def _get_immediate_subdirectories(a_dir): + return [name for name in os.listdir(a_dir) + if 
os.path.isdir(os.path.join(a_dir, name))] + + +def get_requires_for_build_wheel(config_settings=None): + config_settings = _fix_config(config_settings) + return _get_build_requires(config_settings, requirements=['setuptools', 'wheel']) + + +def get_requires_for_build_sdist(config_settings=None): + config_settings = _fix_config(config_settings) + return _get_build_requires(config_settings, requirements=['setuptools']) + + +def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None): + sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)] + _run_setup() + + dist_info_directory = metadata_directory + while True: + dist_infos = [f for f in os.listdir(dist_info_directory) + if f.endswith('.dist-info')] + + if len(dist_infos) == 0 and \ + len(_get_immediate_subdirectories(dist_info_directory)) == 1: + dist_info_directory = os.path.join( + dist_info_directory, os.listdir(dist_info_directory)[0]) + continue + + assert len(dist_infos) == 1 + break + + # PEP 517 requires that the .dist-info directory be placed in the + # metadata_directory. 
To comply, we MUST copy the directory to the root + if dist_info_directory != metadata_directory: + shutil.move( + os.path.join(dist_info_directory, dist_infos[0]), + metadata_directory) + shutil.rmtree(dist_info_directory, ignore_errors=True) + + return dist_infos[0] + + +def build_wheel(wheel_directory, config_settings=None, + metadata_directory=None): + config_settings = _fix_config(config_settings) + wheel_directory = os.path.abspath(wheel_directory) + sys.argv = sys.argv[:1] + ['bdist_wheel'] + \ + config_settings["--global-option"] + _run_setup() + if wheel_directory != 'dist': + shutil.rmtree(wheel_directory) + shutil.copytree('dist', wheel_directory) + + wheels = [f for f in os.listdir(wheel_directory) + if f.endswith('.whl')] + + assert len(wheels) == 1 + return wheels[0] + + +def build_sdist(sdist_directory, config_settings=None): + config_settings = _fix_config(config_settings) + sdist_directory = os.path.abspath(sdist_directory) + sys.argv = sys.argv[:1] + ['sdist'] + \ + config_settings["--global-option"] + \ + ["--dist-dir", sdist_directory] + _run_setup() + + sdists = [f for f in os.listdir(sdist_directory) + if f.endswith('.tar.gz')] + + assert len(sdists) == 1 + return sdists[0] diff --git a/Shared/lib/python3.4/site-packages/setuptools/cli-arm-32.exe b/Shared/lib/python3.4/site-packages/setuptools/cli-arm-32.exe deleted file mode 100644 index 2f40402..0000000 Binary files a/Shared/lib/python3.4/site-packages/setuptools/cli-arm-32.exe and /dev/null differ diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/__init__.py b/Shared/lib/python3.4/site-packages/setuptools/command/__init__.py index 3fb2f6d..fe619e2 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/__init__.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/__init__.py @@ -2,7 +2,8 @@ __all__ = [ 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop', 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts', 
'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts', - 'register', 'bdist_wininst', 'upload_docs', 'upload', + 'register', 'bdist_wininst', 'upload_docs', 'upload', 'build_clib', + 'dist_info', ] from distutils.command.bdist import bdist @@ -10,7 +11,6 @@ import sys from setuptools.command import install_scripts - if 'egg' not in bdist.format_commands: bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") bdist.format_commands.append('egg') diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/bdist_egg.py b/Shared/lib/python3.4/site-packages/setuptools/command/bdist_egg.py index 9cebd7f..9f8df91 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/bdist_egg.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/bdist_egg.py @@ -8,8 +8,9 @@ from distutils import log from types import CodeType import sys import os -import marshal +import re import textwrap +import marshal from setuptools.extern import six @@ -39,6 +40,16 @@ def strip_module(filename): return filename +def sorted_walk(dir): + """Do os.walk in a reproducible way, + independent of indeterministic filesystem readdir order + """ + for base, dirs, files in os.walk(dir): + dirs.sort() + files.sort() + yield base, dirs, files + + def write_stub(resource, pyfile): _stub_template = textwrap.dedent(""" def __bootstrap__(): @@ -129,7 +140,7 @@ class bdist_egg(Command): self.distribution.data_files.append(item) try: - log.info("installing package data to %s" % self.bdist_dir) + log.info("installing package data to %s", self.bdist_dir) self.call_command('install_data', force=0, root=None) finally: self.distribution.data_files = old @@ -152,7 +163,7 @@ class bdist_egg(Command): self.run_command("egg_info") # We run install_lib before install_data, because some data hacks # pull their data path from the install_lib command. 
- log.info("installing library code to %s" % self.bdist_dir) + log.info("installing library code to %s", self.bdist_dir) instcmd = self.get_finalized_command('install') old_root = instcmd.root instcmd.root = None @@ -169,7 +180,7 @@ class bdist_egg(Command): pyfile = os.path.join(self.bdist_dir, strip_module(filename) + '.py') self.stubs.append(pyfile) - log.info("creating stub loader for %s" % ext_name) + log.info("creating stub loader for %s", ext_name) if not self.dry_run: write_stub(os.path.basename(ext_name), pyfile) to_compile.append(pyfile) @@ -186,14 +197,14 @@ class bdist_egg(Command): self.mkpath(egg_info) if self.distribution.scripts: script_dir = os.path.join(egg_info, 'scripts') - log.info("installing scripts to %s" % script_dir) + log.info("installing scripts to %s", script_dir) self.call_command('install_scripts', install_dir=script_dir, no_ep=1) self.copy_metadata_to(egg_info) native_libs = os.path.join(egg_info, "native_libs.txt") if all_outputs: - log.info("writing %s" % native_libs) + log.info("writing %s", native_libs) if not self.dry_run: ensure_directory(native_libs) libs_file = open(native_libs, 'wt') @@ -201,7 +212,7 @@ class bdist_egg(Command): libs_file.write('\n') libs_file.close() elif os.path.isfile(native_libs): - log.info("removing %s" % native_libs) + log.info("removing %s", native_libs) if not self.dry_run: os.unlink(native_libs) @@ -232,11 +243,28 @@ class bdist_egg(Command): log.info("Removing .py files from temporary directory") for base, dirs, files in walk_egg(self.bdist_dir): for name in files: + path = os.path.join(base, name) + if name.endswith('.py'): - path = os.path.join(base, name) log.debug("Deleting %s", path) os.unlink(path) + if base.endswith('__pycache__'): + path_old = path + + pattern = r'(?P.+)\.(?P[^.]+)\.pyc' + m = re.match(pattern, name) + path_new = os.path.join( + base, os.pardir, m.group('name') + '.pyc') + log.info( + "Renaming file from [%s] to [%s]" + % (path_old, path_new)) + try: + os.remove(path_new) 
+ except OSError: + pass + os.rename(path_old, path_new) + def zip_safe(self): safe = getattr(self.distribution, 'zip_safe', None) if safe is not None: @@ -302,7 +330,7 @@ class bdist_egg(Command): ext_outputs = [] paths = {self.bdist_dir: ''} - for base, dirs, files in os.walk(self.bdist_dir): + for base, dirs, files in sorted_walk(self.bdist_dir): for filename in files: if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: all_outputs.append(paths[base] + filename) @@ -329,7 +357,7 @@ NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split()) def walk_egg(egg_dir): """Walk an unpacked egg's contents, skipping the metadata directory""" - walker = os.walk(egg_dir) + walker = sorted_walk(egg_dir) base, dirs, files = next(walker) if 'EGG-INFO' in dirs: dirs.remove('EGG-INFO') @@ -383,10 +411,12 @@ def scan_module(egg_dir, base, name, stubs): return True # Extension module pkg = base[len(egg_dir) + 1:].replace(os.sep, '.') module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0] - if sys.version_info < (3, 3): + if six.PY2: skip = 8 # skip magic & date - else: + elif sys.version_info < (3, 7): skip = 12 # skip magic & date & file size + else: + skip = 16 # skip magic & reserved? 
& date & file size f = open(filename, 'rb') f.read(skip) code = marshal.load(f) @@ -429,6 +459,7 @@ def can_scan(): log.warn("Please ask the author to include a 'zip_safe'" " setting (either True or False) in the package's setup.py") + # Attribute names of options for commands that might need to be convinced to # install to the egg build directory @@ -457,15 +488,15 @@ def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True, p = path[len(base_dir) + 1:] if not dry_run: z.write(path, p) - log.debug("adding '%s'" % p) + log.debug("adding '%s'", p) compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED if not dry_run: z = zipfile.ZipFile(zip_filename, mode, compression=compression) - for dirname, dirs, files in os.walk(base_dir): + for dirname, dirs, files in sorted_walk(base_dir): visit(z, dirname, files) z.close() else: - for dirname, dirs, files in os.walk(base_dir): + for dirname, dirs, files in sorted_walk(base_dir): visit(None, dirname, files) return zip_filename diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/build_clib.py b/Shared/lib/python3.4/site-packages/setuptools/command/build_clib.py new file mode 100644 index 0000000..09caff6 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/command/build_clib.py @@ -0,0 +1,98 @@ +import distutils.command.build_clib as orig +from distutils.errors import DistutilsSetupError +from distutils import log +from setuptools.dep_util import newer_pairwise_group + + +class build_clib(orig.build_clib): + """ + Override the default build_clib behaviour to do the following: + + 1. Implement a rudimentary timestamp-based dependency system + so 'compile()' doesn't run every time. + 2. Add more keys to the 'build_info' dictionary: + * obj_deps - specify dependencies for each object compiled. + this should be a dictionary mapping a key + with the source filename to a list of + dependencies. Use an empty string for global + dependencies. 
+ * cflags - specify a list of additional flags to pass to + the compiler. + """ + + def build_libraries(self, libraries): + for (lib_name, build_info) in libraries: + sources = build_info.get('sources') + if sources is None or not isinstance(sources, (list, tuple)): + raise DistutilsSetupError( + "in 'libraries' option (library '%s'), " + "'sources' must be present and must be " + "a list of source filenames" % lib_name) + sources = list(sources) + + log.info("building '%s' library", lib_name) + + # Make sure everything is the correct type. + # obj_deps should be a dictionary of keys as sources + # and a list/tuple of files that are its dependencies. + obj_deps = build_info.get('obj_deps', dict()) + if not isinstance(obj_deps, dict): + raise DistutilsSetupError( + "in 'libraries' option (library '%s'), " + "'obj_deps' must be a dictionary of " + "type 'source: list'" % lib_name) + dependencies = [] + + # Get the global dependencies that are specified by the '' key. + # These will go into every source's dependency list. + global_deps = obj_deps.get('', list()) + if not isinstance(global_deps, (list, tuple)): + raise DistutilsSetupError( + "in 'libraries' option (library '%s'), " + "'obj_deps' must be a dictionary of " + "type 'source: list'" % lib_name) + + # Build the list to be used by newer_pairwise_group + # each source will be auto-added to its dependencies. 
+ for source in sources: + src_deps = [source] + src_deps.extend(global_deps) + extra_deps = obj_deps.get(source, list()) + if not isinstance(extra_deps, (list, tuple)): + raise DistutilsSetupError( + "in 'libraries' option (library '%s'), " + "'obj_deps' must be a dictionary of " + "type 'source: list'" % lib_name) + src_deps.extend(extra_deps) + dependencies.append(src_deps) + + expected_objects = self.compiler.object_filenames( + sources, + output_dir=self.build_temp + ) + + if newer_pairwise_group(dependencies, expected_objects) != ([], []): + # First, compile the source code to object files in the library + # directory. (This should probably change to putting object + # files in a temporary build directory.) + macros = build_info.get('macros') + include_dirs = build_info.get('include_dirs') + cflags = build_info.get('cflags') + objects = self.compiler.compile( + sources, + output_dir=self.build_temp, + macros=macros, + include_dirs=include_dirs, + extra_postargs=cflags, + debug=self.debug + ) + + # Now "link" the object files together into a static library. + # (On Unix at least, this isn't really linking -- it just + # builds an archive. Whatever.) 
+ self.compiler.create_static_lib( + expected_objects, + lib_name, + output_dir=self.build_clib, + debug=self.debug + ) diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/build_ext.py b/Shared/lib/python3.4/site-packages/setuptools/command/build_ext.py index 92e4a18..60a8a32 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/build_ext.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/build_ext.py @@ -1,30 +1,50 @@ -from distutils.command.build_ext import build_ext as _du_build_ext -from distutils.file_util import copy_file -from distutils.ccompiler import new_compiler -from distutils.sysconfig import customize_compiler -from distutils.errors import DistutilsError -from distutils import log import os import sys import itertools +import imp +from distutils.command.build_ext import build_ext as _du_build_ext +from distutils.file_util import copy_file +from distutils.ccompiler import new_compiler +from distutils.sysconfig import customize_compiler, get_config_var +from distutils.errors import DistutilsError +from distutils import log from setuptools.extension import Library +from setuptools.extern import six try: # Attempt to use Cython for building extensions, if available from Cython.Distutils.build_ext import build_ext as _build_ext + # Additionally, assert that the compiler module will load + # also. Ref #1229. 
+ __import__('Cython.Compiler.Main') except ImportError: _build_ext = _du_build_ext -try: - # Python 2.7 or >=3.2 - from sysconfig import _CONFIG_VARS -except ImportError: - from distutils.sysconfig import get_config_var +# make sure _config_vars is initialized +get_config_var("LDSHARED") +from distutils.sysconfig import _config_vars as _CONFIG_VARS + + +def _customize_compiler_for_shlib(compiler): + if sys.platform == "darwin": + # building .dylib requires additional compiler flags on OSX; here we + # temporarily substitute the pyconfig.h variables so that distutils' + # 'customize_compiler' uses them before we build the shared libraries. + tmp = _CONFIG_VARS.copy() + try: + # XXX Help! I don't have any idea whether these are right... + _CONFIG_VARS['LDSHARED'] = ( + "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup") + _CONFIG_VARS['CCSHARED'] = " -dynamiclib" + _CONFIG_VARS['SO'] = ".dylib" + customize_compiler(compiler) + finally: + _CONFIG_VARS.clear() + _CONFIG_VARS.update(tmp) + else: + customize_compiler(compiler) - get_config_var("LDSHARED") # make sure _config_vars is initialized - del get_config_var - from distutils.sysconfig import _config_vars as _CONFIG_VARS have_rtld = False use_stubs = False @@ -39,9 +59,18 @@ elif os.name != 'nt': except ImportError: pass - if_dl = lambda s: s if have_rtld else '' + +def get_abi3_suffix(): + """Return the file extension for an abi3-compliant Extension()""" + for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION): + if '.abi3' in suffix: # Unix + return suffix + elif suffix == '.pyd': # Windows + return suffix + + class build_ext(_build_ext): def run(self): """Build extensions in build directory, then copy if --inplace""" @@ -77,6 +106,15 @@ class build_ext(_build_ext): filename = _build_ext.get_ext_filename(self, fullname) if fullname in self.ext_map: ext = self.ext_map[fullname] + use_abi3 = ( + six.PY3 + and getattr(ext, 'py_limited_api') + and get_abi3_suffix() + ) + if use_abi3: + so_ext 
= get_config_var('EXT_SUFFIX') + filename = filename[:-len(so_ext)] + filename = filename + get_abi3_suffix() if isinstance(ext, Library): fn, ext = os.path.splitext(filename) return self.shlib_compiler.library_filename(fn, libtype) @@ -124,20 +162,7 @@ class build_ext(_build_ext): compiler = self.shlib_compiler = new_compiler( compiler=self.compiler, dry_run=self.dry_run, force=self.force ) - if sys.platform == "darwin": - tmp = _CONFIG_VARS.copy() - try: - # XXX Help! I don't have any idea whether these are right... - _CONFIG_VARS['LDSHARED'] = ( - "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup") - _CONFIG_VARS['CCSHARED'] = " -dynamiclib" - _CONFIG_VARS['SO'] = ".dylib" - customize_compiler(compiler) - finally: - _CONFIG_VARS.clear() - _CONFIG_VARS.update(tmp) - else: - customize_compiler(compiler) + _customize_compiler_for_shlib(compiler) if self.include_dirs is not None: compiler.set_include_dirs(self.include_dirs) diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/build_py.py b/Shared/lib/python3.4/site-packages/setuptools/command/build_py.py index 8623c77..b0314fd 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/build_py.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/build_py.py @@ -6,14 +6,15 @@ import fnmatch import textwrap import io import distutils.errors -import collections import itertools -from setuptools.extern.six.moves import map +from setuptools.extern import six +from setuptools.extern.six.moves import map, filter, filterfalse try: from setuptools.lib2to3_ex import Mixin2to3 except ImportError: + class Mixin2to3: def run_2to3(self, files, doctests=True): "do nothing" @@ -67,6 +68,9 @@ class build_py(orig.build_py, Mixin2to3): return orig.build_py.__getattr__(self, attr) def build_module(self, module, module_file, package): + if six.PY2 and isinstance(package, six.string_types): + # avoid errors on Python 2 when unicode is passed (#190) + package = package.split('.') outfile, copied = 
orig.build_py.build_module(self, module, module_file, package) if copied: @@ -94,12 +98,19 @@ class build_py(orig.build_py, Mixin2to3): def find_data_files(self, package, src_dir): """Return filenames for package's data files in 'src_dir'""" - globs = (self.package_data.get('', []) - + self.package_data.get(package, [])) - files = self.manifest_files.get(package, [])[:] - for pattern in globs: - # Each pattern has to be converted to a platform-specific path - files.extend(glob(os.path.join(src_dir, convert_path(pattern)))) + patterns = self._get_platform_patterns( + self.package_data, + package, + src_dir, + ) + globs_expanded = map(glob, patterns) + # flatten the expanded globs into an iterable of matches + globs_matches = itertools.chain.from_iterable(globs_expanded) + glob_files = filter(os.path.isfile, globs_matches) + files = itertools.chain( + self.manifest_files.get(package, []), + glob_files, + ) return self.exclude_data_files(package, src_dir, files) def build_package_data(self): @@ -184,26 +195,63 @@ class build_py(orig.build_py, Mixin2to3): def exclude_data_files(self, package, src_dir, files): """Filter filenames for package's data files in 'src_dir'""" - globs = ( - self.exclude_package_data.get('', []) - + self.exclude_package_data.get(package, []) + files = list(files) + patterns = self._get_platform_patterns( + self.exclude_package_data, + package, + src_dir, ) - bad = set( - item - for pattern in globs - for item in fnmatch.filter( - files, - os.path.join(src_dir, convert_path(pattern)), - ) + match_groups = ( + fnmatch.filter(files, pattern) + for pattern in patterns ) - seen = collections.defaultdict(itertools.count) - return [ + # flatten the groups of matches into an iterable of matches + matches = itertools.chain.from_iterable(match_groups) + bad = set(matches) + keepers = ( fn for fn in files if fn not in bad - # ditch dupes - and not next(seen[fn]) - ] + ) + # ditch dupes + return list(_unique_everseen(keepers)) + + @staticmethod + def 
_get_platform_patterns(spec, package, src_dir): + """ + yield platform-specific path patterns (suitable for glob + or fn_match) from a glob-based spec (such as + self.package_data or self.exclude_package_data) + matching package in src_dir. + """ + raw_patterns = itertools.chain( + spec.get('', []), + spec.get(package, []), + ) + return ( + # Each pattern has to be converted to a platform-specific path + os.path.join(src_dir, convert_path(pattern)) + for pattern in raw_patterns + ) + + +# from Python docs +def _unique_everseen(iterable, key=None): + "List unique elements, preserving order. Remember all elements ever seen." + # unique_everseen('AAAABBBCCDAABBB') --> A B C D + # unique_everseen('ABBCcAD', str.lower) --> A B C D + seen = set() + seen_add = seen.add + if key is None: + for element in filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element + else: + for element in iterable: + k = key(element) + if k not in seen: + seen_add(k) + yield element def assert_relative(path): diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/develop.py b/Shared/lib/python3.4/site-packages/setuptools/command/develop.py index 11b5df1..fdc9fc4 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/develop.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/develop.py @@ -9,10 +9,13 @@ from setuptools.extern import six from pkg_resources import Distribution, PathMetadata, normalize_path from setuptools.command.easy_install import easy_install +from setuptools import namespaces import setuptools +__metaclass__ = type -class develop(easy_install): + +class develop(namespaces.DevelopInstaller, easy_install): """Set up package for development""" description = "install package in 'development mode'" @@ -30,6 +33,7 @@ class develop(easy_install): if self.uninstall: self.multi_version = True self.uninstall_link() + self.uninstall_namespaces() else: self.install_for_development() self.warn_deprecated_options() @@ -77,15 +81,30 
@@ class develop(easy_install): project_name=ei.egg_name ) - p = self.egg_base.replace(os.sep, '/') - if p != os.curdir: - p = '../' * (p.count('/') + 1) - self.setup_path = p - p = normalize_path(os.path.join(self.install_dir, self.egg_path, p)) - if p != normalize_path(os.curdir): + self.setup_path = self._resolve_setup_path( + self.egg_base, + self.install_dir, + self.egg_path, + ) + + @staticmethod + def _resolve_setup_path(egg_base, install_dir, egg_path): + """ + Generate a path from egg_base back to '.' where the + setup script resides and ensure that path points to the + setup path from $install_dir/$egg_path. + """ + path_to_setup = egg_base.replace(os.sep, '/').rstrip('/') + if path_to_setup != os.curdir: + path_to_setup = '../' * (path_to_setup.count('/') + 1) + resolved = normalize_path( + os.path.join(install_dir, egg_path, path_to_setup) + ) + if resolved != normalize_path(os.curdir): raise DistutilsOptionError( "Can't get a consistent path to setup script from" - " installation directory", p, normalize_path(os.curdir)) + " installation directory", resolved, normalize_path(os.curdir)) + return path_to_setup def install_for_development(self): if six.PY3 and getattr(self.distribution, 'use_2to3', False): @@ -123,6 +142,8 @@ class develop(easy_install): self.easy_install(setuptools.bootstrap_install_from) setuptools.bootstrap_install_from = None + self.install_namespaces() + # create an .egg-link in the installation dir, pointing to our egg log.info("Creating %s (link to %s)", self.egg_link, self.egg_base) if not self.dry_run: @@ -173,7 +194,7 @@ class develop(easy_install): return easy_install.install_wrapper_scripts(self, dist) -class VersionlessRequirement(object): +class VersionlessRequirement: """ Adapt a pkg_resources.Distribution to simply return the project name as the 'requirement' so that scripts will work across @@ -186,6 +207,7 @@ class VersionlessRequirement(object): >>> str(adapted_dist.as_requirement()) 'foo' """ + def __init__(self, 
dist): self.__dist = dist diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/dist_info.py b/Shared/lib/python3.4/site-packages/setuptools/command/dist_info.py new file mode 100644 index 0000000..c45258f --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/command/dist_info.py @@ -0,0 +1,36 @@ +""" +Create a dist_info directory +As defined in the wheel specification +""" + +import os + +from distutils.core import Command +from distutils import log + + +class dist_info(Command): + + description = 'create a .dist-info directory' + + user_options = [ + ('egg-base=', 'e', "directory containing .egg-info directories" + " (default: top of the source tree)"), + ] + + def initialize_options(self): + self.egg_base = None + + def finalize_options(self): + pass + + def run(self): + egg_info = self.get_finalized_command('egg_info') + egg_info.egg_base = self.egg_base + egg_info.finalize_options() + egg_info.run() + dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info' + log.info("creating '{}'".format(os.path.abspath(dist_info_dir))) + + bdist_wheel = self.get_finalized_command('bdist_wheel') + bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir) diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/easy_install.py b/Shared/lib/python3.4/site-packages/setuptools/command/easy_install.py index 9fc287e..7115f0b 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/easy_install.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/easy_install.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - """ Easy Install ------------ @@ -8,15 +7,17 @@ A tool for doing automatic download/extract/build of distutils-based Python packages. For detailed documentation, see the accompanying EasyInstall.txt file, or visit the `EasyInstall home page`__. 
-__ https://pythonhosted.org/setuptools/easy_install.html +__ https://setuptools.readthedocs.io/en/latest/easy_install.html """ from glob import glob from distutils.util import get_platform from distutils.util import convert_path, subst_vars -from distutils.errors import DistutilsArgError, DistutilsOptionError, \ - DistutilsError, DistutilsPlatformError +from distutils.errors import ( + DistutilsArgError, DistutilsOptionError, + DistutilsError, DistutilsPlatformError, +) from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS from distutils import log, dir_util from distutils.command.build_scripts import first_line_re @@ -30,7 +31,6 @@ import zipfile import re import stat import random -import platform import textwrap import warnings import site @@ -40,29 +40,34 @@ import subprocess import shlex import io +from sysconfig import get_config_vars, get_path + from setuptools.extern import six from setuptools.extern.six.moves import configparser, map from setuptools import Command from setuptools.sandbox import run_setup -from setuptools.py31compat import get_path, get_config_vars +from setuptools.py27compat import rmtree_safe from setuptools.command import setopt from setuptools.archive_util import unpack_archive -from setuptools.package_index import PackageIndex -from setuptools.package_index import URL_SCHEME +from setuptools.package_index import ( + PackageIndex, parse_requirement_arg, URL_SCHEME, +) from setuptools.command import bdist_egg, egg_info +from setuptools.wheel import Wheel from pkg_resources import ( yield_lines, normalize_path, resource_string, ensure_directory, get_distribution, find_distributions, Environment, Requirement, Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, VersionConflict, DEVELOP_DIST, ) -import pkg_resources +import pkg_resources.py31compat + +__metaclass__ = type # Turn on PEP440Warnings warnings.filterwarnings("default", category=pkg_resources.PEP440Warning) - __all__ = [ 'samefile', 
'easy_install', 'PthDistributions', 'extract_wininst_cfg', 'main', 'get_exe_prefixes', @@ -74,6 +79,12 @@ def is_64bit(): def samefile(p1, p2): + """ + Determine if two paths reference the same file. + + Augments os.path.samefile to work on Windows and + suppresses errors if the path doesn't exist. + """ both_exist = os.path.exists(p1) and os.path.exists(p2) use_samefile = hasattr(os.path, 'samefile') and both_exist if use_samefile: @@ -84,7 +95,8 @@ def samefile(p1, p2): if six.PY2: - def _to_ascii(s): + + def _to_bytes(s): return s def isascii(s): @@ -94,8 +106,9 @@ if six.PY2: except UnicodeError: return False else: - def _to_ascii(s): - return s.encode('ascii') + + def _to_bytes(s): + return s.encode('utf8') def isascii(s): try: @@ -105,6 +118,9 @@ else: return False +_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ') + + class easy_install(Command): """Manage a download/build/install process""" description = "Find/get/install Python packages" @@ -269,11 +285,15 @@ class easy_install(Command): if not self.install_layout.lower() in ['deb']: raise DistutilsOptionError("unknown value for --install-layout") self.install_layout = self.install_layout.lower() + import sysconfig if sys.version_info[:2] >= (3, 3): self.multiarch = sysconfig.get_config_var('MULTIARCH') - self._expand('install_dir', 'script_dir', 'build_directory', - 'site_dirs') + + self._expand( + 'install_dir', 'script_dir', 'build_directory', + 'site_dirs', + ) # If a non-default installation directory was specified, default the # script directory to match it. 
if self.script_dir is None: @@ -327,7 +347,7 @@ consider to install to another location, or use the option self.all_site_dirs.append(normalize_path(d)) if not self.editable: self.check_site_dir() - self.index_url = self.index_url or "https://pypi.python.org/simple" + self.index_url = self.index_url or "https://pypi.org/simple/" self.shadow_path = self.all_site_dirs[:] for path_item in self.install_dir, normalize_path(self.script_dir): if path_item not in self.shadow_path: @@ -402,9 +422,15 @@ consider to install to another location, or use the option def expand_dirs(self): """Calls `os.path.expanduser` on install dirs.""" - self._expand_attrs(['install_purelib', 'install_platlib', - 'install_lib', 'install_headers', - 'install_scripts', 'install_data', ]) + dirs = [ + 'install_purelib', + 'install_platlib', + 'install_lib', + 'install_headers', + 'install_scripts', + 'install_data', + ] + self._expand_attrs(dirs) def run(self): if self.verbose != self.distribution.verbose: @@ -413,7 +439,7 @@ consider to install to another location, or use the option for spec in self.args: self.easy_install(spec, not self.no_deps) if self.record: - outputs = self.outputs + outputs = list(sorted(self.outputs)) if self.root: # strip any package prefix root_len = len(self.root) for counter in range(len(outputs)): @@ -436,7 +462,7 @@ consider to install to another location, or use the option """ try: pid = os.getpid() - except: + except Exception: pid = random.randint(0, sys.maxsize) return os.path.join(self.install_dir, "test-easy-install-%s" % pid) @@ -477,8 +503,7 @@ consider to install to another location, or use the option else: self.pth_file = None - PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep) - if instdir not in map(normalize_path, filter(None, PYTHONPATH)): + if instdir not in map(normalize_path, _pythonpath()): # only PYTHONPATH dirs need a site.py, so pretend it's there self.sitepy_installed = True elif self.multi_version and not os.path.exists(pth_file): 
@@ -517,7 +542,7 @@ consider to install to another location, or use the option For information on other options, you may wish to consult the documentation at: - https://pythonhosted.org/setuptools/easy_install.html + https://setuptools.readthedocs.io/en/latest/easy_install.html Please make the appropriate changes for your system and try again. """).lstrip() @@ -538,27 +563,34 @@ consider to install to another location, or use the option pth_file = self.pseudo_tempname() + ".pth" ok_file = pth_file + '.ok' ok_exists = os.path.exists(ok_file) + tmpl = _one_liner(""" + import os + f = open({ok_file!r}, 'w') + f.write('OK') + f.close() + """) + '\n' try: if ok_exists: os.unlink(ok_file) dirname = os.path.dirname(ok_file) - if not os.path.exists(dirname): - os.makedirs(dirname) + pkg_resources.py31compat.makedirs(dirname, exist_ok=True) f = open(pth_file, 'w') except (OSError, IOError): self.cant_write_to_target() else: try: - f.write("import os; f = open(%r, 'w'); f.write('OK'); " - "f.close()\n" % (ok_file,)) + f.write(tmpl.format(**locals())) f.close() f = None executable = sys.executable if os.name == 'nt': dirname, basename = os.path.split(executable) alt = os.path.join(dirname, 'pythonw.exe') - if (basename.lower() == 'python.exe' and - os.path.exists(alt)): + use_alt = ( + basename.lower() == 'python.exe' and + os.path.exists(alt) + ) + if use_alt: # use pythonw.exe to avoid opening a console window executable = alt @@ -623,20 +655,26 @@ consider to install to another location, or use the option (spec.key, self.build_directory) ) + @contextlib.contextmanager + def _tmpdir(self): + tmpdir = tempfile.mkdtemp(prefix=u"easy_install-") + try: + # cast to str as workaround for #709 and #710 and #712 + yield str(tmpdir) + finally: + os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir)) + def easy_install(self, spec, deps=False): - tmpdir = tempfile.mkdtemp(prefix="easy_install-") - download = None if not self.editable: self.install_site_py() - try: + with 
self._tmpdir() as tmpdir: if not isinstance(spec, Requirement): if URL_SCHEME(spec): # It's a url, download it to tmpdir and process self.not_editable(spec) - download = self.package_index.download(spec, tmpdir) - return self.install_item(None, download, tmpdir, deps, - True) + dl = self.package_index.download(spec, tmpdir) + return self.install_item(None, dl, tmpdir, deps, True) elif os.path.exists(spec): # Existing file or directory, just process it directly @@ -662,10 +700,6 @@ consider to install to another location, or use the option else: return self.install_item(spec, dist.location, tmpdir, deps) - finally: - if os.path.exists(tmpdir): - rmtree(tmpdir) - def install_item(self, spec, download, tmpdir, deps, install_needed=False): # Installation is also needed if file in tmpdir or is not an egg @@ -733,10 +767,7 @@ consider to install to another location, or use the option elif requirement is None or dist not in requirement: # if we wound up with a different version, resolve what we've got distreq = dist.as_requirement() - requirement = requirement or distreq - requirement = Requirement( - distreq.project_name, distreq.specs, requirement.extras - ) + requirement = Requirement(str(distreq)) log.info("Processing dependencies for %s", requirement) try: distros = WorkingSet([]).resolve( @@ -765,8 +796,9 @@ consider to install to another location, or use the option def maybe_move(self, spec, dist_filename, setup_base): dst = os.path.join(self.build_directory, spec.key) if os.path.exists(dst): - msg = ("%r already exists in %s; build directory %s will not be " - "kept") + msg = ( + "%r already exists in %s; build directory %s will not be kept" + ) log.warn(msg, spec.key, self.build_directory, setup_base) return setup_base if os.path.isdir(dist_filename): @@ -798,7 +830,7 @@ consider to install to another location, or use the option if is_script: body = self._load_template(dev_path) % locals() script_text = ScriptWriter.get_header(script_text) + body - 
self.write_script(script_name, _to_ascii(script_text), 'b') + self.write_script(script_name, _to_bytes(script_text), 'b') @staticmethod def _load_template(dev_path): @@ -806,7 +838,7 @@ consider to install to another location, or use the option There are a couple of template scripts in the package. This function loads one of them and prepares it for use. """ - # See https://bitbucket.org/pypa/setuptools/issue/134 for info + # See https://github.com/pypa/setuptools/issues/134 for info # on script file naming and downstream issues with SVR4 name = 'script.tmpl' if dev_path: @@ -824,14 +856,16 @@ consider to install to another location, or use the option target = os.path.join(self.script_dir, script_name) self.add_output(target) + if self.dry_run: + return + mask = current_umask() - if not self.dry_run: - ensure_directory(target) - if os.path.exists(target): - os.unlink(target) - with open(target, "w" + mode) as f: - f.write(contents) - chmod(target, 0o777 - mask) + ensure_directory(target) + if os.path.exists(target): + os.unlink(target) + with open(target, "w" + mode) as f: + f.write(contents) + chmod(target, 0o777 - mask) def install_eggs(self, spec, dist_filename, tmpdir): # .egg dirs or files are already built, so just return them @@ -839,6 +873,8 @@ consider to install to another location, or use the option return [self.install_egg(dist_filename, tmpdir)] elif dist_filename.lower().endswith('.exe'): return [self.install_exe(dist_filename, tmpdir)] + elif dist_filename.lower().endswith('.whl'): + return [self.install_wheel(dist_filename, tmpdir)] # Anything else, try to extract and build setup_base = tmpdir @@ -884,8 +920,10 @@ consider to install to another location, or use the option return Distribution.from_filename(egg_path, metadata=metadata) def install_egg(self, egg_path, tmpdir): - destination = os.path.join(self.install_dir, - os.path.basename(egg_path)) + destination = os.path.join( + self.install_dir, + os.path.basename(egg_path), + ) destination = 
os.path.abspath(destination) if not self.dry_run: ensure_directory(destination) @@ -895,8 +933,11 @@ consider to install to another location, or use the option if os.path.isdir(destination) and not os.path.islink(destination): dir_util.remove_tree(destination, dry_run=self.dry_run) elif os.path.exists(destination): - self.execute(os.unlink, (destination,), "Removing " + - destination) + self.execute( + os.unlink, + (destination,), + "Removing " + destination, + ) try: new_dist_is_zipped = False if os.path.isdir(egg_path): @@ -913,13 +954,19 @@ consider to install to another location, or use the option f, m = shutil.move, "Moving" else: f, m = shutil.copy2, "Copying" - self.execute(f, (egg_path, destination), - (m + " %s to %s") % - (os.path.basename(egg_path), - os.path.dirname(destination))) - update_dist_caches(destination, - fix_zipimporter_caches=new_dist_is_zipped) - except: + self.execute( + f, + (egg_path, destination), + (m + " %s to %s") % ( + os.path.basename(egg_path), + os.path.dirname(destination) + ), + ) + update_dist_caches( + destination, + fix_zipimporter_caches=new_dist_is_zipped, + ) + except Exception: update_dist_caches(destination, fix_zipimporter_caches=False) raise @@ -941,8 +988,8 @@ consider to install to another location, or use the option ) # Convert the .exe to an unpacked egg - egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() + - '.egg') + egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg') + dist.location = egg_path egg_tmp = egg_path + '.tmp' _egg_info = os.path.join(egg_tmp, 'EGG-INFO') pkg_inf = os.path.join(_egg_info, 'PKG-INFO') @@ -960,13 +1007,13 @@ consider to install to another location, or use the option f.close() script_dir = os.path.join(_egg_info, 'scripts') # delete entry-point scripts to avoid duping - self.delete_blockers( - [os.path.join(script_dir, args[0]) for args in - ScriptWriter.get_args(dist)] - ) + self.delete_blockers([ + os.path.join(script_dir, args[0]) + for args in 
ScriptWriter.get_args(dist) + ]) # Build .egg file from tmpdir bdist_egg.make_zipfile( - egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run + egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run, ) # install the .egg return self.install_egg(egg_path, tmpdir) @@ -1024,6 +1071,35 @@ consider to install to another location, or use the option f.write('\n'.join(locals()[name]) + '\n') f.close() + def install_wheel(self, wheel_path, tmpdir): + wheel = Wheel(wheel_path) + assert wheel.is_compatible() + destination = os.path.join(self.install_dir, wheel.egg_name()) + destination = os.path.abspath(destination) + if not self.dry_run: + ensure_directory(destination) + if os.path.isdir(destination) and not os.path.islink(destination): + dir_util.remove_tree(destination, dry_run=self.dry_run) + elif os.path.exists(destination): + self.execute( + os.unlink, + (destination,), + "Removing " + destination, + ) + try: + self.execute( + wheel.install_as_egg, + (destination,), + ("Installing %s to %s") % ( + os.path.basename(wheel_path), + os.path.dirname(destination) + ), + ) + finally: + update_dist_caches(destination, fix_zipimporter_caches=False) + self.add_output(destination) + return self.egg_distribution(destination) + __mv_warning = textwrap.dedent(""" Because this distribution was installed --multi-version, before you can import modules from this package in an application, you will need to @@ -1154,7 +1230,7 @@ consider to install to another location, or use the option if dist.location in self.pth_file.paths: log.info( "%s is already the active version in easy-install.pth", - dist + dist, ) else: log.info("Adding %s to easy-install.pth file", dist) @@ -1202,7 +1278,6 @@ consider to install to another location, or use the option def byte_compile(self, to_compile): if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') return from distutils.util import byte_compile @@ -1215,7 +1290,7 @@ consider to install to another location, or 
use the option if self.optimize: byte_compile( to_compile, optimize=self.optimize, force=1, - dry_run=self.dry_run + dry_run=self.dry_run, ) finally: log.set_verbosity(self.verbose) # restore original verbosity @@ -1246,7 +1321,8 @@ consider to install to another location, or use the option * You can set up the installation directory to support ".pth" files by using one of the approaches described here: - https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations + https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations + Please make the appropriate changes for your system and try again.""").lstrip() @@ -1262,17 +1338,14 @@ consider to install to another location, or use the option sitepy = os.path.join(self.install_dir, "site.py") source = resource_string("setuptools", "site-patch.py") + source = source.decode('utf-8') current = "" if os.path.exists(sitepy): log.debug("Checking existing site.py in %s", self.install_dir) - f = open(sitepy, 'rb') - current = f.read() - # we want str, not bytes - if six.PY3: - current = current.decode() + with io.open(sitepy) as strm: + current = strm.read() - f.close() if not current.startswith('def __boot():'): raise DistutilsError( "%s is not a setuptools-generated site.py; please" @@ -1283,9 +1356,8 @@ consider to install to another location, or use the option log.info("Creating %s", sitepy) if not self.dry_run: ensure_directory(sitepy) - f = open(sitepy, 'wb') - f.write(source) - f.close() + with io.open(sitepy, 'w', encoding='utf-8') as strm: + strm.write(source) self.byte_compile([sitepy]) self.sitepy_installed = True @@ -1359,10 +1431,21 @@ consider to install to another location, or use the option setattr(self, attr, val) +def _pythonpath(): + items = os.environ.get('PYTHONPATH', '').split(os.pathsep) + return filter(None, items) + + def get_site_dirs(): - # return a list of 'site' dirs - sitedirs = [_f for _f in os.environ.get('PYTHONPATH', - '').split(os.pathsep) 
if _f] + """ + Return a list of 'site' dirs + """ + + sitedirs = [] + + # start with PYTHONPATH + sitedirs.extend(_pythonpath()) + prefixes = [sys.prefix] if sys.exec_prefix != sys.prefix: prefixes.append(sys.exec_prefix) @@ -1371,20 +1454,26 @@ def get_site_dirs(): if sys.platform in ('os2emx', 'riscos'): sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) elif os.sep == '/': - sitedirs.extend([os.path.join(prefix, - "lib", - "python" + sys.version[:3], - "site-packages"), - os.path.join(prefix, "lib", "site-python")]) + sitedirs.extend([ + os.path.join( + prefix, + "local/lib", + "python" + sys.version[:3], + "dist-packages", + ), + os.path.join( + prefix, + "lib", + "python" + sys.version[:3], + "dist-packages", + ), + os.path.join(prefix, "lib", "site-python"), + ]) else: - if sys.version[:3] in ('2.3', '2.4', '2.5'): - sdir = "site-packages" - else: - sdir = "dist-packages" - sitedirs.extend( - [os.path.join(prefix, "local/lib", "python" + sys.version[:3], sdir), - os.path.join(prefix, "lib", "python" + sys.version[:3], sdir)] - ) + sitedirs.extend([ + prefix, + os.path.join(prefix, "lib", "site-packages"), + ]) if sys.platform == 'darwin': # for framework builds *only* we add the standard Apple # locations. 
Currently only per-user, but /Library and @@ -1392,12 +1481,14 @@ def get_site_dirs(): if 'Python.framework' in prefix: home = os.environ.get('HOME') if home: - sitedirs.append( - os.path.join(home, - 'Library', - 'Python', - sys.version[:3], - 'site-packages')) + home_sp = os.path.join( + home, + 'Library', + 'Python', + sys.version[:3], + 'site-packages', + ) + sitedirs.append(home_sp) lib_paths = get_path('purelib'), get_path('platlib') for site_lib in lib_paths: if site_lib not in sitedirs: @@ -1406,6 +1497,11 @@ def get_site_dirs(): if site.ENABLE_USER_SITE: sitedirs.append(site.USER_SITE) + try: + sitedirs.extend(site.getsitepackages()) + except AttributeError: + pass + sitedirs = list(map(normalize_path, sitedirs)) return sitedirs @@ -1473,8 +1569,8 @@ def extract_wininst_cfg(dist_filename): return None # not a valid tag f.seek(prepended - (12 + cfglen)) - cfg = configparser.RawConfigParser( - {'version': '', 'target_version': ''}) + init = {'version': '', 'target_version': ''} + cfg = configparser.RawConfigParser(init) try: part = f.read(cfglen) # Read up to the first null byte. 
@@ -1497,7 +1593,8 @@ def get_exe_prefixes(exe_filename): """Get exe->egg path translations for a given .exe file""" prefixes = [ - ('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''), + ('PURELIB/', ''), + ('PLATLIB/pywin32_system32', ''), ('PLATLIB/', ''), ('SCRIPTS/', 'EGG-INFO/scripts/'), ('DATA/lib/site-packages', ''), @@ -1531,15 +1628,6 @@ def get_exe_prefixes(exe_filename): return prefixes -def parse_requirement_arg(spec): - try: - return Requirement.parse(spec) - except ValueError: - raise DistutilsError( - "Not a URL, existing file, or requirement spec: %r" % (spec,) - ) - - class PthDistributions(Environment): """A .pth file with Distribution paths in it""" @@ -1649,7 +1737,6 @@ class PthDistributions(Environment): class RewritePthDistributions(PthDistributions): - @classmethod def _wrap_lines(cls, lines): yield cls.prelude @@ -1657,12 +1744,11 @@ class RewritePthDistributions(PthDistributions): yield line yield cls.postlude - _inline = lambda text: textwrap.dedent(text).strip().replace('\n', '; ') - prelude = _inline(""" + prelude = _one_liner(""" import sys sys.__plen = len(sys.path) """) - postlude = _inline(""" + postlude = _one_liner(""" import sys new = sys.path[sys.__plen:] del sys.path[sys.__plen:] @@ -1672,7 +1758,7 @@ class RewritePthDistributions(PthDistributions): """) -if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'rewrite') == 'rewrite': +if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': PthDistributions = RewritePthDistributions @@ -1689,7 +1775,7 @@ def _first_line_re(): def auto_chmod(func, arg, exc): - if func is os.remove and os.name == 'nt': + if func in [os.unlink, os.remove] and os.name == 'nt': chmod(arg, stat.S_IWRITE) return func(arg) et, ev, _ = sys.exc_info() @@ -1821,8 +1907,8 @@ def _update_zipimporter_cache(normalized_path, cache, updater=None): # * Does not support the dict.pop() method, forcing us to use the # get/del patterns instead. 
For more detailed information see the # following links: - # https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960 - # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99 + # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420 + # http://bit.ly/2h9itJX old_entry = cache[p] del cache[p] new_entry = updater and updater(p, old_entry) @@ -1842,6 +1928,7 @@ def _remove_and_clear_zip_directory_cache_data(normalized_path): normalized_path, zipimport._zip_directory_cache, updater=clear_and_remove_cached_zip_archive_directory_data) + # PyPy Python implementation does not allow directly writing to the # zipimport._zip_directory_cache and so prevents us from attempting to correct # its content. The best we can do there is clear the problematic cache content @@ -1854,6 +1941,7 @@ if '__pypy__' in sys.builtin_module_names: _replace_zip_directory_cache_data = \ _remove_and_clear_zip_directory_cache_data else: + def _replace_zip_directory_cache_data(normalized_path): def replace_cached_zip_archive_directory_data(path, old_entry): # N.B. In theory, we could load the zip directory information just @@ -1996,11 +2084,21 @@ class CommandSpec(list): def as_header(self): return self._render(self + list(self.options)) + @staticmethod + def _strip_quotes(item): + _QUOTES = '"\'' + for q in _QUOTES: + if item.startswith(q) and item.endswith(q): + return item[1:-1] + return item + @staticmethod def _render(items): - cmdline = subprocess.list2cmdline(items) + cmdline = subprocess.list2cmdline( + CommandSpec._strip_quotes(item.strip()) for item in items) return '#!' + cmdline + '\n' + # For pbr compat; will be removed in a future version. 
sys_executable = CommandSpec._sys_executable() @@ -2009,19 +2107,21 @@ class WindowsCommandSpec(CommandSpec): split_args = dict(posix=False) -class ScriptWriter(object): +class ScriptWriter: """ Encapsulates behavior around writing entry point scripts for console and gui apps. """ - template = textwrap.dedent(""" + template = textwrap.dedent(r""" # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r __requires__ = %(spec)r + import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point(%(spec)r, %(group)r, %(name)r)() ) @@ -2040,12 +2140,10 @@ class ScriptWriter(object): @classmethod def get_script_header(cls, script_text, executable=None, wininst=False): # for backward compatibility - warnings.warn("Use get_header", DeprecationWarning) + warnings.warn("Use get_header", DeprecationWarning, stacklevel=2) if wininst: executable = "python.exe" - cmd = cls.command_spec_class.best().from_param(executable) - cmd.install_options(script_text) - return cmd.as_header() + return cls.get_header(script_text, executable) @classmethod def get_args(cls, dist, header=None): @@ -2130,8 +2228,11 @@ class WindowsScriptWriter(ScriptWriter): "For Windows, add a .py extension" ext = dict(console='.pya', gui='.pyw')[type_] if ext not in os.environ['PATHEXT'].lower().split(';'): - warnings.warn("%s not listed in PATHEXT; scripts will not be " - "recognized as executables." % ext, UserWarning) + msg = ( + "{ext} not listed in PATHEXT; scripts will not be " + "recognized as executables." + ).format(**locals()) + warnings.warn(msg, UserWarning) old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe'] old.remove(ext) header = cls._adjust_header(type_, header) @@ -2210,8 +2311,6 @@ def get_win_launcher(type): Returns the executable as a byte string. 
""" launcher_fn = '%s.exe' % type - if platform.machine().lower() == 'arm': - launcher_fn = launcher_fn.replace(".", "-arm.") if is_64bit(): launcher_fn = launcher_fn.replace(".", "-64.") else: @@ -2228,39 +2327,7 @@ def load_launcher_manifest(name): def rmtree(path, ignore_errors=False, onerror=auto_chmod): - """Recursively delete a directory tree. - - This code is taken from the Python 2.4 version of 'shutil', because - the 2.3 version doesn't really work right. - """ - if ignore_errors: - def onerror(*args): - pass - elif onerror is None: - def onerror(*args): - raise - names = [] - try: - names = os.listdir(path) - except os.error: - onerror(os.listdir, path, sys.exc_info()) - for name in names: - fullname = os.path.join(path, name) - try: - mode = os.lstat(fullname).st_mode - except os.error: - mode = 0 - if stat.S_ISDIR(mode): - rmtree(fullname, ignore_errors, onerror) - else: - try: - os.remove(fullname) - except os.error: - onerror(os.remove, fullname, sys.exc_info()) - try: - os.rmdir(path) - except os.error: - onerror(os.rmdir, path, sys.exc_info()) + return shutil.rmtree(path, ignore_errors, onerror) def current_umask(): @@ -2297,7 +2364,8 @@ def main(argv=None, **kw): setup( script_args=['-q', 'easy_install', '-v'] + argv, script_name=sys.argv[0] or 'easy_install', - distclass=DistributionWithoutHelpCommands, **kw + distclass=DistributionWithoutHelpCommands, + **kw ) diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/egg_info.py b/Shared/lib/python3.4/site-packages/setuptools/command/egg_info.py index d1bd9b0..f3ad36b 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/egg_info.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/egg_info.py @@ -3,6 +3,7 @@ Create a distribution's .egg-info directory and contents""" from distutils.filelist import FileList as _FileList +from distutils.errors import DistutilsInternalError from distutils.util import convert_path from distutils import log import distutils.errors @@ 
-13,6 +14,7 @@ import sys import io import warnings import time +import collections from setuptools.extern import six from setuptools.extern.six.moves import map @@ -26,60 +28,175 @@ from pkg_resources import ( parse_requirements, safe_name, parse_version, safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) import setuptools.unicode_utils as unicode_utils +from setuptools.glob import glob -from pkg_resources.extern import packaging - -try: - from setuptools_svn import svn_utils -except ImportError: - pass +from setuptools.extern import packaging -class egg_info(Command): +def translate_pattern(glob): + """ + Translate a file path glob like '*.txt' in to a regular expression. + This differs from fnmatch.translate which allows wildcards to match + directory separators. It also knows about '**/' which matches any number of + directories. + """ + pat = '' + + # This will split on '/' within [character classes]. This is deliberate. + chunks = glob.split(os.path.sep) + + sep = re.escape(os.sep) + valid_char = '[^%s]' % (sep,) + + for c, chunk in enumerate(chunks): + last_chunk = c == len(chunks) - 1 + + # Chunks that are a literal ** are globstars. They match anything. 
+ if chunk == '**': + if last_chunk: + # Match anything if this is the last component + pat += '.*' + else: + # Match '(name/)*' + pat += '(?:%s+%s)*' % (valid_char, sep) + continue # Break here as the whole path component has been handled + + # Find any special characters in the remainder + i = 0 + chunk_len = len(chunk) + while i < chunk_len: + char = chunk[i] + if char == '*': + # Match any number of name characters + pat += valid_char + '*' + elif char == '?': + # Match a name character + pat += valid_char + elif char == '[': + # Character class + inner_i = i + 1 + # Skip initial !/] chars + if inner_i < chunk_len and chunk[inner_i] == '!': + inner_i = inner_i + 1 + if inner_i < chunk_len and chunk[inner_i] == ']': + inner_i = inner_i + 1 + + # Loop till the closing ] is found + while inner_i < chunk_len and chunk[inner_i] != ']': + inner_i = inner_i + 1 + + if inner_i >= chunk_len: + # Got to the end of the string without finding a closing ] + # Do not treat this as a matching group, but as a literal [ + pat += re.escape(char) + else: + # Grab the insides of the [brackets] + inner = chunk[i + 1:inner_i] + char_class = '' + + # Class negation + if inner[0] == '!': + char_class = '^' + inner = inner[1:] + + char_class += re.escape(inner) + pat += '[%s]' % (char_class,) + + # Skip to the end ] + i = inner_i + else: + pat += re.escape(char) + i += 1 + + # Join each chunk with the dir separator + if not last_chunk: + pat += sep + + pat += r'\Z' + return re.compile(pat, flags=re.MULTILINE|re.DOTALL) + + +class InfoCommon: + tag_build = None + tag_date = None + + @property + def name(self): + return safe_name(self.distribution.get_name()) + + def tagged_version(self): + version = self.distribution.get_version() + # egg_info may be called more than once for a distribution, + # in which case the version string already contains all tags. 
+ if self.vtags and version.endswith(self.vtags): + return safe_version(version) + return safe_version(version + self.vtags) + + def tags(self): + version = '' + if self.tag_build: + version += self.tag_build + if self.tag_date: + version += time.strftime("-%Y%m%d") + return version + vtags = property(tags) + + +class egg_info(InfoCommon, Command): description = "create a distribution's .egg-info directory" user_options = [ ('egg-base=', 'e', "directory containing .egg-info directories" " (default: top of the source tree)"), - ('tag-svn-revision', 'r', - "Add subversion revision ID to version number"), ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"), ('tag-build=', 'b', "Specify explicit tag to add to version number"), - ('no-svn-revision', 'R', - "Don't add subversion revision ID [default]"), ('no-date', 'D', "Don't include date stamp [default]"), ] - boolean_options = ['tag-date', 'tag-svn-revision'] - negative_opt = {'no-svn-revision': 'tag-svn-revision', - 'no-date': 'tag-date'} + boolean_options = ['tag-date'] + negative_opt = { + 'no-date': 'tag-date', + } def initialize_options(self): - self.egg_name = None - self.egg_version = None self.egg_base = None + self.egg_name = None self.egg_info = None - self.tag_build = None - self.tag_svn_revision = 0 - self.tag_date = 0 + self.egg_version = None self.broken_egg_info = False - self.vtags = None + + #################################### + # allow the 'tag_svn_revision' to be detected and + # set, supporting sdists built on older Setuptools. + @property + def tag_svn_revision(self): + pass + + @tag_svn_revision.setter + def tag_svn_revision(self, value): + pass + #################################### def save_version_info(self, filename): - values = dict( - egg_info=dict( - tag_svn_revision=0, - tag_date=0, - tag_build=self.tags(), - ) - ) - edit_config(filename, values) + """ + Materialize the value of date into the + build tag. 
Install build keys in a deterministic order + to avoid arbitrary reordering on subsequent builds. + """ + egg_info = collections.OrderedDict() + # follow the order these keys would have been added + # when PYTHONHASHSEED=0 + egg_info['tag_build'] = self.tags() + egg_info['tag_date'] = 0 + edit_config(filename, dict(egg_info=egg_info)) def finalize_options(self): - self.egg_name = safe_name(self.distribution.get_name()) - self.vtags = self.tags() + # Note: we need to capture the current value returned + # by `self.tagged_version()`, so we can later update + # `self.distribution.metadata.version` without + # repercussions. + self.egg_name = self.name self.egg_version = self.tagged_version() - parsed_version = parse_version(self.egg_version) try: @@ -162,16 +279,9 @@ class egg_info(Command): if not self.dry_run: os.unlink(filename) - def tagged_version(self): - version = self.distribution.get_version() - # egg_info may be called more than once for a distribution, - # in which case the version string already contains all tags. 
- if self.vtags and version.endswith(self.vtags): - return safe_version(version) - return safe_version(version + self.vtags) - def run(self): self.mkpath(self.egg_info) + os.utime(self.egg_info, None) installer = self.distribution.fetch_build_egg for ep in iter_entry_points('egg_info.writers'): ep.require(installer=installer) @@ -185,22 +295,6 @@ class egg_info(Command): self.find_sources() - def tags(self): - version = '' - if self.tag_build: - version += self.tag_build - if self.tag_svn_revision: - version += '-r%s' % self.get_svn_revision() - if self.tag_date: - version += time.strftime("-%Y%m%d") - return version - - @staticmethod - def get_svn_revision(): - if 'svn_utils' not in globals(): - return "0" - return str(svn_utils.SvnInfo.load(os.curdir).get_revision()) - def find_sources(self): """Generate SOURCES.txt manifest file""" manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") @@ -226,7 +320,155 @@ class egg_info(Command): class FileList(_FileList): - """File list that accepts only existing, platform-independent paths""" + # Implementations of the various MANIFEST.in commands + + def process_template_line(self, line): + # Parse the line: split it up, make sure the right number of words + # is there, and return the relevant words. 'action' is always + # defined: it's the first word of the line. Which of the other + # three are defined depends on the action; it'll be either + # patterns, (dir and patterns), or (dir_pattern). + (action, patterns, dir, dir_pattern) = self._parse_template_line(line) + + # OK, now we know that the action is valid and we have the + # right number of words on the line for that action -- so we + # can proceed with minimal error-checking. 
+ if action == 'include': + self.debug_print("include " + ' '.join(patterns)) + for pattern in patterns: + if not self.include(pattern): + log.warn("warning: no files found matching '%s'", pattern) + + elif action == 'exclude': + self.debug_print("exclude " + ' '.join(patterns)) + for pattern in patterns: + if not self.exclude(pattern): + log.warn(("warning: no previously-included files " + "found matching '%s'"), pattern) + + elif action == 'global-include': + self.debug_print("global-include " + ' '.join(patterns)) + for pattern in patterns: + if not self.global_include(pattern): + log.warn(("warning: no files found matching '%s' " + "anywhere in distribution"), pattern) + + elif action == 'global-exclude': + self.debug_print("global-exclude " + ' '.join(patterns)) + for pattern in patterns: + if not self.global_exclude(pattern): + log.warn(("warning: no previously-included files matching " + "'%s' found anywhere in distribution"), + pattern) + + elif action == 'recursive-include': + self.debug_print("recursive-include %s %s" % + (dir, ' '.join(patterns))) + for pattern in patterns: + if not self.recursive_include(dir, pattern): + log.warn(("warning: no files found matching '%s' " + "under directory '%s'"), + pattern, dir) + + elif action == 'recursive-exclude': + self.debug_print("recursive-exclude %s %s" % + (dir, ' '.join(patterns))) + for pattern in patterns: + if not self.recursive_exclude(dir, pattern): + log.warn(("warning: no previously-included files matching " + "'%s' found under directory '%s'"), + pattern, dir) + + elif action == 'graft': + self.debug_print("graft " + dir_pattern) + if not self.graft(dir_pattern): + log.warn("warning: no directories found matching '%s'", + dir_pattern) + + elif action == 'prune': + self.debug_print("prune " + dir_pattern) + if not self.prune(dir_pattern): + log.warn(("no previously-included directories found " + "matching '%s'"), dir_pattern) + + else: + raise DistutilsInternalError( + "this cannot happen: invalid 
action '%s'" % action) + + def _remove_files(self, predicate): + """ + Remove all files from the file list that match the predicate. + Return True if any matching files were removed + """ + found = False + for i in range(len(self.files) - 1, -1, -1): + if predicate(self.files[i]): + self.debug_print(" removing " + self.files[i]) + del self.files[i] + found = True + return found + + def include(self, pattern): + """Include files that match 'pattern'.""" + found = [f for f in glob(pattern) if not os.path.isdir(f)] + self.extend(found) + return bool(found) + + def exclude(self, pattern): + """Exclude files that match 'pattern'.""" + match = translate_pattern(pattern) + return self._remove_files(match.match) + + def recursive_include(self, dir, pattern): + """ + Include all files anywhere in 'dir/' that match the pattern. + """ + full_pattern = os.path.join(dir, '**', pattern) + found = [f for f in glob(full_pattern, recursive=True) + if not os.path.isdir(f)] + self.extend(found) + return bool(found) + + def recursive_exclude(self, dir, pattern): + """ + Exclude any file anywhere in 'dir/' that match the pattern. + """ + match = translate_pattern(os.path.join(dir, '**', pattern)) + return self._remove_files(match.match) + + def graft(self, dir): + """Include all files from 'dir/'.""" + found = [ + item + for match_dir in glob(dir) + for item in distutils.filelist.findall(match_dir) + ] + self.extend(found) + return bool(found) + + def prune(self, dir): + """Filter out files from 'dir/'.""" + match = translate_pattern(os.path.join(dir, '**')) + return self._remove_files(match.match) + + def global_include(self, pattern): + """ + Include all files anywhere in the current directory that match the + pattern. This is very inefficient on large file trees. 
+ """ + if self.allfiles is None: + self.findall() + match = translate_pattern(os.path.join('**', pattern)) + found = [f for f in self.allfiles if match.match(f)] + self.extend(found) + return bool(found) + + def global_exclude(self, pattern): + """ + Exclude all files anywhere that match the pattern. + """ + match = translate_pattern(os.path.join('**', pattern)) + return self._remove_files(match.match) def append(self, item): if item.endswith('\r'): # Fix older sdists built on Windows @@ -289,7 +531,6 @@ class manifest_maker(sdist): self.filelist = FileList() if not os.path.exists(self.manifest): self.write_manifest() # it must exist so it'll get in the list - self.filelist.findall() self.add_defaults() if os.path.exists(self.template): self.read_template() @@ -314,10 +555,17 @@ class manifest_maker(sdist): msg = "writing manifest file '%s'" % self.manifest self.execute(write_file, (self.manifest, files), msg) - def warn(self, msg): # suppress missing-file warnings from sdist - if not msg.startswith("standard file not found:"): + def warn(self, msg): + if not self._should_suppress_warning(msg): sdist.warn(self, msg) + @staticmethod + def _should_suppress_warning(msg): + """ + suppress missing-file warnings from sdist + """ + return re.match(r"standard file .*not found", msg) + def add_defaults(self): sdist.add_defaults(self) self.filelist.append(self.template) @@ -328,38 +576,13 @@ class manifest_maker(sdist): elif os.path.exists(self.manifest): self.read_manifest() ei_cmd = self.get_finalized_command('egg_info') - self._add_egg_info(cmd=ei_cmd) - self.filelist.include_pattern("*", prefix=ei_cmd.egg_info) - - def _add_egg_info(self, cmd): - """ - Add paths for egg-info files for an external egg-base. - - The egg-info files are written to egg-base. If egg-base is - outside the current working directory, this method - searchs the egg-base directory for files to include - in the manifest. 
Uses distutils.filelist.findall (which is - really the version monkeypatched in by setuptools/__init__.py) - to perform the search. - - Since findall records relative paths, prefix the returned - paths with cmd.egg_base, so add_default's include_pattern call - (which is looking for the absolute cmd.egg_info) will match - them. - """ - if cmd.egg_base == os.curdir: - # egg-info files were already added by something else - return - - discovered = distutils.filelist.findall(cmd.egg_base) - resolved = (os.path.join(cmd.egg_base, path) for path in discovered) - self.filelist.allfiles.extend(resolved) + self.filelist.graft(ei_cmd.egg_info) def prune_file_list(self): build = self.get_finalized_command('build') base_dir = self.distribution.get_fullname() - self.filelist.exclude_pattern(None, prefix=build.build_base) - self.filelist.exclude_pattern(None, prefix=base_dir) + self.filelist.prune(build.build_base) + self.filelist.prune(base_dir) sep = re.escape(os.sep) self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep, is_regex=1) @@ -384,6 +607,7 @@ def write_pkg_info(cmd, basename, filename): metadata = cmd.distribution.metadata metadata.version, oldver = cmd.egg_version, metadata.version metadata.name, oldname = cmd.egg_name, metadata.name + try: # write unescaped data to PKG-INFO, so older pkg_resources # can still parse it @@ -407,7 +631,7 @@ def warn_depends_obsolete(cmd, basename, filename): def _write_requirements(stream, reqs): lines = yield_lines(reqs or ()) append_cr = lambda line: line + '\n' - lines = map(append_cr, lines) + lines = map(append_cr, sorted(lines)) stream.writelines(lines) @@ -423,7 +647,7 @@ def write_requirements(cmd, basename, filename): def write_setup_requirements(cmd, basename, filename): - data = StringIO() + data = io.StringIO() _write_requirements(data, cmd.distribution.setup_requires) cmd.write_or_delete_file("setup-requirements", filename, data.getvalue()) diff --git 
a/Shared/lib/python3.4/site-packages/setuptools/command/install.py b/Shared/lib/python3.4/site-packages/setuptools/command/install.py index d2bca2e..31a5ddb 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/install.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/install.py @@ -8,7 +8,7 @@ import distutils.command.install as orig import setuptools # Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for -# now. See https://bitbucket.org/pypa/setuptools/issue/199/ +# now. See https://github.com/pypa/setuptools/issues/199/ _install = orig.install diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/install_egg_info.py b/Shared/lib/python3.4/site-packages/setuptools/command/install_egg_info.py index ae0325d..5f405bc 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/install_egg_info.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/install_egg_info.py @@ -1,14 +1,13 @@ from distutils import log, dir_util import os, sys -from setuptools.extern.six.moves import map - from setuptools import Command +from setuptools import namespaces from setuptools.archive_util import unpack_archive import pkg_resources -class install_egg_info(Command): +class install_egg_info(namespaces.Installer, Command): """Install an .egg-info directory for the package""" description = "Install an .egg-info directory for the package" @@ -81,58 +80,3 @@ class install_egg_info(Command): return dst unpack_archive(self.source, self.target, skimmer) - - def install_namespaces(self): - nsp = self._get_all_ns_packages() - if not nsp: - return - filename, ext = os.path.splitext(self.target) - filename += '-nspkg.pth' - self.outputs.append(filename) - log.info("Installing %s", filename) - lines = map(self._gen_nspkg_line, nsp) - - if self.dry_run: - # always generate the lines, even in dry run - list(lines) - return - - with open(filename, 'wt') as f: - f.writelines(lines) - - _nspkg_tmpl = ( - "import sys, types, 
os", - "p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)", - "ie = os.path.exists(os.path.join(p,'__init__.py'))", - "m = not ie and " - "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))", - "mp = (m or []) and m.__dict__.setdefault('__path__',[])", - "(p not in mp) and mp.append(p)", - ) - "lines for the namespace installer" - - _nspkg_tmpl_multi = ( - 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', - ) - "additional line(s) when a parent package is indicated" - - @classmethod - def _gen_nspkg_line(cls, pkg): - # ensure pkg is not a unicode string under Python 2.7 - pkg = str(pkg) - pth = tuple(pkg.split('.')) - tmpl_lines = cls._nspkg_tmpl - parent, sep, child = pkg.rpartition('.') - if parent: - tmpl_lines += cls._nspkg_tmpl_multi - return ';'.join(tmpl_lines) % locals() + '\n' - - def _get_all_ns_packages(self): - """Return sorted list of all package namespaces""" - nsp = set() - for pkg in self.distribution.namespace_packages or []: - pkg = pkg.split('.') - while pkg: - nsp.add('.'.join(pkg)) - pkg.pop() - return sorted(nsp) diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/install_lib.py b/Shared/lib/python3.4/site-packages/setuptools/command/install_lib.py index 696b776..578e002 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/install_lib.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/install_lib.py @@ -4,6 +4,7 @@ import imp from itertools import product, starmap import distutils.command.install_lib as orig + class install_lib(orig.install_lib): """Don't add compiled flags to filenames of non-Python files""" diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/install_scripts.py b/Shared/lib/python3.4/site-packages/setuptools/command/install_scripts.py index be66cb2..1623427 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/install_scripts.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/install_scripts.py @@ -1,6 +1,7 @@ from 
distutils import log import distutils.command.install_scripts as orig import os +import sys from pkg_resources import Distribution, PathMetadata, ensure_directory @@ -37,6 +38,10 @@ class install_scripts(orig.install_scripts): if is_wininst: exec_param = "python.exe" writer = ei.WindowsScriptWriter + if exec_param == sys.executable: + # In case the path to the Python executable contains a space, wrap + # it so it's not split up. + exec_param = [exec_param] # resolve the writer to the environment writer = writer.best() cmd = writer.command_spec_class.best().from_param(exec_param) diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/py36compat.py b/Shared/lib/python3.4/site-packages/setuptools/command/py36compat.py new file mode 100644 index 0000000..61063e7 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/command/py36compat.py @@ -0,0 +1,136 @@ +import os +from glob import glob +from distutils.util import convert_path +from distutils.command import sdist + +from setuptools.extern.six.moves import filter + + +class sdist_add_defaults: + """ + Mix-in providing forward-compatibility for functionality as found in + distutils on Python 3.7. + + Do not edit the code in this class except to update functionality + as implemented in distutils. Instead, override in the subclass. + """ + + def add_defaults(self): + """Add all the default files to self.filelist: + - README or README.txt + - setup.py + - test/test*.py + - all pure Python modules mentioned in setup script + - all files pointed by package_data (build_py) + - all files defined in data_files. + - all files defined as scripts. + - all C sources listed as part of extensions or C libraries + in the setup script (doesn't catch C headers!) + Warns if (README or README.txt) or setup.py are missing; everything + else is optional. 
+ """ + self._add_defaults_standards() + self._add_defaults_optional() + self._add_defaults_python() + self._add_defaults_data_files() + self._add_defaults_ext() + self._add_defaults_c_libs() + self._add_defaults_scripts() + + @staticmethod + def _cs_path_exists(fspath): + """ + Case-sensitive path existence check + + >>> sdist_add_defaults._cs_path_exists(__file__) + True + >>> sdist_add_defaults._cs_path_exists(__file__.upper()) + False + """ + if not os.path.exists(fspath): + return False + # make absolute so we always have a directory + abspath = os.path.abspath(fspath) + directory, filename = os.path.split(abspath) + return filename in os.listdir(directory) + + def _add_defaults_standards(self): + standards = [self.READMES, self.distribution.script_name] + for fn in standards: + if isinstance(fn, tuple): + alts = fn + got_it = False + for fn in alts: + if self._cs_path_exists(fn): + got_it = True + self.filelist.append(fn) + break + + if not got_it: + self.warn("standard file not found: should have one of " + + ', '.join(alts)) + else: + if self._cs_path_exists(fn): + self.filelist.append(fn) + else: + self.warn("standard file '%s' not found" % fn) + + def _add_defaults_optional(self): + optional = ['test/test*.py', 'setup.cfg'] + for pattern in optional: + files = filter(os.path.isfile, glob(pattern)) + self.filelist.extend(files) + + def _add_defaults_python(self): + # build_py is used to get: + # - python modules + # - files defined in package_data + build_py = self.get_finalized_command('build_py') + + # getting python files + if self.distribution.has_pure_modules(): + self.filelist.extend(build_py.get_source_files()) + + # getting package_data files + # (computed in build_py.data_files by build_py.finalize_options) + for pkg, src_dir, build_dir, filenames in build_py.data_files: + for filename in filenames: + self.filelist.append(os.path.join(src_dir, filename)) + + def _add_defaults_data_files(self): + # getting distribution.data_files + if 
self.distribution.has_data_files(): + for item in self.distribution.data_files: + if isinstance(item, str): + # plain file + item = convert_path(item) + if os.path.isfile(item): + self.filelist.append(item) + else: + # a (dirname, filenames) tuple + dirname, filenames = item + for f in filenames: + f = convert_path(f) + if os.path.isfile(f): + self.filelist.append(f) + + def _add_defaults_ext(self): + if self.distribution.has_ext_modules(): + build_ext = self.get_finalized_command('build_ext') + self.filelist.extend(build_ext.get_source_files()) + + def _add_defaults_c_libs(self): + if self.distribution.has_c_libraries(): + build_clib = self.get_finalized_command('build_clib') + self.filelist.extend(build_clib.get_source_files()) + + def _add_defaults_scripts(self): + if self.distribution.has_scripts(): + build_scripts = self.get_finalized_command('build_scripts') + self.filelist.extend(build_scripts.get_source_files()) + + +if hasattr(sdist.sdist, '_add_defaults_standards'): + # disable the functionality already available upstream + class sdist_add_defaults: + pass diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/register.py b/Shared/lib/python3.4/site-packages/setuptools/command/register.py index 8d6336a..98bc015 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/register.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/register.py @@ -1,3 +1,4 @@ +from distutils import log import distutils.command.register as orig @@ -5,6 +6,13 @@ class register(orig.register): __doc__ = orig.register.__doc__ def run(self): - # Make sure that we are using valid current name/version info - self.run_command('egg_info') - orig.register.run(self) + try: + # Make sure that we are using valid current name/version info + self.run_command('egg_info') + orig.register.run(self) + finally: + self.announce( + "WARNING: Registering is deprecated, use twine to " + "upload instead (https://pypi.org/p/twine/)", + log.WARN + ) diff --git 
a/Shared/lib/python3.4/site-packages/setuptools/command/rotate.py b/Shared/lib/python3.4/site-packages/setuptools/command/rotate.py index 804f962..b89353f 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/rotate.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/rotate.py @@ -2,6 +2,7 @@ from distutils.util import convert_path from distutils import log from distutils.errors import DistutilsOptionError import os +import shutil from setuptools.extern import six @@ -59,4 +60,7 @@ class rotate(Command): for (t, f) in files: log.info("Deleting %s", f) if not self.dry_run: - os.unlink(f) + if os.path.isdir(f): + shutil.rmtree(f) + else: + os.unlink(f) diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/sdist.py b/Shared/lib/python3.4/site-packages/setuptools/command/sdist.py index 6640d4e..bcfae4d 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/sdist.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/sdist.py @@ -1,20 +1,19 @@ -from glob import glob from distutils import log import distutils.command.sdist as orig import os import sys import io +import contextlib from setuptools.extern import six -from setuptools.utils import cs_path_exists +from .py36compat import sdist_add_defaults import pkg_resources -READMES = 'README', 'README.rst', 'README.txt' - _default_revctrl = list + def walk_revctrl(dirname=''): """Find all files under revision control""" for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): @@ -22,7 +21,7 @@ def walk_revctrl(dirname=''): yield item -class sdist(orig.sdist): +class sdist(sdist_add_defaults, orig.sdist): """Smart sdist that finds anything supported by revision control""" user_options = [ @@ -38,6 +37,9 @@ class sdist(orig.sdist): negative_opt = {} + README_EXTENSIONS = ['', '.rst', '.txt', '.md'] + READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS) + def run(self): self.run_command('egg_info') ei_cmd = 
self.get_finalized_command('egg_info') @@ -49,13 +51,6 @@ class sdist(orig.sdist): for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) - # Call check_metadata only if no 'check' command - # (distutils <= 2.6) - import distutils.command - - if 'check' not in distutils.command.__all__: - self.check_metadata() - self.make_distribution() dist_files = getattr(self.distribution, 'dist_files', []) @@ -64,6 +59,45 @@ class sdist(orig.sdist): if data not in dist_files: dist_files.append(data) + def initialize_options(self): + orig.sdist.initialize_options(self) + + self._default_to_gztar() + + def _default_to_gztar(self): + # only needed on Python prior to 3.6. + if sys.version_info >= (3, 6, 0, 'beta', 1): + return + self.formats = ['gztar'] + + def make_distribution(self): + """ + Workaround for #516 + """ + with self._remove_os_link(): + orig.sdist.make_distribution(self) + + @staticmethod + @contextlib.contextmanager + def _remove_os_link(): + """ + In a context, remove and restore os.link if it exists + """ + + class NoValue: + pass + + orig_val = getattr(os, 'link', NoValue) + try: + del os.link + except Exception: + pass + try: + yield + finally: + if orig_val is not NoValue: + setattr(os, 'link', orig_val) + def __read_template_hack(self): # This grody hack closes the template file (MANIFEST.in) if an # exception occurs during read_template. @@ -71,7 +105,7 @@ class sdist(orig.sdist): # file. 
try: orig.sdist.read_template(self) - except: + except Exception: _, _, tb = sys.exc_info() tb.tb_next.tb_frame.f_locals['template'].close() raise @@ -87,35 +121,8 @@ class sdist(orig.sdist): if has_leaky_handle: read_template = __read_template_hack - def add_defaults(self): - standards = [READMES, - self.distribution.script_name] - for fn in standards: - if isinstance(fn, tuple): - alts = fn - got_it = 0 - for fn in alts: - if cs_path_exists(fn): - got_it = 1 - self.filelist.append(fn) - break - - if not got_it: - self.warn("standard file not found: should have one of " + - ', '.join(alts)) - else: - if cs_path_exists(fn): - self.filelist.append(fn) - else: - self.warn("standard file '%s' not found" % fn) - - optional = ['test/test*.py', 'setup.cfg'] - for pattern in optional: - files = list(filter(cs_path_exists, glob(pattern))) - if files: - self.filelist.extend(files) - - # getting python files + def _add_defaults_python(self): + """getting python files""" if self.distribution.has_pure_modules(): build_py = self.get_finalized_command('build_py') self.filelist.extend(build_py.get_source_files()) @@ -128,26 +135,23 @@ class sdist(orig.sdist): self.filelist.extend([os.path.join(src_dir, filename) for filename in filenames]) - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - self.filelist.extend(build_ext.get_source_files()) - - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.filelist.extend(build_clib.get_source_files()) - - if self.distribution.has_scripts(): - build_scripts = self.get_finalized_command('build_scripts') - self.filelist.extend(build_scripts.get_source_files()) + def _add_defaults_data_files(self): + try: + if six.PY2: + sdist_add_defaults._add_defaults_data_files(self) + else: + super()._add_defaults_data_files() + except TypeError: + log.warn("data_files contains unexpected objects") def check_readme(self): - for f in READMES: + for f in 
self.READMES: if os.path.exists(f): return else: self.warn( "standard file not found: should have one of " + - ', '.join(READMES) + ', '.join(self.READMES) ) def make_release_tree(self, base_dir, files): @@ -179,7 +183,7 @@ class sdist(orig.sdist): distribution. """ log.info("reading manifest file '%s'", self.manifest) - manifest = open(self.manifest, 'rbU') + manifest = open(self.manifest, 'rb') for line in manifest: # The manifest must contain UTF-8. See #303. if six.PY3: diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/setopt.py b/Shared/lib/python3.4/site-packages/setuptools/command/setopt.py index 7f332be..7e57cc0 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/setopt.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/setopt.py @@ -8,7 +8,6 @@ from setuptools.extern.six.moves import configparser from setuptools import Command - __all__ = ['config_file', 'edit_config', 'option_base', 'setopt'] diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/test.py b/Shared/lib/python3.4/site-packages/setuptools/command/test.py index 371e913..dde0118 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/test.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/test.py @@ -1,18 +1,30 @@ -from distutils.errors import DistutilsOptionError -from unittest import TestLoader +import os +import operator import sys +import contextlib +import itertools +import unittest +from distutils.errors import DistutilsError, DistutilsOptionError +from distutils import log +from unittest import TestLoader from setuptools.extern import six -from setuptools.extern.six.moves import map +from setuptools.extern.six.moves import map, filter from pkg_resources import (resource_listdir, resource_exists, normalize_path, - working_set, _namespace_packages, + working_set, _namespace_packages, evaluate_marker, add_activation_listener, require, EntryPoint) from setuptools import Command -from setuptools.py31compat import 
unittest_main + +__metaclass__ = type class ScanningLoader(TestLoader): + + def __init__(self): + TestLoader.__init__(self) + self._visited = set() + def loadTestsFromModule(self, module, pattern=None): """Return a suite of all tests cases contained in the given module @@ -20,6 +32,10 @@ class ScanningLoader(TestLoader): If the module has an ``additional_tests`` function, call it and add the return value to the tests. """ + if module in self._visited: + return None + self._visited.add(module) + tests = [] tests.append(TestLoader.loadTestsFromModule(self, module)) @@ -44,7 +60,7 @@ class ScanningLoader(TestLoader): # adapted from jaraco.classes.properties:NonDataProperty -class NonDataProperty(object): +class NonDataProperty: def __init__(self, fget): self.fget = fget @@ -62,7 +78,7 @@ class test(Command): user_options = [ ('test-module=', 'm', "Run 'test_suite' in specified module"), ('test-suite=', 's', - "Test suite to run (e.g. 'some_module.test_suite')"), + "Run single test, case or suite (e.g. 'module.test_suite')"), ('test-runner=', 'r', "Test runner to use"), ] @@ -96,12 +112,22 @@ class test(Command): return list(self._test_args()) def _test_args(self): + if not self.test_suite and sys.version_info >= (2, 7): + yield 'discover' if self.verbose: yield '--verbose' if self.test_suite: yield self.test_suite def with_project_on_sys_path(self, func): + """ + Backward compatibility for project_on_sys_path context. 
+ """ + with self.project_on_sys_path(): + func() + + @contextlib.contextmanager + def project_on_sys_path(self, include_dists=[]): with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False) if with_2to3: @@ -133,30 +159,73 @@ class test(Command): old_modules = sys.modules.copy() try: - sys.path.insert(0, normalize_path(ei_cmd.egg_base)) + project_path = normalize_path(ei_cmd.egg_base) + sys.path.insert(0, project_path) working_set.__init__() add_activation_listener(lambda dist: dist.activate()) require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version)) - func() + with self.paths_on_pythonpath([project_path]): + yield finally: sys.path[:] = old_path sys.modules.clear() sys.modules.update(old_modules) working_set.__init__() + @staticmethod + @contextlib.contextmanager + def paths_on_pythonpath(paths): + """ + Add the indicated paths to the head of the PYTHONPATH environment + variable so that subprocesses will also see the packages at + these paths. + + Do this in a context that restores the value on exit. + """ + nothing = object() + orig_pythonpath = os.environ.get('PYTHONPATH', nothing) + current_pythonpath = os.environ.get('PYTHONPATH', '') + try: + prefix = os.pathsep.join(paths) + to_join = filter(None, [prefix, current_pythonpath]) + new_path = os.pathsep.join(to_join) + if new_path: + os.environ['PYTHONPATH'] = new_path + yield + finally: + if orig_pythonpath is nothing: + os.environ.pop('PYTHONPATH', None) + else: + os.environ['PYTHONPATH'] = orig_pythonpath + + @staticmethod + def install_dists(dist): + """ + Install the requirements indicated by self.distribution and + return an iterable of the dists that were built. 
+ """ + ir_d = dist.fetch_build_eggs(dist.install_requires) + tr_d = dist.fetch_build_eggs(dist.tests_require or []) + er_d = dist.fetch_build_eggs( + v for k, v in dist.extras_require.items() + if k.startswith(':') and evaluate_marker(k[1:]) + ) + return itertools.chain(ir_d, tr_d, er_d) + def run(self): - if self.distribution.install_requires: - self.distribution.fetch_build_eggs( - self.distribution.install_requires) - if self.distribution.tests_require: - self.distribution.fetch_build_eggs(self.distribution.tests_require) + installed_dists = self.install_dists(self.distribution) cmd = ' '.join(self._argv) if self.dry_run: self.announce('skipping "%s" (dry run)' % cmd) - else: - self.announce('running "%s"' % cmd) - self.with_project_on_sys_path(self.run_tests) + return + + self.announce('running "%s"' % cmd) + + paths = map(operator.attrgetter('location'), installed_dists) + with self.paths_on_pythonpath(paths): + with self.project_on_sys_path(): + self.run_tests() def run_tests(self): # Purge modules under test from sys.modules. 
The test loader will @@ -174,11 +243,16 @@ class test(Command): del_modules.append(name) list(map(sys.modules.__delitem__, del_modules)) - unittest_main( + test = unittest.main( None, None, self._argv, testLoader=self._resolve_as_ep(self.test_loader), testRunner=self._resolve_as_ep(self.test_runner), + exit=False, ) + if not test.result.wasSuccessful(): + msg = 'Test failed: %s' % test.result + self.announce(msg, log.ERROR) + raise DistutilsError(msg) @property def _argv(self): diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/upload.py b/Shared/lib/python3.4/site-packages/setuptools/command/upload.py index 08c20ba..72f24d8 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/upload.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/upload.py @@ -1,15 +1,37 @@ +import getpass +from distutils import log from distutils.command import upload as orig class upload(orig.upload): """ - Override default upload behavior to look up password - in the keyring if available. + Override default upload behavior to obtain password + in a variety of different ways. """ + def run(self): + try: + orig.upload.run(self) + finally: + self.announce( + "WARNING: Uploading via this command is deprecated, use twine " + "to upload instead (https://pypi.org/p/twine/)", + log.WARN + ) + def finalize_options(self): orig.upload.finalize_options(self) - self.password or self._load_password_from_keyring() + self.username = ( + self.username or + getpass.getuser() + ) + # Attempt to obtain password. Short circuit evaluation at the first + # sign of success. 
+ self.password = ( + self.password or + self._load_password_from_keyring() or + self._prompt_for_password() + ) def _load_password_from_keyring(self): """ @@ -17,7 +39,15 @@ class upload(orig.upload): """ try: keyring = __import__('keyring') - self.password = keyring.get_password(self.repository, - self.username) + return keyring.get_password(self.repository, self.username) except Exception: pass + + def _prompt_for_password(self): + """ + Prompt for a password on the tty. Suppress Exceptions. + """ + try: + return getpass.getpass() + except (Exception, KeyboardInterrupt): + pass diff --git a/Shared/lib/python3.4/site-packages/setuptools/command/upload_docs.py b/Shared/lib/python3.4/site-packages/setuptools/command/upload_docs.py index f887b47..07aa564 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/command/upload_docs.py +++ b/Shared/lib/python3.4/site-packages/setuptools/command/upload_docs.py @@ -13,6 +13,8 @@ import socket import zipfile import tempfile import shutil +import itertools +import functools from setuptools.extern import six from setuptools.extern.six.moves import http_client, urllib @@ -21,18 +23,16 @@ from pkg_resources import iter_entry_points from .upload import upload -errors = 'surrogateescape' if six.PY3 else 'strict' - - -# This is not just a replacement for byte literals -# but works as a general purpose encoder -def b(s, encoding='utf-8'): - if isinstance(s, six.text_type): - return s.encode(encoding, errors) - return s +def _encode(s): + errors = 'surrogateescape' if six.PY3 else 'strict' + return s.encode('utf-8', errors) class upload_docs(upload): + # override the default repository as upload_docs isn't + # supported by Warehouse (and won't be). 
+ DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/' + description = 'Upload documentation to PyPI' user_options = [ @@ -68,6 +68,8 @@ class upload_docs(upload): else: self.ensure_dirname('upload_dir') self.target_dir = self.upload_dir + if 'pypi.python.org' in self.repository: + log.warn("Upload_docs command is deprecated. Use RTD instead.") self.announce('Using upload directory %s' % self.target_dir) def create_zipfile(self, filename): @@ -76,9 +78,8 @@ class upload_docs(upload): self.mkpath(self.target_dir) # just in case for root, dirs, files in os.walk(self.target_dir): if root == self.target_dir and not files: - raise DistutilsOptionError( - "no files found in upload directory '%s'" - % self.target_dir) + tmpl = "no files found in upload directory '%s'" + raise DistutilsOptionError(tmpl % self.target_dir) for name in files: full = os.path.join(root, name) relative = root[len(self.target_dir):].lstrip(os.path.sep) @@ -101,10 +102,48 @@ class upload_docs(upload): finally: shutil.rmtree(tmp_dir) + @staticmethod + def _build_part(item, sep_boundary): + key, values = item + title = '\nContent-Disposition: form-data; name="%s"' % key + # handle multiple entries for the same name + if not isinstance(values, list): + values = [values] + for value in values: + if isinstance(value, tuple): + title += '; filename="%s"' % value[0] + value = value[1] + else: + value = _encode(value) + yield sep_boundary + yield _encode(title) + yield b"\n\n" + yield value + if value and value[-1:] == b'\r': + yield b'\n' # write an extra newline (lurve Macs) + + @classmethod + def _build_multipart(cls, data): + """ + Build up the MIME payload for the POST data + """ + boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' + sep_boundary = b'\n--' + boundary + end_boundary = sep_boundary + b'--' + end_items = end_boundary, b"\n", + builder = functools.partial( + cls._build_part, + sep_boundary=sep_boundary, + ) + part_groups = map(builder, data.items()) + parts = 
itertools.chain.from_iterable(part_groups) + body_items = itertools.chain(parts, end_items) + content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii') + return b''.join(body_items), content_type + def upload_file(self, filename): - f = open(filename, 'rb') - content = f.read() - f.close() + with open(filename, 'rb') as f: + content = f.read() meta = self.distribution.metadata data = { ':action': 'doc_upload', @@ -112,40 +151,16 @@ class upload_docs(upload): 'content': (os.path.basename(filename), content), } # set up the authentication - credentials = b(self.username + ':' + self.password) + credentials = _encode(self.username + ':' + self.password) credentials = standard_b64encode(credentials) if six.PY3: credentials = credentials.decode('ascii') auth = "Basic " + credentials - # Build up the MIME payload for the POST data - boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = b('\n--') + b(boundary) - end_boundary = sep_boundary + b('--') - body = [] - for key, values in six.iteritems(data): - title = '\nContent-Disposition: form-data; name="%s"' % key - # handle multiple entries for the same name - if not isinstance(values, list): - values = [values] - for value in values: - if type(value) is tuple: - title += '; filename="%s"' % value[0] - value = value[1] - else: - value = b(value) - body.append(sep_boundary) - body.append(b(title)) - body.append(b("\n\n")) - body.append(value) - if value and value[-1:] == b('\r'): - body.append(b('\n')) # write an extra newline (lurve Macs) - body.append(end_boundary) - body.append(b("\n")) - body = b('').join(body) + body, ct = self._build_multipart(data) - self.announce("Submitting documentation to %s" % (self.repository), - log.INFO) + msg = "Submitting documentation to %s" % (self.repository) + self.announce(msg, log.INFO) # build the Request # We can't use urllib2 since we need to send the Basic @@ -164,7 +179,7 @@ class upload_docs(upload): try: conn.connect() 
conn.putrequest("POST", url) - content_type = 'multipart/form-data; boundary=%s' % boundary + content_type = ct conn.putheader('Content-type', content_type) conn.putheader('Content-length', str(len(body))) conn.putheader('Authorization', auth) @@ -176,16 +191,16 @@ class upload_docs(upload): r = conn.getresponse() if r.status == 200: - self.announce('Server response (%s): %s' % (r.status, r.reason), - log.INFO) + msg = 'Server response (%s): %s' % (r.status, r.reason) + self.announce(msg, log.INFO) elif r.status == 301: location = r.getheader('Location') if location is None: location = 'https://pythonhosted.org/%s/' % meta.get_name() - self.announce('Upload successful. Visit %s' % location, - log.INFO) + msg = 'Upload successful. Visit %s' % location + self.announce(msg, log.INFO) else: - self.announce('Upload failed (%s): %s' % (r.status, r.reason), - log.ERROR) + msg = 'Upload failed (%s): %s' % (r.status, r.reason) + self.announce(msg, log.ERROR) if self.show_response: print('-' * 75, r.read(), '-' * 75) diff --git a/Shared/lib/python3.4/site-packages/setuptools/config.py b/Shared/lib/python3.4/site-packages/setuptools/config.py new file mode 100644 index 0000000..73a3bf7 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/config.py @@ -0,0 +1,608 @@ +from __future__ import absolute_import, unicode_literals +import io +import os +import sys +from collections import defaultdict +from functools import partial +from importlib import import_module + +from distutils.errors import DistutilsOptionError, DistutilsFileError +from setuptools.extern.packaging.version import LegacyVersion, parse +from setuptools.extern.six import string_types, PY3 + + +__metaclass__ = type + + +def read_configuration( + filepath, find_others=False, ignore_option_errors=False): + """Read given configuration file and returns options from it as a dict. + + :param str|unicode filepath: Path to configuration file + to get options from. 
+ + :param bool find_others: Whether to search for other configuration files + which could be on in various places. + + :param bool ignore_option_errors: Whether to silently ignore + options, values of which could not be resolved (e.g. due to exceptions + in directives such as file:, attr:, etc.). + If False exceptions are propagated as expected. + + :rtype: dict + """ + from setuptools.dist import Distribution, _Distribution + + filepath = os.path.abspath(filepath) + + if not os.path.isfile(filepath): + raise DistutilsFileError( + 'Configuration file %s does not exist.' % filepath) + + current_directory = os.getcwd() + os.chdir(os.path.dirname(filepath)) + + try: + dist = Distribution() + + filenames = dist.find_config_files() if find_others else [] + if filepath not in filenames: + filenames.append(filepath) + + _Distribution.parse_config_files(dist, filenames=filenames) + + handlers = parse_configuration( + dist, dist.command_options, + ignore_option_errors=ignore_option_errors) + + finally: + os.chdir(current_directory) + + return configuration_to_dict(handlers) + + +def configuration_to_dict(handlers): + """Returns configuration data gathered by given handlers as a dict. + + :param list[ConfigHandler] handlers: Handlers list, + usually from parse_configuration() + + :rtype: dict + """ + config_dict = defaultdict(dict) + + for handler in handlers: + + obj_alias = handler.section_prefix + target_obj = handler.target_obj + + for option in handler.set_options: + getter = getattr(target_obj, 'get_%s' % option, None) + + if getter is None: + value = getattr(target_obj, option) + + else: + value = getter() + + config_dict[obj_alias][option] = value + + return config_dict + + +def parse_configuration( + distribution, command_options, ignore_option_errors=False): + """Performs additional parsing of configuration options + for a distribution. + + Returns a list of used option handlers. 
+ + :param Distribution distribution: + :param dict command_options: + :param bool ignore_option_errors: Whether to silently ignore + options, values of which could not be resolved (e.g. due to exceptions + in directives such as file:, attr:, etc.). + If False exceptions are propagated as expected. + :rtype: list + """ + options = ConfigOptionsHandler( + distribution, command_options, ignore_option_errors) + options.parse() + + meta = ConfigMetadataHandler( + distribution.metadata, command_options, ignore_option_errors, distribution.package_dir) + meta.parse() + + return meta, options + + +class ConfigHandler: + """Handles metadata supplied in configuration files.""" + + section_prefix = None + """Prefix for config sections handled by this handler. + Must be provided by class heirs. + + """ + + aliases = {} + """Options aliases. + For compatibility with various packages. E.g.: d2to1 and pbr. + Note: `-` in keys is replaced with `_` by config parser. + + """ + + def __init__(self, target_obj, options, ignore_option_errors=False): + sections = {} + + section_prefix = self.section_prefix + for section_name, section_options in options.items(): + if not section_name.startswith(section_prefix): + continue + + section_name = section_name.replace(section_prefix, '').strip('.') + sections[section_name] = section_options + + self.ignore_option_errors = ignore_option_errors + self.target_obj = target_obj + self.sections = sections + self.set_options = [] + + @property + def parsers(self): + """Metadata item name to parser function mapping.""" + raise NotImplementedError( + '%s must provide .parsers property' % self.__class__.__name__) + + def __setitem__(self, option_name, value): + unknown = tuple() + target_obj = self.target_obj + + # Translate alias into real name. 
+ option_name = self.aliases.get(option_name, option_name) + + current_value = getattr(target_obj, option_name, unknown) + + if current_value is unknown: + raise KeyError(option_name) + + if current_value: + # Already inhabited. Skipping. + return + + skip_option = False + parser = self.parsers.get(option_name) + if parser: + try: + value = parser(value) + + except Exception: + skip_option = True + if not self.ignore_option_errors: + raise + + if skip_option: + return + + setter = getattr(target_obj, 'set_%s' % option_name, None) + if setter is None: + setattr(target_obj, option_name, value) + else: + setter(value) + + self.set_options.append(option_name) + + @classmethod + def _parse_list(cls, value, separator=','): + """Represents value as a list. + + Value is split either by separator (defaults to comma) or by lines. + + :param value: + :param separator: List items separator character. + :rtype: list + """ + if isinstance(value, list): # _get_parser_compound case + return value + + if '\n' in value: + value = value.splitlines() + else: + value = value.split(separator) + + return [chunk.strip() for chunk in value if chunk.strip()] + + @classmethod + def _parse_dict(cls, value): + """Represents value as a dict. + + :param value: + :rtype: dict + """ + separator = '=' + result = {} + for line in cls._parse_list(value): + key, sep, val = line.partition(separator) + if sep != separator: + raise DistutilsOptionError( + 'Unable to parse option value to dict: %s' % value) + result[key.strip()] = val.strip() + + return result + + @classmethod + def _parse_bool(cls, value): + """Represents value as boolean. + + :param value: + :rtype: bool + """ + value = value.lower() + return value in ('1', 'true', 'yes') + + @classmethod + def _parse_file(cls, value): + """Represents value as a string, allowing including text + from nearest files using `file:` directive. + + Directive is sandboxed and won't reach anything outside + directory with setup.py. 
+ + Examples: + file: LICENSE + file: README.rst, CHANGELOG.md, src/file.txt + + :param str value: + :rtype: str + """ + include_directive = 'file:' + + if not isinstance(value, string_types): + return value + + if not value.startswith(include_directive): + return value + + spec = value[len(include_directive):] + filepaths = (os.path.abspath(path.strip()) for path in spec.split(',')) + return '\n'.join( + cls._read_file(path) + for path in filepaths + if (cls._assert_local(path) or True) + and os.path.isfile(path) + ) + + @staticmethod + def _assert_local(filepath): + if not filepath.startswith(os.getcwd()): + raise DistutilsOptionError( + '`file:` directive can not access %s' % filepath) + + @staticmethod + def _read_file(filepath): + with io.open(filepath, encoding='utf-8') as f: + return f.read() + + @classmethod + def _parse_attr(cls, value, package_dir=None): + """Represents value as a module attribute. + + Examples: + attr: package.attr + attr: package.module.attr + + :param str value: + :rtype: str + """ + attr_directive = 'attr:' + if not value.startswith(attr_directive): + return value + + attrs_path = value.replace(attr_directive, '').strip().split('.') + attr_name = attrs_path.pop() + + module_name = '.'.join(attrs_path) + module_name = module_name or '__init__' + + parent_path = os.getcwd() + if package_dir: + if attrs_path[0] in package_dir: + # A custom path was specified for the module we want to import + custom_path = package_dir[attrs_path[0]] + parts = custom_path.rsplit('/', 1) + if len(parts) > 1: + parent_path = os.path.join(os.getcwd(), parts[0]) + module_name = parts[1] + else: + module_name = custom_path + elif '' in package_dir: + # A custom parent directory was specified for all root modules + parent_path = os.path.join(os.getcwd(), package_dir['']) + sys.path.insert(0, parent_path) + try: + module = import_module(module_name) + value = getattr(module, attr_name) + + finally: + sys.path = sys.path[1:] + + return value + + @classmethod + 
def _get_parser_compound(cls, *parse_methods): + """Returns parser function to represents value as a list. + + Parses a value applying given methods one after another. + + :param parse_methods: + :rtype: callable + """ + def parse(value): + parsed = value + + for method in parse_methods: + parsed = method(parsed) + + return parsed + + return parse + + @classmethod + def _parse_section_to_dict(cls, section_options, values_parser=None): + """Parses section options into a dictionary. + + Optionally applies a given parser to values. + + :param dict section_options: + :param callable values_parser: + :rtype: dict + """ + value = {} + values_parser = values_parser or (lambda val: val) + for key, (_, val) in section_options.items(): + value[key] = values_parser(val) + return value + + def parse_section(self, section_options): + """Parses configuration file section. + + :param dict section_options: + """ + for (name, (_, value)) in section_options.items(): + try: + self[name] = value + + except KeyError: + pass # Keep silent for a new option may appear anytime. + + def parse(self): + """Parses configuration file items from one + or more related sections. + + """ + for section_name, section_options in self.sections.items(): + + method_postfix = '' + if section_name: # [section.option] variant + method_postfix = '_%s' % section_name + + section_parser_method = getattr( + self, + # Dots in section names are tranlsated into dunderscores. 
+ ('parse_section%s' % method_postfix).replace('.', '__'), + None) + + if section_parser_method is None: + raise DistutilsOptionError( + 'Unsupported distribution option section: [%s.%s]' % ( + self.section_prefix, section_name)) + + section_parser_method(section_options) + + +class ConfigMetadataHandler(ConfigHandler): + + section_prefix = 'metadata' + + aliases = { + 'home_page': 'url', + 'summary': 'description', + 'classifier': 'classifiers', + 'platform': 'platforms', + } + + strict_mode = False + """We need to keep it loose, to be partially compatible with + `pbr` and `d2to1` packages which also uses `metadata` section. + + """ + + def __init__(self, target_obj, options, ignore_option_errors=False, + package_dir=None): + super(ConfigMetadataHandler, self).__init__(target_obj, options, + ignore_option_errors) + self.package_dir = package_dir + + @property + def parsers(self): + """Metadata item name to parser function mapping.""" + parse_list = self._parse_list + parse_file = self._parse_file + parse_dict = self._parse_dict + + return { + 'platforms': parse_list, + 'keywords': parse_list, + 'provides': parse_list, + 'requires': parse_list, + 'obsoletes': parse_list, + 'classifiers': self._get_parser_compound(parse_file, parse_list), + 'license': parse_file, + 'description': parse_file, + 'long_description': parse_file, + 'version': self._parse_version, + 'project_urls': parse_dict, + } + + def _parse_version(self, value): + """Parses `version` option value. 
+ + :param value: + :rtype: str + + """ + version = self._parse_file(value) + + if version != value: + version = version.strip() + # Be strict about versions loaded from file because it's easy to + # accidentally include newlines and other unintended content + if isinstance(parse(version), LegacyVersion): + raise DistutilsOptionError('Version loaded from %s does not comply with PEP 440: %s' % ( + value, version + )) + return version + + version = self._parse_attr(value, self.package_dir) + + if callable(version): + version = version() + + if not isinstance(version, string_types): + if hasattr(version, '__iter__'): + version = '.'.join(map(str, version)) + else: + version = '%s' % version + + return version + + +class ConfigOptionsHandler(ConfigHandler): + + section_prefix = 'options' + + @property + def parsers(self): + """Metadata item name to parser function mapping.""" + parse_list = self._parse_list + parse_list_semicolon = partial(self._parse_list, separator=';') + parse_bool = self._parse_bool + parse_dict = self._parse_dict + + return { + 'zip_safe': parse_bool, + 'use_2to3': parse_bool, + 'include_package_data': parse_bool, + 'package_dir': parse_dict, + 'use_2to3_fixers': parse_list, + 'use_2to3_exclude_fixers': parse_list, + 'convert_2to3_doctests': parse_list, + 'scripts': parse_list, + 'eager_resources': parse_list, + 'dependency_links': parse_list, + 'namespace_packages': parse_list, + 'install_requires': parse_list_semicolon, + 'setup_requires': parse_list_semicolon, + 'tests_require': parse_list_semicolon, + 'packages': self._parse_packages, + 'entry_points': self._parse_file, + 'py_modules': parse_list, + } + + def _parse_packages(self, value): + """Parses `packages` option value. 
+ + :param value: + :rtype: list + """ + find_directives = ['find:', 'find_namespace:'] + trimmed_value = value.strip() + + if not trimmed_value in find_directives: + return self._parse_list(value) + + findns = trimmed_value == find_directives[1] + if findns and not PY3: + raise DistutilsOptionError('find_namespace: directive is unsupported on Python < 3.3') + + # Read function arguments from a dedicated section. + find_kwargs = self.parse_section_packages__find( + self.sections.get('packages.find', {})) + + if findns: + from setuptools import find_namespace_packages as find_packages + else: + from setuptools import find_packages + + return find_packages(**find_kwargs) + + def parse_section_packages__find(self, section_options): + """Parses `packages.find` configuration file section. + + To be used in conjunction with _parse_packages(). + + :param dict section_options: + """ + section_data = self._parse_section_to_dict( + section_options, self._parse_list) + + valid_keys = ['where', 'include', 'exclude'] + + find_kwargs = dict( + [(k, v) for k, v in section_data.items() if k in valid_keys and v]) + + where = find_kwargs.get('where') + if where is not None: + find_kwargs['where'] = where[0] # cast list to single val + + return find_kwargs + + def parse_section_entry_points(self, section_options): + """Parses `entry_points` configuration file section. + + :param dict section_options: + """ + parsed = self._parse_section_to_dict(section_options, self._parse_list) + self['entry_points'] = parsed + + def _parse_package_data(self, section_options): + parsed = self._parse_section_to_dict(section_options, self._parse_list) + + root = parsed.get('*') + if root: + parsed[''] = root + del parsed['*'] + + return parsed + + def parse_section_package_data(self, section_options): + """Parses `package_data` configuration file section. 
+ + :param dict section_options: + """ + self['package_data'] = self._parse_package_data(section_options) + + def parse_section_exclude_package_data(self, section_options): + """Parses `exclude_package_data` configuration file section. + + :param dict section_options: + """ + self['exclude_package_data'] = self._parse_package_data( + section_options) + + def parse_section_extras_require(self, section_options): + """Parses `extras_require` configuration file section. + + :param dict section_options: + """ + parse_list = partial(self._parse_list, separator=';') + self['extras_require'] = self._parse_section_to_dict( + section_options, parse_list) + + def parse_section_data_files(self, section_options): + """Parses `data_files` configuration file section. + + :param dict section_options: + """ + parsed = self._parse_section_to_dict(section_options, self._parse_list) + self['data_files'] = [(k, v) for k, v in parsed.items()] diff --git a/Shared/lib/python3.4/site-packages/setuptools/dep_util.py b/Shared/lib/python3.4/site-packages/setuptools/dep_util.py new file mode 100644 index 0000000..2931c13 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/dep_util.py @@ -0,0 +1,23 @@ +from distutils.dep_util import newer_group + +# yes, this is was almost entirely copy-pasted from +# 'newer_pairwise()', this is just another convenience +# function. +def newer_pairwise_group(sources_groups, targets): + """Walk both arguments in parallel, testing if each source group is newer + than its corresponding target. Returns a pair of lists (sources_groups, + targets) where sources is newer than target, according to the semantics + of 'newer_group()'. 
+ """ + if len(sources_groups) != len(targets): + raise ValueError("'sources_group' and 'targets' must be the same length") + + # build a pair of lists (sources_groups, targets) where source is newer + n_sources = [] + n_targets = [] + for i in range(len(sources_groups)): + if newer_group(sources_groups[i], targets[i]): + n_sources.append(sources_groups[i]) + n_targets.append(targets[i]) + + return n_sources, n_targets diff --git a/Shared/lib/python3.4/site-packages/setuptools/depends.py b/Shared/lib/python3.4/site-packages/setuptools/depends.py index 9f7c9a3..45e7052 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/depends.py +++ b/Shared/lib/python3.4/site-packages/setuptools/depends.py @@ -1,15 +1,17 @@ import sys import imp import marshal -from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN from distutils.version import StrictVersion +from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN + +from .py33compat import Bytecode -from setuptools.extern import six __all__ = [ 'Require', 'find_module', 'get_module_constant', 'extract_constant' ] + class Require: """A prerequisite to building or installing a distribution""" @@ -30,7 +32,7 @@ class Require: def full_name(self): """Return full package/distribution name, w/version""" if self.requested_version is not None: - return '%s-%s' % (self.name,self.requested_version) + return '%s-%s' % (self.name, self.requested_version) return self.name def version_ok(self, version): @@ -39,7 +41,6 @@ class Require: str(version) != "unknown" and version >= self.requested_version def get_version(self, paths=None, default="unknown"): - """Get version number of installed module, 'None', or 'default' Search 'paths' for module. If not found, return 'None'. 
If found, @@ -52,8 +53,9 @@ class Require: if self.attribute is None: try: - f,p,i = find_module(self.module,paths) - if f: f.close() + f, p, i = find_module(self.module, paths) + if f: + f.close() return default except ImportError: return None @@ -77,40 +79,6 @@ class Require: return self.version_ok(version) -def _iter_code(code): - - """Yield '(op,arg)' pair for each operation in code object 'code'""" - - from array import array - from dis import HAVE_ARGUMENT, EXTENDED_ARG - - bytes = array('b',code.co_code) - eof = len(code.co_code) - - ptr = 0 - extended_arg = 0 - - while ptr=HAVE_ARGUMENT: - - arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg - ptr += 3 - - if op==EXTENDED_ARG: - long_type = six.integer_types[-1] - extended_arg = arg * long_type(65536) - continue - - else: - arg = None - ptr += 1 - - yield op,arg - - def find_module(module, paths=None): """Just like 'imp.find_module()', but with package support""" @@ -118,20 +86,19 @@ def find_module(module, paths=None): while parts: part = parts.pop(0) - f, path, (suffix,mode,kind) = info = imp.find_module(part, paths) + f, path, (suffix, mode, kind) = info = imp.find_module(part, paths) - if kind==PKG_DIRECTORY: + if kind == PKG_DIRECTORY: parts = parts or ['__init__'] paths = [path] elif parts: - raise ImportError("Can't find %r in %s" % (parts,module)) + raise ImportError("Can't find %r in %s" % (parts, module)) return info def get_module_constant(module, symbol, default=-1, paths=None): - """Find 'module' by searching 'paths', and extract 'symbol' Return 'None' if 'module' does not exist on 'paths', or it does not define @@ -145,12 +112,12 @@ def get_module_constant(module, symbol, default=-1, paths=None): return None try: - if kind==PY_COMPILED: - f.read(8) # skip magic & date + if kind == PY_COMPILED: + f.read(8) # skip magic & date code = marshal.load(f) - elif kind==PY_FROZEN: + elif kind == PY_FROZEN: code = imp.get_frozen_object(module) - elif kind==PY_SOURCE: + elif kind == PY_SOURCE: code = 
compile(f.read(), path, 'exec') else: # Not something we can parse; we'll have to import it. :( @@ -177,9 +144,8 @@ def extract_constant(code, symbol, default=-1): only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' must be present in 'code.co_names'. """ - if symbol not in code.co_names: - # name's not there, can't possibly be an assigment + # name's not there, can't possibly be an assignment return None name_idx = list(code.co_names).index(symbol) @@ -190,11 +156,13 @@ def extract_constant(code, symbol, default=-1): const = default - for op, arg in _iter_code(code): + for byte_code in Bytecode(code): + op = byte_code.opcode + arg = byte_code.arg - if op==LOAD_CONST: + if op == LOAD_CONST: const = code.co_consts[arg] - elif arg==name_idx and (op==STORE_NAME or op==STORE_GLOBAL): + elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL): return const else: const = default @@ -214,4 +182,5 @@ def _update_globals(): del globals()[name] __all__.remove(name) + _update_globals() diff --git a/Shared/lib/python3.4/site-packages/setuptools/dist.py b/Shared/lib/python3.4/site-packages/setuptools/dist.py index 7785541..2360e20 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/dist.py +++ b/Shared/lib/python3.4/site-packages/setuptools/dist.py @@ -1,118 +1,189 @@ +# -*- coding: utf-8 -*- __all__ = ['Distribution'] import re import os -import sys import warnings import numbers import distutils.log import distutils.core import distutils.cmd import distutils.dist -from distutils.core import Distribution as _Distribution -from distutils.errors import (DistutilsOptionError, DistutilsPlatformError, - DistutilsSetupError) +import itertools +from collections import defaultdict +from distutils.errors import ( + DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError, +) +from distutils.util import rfc822_escape +from distutils.version import StrictVersion from setuptools.extern import six -from setuptools.extern.six.moves import map 
-from pkg_resources.extern import packaging +from setuptools.extern import packaging +from setuptools.extern.six.moves import map, filter, filterfalse from setuptools.depends import Require from setuptools import windows_support +from setuptools.monkey import get_unpatched +from setuptools.config import parse_configuration import pkg_resources +from .py36compat import Distribution_parse_config_files + +__import__('setuptools.extern.packaging.specifiers') +__import__('setuptools.extern.packaging.version') def _get_unpatched(cls): - """Protect against re-patching the distutils if reloaded + warnings.warn("Do not call this function", DeprecationWarning) + return get_unpatched(cls) - Also ensures that no other distutils extension monkeypatched the distutils - first. + +def get_metadata_version(dist_md): + if dist_md.long_description_content_type or dist_md.provides_extras: + return StrictVersion('2.1') + elif (dist_md.maintainer is not None or + dist_md.maintainer_email is not None or + getattr(dist_md, 'python_requires', None) is not None): + return StrictVersion('1.2') + elif (dist_md.provides or dist_md.requires or dist_md.obsoletes or + dist_md.classifiers or dist_md.download_url): + return StrictVersion('1.1') + + return StrictVersion('1.0') + + +# Based on Python 3.5 version +def write_pkg_file(self, file): + """Write the PKG-INFO format data to a file object. 
""" - while cls.__module__.startswith('setuptools'): - cls, = cls.__bases__ - if not cls.__module__.startswith('distutils'): - raise AssertionError( - "distutils has already been patched by %r" % cls + version = get_metadata_version(self) + + file.write('Metadata-Version: %s\n' % version) + file.write('Name: %s\n' % self.get_name()) + file.write('Version: %s\n' % self.get_version()) + file.write('Summary: %s\n' % self.get_description()) + file.write('Home-page: %s\n' % self.get_url()) + + if version < StrictVersion('1.2'): + file.write('Author: %s\n' % self.get_contact()) + file.write('Author-email: %s\n' % self.get_contact_email()) + else: + optional_fields = ( + ('Author', 'author'), + ('Author-email', 'author_email'), + ('Maintainer', 'maintainer'), + ('Maintainer-email', 'maintainer_email'), ) - return cls -_Distribution = _get_unpatched(_Distribution) + for field, attr in optional_fields: + attr_val = getattr(self, attr) + if six.PY2: + attr_val = self._encode_field(attr_val) -def _patch_distribution_metadata_write_pkg_info(): - """ - Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local - encoding to save the pkg_info. Monkey-patch its write_pkg_info method to - correct this undesirable behavior. - """ - environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2) - if not environment_local: - return + if attr_val is not None: + file.write('%s: %s\n' % (field, attr_val)) - # from Python 3.4 - def write_pkg_info(self, base_dir): - """Write the PKG-INFO file into the release tree. 
- """ - with open(os.path.join(base_dir, 'PKG-INFO'), 'w', - encoding='UTF-8') as pkg_info: - self.write_pkg_file(pkg_info) + file.write('License: %s\n' % self.get_license()) + if self.download_url: + file.write('Download-URL: %s\n' % self.download_url) + for project_url in self.project_urls.items(): + file.write('Project-URL: %s, %s\n' % project_url) + + long_desc = rfc822_escape(self.get_long_description()) + file.write('Description: %s\n' % long_desc) + + keywords = ','.join(self.get_keywords()) + if keywords: + file.write('Keywords: %s\n' % keywords) + + if version >= StrictVersion('1.2'): + for platform in self.get_platforms(): + file.write('Platform: %s\n' % platform) + else: + self._write_list(file, 'Platform', self.get_platforms()) + + self._write_list(file, 'Classifier', self.get_classifiers()) + + # PEP 314 + self._write_list(file, 'Requires', self.get_requires()) + self._write_list(file, 'Provides', self.get_provides()) + self._write_list(file, 'Obsoletes', self.get_obsoletes()) + + # Setuptools specific for PEP 345 + if hasattr(self, 'python_requires'): + file.write('Requires-Python: %s\n' % self.python_requires) + + # PEP 566 + if self.long_description_content_type: + file.write( + 'Description-Content-Type: %s\n' % + self.long_description_content_type + ) + if self.provides_extras: + for extra in sorted(self.provides_extras): + file.write('Provides-Extra: %s\n' % extra) - distutils.dist.DistributionMetadata.write_pkg_info = write_pkg_info -_patch_distribution_metadata_write_pkg_info() sequence = tuple, list + def check_importable(dist, attr, value): try: - ep = pkg_resources.EntryPoint.parse('x='+value) + ep = pkg_resources.EntryPoint.parse('x=' + value) assert not ep.extras - except (TypeError,ValueError,AttributeError,AssertionError): + except (TypeError, ValueError, AttributeError, AssertionError): raise DistutilsSetupError( "%r must be importable 'module:attrs' string (got %r)" - % (attr,value) + % (attr, value) ) def assert_string_list(dist, 
attr, value): """Verify that value is a string list or None""" try: - assert ''.join(value)!=value - except (TypeError,ValueError,AttributeError,AssertionError): + assert ''.join(value) != value + except (TypeError, ValueError, AttributeError, AssertionError): raise DistutilsSetupError( - "%r must be a list of strings (got %r)" % (attr,value) + "%r must be a list of strings (got %r)" % (attr, value) ) + + def check_nsp(dist, attr, value): """Verify that namespace packages are valid""" - assert_string_list(dist,attr,value) - for nsp in value: + ns_packages = value + assert_string_list(dist, attr, ns_packages) + for nsp in ns_packages: if not dist.has_contents_for(nsp): raise DistutilsSetupError( "Distribution contains no modules or packages for " + "namespace package %r" % nsp ) - if '.' in nsp: - parent = '.'.join(nsp.split('.')[:-1]) - if parent not in value: - distutils.log.warn( - "WARNING: %r is declared as a package namespace, but %r" - " is not: please correct this in setup.py", nsp, parent - ) + parent, sep, child = nsp.rpartition('.') + if parent and parent not in ns_packages: + distutils.log.warn( + "WARNING: %r is declared as a package namespace, but %r" + " is not: please correct this in setup.py", nsp, parent + ) + def check_extras(dist, attr, value): """Verify that extras_require mapping is valid""" try: - for k,v in value.items(): - if ':' in k: - k,m = k.split(':',1) - if pkg_resources.invalid_marker(m): - raise DistutilsSetupError("Invalid environment marker: "+m) - list(pkg_resources.parse_requirements(v)) - except (TypeError,ValueError,AttributeError): + list(itertools.starmap(_check_extra, value.items())) + except (TypeError, ValueError, AttributeError): raise DistutilsSetupError( "'extras_require' must be a dictionary whose values are " "strings or lists of strings containing valid project/version " "requirement specifiers." 
) + +def _check_extra(extra, reqs): + name, sep, marker = extra.partition(':') + if marker and pkg_resources.invalid_marker(marker): + raise DistutilsSetupError("Invalid environment marker: " + marker) + list(pkg_resources.parse_requirements(reqs)) + + def assert_bool(dist, attr, value): """Verify that value is True, False, 0, or 1""" if bool(value) != value: @@ -124,6 +195,8 @@ def check_requirements(dist, attr, value): """Verify that install_requires is a valid requirements list""" try: list(pkg_resources.parse_requirements(value)) + if isinstance(value, (dict, set)): + raise TypeError("Unordered types are not allowed") except (TypeError, ValueError) as error: tmpl = ( "{attr!r} must be a string or list of strings " @@ -131,6 +204,19 @@ def check_requirements(dist, attr, value): ) raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) + +def check_specifier(dist, attr, value): + """Verify that value is a valid version specifier""" + try: + packaging.specifiers.SpecifierSet(value) + except packaging.specifiers.InvalidSpecifier as error: + tmpl = ( + "{attr!r} must be a string " + "containing valid version specifiers; {error}" + ) + raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) + + def check_entry_points(dist, attr, value): """Verify that entry_points map is parseable""" try: @@ -138,25 +224,30 @@ def check_entry_points(dist, attr, value): except ValueError as e: raise DistutilsSetupError(e) + def check_test_suite(dist, attr, value): if not isinstance(value, six.string_types): raise DistutilsSetupError("test_suite must be a string") + def check_package_data(dist, attr, value): """Verify that value is a dictionary of package names to glob lists""" - if isinstance(value,dict): - for k,v in value.items(): - if not isinstance(k,str): break - try: iter(v) + if isinstance(value, dict): + for k, v in value.items(): + if not isinstance(k, str): + break + try: + iter(v) except TypeError: break else: return raise DistutilsSetupError( - attr+" must be 
a dictionary mapping package names to lists of " + attr + " must be a dictionary mapping package names to lists of " "wildcard patterns" ) + def check_packages(dist, attr, value): for pkgname in value: if not re.match(r'\w+(\.\w+)*', pkgname): @@ -166,7 +257,10 @@ def check_packages(dist, attr, value): ) -class Distribution(_Distribution): +_Distribution = get_unpatched(distutils.core.Distribution) + + +class Distribution(Distribution_parse_config_files, _Distribution): """Distribution with support for features, tests, and package data This is an enhanced version of 'distutils.dist.Distribution' that @@ -234,6 +328,12 @@ class Distribution(_Distribution): distribution for the included and excluded features. """ + _DISTUTILS_UNSUPPORTED_METADATA = { + 'long_description_content_type': None, + 'project_urls': dict, + 'provides_extras': set, + } + _patched_dist = None def patch_missing_pkg_info(self, attrs): @@ -253,23 +353,36 @@ class Distribution(_Distribution): have_package_data = hasattr(self, "package_data") if not have_package_data: self.package_data = {} - _attrs_dict = attrs or {} - if 'features' in _attrs_dict or 'require_features' in _attrs_dict: + attrs = attrs or {} + if 'features' in attrs or 'require_features' in attrs: Feature.warn_deprecated() self.require_features = [] self.features = {} self.dist_files = [] - self.src_root = attrs and attrs.pop("src_root", None) + # Filter-out setuptools' specific options. 
+ self.src_root = attrs.pop("src_root", None) self.patch_missing_pkg_info(attrs) - # Make sure we have any eggs needed to interpret 'attrs' - if attrs is not None: - self.dependency_links = attrs.pop('dependency_links', []) - assert_string_list(self,'dependency_links',self.dependency_links) - if attrs and 'setup_requires' in attrs: - self.fetch_build_eggs(attrs['setup_requires']) + self.dependency_links = attrs.pop('dependency_links', []) + self.setup_requires = attrs.pop('setup_requires', []) for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): vars(self).setdefault(ep.name, None) - _Distribution.__init__(self,attrs) + _Distribution.__init__(self, { + k: v for k, v in attrs.items() + if k not in self._DISTUTILS_UNSUPPORTED_METADATA + }) + + # Fill-in missing metadata fields not supported by distutils. + # Note some fields may have been set by other tools (e.g. pbr) + # above; they are taken preferrentially to setup() arguments + for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items(): + for source in self.metadata.__dict__, attrs: + if option in source: + value = source[option] + break + else: + value = default() if default else None + setattr(self.metadata, option, value) + if isinstance(self.metadata.version, numbers.Number): # Some people apparently take "version number" too literally :) self.metadata.version = str(self.metadata.version) @@ -293,6 +406,94 @@ class Distribution(_Distribution): "setuptools, pip, and PyPI. Please see PEP 440 for more " "details." % self.metadata.version ) + self._finalize_requires() + + def _finalize_requires(self): + """ + Set `metadata.python_requires` and fix environment markers + in `install_requires` and `extras_require`. 
+ """ + if getattr(self, 'python_requires', None): + self.metadata.python_requires = self.python_requires + + if getattr(self, 'extras_require', None): + for extra in self.extras_require.keys(): + # Since this gets called multiple times at points where the + # keys have become 'converted' extras, ensure that we are only + # truly adding extras we haven't seen before here. + extra = extra.split(':')[0] + if extra: + self.metadata.provides_extras.add(extra) + + self._convert_extras_requirements() + self._move_install_requirements_markers() + + def _convert_extras_requirements(self): + """ + Convert requirements in `extras_require` of the form + `"extra": ["barbazquux; {marker}"]` to + `"extra:{marker}": ["barbazquux"]`. + """ + spec_ext_reqs = getattr(self, 'extras_require', None) or {} + self._tmp_extras_require = defaultdict(list) + for section, v in spec_ext_reqs.items(): + # Do not strip empty sections. + self._tmp_extras_require[section] + for r in pkg_resources.parse_requirements(v): + suffix = self._suffix_for(r) + self._tmp_extras_require[section + suffix].append(r) + + @staticmethod + def _suffix_for(req): + """ + For a requirement, return the 'extras_require' suffix for + that requirement. + """ + return ':' + str(req.marker) if req.marker else '' + + def _move_install_requirements_markers(self): + """ + Move requirements in `install_requires` that are using environment + markers `extras_require`. + """ + + # divide the install_requires into two sets, simple ones still + # handled by install_requires and more complex ones handled + # by extras_require. 
+ + def is_simple_req(req): + return not req.marker + + spec_inst_reqs = getattr(self, 'install_requires', None) or () + inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs)) + simple_reqs = filter(is_simple_req, inst_reqs) + complex_reqs = filterfalse(is_simple_req, inst_reqs) + self.install_requires = list(map(str, simple_reqs)) + + for r in complex_reqs: + self._tmp_extras_require[':' + str(r.marker)].append(r) + self.extras_require = dict( + (k, [str(r) for r in map(self._clean_req, v)]) + for k, v in self._tmp_extras_require.items() + ) + + def _clean_req(self, req): + """ + Given a Requirement, remove environment markers and return it. + """ + req.marker = None + return req + + def parse_config_files(self, filenames=None, ignore_option_errors=False): + """Parses configuration files from various levels + and loads configuration. + + """ + _Distribution.parse_config_files(self, filenames=filenames) + + parse_configuration(self, self.command_options, + ignore_option_errors=ignore_option_errors) + self._finalize_requires() def parse_command_line(self): """Process features after parsing command line options""" @@ -301,9 +502,9 @@ class Distribution(_Distribution): self._finalize_features() return result - def _feature_attrname(self,name): + def _feature_attrname(self, name): """Convert feature name to corresponding option attribute name""" - return 'with_'+name.replace('-','_') + return 'with_' + name.replace('-', '_') def fetch_build_eggs(self, requires): """Resolve pre-setup requirements""" @@ -314,6 +515,7 @@ class Distribution(_Distribution): ) for dist in resolved_dists: pkg_resources.working_set.add(dist, replace=True) + return resolved_dists def finalize_options(self): _Distribution.finalize_options(self) @@ -321,13 +523,16 @@ class Distribution(_Distribution): self._set_global_opts_from_features() for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): - value = getattr(self,ep.name,None) + value = getattr(self, ep.name, None) 
if value is not None: ep.require(installer=self.fetch_build_egg) ep.load()(self, ep.name, value) if getattr(self, 'convert_2to3_doctests', None): # XXX may convert to set here when we can rely on set being builtin - self.convert_2to3_doctests = [os.path.abspath(p) for p in self.convert_2to3_doctests] + self.convert_2to3_doctests = [ + os.path.abspath(p) + for p in self.convert_2to3_doctests + ] else: self.convert_2to3_doctests = [] @@ -348,35 +553,31 @@ class Distribution(_Distribution): def fetch_build_egg(self, req): """Fetch an egg needed for building""" - - try: - cmd = self._egg_fetcher - cmd.package_index.to_scan = [] - except AttributeError: - from setuptools.command.easy_install import easy_install - dist = self.__class__({'script_args':['easy_install']}) - dist.parse_config_files() - opts = dist.get_option_dict('easy_install') - keep = ( - 'find_links', 'site_dirs', 'index_url', 'optimize', - 'site_dirs', 'allow_hosts' - ) - for key in list(opts): - if key not in keep: - del opts[key] # don't use any other settings - if self.dependency_links: - links = self.dependency_links[:] - if 'find_links' in opts: - links = opts['find_links'][1].split() + links - opts['find_links'] = ('setup', links) - install_dir = self.get_egg_cache_dir() - cmd = easy_install( - dist, args=["x"], install_dir=install_dir, exclude_scripts=True, - always_copy=False, build_directory=None, editable=False, - upgrade=False, multi_version=True, no_report=True, user=False - ) - cmd.ensure_finalized() - self._egg_fetcher = cmd + from setuptools.command.easy_install import easy_install + dist = self.__class__({'script_args': ['easy_install']}) + opts = dist.get_option_dict('easy_install') + opts.clear() + opts.update( + (k, v) + for k, v in self.get_option_dict('easy_install').items() + if k in ( + # don't use any other settings + 'find_links', 'site_dirs', 'index_url', + 'optimize', 'site_dirs', 'allow_hosts', + )) + if self.dependency_links: + links = self.dependency_links[:] + if 
'find_links' in opts: + links = opts['find_links'][1] + links + opts['find_links'] = ('setup', links) + install_dir = self.get_egg_cache_dir() + cmd = easy_install( + dist, args=["x"], install_dir=install_dir, + exclude_scripts=True, + always_copy=False, build_directory=None, editable=False, + upgrade=False, multi_version=True, no_report=True, user=False + ) + cmd.ensure_finalized() return cmd.easy_install(req) def _set_global_opts_from_features(self): @@ -385,20 +586,23 @@ class Distribution(_Distribution): go = [] no = self.negative_opt.copy() - for name,feature in self.features.items(): - self._set_feature(name,None) + for name, feature in self.features.items(): + self._set_feature(name, None) feature.validate(self) if feature.optional: descr = feature.description incdef = ' (default)' - excdef='' + excdef = '' if not feature.include_by_default(): excdef, incdef = incdef, excdef - go.append(('with-'+name, None, 'include '+descr+incdef)) - go.append(('without-'+name, None, 'exclude '+descr+excdef)) - no['without-'+name] = 'with-'+name + new = ( + ('with-' + name, None, 'include ' + descr + incdef), + ('without-' + name, None, 'exclude ' + descr + excdef), + ) + go.extend(new) + no['without-' + name] = 'with-' + name self.global_options = self.feature_options = go + self.global_options self.negative_opt = self.feature_negopt = no @@ -407,25 +611,26 @@ class Distribution(_Distribution): """Add/remove features and resolve dependencies between them""" # First, flag all the enabled items (and thus their dependencies) - for name,feature in self.features.items(): + for name, feature in self.features.items(): enabled = self.feature_is_included(name) if enabled or (enabled is None and feature.include_by_default()): feature.include_in(self) - self._set_feature(name,1) + self._set_feature(name, 1) # Then disable the rest, so that off-by-default features don't # get flagged as errors when they're required by an enabled feature - for name,feature in self.features.items(): + 
for name, feature in self.features.items(): if not self.feature_is_included(name): feature.exclude_from(self) - self._set_feature(name,0) + self._set_feature(name, 0) def get_command_class(self, command): """Pluggable version of get_command_class()""" if command in self.cmdclass: return self.cmdclass[command] - for ep in pkg_resources.iter_entry_points('distutils.commands',command): + eps = pkg_resources.iter_entry_points('distutils.commands', command) + for ep in eps: ep.require(installer=self.fetch_build_egg) self.cmdclass[command] = cmdclass = ep.load() return cmdclass @@ -448,26 +653,26 @@ class Distribution(_Distribution): self.cmdclass[ep.name] = cmdclass return _Distribution.get_command_list(self) - def _set_feature(self,name,status): + def _set_feature(self, name, status): """Set feature's inclusion status""" - setattr(self,self._feature_attrname(name),status) + setattr(self, self._feature_attrname(name), status) - def feature_is_included(self,name): + def feature_is_included(self, name): """Return 1 if feature is included, 0 if excluded, 'None' if unknown""" - return getattr(self,self._feature_attrname(name)) + return getattr(self, self._feature_attrname(name)) - def include_feature(self,name): + def include_feature(self, name): """Request inclusion of feature named 'name'""" - if self.feature_is_included(name)==0: + if self.feature_is_included(name) == 0: descr = self.features[name].description raise DistutilsOptionError( descr + " is required, but was excluded or is not available" ) self.features[name].include_in(self) - self._set_feature(name,1) + self._set_feature(name, 1) - def include(self,**attrs): + def include(self, **attrs): """Add items to distribution that are named in keyword arguments For example, 'dist.exclude(py_modules=["x"])' would add 'x' to @@ -482,86 +687,87 @@ class Distribution(_Distribution): will try to call 'dist._include_foo({"bar":"baz"})', which can then handle whatever special inclusion logic is needed. 
""" - for k,v in attrs.items(): - include = getattr(self, '_include_'+k, None) + for k, v in attrs.items(): + include = getattr(self, '_include_' + k, None) if include: include(v) else: - self._include_misc(k,v) + self._include_misc(k, v) - def exclude_package(self,package): + def exclude_package(self, package): """Remove packages, modules, and extensions in named package""" - pfx = package+'.' + pfx = package + '.' if self.packages: self.packages = [ p for p in self.packages - if p != package and not p.startswith(pfx) + if p != package and not p.startswith(pfx) ] if self.py_modules: self.py_modules = [ p for p in self.py_modules - if p != package and not p.startswith(pfx) + if p != package and not p.startswith(pfx) ] if self.ext_modules: self.ext_modules = [ p for p in self.ext_modules - if p.name != package and not p.name.startswith(pfx) + if p.name != package and not p.name.startswith(pfx) ] - def has_contents_for(self,package): + def has_contents_for(self, package): """Return true if 'exclude_package(package)' would do something""" - pfx = package+'.' + pfx = package + '.' 
for p in self.iter_distribution_names(): - if p==package or p.startswith(pfx): + if p == package or p.startswith(pfx): return True - def _exclude_misc(self,name,value): + def _exclude_misc(self, name, value): """Handle 'exclude()' for list/tuple attrs without a special handler""" - if not isinstance(value,sequence): + if not isinstance(value, sequence): raise DistutilsSetupError( "%s: setting must be a list or tuple (%r)" % (name, value) ) try: - old = getattr(self,name) + old = getattr(self, name) except AttributeError: raise DistutilsSetupError( "%s: No such distribution setting" % name ) - if old is not None and not isinstance(old,sequence): + if old is not None and not isinstance(old, sequence): raise DistutilsSetupError( - name+": this setting cannot be changed via include/exclude" + name + ": this setting cannot be changed via include/exclude" ) elif old: - setattr(self,name,[item for item in old if item not in value]) + setattr(self, name, [item for item in old if item not in value]) - def _include_misc(self,name,value): + def _include_misc(self, name, value): """Handle 'include()' for list/tuple attrs without a special handler""" - if not isinstance(value,sequence): + if not isinstance(value, sequence): raise DistutilsSetupError( "%s: setting must be a list (%r)" % (name, value) ) try: - old = getattr(self,name) + old = getattr(self, name) except AttributeError: raise DistutilsSetupError( "%s: No such distribution setting" % name ) if old is None: - setattr(self,name,value) - elif not isinstance(old,sequence): + setattr(self, name, value) + elif not isinstance(old, sequence): raise DistutilsSetupError( - name+": this setting cannot be changed via include/exclude" + name + ": this setting cannot be changed via include/exclude" ) else: - setattr(self,name,old+[item for item in value if item not in old]) + new = [item for item in value if item not in old] + setattr(self, name, old + new) - def exclude(self,**attrs): + def exclude(self, **attrs): """Remove 
items from distribution that are named in keyword arguments For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from @@ -577,15 +783,15 @@ class Distribution(_Distribution): will try to call 'dist._exclude_foo({"bar":"baz"})', which can then handle whatever special exclusion logic is needed. """ - for k,v in attrs.items(): - exclude = getattr(self, '_exclude_'+k, None) + for k, v in attrs.items(): + exclude = getattr(self, '_exclude_' + k, None) if exclude: exclude(v) else: - self._exclude_misc(k,v) + self._exclude_misc(k, v) - def _exclude_packages(self,packages): - if not isinstance(packages,sequence): + def _exclude_packages(self, packages): + if not isinstance(packages, sequence): raise DistutilsSetupError( "packages: setting must be a list or tuple (%r)" % (packages,) ) @@ -600,17 +806,17 @@ class Distribution(_Distribution): command = args[0] aliases = self.get_option_dict('aliases') while command in aliases: - src,alias = aliases[command] - del aliases[command] # ensure each alias can expand only once! + src, alias = aliases[command] + del aliases[command] # ensure each alias can expand only once! 
import shlex - args[:1] = shlex.split(alias,True) + args[:1] = shlex.split(alias, True) command = args[0] nargs = _Distribution._parse_command_opts(self, parser, args) # Handle commands that want to consume all remaining arguments cmd_class = self.get_command_class(command) - if getattr(cmd_class,'command_consumes_arguments',None): + if getattr(cmd_class, 'command_consumes_arguments', None): self.get_option_dict(command)['args'] = ("command line", nargs) if nargs is not None: return [] @@ -629,31 +835,31 @@ class Distribution(_Distribution): d = {} - for cmd,opts in self.command_options.items(): + for cmd, opts in self.command_options.items(): - for opt,(src,val) in opts.items(): + for opt, (src, val) in opts.items(): if src != "command line": continue - opt = opt.replace('_','-') + opt = opt.replace('_', '-') - if val==0: + if val == 0: cmdobj = self.get_command_obj(cmd) neg_opt = self.negative_opt.copy() - neg_opt.update(getattr(cmdobj,'negative_opt',{})) - for neg,pos in neg_opt.items(): - if pos==opt: - opt=neg - val=None + neg_opt.update(getattr(cmdobj, 'negative_opt', {})) + for neg, pos in neg_opt.items(): + if pos == opt: + opt = neg + val = None break else: raise AssertionError("Shouldn't be able to get here") - elif val==1: + elif val == 1: val = None - d.setdefault(cmd,{})[opt] = val + d.setdefault(cmd, {})[opt] = val return d @@ -667,7 +873,7 @@ class Distribution(_Distribution): yield module for ext in self.ext_modules or (): - if isinstance(ext,tuple): + if isinstance(ext, tuple): name, buildinfo = ext else: name = ext.name @@ -711,16 +917,11 @@ class Distribution(_Distribution): sys.stdout.detach(), encoding, errors, newline, line_buffering) -# Install it throughout the distutils -for module in distutils.dist, distutils.core, distutils.cmd: - module.Distribution = Distribution - - class Feature: """ **deprecated** -- The `Feature` facility was never completely implemented or supported, `has reported issues - `_ and will be removed in + `_ and will be 
removed in a future version. A subset of the distribution that can be excluded if unneeded/wanted @@ -775,14 +976,14 @@ class Feature: @staticmethod def warn_deprecated(): - warnings.warn( + msg = ( "Features are deprecated and will be removed in a future " - "version. See http://bitbucket.org/pypa/setuptools/65.", - DeprecationWarning, - stacklevel=3, + "version. See https://github.com/pypa/setuptools/issues/65." ) + warnings.warn(msg, DeprecationWarning, stacklevel=3) - def __init__(self, description, standard=False, available=True, + def __init__( + self, description, standard=False, available=True, optional=True, require_features=(), remove=(), **extras): self.warn_deprecated() @@ -790,32 +991,32 @@ class Feature: self.standard = standard self.available = available self.optional = optional - if isinstance(require_features,(str,Require)): + if isinstance(require_features, (str, Require)): require_features = require_features, self.require_features = [ - r for r in require_features if isinstance(r,str) + r for r in require_features if isinstance(r, str) ] - er = [r for r in require_features if not isinstance(r,str)] - if er: extras['require_features'] = er + er = [r for r in require_features if not isinstance(r, str)] + if er: + extras['require_features'] = er - if isinstance(remove,str): + if isinstance(remove, str): remove = remove, self.remove = remove self.extras = extras if not remove and not require_features and not extras: raise DistutilsSetupError( - "Feature %s: must define 'require_features', 'remove', or at least one" - " of 'packages', 'py_modules', etc." + "Feature %s: must define 'require_features', 'remove', or " + "at least one of 'packages', 'py_modules', etc." 
) def include_by_default(self): """Should this feature be included by default?""" return self.available and self.standard - def include_in(self,dist): - + def include_in(self, dist): """Ensure feature and its requirements are included in distribution You may override this in a subclass to perform additional operations on @@ -826,7 +1027,7 @@ class Feature: if not self.available: raise DistutilsPlatformError( - self.description+" is required, " + self.description + " is required, " "but is not available on this platform" ) @@ -835,8 +1036,7 @@ class Feature: for f in self.require_features: dist.include_feature(f) - def exclude_from(self,dist): - + def exclude_from(self, dist): """Ensure feature is excluded from distribution You may override this in a subclass to perform additional operations on @@ -851,8 +1051,7 @@ class Feature: for item in self.remove: dist.exclude_package(item) - def validate(self,dist): - + def validate(self, dist): """Verify that feature makes sense in context of distribution This method is called by the distribution just before it parses its diff --git a/Shared/lib/python3.4/site-packages/setuptools/extension.py b/Shared/lib/python3.4/site-packages/setuptools/extension.py index d10609b..2946889 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/extension.py +++ b/Shared/lib/python3.4/site-packages/setuptools/extension.py @@ -1,4 +1,3 @@ -import sys import re import functools import distutils.core @@ -7,18 +6,14 @@ import distutils.extension from setuptools.extern.six.moves import map -from .dist import _get_unpatched -from . import msvc9_support +from .monkey import get_unpatched -_Extension = _get_unpatched(distutils.core.Extension) - -msvc9_support.patch_for_specialized_compiler() def _have_cython(): """ Return True if Cython can be imported. 
""" - cython_impl = 'Cython.Distutils.build_ext', + cython_impl = 'Cython.Distutils.build_ext' try: # from (cython_impl) import build_ext __import__(cython_impl, fromlist=['build_ext']).build_ext @@ -27,13 +22,22 @@ def _have_cython(): pass return False + # for compatibility have_pyrex = _have_cython +_Extension = get_unpatched(distutils.core.Extension) + class Extension(_Extension): """Extension that uses '.c' files in place of '.pyx' files""" + def __init__(self, name, sources, *args, **kw): + # The *args is needed for compatibility as calls may use positional + # arguments. py_limited_api may be set only via keyword. + self.py_limited_api = kw.pop("py_limited_api", False) + _Extension.__init__(self, name, sources, *args, **kw) + def _convert_pyx_sources_to_lang(self): """ Replace sources with .pyx extensions to sources with the target @@ -48,10 +52,6 @@ class Extension(_Extension): sub = functools.partial(re.sub, '.pyx$', target_ext) self.sources = list(map(sub, self.sources)) + class Library(Extension): """Just like a regular Extension, but built as a library instead""" - -distutils.core.Extension = Extension -distutils.extension.Extension = Extension -if 'distutils.command.build_ext' in sys.modules: - sys.modules['distutils.command.build_ext'].Extension = Extension diff --git a/Shared/lib/python3.4/site-packages/setuptools/extern/__init__.py b/Shared/lib/python3.4/site-packages/setuptools/extern/__init__.py index 6859aa5..cb2fa32 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/extern/__init__.py +++ b/Shared/lib/python3.4/site-packages/setuptools/extern/__init__.py @@ -1,5 +1,73 @@ -from pkg_resources.extern import VendorImporter +import sys -names = 'six', -VendorImporter(__name__, names, 'pkg_resources._vendor').install() +class VendorImporter: + """ + A PEP 302 meta path importer for finding optionally-vendored + or otherwise naturally-installed packages from root_name. 
+ """ + + def __init__(self, root_name, vendored_names=(), vendor_pkg=None): + self.root_name = root_name + self.vendored_names = set(vendored_names) + self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') + + @property + def search_path(self): + """ + Search first the vendor package then as a natural package. + """ + yield self.vendor_pkg + '.' + yield '' + + def find_module(self, fullname, path=None): + """ + Return self when fullname starts with root_name and the + target module is one vendored through this importer. + """ + root, base, target = fullname.partition(self.root_name + '.') + if root: + return + if not any(map(target.startswith, self.vendored_names)): + return + return self + + def load_module(self, fullname): + """ + Iterate over the search path to locate and load fullname. + """ + root, base, target = fullname.partition(self.root_name + '.') + for prefix in self.search_path: + try: + extant = prefix + target + __import__(extant) + mod = sys.modules[extant] + sys.modules[fullname] = mod + # mysterious hack: + # Remove the reference to the extant package/module + # on later Python versions to cause relative imports + # in the vendor package to resolve the same modules + # as those going through this importer. + if sys.version_info >= (3, ): + del sys.modules[extant] + return mod + except ImportError: + pass + else: + raise ImportError( + "The '{target}' package is required; " + "normally this is bundled with this package so if you get " + "this warning, consult the packager of your " + "distribution.".format(**locals()) + ) + + def install(self): + """ + Install this importer into sys.meta_path if not already present. 
+ """ + if self not in sys.meta_path: + sys.meta_path.append(self) + + +names = 'six', 'packaging', 'pyparsing', +VendorImporter(__name__, names, 'setuptools._vendor').install() diff --git a/Shared/lib/python3.4/site-packages/setuptools/glibc.py b/Shared/lib/python3.4/site-packages/setuptools/glibc.py new file mode 100644 index 0000000..a134591 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/glibc.py @@ -0,0 +1,86 @@ +# This file originally from pip: +# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/utils/glibc.py +from __future__ import absolute_import + +import ctypes +import re +import warnings + + +def glibc_version_string(): + "Returns glibc version string, or None if not using glibc." + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + process_namespace = ctypes.CDLL(None) + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +# Separated out from have_compatible_glibc for easier unit testing +def check_glibc_version(version_str, required_major, minimum_minor): + # Parse string and check against requested version. + # + # We use a regexp instead of str.split because we want to discard any + # random junk that might come after the minor version -- this might happen + # in patched/forked versions of glibc (e.g. 
Linaro's version of glibc + # uses version strings like "2.20-2014.11"). See gh-3588. + m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn("Expected glibc version with 2 components major.minor," + " got: %s" % version_str, RuntimeWarning) + return False + return (int(m.group("major")) == required_major and + int(m.group("minor")) >= minimum_minor) + + +def have_compatible_glibc(required_major, minimum_minor): + version_str = glibc_version_string() + if version_str is None: + return False + return check_glibc_version(version_str, required_major, minimum_minor) + + +# platform.libc_ver regularly returns completely nonsensical glibc +# versions. E.g. on my computer, platform says: +# +# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.7') +# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' +# ('glibc', '2.9') +# +# But the truth is: +# +# ~$ ldd --version +# ldd (Debian GLIBC 2.22-11) 2.22 +# +# This is unfortunate, because it means that the linehaul data on libc +# versions that was generated by pip 8.1.2 and earlier is useless and +# misleading. Solution: instead of using platform, use our code that actually +# works. +def libc_ver(): + """Try to determine the glibc version + + Returns a tuple of strings (lib, version) which default to empty strings + in case the lookup fails. + """ + glibc_version = glibc_version_string() + if glibc_version is None: + return ("", "") + else: + return ("glibc", glibc_version) diff --git a/Shared/lib/python3.4/site-packages/setuptools/glob.py b/Shared/lib/python3.4/site-packages/setuptools/glob.py new file mode 100644 index 0000000..9d7cbc5 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/glob.py @@ -0,0 +1,174 @@ +""" +Filename globbing utility. Mostly a copy of `glob` from Python 3.5. + +Changes include: + * `yield from` and PEP3102 `*` removed. + * Hidden files are not ignored. 
+""" + +import os +import re +import fnmatch + +__all__ = ["glob", "iglob", "escape"] + + +def glob(pathname, recursive=False): + """Return a list of paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + """ + return list(iglob(pathname, recursive=recursive)) + + +def iglob(pathname, recursive=False): + """Return an iterator which yields the paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + """ + it = _iglob(pathname, recursive) + if recursive and _isrecursive(pathname): + s = next(it) # skip empty string + assert not s + return it + + +def _iglob(pathname, recursive): + dirname, basename = os.path.split(pathname) + if not has_magic(pathname): + if basename: + if os.path.lexists(pathname): + yield pathname + else: + # Patterns ending with a slash should match only directories + if os.path.isdir(dirname): + yield pathname + return + if not dirname: + if recursive and _isrecursive(basename): + for x in glob2(dirname, basename): + yield x + else: + for x in glob1(dirname, basename): + yield x + return + # `os.path.split()` returns the argument itself as a dirname if it is a + # drive or UNC path. Prevent an infinite recursion if a drive or UNC path + # contains magic characters (i.e. r'\\?\C:'). 
+ if dirname != pathname and has_magic(dirname): + dirs = _iglob(dirname, recursive) + else: + dirs = [dirname] + if has_magic(basename): + if recursive and _isrecursive(basename): + glob_in_dir = glob2 + else: + glob_in_dir = glob1 + else: + glob_in_dir = glob0 + for dirname in dirs: + for name in glob_in_dir(dirname, basename): + yield os.path.join(dirname, name) + + +# These 2 helper functions non-recursively glob inside a literal directory. +# They return a list of basenames. `glob1` accepts a pattern while `glob0` +# takes a literal basename (so it only has to check for its existence). + + +def glob1(dirname, pattern): + if not dirname: + if isinstance(pattern, bytes): + dirname = os.curdir.encode('ASCII') + else: + dirname = os.curdir + try: + names = os.listdir(dirname) + except OSError: + return [] + return fnmatch.filter(names, pattern) + + +def glob0(dirname, basename): + if not basename: + # `os.path.split()` returns an empty basename for paths ending with a + # directory separator. 'q*x/' should match only directories. + if os.path.isdir(dirname): + return [basename] + else: + if os.path.lexists(os.path.join(dirname, basename)): + return [basename] + return [] + + +# This helper function recursively yields relative pathnames inside a literal +# directory. + + +def glob2(dirname, pattern): + assert _isrecursive(pattern) + yield pattern[:0] + for x in _rlistdir(dirname): + yield x + + +# Recursively yields relative pathnames inside a literal directory. 
+def _rlistdir(dirname): + if not dirname: + if isinstance(dirname, bytes): + dirname = os.curdir.encode('ASCII') + else: + dirname = os.curdir + try: + names = os.listdir(dirname) + except os.error: + return + for x in names: + yield x + path = os.path.join(dirname, x) if dirname else x + for y in _rlistdir(path): + yield os.path.join(x, y) + + +magic_check = re.compile('([*?[])') +magic_check_bytes = re.compile(b'([*?[])') + + +def has_magic(s): + if isinstance(s, bytes): + match = magic_check_bytes.search(s) + else: + match = magic_check.search(s) + return match is not None + + +def _isrecursive(pattern): + if isinstance(pattern, bytes): + return pattern == b'**' + else: + return pattern == '**' + + +def escape(pathname): + """Escape all special characters. + """ + # Escaping is done by wrapping any of "*?[" between square brackets. + # Metacharacters do not work in the drive part and shouldn't be escaped. + drive, pathname = os.path.splitdrive(pathname) + if isinstance(pathname, bytes): + pathname = magic_check_bytes.sub(br'[\1]', pathname) + else: + pathname = magic_check.sub(r'[\1]', pathname) + return drive + pathname diff --git a/Shared/lib/python3.4/site-packages/setuptools/gui-arm-32.exe b/Shared/lib/python3.4/site-packages/setuptools/gui-arm-32.exe deleted file mode 100644 index 537aff3..0000000 Binary files a/Shared/lib/python3.4/site-packages/setuptools/gui-arm-32.exe and /dev/null differ diff --git a/Shared/lib/python3.4/site-packages/setuptools/launch.py b/Shared/lib/python3.4/site-packages/setuptools/launch.py index 06e15e1..308283e 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/launch.py +++ b/Shared/lib/python3.4/site-packages/setuptools/launch.py @@ -11,25 +11,25 @@ import sys def run(): - """ - Run the script in sys.argv[1] as if it had - been invoked naturally. 
- """ - __builtins__ - script_name = sys.argv[1] - namespace = dict( - __file__ = script_name, - __name__ = '__main__', - __doc__ = None, - ) - sys.argv[:] = sys.argv[1:] + """ + Run the script in sys.argv[1] as if it had + been invoked naturally. + """ + __builtins__ + script_name = sys.argv[1] + namespace = dict( + __file__=script_name, + __name__='__main__', + __doc__=None, + ) + sys.argv[:] = sys.argv[1:] - open_ = getattr(tokenize, 'open', open) - script = open_(script_name).read() - norm_script = script.replace('\\r\\n', '\\n') - code = compile(norm_script, script_name, 'exec') - exec(code, namespace) + open_ = getattr(tokenize, 'open', open) + script = open_(script_name).read() + norm_script = script.replace('\\r\\n', '\\n') + code = compile(norm_script, script_name, 'exec') + exec(code, namespace) if __name__ == '__main__': - run() + run() diff --git a/Shared/lib/python3.4/site-packages/setuptools/lib2to3_ex.py b/Shared/lib/python3.4/site-packages/setuptools/lib2to3_ex.py index feef591..4b1a73f 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/lib2to3_ex.py +++ b/Shared/lib/python3.4/site-packages/setuptools/lib2to3_ex.py @@ -10,8 +10,10 @@ This module raises an ImportError on Python 2. from distutils.util import Mixin2to3 as _Mixin2to3 from distutils import log from lib2to3.refactor import RefactoringTool, get_fixers_from_package + import setuptools + class DistutilsRefactoringTool(RefactoringTool): def log_error(self, msg, *args, **kw): log.error(msg, *args) @@ -22,15 +24,16 @@ class DistutilsRefactoringTool(RefactoringTool): def log_debug(self, msg, *args): log.debug(msg, *args) + class Mixin2to3(_Mixin2to3): - def run_2to3(self, files, doctests = False): + def run_2to3(self, files, doctests=False): # See of the distribution option has been set, otherwise check the # setuptools default. 
if self.distribution.use_2to3 is not True: return if not files: return - log.info("Fixing "+" ".join(files)) + log.info("Fixing " + " ".join(files)) self.__build_fixer_names() self.__exclude_fixers() if doctests: @@ -41,7 +44,8 @@ class Mixin2to3(_Mixin2to3): _Mixin2to3.run_2to3(self, files) def __build_fixer_names(self): - if self.fixer_names: return + if self.fixer_names: + return self.fixer_names = [] for p in setuptools.lib2to3_fixer_packages: self.fixer_names.extend(get_fixers_from_package(p)) diff --git a/Shared/lib/python3.4/site-packages/setuptools/monkey.py b/Shared/lib/python3.4/site-packages/setuptools/monkey.py new file mode 100644 index 0000000..05a738b --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/monkey.py @@ -0,0 +1,179 @@ +""" +Monkey patching of distutils. +""" + +import sys +import distutils.filelist +import platform +import types +import functools +from importlib import import_module +import inspect + +from setuptools.extern import six + +import setuptools + +__all__ = [] +""" +Everything is private. Contact the project team +if you think you need this functionality. +""" + + +def _get_mro(cls): + """ + Returns the bases classes for cls sorted by the MRO. + + Works around an issue on Jython where inspect.getmro will not return all + base classes if multiple classes share the same name. Instead, this + function will return a tuple containing the class itself, and the contents + of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024. 
+ """ + if platform.python_implementation() == "Jython": + return (cls,) + cls.__bases__ + return inspect.getmro(cls) + + +def get_unpatched(item): + lookup = ( + get_unpatched_class if isinstance(item, six.class_types) else + get_unpatched_function if isinstance(item, types.FunctionType) else + lambda item: None + ) + return lookup(item) + + +def get_unpatched_class(cls): + """Protect against re-patching the distutils if reloaded + + Also ensures that no other distutils extension monkeypatched the distutils + first. + """ + external_bases = ( + cls + for cls in _get_mro(cls) + if not cls.__module__.startswith('setuptools') + ) + base = next(external_bases) + if not base.__module__.startswith('distutils'): + msg = "distutils has already been patched by %r" % cls + raise AssertionError(msg) + return base + + +def patch_all(): + # we can't patch distutils.cmd, alas + distutils.core.Command = setuptools.Command + + has_issue_12885 = sys.version_info <= (3, 5, 3) + + if has_issue_12885: + # fix findall bug in distutils (http://bugs.python.org/issue12885) + distutils.filelist.findall = setuptools.findall + + needs_warehouse = ( + sys.version_info < (2, 7, 13) + or + (3, 4) < sys.version_info < (3, 4, 6) + or + (3, 5) < sys.version_info <= (3, 5, 3) + ) + + if needs_warehouse: + warehouse = 'https://upload.pypi.org/legacy/' + distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse + + _patch_distribution_metadata_write_pkg_file() + + # Install Distribution throughout the distutils + for module in distutils.dist, distutils.core, distutils.cmd: + module.Distribution = setuptools.dist.Distribution + + # Install the patched Extension + distutils.core.Extension = setuptools.extension.Extension + distutils.extension.Extension = setuptools.extension.Extension + if 'distutils.command.build_ext' in sys.modules: + sys.modules['distutils.command.build_ext'].Extension = ( + setuptools.extension.Extension + ) + + patch_for_msvc_specialized_compiler() + + +def 
_patch_distribution_metadata_write_pkg_file(): + """Patch write_pkg_file to also write Requires-Python/Requires-External""" + distutils.dist.DistributionMetadata.write_pkg_file = ( + setuptools.dist.write_pkg_file + ) + + +def patch_func(replacement, target_mod, func_name): + """ + Patch func_name in target_mod with replacement + + Important - original must be resolved by name to avoid + patching an already patched function. + """ + original = getattr(target_mod, func_name) + + # set the 'unpatched' attribute on the replacement to + # point to the original. + vars(replacement).setdefault('unpatched', original) + + # replace the function in the original module + setattr(target_mod, func_name, replacement) + + +def get_unpatched_function(candidate): + return getattr(candidate, 'unpatched') + + +def patch_for_msvc_specialized_compiler(): + """ + Patch functions in distutils to use standalone Microsoft Visual C++ + compilers. + """ + # import late to avoid circular imports on Python < 3.5 + msvc = import_module('setuptools.msvc') + + if platform.system() != 'Windows': + # Compilers only availables on Microsoft Windows + return + + def patch_params(mod_name, func_name): + """ + Prepare the parameters for patch_func to patch indicated function. 
+ """ + repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_' + repl_name = repl_prefix + func_name.lstrip('_') + repl = getattr(msvc, repl_name) + mod = import_module(mod_name) + if not hasattr(mod, func_name): + raise ImportError(func_name) + return repl, mod, func_name + + # Python 2.7 to 3.4 + msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler') + + # Python 3.5+ + msvc14 = functools.partial(patch_params, 'distutils._msvccompiler') + + try: + # Patch distutils.msvc9compiler + patch_func(*msvc9('find_vcvarsall')) + patch_func(*msvc9('query_vcvarsall')) + except ImportError: + pass + + try: + # Patch distutils._msvccompiler._get_vc_env + patch_func(*msvc14('_get_vc_env')) + except ImportError: + pass + + try: + # Patch distutils._msvccompiler.gen_lib_options for Numpy + patch_func(*msvc14('gen_lib_options')) + except ImportError: + pass diff --git a/Shared/lib/python3.4/site-packages/setuptools/msvc.py b/Shared/lib/python3.4/site-packages/setuptools/msvc.py new file mode 100644 index 0000000..b9c472f --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/msvc.py @@ -0,0 +1,1301 @@ +""" +Improved support for Microsoft Visual C++ compilers. 
+ +Known supported compilers: +-------------------------- +Microsoft Visual C++ 9.0: + Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) + Microsoft Windows SDK 6.1 (x86, x64, ia64) + Microsoft Windows SDK 7.0 (x86, x64, ia64) + +Microsoft Visual C++ 10.0: + Microsoft Windows SDK 7.1 (x86, x64, ia64) + +Microsoft Visual C++ 14.0: + Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) + Microsoft Visual Studio 2017 (x86, x64, arm, arm64) + Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) +""" + +import os +import sys +import platform +import itertools +import distutils.errors +from setuptools.extern.packaging.version import LegacyVersion + +from setuptools.extern.six.moves import filterfalse + +from .monkey import get_unpatched + +if platform.system() == 'Windows': + from setuptools.extern.six.moves import winreg + safe_env = os.environ +else: + """ + Mock winreg and environ so the module can be imported + on this platform. + """ + + class winreg: + HKEY_USERS = None + HKEY_CURRENT_USER = None + HKEY_LOCAL_MACHINE = None + HKEY_CLASSES_ROOT = None + + safe_env = dict() + +_msvc9_suppress_errors = ( + # msvc9compiler isn't available on some platforms + ImportError, + + # msvc9compiler raises DistutilsPlatformError in some + # environments. See #1118. + distutils.errors.DistutilsPlatformError, +) + +try: + from distutils.msvc9compiler import Reg +except _msvc9_suppress_errors: + pass + + +def msvc9_find_vcvarsall(version): + """ + Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone + compiler build for Python (VCForPython). Fall back to original behavior + when the standalone compiler is not available. + + Redirect the path of "vcvarsall.bat". + + Known supported compilers + ------------------------- + Microsoft Visual C++ 9.0: + Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) + + Parameters + ---------- + version: float + Required Microsoft Visual C++ version. 
+ + Return + ------ + vcvarsall.bat path: str + """ + VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f' + key = VC_BASE % ('', version) + try: + # Per-user installs register the compiler path here + productdir = Reg.get_value(key, "installdir") + except KeyError: + try: + # All-user installs on a 64-bit system register here + key = VC_BASE % ('Wow6432Node\\', version) + productdir = Reg.get_value(key, "installdir") + except KeyError: + productdir = None + + if productdir: + vcvarsall = os.path.os.path.join(productdir, "vcvarsall.bat") + if os.path.isfile(vcvarsall): + return vcvarsall + + return get_unpatched(msvc9_find_vcvarsall)(version) + + +def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs): + """ + Patched "distutils.msvc9compiler.query_vcvarsall" for support extra + compilers. + + Set environment without use of "vcvarsall.bat". + + Known supported compilers + ------------------------- + Microsoft Visual C++ 9.0: + Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) + Microsoft Windows SDK 6.1 (x86, x64, ia64) + Microsoft Windows SDK 7.0 (x86, x64, ia64) + + Microsoft Visual C++ 10.0: + Microsoft Windows SDK 7.1 (x86, x64, ia64) + + Parameters + ---------- + ver: float + Required Microsoft Visual C++ version. + arch: str + Target architecture. 
+ + Return + ------ + environment: dict + """ + # Try to get environement from vcvarsall.bat (Classical way) + try: + orig = get_unpatched(msvc9_query_vcvarsall) + return orig(ver, arch, *args, **kwargs) + except distutils.errors.DistutilsPlatformError: + # Pass error if Vcvarsall.bat is missing + pass + except ValueError: + # Pass error if environment not set after executing vcvarsall.bat + pass + + # If error, try to set environment directly + try: + return EnvironmentInfo(arch, ver).return_env() + except distutils.errors.DistutilsPlatformError as exc: + _augment_exception(exc, ver, arch) + raise + + +def msvc14_get_vc_env(plat_spec): + """ + Patched "distutils._msvccompiler._get_vc_env" for support extra + compilers. + + Set environment without use of "vcvarsall.bat". + + Known supported compilers + ------------------------- + Microsoft Visual C++ 14.0: + Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) + Microsoft Visual Studio 2017 (x86, x64, arm, arm64) + Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) + + Parameters + ---------- + plat_spec: str + Target architecture. 
+ + Return + ------ + environment: dict + """ + # Try to get environment from vcvarsall.bat (Classical way) + try: + return get_unpatched(msvc14_get_vc_env)(plat_spec) + except distutils.errors.DistutilsPlatformError: + # Pass error Vcvarsall.bat is missing + pass + + # If error, try to set environment directly + try: + return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env() + except distutils.errors.DistutilsPlatformError as exc: + _augment_exception(exc, 14.0) + raise + + +def msvc14_gen_lib_options(*args, **kwargs): + """ + Patched "distutils._msvccompiler.gen_lib_options" for fix + compatibility between "numpy.distutils" and "distutils._msvccompiler" + (for Numpy < 1.11.2) + """ + if "numpy.distutils" in sys.modules: + import numpy as np + if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'): + return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) + return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs) + + +def _augment_exception(exc, version, arch=''): + """ + Add details to the exception message to help guide the user + as to what action will resolve it. + """ + # Error if MSVC++ directory not found or environment not set + message = exc.args[0] + + if "vcvarsall" in message.lower() or "visual c" in message.lower(): + # Special error message if MSVC++ not installed + tmpl = 'Microsoft Visual C++ {version:0.1f} is required.' + message = tmpl.format(**locals()) + msdownload = 'www.microsoft.com/download/details.aspx?id=%d' + if version == 9.0: + if arch.lower().find('ia64') > -1: + # For VC++ 9.0, if IA64 support is needed, redirect user + # to Windows SDK 7.0 + message += ' Get it with "Microsoft Windows SDK 7.0": ' + message += msdownload % 3138 + else: + # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : + # This redirection link is maintained by Microsoft. + # Contact vspython@microsoft.com if it needs updating. 
+ message += ' Get it from http://aka.ms/vcpython27' + elif version == 10.0: + # For VC++ 10.0 Redirect user to Windows SDK 7.1 + message += ' Get it with "Microsoft Windows SDK 7.1": ' + message += msdownload % 8279 + elif version >= 14.0: + # For VC++ 14.0 Redirect user to Visual C++ Build Tools + message += (' Get it with "Microsoft Visual C++ Build Tools": ' + r'https://visualstudio.microsoft.com/downloads/') + + exc.args = (message, ) + + +class PlatformInfo: + """ + Current and Target Architectures informations. + + Parameters + ---------- + arch: str + Target architecture. + """ + current_cpu = safe_env.get('processor_architecture', '').lower() + + def __init__(self, arch): + self.arch = arch.lower().replace('x64', 'amd64') + + @property + def target_cpu(self): + return self.arch[self.arch.find('_') + 1:] + + def target_is_x86(self): + return self.target_cpu == 'x86' + + def current_is_x86(self): + return self.current_cpu == 'x86' + + def current_dir(self, hidex86=False, x64=False): + """ + Current platform specific subfolder. + + Parameters + ---------- + hidex86: bool + return '' and not '\x86' if architecture is x86. + x64: bool + return '\x64' and not '\amd64' if architecture is amd64. + + Return + ------ + subfolder: str + '\target', or '' (see hidex86 parameter) + """ + return ( + '' if (self.current_cpu == 'x86' and hidex86) else + r'\x64' if (self.current_cpu == 'amd64' and x64) else + r'\%s' % self.current_cpu + ) + + def target_dir(self, hidex86=False, x64=False): + r""" + Target platform specific subfolder. + + Parameters + ---------- + hidex86: bool + return '' and not '\x86' if architecture is x86. + x64: bool + return '\x64' and not '\amd64' if architecture is amd64. 
+ + Return + ------ + subfolder: str + '\current', or '' (see hidex86 parameter) + """ + return ( + '' if (self.target_cpu == 'x86' and hidex86) else + r'\x64' if (self.target_cpu == 'amd64' and x64) else + r'\%s' % self.target_cpu + ) + + def cross_dir(self, forcex86=False): + r""" + Cross platform specific subfolder. + + Parameters + ---------- + forcex86: bool + Use 'x86' as current architecture even if current acritecture is + not x86. + + Return + ------ + subfolder: str + '' if target architecture is current architecture, + '\current_target' if not. + """ + current = 'x86' if forcex86 else self.current_cpu + return ( + '' if self.target_cpu == current else + self.target_dir().replace('\\', '\\%s_' % current) + ) + + +class RegistryInfo: + """ + Microsoft Visual Studio related registry informations. + + Parameters + ---------- + platform_info: PlatformInfo + "PlatformInfo" instance. + """ + HKEYS = (winreg.HKEY_USERS, + winreg.HKEY_CURRENT_USER, + winreg.HKEY_LOCAL_MACHINE, + winreg.HKEY_CLASSES_ROOT) + + def __init__(self, platform_info): + self.pi = platform_info + + @property + def visualstudio(self): + """ + Microsoft Visual Studio root registry key. + """ + return 'VisualStudio' + + @property + def sxs(self): + """ + Microsoft Visual Studio SxS registry key. + """ + return os.path.join(self.visualstudio, 'SxS') + + @property + def vc(self): + """ + Microsoft Visual C++ VC7 registry key. + """ + return os.path.join(self.sxs, 'VC7') + + @property + def vs(self): + """ + Microsoft Visual Studio VS7 registry key. + """ + return os.path.join(self.sxs, 'VS7') + + @property + def vc_for_python(self): + """ + Microsoft Visual C++ for Python registry key. + """ + return r'DevDiv\VCForPython' + + @property + def microsoft_sdk(self): + """ + Microsoft SDK registry key. + """ + return 'Microsoft SDKs' + + @property + def windows_sdk(self): + """ + Microsoft Windows/Platform SDK registry key. 
+ """ + return os.path.join(self.microsoft_sdk, 'Windows') + + @property + def netfx_sdk(self): + """ + Microsoft .NET Framework SDK registry key. + """ + return os.path.join(self.microsoft_sdk, 'NETFXSDK') + + @property + def windows_kits_roots(self): + """ + Microsoft Windows Kits Roots registry key. + """ + return r'Windows Kits\Installed Roots' + + def microsoft(self, key, x86=False): + """ + Return key in Microsoft software registry. + + Parameters + ---------- + key: str + Registry key path where look. + x86: str + Force x86 software registry. + + Return + ------ + str: value + """ + node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node' + return os.path.join('Software', node64, 'Microsoft', key) + + def lookup(self, key, name): + """ + Look for values in registry in Microsoft software registry. + + Parameters + ---------- + key: str + Registry key path where look. + name: str + Value name to find. + + Return + ------ + str: value + """ + KEY_READ = winreg.KEY_READ + openkey = winreg.OpenKey + ms = self.microsoft + for hkey in self.HKEYS: + try: + bkey = openkey(hkey, ms(key), 0, KEY_READ) + except (OSError, IOError): + if not self.pi.current_is_x86(): + try: + bkey = openkey(hkey, ms(key, True), 0, KEY_READ) + except (OSError, IOError): + continue + else: + continue + try: + return winreg.QueryValueEx(bkey, name)[0] + except (OSError, IOError): + pass + + +class SystemInfo: + """ + Microsoft Windows and Visual Studio related system inormations. + + Parameters + ---------- + registry_info: RegistryInfo + "RegistryInfo" instance. + vc_ver: float + Required Microsoft Visual C++ version. + """ + + # Variables and properties in this class use originals CamelCase variables + # names from Microsoft source files for more easy comparaison. 
+ WinDir = safe_env.get('WinDir', '') + ProgramFiles = safe_env.get('ProgramFiles', '') + ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles) + + def __init__(self, registry_info, vc_ver=None): + self.ri = registry_info + self.pi = self.ri.pi + self.vc_ver = vc_ver or self._find_latest_available_vc_ver() + + def _find_latest_available_vc_ver(self): + try: + return self.find_available_vc_vers()[-1] + except IndexError: + err = 'No Microsoft Visual C++ version found' + raise distutils.errors.DistutilsPlatformError(err) + + def find_available_vc_vers(self): + """ + Find all available Microsoft Visual C++ versions. + """ + ms = self.ri.microsoft + vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs) + vc_vers = [] + for hkey in self.ri.HKEYS: + for key in vckeys: + try: + bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ) + except (OSError, IOError): + continue + subkeys, values, _ = winreg.QueryInfoKey(bkey) + for i in range(values): + try: + ver = float(winreg.EnumValue(bkey, i)[0]) + if ver not in vc_vers: + vc_vers.append(ver) + except ValueError: + pass + for i in range(subkeys): + try: + ver = float(winreg.EnumKey(bkey, i)) + if ver not in vc_vers: + vc_vers.append(ver) + except ValueError: + pass + return sorted(vc_vers) + + @property + def VSInstallDir(self): + """ + Microsoft Visual Studio directory. + """ + # Default path + name = 'Microsoft Visual Studio %0.1f' % self.vc_ver + default = os.path.join(self.ProgramFilesx86, name) + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default + + @property + def VCInstallDir(self): + """ + Microsoft Visual C++ directory. 
+ """ + self.VSInstallDir + + guess_vc = self._guess_vc() or self._guess_vc_legacy() + + # Try to get "VC++ for Python" path from registry as default path + reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) + python_vc = self.ri.lookup(reg_path, 'installdir') + default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc + + # Try to get path from registry, if fail use default path + path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc + + if not os.path.isdir(path): + msg = 'Microsoft Visual C++ directory not found' + raise distutils.errors.DistutilsPlatformError(msg) + + return path + + def _guess_vc(self): + """ + Locate Visual C for 2017 + """ + if self.vc_ver <= 14.0: + return + + default = r'VC\Tools\MSVC' + guess_vc = os.path.join(self.VSInstallDir, default) + # Subdir with VC exact version as name + try: + vc_exact_ver = os.listdir(guess_vc)[-1] + return os.path.join(guess_vc, vc_exact_ver) + except (OSError, IOError, IndexError): + pass + + def _guess_vc_legacy(self): + """ + Locate Visual C for versions prior to 2017 + """ + default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver + return os.path.join(self.ProgramFilesx86, default) + + @property + def WindowsSdkVersion(self): + """ + Microsoft Windows SDK versions for specified MSVC++ version. + """ + if self.vc_ver <= 9.0: + return ('7.0', '6.1', '6.0a') + elif self.vc_ver == 10.0: + return ('7.1', '7.0a') + elif self.vc_ver == 11.0: + return ('8.0', '8.0a') + elif self.vc_ver == 12.0: + return ('8.1', '8.1a') + elif self.vc_ver >= 14.0: + return ('10.0', '8.1') + + @property + def WindowsSdkLastVersion(self): + """ + Microsoft Windows SDK last version + """ + return self._use_last_dir_name(os.path.join( + self.WindowsSdkDir, 'lib')) + + @property + def WindowsSdkDir(self): + """ + Microsoft Windows SDK directory. 
+ """ + sdkdir = '' + for ver in self.WindowsSdkVersion: + # Try to get it from registry + loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver) + sdkdir = self.ri.lookup(loc, 'installationfolder') + if sdkdir: + break + if not sdkdir or not os.path.isdir(sdkdir): + # Try to get "VC++ for Python" version from registry + path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) + install_base = self.ri.lookup(path, 'installdir') + if install_base: + sdkdir = os.path.join(install_base, 'WinSDK') + if not sdkdir or not os.path.isdir(sdkdir): + # If fail, use default new path + for ver in self.WindowsSdkVersion: + intver = ver[:ver.rfind('.')] + path = r'Microsoft SDKs\Windows Kits\%s' % (intver) + d = os.path.join(self.ProgramFiles, path) + if os.path.isdir(d): + sdkdir = d + if not sdkdir or not os.path.isdir(sdkdir): + # If fail, use default old path + for ver in self.WindowsSdkVersion: + path = r'Microsoft SDKs\Windows\v%s' % ver + d = os.path.join(self.ProgramFiles, path) + if os.path.isdir(d): + sdkdir = d + if not sdkdir: + # If fail, use Platform SDK + sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK') + return sdkdir + + @property + def WindowsSDKExecutablePath(self): + """ + Microsoft Windows SDK executable directory. 
+ """ + # Find WinSDK NetFx Tools registry dir name + if self.vc_ver <= 11.0: + netfxver = 35 + arch = '' + else: + netfxver = 40 + hidex86 = True if self.vc_ver <= 12.0 else False + arch = self.pi.current_dir(x64=True, hidex86=hidex86) + fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) + + # liste all possibles registry paths + regpaths = [] + if self.vc_ver >= 14.0: + for ver in self.NetFxSdkVersion: + regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)] + + for ver in self.WindowsSdkVersion: + regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)] + + # Return installation folder from the more recent path + for path in regpaths: + execpath = self.ri.lookup(path, 'installationfolder') + if execpath: + break + return execpath + + @property + def FSharpInstallDir(self): + """ + Microsoft Visual F# directory. + """ + path = r'%0.1f\Setup\F#' % self.vc_ver + path = os.path.join(self.ri.visualstudio, path) + return self.ri.lookup(path, 'productdir') or '' + + @property + def UniversalCRTSdkDir(self): + """ + Microsoft Universal CRT SDK directory. + """ + # Set Kit Roots versions for specified MSVC++ version + if self.vc_ver >= 14.0: + vers = ('10', '81') + else: + vers = () + + # Find path of the more recent Kit + for ver in vers: + sdkdir = self.ri.lookup(self.ri.windows_kits_roots, + 'kitsroot%s' % ver) + if sdkdir: + break + return sdkdir or '' + + @property + def UniversalCRTSdkLastVersion(self): + """ + Microsoft Universal C Runtime SDK last version + """ + return self._use_last_dir_name(os.path.join( + self.UniversalCRTSdkDir, 'lib')) + + @property + def NetFxSdkVersion(self): + """ + Microsoft .NET Framework SDK versions. + """ + # Set FxSdk versions for specified MSVC++ version + if self.vc_ver >= 14.0: + return ('4.6.1', '4.6') + else: + return () + + @property + def NetFxSdkDir(self): + """ + Microsoft .NET Framework SDK directory. 
+ """ + for ver in self.NetFxSdkVersion: + loc = os.path.join(self.ri.netfx_sdk, ver) + sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder') + if sdkdir: + break + return sdkdir or '' + + @property + def FrameworkDir32(self): + """ + Microsoft .NET Framework 32bit directory. + """ + # Default path + guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework') + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw + + @property + def FrameworkDir64(self): + """ + Microsoft .NET Framework 64bit directory. + """ + # Default path + guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64') + + # Try to get path from registry, if fail use default path + return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw + + @property + def FrameworkVersion32(self): + """ + Microsoft .NET Framework 32bit versions. + """ + return self._find_dot_net_versions(32) + + @property + def FrameworkVersion64(self): + """ + Microsoft .NET Framework 64bit versions. + """ + return self._find_dot_net_versions(64) + + def _find_dot_net_versions(self, bits): + """ + Find Microsoft .NET Framework versions. + + Parameters + ---------- + bits: int + Platform number of bits: 32 or 64. + """ + # Find actual .NET version in registry + reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) + dot_net_dir = getattr(self, 'FrameworkDir%d' % bits) + ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or '' + + # Set .NET versions for specified MSVC++ version + if self.vc_ver >= 12.0: + frameworkver = (ver, 'v4.0') + elif self.vc_ver >= 10.0: + frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver, + 'v3.5') + elif self.vc_ver == 9.0: + frameworkver = ('v3.5', 'v2.0.50727') + if self.vc_ver == 8.0: + frameworkver = ('v3.0', 'v2.0.50727') + return frameworkver + + def _use_last_dir_name(self, path, prefix=''): + """ + Return name of the last dir in path or '' if no dir found. 
+ + Parameters + ---------- + path: str + Use dirs in this path + prefix: str + Use only dirs startings by this prefix + """ + matching_dirs = ( + dir_name + for dir_name in reversed(os.listdir(path)) + if os.path.isdir(os.path.join(path, dir_name)) and + dir_name.startswith(prefix) + ) + return next(matching_dirs, None) or '' + + +class EnvironmentInfo: + """ + Return environment variables for specified Microsoft Visual C++ version + and platform : Lib, Include, Path and libpath. + + This function is compatible with Microsoft Visual C++ 9.0 to 14.0. + + Script created by analysing Microsoft environment configuration files like + "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... + + Parameters + ---------- + arch: str + Target architecture. + vc_ver: float + Required Microsoft Visual C++ version. If not set, autodetect the last + version. + vc_min_ver: float + Minimum Microsoft Visual C++ version. + """ + + # Variables and properties in this class use originals CamelCase variables + # names from Microsoft source files for more easy comparaison. + + def __init__(self, arch, vc_ver=None, vc_min_ver=0): + self.pi = PlatformInfo(arch) + self.ri = RegistryInfo(self.pi) + self.si = SystemInfo(self.ri, vc_ver) + + if self.vc_ver < vc_min_ver: + err = 'No suitable Microsoft Visual C++ version found' + raise distutils.errors.DistutilsPlatformError(err) + + @property + def vc_ver(self): + """ + Microsoft Visual C++ version. 
+ """ + return self.si.vc_ver + + @property + def VSTools(self): + """ + Microsoft Visual Studio Tools + """ + paths = [r'Common7\IDE', r'Common7\Tools'] + + if self.vc_ver >= 14.0: + arch_subdir = self.pi.current_dir(hidex86=True, x64=True) + paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] + paths += [r'Team Tools\Performance Tools'] + paths += [r'Team Tools\Performance Tools%s' % arch_subdir] + + return [os.path.join(self.si.VSInstallDir, path) for path in paths] + + @property + def VCIncludes(self): + """ + Microsoft Visual C++ & Microsoft Foundation Class Includes + """ + return [os.path.join(self.si.VCInstallDir, 'Include'), + os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')] + + @property + def VCLibraries(self): + """ + Microsoft Visual C++ & Microsoft Foundation Class Libraries + """ + if self.vc_ver >= 15.0: + arch_subdir = self.pi.target_dir(x64=True) + else: + arch_subdir = self.pi.target_dir(hidex86=True) + paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] + + if self.vc_ver >= 14.0: + paths += [r'Lib\store%s' % arch_subdir] + + return [os.path.join(self.si.VCInstallDir, path) for path in paths] + + @property + def VCStoreRefs(self): + """ + Microsoft Visual C++ store references Libraries + """ + if self.vc_ver < 14.0: + return [] + return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')] + + @property + def VCTools(self): + """ + Microsoft Visual C++ Tools + """ + si = self.si + tools = [os.path.join(si.VCInstallDir, 'VCPackages')] + + forcex86 = True if self.vc_ver <= 10.0 else False + arch_subdir = self.pi.cross_dir(forcex86) + if arch_subdir: + tools += [os.path.join(si.VCInstallDir, 'Bin%s' % arch_subdir)] + + if self.vc_ver == 14.0: + path = 'Bin%s' % self.pi.current_dir(hidex86=True) + tools += [os.path.join(si.VCInstallDir, path)] + + elif self.vc_ver >= 15.0: + host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else + r'bin\HostX64%s') + tools += [os.path.join( + si.VCInstallDir, host_dir % 
self.pi.target_dir(x64=True))] + + if self.pi.current_cpu != self.pi.target_cpu: + tools += [os.path.join( + si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))] + + else: + tools += [os.path.join(si.VCInstallDir, 'Bin')] + + return tools + + @property + def OSLibraries(self): + """ + Microsoft Windows SDK Libraries + """ + if self.vc_ver <= 10.0: + arch_subdir = self.pi.target_dir(hidex86=True, x64=True) + return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] + + else: + arch_subdir = self.pi.target_dir(x64=True) + lib = os.path.join(self.si.WindowsSdkDir, 'lib') + libver = self._sdk_subdir + return [os.path.join(lib, '%sum%s' % (libver , arch_subdir))] + + @property + def OSIncludes(self): + """ + Microsoft Windows SDK Include + """ + include = os.path.join(self.si.WindowsSdkDir, 'include') + + if self.vc_ver <= 10.0: + return [include, os.path.join(include, 'gl')] + + else: + if self.vc_ver >= 14.0: + sdkver = self._sdk_subdir + else: + sdkver = '' + return [os.path.join(include, '%sshared' % sdkver), + os.path.join(include, '%sum' % sdkver), + os.path.join(include, '%swinrt' % sdkver)] + + @property + def OSLibpath(self): + """ + Microsoft Windows SDK Libraries Paths + """ + ref = os.path.join(self.si.WindowsSdkDir, 'References') + libpath = [] + + if self.vc_ver <= 9.0: + libpath += self.OSLibraries + + if self.vc_ver >= 11.0: + libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')] + + if self.vc_ver >= 14.0: + libpath += [ + ref, + os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'), + os.path.join( + ref, + 'Windows.Foundation.UniversalApiContract', + '1.0.0.0', + ), + os.path.join( + ref, + 'Windows.Foundation.FoundationContract', + '1.0.0.0', + ), + os.path.join( + ref, + 'Windows.Networking.Connectivity.WwanContract', + '1.0.0.0', + ), + os.path.join( + self.si.WindowsSdkDir, + 'ExtensionSDKs', + 'Microsoft.VCLibs', + '%0.1f' % self.vc_ver, + 'References', + 'CommonConfiguration', + 'neutral', + ), + ] + return libpath + + 
@property + def SdkTools(self): + """ + Microsoft Windows SDK Tools + """ + return list(self._sdk_tools()) + + def _sdk_tools(self): + """ + Microsoft Windows SDK Tools paths generator + """ + if self.vc_ver < 15.0: + bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86' + yield os.path.join(self.si.WindowsSdkDir, bin_dir) + + if not self.pi.current_is_x86(): + arch_subdir = self.pi.current_dir(x64=True) + path = 'Bin%s' % arch_subdir + yield os.path.join(self.si.WindowsSdkDir, path) + + if self.vc_ver == 10.0 or self.vc_ver == 11.0: + if self.pi.target_is_x86(): + arch_subdir = '' + else: + arch_subdir = self.pi.current_dir(hidex86=True, x64=True) + path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir + yield os.path.join(self.si.WindowsSdkDir, path) + + elif self.vc_ver >= 15.0: + path = os.path.join(self.si.WindowsSdkDir, 'Bin') + arch_subdir = self.pi.current_dir(x64=True) + sdkver = self.si.WindowsSdkLastVersion + yield os.path.join(path, '%s%s' % (sdkver, arch_subdir)) + + if self.si.WindowsSDKExecutablePath: + yield self.si.WindowsSDKExecutablePath + + @property + def _sdk_subdir(self): + """ + Microsoft Windows SDK version subdir + """ + ucrtver = self.si.WindowsSdkLastVersion + return ('%s\\' % ucrtver) if ucrtver else '' + + @property + def SdkSetup(self): + """ + Microsoft Windows SDK Setup + """ + if self.vc_ver > 9.0: + return [] + + return [os.path.join(self.si.WindowsSdkDir, 'Setup')] + + @property + def FxTools(self): + """ + Microsoft .NET Framework Tools + """ + pi = self.pi + si = self.si + + if self.vc_ver <= 10.0: + include32 = True + include64 = not pi.target_is_x86() and not pi.current_is_x86() + else: + include32 = pi.target_is_x86() or pi.current_is_x86() + include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64' + + tools = [] + if include32: + tools += [os.path.join(si.FrameworkDir32, ver) + for ver in si.FrameworkVersion32] + if include64: + tools += [os.path.join(si.FrameworkDir64, ver) + for ver in si.FrameworkVersion64] + return 
tools + + @property + def NetFxSDKLibraries(self): + """ + Microsoft .Net Framework SDK Libraries + """ + if self.vc_ver < 14.0 or not self.si.NetFxSdkDir: + return [] + + arch_subdir = self.pi.target_dir(x64=True) + return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)] + + @property + def NetFxSDKIncludes(self): + """ + Microsoft .Net Framework SDK Includes + """ + if self.vc_ver < 14.0 or not self.si.NetFxSdkDir: + return [] + + return [os.path.join(self.si.NetFxSdkDir, r'include\um')] + + @property + def VsTDb(self): + """ + Microsoft Visual Studio Team System Database + """ + return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')] + + @property + def MSBuild(self): + """ + Microsoft Build Engine + """ + if self.vc_ver < 12.0: + return [] + elif self.vc_ver < 15.0: + base_path = self.si.ProgramFilesx86 + arch_subdir = self.pi.current_dir(hidex86=True) + else: + base_path = self.si.VSInstallDir + arch_subdir = '' + + path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, arch_subdir) + build = [os.path.join(base_path, path)] + + if self.vc_ver >= 15.0: + # Add Roslyn C# & Visual Basic Compiler + build += [os.path.join(base_path, path, 'Roslyn')] + + return build + + @property + def HTMLHelpWorkshop(self): + """ + Microsoft HTML Help Workshop + """ + if self.vc_ver < 11.0: + return [] + + return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')] + + @property + def UCRTLibraries(self): + """ + Microsoft Universal C Runtime SDK Libraries + """ + if self.vc_ver < 14.0: + return [] + + arch_subdir = self.pi.target_dir(x64=True) + lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib') + ucrtver = self._ucrt_subdir + return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))] + + @property + def UCRTIncludes(self): + """ + Microsoft Universal C Runtime SDK Include + """ + if self.vc_ver < 14.0: + return [] + + include = os.path.join(self.si.UniversalCRTSdkDir, 'include') + return [os.path.join(include, '%sucrt' % self._ucrt_subdir)] + + 
@property + def _ucrt_subdir(self): + """ + Microsoft Universal C Runtime SDK version subdir + """ + ucrtver = self.si.UniversalCRTSdkLastVersion + return ('%s\\' % ucrtver) if ucrtver else '' + + @property + def FSharp(self): + """ + Microsoft Visual F# + """ + if self.vc_ver < 11.0 and self.vc_ver > 12.0: + return [] + + return self.si.FSharpInstallDir + + @property + def VCRuntimeRedist(self): + """ + Microsoft Visual C++ runtime redistribuable dll + """ + arch_subdir = self.pi.target_dir(x64=True) + if self.vc_ver < 15: + redist_path = self.si.VCInstallDir + vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll' + else: + redist_path = self.si.VCInstallDir.replace('\\Tools', '\\Redist') + vcruntime = 'onecore%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll' + + # Visual Studio 2017 is still Visual C++ 14.0 + dll_ver = 14.0 if self.vc_ver == 15 else self.vc_ver + + vcruntime = vcruntime % (arch_subdir, self.vc_ver, dll_ver) + return os.path.join(redist_path, vcruntime) + + def return_env(self, exists=True): + """ + Return environment dict. + + Parameters + ---------- + exists: bool + It True, only return existing paths. 
+ """ + env = dict( + include=self._build_paths('include', + [self.VCIncludes, + self.OSIncludes, + self.UCRTIncludes, + self.NetFxSDKIncludes], + exists), + lib=self._build_paths('lib', + [self.VCLibraries, + self.OSLibraries, + self.FxTools, + self.UCRTLibraries, + self.NetFxSDKLibraries], + exists), + libpath=self._build_paths('libpath', + [self.VCLibraries, + self.FxTools, + self.VCStoreRefs, + self.OSLibpath], + exists), + path=self._build_paths('path', + [self.VCTools, + self.VSTools, + self.VsTDb, + self.SdkTools, + self.SdkSetup, + self.FxTools, + self.MSBuild, + self.HTMLHelpWorkshop, + self.FSharp], + exists), + ) + if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist): + env['py_vcruntime_redist'] = self.VCRuntimeRedist + return env + + def _build_paths(self, name, spec_path_lists, exists): + """ + Given an environment variable name and specified paths, + return a pathsep-separated string of paths containing + unique, extant, directories from those paths and from + the environment variable. Raise an error if no paths + are resolved. + """ + # flatten spec_path_lists + spec_paths = itertools.chain.from_iterable(spec_path_lists) + env_paths = safe_env.get(name, '').split(os.pathsep) + paths = itertools.chain(spec_paths, env_paths) + extant_paths = list(filter(os.path.isdir, paths)) if exists else paths + if not extant_paths: + msg = "%s environment variable is empty" % name.upper() + raise distutils.errors.DistutilsPlatformError(msg) + unique_paths = self._unique_everseen(extant_paths) + return os.pathsep.join(unique_paths) + + # from Python docs + def _unique_everseen(self, iterable, key=None): + """ + List unique elements, preserving order. + Remember all elements ever seen. 
+ + _unique_everseen('AAAABBBCCDAABBB') --> A B C D + + _unique_everseen('ABBCcAD', str.lower) --> A B C D + """ + seen = set() + seen_add = seen.add + if key is None: + for element in filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element + else: + for element in iterable: + k = key(element) + if k not in seen: + seen_add(k) + yield element diff --git a/Shared/lib/python3.4/site-packages/setuptools/msvc9_support.py b/Shared/lib/python3.4/site-packages/setuptools/msvc9_support.py deleted file mode 100644 index a69c747..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools/msvc9_support.py +++ /dev/null @@ -1,63 +0,0 @@ -try: - import distutils.msvc9compiler -except ImportError: - pass - -unpatched = dict() - -def patch_for_specialized_compiler(): - """ - Patch functions in distutils.msvc9compiler to use the standalone compiler - build for Python (Windows only). Fall back to original behavior when the - standalone compiler is not available. - """ - if 'distutils' not in globals(): - # The module isn't available to be patched - return - - if unpatched: - # Already patched - return - - unpatched.update(vars(distutils.msvc9compiler)) - - distutils.msvc9compiler.find_vcvarsall = find_vcvarsall - distutils.msvc9compiler.query_vcvarsall = query_vcvarsall - -def find_vcvarsall(version): - Reg = distutils.msvc9compiler.Reg - VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f' - key = VC_BASE % ('', version) - try: - # Per-user installs register the compiler path here - productdir = Reg.get_value(key, "installdir") - except KeyError: - try: - # All-user installs on a 64-bit system register here - key = VC_BASE % ('Wow6432Node\\', version) - productdir = Reg.get_value(key, "installdir") - except KeyError: - productdir = None - - if productdir: - import os - vcvarsall = os.path.join(productdir, "vcvarsall.bat") - if os.path.isfile(vcvarsall): - return vcvarsall - - return unpatched['find_vcvarsall'](version) - -def query_vcvarsall(version, 
*args, **kwargs): - try: - return unpatched['query_vcvarsall'](version, *args, **kwargs) - except distutils.errors.DistutilsPlatformError as exc: - if exc and "vcvarsall.bat" in exc.args[0]: - message = 'Microsoft Visual C++ %0.1f is required (%s).' % (version, exc.args[0]) - if int(version) == 9: - # This redirection link is maintained by Microsoft. - # Contact vspython@microsoft.com if it needs updating. - raise distutils.errors.DistutilsPlatformError( - message + ' Get it from http://aka.ms/vcpython27' - ) - raise distutils.errors.DistutilsPlatformError(message) - raise diff --git a/Shared/lib/python3.4/site-packages/setuptools/namespaces.py b/Shared/lib/python3.4/site-packages/setuptools/namespaces.py new file mode 100644 index 0000000..dc16106 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/namespaces.py @@ -0,0 +1,107 @@ +import os +from distutils import log +import itertools + +from setuptools.extern.six.moves import map + + +flatten = itertools.chain.from_iterable + + +class Installer: + + nspkg_ext = '-nspkg.pth' + + def install_namespaces(self): + nsp = self._get_all_ns_packages() + if not nsp: + return + filename, ext = os.path.splitext(self._get_target()) + filename += self.nspkg_ext + self.outputs.append(filename) + log.info("Installing %s", filename) + lines = map(self._gen_nspkg_line, nsp) + + if self.dry_run: + # always generate the lines, even in dry run + list(lines) + return + + with open(filename, 'wt') as f: + f.writelines(lines) + + def uninstall_namespaces(self): + filename, ext = os.path.splitext(self._get_target()) + filename += self.nspkg_ext + if not os.path.exists(filename): + return + log.info("Removing %s", filename) + os.remove(filename) + + def _get_target(self): + return self.target + + _nspkg_tmpl = ( + "import sys, types, os", + "has_mfs = sys.version_info > (3, 5)", + "p = os.path.join(%(root)s, *%(pth)r)", + "importlib = has_mfs and __import__('importlib.util')", + "has_mfs and 
__import__('importlib.machinery')", + "m = has_mfs and " + "sys.modules.setdefault(%(pkg)r, " + "importlib.util.module_from_spec(" + "importlib.machinery.PathFinder.find_spec(%(pkg)r, " + "[os.path.dirname(p)])))", + "m = m or " + "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))", + "mp = (m or []) and m.__dict__.setdefault('__path__',[])", + "(p not in mp) and mp.append(p)", + ) + "lines for the namespace installer" + + _nspkg_tmpl_multi = ( + 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', + ) + "additional line(s) when a parent package is indicated" + + def _get_root(self): + return "sys._getframe(1).f_locals['sitedir']" + + def _gen_nspkg_line(self, pkg): + # ensure pkg is not a unicode string under Python 2.7 + pkg = str(pkg) + pth = tuple(pkg.split('.')) + root = self._get_root() + tmpl_lines = self._nspkg_tmpl + parent, sep, child = pkg.rpartition('.') + if parent: + tmpl_lines += self._nspkg_tmpl_multi + return ';'.join(tmpl_lines) % locals() + '\n' + + def _get_all_ns_packages(self): + """Return sorted list of all package namespaces""" + pkgs = self.distribution.namespace_packages or [] + return sorted(flatten(map(self._pkg_names, pkgs))) + + @staticmethod + def _pkg_names(pkg): + """ + Given a namespace package, yield the components of that + package. 
+ + >>> names = Installer._pkg_names('a.b.c') + >>> set(names) == set(['a', 'a.b', 'a.b.c']) + True + """ + parts = pkg.split('.') + while parts: + yield '.'.join(parts) + parts.pop() + + +class DevelopInstaller(Installer): + def _get_root(self): + return repr(str(self.egg_path)) + + def _get_target(self): + return self.egg_link diff --git a/Shared/lib/python3.4/site-packages/setuptools/package_index.py b/Shared/lib/python3.4/site-packages/setuptools/package_index.py index c53343e..1608b91 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/package_index.py +++ b/Shared/lib/python3.4/site-packages/setuptools/package_index.py @@ -7,36 +7,35 @@ import socket import base64 import hashlib import itertools +import warnings from functools import wraps -try: - from urllib.parse import splituser -except ImportError: - from urllib2 import splituser - from setuptools.extern import six from setuptools.extern.six.moves import urllib, http_client, configparser, map +import setuptools from pkg_resources import ( CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST, - require, Environment, find_distributions, safe_name, safe_version, - to_filename, Requirement, DEVELOP_DIST, + Environment, find_distributions, safe_name, safe_version, + to_filename, Requirement, DEVELOP_DIST, EGG_DIST, ) from setuptools import ssl_support from distutils import log from distutils.errors import DistutilsError from fnmatch import translate -from setuptools.py26compat import strip_fragment from setuptools.py27compat import get_all_headers +from setuptools.py33compat import unescape +from setuptools.wheel import Wheel -EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.]+)$') -HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I) -# this is here to fix emacs' cruddy broken syntax highlighting +__metaclass__ = type + +EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$') +HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I) PYPI_MD5 = re.compile( - 
'([^<]+)\n\s+\\(md5\\)' + r'([^<]+)\n\s+\(md5\)' ) -URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):',re.I).match +URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() __all__ = [ @@ -46,6 +45,19 @@ __all__ = [ _SOCKET_TIMEOUT = 15 +_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}" +user_agent = _tmpl.format(py_major=sys.version[:3], setuptools=setuptools) + + +def parse_requirement_arg(spec): + try: + return Requirement.parse(spec) + except ValueError: + raise DistutilsError( + "Not a URL, existing file, or requirement spec: %r" % (spec,) + ) + + def parse_bdist_wininst(name): """Return (base,pyversion) or (None,None) for possible .exe name""" @@ -56,48 +68,63 @@ def parse_bdist_wininst(name): if lower.endswith('.win32.exe'): base = name[:-10] plat = 'win32' - elif lower.startswith('.win32-py',-16): + elif lower.startswith('.win32-py', -16): py_ver = name[-7:-4] base = name[:-16] plat = 'win32' elif lower.endswith('.win-amd64.exe'): base = name[:-14] plat = 'win-amd64' - elif lower.startswith('.win-amd64-py',-20): + elif lower.startswith('.win-amd64-py', -20): py_ver = name[-7:-4] base = name[:-20] plat = 'win-amd64' - return base,py_ver,plat + return base, py_ver, plat def egg_info_for_url(url): parts = urllib.parse.urlparse(url) scheme, server, path, parameters, query, fragment = parts base = urllib.parse.unquote(path.split('/')[-1]) - if server=='sourceforge.net' and base=='download': # XXX Yuck + if server == 'sourceforge.net' and base == 'download': # XXX Yuck base = urllib.parse.unquote(path.split('/')[-2]) - if '#' in base: base, fragment = base.split('#',1) - return base,fragment + if '#' in base: + base, fragment = base.split('#', 1) + return base, fragment + def distros_for_url(url, metadata=None): """Yield egg or source distribution objects that might be found at a URL""" base, fragment = egg_info_for_url(url) - for dist in distros_for_location(url, base, metadata): yield dist + 
for dist in distros_for_location(url, base, metadata): + yield dist if fragment: match = EGG_FRAGMENT.match(fragment) if match: for dist in interpret_distro_name( - url, match.group(1), metadata, precedence = CHECKOUT_DIST + url, match.group(1), metadata, precedence=CHECKOUT_DIST ): yield dist + def distros_for_location(location, basename, metadata=None): """Yield egg or source distribution objects based on basename""" if basename.endswith('.egg.zip'): - basename = basename[:-4] # strip the .zip + basename = basename[:-4] # strip the .zip if basename.endswith('.egg') and '-' in basename: # only one, unambiguous interpretation return [Distribution.from_location(location, basename, metadata)] + if basename.endswith('.whl') and '-' in basename: + wheel = Wheel(basename) + if not wheel.is_compatible(): + return [] + return [Distribution( + location=location, + project_name=wheel.project_name, + version=wheel.version, + # Increase priority over eggs. + precedence=EGG_DIST + 1, + )] if basename.endswith('.exe'): win_base, py_ver, platform = parse_bdist_wininst(basename) if win_base is not None: @@ -112,6 +139,7 @@ def distros_for_location(location, basename, metadata=None): return interpret_distro_name(location, basename, metadata) return [] # no extension matched + def distros_for_filename(filename, metadata=None): """Yield possible egg or source distribution objects based on a filename""" return distros_for_location( @@ -122,7 +150,7 @@ def distros_for_filename(filename, metadata=None): def interpret_distro_name( location, basename, metadata, py_version=None, precedence=SOURCE_DIST, platform=None - ): +): """Generate alternative interpretations of a source distro name Note: if `location` is a filesystem filename, you should call @@ -142,17 +170,18 @@ def interpret_distro_name( # versions in distribution archive names (sdist and bdist). 
parts = basename.split('-') - if not py_version and any(re.match('py\d\.\d$', p) for p in parts[2:]): + if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]): # it is a bdist_dumb, not an sdist -- bail out return - for p in range(1,len(parts)+1): + for p in range(1, len(parts) + 1): yield Distribution( location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), - py_version=py_version, precedence = precedence, - platform = platform + py_version=py_version, precedence=precedence, + platform=platform ) + # From Python 2.7 docs def unique_everseen(iterable, key=None): "List unique elements, preserving order. Remember all elements ever seen." @@ -171,19 +200,24 @@ def unique_everseen(iterable, key=None): seen_add(k) yield element + def unique_values(func): """ Wrap a function returning an iterable such that the resulting iterable only ever yields unique items. """ + @wraps(func) def wrapper(*args, **kwargs): return unique_everseen(func(*args, **kwargs)) + return wrapper -REL = re.compile("""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) + +REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) # this line is here to fix emacs' cruddy broken syntax highlighting + @unique_values def find_external_links(url, page): """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" @@ -197,19 +231,17 @@ def find_external_links(url, page): for tag in ("Home Page", "Download URL"): pos = page.find(tag) - if pos!=-1: - match = HREF.search(page,pos) + if pos != -1: + match = HREF.search(page, pos) if match: yield urllib.parse.urljoin(url, htmldecode(match.group(1))) -user_agent = "Python-urllib/%s setuptools/%s" % ( - sys.version[:3], require('setuptools')[0].version -) -class ContentChecker(object): +class ContentChecker: """ A null content checker that defines the interface for checking content """ + def feed(self, block): """ Feed a block of data to the hash. 
@@ -229,6 +261,7 @@ class ContentChecker(object): """ return + class HashChecker(ContentChecker): pattern = re.compile( r'(?Psha1|sha224|sha384|sha256|sha512|md5)=' @@ -266,19 +299,25 @@ class PackageIndex(Environment): """A distribution index that scans web pages for download URLs""" def __init__( - self, index_url="https://pypi.python.org/simple", hosts=('*',), + self, index_url="https://pypi.org/simple/", hosts=('*',), ca_bundle=None, verify_ssl=True, *args, **kw - ): - Environment.__init__(self,*args,**kw) - self.index_url = index_url + "/"[:not index_url.endswith('/')] + ): + Environment.__init__(self, *args, **kw) + self.index_url = index_url + "/" [:not index_url.endswith('/')] self.scanned_urls = {} self.fetched_urls = {} self.package_pages = {} - self.allows = re.compile('|'.join(map(translate,hosts))).match + self.allows = re.compile('|'.join(map(translate, hosts))).match self.to_scan = [] - if verify_ssl and ssl_support.is_available and (ca_bundle or ssl_support.find_ca_bundle()): + use_ssl = ( + verify_ssl + and ssl_support.is_available + and (ca_bundle or ssl_support.find_ca_bundle()) + ) + if use_ssl: self.opener = ssl_support.opener_for(ca_bundle) - else: self.opener = urllib.request.urlopen + else: + self.opener = urllib.request.urlopen def process_url(self, url, retrieve=False): """Evaluate a URL as a possible download, and maybe retrieve it""" @@ -304,17 +343,20 @@ class PackageIndex(Environment): return self.info("Reading %s", url) - self.fetched_urls[url] = True # prevent multiple fetch attempts - f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url) - if f is None: return + self.fetched_urls[url] = True # prevent multiple fetch attempts + tmpl = "Download error on %s: %%s -- Some packages may not be found!" 
+ f = self.open_url(url, tmpl % url) + if f is None: + return self.fetched_urls[f.url] = True if 'html' not in f.headers.get('content-type', '').lower(): - f.close() # not html, we can't process it + f.close() # not html, we can't process it return - base = f.url # handle redirects + base = f.url # handle redirects page = f.read() - if not isinstance(page, str): # We are in Python 3 and got bytes. We want str. + if not isinstance(page, str): + # In Python 3 and got bytes but want str. if isinstance(f, urllib.error.HTTPError): # Errors have no charset, assume latin1: charset = 'latin-1' @@ -325,7 +367,7 @@ class PackageIndex(Environment): for match in HREF.finditer(page): link = urllib.parse.urljoin(base, htmldecode(match.group(1))) self.process_url(link) - if url.startswith(self.index_url) and getattr(f,'code',None)!=404: + if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: page = self.process_index(url, page) def process_filename(self, fn, nested=False): @@ -337,7 +379,7 @@ class PackageIndex(Environment): if os.path.isdir(fn) and not nested: path = os.path.realpath(fn) for item in os.listdir(path): - self.process_filename(os.path.join(path,item), True) + self.process_filename(os.path.join(path, item), True) dists = distros_for_filename(fn) if dists: @@ -346,10 +388,12 @@ class PackageIndex(Environment): def url_ok(self, url, fatal=False): s = URL_SCHEME(url) - if (s and s.group(1).lower()=='file') or self.allows(urllib.parse.urlparse(url)[1]): + is_file = s and s.group(1).lower() == 'file' + if is_file or self.allows(urllib.parse.urlparse(url)[1]): return True - msg = ("\nNote: Bypassing %s (disallowed host; see " - "http://bit.ly/1dg9ijs for details).\n") + msg = ( + "\nNote: Bypassing %s (disallowed host; see " + "http://bit.ly/2hrImnY for details).\n") if fatal: raise DistutilsError(msg % url) else: @@ -381,19 +425,20 @@ class PackageIndex(Environment): dist.precedence = SOURCE_DIST self.add(dist) - def process_index(self,url,page): + def 
process_index(self, url, page): """Process the contents of a PyPI page""" + def scan(link): # Process a URL to see if it's for a package page if link.startswith(self.index_url): parts = list(map( urllib.parse.unquote, link[len(self.index_url):].split('/') )) - if len(parts)==2 and '#' not in parts[1]: + if len(parts) == 2 and '#' not in parts[1]: # it's a package page, sanitize and index it pkg = safe_name(parts[0]) ver = safe_version(parts[1]) - self.package_pages.setdefault(pkg.lower(),{})[link] = True + self.package_pages.setdefault(pkg.lower(), {})[link] = True return to_filename(pkg), to_filename(ver) return None, None @@ -404,7 +449,7 @@ class PackageIndex(Environment): except ValueError: pass - pkg, ver = scan(url) # ensure this page is in the page index + pkg, ver = scan(url) # ensure this page is in the page index if pkg: # process individual package page for new_url in find_external_links(url, page): @@ -412,16 +457,16 @@ class PackageIndex(Environment): base, frag = egg_info_for_url(new_url) if base.endswith('.py') and not frag: if ver: - new_url+='#egg=%s-%s' % (pkg,ver) + new_url += '#egg=%s-%s' % (pkg, ver) else: self.need_version_info(url) self.scan_url(new_url) return PYPI_MD5.sub( - lambda m: '%s' % m.group(1,3,2), page + lambda m: '%s' % m.group(1, 3, 2), page ) else: - return "" # no sense double-scanning non-package pages + return "" # no sense double-scanning non-package pages def need_version_info(self, url): self.scan_all( @@ -431,24 +476,25 @@ class PackageIndex(Environment): def scan_all(self, msg=None, *args): if self.index_url not in self.fetched_urls: - if msg: self.warn(msg,*args) + if msg: + self.warn(msg, *args) self.info( "Scanning index of all packages (this may take a while)" ) self.scan_url(self.index_url) def find_packages(self, requirement): - self.scan_url(self.index_url + requirement.unsafe_name+'/') + self.scan_url(self.index_url + requirement.unsafe_name + '/') if not self.package_pages.get(requirement.key): # Fall back to 
safe version of the name - self.scan_url(self.index_url + requirement.project_name+'/') + self.scan_url(self.index_url + requirement.project_name + '/') if not self.package_pages.get(requirement.key): # We couldn't find the target package, so search the index page too self.not_found_in_index(requirement) - for url in list(self.package_pages.get(requirement.key,())): + for url in list(self.package_pages.get(requirement.key, ())): # scan each page that might be related to the desired package self.scan_url(url) @@ -459,31 +505,32 @@ class PackageIndex(Environment): if dist in requirement: return dist self.debug("%s does not match %s", requirement, dist) - return super(PackageIndex, self).obtain(requirement,installer) + return super(PackageIndex, self).obtain(requirement, installer) def check_hash(self, checker, filename, tfp): """ checker is a ContentChecker """ - checker.report(self.debug, + checker.report( + self.debug, "Validating %%s checksum for %s" % filename) if not checker.is_valid(): tfp.close() os.unlink(filename) raise DistutilsError( "%s validation failed for %s; " - "possible download problem?" % ( - checker.hash.name, os.path.basename(filename)) + "possible download problem?" + % (checker.hash.name, os.path.basename(filename)) ) def add_find_links(self, urls): """Add `urls` to the list that will be prescanned for searches""" for url in urls: if ( - self.to_scan is None # if we have already "gone online" - or not URL_SCHEME(url) # or it's a local file/directory + self.to_scan is None # if we have already "gone online" + or not URL_SCHEME(url) # or it's a local file/directory or url.startswith('file:') - or list(distros_for_url(url)) # or a direct package link + or list(distros_for_url(url)) # or a direct package link ): # then go ahead and process it now self.scan_url(url) @@ -495,13 +542,14 @@ class PackageIndex(Environment): """Scan urls scheduled for prescanning (e.g. 
--find-links)""" if self.to_scan: list(map(self.scan_url, self.to_scan)) - self.to_scan = None # from now on, go ahead and process immediately + self.to_scan = None # from now on, go ahead and process immediately def not_found_in_index(self, requirement): - if self[requirement.key]: # we've seen at least one distro + if self[requirement.key]: # we've seen at least one distro meth, msg = self.info, "Couldn't retrieve index page for %r" - else: # no distros seen for this name, might be misspelled - meth, msg = (self.warn, + else: # no distros seen for this name, might be misspelled + meth, msg = ( + self.warn, "Couldn't find index page for %r (maybe misspelled?)") meth(msg, requirement.unsafe_name) self.scan_all() @@ -524,32 +572,25 @@ class PackageIndex(Environment): of `tmpdir`, and the local filename is returned. Various errors may be raised if a problem occurs during downloading. """ - if not isinstance(spec,Requirement): + if not isinstance(spec, Requirement): scheme = URL_SCHEME(spec) if scheme: # It's a url, download it to tmpdir found = self._download_url(scheme.group(1), spec, tmpdir) base, fragment = egg_info_for_url(spec) if base.endswith('.py'): - found = self.gen_setup(found,fragment,tmpdir) + found = self.gen_setup(found, fragment, tmpdir) return found elif os.path.exists(spec): # Existing file or directory, just return it return spec else: - try: - spec = Requirement.parse(spec) - except ValueError: - raise DistutilsError( - "Not a URL, existing file, or requirement spec: %r" % - (spec,) - ) - return getattr(self.fetch_distribution(spec, tmpdir),'location',None) + spec = parse_requirement_arg(spec) + return getattr(self.fetch_distribution(spec, tmpdir), 'location', None) def fetch_distribution( self, requirement, tmpdir, force_scan=False, source=False, - develop_ok=False, local_index=None - ): + develop_ok=False, local_index=None): """Obtain a distribution suitable for fulfilling `requirement` `requirement` must be a ``pkg_resources.Requirement`` 
instance. @@ -578,22 +619,31 @@ class PackageIndex(Environment): for dist in env[req.key]: - if dist.precedence==DEVELOP_DIST and not develop_ok: + if dist.precedence == DEVELOP_DIST and not develop_ok: if dist not in skipped: - self.warn("Skipping development or system egg: %s",dist) + self.warn( + "Skipping development or system egg: %s", dist, + ) skipped[dist] = 1 continue - if dist in req and (dist.precedence<=SOURCE_DIST or not source): - return dist + test = ( + dist in req + and (dist.precedence <= SOURCE_DIST or not source) + ) + if test: + loc = self.download(dist.location, tmpdir) + dist.download_location = loc + if os.path.exists(dist.download_location): + return dist if force_scan: self.prescan() self.find_packages(requirement) dist = find(requirement) - if local_index is not None: - dist = dist or find(requirement, local_index) + if not dist and local_index is not None: + dist = find(requirement, local_index) if dist is None: if self.to_scan is not None: @@ -606,13 +656,13 @@ class PackageIndex(Environment): if dist is None: self.warn( - "No local packages or download links found for %s%s", + "No local packages or working download links found for %s%s", (source and "a source distribution of " or ""), requirement, ) else: self.info("Best match: %s", dist) - return dist.clone(location=self.download(dist.location, tmpdir)) + return dist.clone(location=dist.download_location) def fetch(self, requirement, tmpdir, force_scan=False, source=False): """Obtain a file suitable for fulfilling `requirement` @@ -622,7 +672,7 @@ class PackageIndex(Environment): ``location`` of the downloaded distribution instead of a distribution object. 
""" - dist = self.fetch_distribution(requirement,tmpdir,force_scan,source) + dist = self.fetch_distribution(requirement, tmpdir, force_scan, source) if dist is not None: return dist.location return None @@ -634,7 +684,7 @@ class PackageIndex(Environment): interpret_distro_name(filename, match.group(1), None) if d.version ] or [] - if len(dists)==1: # unambiguous ``#egg`` fragment + if len(dists) == 1: # unambiguous ``#egg`` fragment basename = os.path.basename(filename) # Make sure the file has been downloaded to the temp dir. @@ -643,7 +693,7 @@ class PackageIndex(Environment): from setuptools.command.easy_install import samefile if not samefile(filename, dst): shutil.copy2(filename, dst) - filename=dst + filename = dst with open(os.path.join(tmpdir, 'setup.py'), 'w') as file: file.write( @@ -660,7 +710,7 @@ class PackageIndex(Environment): raise DistutilsError( "Can't unambiguously interpret project/version identifier %r; " "any dashes in the name or version should be escaped using " - "underscores. %r" % (fragment,dists) + "underscores. 
%r" % (fragment, dists) ) else: raise DistutilsError( @@ -669,16 +719,17 @@ class PackageIndex(Environment): ) dl_blocksize = 8192 + def _download_to(self, url, filename): self.info("Downloading %s", url) # Download the file - fp, info = None, None + fp = None try: checker = HashChecker.from_url(url) - fp = self.open_url(strip_fragment(url)) + fp = self.open_url(url) if isinstance(fp, urllib.error.HTTPError): raise DistutilsError( - "Can't download %s: %s %s" % (url, fp.code,fp.msg) + "Can't download %s: %s %s" % (url, fp.code, fp.msg) ) headers = fp.info() blocknum = 0 @@ -689,7 +740,7 @@ class PackageIndex(Environment): sizes = get_all_headers(headers, 'Content-Length') size = max(map(int, sizes)) self.reporthook(url, filename, blocknum, bs, size) - with open(filename,'wb') as tfp: + with open(filename, 'wb') as tfp: while True: block = fp.read(bs) if block: @@ -702,10 +753,11 @@ class PackageIndex(Environment): self.check_hash(checker, filename, tfp) return headers finally: - if fp: fp.close() + if fp: + fp.close() def reporthook(self, url, filename, blocknum, blksize, size): - pass # no-op + pass # no-op def open_url(self, url, warning=None): if url.startswith('file:'): @@ -735,7 +787,7 @@ class PackageIndex(Environment): 'down, %s' % (url, v.line) ) - except http_client.HTTPException as v: + except (http_client.HTTPException, socket.error) as v: if warning: self.warn(warning, v) else: @@ -748,27 +800,27 @@ class PackageIndex(Environment): name, fragment = egg_info_for_url(url) if name: while '..' 
in name: - name = name.replace('..','.').replace('\\','_') + name = name.replace('..', '.').replace('\\', '_') else: - name = "__downloaded__" # default if URL has no path contents + name = "__downloaded__" # default if URL has no path contents if name.endswith('.egg.zip'): - name = name[:-4] # strip the extra .zip before download + name = name[:-4] # strip the extra .zip before download - filename = os.path.join(tmpdir,name) + filename = os.path.join(tmpdir, name) # Download the file # - if scheme=='svn' or scheme.startswith('svn+'): + if scheme == 'svn' or scheme.startswith('svn+'): return self._download_svn(url, filename) - elif scheme=='git' or scheme.startswith('git+'): + elif scheme == 'git' or scheme.startswith('git+'): return self._download_git(url, filename) elif scheme.startswith('hg+'): return self._download_hg(url, filename) - elif scheme=='file': + elif scheme == 'file': return urllib.request.url2pathname(urllib.parse.urlparse(url)[2]) else: - self.url_ok(url, True) # raises error if not allowed + self.url_ok(url, True) # raises error if not allowed return self._attempt_download(url, filename) def scan_url(self, url): @@ -776,7 +828,7 @@ class PackageIndex(Environment): def _attempt_download(self, url, filename): headers = self._download_to(url, filename) - if 'html' in headers.get('content-type','').lower(): + if 'html' in headers.get('content-type', '').lower(): return self._download_html(url, headers, filename) else: return filename @@ -791,25 +843,26 @@ class PackageIndex(Environment): file.close() os.unlink(filename) return self._download_svn(url, filename) - break # not an index page + break # not an index page file.close() os.unlink(filename) - raise DistutilsError("Unexpected HTML page found at "+url) + raise DistutilsError("Unexpected HTML page found at " + url) def _download_svn(self, url, filename): - url = url.split('#',1)[0] # remove any fragment for svn's sake + warnings.warn("SVN download support is deprecated", UserWarning) + url = 
url.split('#', 1)[0] # remove any fragment for svn's sake creds = '' if url.lower().startswith('svn:') and '@' in url: scheme, netloc, path, p, q, f = urllib.parse.urlparse(url) if not netloc and path.startswith('//') and '/' in path[2:]: - netloc, path = path[2:].split('/',1) - auth, host = splituser(netloc) + netloc, path = path[2:].split('/', 1) + auth, host = urllib.parse.splituser(netloc) if auth: if ':' in auth: - user, pw = auth.split(':',1) + user, pw = auth.split(':', 1) creds = " --username=%s --password=%s" % (user, pw) else: - creds = " --username="+auth + creds = " --username=" + auth netloc = host parts = scheme, netloc, url, p, q, f url = urllib.parse.urlunparse(parts) @@ -824,7 +877,7 @@ class PackageIndex(Environment): scheme = scheme.split('+', 1)[-1] # Some fragment identification fails - path = path.split('#',1)[0] + path = path.split('#', 1)[0] rev = None if '@' in path: @@ -836,7 +889,7 @@ class PackageIndex(Environment): return url, rev def _download_git(self, url, filename): - filename = filename.split('#',1)[0] + filename = filename.split('#', 1)[0] url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) self.info("Doing git clone from %s to %s", url, filename) @@ -852,7 +905,7 @@ class PackageIndex(Environment): return filename def _download_hg(self, url, filename): - filename = filename.split('#',1)[0] + filename = filename.split('#', 1)[0] url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) self.info("Doing hg clone from %s to %s", url, filename) @@ -860,7 +913,7 @@ class PackageIndex(Environment): if rev is not None: self.info("Updating to %s", rev) - os.system("(cd %s && hg up -C -r %s >&-)" % ( + os.system("(cd %s && hg up -C -r %s -q)" % ( filename, rev, )) @@ -876,30 +929,29 @@ class PackageIndex(Environment): def warn(self, msg, *args): log.warn(msg, *args) + # This pattern matches a character entity reference (a decimal numeric # references, a hexadecimal numeric reference, or a named reference). 
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub -def uchr(c): - if not isinstance(c, int): - return c - if c>255: return six.unichr(c) - return chr(c) def decode_entity(match): - what = match.group(1) - if what.startswith('#x'): - what = int(what[2:], 16) - elif what.startswith('#'): - what = int(what[1:]) - else: - what = six.moves.html_entities.name2codepoint.get(what, match.group(0)) - return uchr(what) + what = match.group(0) + return unescape(what) + def htmldecode(text): - """Decode HTML entities in the given text.""" + """ + Decode HTML entities in the given text. + + >>> htmldecode( + ... 'https://../package_name-0.1.2.tar.gz' + ... '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz') + 'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz' + """ return entity_sub(decode_entity, text) + def socket_timeout(timeout=15): def _socket_timeout(func): def _socket_timeout(*args, **kwargs): @@ -909,9 +961,12 @@ def socket_timeout(timeout=15): return func(*args, **kwargs) finally: socket.setdefaulttimeout(old_timeout) + return _socket_timeout + return _socket_timeout + def _encode_auth(auth): """ A function compatible with Python 2.3-3.3 that will encode @@ -927,17 +982,18 @@ def _encode_auth(auth): auth_s = urllib.parse.unquote(auth) # convert to bytes auth_bytes = auth_s.encode() - # use the legacy interface for Python 2.3 support - encoded_bytes = base64.encodestring(auth_bytes) + encoded_bytes = base64.b64encode(auth_bytes) # convert back to a string encoded = encoded_bytes.decode() # strip the trailing carriage return - return encoded.replace('\n','') + return encoded.replace('\n', '') -class Credential(object): + +class Credential: """ A username/password pair. Use like a namedtuple. 
""" + def __init__(self, username, password): self.username = username self.password = password @@ -949,8 +1005,8 @@ class Credential(object): def __str__(self): return '%(username)s:%(password)s' % vars(self) -class PyPIConfig(configparser.RawConfigParser): +class PyPIConfig(configparser.RawConfigParser): def __init__(self): """ Load from ~/.pypirc @@ -999,7 +1055,7 @@ def open_with_auth(url, opener=urllib.request.urlopen): raise http_client.InvalidURL("nonnumeric port: ''") if scheme in ('http', 'https'): - auth, host = splituser(netloc) + auth, host = urllib.parse.splituser(netloc) else: auth = None @@ -1008,7 +1064,7 @@ def open_with_auth(url, opener=urllib.request.urlopen): if cred: auth = str(cred) info = cred.username, url - log.info('Authenticating as %s for %s (from .pypirc)' % info) + log.info('Authenticating as %s for %s (from .pypirc)', *info) if auth: auth = "Basic " + _encode_auth(auth) @@ -1026,18 +1082,20 @@ def open_with_auth(url, opener=urllib.request.urlopen): # Put authentication info back into request URL if same host, # so that links found on the page will work s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url) - if s2==scheme and h2==host: + if s2 == scheme and h2 == host: parts = s2, netloc, path2, param2, query2, frag2 fp.url = urllib.parse.urlunparse(parts) return fp + # adding a timeout to avoid freezing package_index open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth) def fix_sf_url(url): - return url # backward compatibility + return url # backward compatibility + def local_open(url): """Read a local path, with special support for directories""" @@ -1057,7 +1115,8 @@ def local_open(url): f += '/' files.append('{name}'.format(name=f)) else: - tmpl = ("{url}" + tmpl = ( + "{url}" "{files}") body = tmpl.format(url=url, files='\n'.join(files)) status, message = 200, "OK" diff --git a/Shared/lib/python3.4/site-packages/setuptools/pep425tags.py b/Shared/lib/python3.4/site-packages/setuptools/pep425tags.py new 
file mode 100644 index 0000000..8bf4277 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/pep425tags.py @@ -0,0 +1,319 @@ +# This file originally from pip: +# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/pep425tags.py +"""Generate and work with PEP 425 Compatibility Tags.""" +from __future__ import absolute_import + +import distutils.util +from distutils import log +import platform +import re +import sys +import sysconfig +import warnings +from collections import OrderedDict + +from .extern import six + +from . import glibc + +_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)') + + +def get_config_var(var): + try: + return sysconfig.get_config_var(var) + except IOError as e: # Issue #1074 + warnings.warn("{}".format(e), RuntimeWarning) + return None + + +def get_abbr_impl(): + """Return abbreviated implementation name.""" + if hasattr(sys, 'pypy_version_info'): + pyimpl = 'pp' + elif sys.platform.startswith('java'): + pyimpl = 'jy' + elif sys.platform == 'cli': + pyimpl = 'ip' + else: + pyimpl = 'cp' + return pyimpl + + +def get_impl_ver(): + """Return implementation version.""" + impl_ver = get_config_var("py_version_nodot") + if not impl_ver or get_abbr_impl() == 'pp': + impl_ver = ''.join(map(str, get_impl_version_info())) + return impl_ver + + +def get_impl_version_info(): + """Return sys.version_info-like tuple for use in decrementing the minor + version.""" + if get_abbr_impl() == 'pp': + # as per https://github.com/pypa/pip/issues/2882 + return (sys.version_info[0], sys.pypy_version_info.major, + sys.pypy_version_info.minor) + else: + return sys.version_info[0], sys.version_info[1] + + +def get_impl_tag(): + """ + Returns the Tag for this specific implementation. 
+ """ + return "{}{}".format(get_abbr_impl(), get_impl_ver()) + + +def get_flag(var, fallback, expected=True, warn=True): + """Use a fallback method for determining SOABI flags if the needed config + var is unset or unavailable.""" + val = get_config_var(var) + if val is None: + if warn: + log.debug("Config variable '%s' is unset, Python ABI tag may " + "be incorrect", var) + return fallback() + return val == expected + + +def get_abi_tag(): + """Return the ABI tag based on SOABI (if available) or emulate SOABI + (CPython 2, PyPy).""" + soabi = get_config_var('SOABI') + impl = get_abbr_impl() + if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'): + d = '' + m = '' + u = '' + if get_flag('Py_DEBUG', + lambda: hasattr(sys, 'gettotalrefcount'), + warn=(impl == 'cp')): + d = 'd' + if get_flag('WITH_PYMALLOC', + lambda: impl == 'cp', + warn=(impl == 'cp')): + m = 'm' + if get_flag('Py_UNICODE_SIZE', + lambda: sys.maxunicode == 0x10ffff, + expected=4, + warn=(impl == 'cp' and + six.PY2)) \ + and six.PY2: + u = 'u' + abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) + elif soabi and soabi.startswith('cpython-'): + abi = 'cp' + soabi.split('-')[1] + elif soabi: + abi = soabi.replace('.', '_').replace('-', '_') + else: + abi = None + return abi + + +def _is_running_32bit(): + return sys.maxsize == 2147483647 + + +def get_platform(): + """Return our platform name 'win32', 'linux_x86_64'""" + if sys.platform == 'darwin': + # distutils.util.get_platform() returns the release based on the value + # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may + # be significantly older than the user's current machine. 
+ release, _, machine = platform.mac_ver() + split_ver = release.split('.') + + if machine == "x86_64" and _is_running_32bit(): + machine = "i386" + elif machine == "ppc64" and _is_running_32bit(): + machine = "ppc" + + return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine) + + # XXX remove distutils dependency + result = distutils.util.get_platform().replace('.', '_').replace('-', '_') + if result == "linux_x86_64" and _is_running_32bit(): + # 32 bit Python program (running on a 64 bit Linux): pip should only + # install and run 32 bit compiled extensions in that case. + result = "linux_i686" + + return result + + +def is_manylinux1_compatible(): + # Only Linux, and only x86-64 / i686 + if get_platform() not in {"linux_x86_64", "linux_i686"}: + return False + + # Check for presence of _manylinux module + try: + import _manylinux + return bool(_manylinux.manylinux1_compatible) + except (ImportError, AttributeError): + # Fall through to heuristic check below + pass + + # Check glibc version. CentOS 5 uses glibc 2.5. + return glibc.have_compatible_glibc(2, 5) + + +def get_darwin_arches(major, minor, machine): + """Return a list of supported arches (including group arches) for + the given major, minor and machine architecture of an macOS machine. + """ + arches = [] + + def _supports_arch(major, minor, arch): + # Looking at the application support for macOS versions in the chart + # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears + # our timeline looks roughly like: + # + # 10.0 - Introduces ppc support. + # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64 + # and x86_64 support is CLI only, and cannot be used for GUI + # applications. + # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications. 
+ # 10.6 - Drops support for ppc64 + # 10.7 - Drops support for ppc + # + # Given that we do not know if we're installing a CLI or a GUI + # application, we must be conservative and assume it might be a GUI + # application and behave as if ppc64 and x86_64 support did not occur + # until 10.5. + # + # Note: The above information is taken from the "Application support" + # column in the chart not the "Processor support" since I believe + # that we care about what instruction sets an application can use + # not which processors the OS supports. + if arch == 'ppc': + return (major, minor) <= (10, 5) + if arch == 'ppc64': + return (major, minor) == (10, 5) + if arch == 'i386': + return (major, minor) >= (10, 4) + if arch == 'x86_64': + return (major, minor) >= (10, 5) + if arch in groups: + for garch in groups[arch]: + if _supports_arch(major, minor, garch): + return True + return False + + groups = OrderedDict([ + ("fat", ("i386", "ppc")), + ("intel", ("x86_64", "i386")), + ("fat64", ("x86_64", "ppc64")), + ("fat32", ("x86_64", "i386", "ppc")), + ]) + + if _supports_arch(major, minor, machine): + arches.append(machine) + + for garch in groups: + if machine in groups[garch] and _supports_arch(major, minor, garch): + arches.append(garch) + + arches.append('universal') + + return arches + + +def get_supported(versions=None, noarch=False, platform=None, + impl=None, abi=None): + """Return a list of supported tags for each version specified in + `versions`. + + :param versions: a list of string versions, of the form ["33", "32"], + or None. The first version will be assumed to support our ABI. + :param platform: specify the exact platform you want valid + tags for, or None. If None, use the local system platform. + :param impl: specify the exact implementation you want valid + tags for, or None. If None, use the local interpreter impl. + :param abi: specify the exact abi you want valid + tags for, or None. If None, use the local interpreter abi. 
+ """ + supported = [] + + # Versions must be given with respect to the preference + if versions is None: + versions = [] + version_info = get_impl_version_info() + major = version_info[:-1] + # Support all previous minor Python versions. + for minor in range(version_info[-1], -1, -1): + versions.append(''.join(map(str, major + (minor,)))) + + impl = impl or get_abbr_impl() + + abis = [] + + abi = abi or get_abi_tag() + if abi: + abis[0:0] = [abi] + + abi3s = set() + import imp + for suffix in imp.get_suffixes(): + if suffix[0].startswith('.abi'): + abi3s.add(suffix[0].split('.', 2)[1]) + + abis.extend(sorted(list(abi3s))) + + abis.append('none') + + if not noarch: + arch = platform or get_platform() + if arch.startswith('macosx'): + # support macosx-10.6-intel on macosx-10.9-x86_64 + match = _osx_arch_pat.match(arch) + if match: + name, major, minor, actual_arch = match.groups() + tpl = '{}_{}_%i_%s'.format(name, major) + arches = [] + for m in reversed(range(int(minor) + 1)): + for a in get_darwin_arches(int(major), m, actual_arch): + arches.append(tpl % (m, a)) + else: + # arch pattern didn't match (?!) 
+ arches = [arch] + elif platform is None and is_manylinux1_compatible(): + arches = [arch.replace('linux', 'manylinux1'), arch] + else: + arches = [arch] + + # Current version, current API (built specifically for our Python): + for abi in abis: + for arch in arches: + supported.append(('%s%s' % (impl, versions[0]), abi, arch)) + + # abi3 modules compatible with older version of Python + for version in versions[1:]: + # abi3 was introduced in Python 3.2 + if version in {'31', '30'}: + break + for abi in abi3s: # empty set if not Python 3 + for arch in arches: + supported.append(("%s%s" % (impl, version), abi, arch)) + + # Has binaries, does not use the Python API: + for arch in arches: + supported.append(('py%s' % (versions[0][0]), 'none', arch)) + + # No abi / arch, but requires our implementation: + supported.append(('%s%s' % (impl, versions[0]), 'none', 'any')) + # Tagged specifically as being cross-version compatible + # (with just the major version specified) + supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) + + # No abi / arch, generic Python + for i, version in enumerate(versions): + supported.append(('py%s' % (version,), 'none', 'any')) + if i == 0: + supported.append(('py%s' % (version[0]), 'none', 'any')) + + return supported + + +implementation_tag = get_impl_tag() diff --git a/Shared/lib/python3.4/site-packages/setuptools/py26compat.py b/Shared/lib/python3.4/site-packages/setuptools/py26compat.py deleted file mode 100644 index e52bd85..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools/py26compat.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -Compatibility Support for Python 2.6 and earlier -""" - -import sys - -try: - from urllib.parse import splittag -except ImportError: - from urllib import splittag - -def strip_fragment(url): - """ - In `Python 8280 `_, Python 2.7 and - later was patched to disregard the fragment when making URL requests. - Do the same for Python 2.6 and earlier. 
- """ - url, fragment = splittag(url) - return url - -if sys.version_info >= (2,7): - strip_fragment = lambda x: x diff --git a/Shared/lib/python3.4/site-packages/setuptools/py27compat.py b/Shared/lib/python3.4/site-packages/setuptools/py27compat.py index 9d2886d..2985011 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/py27compat.py +++ b/Shared/lib/python3.4/site-packages/setuptools/py27compat.py @@ -2,14 +2,27 @@ Compatibility Support for Python 2.7 and earlier """ -import sys +import platform + +from setuptools.extern import six + def get_all_headers(message, key): - """ - Given an HTTPMessage, return all headers matching a given key. - """ - return message.get_all(key) + """ + Given an HTTPMessage, return all headers matching a given key. + """ + return message.get_all(key) -if sys.version_info < (3,): - def get_all_headers(message, key): - return message.getheaders(key) + +if six.PY2: + def get_all_headers(message, key): + return message.getheaders(key) + + +linux_py2_ascii = ( + platform.system() == 'Linux' and + six.PY2 +) + +rmtree_safe = str if linux_py2_ascii else lambda x: x +"""Workaround for http://bugs.python.org/issue24672""" diff --git a/Shared/lib/python3.4/site-packages/setuptools/py31compat.py b/Shared/lib/python3.4/site-packages/setuptools/py31compat.py index 8fe6dd9..1a0705e 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/py31compat.py +++ b/Shared/lib/python3.4/site-packages/setuptools/py31compat.py @@ -1,17 +1,7 @@ -import sys -import unittest +__all__ = [] -__all__ = ['get_config_vars', 'get_path'] +__metaclass__ = type -try: - # Python 2.7 or >=3.2 - from sysconfig import get_config_vars, get_path -except ImportError: - from distutils.sysconfig import get_config_vars, get_python_lib - def get_path(name): - if name not in ('platlib', 'purelib'): - raise ValueError("Name must be purelib or platlib") - return get_python_lib(name=='platlib') try: # Python >=3.2 @@ -19,14 +9,16 @@ try: except ImportError: import shutil 
import tempfile - class TemporaryDirectory(object): + + class TemporaryDirectory: """ Very simple temporary directory context manager. Will try to delete afterward, but will also ignore OS and similar errors on deletion. """ + def __init__(self): - self.name = None # Handle mkdtemp raising an exception + self.name = None # Handle mkdtemp raising an exception self.name = tempfile.mkdtemp() def __enter__(self): @@ -35,18 +27,6 @@ except ImportError: def __exit__(self, exctype, excvalue, exctrace): try: shutil.rmtree(self.name, True) - except OSError: #removal errors are not the only possible + except OSError: # removal errors are not the only possible pass self.name = None - - -unittest_main = unittest.main - -_PY31 = (3, 1) <= sys.version_info[:2] < (3, 2) -if _PY31: - # on Python 3.1, translate testRunner==None to TextTestRunner - # for compatibility with Python 2.6, 2.7, and 3.2+ - def unittest_main(*args, **kwargs): - if 'testRunner' in kwargs and kwargs['testRunner'] is None: - kwargs['testRunner'] = unittest.TextTestRunner - return unittest.main(*args, **kwargs) diff --git a/Shared/lib/python3.4/site-packages/setuptools/py33compat.py b/Shared/lib/python3.4/site-packages/setuptools/py33compat.py new file mode 100644 index 0000000..87cf539 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/py33compat.py @@ -0,0 +1,55 @@ +import dis +import array +import collections + +try: + import html +except ImportError: + html = None + +from setuptools.extern import six +from setuptools.extern.six.moves import html_parser + +__metaclass__ = type + +OpArg = collections.namedtuple('OpArg', 'opcode arg') + + +class Bytecode_compat: + def __init__(self, code): + self.code = code + + def __iter__(self): + """Yield '(op,arg)' pair for each operation in code object 'code'""" + + bytes = array.array('b', self.code.co_code) + eof = len(self.code.co_code) + + ptr = 0 + extended_arg = 0 + + while ptr < eof: + + op = bytes[ptr] + + if op >= dis.HAVE_ARGUMENT: + + arg = 
bytes[ptr + 1] + bytes[ptr + 2] * 256 + extended_arg + ptr += 3 + + if op == dis.EXTENDED_ARG: + long_type = six.integer_types[-1] + extended_arg = arg * long_type(65536) + continue + + else: + arg = None + ptr += 1 + + yield OpArg(op, arg) + + +Bytecode = getattr(dis, 'Bytecode', Bytecode_compat) + + +unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape) diff --git a/Shared/lib/python3.4/site-packages/setuptools/py36compat.py b/Shared/lib/python3.4/site-packages/setuptools/py36compat.py new file mode 100644 index 0000000..f527969 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/py36compat.py @@ -0,0 +1,82 @@ +import sys +from distutils.errors import DistutilsOptionError +from distutils.util import strtobool +from distutils.debug import DEBUG + + +class Distribution_parse_config_files: + """ + Mix-in providing forward-compatibility for functionality to be + included by default on Python 3.7. + + Do not edit the code in this class except to update functionality + as implemented in distutils. 
+ """ + def parse_config_files(self, filenames=None): + from configparser import ConfigParser + + # Ignore install directory options if we have a venv + if sys.prefix != sys.base_prefix: + ignore_options = [ + 'install-base', 'install-platbase', 'install-lib', + 'install-platlib', 'install-purelib', 'install-headers', + 'install-scripts', 'install-data', 'prefix', 'exec-prefix', + 'home', 'user', 'root'] + else: + ignore_options = [] + + ignore_options = frozenset(ignore_options) + + if filenames is None: + filenames = self.find_config_files() + + if DEBUG: + self.announce("Distribution.parse_config_files():") + + parser = ConfigParser(interpolation=None) + for filename in filenames: + if DEBUG: + self.announce(" reading %s" % filename) + parser.read(filename) + for section in parser.sections(): + options = parser.options(section) + opt_dict = self.get_option_dict(section) + + for opt in options: + if opt != '__name__' and opt not in ignore_options: + val = parser.get(section,opt) + opt = opt.replace('-', '_') + opt_dict[opt] = (filename, val) + + # Make the ConfigParser forget everything (so we retain + # the original filenames that options come from) + parser.__init__() + + # If there was a "global" section in the config file, use it + # to set Distribution options. + + if 'global' in self.command_options: + for (opt, (src, val)) in self.command_options['global'].items(): + alias = self.negative_opt.get(opt) + try: + if alias: + setattr(self, alias, not strtobool(val)) + elif opt in ('verbose', 'dry_run'): # ugh! + setattr(self, opt, strtobool(val)) + else: + setattr(self, opt, val) + except ValueError as msg: + raise DistutilsOptionError(msg) + + +if sys.version_info < (3,): + # Python 2 behavior is sufficient + class Distribution_parse_config_files: + pass + + +if False: + # When updated behavior is available upstream, + # disable override here. 
+ class Distribution_parse_config_files: + pass diff --git a/Shared/lib/python3.4/site-packages/setuptools/sandbox.py b/Shared/lib/python3.4/site-packages/setuptools/sandbox.py index 23e296b..685f3f7 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/sandbox.py +++ b/Shared/lib/python3.4/site-packages/setuptools/sandbox.py @@ -7,11 +7,12 @@ import itertools import re import contextlib import pickle +import textwrap from setuptools.extern import six from setuptools.extern.six.moves import builtins, map -import pkg_resources +import pkg_resources.py31compat if sys.platform.startswith('java'): import org.python.modules.posix.PosixModule as _os @@ -25,10 +26,12 @@ _open = open from distutils.errors import DistutilsError from pkg_resources import working_set + __all__ = [ "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup", ] + def _execfile(filename, globals, locals=None): """ Python 3 implementation of execfile. @@ -36,10 +39,6 @@ def _execfile(filename, globals, locals=None): mode = 'rb' with open(filename, mode) as stream: script = stream.read() - # compile() function in Python 2.6 and 3.1 requires LF line endings. - if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2): - script = script.replace(b'\r\n', b'\n') - script = script.replace(b'\r', b'\n') if locals is None: locals = globals code = compile(script, filename, 'exec') @@ -71,8 +70,7 @@ def override_temp(replacement): """ Monkey-patch tempfile.tempdir with replacement, ensuring it exists """ - if not os.path.isdir(replacement): - os.makedirs(replacement) + pkg_resources.py31compat.makedirs(replacement, exist_ok=True) saved = tempfile.tempdir @@ -98,6 +96,7 @@ class UnpickleableException(Exception): """ An exception representing another Exception that could not be pickled. 
""" + @staticmethod def dump(type, exc): """ @@ -117,6 +116,7 @@ class ExceptionSaver: A Context Manager that will save an exception, serialized, and restore it later. """ + def __enter__(self): return self @@ -212,7 +212,7 @@ def _needs_hiding(mod_name): >>> _needs_hiding('Cython') True """ - pattern = re.compile('(setuptools|pkg_resources|distutils|Cython)(\.|$)') + pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)') return bool(pattern.match(mod_name)) @@ -232,15 +232,22 @@ def run_setup(setup_script, args): setup_dir = os.path.abspath(os.path.dirname(setup_script)) with setup_context(setup_dir): try: - sys.argv[:] = [setup_script]+list(args) + sys.argv[:] = [setup_script] + list(args) sys.path.insert(0, setup_dir) # reset to include setup dir, w/clean callback list working_set.__init__() - working_set.callbacks.append(lambda dist:dist.activate()) - def runner(): - ns = dict(__file__=setup_script, __name__='__main__') + working_set.callbacks.append(lambda dist: dist.activate()) + + # __file__ should be a byte string on Python 2 (#712) + dunder_file = ( + setup_script + if isinstance(setup_script, str) else + setup_script.encode(sys.getfilesystemencoding()) + ) + + with DirectorySandbox(setup_dir): + ns = dict(__file__=dunder_file, __name__='__main__') _execfile(setup_script, ns) - DirectorySandbox(setup_dir).run(runner) except SystemExit as v: if v.args and v.args[0]: raise @@ -255,46 +262,54 @@ class AbstractSandbox: def __init__(self): self._attrs = [ name for name in dir(_os) - if not name.startswith('_') and hasattr(self,name) + if not name.startswith('_') and hasattr(self, name) ] def _copy(self, source): for name in self._attrs: - setattr(os, name, getattr(source,name)) + setattr(os, name, getattr(source, name)) + + def __enter__(self): + self._copy(self) + if _file: + builtins.file = self._file + builtins.open = self._open + self._active = True + + def __exit__(self, exc_type, exc_value, traceback): + self._active = False + if 
_file: + builtins.file = _file + builtins.open = _open + self._copy(_os) def run(self, func): """Run 'func' under os sandboxing""" - try: - self._copy(self) - if _file: - builtins.file = self._file - builtins.open = self._open - self._active = True + with self: return func() - finally: - self._active = False - if _file: - builtins.file = _file - builtins.open = _open - self._copy(_os) def _mk_dual_path_wrapper(name): - original = getattr(_os,name) - def wrap(self,src,dst,*args,**kw): + original = getattr(_os, name) + + def wrap(self, src, dst, *args, **kw): if self._active: - src,dst = self._remap_pair(name,src,dst,*args,**kw) - return original(src,dst,*args,**kw) + src, dst = self._remap_pair(name, src, dst, *args, **kw) + return original(src, dst, *args, **kw) + return wrap for name in ["rename", "link", "symlink"]: - if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name) + if hasattr(_os, name): + locals()[name] = _mk_dual_path_wrapper(name) def _mk_single_path_wrapper(name, original=None): - original = original or getattr(_os,name) - def wrap(self,path,*args,**kw): + original = original or getattr(_os, name) + + def wrap(self, path, *args, **kw): if self._active: - path = self._remap_input(name,path,*args,**kw) - return original(path,*args,**kw) + path = self._remap_input(name, path, *args, **kw) + return original(path, *args, **kw) + return wrap if _file: @@ -305,49 +320,56 @@ class AbstractSandbox: "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat", "startfile", "mkfifo", "mknod", "pathconf", "access" ]: - if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name) + if hasattr(_os, name): + locals()[name] = _mk_single_path_wrapper(name) def _mk_single_with_return(name): - original = getattr(_os,name) - def wrap(self,path,*args,**kw): + original = getattr(_os, name) + + def wrap(self, path, *args, **kw): if self._active: - path = self._remap_input(name,path,*args,**kw) - return self._remap_output(name, 
original(path,*args,**kw)) - return original(path,*args,**kw) + path = self._remap_input(name, path, *args, **kw) + return self._remap_output(name, original(path, *args, **kw)) + return original(path, *args, **kw) + return wrap for name in ['readlink', 'tempnam']: - if hasattr(_os,name): locals()[name] = _mk_single_with_return(name) + if hasattr(_os, name): + locals()[name] = _mk_single_with_return(name) def _mk_query(name): - original = getattr(_os,name) - def wrap(self,*args,**kw): - retval = original(*args,**kw) + original = getattr(_os, name) + + def wrap(self, *args, **kw): + retval = original(*args, **kw) if self._active: return self._remap_output(name, retval) return retval + return wrap for name in ['getcwd', 'tmpnam']: - if hasattr(_os,name): locals()[name] = _mk_query(name) + if hasattr(_os, name): + locals()[name] = _mk_query(name) - def _validate_path(self,path): + def _validate_path(self, path): """Called to remap or validate any path, whether input or output""" return path - def _remap_input(self,operation,path,*args,**kw): + def _remap_input(self, operation, path, *args, **kw): """Called for path inputs""" return self._validate_path(path) - def _remap_output(self,operation,path): + def _remap_output(self, operation, path): """Called for path outputs""" return self._validate_path(path) - def _remap_pair(self,operation,src,dst,*args,**kw): + def _remap_pair(self, operation, src, dst, *args, **kw): """Called for path pairs like rename, link, and symlink operations""" return ( - self._remap_input(operation+'-from',src,*args,**kw), - self._remap_input(operation+'-to',dst,*args,**kw) + self._remap_input(operation + '-from', src, *args, **kw), + self._remap_input(operation + '-to', dst, *args, **kw) ) @@ -356,13 +378,6 @@ if hasattr(os, 'devnull'): else: _EXCEPTIONS = [] -try: - from win32com.client.gencache import GetGeneratePath - _EXCEPTIONS.append(GetGeneratePath()) - del GetGeneratePath -except ImportError: - # it appears pywin32 is not installed, so 
no need to exclude. - pass class DirectorySandbox(AbstractSandbox): """Restrict operations to a single subdirectory - pseudo-chroot""" @@ -374,13 +389,13 @@ class DirectorySandbox(AbstractSandbox): _exception_patterns = [ # Allow lib2to3 to attempt to save a pickled grammar object (#121) - '.*lib2to3.*\.pickle$', + r'.*lib2to3.*\.pickle$', ] "exempt writing to paths that match the pattern" def __init__(self, sandbox, exceptions=_EXCEPTIONS): self._sandbox = os.path.normcase(os.path.realpath(sandbox)) - self._prefix = os.path.join(self._sandbox,'') + self._prefix = os.path.join(self._sandbox, '') self._exceptions = [ os.path.normcase(os.path.realpath(path)) for path in exceptions @@ -392,15 +407,16 @@ class DirectorySandbox(AbstractSandbox): raise SandboxViolation(operation, args, kw) if _file: + def _file(self, path, mode='r', *args, **kw): if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): self._violation("file", path, mode, *args, **kw) - return _file(path,mode,*args,**kw) + return _file(path, mode, *args, **kw) def _open(self, path, mode='r', *args, **kw): if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): self._violation("open", path, mode, *args, **kw) - return _open(path,mode,*args,**kw) + return _open(path, mode, *args, **kw) def tmpnam(self): self._violation("tmpnam") @@ -440,57 +456,36 @@ class DirectorySandbox(AbstractSandbox): """Called for path pairs like rename, link, and symlink operations""" if not self._ok(src) or not self._ok(dst): self._violation(operation, src, dst, *args, **kw) - return (src,dst) + return (src, dst) def open(self, file, flags, mode=0o777, *args, **kw): """Called for low-level os.open()""" if flags & WRITE_FLAGS and not self._ok(file): self._violation("os.open", file, flags, mode, *args, **kw) - return _os.open(file,flags,mode, *args, **kw) + return _os.open(file, flags, mode, *args, **kw) + WRITE_FLAGS = functools.reduce( operator.or_, [getattr(_os, a, 0) for a in "O_WRONLY O_RDWR O_APPEND 
O_CREAT O_TRUNC O_TEMPORARY".split()] ) + class SandboxViolation(DistutilsError): """A setup script attempted to modify the filesystem outside the sandbox""" + tmpl = textwrap.dedent(""" + SandboxViolation: {cmd}{args!r} {kwargs} + + The package setup script has attempted to modify files on your system + that are not within the EasyInstall build area, and has been aborted. + + This package cannot be safely installed by EasyInstall, and may not + support alternate installation locations even if you run its setup + script by hand. Please inform the package's author and the EasyInstall + maintainers to find out if a fix or workaround is available. + """).lstrip() + def __str__(self): - return """SandboxViolation: %s%r %s - -The package setup script has attempted to modify files on your system -that are not within the EasyInstall build area, and has been aborted. - -This package cannot be safely installed by EasyInstall, and may not -support alternate installation locations even if you run its setup -script by hand. 
Please inform the package's author and the EasyInstall -maintainers to find out if a fix or workaround is available.""" % self.args - - - - - - - - - - - - - - - - - - - - - - - - - - - -# + cmd, args, kwargs = self.args + return self.tmpl.format(**locals()) diff --git a/Shared/lib/python3.4/site-packages/setuptools/script (dev).tmpl b/Shared/lib/python3.4/site-packages/setuptools/script (dev).tmpl index d58b1bb..39a24b0 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/script (dev).tmpl +++ b/Shared/lib/python3.4/site-packages/setuptools/script (dev).tmpl @@ -2,4 +2,5 @@ __requires__ = %(spec)r __import__('pkg_resources').require(%(spec)r) __file__ = %(dev_path)r -exec(compile(open(__file__).read(), __file__, 'exec')) +with open(__file__) as f: + exec(compile(f.read(), __file__, 'exec')) diff --git a/Shared/lib/python3.4/site-packages/setuptools/site-patch.py b/Shared/lib/python3.4/site-packages/setuptools/site-patch.py index c216801..40b00de 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/site-patch.py +++ b/Shared/lib/python3.4/site-packages/setuptools/site-patch.py @@ -2,19 +2,18 @@ def __boot(): import sys import os PYTHONPATH = os.environ.get('PYTHONPATH') - if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH): + if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH): PYTHONPATH = [] else: PYTHONPATH = PYTHONPATH.split(os.pathsep) - pic = getattr(sys,'path_importer_cache',{}) + pic = getattr(sys, 'path_importer_cache', {}) stdpath = sys.path[len(PYTHONPATH):] mydir = os.path.dirname(__file__) - #print "searching",stdpath,sys.path for item in stdpath: - if item==mydir or not item: - continue # skip if current dir. on Windows, or my own directory + if item == mydir or not item: + continue # skip if current dir. 
on Windows, or my own directory importer = pic.get(item) if importer is not None: loader = importer.find_module('site') @@ -24,32 +23,30 @@ def __boot(): break else: try: - import imp # Avoid import loop in Python >= 3.3 - stream, path, descr = imp.find_module('site',[item]) + import imp # Avoid import loop in Python 3 + stream, path, descr = imp.find_module('site', [item]) except ImportError: continue if stream is None: continue try: # This should actually reload the current module - imp.load_module('site',stream,path,descr) + imp.load_module('site', stream, path, descr) finally: stream.close() break else: raise ImportError("Couldn't find the real 'site' module") - #print "loaded", __file__ + known_paths = dict([(makepath(item)[1], 1) for item in sys.path]) # 2.2 comp - known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp - - oldpos = getattr(sys,'__egginsert',0) # save old insertion position - sys.__egginsert = 0 # and reset the current one + oldpos = getattr(sys, '__egginsert', 0) # save old insertion position + sys.__egginsert = 0 # and reset the current one for item in PYTHONPATH: addsitedir(item) - sys.__egginsert += oldpos # restore effective old position + sys.__egginsert += oldpos # restore effective old position d, nd = makepath(stdpath[0]) insert_at = None @@ -58,7 +55,7 @@ def __boot(): for item in sys.path: p, np = makepath(item) - if np==nd and insert_at is None: + if np == nd and insert_at is None: # We've hit the first 'system' path entry, so added entries go here insert_at = len(new_path) @@ -71,6 +68,7 @@ def __boot(): sys.path[:] = new_path -if __name__=='site': + +if __name__ == 'site': __boot() del __boot diff --git a/Shared/lib/python3.4/site-packages/setuptools/ssl_support.py b/Shared/lib/python3.4/site-packages/setuptools/ssl_support.py index 657197c..6362f1f 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/ssl_support.py +++ b/Shared/lib/python3.4/site-packages/setuptools/ssl_support.py @@ -2,10 +2,10 @@ 
import os import socket import atexit import re +import functools -from setuptools.extern.six.moves import urllib, http_client, map +from setuptools.extern.six.moves import urllib, http_client, map, filter -import pkg_resources from pkg_resources import ResolutionError, ExtractionError try: @@ -26,9 +26,9 @@ cert_paths = """ /etc/ssl/cert.pem /System/Library/OpenSSL/certs/cert.pem /usr/local/share/certs/ca-root-nss.crt +/etc/ssl/ca-bundle.pem """.strip().split() - try: HTTPSHandler = urllib.request.HTTPSHandler HTTPSConnection = http_client.HTTPSConnection @@ -49,10 +49,13 @@ except ImportError: match_hostname = None if not CertificateError: + class CertificateError(ValueError): pass + if not match_hostname: + def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 @@ -161,6 +164,7 @@ class VerifyingHTTPSHandler(HTTPSHandler): class VerifyingHTTPSConn(HTTPSConnection): """Simple verifying connection: no auth, subclasses, timeouts, etc.""" + def __init__(self, host, ca_bundle, **kw): HTTPSConnection.__init__(self, host, **kw) self.ca_bundle = ca_bundle @@ -182,9 +186,14 @@ class VerifyingHTTPSConn(HTTPSConnection): else: actual_host = self.host - self.sock = ssl.wrap_socket( - sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle - ) + if hasattr(ssl, 'create_default_context'): + ctx = ssl.create_default_context(cafile=self.ca_bundle) + self.sock = ctx.wrap_socket(sock, server_hostname=actual_host) + else: + # This is for python < 2.7.9 and < 3.4? 
+ self.sock = ssl.wrap_socket( + sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle + ) try: match_hostname(self.sock.getpeercert(), actual_host) except CertificateError: @@ -192,6 +201,7 @@ class VerifyingHTTPSConn(HTTPSConnection): self.sock.close() raise + def opener_for(ca_bundle=None): """Get a urlopen() replacement that uses ca_bundle for verification""" return urllib.request.build_opener( @@ -199,45 +209,52 @@ def opener_for(ca_bundle=None): ).open -_wincerts = None +# from jaraco.functools +def once(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + if not hasattr(func, 'always_returns'): + func.always_returns = func(*args, **kwargs) + return func.always_returns + return wrapper + +@once def get_win_certfile(): - global _wincerts - if _wincerts is not None: - return _wincerts.name - try: - from wincertstore import CertFile + import wincertstore except ImportError: return None - class MyCertFile(CertFile): - def __init__(self, stores=(), certs=()): - CertFile.__init__(self) - for store in stores: - self.addstore(store) - self.addcerts(certs) + class CertFile(wincertstore.CertFile): + def __init__(self): + super(CertFile, self).__init__() atexit.register(self.close) def close(self): try: - super(MyCertFile, self).close() + super(CertFile, self).close() except OSError: pass - _wincerts = MyCertFile(stores=['CA', 'ROOT']) + _wincerts = CertFile() + _wincerts.addstore('CA') + _wincerts.addstore('ROOT') return _wincerts.name def find_ca_bundle(): """Return an existing CA bundle path, or None""" - if os.name=='nt': - return get_win_certfile() - else: - for cert_path in cert_paths: - if os.path.isfile(cert_path): - return cert_path + extant_cert_paths = filter(os.path.isfile, cert_paths) + return ( + get_win_certfile() + or next(extant_cert_paths, None) + or _certifi_where() + ) + + +def _certifi_where(): try: - return pkg_resources.resource_filename('certifi', 'cacert.pem') + return __import__('certifi').where() except (ImportError, 
ResolutionError, ExtractionError): - return None + pass diff --git a/Shared/lib/python3.4/site-packages/setuptools/unicode_utils.py b/Shared/lib/python3.4/site-packages/setuptools/unicode_utils.py index ffab3e2..7c63efd 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/unicode_utils.py +++ b/Shared/lib/python3.4/site-packages/setuptools/unicode_utils.py @@ -3,6 +3,7 @@ import sys from setuptools.extern import six + # HFS Plus uses decomposed UTF-8 def decompose(path): if isinstance(path, six.text_type): diff --git a/Shared/lib/python3.4/site-packages/setuptools/utils.py b/Shared/lib/python3.4/site-packages/setuptools/utils.py deleted file mode 100644 index 91e4b87..0000000 --- a/Shared/lib/python3.4/site-packages/setuptools/utils.py +++ /dev/null @@ -1,11 +0,0 @@ -import os -import os.path - - -def cs_path_exists(fspath): - if not os.path.exists(fspath): - return False - # make absolute so we always have a directory - abspath = os.path.abspath(fspath) - directory, filename = os.path.split(abspath) - return filename in os.listdir(directory) \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/setuptools/version.py b/Shared/lib/python3.4/site-packages/setuptools/version.py index 4494728..95e1869 100644 --- a/Shared/lib/python3.4/site-packages/setuptools/version.py +++ b/Shared/lib/python3.4/site-packages/setuptools/version.py @@ -1 +1,6 @@ -__version__ = '20.1.1' +import pkg_resources + +try: + __version__ = pkg_resources.get_distribution('setuptools').version +except Exception: + __version__ = 'unknown' diff --git a/Shared/lib/python3.4/site-packages/setuptools/wheel.py b/Shared/lib/python3.4/site-packages/setuptools/wheel.py new file mode 100644 index 0000000..95a794a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/setuptools/wheel.py @@ -0,0 +1,210 @@ +"""Wheels support.""" + +from distutils.util import get_platform +import email +import itertools +import os +import posixpath +import re +import zipfile + +from pkg_resources 
import Distribution, PathMetadata, parse_version +from setuptools.extern.packaging.utils import canonicalize_name +from setuptools.extern.six import PY3 +from setuptools import Distribution as SetuptoolsDistribution +from setuptools import pep425tags +from setuptools.command.egg_info import write_requirements + + +__metaclass__ = type + + +WHEEL_NAME = re.compile( + r"""^(?P.+?)-(?P\d.*?) + ((-(?P\d.*?))?-(?P.+?)-(?P.+?)-(?P.+?) + )\.whl$""", + re.VERBOSE).match + +NAMESPACE_PACKAGE_INIT = '''\ +try: + __import__('pkg_resources').declare_namespace(__name__) +except ImportError: + __path__ = __import__('pkgutil').extend_path(__path__, __name__) +''' + + +def unpack(src_dir, dst_dir): + '''Move everything under `src_dir` to `dst_dir`, and delete the former.''' + for dirpath, dirnames, filenames in os.walk(src_dir): + subdir = os.path.relpath(dirpath, src_dir) + for f in filenames: + src = os.path.join(dirpath, f) + dst = os.path.join(dst_dir, subdir, f) + os.renames(src, dst) + for n, d in reversed(list(enumerate(dirnames))): + src = os.path.join(dirpath, d) + dst = os.path.join(dst_dir, subdir, d) + if not os.path.exists(dst): + # Directory does not exist in destination, + # rename it and prune it from os.walk list. + os.renames(src, dst) + del dirnames[n] + # Cleanup. 
+ for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True): + assert not filenames + os.rmdir(dirpath) + + +class Wheel: + + def __init__(self, filename): + match = WHEEL_NAME(os.path.basename(filename)) + if match is None: + raise ValueError('invalid wheel name: %r' % filename) + self.filename = filename + for k, v in match.groupdict().items(): + setattr(self, k, v) + + def tags(self): + '''List tags (py_version, abi, platform) supported by this wheel.''' + return itertools.product( + self.py_version.split('.'), + self.abi.split('.'), + self.platform.split('.'), + ) + + def is_compatible(self): + '''Is the wheel is compatible with the current platform?''' + supported_tags = pep425tags.get_supported() + return next((True for t in self.tags() if t in supported_tags), False) + + def egg_name(self): + return Distribution( + project_name=self.project_name, version=self.version, + platform=(None if self.platform == 'any' else get_platform()), + ).egg_name() + '.egg' + + def get_dist_info(self, zf): + # find the correct name of the .dist-info dir in the wheel file + for member in zf.namelist(): + dirname = posixpath.dirname(member) + if (dirname.endswith('.dist-info') and + canonicalize_name(dirname).startswith( + canonicalize_name(self.project_name))): + return dirname + raise ValueError("unsupported wheel format. 
.dist-info not found") + + def install_as_egg(self, destination_eggdir): + '''Install wheel as an egg directory.''' + with zipfile.ZipFile(self.filename) as zf: + self._install_as_egg(destination_eggdir, zf) + + def _install_as_egg(self, destination_eggdir, zf): + dist_basename = '%s-%s' % (self.project_name, self.version) + dist_info = self.get_dist_info(zf) + dist_data = '%s.data' % dist_basename + egg_info = os.path.join(destination_eggdir, 'EGG-INFO') + + self._convert_metadata(zf, destination_eggdir, dist_info, egg_info) + self._move_data_entries(destination_eggdir, dist_data) + self._fix_namespace_packages(egg_info, destination_eggdir) + + @staticmethod + def _convert_metadata(zf, destination_eggdir, dist_info, egg_info): + def get_metadata(name): + with zf.open(posixpath.join(dist_info, name)) as fp: + value = fp.read().decode('utf-8') if PY3 else fp.read() + return email.parser.Parser().parsestr(value) + + wheel_metadata = get_metadata('WHEEL') + # Check wheel format version is supported. + wheel_version = parse_version(wheel_metadata.get('Wheel-Version')) + wheel_v1 = ( + parse_version('1.0') <= wheel_version < parse_version('2.0dev0') + ) + if not wheel_v1: + raise ValueError( + 'unsupported wheel format version: %s' % wheel_version) + # Extract to target directory. + os.mkdir(destination_eggdir) + zf.extractall(destination_eggdir) + # Convert metadata. 
+ dist_info = os.path.join(destination_eggdir, dist_info) + dist = Distribution.from_location( + destination_eggdir, dist_info, + metadata=PathMetadata(destination_eggdir, dist_info), + ) + + # Note: Evaluate and strip markers now, + # as it's difficult to convert back from the syntax: + # foobar; "linux" in sys_platform and extra == 'test' + def raw_req(req): + req.marker = None + return str(req) + install_requires = list(sorted(map(raw_req, dist.requires()))) + extras_require = { + extra: sorted( + req + for req in map(raw_req, dist.requires((extra,))) + if req not in install_requires + ) + for extra in dist.extras + } + os.rename(dist_info, egg_info) + os.rename( + os.path.join(egg_info, 'METADATA'), + os.path.join(egg_info, 'PKG-INFO'), + ) + setup_dist = SetuptoolsDistribution( + attrs=dict( + install_requires=install_requires, + extras_require=extras_require, + ), + ) + write_requirements( + setup_dist.get_command_obj('egg_info'), + None, + os.path.join(egg_info, 'requires.txt'), + ) + + @staticmethod + def _move_data_entries(destination_eggdir, dist_data): + """Move data entries to their correct location.""" + dist_data = os.path.join(destination_eggdir, dist_data) + dist_data_scripts = os.path.join(dist_data, 'scripts') + if os.path.exists(dist_data_scripts): + egg_info_scripts = os.path.join( + destination_eggdir, 'EGG-INFO', 'scripts') + os.mkdir(egg_info_scripts) + for entry in os.listdir(dist_data_scripts): + # Remove bytecode, as it's not properly handled + # during easy_install scripts install phase. 
+ if entry.endswith('.pyc'): + os.unlink(os.path.join(dist_data_scripts, entry)) + else: + os.rename( + os.path.join(dist_data_scripts, entry), + os.path.join(egg_info_scripts, entry), + ) + os.rmdir(dist_data_scripts) + for subdir in filter(os.path.exists, ( + os.path.join(dist_data, d) + for d in ('data', 'headers', 'purelib', 'platlib') + )): + unpack(subdir, destination_eggdir) + if os.path.exists(dist_data): + os.rmdir(dist_data) + + @staticmethod + def _fix_namespace_packages(egg_info, destination_eggdir): + namespace_packages = os.path.join( + egg_info, 'namespace_packages.txt') + if os.path.exists(namespace_packages): + with open(namespace_packages) as fp: + namespace_packages = fp.read().split() + for mod in namespace_packages: + mod_dir = os.path.join(destination_eggdir, *mod.split('.')) + mod_init = os.path.join(mod_dir, '__init__.py') + if os.path.exists(mod_dir) and not os.path.exists(mod_init): + with open(mod_init, 'w') as fp: + fp.write(NAMESPACE_PACKAGE_INIT) diff --git a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/DESCRIPTION.rst deleted file mode 100644 index 2e2607d..0000000 --- a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,18 +0,0 @@ -Six is a Python 2 and 3 compatibility library. It provides utility functions -for smoothing over the differences between the Python versions with the goal of -writing Python code that is compatible on both Python versions. See the -documentation for more information on what is provided. - -Six supports every Python version since 2.6. It is contained in only one Python -file, so it can be easily copied into your project. (The copyright and license -notice must be retained.) - -Online documentation is at https://pythonhosted.org/six/. - -Bugs can be reported to https://bitbucket.org/gutworth/six. The code can also -be found there. 
- -For questions about six or porting in general, email the python-porting mailing -list: https://mail.python.org/mailman/listinfo/python-porting - - diff --git a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/RECORD deleted file mode 100644 index f4ec904..0000000 --- a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/RECORD +++ /dev/null @@ -1,9 +0,0 @@ -six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 -six-1.10.0.dist-info/DESCRIPTION.rst,sha256=QWBtSTT2zzabwJv1NQbTfClSX13m-Qc6tqU4TRL1RLs,774 -six-1.10.0.dist-info/METADATA,sha256=5HceJsUnHof2IRamlCKO2MwNjve1eSP4rLzVQDfwpCQ,1283 -six-1.10.0.dist-info/RECORD,, -six-1.10.0.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 -six-1.10.0.dist-info/metadata.json,sha256=jtOeeTBubYDChl_5Ql5ZPlKoHgg6rdqRIjOz1e5Ek2U,658 -six-1.10.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4 -six-1.10.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -__pycache__/six.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/metadata.json deleted file mode 100644 index 21f9f6c..0000000 --- a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"generator": "bdist_wheel (0.26.0)", "summary": "Python 2 and 3 compatibility utilities", "classifiers": ["Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: 
Libraries", "Topic :: Utilities"], "extensions": {"python.details": {"project_urls": {"Home": "http://pypi.python.org/pypi/six/"}, "contacts": [{"email": "benjamin@python.org", "name": "Benjamin Peterson", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "MIT", "metadata_version": "2.0", "name": "six", "version": "1.10.0"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/INSTALLER similarity index 100% rename from Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/INSTALLER rename to Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/INSTALLER diff --git a/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/LICENSE b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/LICENSE new file mode 100644 index 0000000..365d107 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2010-2018 Benjamin Peterson + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/METADATA similarity index 54% rename from Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/METADATA rename to Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/METADATA index 4fc3d07..df8db11 100644 --- a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/METADATA +++ b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/METADATA @@ -1,18 +1,36 @@ -Metadata-Version: 2.0 +Metadata-Version: 2.1 Name: six -Version: 1.10.0 +Version: 1.12.0 Summary: Python 2 and 3 compatibility utilities -Home-page: http://pypi.python.org/pypi/six/ +Home-page: https://github.com/benjaminp/six Author: Benjamin Peterson Author-email: benjamin@python.org License: MIT Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 3 Classifier: Intended Audience :: Developers Classifier: License :: OSI Approved :: MIT License Classifier: Topic :: Software Development :: Libraries Classifier: Topic :: Utilities +Requires-Python: >=2.6, !=3.0.*, !=3.1.* + +.. image:: https://img.shields.io/pypi/v/six.svg + :target: https://pypi.org/project/six/ + :alt: six on PyPI + +.. image:: https://travis-ci.org/benjaminp/six.svg?branch=master + :target: https://travis-ci.org/benjaminp/six + :alt: six on TravisCI + +.. image:: https://readthedocs.org/projects/six/badge/?version=latest + :target: https://six.readthedocs.io/ + :alt: six's documentation on Read the Docs + +.. 
image:: https://img.shields.io/badge/license-MIT-green.svg + :target: https://github.com/benjaminp/six/blob/master/LICENSE + :alt: MIT License badge Six is a Python 2 and 3 compatibility library. It provides utility functions for smoothing over the differences between the Python versions with the goal of @@ -23,9 +41,9 @@ Six supports every Python version since 2.6. It is contained in only one Python file, so it can be easily copied into your project. (The copyright and license notice must be retained.) -Online documentation is at https://pythonhosted.org/six/. +Online documentation is at https://six.readthedocs.io/. -Bugs can be reported to https://bitbucket.org/gutworth/six. The code can also +Bugs can be reported to https://github.com/benjaminp/six. The code can also be found there. For questions about six or porting in general, email the python-porting mailing diff --git a/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/RECORD new file mode 100644 index 0000000..c2634d8 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/RECORD @@ -0,0 +1,8 @@ +__pycache__/six.cpython-37.pyc,, +six-1.12.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +six-1.12.0.dist-info/LICENSE,sha256=5zL1TaWPPpzwxI6LUSlIk2_Pc2G9WK-mOpo8OSv3lK0,1066 +six-1.12.0.dist-info/METADATA,sha256=CRdYkKPKCFJr7-qA8PDpBklGXfXJ3xu4mu5tkLBDL04,1940 +six-1.12.0.dist-info/RECORD,, +six-1.12.0.dist-info/WHEEL,sha256=_wJFdOYk7i3xxT8ElOkUJvOdOvfNGbR9g-bf6UQT6sU,110 +six-1.12.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4 +six.py,sha256=h9jch2pS86y4R36pKRS3LOYUCVFNIJMRwjZ4fJDtJ44,32452 diff --git a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/WHEEL similarity index 70% rename from Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/WHEEL rename to 
Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/WHEEL index 0de529b..c4bde30 100644 --- a/Shared/lib/python3.4/site-packages/setuptools-20.2.2.dist-info/WHEEL +++ b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.26.0) +Generator: bdist_wheel (0.32.3) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any diff --git a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/six-1.12.0.dist-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/six.py b/Shared/lib/python3.4/site-packages/six.py index 190c023..89b2188 100644 --- a/Shared/lib/python3.4/site-packages/six.py +++ b/Shared/lib/python3.4/site-packages/six.py @@ -1,6 +1,4 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson +# Copyright (c) 2010-2018 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -20,6 +18,8 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +"""Utilities for writing code that runs on Python 2 and 3""" + from __future__ import absolute_import import functools @@ -29,7 +29,7 @@ import sys import types __author__ = "Benjamin Peterson " -__version__ = "1.10.0" +__version__ = "1.12.0" # Useful for very coarse version differentiation. 
@@ -241,6 +241,7 @@ _moved_attributes = [ MovedAttribute("map", "itertools", "builtins", "imap", "map"), MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), MovedAttribute("reduce", "__builtin__", "functools"), @@ -262,10 +263,11 @@ _moved_attributes = [ MovedModule("html_entities", "htmlentitydefs", "html.entities"), MovedModule("html_parser", "HTMLParser", "html.parser"), MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), @@ -337,10 +339,12 @@ _urllib_parse_moved_attributes = [ MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), 
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), MovedAttribute("uses_params", "urlparse", "urllib.parse"), @@ -416,6 +420,8 @@ _urllib_request_moved_attributes = [ MovedAttribute("URLopener", "urllib", "urllib.request"), MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), ] for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) @@ -679,11 +685,15 @@ if PY3: exec_ = getattr(moves.builtins, "exec") def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None else: def exec_(_code_, _globs_=None, _locs_=None): @@ -699,19 +709,28 @@ else: exec("""exec _code_ in _globs_, _locs_""") exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb + try: + raise tp, value, tb + finally: + tb = None """) if sys.version_info[:2] == (3, 2): exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None """) elif sys.version_info[:2] > (3, 2): exec_("""def raise_from(value, from_value): - raise value from from_value + try: + raise value from from_value + finally: + value = None """) else: def raise_from(value, from_value): @@ -802,10 +821,14 @@ def with_metaclass(meta, *bases): # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. 
- class metaclass(meta): + class metaclass(type): def __new__(cls, name, this_bases, d): return meta(name, bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) return type.__new__(metaclass, 'temporary_class', (), {}) @@ -821,10 +844,71 @@ def add_metaclass(metaclass): orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) + if hasattr(cls, '__qualname__'): + orig_vars['__qualname__'] = cls.__qualname__ return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper +def ensure_binary(s, encoding='utf-8', errors='strict'): + """Coerce **s** to six.binary_type. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> encoded to `bytes` + - `bytes` -> `bytes` + """ + if isinstance(s, text_type): + return s.encode(encoding, errors) + elif isinstance(s, binary_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + +def ensure_str(s, encoding='utf-8', errors='strict'): + """Coerce *s* to `str`. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) + if PY2 and isinstance(s, text_type): + s = s.encode(encoding, errors) + elif PY3 and isinstance(s, binary_type): + s = s.decode(encoding, errors) + return s + + +def ensure_text(s, encoding='utf-8', errors='strict'): + """Coerce *s* to six.text_type. 
+ + For Python 2: + - `unicode` -> `unicode` + - `str` -> `unicode` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if isinstance(s, binary_type): + return s.decode(encoding, errors) + elif isinstance(s, text_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + + def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. diff --git a/Shared/lib/python3.4/site-packages/socks.py b/Shared/lib/python3.4/site-packages/socks.py index 56bfca8..7bcf590 100644 --- a/Shared/lib/python3.4/site-packages/socks.py +++ b/Shared/lib/python3.4/site-packages/socks.py @@ -1,11 +1,9 @@ -""" -SocksiPy - Python SOCKS module. -Version 1.5.6 +"""SocksiPy - Python SOCKS module. Copyright 2006 Dan-Haim. All rights reserved. -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, @@ -23,7 +21,7 @@ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT -OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE. +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
This module provides a standard socket-like interface for Python @@ -39,27 +37,46 @@ mainly to merge bug fixes found in Sourceforge Modifications made by Anorov (https://github.com/Anorov) -Forked and renamed to PySocks --Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method) +-Fixed issue with HTTP proxy failure checking (same bug that was in the + old ___recvall() method) -Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler, - courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py + courtesy of e000 (https://github.com/e000): + https://gist.github.com/869791#file_socksipyhandler.py -Re-styled code to make it readable -Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc. -Improved exception handling and output - -Removed irritating use of sequence indexes, replaced with tuple unpacked variables + -Removed irritating use of sequence indexes, replaced with tuple unpacked + variables -Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03" -Other general fixes --Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies +-Added clarification that the HTTP proxy connection method only supports + CONNECT-style tunneling HTTP proxies -Various small bug fixes """ -__version__ = "1.5.6" - +from base64 import b64encode +from collections import Callable +from errno import EOPNOTSUPP, EINVAL, EAGAIN +import functools +from io import BytesIO +import logging +import os +from os import SEEK_CUR import socket import struct -from errno import EOPNOTSUPP, EINVAL, EAGAIN -from io import BytesIO -from os import SEEK_CUR -from collections import Callable +import sys + +__version__ = "1.6.7" + + +if os.name == "nt" and sys.version_info < (3, 0): + try: + import win_inet_pton + except ImportError: + raise ImportError( + "To run PySocks on Windows you must install win_inet_pton") + +log = logging.getLogger(__name__) 
PROXY_TYPE_SOCKS4 = SOCKS4 = 1 PROXY_TYPE_SOCKS5 = SOCKS5 = 2 @@ -70,10 +87,28 @@ PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys())) _orgsocket = _orig_socket = socket.socket + +def set_self_blocking(function): + + @functools.wraps(function) + def wrapper(*args, **kwargs): + self = args[0] + try: + _is_blocking = self.gettimeout() + if _is_blocking == 0: + self.setblocking(True) + return function(*args, **kwargs) + except Exception as e: + raise + finally: + # set orgin blocking + if _is_blocking == 0: + self.setblocking(False) + return wrapper + + class ProxyError(IOError): - """ - socket_err contains original socket.error exception. - """ + """Socket_err contains original socket.error exception.""" def __init__(self, msg, socket_err=None): self.msg = msg self.socket_err = socket_err @@ -84,61 +119,82 @@ class ProxyError(IOError): def __str__(self): return self.msg -class GeneralProxyError(ProxyError): pass -class ProxyConnectionError(ProxyError): pass -class SOCKS5AuthError(ProxyError): pass -class SOCKS5Error(ProxyError): pass -class SOCKS4Error(ProxyError): pass -class HTTPError(ProxyError): pass -SOCKS4_ERRORS = { 0x5B: "Request rejected or failed", - 0x5C: "Request rejected because SOCKS server cannot connect to identd on the client", - 0x5D: "Request rejected because the client program and identd report different user-ids" - } +class GeneralProxyError(ProxyError): + pass -SOCKS5_ERRORS = { 0x01: "General SOCKS server failure", - 0x02: "Connection not allowed by ruleset", - 0x03: "Network unreachable", - 0x04: "Host unreachable", - 0x05: "Connection refused", - 0x06: "TTL expired", - 0x07: "Command not supported, or protocol error", - 0x08: "Address type not supported" - } -DEFAULT_PORTS = { SOCKS4: 1080, - SOCKS5: 1080, - HTTP: 8080 - } +class ProxyConnectionError(ProxyError): + pass -def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None): - """ - set_default_proxy(proxy_type, addr[, 
port[, rdns[, username, password]]]) - Sets a default proxy which all further socksocket objects will use, - unless explicitly changed. All parameters are as for socket.set_proxy(). - """ +class SOCKS5AuthError(ProxyError): + pass + + +class SOCKS5Error(ProxyError): + pass + + +class SOCKS4Error(ProxyError): + pass + + +class HTTPError(ProxyError): + pass + +SOCKS4_ERRORS = { + 0x5B: "Request rejected or failed", + 0x5C: ("Request rejected because SOCKS server cannot connect to identd on" + " the client"), + 0x5D: ("Request rejected because the client program and identd report" + " different user-ids") +} + +SOCKS5_ERRORS = { + 0x01: "General SOCKS server failure", + 0x02: "Connection not allowed by ruleset", + 0x03: "Network unreachable", + 0x04: "Host unreachable", + 0x05: "Connection refused", + 0x06: "TTL expired", + 0x07: "Command not supported, or protocol error", + 0x08: "Address type not supported" +} + +DEFAULT_PORTS = {SOCKS4: 1080, SOCKS5: 1080, HTTP: 8080} + + +def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, + username=None, password=None): + """Sets a default proxy. + + All further socksocket objects will use the default unless explicitly + changed. All parameters are as for socket.set_proxy().""" socksocket.default_proxy = (proxy_type, addr, port, rdns, username.encode() if username else None, password.encode() if password else None) -setdefaultproxy = set_default_proxy + +def setdefaultproxy(*args, **kwargs): + if "proxytype" in kwargs: + kwargs["proxy_type"] = kwargs.pop("proxytype") + return set_default_proxy(*args, **kwargs) + def get_default_proxy(): - """ - Returns the default proxy, set by set_default_proxy. - """ + """Returns the default proxy, set by set_default_proxy.""" return socksocket.default_proxy getdefaultproxy = get_default_proxy + def wrap_module(module): - """ - Attempts to replace a module's socket library with a SOCKS socket. Must set - a default proxy using set_default_proxy(...) first. 
- This will only work on modules that import socket directly into the namespace; - most of the Python Standard Library falls into this category. - """ + """Attempts to replace a module's socket library with a SOCKS socket. + + Must set a default proxy using set_default_proxy(...) first. This will + only work on modules that import socket directly into the namespace; + most of the Python Standard Library falls into this category.""" if socksocket.default_proxy: module.socket.socket = socksocket else: @@ -146,10 +202,12 @@ def wrap_module(module): wrapmodule = wrap_module -def create_connection(dest_pair, proxy_type=None, proxy_addr=None, + +def create_connection(dest_pair, + timeout=None, source_address=None, + proxy_type=None, proxy_addr=None, proxy_port=None, proxy_rdns=True, proxy_username=None, proxy_password=None, - timeout=None, source_address=None, socket_options=None): """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object @@ -162,24 +220,52 @@ def create_connection(dest_pair, proxy_type=None, proxy_addr=None, source_address - tuple (host, port) for the socket to bind to as its source address before connecting (only for compatibility) """ - sock = socksocket() - if socket_options is not None: - for opt in socket_options: - sock.setsockopt(*opt) - if isinstance(timeout, (int, float)): - sock.settimeout(timeout) - if proxy_type is not None: - sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns, - proxy_username, proxy_password) - if source_address is not None: - sock.bind(source_address) + # Remove IPv6 brackets on the remote address and proxy address. + remote_host, remote_port = dest_pair + if remote_host.startswith("["): + remote_host = remote_host.strip("[]") + if proxy_addr and proxy_addr.startswith("["): + proxy_addr = proxy_addr.strip("[]") + + err = None + + # Allow the SOCKS proxy to be on IPv4 or IPv6 addresses. 
+ for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM): + family, socket_type, proto, canonname, sa = r + sock = None + try: + sock = socksocket(family, socket_type, proto) + + if socket_options: + for opt in socket_options: + sock.setsockopt(*opt) + + if isinstance(timeout, (int, float)): + sock.settimeout(timeout) + + if proxy_type: + sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns, + proxy_username, proxy_password) + if source_address: + sock.bind(source_address) + + sock.connect((remote_host, remote_port)) + return sock + + except (socket.error, ProxyConnectionError) as e: + err = e + if sock: + sock.close() + sock = None + + if err: + raise err + + raise socket.error("gai returned empty list.") - sock.connect(dest_pair) - return sock class _BaseSocket(socket.socket): - """Allows Python 2's "delegated" methods such as send() to be overridden - """ + """Allows Python 2 delegated methods such as send() to be overridden.""" def __init__(self, *pos, **kw): _orig_socket.__init__(self, *pos, **kw) @@ -190,6 +276,7 @@ class _BaseSocket(socket.socket): _savenames = list() + def _makemethod(name): return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw) for name in ("sendto", "send", "recvfrom", "recv"): @@ -203,6 +290,7 @@ for name in ("sendto", "send", "recvfrom", "recv"): _BaseSocket._savenames.append(name) setattr(_BaseSocket, name, _makemethod(name)) + class socksocket(_BaseSocket): """socksocket([family[, type[, proto]]]) -> socket object @@ -214,12 +302,13 @@ class socksocket(_BaseSocket): default_proxy = None - def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs): + def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, + proto=0, *args, **kwargs): if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM): msg = "Socket type must be stream or datagram, not {!r}" raise ValueError(msg.format(type)) - _BaseSocket.__init__(self, family, type, proto, *args, **kwargs) + 
super(socksocket, self).__init__(family, type, proto, *args, **kwargs) self._proxyconn = None # TCP connection to keep UDP relay alive if self.default_proxy: @@ -229,11 +318,12 @@ class socksocket(_BaseSocket): self.proxy_sockname = None self.proxy_peername = None + self._timeout = None + def _readall(self, file, count): - """ - Receive EXACTLY the number of bytes requested from the file object. - Blocks until the required number of bytes have been received. - """ + """Receive EXACTLY the number of bytes requested from the file object. + + Blocks until the required number of bytes have been received.""" data = b"" while len(data) < count: d = file.read(count - len(data)) @@ -242,36 +332,56 @@ class socksocket(_BaseSocket): data += d return data - def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None): - """set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]]) - Sets the proxy to be used. + def settimeout(self, timeout): + self._timeout = timeout + try: + # test if we're connected, if so apply timeout + peer = self.get_proxy_peername() + super(socksocket, self).settimeout(self._timeout) + except socket.error: + pass - proxy_type - The type of the proxy to be used. Three types + def gettimeout(self): + return self._timeout + + def setblocking(self, v): + if v: + self.settimeout(None) + else: + self.settimeout(0.0) + + def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, + username=None, password=None): + """ Sets the proxy to be used. + + proxy_type - The type of the proxy to be used. Three types are supported: PROXY_TYPE_SOCKS4 (including socks4a), PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP addr - The address of the server (IP or DNS). port - The port of the server. Defaults to 1080 for SOCKS - servers and 8080 for HTTP proxy servers. + servers and 8080 for HTTP proxy servers. rdns - Should DNS queries be performed on the remote side (rather than the local side). The default is True. 
Note: This has no effect with SOCKS4 servers. username - Username to authenticate with to the server. The default is no authentication. password - Password to authenticate with to the server. - Only relevant when username is also provided. - """ + Only relevant when username is also provided.""" self.proxy = (proxy_type, addr, port, rdns, username.encode() if username else None, password.encode() if password else None) - setproxy = set_proxy + def setproxy(self, *args, **kwargs): + if "proxytype" in kwargs: + kwargs["proxy_type"] = kwargs.pop("proxytype") + return self.set_proxy(*args, **kwargs) def bind(self, *pos, **kw): - """ - Implements proxy connection for UDP sockets, - which happens during the bind() phase. - """ - proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy + """Implements proxy connection for UDP sockets. + + Happens during the bind() phase.""" + (proxy_type, proxy_addr, proxy_port, rdns, username, + password) = self.proxy if not proxy_type or self.type != socket.SOCK_DGRAM: return _orig_socket.bind(self, *pos, **kw) @@ -280,7 +390,7 @@ class socksocket(_BaseSocket): if proxy_type != SOCKS5: msg = "UDP only supported by SOCKS5 proxy type" raise socket.error(EOPNOTSUPP, msg) - _BaseSocket.bind(self, *pos, **kw) + super(socksocket, self).bind(*pos, **kw) # Need to specify actual local port because # some relays drop packets if a port of zero is specified. 
@@ -299,12 +409,13 @@ class socksocket(_BaseSocket): # but some proxies return a private IP address (10.x.y.z) host, _ = proxy _, port = relay - _BaseSocket.connect(self, (host, port)) + super(socksocket, self).connect((host, port)) + super(socksocket, self).settimeout(self._timeout) self.proxy_sockname = ("0.0.0.0", 0) # Unknown def sendto(self, bytes, *args, **kwargs): if self.type != socket.SOCK_DGRAM: - return _BaseSocket.sendto(self, bytes, *args, **kwargs) + return super(socksocket, self).sendto(bytes, *args, **kwargs) if not self._proxyconn: self.bind(("", 0)) @@ -318,23 +429,24 @@ class socksocket(_BaseSocket): header.write(STANDALONE) self._write_SOCKS5_address(address, header) - sent = _BaseSocket.send(self, header.getvalue() + bytes, *flags, **kwargs) + sent = super(socksocket, self).send(header.getvalue() + bytes, *flags, + **kwargs) return sent - header.tell() def send(self, bytes, flags=0, **kwargs): if self.type == socket.SOCK_DGRAM: return self.sendto(bytes, flags, self.proxy_peername, **kwargs) else: - return _BaseSocket.send(self, bytes, flags, **kwargs) + return super(socksocket, self).send(bytes, flags, **kwargs) def recvfrom(self, bufsize, flags=0): if self.type != socket.SOCK_DGRAM: - return _BaseSocket.recvfrom(self, bufsize, flags) + return super(socksocket, self).recvfrom(bufsize, flags) if not self._proxyconn: self.bind(("", 0)) - buf = BytesIO(_BaseSocket.recv(self, bufsize, flags)) - buf.seek(+2, SEEK_CUR) + buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags)) + buf.seek(2, SEEK_CUR) frag = buf.read(1) if ord(frag): raise NotImplementedError("Received UDP packet fragment") @@ -345,7 +457,7 @@ class socksocket(_BaseSocket): if fromhost != peerhost or peerport not in (0, fromport): raise socket.error(EAGAIN, "Packet filtered") - return (buf.read(), (fromhost, fromport)) + return (buf.read(bufsize), (fromhost, fromport)) def recv(self, *pos, **kw): bytes, _ = self.recvfrom(*pos, **kw) @@ -354,12 +466,10 @@ class 
socksocket(_BaseSocket): def close(self): if self._proxyconn: self._proxyconn.close() - return _BaseSocket.close(self) + return super(socksocket, self).close() def get_proxy_sockname(self): - """ - Returns the bound IP address and port number at the proxy. - """ + """Returns the bound IP address and port number at the proxy.""" return self.proxy_sockname getproxysockname = get_proxy_sockname @@ -368,26 +478,23 @@ class socksocket(_BaseSocket): """ Returns the IP and port number of the proxy. """ - return _BaseSocket.getpeername(self) + return self.getpeername() getproxypeername = get_proxy_peername def get_peername(self): - """ - Returns the IP address and port number of the destination - machine (note: get_proxy_peername returns the proxy) - """ + """Returns the IP address and port number of the destination machine. + + Note: get_proxy_peername returns the proxy.""" return self.proxy_peername getpeername = get_peername def _negotiate_SOCKS5(self, *dest_addr): - """ - Negotiates a stream connection through a SOCKS5 server. 
- """ + """Negotiates a stream connection through a SOCKS5 server.""" CONNECT = b"\x01" - self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self, - CONNECT, dest_addr) + self.proxy_peername, self.proxy_sockname = self._SOCKS5_request( + self, CONNECT, dest_addr) def _SOCKS5_request(self, conn, cmd, dst): """ @@ -418,7 +525,8 @@ class socksocket(_BaseSocket): if chosen_auth[0:1] != b"\x05": # Note: string[i:i+1] is used because indexing of a bytestring # via bytestring[i] yields an integer in Python 3 - raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + raise GeneralProxyError( + "SOCKS5 proxy server sent invalid data") # Check the chosen authentication method @@ -433,7 +541,8 @@ class socksocket(_BaseSocket): auth_status = self._readall(reader, 2) if auth_status[0:1] != b"\x01": # Bad response - raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + raise GeneralProxyError( + "SOCKS5 proxy server sent invalid data") if auth_status[1:2] != b"\x00": # Authentication failed raise SOCKS5AuthError("SOCKS5 authentication failed") @@ -444,9 +553,12 @@ class socksocket(_BaseSocket): elif chosen_auth[1:2] != b"\x00": # Reaching here is always bad if chosen_auth[1:2] == b"\xFF": - raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected") + raise SOCKS5AuthError( + "All offered SOCKS5 authentication methods were" + " rejected") else: - raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + raise GeneralProxyError( + "SOCKS5 proxy server sent invalid data") # Now we can request the actual connection writer.write(b"\x05" + cmd + b"\x00") @@ -456,7 +568,8 @@ class socksocket(_BaseSocket): # Get the response resp = self._readall(reader, 3) if resp[0:1] != b"\x05": - raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + raise GeneralProxyError( + "SOCKS5 proxy server sent invalid data") status = ord(resp[1:2]) if status != 0x00: @@ -466,6 +579,8 @@ class socksocket(_BaseSocket): # Get the 
bound address/port bnd = self._read_SOCKS5_address(reader) + + super(socksocket, self).settimeout(self._timeout) return (resolved, bnd) finally: reader.close() @@ -478,25 +593,41 @@ class socksocket(_BaseSocket): """ host, port = addr proxy_type, _, _, rdns, username, password = self.proxy + family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"} # If the given destination address is an IP address, we'll - # use the IPv4 address request even if remote resolving was specified. - try: - addr_bytes = socket.inet_aton(host) - file.write(b"\x01" + addr_bytes) - host = socket.inet_ntoa(addr_bytes) - except socket.error: - # Well it's not an IP number, so it's probably a DNS name. - if rdns: - # Resolve remotely - host_bytes = host.encode('idna') - file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes) - else: - # Resolve locally - addr_bytes = socket.inet_aton(socket.gethostbyname(host)) - file.write(b"\x01" + addr_bytes) - host = socket.inet_ntoa(addr_bytes) + # use the IP address request even if remote resolving was specified. + # Detect whether the address is IPv4/6 directly. + for family in (socket.AF_INET, socket.AF_INET6): + try: + addr_bytes = socket.inet_pton(family, host) + file.write(family_to_byte[family] + addr_bytes) + host = socket.inet_ntop(family, addr_bytes) + file.write(struct.pack(">H", port)) + return host, port + except socket.error: + continue + # Well it's not an IP number, so it's probably a DNS name. + if rdns: + # Resolve remotely + host_bytes = host.encode("idna") + file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes) + else: + # Resolve locally + addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC, + socket.SOCK_STREAM, + socket.IPPROTO_TCP, + socket.AI_ADDRCONFIG) + # We can't really work out what IP is reachable, so just pick the + # first. 
+ target_addr = addresses[0] + family = target_addr[0] + host = target_addr[4][0] + + addr_bytes = socket.inet_pton(family, host) + file.write(family_to_byte[family] + addr_bytes) + host = socket.inet_ntop(family, addr_bytes) file.write(struct.pack(">H", port)) return host, port @@ -507,6 +638,8 @@ class socksocket(_BaseSocket): elif atyp == b"\x03": length = self._readall(file, 1) addr = self._readall(file, ord(length)) + elif atyp == b"\x04": + addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16)) else: raise GeneralProxyError("SOCKS5 proxy server sent invalid data") @@ -514,9 +647,7 @@ class socksocket(_BaseSocket): return addr, port def _negotiate_SOCKS4(self, dest_addr, dest_port): - """ - Negotiates a connection through a SOCKS4 server. - """ + """Negotiates a connection through a SOCKS4 server.""" proxy_type, addr, port, rdns, username, password = self.proxy writer = self.makefile("wb") @@ -532,7 +663,8 @@ class socksocket(_BaseSocket): addr_bytes = b"\x00\x00\x00\x01" remote_resolve = True else: - addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr)) + addr_bytes = socket.inet_aton( + socket.gethostbyname(dest_addr)) # Construct the request packet writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port)) @@ -547,14 +679,15 @@ class socksocket(_BaseSocket): # NOTE: This is actually an extension to the SOCKS4 protocol # called SOCKS4A and may not be supported in all cases. 
if remote_resolve: - writer.write(dest_addr.encode('idna') + b"\x00") + writer.write(dest_addr.encode("idna") + b"\x00") writer.flush() # Get the response from the server resp = self._readall(reader, 8) if resp[0:1] != b"\x00": # Bad data - raise GeneralProxyError("SOCKS4 proxy server sent invalid data") + raise GeneralProxyError( + "SOCKS4 proxy server sent invalid data") status = ord(resp[1:2]) if status != 0x5A: @@ -563,7 +696,8 @@ class socksocket(_BaseSocket): raise SOCKS4Error("{0:#04x}: {1}".format(status, error)) # Get the bound address/port - self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0]) + self.proxy_sockname = (socket.inet_ntoa(resp[4:]), + struct.unpack(">H", resp[2:4])[0]) if remote_resolve: self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port else: @@ -573,17 +707,27 @@ class socksocket(_BaseSocket): writer.close() def _negotiate_HTTP(self, dest_addr, dest_port): - """ - Negotiates a connection through an HTTP server. - NOTE: This currently only supports HTTP CONNECT-style proxies. - """ + """Negotiates a connection through an HTTP server. 
+ + NOTE: This currently only supports HTTP CONNECT-style proxies.""" proxy_type, addr, port, rdns, username, password = self.proxy # If we need to resolve locally, we do this now addr = dest_addr if rdns else socket.gethostbyname(dest_addr) - self.sendall(b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + - b" HTTP/1.1\r\n" + b"Host: " + dest_addr.encode('idna') + b"\r\n\r\n") + http_headers = [ + (b"CONNECT " + addr.encode("idna") + b":" + + str(dest_port).encode() + b" HTTP/1.1"), + b"Host: " + dest_addr.encode("idna") + ] + + if username and password: + http_headers.append(b"Proxy-Authorization: basic " + + b64encode(username + b":" + password)) + + http_headers.append(b"\r\n") + + self.sendall(b"\r\n".join(http_headers)) # We just need the first line to check if the connection was successful fobj = self.makefile() @@ -599,19 +743,23 @@ class socksocket(_BaseSocket): raise GeneralProxyError("HTTP proxy server sent invalid response") if not proto.startswith("HTTP/"): - raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy") + raise GeneralProxyError( + "Proxy server does not appear to be an HTTP proxy") try: status_code = int(status_code) except ValueError: - raise HTTPError("HTTP proxy server did not return a valid HTTP status") + raise HTTPError( + "HTTP proxy server did not return a valid HTTP status") if status_code != 200: error = "{0}: {1}".format(status_code, status_msg) if status_code in (400, 403, 405): - # It's likely that the HTTP proxy server does not support the CONNECT tunneling method - error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks" - " (must be a CONNECT tunnel proxy)") + # It's likely that the HTTP proxy server does not support the + # CONNECT tunneling method + error += ("\n[*] Note: The HTTP proxy server may not be" + " supported by PySocks (must be a CONNECT tunnel" + " proxy)") raise HTTPError(error) self.proxy_sockname = (b"0.0.0.0", 0) @@ -623,7 +771,7 @@ class 
socksocket(_BaseSocket): HTTP: _negotiate_HTTP } - + @set_self_blocking def connect(self, dest_pair): """ Connects to the specified destination through a proxy. @@ -636,7 +784,8 @@ class socksocket(_BaseSocket): # Probably IPv6, not supported -- raise an error, and hope # Happy Eyeballs (RFC6555) makes sure at least the IPv4 # connection works... - raise socket.error("PySocks doesn't support IPv6") + raise socket.error("PySocks doesn't support IPv6: %s" + % str(dest_pair)) dest_addr, dest_port = dest_pair @@ -653,27 +802,34 @@ class socksocket(_BaseSocket): self.proxy_peername = (dest_addr, dest_port) return - proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy + (proxy_type, proxy_addr, proxy_port, rdns, username, + password) = self.proxy # Do a minimal input check first if (not isinstance(dest_pair, (list, tuple)) or len(dest_pair) != 2 or not dest_addr or not isinstance(dest_port, int)): - raise GeneralProxyError("Invalid destination-connection (host, port) pair") + # Inputs failed, raise an error + raise GeneralProxyError( + "Invalid destination-connection (host, port) pair") + # We set the timeout here so that we don't hang in connection or during + # negotiation. + super(socksocket, self).settimeout(self._timeout) if proxy_type is None: # Treat like regular socket object self.proxy_peername = dest_pair - _BaseSocket.connect(self, (dest_addr, dest_port)) + super(socksocket, self).settimeout(self._timeout) + super(socksocket, self).connect((dest_addr, dest_port)) return proxy_addr = self._proxy_addr() try: - # Initial connection to proxy server - _BaseSocket.connect(self, proxy_addr) + # Initial connection to proxy server. 
+ super(socksocket, self).connect(proxy_addr) except socket.error as error: # Error while connecting to proxy @@ -683,7 +839,8 @@ class socksocket(_BaseSocket): printable_type = PRINTABLE_PROXY_TYPES[proxy_type] msg = "Error connecting to {0} proxy {1}".format(printable_type, - proxy_server) + proxy_server) + log.debug("%s due to: %s", msg, error) raise ProxyConnectionError(msg, error) else: @@ -705,7 +862,8 @@ class socksocket(_BaseSocket): """ Return proxy address to connect to as tuple object """ - proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy + (proxy_type, proxy_addr, proxy_port, rdns, username, + password) = self.proxy proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type) if not proxy_port: raise GeneralProxyError("Invalid proxy type") diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/DESCRIPTION.rst deleted file mode 100644 index 888ff83..0000000 --- a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,135 +0,0 @@ -================================================================= -sqlitedict -- persistent ``dict``, backed-up by SQLite and pickle -================================================================= - -|Travis|_ -|Downloads|_ -|License|_ - -.. |Travis| image:: https://img.shields.io/travis/piskvorky/sqlitedict.svg -.. |Downloads| image:: https://img.shields.io/pypi/dm/sqlitedict.svg -.. |License| image:: https://img.shields.io/pypi/l/sqlitedict.svg -.. _Travis: https://travis-ci.org/piskvorky/sqlitedict -.. _Downloads: https://pypi.python.org/pypi/sqlitedict -.. _License: https://pypi.python.org/pypi/sqlitedict - -A lightweight wrapper around Python's sqlite3 database with a simple, Pythonic -dict-like interface and support for multi-thread access: - -.. 
code-block:: python - - >>> from sqlitedict import SqliteDict - >>> mydict = SqliteDict('./my_db.sqlite', autocommit=True) - >>> mydict['some_key'] = any_picklable_object - >>> print mydict['some_key'] # prints the new value - >>> for key, value in mydict.iteritems(): - >>> print key, value - >>> print len(mydict) # etc... all dict functions work - >>> mydict.close() - -Pickle is used internally to (de)serialize the values. Keys are arbitrary strings, -values arbitrary pickle-able objects. - -If you don't use autocommit (default is no autocommit for performance), then -don't forget to call ``mydict.commit()`` when done with a transaction: - -.. code-block:: python - - >>> # using SqliteDict as context manager works too (RECOMMENDED) - >>> with SqliteDict('./my_db.sqlite') as mydict: # note no autocommit=True - ... mydict['some_key'] = u"first value" - ... mydict['another_key'] = range(10) - ... mydict.commit() - ... mydict['some_key'] = u"new value" - ... # no explicit commit here - >>> with SqliteDict('./my_db.sqlite') as mydict: # re-open the same DB - ... print mydict['some_key'] # outputs 'first value', not 'new value' - - -Features --------- - -* Values can be **any picklable objects** (uses ``cPickle`` with the highest protocol). -* Support for **multiple tables** (=dicts) living in the same database file. -* Support for **access from multiple threads** to the same connection (needed by e.g. Pyro). - Vanilla sqlite3 gives you ``ProgrammingError: SQLite objects created in a thread can - only be used in that same thread.`` - -Concurrent requests are still serialized internally, so this "multithreaded support" -**doesn't** give you any performance benefits. It is a work-around for sqlite limitations in Python. - -Installation ------------- - -The module has no dependencies beyond Python itself. The minimum Python version is 2.5, continuously tested on Python 2.6, 2.7, 3.3 and 3.4 `on Travis `_. 
- -Install or upgrade with:: - - easy_install -U sqlitedict - -or from the `source tar.gz `_:: - - python setup.py install - -Documentation -------------- - -Standard Python document strings are inside the module: - -.. code-block:: python - - >>> import sqlitedict - >>> help(sqlitedict) - -(but it's just ``dict`` with a commit, really). - -**Beware**: because of Python semantics, ``sqlitedict`` cannot know when a mutable -SqliteDict-backed entry was modified in RAM. For example, ``mydict.setdefault('new_key', []).append(1)`` -will leave ``mydict['new_key']`` equal to empty list, not ``[1]``. You'll need to -explicitly assign the mutated object back to SqliteDict to achieve the same effect: - -.. code-block:: python - - >>> val = mydict.get('new_key', []) - >>> val.append(1) # sqlite DB not updated here! - >>> mydict['new_key'] = val # now updated - - -For developers --------------- - -Install:: - - # pip install nose - # pip install coverage - -To perform all tests:: - - # make test-all - -To perform all tests with coverage:: - - # make test-all-with-coverage - - -Comments, bug reports ---------------------- - -``sqlitedict`` resides on `github `_. You can file -issues or pull requests there. - -History -------- - -**1.4.0**: fix regression where iterating over keys/values/items returned a full list instead of iterator - -**1.3.0**: improve error handling in multithreading (`PR #28 `_); 100% test coverage. - -**1.2.0**: full python 3 support, continuous testing via `Travis CI `_. - ----- - -``sqlitedict`` is open source software released under the `Apache 2.0 license `_. -Copyright (c) 2011-now `Radim Řehůřek `_ and contributors. 
- - diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/METADATA deleted file mode 100644 index 23afb11..0000000 --- a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/METADATA +++ /dev/null @@ -1,158 +0,0 @@ -Metadata-Version: 2.0 -Name: sqlitedict -Version: 1.4.0 -Summary: Persistent dict in Python, backed up by sqlite3 and pickle, multithread-safe. -Home-page: https://github.com/piskvorky/sqlitedict -Author: Radim Rehurek -Author-email: me@radimrehurek.com -License: Apache 2.0 -Download-URL: http://pypi.python.org/pypi/sqlitedict -Keywords: sqlite,persistent dict,multithreaded -Platform: any -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Console -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: Apache Software License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 2.5 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Topic :: Database :: Front-Ends - -================================================================= -sqlitedict -- persistent ``dict``, backed-up by SQLite and pickle -================================================================= - -|Travis|_ -|Downloads|_ -|License|_ - -.. |Travis| image:: https://img.shields.io/travis/piskvorky/sqlitedict.svg -.. |Downloads| image:: https://img.shields.io/pypi/dm/sqlitedict.svg -.. 
|License| image:: https://img.shields.io/pypi/l/sqlitedict.svg -.. _Travis: https://travis-ci.org/piskvorky/sqlitedict -.. _Downloads: https://pypi.python.org/pypi/sqlitedict -.. _License: https://pypi.python.org/pypi/sqlitedict - -A lightweight wrapper around Python's sqlite3 database with a simple, Pythonic -dict-like interface and support for multi-thread access: - -.. code-block:: python - - >>> from sqlitedict import SqliteDict - >>> mydict = SqliteDict('./my_db.sqlite', autocommit=True) - >>> mydict['some_key'] = any_picklable_object - >>> print mydict['some_key'] # prints the new value - >>> for key, value in mydict.iteritems(): - >>> print key, value - >>> print len(mydict) # etc... all dict functions work - >>> mydict.close() - -Pickle is used internally to (de)serialize the values. Keys are arbitrary strings, -values arbitrary pickle-able objects. - -If you don't use autocommit (default is no autocommit for performance), then -don't forget to call ``mydict.commit()`` when done with a transaction: - -.. code-block:: python - - >>> # using SqliteDict as context manager works too (RECOMMENDED) - >>> with SqliteDict('./my_db.sqlite') as mydict: # note no autocommit=True - ... mydict['some_key'] = u"first value" - ... mydict['another_key'] = range(10) - ... mydict.commit() - ... mydict['some_key'] = u"new value" - ... # no explicit commit here - >>> with SqliteDict('./my_db.sqlite') as mydict: # re-open the same DB - ... print mydict['some_key'] # outputs 'first value', not 'new value' - - -Features --------- - -* Values can be **any picklable objects** (uses ``cPickle`` with the highest protocol). -* Support for **multiple tables** (=dicts) living in the same database file. -* Support for **access from multiple threads** to the same connection (needed by e.g. Pyro). 
- Vanilla sqlite3 gives you ``ProgrammingError: SQLite objects created in a thread can - only be used in that same thread.`` - -Concurrent requests are still serialized internally, so this "multithreaded support" -**doesn't** give you any performance benefits. It is a work-around for sqlite limitations in Python. - -Installation ------------- - -The module has no dependencies beyond Python itself. The minimum Python version is 2.5, continuously tested on Python 2.6, 2.7, 3.3 and 3.4 `on Travis `_. - -Install or upgrade with:: - - easy_install -U sqlitedict - -or from the `source tar.gz `_:: - - python setup.py install - -Documentation -------------- - -Standard Python document strings are inside the module: - -.. code-block:: python - - >>> import sqlitedict - >>> help(sqlitedict) - -(but it's just ``dict`` with a commit, really). - -**Beware**: because of Python semantics, ``sqlitedict`` cannot know when a mutable -SqliteDict-backed entry was modified in RAM. For example, ``mydict.setdefault('new_key', []).append(1)`` -will leave ``mydict['new_key']`` equal to empty list, not ``[1]``. You'll need to -explicitly assign the mutated object back to SqliteDict to achieve the same effect: - -.. code-block:: python - - >>> val = mydict.get('new_key', []) - >>> val.append(1) # sqlite DB not updated here! - >>> mydict['new_key'] = val # now updated - - -For developers --------------- - -Install:: - - # pip install nose - # pip install coverage - -To perform all tests:: - - # make test-all - -To perform all tests with coverage:: - - # make test-all-with-coverage - - -Comments, bug reports ---------------------- - -``sqlitedict`` resides on `github `_. You can file -issues or pull requests there. - -History -------- - -**1.4.0**: fix regression where iterating over keys/values/items returned a full list instead of iterator - -**1.3.0**: improve error handling in multithreading (`PR #28 `_); 100% test coverage. 
- -**1.2.0**: full python 3 support, continuous testing via `Travis CI `_. - ----- - -``sqlitedict`` is open source software released under the `Apache 2.0 license `_. -Copyright (c) 2011-now `Radim Řehůřek `_ and contributors. - - diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/RECORD deleted file mode 100644 index bdb8a03..0000000 --- a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/RECORD +++ /dev/null @@ -1,10 +0,0 @@ -sqlitedict.py,sha256=9zIUg7xFsnHFXc07KklfLKKvtcF6Ph6VjHI7biEii98,17577 -sqlitedict-1.4.0.dist-info/DESCRIPTION.rst,sha256=0DtdGW_JnxlMrNq3w5Z8X89LZR2GEmNUpc3bqUN-elY,4592 -sqlitedict-1.4.0.dist-info/METADATA,sha256=U_yEc91oeI2LREBIsyZv9scFcKu4IF8XBxrBaRAjX_Y,5514 -sqlitedict-1.4.0.dist-info/RECORD,, -sqlitedict-1.4.0.dist-info/WHEEL,sha256=lCqt3ViRAf9c8mCs6o7ffkwROUdYSy8_YHn5f_rulB4,93 -sqlitedict-1.4.0.dist-info/metadata.json,sha256=Gu_BazsbomIjBcE4XjoiZV6U7DUJpWklZudlIYduauI,1070 -sqlitedict-1.4.0.dist-info/pbr.json,sha256=wraF_0ld56r3l9udmVdBYB-N7W8nh7Ax8-HRVqiGRFE,46 -sqlitedict-1.4.0.dist-info/top_level.txt,sha256=gRsHHG_lHd0G92cPsIV8dhQS7yZfJUYW5GY_oqapYik,11 -sqlitedict-1.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -__pycache__/sqlitedict.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/WHEEL deleted file mode 100644 index 6d9801a..0000000 --- a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: cp34-none-any - diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/metadata.json deleted file mode 100644 index f1333a7..0000000 --- 
a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Topic :: Database :: Front-Ends"], "download_url": "http://pypi.python.org/pypi/sqlitedict", "extensions": {"python.details": {"contacts": [{"email": "me@radimrehurek.com", "name": "Radim Rehurek", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/piskvorky/sqlitedict"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["sqlite", "persistent", "dict", "multithreaded"], "license": "Apache 2.0", "metadata_version": "2.0", "name": "sqlitedict", "platform": "any", "summary": "Persistent dict in Python, backed up by sqlite3 and pickle, multithread-safe.", "version": "1.4.0"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/PKG-INFO new file mode 100644 index 0000000..833dbcc --- /dev/null +++ b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/PKG-INFO @@ -0,0 +1,158 @@ +Metadata-Version: 1.2 +Name: sqlitedict +Version: 1.4.0 +Summary: Persistent dict in Python, backed up by sqlite3 and pickle, multithread-safe. +Home-page: https://github.com/piskvorky/sqlitedict +Author: Radim Rehurek, Victor R. 
Escobar, Andrey Usov, Prasanna Swaminathan, Jeff Quast +Author-email: various +Maintainer: Radim Rehurek +Maintainer-email: me@radimrehurek.com +License: Apache 2.0 +Download-URL: http://pypi.python.org/pypi/sqlitedict +Description: ================================================================= + sqlitedict -- persistent ``dict``, backed-up by SQLite and pickle + ================================================================= + + |Travis|_ + |Downloads|_ + |License|_ + + .. |Travis| image:: https://img.shields.io/travis/piskvorky/sqlitedict.svg + .. |Downloads| image:: https://img.shields.io/pypi/dm/sqlitedict.svg + .. |License| image:: https://img.shields.io/pypi/l/sqlitedict.svg + .. _Travis: https://travis-ci.org/piskvorky/sqlitedict + .. _Downloads: https://pypi.python.org/pypi/sqlitedict + .. _License: https://pypi.python.org/pypi/sqlitedict + + A lightweight wrapper around Python's sqlite3 database with a simple, Pythonic + dict-like interface and support for multi-thread access: + + .. code-block:: python + + >>> from sqlitedict import SqliteDict + >>> mydict = SqliteDict('./my_db.sqlite', autocommit=True) + >>> mydict['some_key'] = any_picklable_object + >>> print mydict['some_key'] # prints the new value + >>> for key, value in mydict.iteritems(): + >>> print key, value + >>> print len(mydict) # etc... all dict functions work + >>> mydict.close() + + Pickle is used internally to (de)serialize the values. Keys are arbitrary strings, + values arbitrary pickle-able objects. + + If you don't use autocommit (default is no autocommit for performance), then + don't forget to call ``mydict.commit()`` when done with a transaction: + + .. code-block:: python + + >>> # using SqliteDict as context manager works too (RECOMMENDED) + >>> with SqliteDict('./my_db.sqlite') as mydict: # note no autocommit=True + ... mydict['some_key'] = u"first value" + ... mydict['another_key'] = range(10) + ... mydict.commit() + ... mydict['some_key'] = u"new value" + ... 
# no explicit commit here + >>> with SqliteDict('./my_db.sqlite') as mydict: # re-open the same DB + ... print mydict['some_key'] # outputs 'first value', not 'new value' + + + Features + -------- + + * Values can be **any picklable objects** (uses ``cPickle`` with the highest protocol). + * Support for **multiple tables** (=dicts) living in the same database file. + * Support for **access from multiple threads** to the same connection (needed by e.g. Pyro). + Vanilla sqlite3 gives you ``ProgrammingError: SQLite objects created in a thread can + only be used in that same thread.`` + + Concurrent requests are still serialized internally, so this "multithreaded support" + **doesn't** give you any performance benefits. It is a work-around for sqlite limitations in Python. + + Installation + ------------ + + The module has no dependencies beyond Python itself. The minimum Python version is 2.5, continuously tested on Python 2.6, 2.7, 3.3 and 3.4 `on Travis `_. + + Install or upgrade with:: + + easy_install -U sqlitedict + + or from the `source tar.gz `_:: + + python setup.py install + + Documentation + ------------- + + Standard Python document strings are inside the module: + + .. code-block:: python + + >>> import sqlitedict + >>> help(sqlitedict) + + (but it's just ``dict`` with a commit, really). + + **Beware**: because of Python semantics, ``sqlitedict`` cannot know when a mutable + SqliteDict-backed entry was modified in RAM. For example, ``mydict.setdefault('new_key', []).append(1)`` + will leave ``mydict['new_key']`` equal to empty list, not ``[1]``. You'll need to + explicitly assign the mutated object back to SqliteDict to achieve the same effect: + + .. code-block:: python + + >>> val = mydict.get('new_key', []) + >>> val.append(1) # sqlite DB not updated here! 
+ >>> mydict['new_key'] = val # now updated + + + For developers + -------------- + + Install:: + + # pip install nose + # pip install coverage + + To perform all tests:: + + # make test-all + + To perform all tests with coverage:: + + # make test-all-with-coverage + + + Comments, bug reports + --------------------- + + ``sqlitedict`` resides on `github `_. You can file + issues or pull requests there. + + History + ------- + + **1.4.0**: fix regression where iterating over keys/values/items returned a full list instead of iterator + + **1.3.0**: improve error handling in multithreading (`PR #28 `_); 100% test coverage. + + **1.2.0**: full python 3 support, continuous testing via `Travis CI `_. + + ---- + + ``sqlitedict`` is open source software released under the `Apache 2.0 license `_. + Copyright (c) 2011-now `Radim Řehůřek `_ and contributors. + +Keywords: sqlite,persistent dict,multithreaded +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Topic :: Database :: Front-Ends diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/SOURCES.txt new file mode 100644 index 0000000..855c862 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/SOURCES.txt @@ -0,0 +1,17 @@ +MANIFEST.in +Makefile +README.rst +setup.cfg +setup.py +sqlitedict.py +sqlitedict.egg-info/PKG-INFO +sqlitedict.egg-info/SOURCES.txt +sqlitedict.egg-info/dependency_links.txt +sqlitedict.egg-info/pbr.json 
+sqlitedict.egg-info/top_level.txt +tests/accessories.py +tests/test_core.py +tests/test_keytypes.py +tests/test_named_db.py +tests/test_onimport.py +tests/test_temp_db.py \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/installed-files.txt new file mode 100644 index 0000000..d0b8b4b --- /dev/null +++ b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/installed-files.txt @@ -0,0 +1,7 @@ +../__pycache__/sqlitedict.cpython-37.pyc +../sqlitedict.py +PKG-INFO +SOURCES.txt +dependency_links.txt +pbr.json +top_level.txt diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/pbr.json b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/pbr.json similarity index 100% rename from Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/pbr.json rename to Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/pbr.json diff --git a/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/sqlitedict-1.4.0.egg-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/DESCRIPTION.rst deleted file mode 100644 index 66e7e67..0000000 --- a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/DESCRIPTION.rst +++ 
/dev/null @@ -1,9 +0,0 @@ -Tor . -Home-page: https://stem.torproject.org/ -Author: Damian Johnson -Author-email: atagar@torproject.org -License: LGPLv3 -Description: UNKNOWN -Keywords: tor onion controller -Platform: UNKNOWN -Provides: stem diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/METADATA deleted file mode 100644 index 9700cc7..0000000 --- a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/METADATA +++ /dev/null @@ -1,14 +0,0 @@ -Metadata-Version: 2.0 -Name: stem -Version: 1.4.0 -Summary: Stem is a Python controller library that allows applications to interact with - -Tor . -Home-page: https://stem.torproject.org/ -Author: Damian Johnson -Author-email: atagar@torproject.org -License: LGPLv3 -Description: UNKNOWN -Keywords: tor onion controller -Platform: UNKNOWN -Provides: stem diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/RECORD deleted file mode 100644 index f3756f7..0000000 --- a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/RECORD +++ /dev/null @@ -1,100 +0,0 @@ -stem/__init__.py,sha256=PkGaNRRxHCw9LR95bzoGI9W_omi7NHHrsrxOk6dteVU,25346 -stem/connection.py,sha256=L6YKinmNiiCEVii8bj_GBM0_cNPcHMYdrrcYUHZIgC4,48656 -stem/control.py,sha256=Un6F80EPo9E_A6V4W0UBs0IoOYjkgZ9l96UVp0jpiLQ,128760 -stem/exit_policy.py,sha256=E-KVqA5UDG9N7D0qvv08pS46voC3_v6DHsAPIt7nuZM,34569 -stem/prereq.py,sha256=0Ef4LS2RHujf6S895STZ20xFMgnYNM-Qi08NAqSspTQ,3430 -stem/process.py,sha256=hglijQAkJm6DXfWinVu576VZQzmfwZ8AsBgDHaO_No8,8996 
-stem/socket.py,sha256=_pFG1XP6NMpnvi6TP2c_Em0ic5DUKbT1OT5yfnnl7-4,21337 -stem/version.py,sha256=7mdeMMpgagpIswr7ZZ94rp9Y5OWyhKXACUmcYOuaxLg,12810 -stem/descriptor/__init__.py,sha256=rRqvvZXw04S4s08qTRfVHRxmNEl7yPq-6qmT_kPO0PY,31596 -stem/descriptor/export.py,sha256=iaoulMHE4J5XKvCZt7fgUJZM6d0yTaXYgLKrYVMpAkA,3981 -stem/descriptor/extrainfo_descriptor.py,sha256=WMtrj4T46iEEiwFtlDuPnCaaVCZ5TLoPeP6rzPV2l8A,39833 -stem/descriptor/hidden_service_descriptor.py,sha256=D0et0CrhxeLK-PRAewOFKew_kr92oXJcvYxHoTJUW-8,15839 -stem/descriptor/microdescriptor.py,sha256=w2F7jeXNiHlBsizBTK3Dmp3fIcUlkY-fQ81Ek7Otp1A,9866 -stem/descriptor/networkstatus.py,sha256=xlhhncPoDGc0VLXDKEzlubzqOtVufHLkFbSVrjztviM,56353 -stem/descriptor/reader.py,sha256=rF7wwZQid4tNcWoHLrkxwIk_qmliJMvjsvaGwXlf9lc,18933 -stem/descriptor/remote.py,sha256=igX3WBzJFHqyszq3xzBpFXrUG2dXm9m_NErmD1Qwu3w,26854 -stem/descriptor/router_status_entry.py,sha256=RWIfBZosZNR5u4FHEiAXpYaN79dyhVE1aMJbkQODFms,21084 -stem/descriptor/server_descriptor.py,sha256=oPnCyBYNpNNt8-FVzAlI6KtPbuR0mRKudz-CTrhruv0,30404 -stem/descriptor/tordnsel.py,sha256=OAGQXiEWIbNIEmd9T8AmV2hfdPl0Cs3JLkwA8IAc4U8,3950 -stem/interpreter/__init__.py,sha256=7Ax1NIXMpd0ucWqVh3-S2kyA8AyFvnCnGn1fawT_QYo,3896 -stem/interpreter/arguments.py,sha256=OE3A2jPS741EHft5ydE7O0y51h0MRQzyr-FtCoNiaTU,2522 -stem/interpreter/autocomplete.py,sha256=NmbmsBzScDVGTUwW95AZyJ7VwK-IWvD_oEnKNdFoB9o,3024 -stem/interpreter/commands.py,sha256=lldiZicDAvlzP53UDw8iNBvM6ucOydHkUzcP2ivvwAM,11489 -stem/interpreter/help.py,sha256=JLDTSE0PSrOd5kNegSsba_ViOUGRQIcx-ZTdDhANZ08,3760 -stem/interpreter/settings.cfg,sha256=VsJj1VVlZ5Qtmmvk1lOnbZTW9Yn-tdHNjjgEx1VT7Cs,12047 -stem/response/__init__.py,sha256=3mIdmPgBS696MLYkBAInSongQZdFL1_hodCivJQu3LM,18335 -stem/response/add_onion.py,sha256=OMzb8FVdZWedM4BgovSPr4B7XGDuGg3W4Jc2G-9ptAM,1375 -stem/response/authchallenge.py,sha256=injpBy3M-RvDsqRKEjdhdp88ak6VplmqFf4V3oKBw3c,1910 
-stem/response/events.py,sha256=V33py7CJL1AjDN6prLFmcTjqXMCFTIfioQzTyXKhCDA,47264 -stem/response/getconf.py,sha256=5D0jNrIzCDX6OBidrQmLRDjQGVhRriMCPqrPYgHzwjA,1653 -stem/response/getinfo.py,sha256=aJQzP7J5dNt9qiPiGHQIlwlFSqzzq-gAP4Lm4X0uvo0,2591 -stem/response/mapaddress.py,sha256=uRQSWEH0Zy9eoMqPUjDL99417wA3kYjtjT7_CF6usDQ,1326 -stem/response/protocolinfo.py,sha256=VI93d3d8Yr-dLNzaSCROgw34xP0azQS0SJRRtSBQ9oc,5107 -stem/util/__init__.py,sha256=8pk1LDlWdX38LHSLAQCfXNdLyo4fO7ReSSB4hLZJ-fA,306 -stem/util/conf.py,sha256=L4y3L9P2WIPLR0g6i8cCIbeYjt64un5mgOBzPE8mmMQ,24141 -stem/util/connection.py,sha256=sqpBfriN7tw43vmakkhpZrgdCSskP0e2CMsBBtWlLvc,20557 -stem/util/enum.py,sha256=pRe9F1pTu07mnFCq5QJCkT59HSghIxLsCTb1ner7kpQ,4385 -stem/util/log.py,sha256=AnZhdoW1Qpp8G8rtX5vZIiDO6P3BL_6AnmglHB-zuYc,7053 -stem/util/lru_cache.py,sha256=32QZigCWWn_oLNRoG2aJcvItbhdPCRmmfB2DFRN93lk,7373 -stem/util/ordereddict.py,sha256=iQxAmvQilCGsHNhBezI4JRR10iSdYmSvnXLjCflO-w8,3961 -stem/util/ports.cfg,sha256=XvAtNtLOXuHvYD3pvQO8QRQoaPv0L2UtPvK4QxfK-s8,6117 -stem/util/proc.py,sha256=WZ63k4nEAEQ18AH-69NiKCbZdPaGRX2GLYGS1_cssXk,16165 -stem/util/str_tools.py,sha256=253jCw6_1Bagdgd_5WPoiEbmACgGxnAakM7ODJLE9Vs,15814 -stem/util/system.py,sha256=gv1IsFTzQw4q66a-VrPngcRc9v_PkBjJyi0qLaOG_pM,33742 -stem/util/term.py,sha256=Ad_lpW_4-ltzyGhO_rrBGfmGBs87XEyEasyNvj_Z7xs,3686 -stem/util/test_tools.py,sha256=92rRfzVtOJHdeU3nvwQcF1pKce9vStgH_Ej2LYFR9EM,10810 -stem/util/tor_tools.py,sha256=cMrLOsy0E7rM_34iAOFC8YyzKTYglJm7pPlPS0Ynhdc,4084 -../../../bin/tor-prompt,sha256=thC-bVr5IE77Ldm3FV82jQnLM-nnaE9P8_XSZsBjML8,181 -stem-1.4.0.dist-info/DESCRIPTION.rst,sha256=ubjzxCg4KhHZWjf3ClRD8L9vY8LCP1FlZy1V-oHQbYQ,235 -stem-1.4.0.dist-info/METADATA,sha256=QXjp7lhyV0a-uPQPIXNSLz9uAiyvP6AKf5n7f7A26rU,371 -stem-1.4.0.dist-info/RECORD,, -stem-1.4.0.dist-info/WHEEL,sha256=lCqt3ViRAf9c8mCs6o7ffkwROUdYSy8_YHn5f_rulB4,93 -stem-1.4.0.dist-info/metadata.json,sha256=_FibkGOQ6hFO-jAnIVQ-8FasdcMDbn7E5yI_b8QfV84,282 
-stem-1.4.0.dist-info/top_level.txt,sha256=_Fv_hT3iFjDmFwOMcKq7iIsbarreb2UO1-H2frEcwHU,5 -stem-1.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -stem/__pycache__/socket.cpython-34.pyc,, -stem/descriptor/__pycache__/reader.cpython-34.pyc,, -stem/interpreter/__pycache__/commands.cpython-34.pyc,, -stem/util/__pycache__/test_tools.cpython-34.pyc,, -stem/descriptor/__pycache__/tordnsel.cpython-34.pyc,, -stem/descriptor/__pycache__/networkstatus.cpython-34.pyc,, -stem/util/__pycache__/term.cpython-34.pyc,, -stem/__pycache__/control.cpython-34.pyc,, -stem/response/__pycache__/getinfo.cpython-34.pyc,, -stem/__pycache__/exit_policy.cpython-34.pyc,, -stem/util/__pycache__/conf.cpython-34.pyc,, -stem/__pycache__/connection.cpython-34.pyc,, -stem/util/__pycache__/log.cpython-34.pyc,, -stem/descriptor/__pycache__/export.cpython-34.pyc,, -stem/descriptor/__pycache__/remote.cpython-34.pyc,, -stem/util/__pycache__/__init__.cpython-34.pyc,, -stem/__pycache__/prereq.cpython-34.pyc,, -stem/util/__pycache__/enum.cpython-34.pyc,, -stem/interpreter/__pycache__/__init__.cpython-34.pyc,, -stem/interpreter/__pycache__/autocomplete.cpython-34.pyc,, -stem/util/__pycache__/str_tools.cpython-34.pyc,, -stem/util/__pycache__/lru_cache.cpython-34.pyc,, -stem/descriptor/__pycache__/server_descriptor.cpython-34.pyc,, -stem/response/__pycache__/events.cpython-34.pyc,, -stem/__pycache__/process.cpython-34.pyc,, -stem/util/__pycache__/proc.cpython-34.pyc,, -stem/response/__pycache__/mapaddress.cpython-34.pyc,, -stem/response/__pycache__/getconf.cpython-34.pyc,, -stem/response/__pycache__/protocolinfo.cpython-34.pyc,, -stem/response/__pycache__/add_onion.cpython-34.pyc,, -stem/util/__pycache__/ordereddict.cpython-34.pyc,, -stem/descriptor/__pycache__/router_status_entry.cpython-34.pyc,, -stem/descriptor/__pycache__/hidden_service_descriptor.cpython-34.pyc,, -stem/__pycache__/version.cpython-34.pyc,, -stem/response/__pycache__/authchallenge.cpython-34.pyc,, 
-stem/util/__pycache__/connection.cpython-34.pyc,, -stem/response/__pycache__/__init__.cpython-34.pyc,, -stem/interpreter/__pycache__/arguments.cpython-34.pyc,, -stem/interpreter/__pycache__/help.cpython-34.pyc,, -stem/descriptor/__pycache__/extrainfo_descriptor.cpython-34.pyc,, -stem/descriptor/__pycache__/__init__.cpython-34.pyc,, -stem/util/__pycache__/tor_tools.cpython-34.pyc,, -stem/__pycache__/__init__.cpython-34.pyc,, -stem/descriptor/__pycache__/microdescriptor.cpython-34.pyc,, -stem/util/__pycache__/system.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/WHEEL deleted file mode 100644 index 6d9801a..0000000 --- a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: cp34-none-any - diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/metadata.json deleted file mode 100644 index 9d88d25..0000000 --- a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}}}, "generator": "bdist_wheel (0.29.0)", "metadata_version": "2.0", "name": "stem", "summary": "Stem is a Python controller library that allows applications to interact with", "version": "1.4.0"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/PKG-INFO new file mode 100644 index 0000000..c9d2712 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/PKG-INFO @@ -0,0 +1,33 @@ +Metadata-Version: 1.1 +Name: stem +Version: 1.7.0 +Summary: Stem is a Python controller library that allows applications to interact with Tor 
(https://www.torproject.org/). +Home-page: https://stem.torproject.org/ +Author: Damian Johnson +Author-email: atagar@torproject.org +License: LGPLv3 +Description: For tutorials and API documentation see `Stem's homepage `_. + + Quick Start + ----------- + + To install you can either use... + + :: + + pip install stem + + ... or install from the source tarball. Stem supports both the python 2.x and 3.x series. To use its python3 counterpart you simply need to install using that version of python. + + :: + + python3 setup.py install + + After that, give some `tutorials `_ a try! For questions or to discuss project ideas we're available on `irc `_ and the `tor-dev@ email list `_. +Keywords: tor onion controller +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3) +Classifier: Topic :: Security +Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/SOURCES.txt new file mode 100644 index 0000000..feb8060 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/SOURCES.txt @@ -0,0 +1,443 @@ +LICENSE +MANIFEST.in +README.md +cache_fallback_directories.py +cache_manual.py +requirements.txt +run_tests.py +tor-prompt +tox.ini +docs/Makefile +docs/api.rst +docs/change_log.rst +docs/conf.py +docs/contents.rst +docs/download.rst +docs/faq.rst +docs/index.rst +docs/republish +docs/republish.py +docs/roles.py +docs/tutorials.rst +docs/_static/bandwidth_graph_output.png +docs/_static/doctor.png +docs/_static/duck_duck_go_hidden_service.png +docs/_static/exit_map.png +docs/_static/favicon.png +docs/_static/haiku.css_t +docs/_static/hidden_service.png +docs/_static/locale_selection_output.png +docs/_static/logo.png +docs/_static/logo.svg 
+docs/_static/logo_orig.svg +docs/_static/manual_output.png +docs/_static/nyx.png +docs/_static/style.css +docs/_static/twitter_output.png +docs/_static/words_with.png +docs/_static/buttons/api.png +docs/_static/buttons/back.png +docs/_static/buttons/bug_tracker.png +docs/_static/buttons/change_log.png +docs/_static/buttons/download.png +docs/_static/buttons/faq.png +docs/_static/buttons/tutorials.png +docs/_static/buttons/resources/api.xcf +docs/_static/buttons/resources/back.xcf +docs/_static/buttons/resources/bug_tracker.xcf +docs/_static/buttons/resources/button_background.xcf +docs/_static/buttons/resources/change_log.xcf +docs/_static/buttons/resources/download.xcf +docs/_static/buttons/resources/faq.xcf +docs/_static/buttons/resources/tutorials.xcf +docs/_static/example/benchmark_metrics_lib.java +docs/_static/example/benchmark_server_descriptor_metrics_lib.java +docs/_static/example/benchmark_server_descriptor_stem.py +docs/_static/example/benchmark_server_descriptor_zoossh.go +docs/_static/example/benchmark_stem.py +docs/_static/example/benchmark_zoossh.go +docs/_static/example/client_usage_using_pycurl.py +docs/_static/example/client_usage_using_socksipy.py +docs/_static/example/compare_flags.py +docs/_static/example/create_descriptor.py +docs/_static/example/create_descriptor_content.py +docs/_static/example/current_descriptors.py +docs/_static/example/custom_path_selection.py +docs/_static/example/descriptor_from_orport.py +docs/_static/example/descriptor_from_tor_control_socket.py +docs/_static/example/descriptor_from_tor_data_directory.py +docs/_static/example/download_descriptor.py +docs/_static/example/ephemeral_hidden_services.py +docs/_static/example/event_listening.py +docs/_static/example/exit_used.py +docs/_static/example/fibonacci_multiprocessing.py +docs/_static/example/fibonacci_threaded.py +docs/_static/example/get_hidden_service_descriptor.py +docs/_static/example/hello_world.py +docs/_static/example/introduction_points.py 
+docs/_static/example/list_circuits.py +docs/_static/example/load_test.py +docs/_static/example/manual_config_options.py +docs/_static/example/outdated_relays.py +docs/_static/example/past_descriptors.py +docs/_static/example/persisting_a_consensus.py +docs/_static/example/persisting_a_consensus_with_parse_file.py +docs/_static/example/read_with_parse_file.py +docs/_static/example/reading_twitter.py +docs/_static/example/relay_connections.py +docs/_static/example/resuming_ephemeral_hidden_service.py +docs/_static/example/running_hidden_service.py +docs/_static/example/saving_and_loading_descriptors.py +docs/_static/example/tor_descriptors.py +docs/_static/example/utilities.py +docs/_static/example/validate_descriptor_content.py +docs/_static/example/votes_by_bandwidth_authorities.py +docs/_static/example/words_with.py +docs/_static/label/archlinux.png +docs/_static/label/debian.png +docs/_static/label/doctor.png +docs/_static/label/double_double_toil_and_trouble.png +docs/_static/label/down_the_rabbit_hole.png +docs/_static/label/east_of_the_sun.png +docs/_static/label/exit_map.png +docs/_static/label/fedora.png +docs/_static/label/freebsd.png +docs/_static/label/gentoo.png +docs/_static/label/mirror_mirror_on_the_wall.png +docs/_static/label/nyx.png +docs/_static/label/openbsd.png +docs/_static/label/osx.png +docs/_static/label/over_the_river.png +docs/_static/label/python_package_index.png +docs/_static/label/redhat.png +docs/_static/label/slackware.png +docs/_static/label/source_repository.png +docs/_static/label/the_little_relay_that_could.png +docs/_static/label/to_russia_with_love.png +docs/_static/label/tortoise_and_the_hare.png +docs/_static/label/ubuntu.png +docs/_static/label/resources/archlinux.xcf +docs/_static/label/resources/debian.xcf +docs/_static/label/resources/doctor.xcf +docs/_static/label/resources/double_double_toil_and_trouble.xcf +docs/_static/label/resources/down_the_rabbit_hole.xcf +docs/_static/label/resources/east_of_the_sun.xcf 
+docs/_static/label/resources/exit_map.xcf +docs/_static/label/resources/fedora.xcf +docs/_static/label/resources/freebsd.xcf +docs/_static/label/resources/gentoo.xcf +docs/_static/label/resources/mirror_mirror_on_the_wall.xcf +docs/_static/label/resources/nyx.xcf +docs/_static/label/resources/openbsd.xcf +docs/_static/label/resources/osx.xcf +docs/_static/label/resources/over_the_river.xcf +docs/_static/label/resources/python_package_index.xcf +docs/_static/label/resources/redhat.xcf +docs/_static/label/resources/slackware.xcf +docs/_static/label/resources/source_repository.xcf +docs/_static/label/resources/the_little_relay_that_could.xcf +docs/_static/label/resources/to_russia_with_love.xcf +docs/_static/label/resources/tortoise_and_the_hare.xcf +docs/_static/label/resources/ubuntu.xcf +docs/_static/prompt/attach.png +docs/_static/prompt/events_command.png +docs/_static/prompt/events_variable.png +docs/_static/prompt/help.png +docs/_static/prompt/info.png +docs/_static/prompt/python.png +docs/_static/prompt/run.png +docs/_static/prompt/run_events.png +docs/_static/prompt/run_events_pipe.png +docs/_static/prompt/run_file.png +docs/_static/prompt/starting_tor.png +docs/_static/prompt/tor_commands.png +docs/_static/resources/exit_map_alt.png +docs/_static/section/download/archlinux.png +docs/_static/section/download/debian.png +docs/_static/section/download/fedora.png +docs/_static/section/download/freebsd.png +docs/_static/section/download/gentoo.png +docs/_static/section/download/git.png +docs/_static/section/download/git_alt.png +docs/_static/section/download/openbsd.png +docs/_static/section/download/osx.png +docs/_static/section/download/pypi.png +docs/_static/section/download/redhat.png +docs/_static/section/download/slackware.png +docs/_static/section/download/ubuntu.png +docs/_static/section/download/resources/fedora.svg +docs/_static/section/tutorials/cauldron.png +docs/_static/section/tutorials/mad_hatter.png +docs/_static/section/tutorials/mirror.png 
+docs/_static/section/tutorials/riding_hood.png +docs/_static/section/tutorials/soviet.png +docs/_static/section/tutorials/tortoise.png +docs/_static/section/tutorials/train.png +docs/_static/section/tutorials/windrose.png +docs/_static/section/tutorials/resources/mad_hatter.xcf +docs/_static/section/tutorials/resources/riding_hood.svg +docs/_static/section/tutorials/resources/tortoise_large.png +docs/_static/section/tutorials/resources/windrose.svg +docs/_static/section/tutorials/resources/windrose.xcf +docs/_templates/layout.html +docs/api/connection.rst +docs/api/control.rst +docs/api/directory.rst +docs/api/exit_policy.rst +docs/api/manual.rst +docs/api/process.rst +docs/api/response.rst +docs/api/socket.rst +docs/api/version.rst +docs/api/descriptor/certificate.rst +docs/api/descriptor/descriptor.rst +docs/api/descriptor/export.rst +docs/api/descriptor/extrainfo_descriptor.rst +docs/api/descriptor/hidden_service_descriptor.rst +docs/api/descriptor/microdescriptor.rst +docs/api/descriptor/networkstatus.rst +docs/api/descriptor/reader.rst +docs/api/descriptor/remote.rst +docs/api/descriptor/router_status_entry.rst +docs/api/descriptor/server_descriptor.rst +docs/api/descriptor/tordnsel.rst +docs/api/util/conf.rst +docs/api/util/connection.rst +docs/api/util/enum.rst +docs/api/util/init.rst +docs/api/util/log.rst +docs/api/util/proc.rst +docs/api/util/str_tools.rst +docs/api/util/system.rst +docs/api/util/term.rst +docs/api/util/test_tools.rst +docs/api/util/tor_tools.rst +docs/tutorials/double_double_toil_and_trouble.rst +docs/tutorials/down_the_rabbit_hole.rst +docs/tutorials/east_of_the_sun.rst +docs/tutorials/mirror_mirror_on_the_wall.rst +docs/tutorials/over_the_river.rst +docs/tutorials/the_little_relay_that_could.rst +docs/tutorials/to_russia_with_love.rst +docs/tutorials/tortoise_and_the_hare.rst +docs/tutorials/examples/compare_flags.rst +docs/tutorials/examples/download_descriptor.rst +docs/tutorials/examples/exit_used.rst 
+docs/tutorials/examples/list_circuits.rst +docs/tutorials/examples/outdated_relays.rst +docs/tutorials/examples/persisting_a_consensus.rst +docs/tutorials/examples/relay_connections.rst +docs/tutorials/examples/votes_by_bandwidth_authorities.rst +stem/__init__.py +stem/cached_fallbacks.cfg +stem/cached_manual.sqlite +stem/connection.py +stem/control.py +stem/directory.py +stem/exit_policy.py +stem/manual.py +stem/prereq.py +stem/process.py +stem/settings.cfg +stem/socket.py +stem/version.py +stem.egg-info/PKG-INFO +stem.egg-info/SOURCES.txt +stem.egg-info/dependency_links.txt +stem.egg-info/top_level.txt +stem/client/__init__.py +stem/client/cell.py +stem/client/datatype.py +stem/descriptor/__init__.py +stem/descriptor/certificate.py +stem/descriptor/export.py +stem/descriptor/extrainfo_descriptor.py +stem/descriptor/hidden_service_descriptor.py +stem/descriptor/microdescriptor.py +stem/descriptor/networkstatus.py +stem/descriptor/reader.py +stem/descriptor/remote.py +stem/descriptor/router_status_entry.py +stem/descriptor/server_descriptor.py +stem/descriptor/tordnsel.py +stem/interpreter/__init__.py +stem/interpreter/arguments.py +stem/interpreter/autocomplete.py +stem/interpreter/commands.py +stem/interpreter/help.py +stem/interpreter/settings.cfg +stem/response/__init__.py +stem/response/add_onion.py +stem/response/authchallenge.py +stem/response/events.py +stem/response/getconf.py +stem/response/getinfo.py +stem/response/mapaddress.py +stem/response/protocolinfo.py +stem/util/__init__.py +stem/util/conf.py +stem/util/connection.py +stem/util/enum.py +stem/util/log.py +stem/util/lru_cache.py +stem/util/ordereddict.py +stem/util/ports.cfg +stem/util/proc.py +stem/util/str_tools.py +stem/util/system.py +stem/util/term.py +stem/util/test_tools.py +stem/util/tor_tools.py +test/__init__.py +test/arguments.py +test/network.py +test/output.py +test/require.py +test/runner.py +test/settings.cfg +test/task.py +test/integ/__init__.py +test/integ/installation.py 
+test/integ/interpreter.py +test/integ/manual.py +test/integ/process.py +test/integ/version.py +test/integ/client/__init__.py +test/integ/client/connection.py +test/integ/connection/__init__.py +test/integ/connection/authentication.py +test/integ/connection/connect.py +test/integ/control/__init__.py +test/integ/control/base_controller.py +test/integ/control/controller.py +test/integ/descriptor/__init__.py +test/integ/descriptor/extrainfo_descriptor.py +test/integ/descriptor/microdescriptor.py +test/integ/descriptor/networkstatus.py +test/integ/descriptor/remote.py +test/integ/descriptor/server_descriptor.py +test/integ/directory/__init__.py +test/integ/directory/authority.py +test/integ/directory/fallback.py +test/integ/response/__init__.py +test/integ/response/protocolinfo.py +test/integ/socket/__init__.py +test/integ/socket/control_message.py +test/integ/socket/control_socket.py +test/integ/util/__init__.py +test/integ/util/conf.py +test/integ/util/connection.py +test/integ/util/proc.py +test/integ/util/system.py +test/unit/__init__.py +test/unit/doctest.py +test/unit/endpoint.py +test/unit/installation.py +test/unit/manual.py +test/unit/tor_man_example +test/unit/tor_man_with_unknown +test/unit/tutorial.py +test/unit/tutorial_examples.py +test/unit/version.py +test/unit/client/__init__.py +test/unit/client/address.py +test/unit/client/cell.py +test/unit/client/certificate.py +test/unit/client/kdf.py +test/unit/client/link_protocol.py +test/unit/client/size.py +test/unit/client/data/new_link_cells +test/unit/connection/__init__.py +test/unit/connection/authentication.py +test/unit/connection/connect.py +test/unit/control/__init__.py +test/unit/control/controller.py +test/unit/descriptor/__init__.py +test/unit/descriptor/certificate.py +test/unit/descriptor/export.py +test/unit/descriptor/extrainfo_descriptor.py +test/unit/descriptor/hidden_service_descriptor.py +test/unit/descriptor/microdescriptor.py +test/unit/descriptor/reader.py 
+test/unit/descriptor/remote.py +test/unit/descriptor/router_status_entry.py +test/unit/descriptor/server_descriptor.py +test/unit/descriptor/tordnsel.py +test/unit/descriptor/data/bridge_descriptor +test/unit/descriptor/data/bridge_descriptor_with_ed25519 +test/unit/descriptor/data/bridge_extrainfo_descriptor_with_ed25519 +test/unit/descriptor/data/bridge_network_status +test/unit/descriptor/data/cached-certs +test/unit/descriptor/data/cached-consensus +test/unit/descriptor/data/cached-consensus-v2 +test/unit/descriptor/data/cached-microdescs +test/unit/descriptor/data/compressed_gzip +test/unit/descriptor/data/compressed_identity +test/unit/descriptor/data/compressed_lzma +test/unit/descriptor/data/compressed_zstd +test/unit/descriptor/data/cr_in_contact_line +test/unit/descriptor/data/descriptor_archive.tar +test/unit/descriptor/data/descriptor_archive.tar.bz2 +test/unit/descriptor/data/descriptor_archive.tar.gz +test/unit/descriptor/data/example_descriptor +test/unit/descriptor/data/extrainfo_bridge_descriptor +test/unit/descriptor/data/extrainfo_bridge_descriptor_multiple +test/unit/descriptor/data/extrainfo_descriptor_with_ed25519 +test/unit/descriptor/data/extrainfo_relay_descriptor +test/unit/descriptor/data/hidden_service_basic_auth +test/unit/descriptor/data/hidden_service_duckduckgo +test/unit/descriptor/data/hidden_service_facebook +test/unit/descriptor/data/hidden_service_stealth_auth +test/unit/descriptor/data/metrics_cert +test/unit/descriptor/data/metrics_consensus +test/unit/descriptor/data/metrics_server_desc_multiple +test/unit/descriptor/data/metrics_vote +test/unit/descriptor/data/negative_uptime +test/unit/descriptor/data/non-ascii_descriptor +test/unit/descriptor/data/old_descriptor +test/unit/descriptor/data/server_descriptor_with_ed25519 +test/unit/descriptor/data/unparseable/cached-microdesc-consensus_with_carriage_returns +test/unit/descriptor/data/unparseable/extrainfo_nonascii_v3_reqs 
+test/unit/descriptor/data/unparseable/new_metrics_type +test/unit/descriptor/data/unparseable/riddle +test/unit/descriptor/data/unparseable/tiny.png +test/unit/descriptor/data/unparseable/vote +test/unit/descriptor/networkstatus/__init__.py +test/unit/descriptor/networkstatus/bridge_document.py +test/unit/descriptor/networkstatus/directory_authority.py +test/unit/descriptor/networkstatus/document_v2.py +test/unit/descriptor/networkstatus/document_v3.py +test/unit/descriptor/networkstatus/key_certificate.py +test/unit/directory/__init__.py +test/unit/directory/authority.py +test/unit/directory/fallback.py +test/unit/exit_policy/__init__.py +test/unit/exit_policy/policy.py +test/unit/exit_policy/rule.py +test/unit/interpreter/__init__.py +test/unit/interpreter/arguments.py +test/unit/interpreter/autocomplete.py +test/unit/interpreter/commands.py +test/unit/interpreter/help.py +test/unit/response/__init__.py +test/unit/response/add_onion.py +test/unit/response/authchallenge.py +test/unit/response/control_line.py +test/unit/response/control_message.py +test/unit/response/events.py +test/unit/response/getconf.py +test/unit/response/getinfo.py +test/unit/response/mapaddress.py +test/unit/response/protocolinfo.py +test/unit/response/singleline.py +test/unit/util/__init__.py +test/unit/util/conf.py +test/unit/util/connection.py +test/unit/util/enum.py +test/unit/util/log.py +test/unit/util/proc.py +test/unit/util/str_tools.py +test/unit/util/system.py +test/unit/util/term.py +test/unit/util/text_file +test/unit/util/tor_tools.py \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/installed-files.txt 
b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/installed-files.txt new file mode 100644 index 0000000..0ea77fe --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/installed-files.txt @@ -0,0 +1,112 @@ +../../../../bin/tor-prompt +../stem/__init__.py +../stem/__pycache__/__init__.cpython-37.pyc +../stem/__pycache__/connection.cpython-37.pyc +../stem/__pycache__/control.cpython-37.pyc +../stem/__pycache__/directory.cpython-37.pyc +../stem/__pycache__/exit_policy.cpython-37.pyc +../stem/__pycache__/manual.cpython-37.pyc +../stem/__pycache__/prereq.cpython-37.pyc +../stem/__pycache__/process.cpython-37.pyc +../stem/__pycache__/socket.cpython-37.pyc +../stem/__pycache__/version.cpython-37.pyc +../stem/cached_fallbacks.cfg +../stem/cached_manual.sqlite +../stem/client/__init__.py +../stem/client/__pycache__/__init__.cpython-37.pyc +../stem/client/__pycache__/cell.cpython-37.pyc +../stem/client/__pycache__/datatype.cpython-37.pyc +../stem/client/cell.py +../stem/client/datatype.py +../stem/connection.py +../stem/control.py +../stem/descriptor/__init__.py +../stem/descriptor/__pycache__/__init__.cpython-37.pyc +../stem/descriptor/__pycache__/certificate.cpython-37.pyc +../stem/descriptor/__pycache__/export.cpython-37.pyc +../stem/descriptor/__pycache__/extrainfo_descriptor.cpython-37.pyc +../stem/descriptor/__pycache__/hidden_service_descriptor.cpython-37.pyc +../stem/descriptor/__pycache__/microdescriptor.cpython-37.pyc +../stem/descriptor/__pycache__/networkstatus.cpython-37.pyc +../stem/descriptor/__pycache__/reader.cpython-37.pyc +../stem/descriptor/__pycache__/remote.cpython-37.pyc +../stem/descriptor/__pycache__/router_status_entry.cpython-37.pyc +../stem/descriptor/__pycache__/server_descriptor.cpython-37.pyc +../stem/descriptor/__pycache__/tordnsel.cpython-37.pyc +../stem/descriptor/certificate.py +../stem/descriptor/export.py +../stem/descriptor/extrainfo_descriptor.py +../stem/descriptor/hidden_service_descriptor.py 
+../stem/descriptor/microdescriptor.py +../stem/descriptor/networkstatus.py +../stem/descriptor/reader.py +../stem/descriptor/remote.py +../stem/descriptor/router_status_entry.py +../stem/descriptor/server_descriptor.py +../stem/descriptor/tordnsel.py +../stem/directory.py +../stem/exit_policy.py +../stem/interpreter/__init__.py +../stem/interpreter/__pycache__/__init__.cpython-37.pyc +../stem/interpreter/__pycache__/arguments.cpython-37.pyc +../stem/interpreter/__pycache__/autocomplete.cpython-37.pyc +../stem/interpreter/__pycache__/commands.cpython-37.pyc +../stem/interpreter/__pycache__/help.cpython-37.pyc +../stem/interpreter/arguments.py +../stem/interpreter/autocomplete.py +../stem/interpreter/commands.py +../stem/interpreter/help.py +../stem/interpreter/settings.cfg +../stem/manual.py +../stem/prereq.py +../stem/process.py +../stem/response/__init__.py +../stem/response/__pycache__/__init__.cpython-37.pyc +../stem/response/__pycache__/add_onion.cpython-37.pyc +../stem/response/__pycache__/authchallenge.cpython-37.pyc +../stem/response/__pycache__/events.cpython-37.pyc +../stem/response/__pycache__/getconf.cpython-37.pyc +../stem/response/__pycache__/getinfo.cpython-37.pyc +../stem/response/__pycache__/mapaddress.cpython-37.pyc +../stem/response/__pycache__/protocolinfo.cpython-37.pyc +../stem/response/add_onion.py +../stem/response/authchallenge.py +../stem/response/events.py +../stem/response/getconf.py +../stem/response/getinfo.py +../stem/response/mapaddress.py +../stem/response/protocolinfo.py +../stem/settings.cfg +../stem/socket.py +../stem/util/__init__.py +../stem/util/__pycache__/__init__.cpython-37.pyc +../stem/util/__pycache__/conf.cpython-37.pyc +../stem/util/__pycache__/connection.cpython-37.pyc +../stem/util/__pycache__/enum.cpython-37.pyc +../stem/util/__pycache__/log.cpython-37.pyc +../stem/util/__pycache__/lru_cache.cpython-37.pyc +../stem/util/__pycache__/ordereddict.cpython-37.pyc +../stem/util/__pycache__/proc.cpython-37.pyc 
+../stem/util/__pycache__/str_tools.cpython-37.pyc +../stem/util/__pycache__/system.cpython-37.pyc +../stem/util/__pycache__/term.cpython-37.pyc +../stem/util/__pycache__/test_tools.cpython-37.pyc +../stem/util/__pycache__/tor_tools.cpython-37.pyc +../stem/util/conf.py +../stem/util/connection.py +../stem/util/enum.py +../stem/util/log.py +../stem/util/lru_cache.py +../stem/util/ordereddict.py +../stem/util/ports.cfg +../stem/util/proc.py +../stem/util/str_tools.py +../stem/util/system.py +../stem/util/term.py +../stem/util/test_tools.py +../stem/util/tor_tools.py +../stem/version.py +PKG-INFO +SOURCES.txt +dependency_links.txt +top_level.txt diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/stem-1.4.0.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/stem-1.7.0.egg-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/stem/__init__.py b/Shared/lib/python3.4/site-packages/stem/__init__.py index 5134c2f..9a9aec5 100644 --- a/Shared/lib/python3.4/site-packages/stem/__init__.py +++ b/Shared/lib/python3.4/site-packages/stem/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -8,14 +8,23 @@ Library for working with the tor process. :: + Endpoint - Networking endpoint. + |- ORPort - Tor relay endpoint. + +- DirPort - Descriptor mirror. + ControllerError - Base exception raised when using the controller. |- ProtocolError - Malformed socket data. + | |- OperationFailed - Tor was unable to successfully complete the operation. | |- UnsatisfiableRequest - Tor was unable to satisfy a valid request. - | | +- CircuitExtensionFailed - Attempt to make or extend a circuit failed. 
- | |- DescriptorUnavailable - The given relay descriptor is unavailable. + | | |- CircuitExtensionFailed - Attempt to make or extend a circuit failed. + | | |- DescriptorUnavailable - The given relay descriptor is unavailable. + | | +- Timeout - Caller requested timeout was reached. + | | + | | | +- InvalidRequest - Invalid request. | +- InvalidArguments - Invalid request parameters. + | +- SocketError - Communication with the socket failed. +- SocketClosed - Socket has been shut down. @@ -37,6 +46,9 @@ Library for working with the tor process. Signals that the tor process will accept. + .. versionchanged:: 1.3.0 + Added the HEARTBEAT signal. + ========================= =========== Signal Description ========================= =========== @@ -57,37 +69,45 @@ Library for working with the tor process. **Note:** The BADDIRECTORY flag was `removed from tor `_. - ================= =========== - Flag Description - ================= =========== - **AUTHORITY** relay is a directory authority - **BADEXIT** relay shouldn't be used as an exit due to being either problematic or malicious (`wiki `_) - **BADDIRECTORY** relay shouldn't be used for directory information - **EXIT** relay's exit policy makes it more useful as an exit rather than middle hop - **FAST** relay's suitable for high-bandwidth circuits - **GUARD** relay's suitable for being an entry guard (first hop) - **HSDIR** relay is being used as a v2 hidden service directory - **NAMED** relay can be referred to by its nickname - **RUNNING** relay is currently usable - **STABLE** relay's suitable for long-lived circuits - **UNNAMED** relay isn't currently bound to a nickname - **V2DIR** relay supports the v2 directory protocol - **VALID** relay has been validated - ================= =========== + .. versionchanged:: 1.5.0 + Added the NO_ED_CONSENSUS flag. 
+ + =================== =========== + Flag Description + =================== =========== + **AUTHORITY** relay is a directory authority + **BADEXIT** relay shouldn't be used as an exit due to being either problematic or malicious + **BADDIRECTORY** relay shouldn't be used for directory information + **EXIT** relay's exit policy makes it more useful as an exit rather than middle hop + **FAST** relay's suitable for high-bandwidth circuits + **GUARD** relay's suitable for being an entry guard (first hop) + **HSDIR** relay is being used as a v2 hidden service directory + **NAMED** relay can be referred to by its nickname + **NO_ED_CONSENSUS** relay's Ed25519 doesn't reflrect the consensus + **RUNNING** relay is currently usable + **STABLE** relay's suitable for long-lived circuits + **UNNAMED** relay isn't currently bound to a nickname + **V2DIR** relay supports the v2 directory protocol + **VALID** relay has been validated + =================== =========== .. data:: CircStatus (enum) Statuses that a circuit can be in. Tor may provide statuses not in this enum. - ============ =========== - CircStatus Description - ============ =========== - **LAUNCHED** new circuit was created - **BUILT** circuit finished being created and can accept traffic - **EXTENDED** circuit has been extended by a hop - **FAILED** circuit construction failed - **CLOSED** circuit has been closed - ============ =========== + .. versionchanged:: 1.6.0 + Added the GUARD_WAIT signal. + + ============== =========== + CircStatus Description + ============== =========== + **LAUNCHED** new circuit was created + **BUILT** circuit finished being created and can accept traffic + **GUARD_WAIT** waiting to see if there's a circuit with a better guard before using + **EXTENDED** circuit has been extended by a hop + **FAILED** circuit construction failed + **CLOSED** circuit has been closed + ============== =========== .. data:: CircBuildFlag (enum) @@ -378,6 +398,8 @@ Library for working with the tor process. 
The meaning behind these values is a bit unclear, pending :trac:`10086`. + .. versionadded:: 1.2.0 + =============== =========== ConnectionType Description =============== =========== @@ -390,6 +412,8 @@ Library for working with the tor process. Bucket categories of TB_EMPTY events. + .. versionadded:: 1.2.0 + =============== =========== TokenBucket Description =============== =========== @@ -402,6 +426,14 @@ Library for working with the tor process. Action beeing taken in a HS_DESC event. + .. versionadded:: 1.2.0 + + .. versionchanged:: 1.4.0 + Added the UPLOAD and UPLOADED actions. + + .. versionchanged:: 1.5.0 + Added the CREATED action. + =============== =========== HSDescAction Description =============== =========== @@ -411,12 +443,21 @@ Library for working with the tor process. **UPLOADED** descriptor was uploaded with HSPOST **IGNORE** fetched descriptor was ignored because we already have its v0 descriptor **FAILED** we were unable to retrieve the descriptor + **CREATED** hidden service descriptor was just created =============== =========== .. data:: HSDescReason (enum) Reason for the hidden service descriptor to fail to be fetched. + .. versionadded:: 1.3.0 + + .. versionchanged:: 1.4.0 + Added the UPLOAD_REJECTED reason. + + .. versionchanged:: 1.6.0 + Added the QUERY_NO_HSDIR reason. + =================== =========== HSDescReason Description =================== =========== @@ -424,6 +465,7 @@ Library for working with the tor process. **QUERY_REJECTED** hidden service directory refused to provide the descriptor **UPLOAD_REJECTED** descriptor was rejected by the hidden service directory **NOT_FOUND** descriptor with the given identifier wasn't found + **QUERY_NO_HSDIR** no hidden service directory was found **UNEXPECTED** failure type is unknown =================== =========== @@ -431,6 +473,8 @@ Library for working with the tor process. Type of authentication being used for a HS_DESC event. + .. 
versionadded:: 1.2.0 + ================= =========== HSAuth Description ================= =========== @@ -441,18 +485,23 @@ Library for working with the tor process. ================= =========== """ -__version__ = '1.4.0' +import stem.util +import stem.util.enum + +__version__ = '1.7.0' __author__ = 'Damian Johnson' __contact__ = 'atagar@torproject.org' __url__ = 'https://stem.torproject.org/' __license__ = 'LGPLv3' __all__ = [ + 'client', 'descriptor', 'response', 'util', 'connection', 'control', + 'directory', 'exit_policy', 'prereq', 'process', @@ -464,6 +513,7 @@ __all__ = [ 'UnsatisfiableRequest', 'CircuitExtensionFailed', 'DescriptorUnavailable', + 'Timeout', 'InvalidRequest', 'InvalidArguments', 'SocketError', @@ -494,16 +544,8 @@ __all__ = [ 'TimeoutSetType', ] -import stem.prereq - -if stem.prereq.is_python_3(): - str_type = str - int_type = int -else: - str_type = unicode - int_type = long - -import stem.util.enum +# Constant that we use by default for our User-Agent when downloading descriptors +stem.USER_AGENT = 'Stem/%s' % __version__ # Constant to indicate an undefined argument default. Usually we'd use None for # this, but users will commonly provide None as the argument so need something @@ -512,6 +554,57 @@ import stem.util.enum UNDEFINED = '' +class Endpoint(object): + """ + Tor endpint that can be connected to. + + .. 
versionadded:: 1.7.0 + + :var str address: ip address of the endpoint + :var int port: port of the endpoint + """ + + def __init__(self, address, port): + if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address): + raise ValueError("'%s' isn't a valid IPv4 or IPv6 address" % address) + elif not stem.util.connection.is_valid_port(port): + raise ValueError("'%s' isn't a valid port" % port) + + self.address = address + self.port = int(port) + + def __hash__(self): + return stem.util._hash_attr(self, 'address', 'port', cache = True) + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, Endpoint) else False + + def __ne__(self, other): + return not self == other + + +class ORPort(Endpoint): + """ + Tor relay's ORPort. The endpoint on which Tor accepts relay traffic. + + :var list link_protocols: link protocol version we're willing to establish + """ + + def __init__(self, address, port, link_protocols = None): + super(ORPort, self).__init__(address, port) + self.link_protocols = link_protocols + + def __hash__(self): + return stem.util._hash_attr(self, 'link_protocols', parent = Endpoint, cache = True) + + +class DirPort(Endpoint): + """ + Tor relay's DirPort. The endpoint on which Tor provides http access for + downloading descriptors. + """ + + class ControllerError(Exception): 'Base error for controller communication issues.' @@ -553,15 +646,29 @@ class CircuitExtensionFailed(UnsatisfiableRequest): self.circ = circ -class DescriptorUnavailable(OperationFailed): +class DescriptorUnavailable(UnsatisfiableRequest): """ Tor was unable to provide a descriptor for the given relay. + + .. versionchanged:: 1.7.0 + Subclassed under UnsatisfiableRequest rather than OperationFailed. """ def __init__(self, message): super(DescriptorUnavailable, self).__init__(message = message) +class Timeout(UnsatisfiableRequest): + """ + Timeout requested by the caller was reached. + + .. 
versionadded:: 1.7.0 + """ + + def __init__(self, message): + super(Timeout, self).__init__(message = message) + + class InvalidRequest(OperationFailed): """ Exception raised when the request was invalid or malformed. @@ -590,6 +697,7 @@ class SocketError(ControllerError): class SocketClosed(SocketError): 'Control socket was closed before completing the message.' + Runlevel = stem.util.enum.UppercaseEnum( 'DEBUG', 'INFO', @@ -607,6 +715,7 @@ Flag = stem.util.enum.Enum( ('GUARD', 'Guard'), ('HSDIR', 'HSDir'), ('NAMED', 'Named'), + ('NO_ED_CONSENSUS', 'NoEdConsensus'), ('RUNNING', 'Running'), ('STABLE', 'Stable'), ('UNNAMED', 'Unnamed'), @@ -634,6 +743,7 @@ Signal = stem.util.enum.UppercaseEnum( CircStatus = stem.util.enum.UppercaseEnum( 'LAUNCHED', 'BUILT', + 'GUARD_WAIT', 'EXTENDED', 'FAILED', 'CLOSED', @@ -815,6 +925,7 @@ HSDescAction = stem.util.enum.UppercaseEnum( 'UPLOADED', 'IGNORE', 'FAILED', + 'CREATED', ) HSDescReason = stem.util.enum.UppercaseEnum( @@ -822,6 +933,7 @@ HSDescReason = stem.util.enum.UppercaseEnum( 'QUERY_REJECTED', 'UPLOAD_REJECTED', 'NOT_FOUND', + 'QUERY_NO_HSDIR', 'UNEXPECTED', ) @@ -831,3 +943,6 @@ HSAuth = stem.util.enum.UppercaseEnum( 'STEALTH_AUTH', 'UNKNOWN', ) + + +import stem.util.connection # importing afterward to avoid circular dependency diff --git a/Shared/lib/python3.4/site-packages/stem/cached_fallbacks.cfg b/Shared/lib/python3.4/site-packages/stem/cached_fallbacks.cfg new file mode 100644 index 0000000..1e88c7e --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/cached_fallbacks.cfg @@ -0,0 +1,855 @@ +tor_commit a42e52dded44a2c58a7200511e27a5c0e01cd78b +stem_commit 4d7cc882b5b8966f69232d8489bb5b07226abc81 +header.timestamp 20180106205601 +header.version 2.0.0 +header.type fallback +001524DD403D729F08F7E5D77813EF12756CFA8D.address 185.13.39.197 +001524DD403D729F08F7E5D77813EF12756CFA8D.or_port 443 +001524DD403D729F08F7E5D77813EF12756CFA8D.dir_port 80 +001524DD403D729F08F7E5D77813EF12756CFA8D.nickname Neldoreth 
+001524DD403D729F08F7E5D77813EF12756CFA8D.has_extrainfo false +0111BA9B604669E636FFD5B503F382A4B7AD6E80.address 176.10.104.240 +0111BA9B604669E636FFD5B503F382A4B7AD6E80.or_port 443 +0111BA9B604669E636FFD5B503F382A4B7AD6E80.dir_port 80 +0111BA9B604669E636FFD5B503F382A4B7AD6E80.nickname DigiGesTor1e1 +0111BA9B604669E636FFD5B503F382A4B7AD6E80.has_extrainfo false +025B66CEBC070FCB0519D206CF0CF4965C20C96E.address 185.100.85.61 +025B66CEBC070FCB0519D206CF0CF4965C20C96E.or_port 443 +025B66CEBC070FCB0519D206CF0CF4965C20C96E.dir_port 80 +025B66CEBC070FCB0519D206CF0CF4965C20C96E.nickname nibbana +025B66CEBC070FCB0519D206CF0CF4965C20C96E.has_extrainfo false +0756B7CD4DFC8182BE23143FAC0642F515182CEB.address 5.9.110.236 +0756B7CD4DFC8182BE23143FAC0642F515182CEB.or_port 9001 +0756B7CD4DFC8182BE23143FAC0642F515182CEB.dir_port 9030 +0756B7CD4DFC8182BE23143FAC0642F515182CEB.nickname rueckgrat +0756B7CD4DFC8182BE23143FAC0642F515182CEB.has_extrainfo true +0756B7CD4DFC8182BE23143FAC0642F515182CEB.orport6_address 2a01:4f8:162:51e2::2 +0756B7CD4DFC8182BE23143FAC0642F515182CEB.orport6_port 9001 +0B85617241252517E8ECF2CFC7F4C1A32DCD153F.address 163.172.149.155 +0B85617241252517E8ECF2CFC7F4C1A32DCD153F.or_port 443 +0B85617241252517E8ECF2CFC7F4C1A32DCD153F.dir_port 80 +0B85617241252517E8ECF2CFC7F4C1A32DCD153F.nickname niij02 +0B85617241252517E8ECF2CFC7F4C1A32DCD153F.has_extrainfo false +0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.address 5.39.92.199 +0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.or_port 443 +0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.dir_port 80 +0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.nickname BaelorTornodePw +0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.has_extrainfo false +0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.orport6_address 2001:41d0:8:b1c7::1 +0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.orport6_port 443 +0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.address 163.172.25.118 +0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.or_port 22 +0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.dir_port 80 
+0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.nickname torpidsFRonline4 +0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.has_extrainfo false +0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.address 178.62.197.82 +0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.or_port 443 +0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.dir_port 80 +0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.nickname HY100 +0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.has_extrainfo false +0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.address 185.100.86.100 +0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.or_port 443 +0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.dir_port 80 +0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.nickname saveyourprivacyex1 +0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.has_extrainfo false +11DF0017A43AF1F08825CD5D973297F81AB00FF3.address 37.120.174.249 +11DF0017A43AF1F08825CD5D973297F81AB00FF3.or_port 443 +11DF0017A43AF1F08825CD5D973297F81AB00FF3.dir_port 80 +11DF0017A43AF1F08825CD5D973297F81AB00FF3.nickname gGDHjdcC6zAlM8k08lX +11DF0017A43AF1F08825CD5D973297F81AB00FF3.has_extrainfo false +11DF0017A43AF1F08825CD5D973297F81AB00FF3.orport6_address 2a03:4000:6:724c:df98:15f9:b34d:443 +11DF0017A43AF1F08825CD5D973297F81AB00FF3.orport6_port 443 +12AD30E5D25AA67F519780E2111E611A455FDC89.address 193.11.114.43 +12AD30E5D25AA67F519780E2111E611A455FDC89.or_port 9001 +12AD30E5D25AA67F519780E2111E611A455FDC89.dir_port 9030 +12AD30E5D25AA67F519780E2111E611A455FDC89.nickname mdfnet1 +12AD30E5D25AA67F519780E2111E611A455FDC89.has_extrainfo false +12AD30E5D25AA67F519780E2111E611A455FDC89.orport6_address 2001:6b0:30:1000::99 +12AD30E5D25AA67F519780E2111E611A455FDC89.orport6_port 9050 +12FD624EE73CEF37137C90D38B2406A66F68FAA2.address 37.157.195.87 +12FD624EE73CEF37137C90D38B2406A66F68FAA2.or_port 443 +12FD624EE73CEF37137C90D38B2406A66F68FAA2.dir_port 8030 +12FD624EE73CEF37137C90D38B2406A66F68FAA2.nickname thanatosCZ +12FD624EE73CEF37137C90D38B2406A66F68FAA2.has_extrainfo false +136F9299A5009A4E0E96494E723BDB556FB0A26B.address 178.16.208.59 
+136F9299A5009A4E0E96494E723BDB556FB0A26B.or_port 443 +136F9299A5009A4E0E96494E723BDB556FB0A26B.dir_port 80 +136F9299A5009A4E0E96494E723BDB556FB0A26B.nickname bakunin2 +136F9299A5009A4E0E96494E723BDB556FB0A26B.has_extrainfo false +136F9299A5009A4E0E96494E723BDB556FB0A26B.orport6_address 2a00:1c20:4089:1234:bff6:e1bb:1ce3:8dc6 +136F9299A5009A4E0E96494E723BDB556FB0A26B.orport6_port 443 +16102E458460349EE45C0901DAA6C30094A9BBEA.address 163.172.138.22 +16102E458460349EE45C0901DAA6C30094A9BBEA.or_port 443 +16102E458460349EE45C0901DAA6C30094A9BBEA.dir_port 80 +16102E458460349EE45C0901DAA6C30094A9BBEA.nickname mkultra +16102E458460349EE45C0901DAA6C30094A9BBEA.has_extrainfo false +16102E458460349EE45C0901DAA6C30094A9BBEA.orport6_address 2001:bc8:4400:2100::1:3 +16102E458460349EE45C0901DAA6C30094A9BBEA.orport6_port 443 +175921396C7C426309AB03775A9930B6F611F794.address 178.62.60.37 +175921396C7C426309AB03775A9930B6F611F794.or_port 443 +175921396C7C426309AB03775A9930B6F611F794.dir_port 80 +175921396C7C426309AB03775A9930B6F611F794.nickname lovejoy +175921396C7C426309AB03775A9930B6F611F794.has_extrainfo false +185663B7C12777F052B2C2D23D7A239D8DA88A0F.address 171.25.193.25 +185663B7C12777F052B2C2D23D7A239D8DA88A0F.or_port 443 +185663B7C12777F052B2C2D23D7A239D8DA88A0F.dir_port 80 +185663B7C12777F052B2C2D23D7A239D8DA88A0F.nickname DFRI5 +185663B7C12777F052B2C2D23D7A239D8DA88A0F.has_extrainfo false +185663B7C12777F052B2C2D23D7A239D8DA88A0F.orport6_address 2001:67c:289c::25 +185663B7C12777F052B2C2D23D7A239D8DA88A0F.orport6_port 443 +1938EBACBB1A7BFA888D9623C90061130E63BB3F.address 149.56.141.138 +1938EBACBB1A7BFA888D9623C90061130E63BB3F.or_port 9001 +1938EBACBB1A7BFA888D9623C90061130E63BB3F.dir_port 9030 +1938EBACBB1A7BFA888D9623C90061130E63BB3F.nickname Aerodynamik04 +1938EBACBB1A7BFA888D9623C90061130E63BB3F.has_extrainfo false +1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.address 81.7.14.253 +1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.or_port 443 
+1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.dir_port 9001 +1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.nickname Ichotolot60 +1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.has_extrainfo false +1C90D3AEADFF3BCD079810632C8B85637924A58E.address 163.172.53.84 +1C90D3AEADFF3BCD079810632C8B85637924A58E.or_port 21 +1C90D3AEADFF3BCD079810632C8B85637924A58E.dir_port 143 +1C90D3AEADFF3BCD079810632C8B85637924A58E.nickname Multivac +1C90D3AEADFF3BCD079810632C8B85637924A58E.has_extrainfo false +1C90D3AEADFF3BCD079810632C8B85637924A58E.orport6_address 2001:bc8:24f8:: +1C90D3AEADFF3BCD079810632C8B85637924A58E.orport6_port 21 +1DBAED235E3957DE1ABD25B4206BE71406FB61F8.address 46.101.151.222 +1DBAED235E3957DE1ABD25B4206BE71406FB61F8.or_port 443 +1DBAED235E3957DE1ABD25B4206BE71406FB61F8.dir_port 80 +1DBAED235E3957DE1ABD25B4206BE71406FB61F8.nickname flanders +1DBAED235E3957DE1ABD25B4206BE71406FB61F8.has_extrainfo false +1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.address 91.219.237.229 +1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.or_port 443 +1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.dir_port 80 +1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.nickname JakeDidNothingWrong +1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.has_extrainfo false +1F6ABD086F40B890A33C93CC4606EE68B31C9556.address 199.184.246.250 +1F6ABD086F40B890A33C93CC4606EE68B31C9556.or_port 443 +1F6ABD086F40B890A33C93CC4606EE68B31C9556.dir_port 80 +1F6ABD086F40B890A33C93CC4606EE68B31C9556.nickname dao +1F6ABD086F40B890A33C93CC4606EE68B31C9556.has_extrainfo false +1F6ABD086F40B890A33C93CC4606EE68B31C9556.orport6_address 2620:124:1009:1::171 +1F6ABD086F40B890A33C93CC4606EE68B31C9556.orport6_port 443 +1FA8F638298645BE58AC905276680889CB795A94.address 185.129.249.124 +1FA8F638298645BE58AC905276680889CB795A94.or_port 9001 +1FA8F638298645BE58AC905276680889CB795A94.dir_port 9030 +1FA8F638298645BE58AC905276680889CB795A94.nickname treadstone +1FA8F638298645BE58AC905276680889CB795A94.has_extrainfo false +20462CBA5DA4C2D963567D17D0B7249718114A68.address 
212.47.229.2 +20462CBA5DA4C2D963567D17D0B7249718114A68.or_port 9001 +20462CBA5DA4C2D963567D17D0B7249718114A68.dir_port 9030 +20462CBA5DA4C2D963567D17D0B7249718114A68.nickname scaletor +20462CBA5DA4C2D963567D17D0B7249718114A68.has_extrainfo false +20462CBA5DA4C2D963567D17D0B7249718114A68.orport6_address 2001:bc8:4400:2100::f03 +20462CBA5DA4C2D963567D17D0B7249718114A68.orport6_port 9001 +204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.address 77.247.181.164 +204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.or_port 443 +204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.dir_port 80 +204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.nickname HaveHeart +204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.has_extrainfo false +230A8B2A8BA861210D9B4BA97745AEC217A94207.address 163.172.176.167 +230A8B2A8BA861210D9B4BA97745AEC217A94207.or_port 443 +230A8B2A8BA861210D9B4BA97745AEC217A94207.dir_port 80 +230A8B2A8BA861210D9B4BA97745AEC217A94207.nickname niij01 +230A8B2A8BA861210D9B4BA97745AEC217A94207.has_extrainfo false +231C2B9C8C31C295C472D031E06964834B745996.address 37.200.98.5 +231C2B9C8C31C295C472D031E06964834B745996.or_port 443 +231C2B9C8C31C295C472D031E06964834B745996.dir_port 80 +231C2B9C8C31C295C472D031E06964834B745996.nickname torpidsDEdomainf +231C2B9C8C31C295C472D031E06964834B745996.has_extrainfo false +231C2B9C8C31C295C472D031E06964834B745996.orport6_address 2a00:1158:3::11a +231C2B9C8C31C295C472D031E06964834B745996.orport6_port 993 +2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.address 138.201.250.33 +2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.or_port 9011 +2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.dir_port 9012 +2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.nickname storm +2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.has_extrainfo false +2CDCFED0142B28B002E89D305CBA2E26063FADE2.address 178.16.208.56 +2CDCFED0142B28B002E89D305CBA2E26063FADE2.or_port 443 +2CDCFED0142B28B002E89D305CBA2E26063FADE2.dir_port 80 +2CDCFED0142B28B002E89D305CBA2E26063FADE2.nickname jaures +2CDCFED0142B28B002E89D305CBA2E26063FADE2.has_extrainfo false 
+2CDCFED0142B28B002E89D305CBA2E26063FADE2.orport6_address 2a00:1c20:4089:1234:cd49:b58a:9ebe:67ec +2CDCFED0142B28B002E89D305CBA2E26063FADE2.orport6_port 443 +2F0F32AB1E5B943CA7D062C03F18960C86E70D94.address 97.74.237.196 +2F0F32AB1E5B943CA7D062C03F18960C86E70D94.or_port 9001 +2F0F32AB1E5B943CA7D062C03F18960C86E70D94.dir_port 9030 +2F0F32AB1E5B943CA7D062C03F18960C86E70D94.nickname Minotaur +2F0F32AB1E5B943CA7D062C03F18960C86E70D94.has_extrainfo false +30C19B81981F450C402306E2E7CFB6C3F79CB6B2.address 64.113.32.29 +30C19B81981F450C402306E2E7CFB6C3F79CB6B2.or_port 9001 +30C19B81981F450C402306E2E7CFB6C3F79CB6B2.dir_port 9030 +30C19B81981F450C402306E2E7CFB6C3F79CB6B2.nickname Libero +30C19B81981F450C402306E2E7CFB6C3F79CB6B2.has_extrainfo false +328E54981C6DDD7D89B89E418724A4A7881E3192.address 80.127.117.180 +328E54981C6DDD7D89B89E418724A4A7881E3192.or_port 443 +328E54981C6DDD7D89B89E418724A4A7881E3192.dir_port 80 +328E54981C6DDD7D89B89E418724A4A7881E3192.nickname sjc01 +328E54981C6DDD7D89B89E418724A4A7881E3192.has_extrainfo false +328E54981C6DDD7D89B89E418724A4A7881E3192.orport6_address 2001:985:e77:10::4 +328E54981C6DDD7D89B89E418724A4A7881E3192.orport6_port 443 +330CD3DB6AD266DC70CDB512B036957D03D9BC59.address 185.100.84.212 +330CD3DB6AD266DC70CDB512B036957D03D9BC59.or_port 443 +330CD3DB6AD266DC70CDB512B036957D03D9BC59.dir_port 80 +330CD3DB6AD266DC70CDB512B036957D03D9BC59.nickname TeamTardis +330CD3DB6AD266DC70CDB512B036957D03D9BC59.has_extrainfo false +330CD3DB6AD266DC70CDB512B036957D03D9BC59.orport6_address 2a06:1700:0:7::1 +330CD3DB6AD266DC70CDB512B036957D03D9BC59.orport6_port 443 +33DA0CAB7C27812EFF2E22C9705630A54D101FEB.address 163.172.13.165 +33DA0CAB7C27812EFF2E22C9705630A54D101FEB.or_port 9001 +33DA0CAB7C27812EFF2E22C9705630A54D101FEB.dir_port 9030 +33DA0CAB7C27812EFF2E22C9705630A54D101FEB.nickname mullbinde9 +33DA0CAB7C27812EFF2E22C9705630A54D101FEB.has_extrainfo false +33DA0CAB7C27812EFF2E22C9705630A54D101FEB.orport6_address 2001:bc8:38cb:201::8 
+33DA0CAB7C27812EFF2E22C9705630A54D101FEB.orport6_port 9001 +3711E80B5B04494C971FB0459D4209AB7F2EA799.address 91.121.23.100 +3711E80B5B04494C971FB0459D4209AB7F2EA799.or_port 9001 +3711E80B5B04494C971FB0459D4209AB7F2EA799.dir_port 9030 +3711E80B5B04494C971FB0459D4209AB7F2EA799.nickname 0x3d002 +3711E80B5B04494C971FB0459D4209AB7F2EA799.has_extrainfo false +379FB450010D17078B3766C2273303C358C3A442.address 176.126.252.12 +379FB450010D17078B3766C2273303C358C3A442.or_port 8080 +379FB450010D17078B3766C2273303C358C3A442.dir_port 21 +379FB450010D17078B3766C2273303C358C3A442.nickname aurora +379FB450010D17078B3766C2273303C358C3A442.has_extrainfo true +379FB450010D17078B3766C2273303C358C3A442.orport6_address 2a02:59e0:0:7::12 +379FB450010D17078B3766C2273303C358C3A442.orport6_port 81 +387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.address 62.210.92.11 +387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.or_port 9101 +387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.dir_port 9130 +387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.nickname redjohn1 +387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.has_extrainfo false +387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.orport6_address 2001:bc8:338c::1 +387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.orport6_port 9101 +39F096961ED2576975C866D450373A9913AFDC92.address 198.50.191.95 +39F096961ED2576975C866D450373A9913AFDC92.or_port 443 +39F096961ED2576975C866D450373A9913AFDC92.dir_port 80 +39F096961ED2576975C866D450373A9913AFDC92.nickname thomas +39F096961ED2576975C866D450373A9913AFDC92.has_extrainfo false +3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.address 164.132.77.175 +3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.or_port 9001 +3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.dir_port 9030 +3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.nickname rofltor1 +3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.has_extrainfo false +3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.address 212.83.154.33 +3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.or_port 443 +3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.dir_port 8888 
+3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.nickname bauruine203 +3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.has_extrainfo false +3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.address 176.10.107.180 +3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.or_port 9001 +3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.dir_port 9030 +3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.nickname schokomilch +3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.has_extrainfo false +3E53D3979DB07EFD736661C934A1DED14127B684.address 217.79.179.177 +3E53D3979DB07EFD736661C934A1DED14127B684.or_port 9001 +3E53D3979DB07EFD736661C934A1DED14127B684.dir_port 9030 +3E53D3979DB07EFD736661C934A1DED14127B684.nickname Unnamed +3E53D3979DB07EFD736661C934A1DED14127B684.has_extrainfo false +3E53D3979DB07EFD736661C934A1DED14127B684.orport6_address 2001:4ba0:fff9:131:6c4f::90d3 +3E53D3979DB07EFD736661C934A1DED14127B684.orport6_port 9001 +4061C553CA88021B8302F0814365070AAE617270.address 185.100.85.101 +4061C553CA88021B8302F0814365070AAE617270.or_port 9001 +4061C553CA88021B8302F0814365070AAE617270.dir_port 9030 +4061C553CA88021B8302F0814365070AAE617270.nickname TorExitRomania +4061C553CA88021B8302F0814365070AAE617270.has_extrainfo false +40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.address 199.249.223.61 +40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.or_port 443 +40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.dir_port 80 +40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.nickname Quintex12 +40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.has_extrainfo false +41C59606AFE1D1AA6EC6EF6719690B856F0B6587.address 178.17.170.156 +41C59606AFE1D1AA6EC6EF6719690B856F0B6587.or_port 9001 +41C59606AFE1D1AA6EC6EF6719690B856F0B6587.dir_port 9030 +41C59606AFE1D1AA6EC6EF6719690B856F0B6587.nickname TorExitMoldova2 +41C59606AFE1D1AA6EC6EF6719690B856F0B6587.has_extrainfo false +439D0447772CB107B886F7782DBC201FA26B92D1.address 178.62.86.96 +439D0447772CB107B886F7782DBC201FA26B92D1.or_port 9001 +439D0447772CB107B886F7782DBC201FA26B92D1.dir_port 9030 
+439D0447772CB107B886F7782DBC201FA26B92D1.nickname pablobm001 +439D0447772CB107B886F7782DBC201FA26B92D1.has_extrainfo false +439D0447772CB107B886F7782DBC201FA26B92D1.orport6_address 2a03:b0c0:1:d0::3cf:7001 +439D0447772CB107B886F7782DBC201FA26B92D1.orport6_port 9050 +4623A9EC53BFD83155929E56D6F7B55B5E718C24.address 163.172.157.213 +4623A9EC53BFD83155929E56D6F7B55B5E718C24.or_port 443 +4623A9EC53BFD83155929E56D6F7B55B5E718C24.dir_port 8080 +4623A9EC53BFD83155929E56D6F7B55B5E718C24.nickname Cotopaxi +4623A9EC53BFD83155929E56D6F7B55B5E718C24.has_extrainfo false +46791D156C9B6C255C2665D4D8393EC7DBAA7798.address 31.31.78.49 +46791D156C9B6C255C2665D4D8393EC7DBAA7798.or_port 443 +46791D156C9B6C255C2665D4D8393EC7DBAA7798.dir_port 80 +46791D156C9B6C255C2665D4D8393EC7DBAA7798.nickname KrigHaBandolo +46791D156C9B6C255C2665D4D8393EC7DBAA7798.has_extrainfo false +484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.address 193.70.43.76 +484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.or_port 9001 +484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.dir_port 9030 +484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.nickname Aerodynamik03 +484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.has_extrainfo false +489D94333DF66D57FFE34D9D59CC2D97E2CB0053.address 37.187.102.186 +489D94333DF66D57FFE34D9D59CC2D97E2CB0053.or_port 9001 +489D94333DF66D57FFE34D9D59CC2D97E2CB0053.dir_port 9030 +489D94333DF66D57FFE34D9D59CC2D97E2CB0053.nickname txtfileTorNode65536 +489D94333DF66D57FFE34D9D59CC2D97E2CB0053.has_extrainfo false +489D94333DF66D57FFE34D9D59CC2D97E2CB0053.orport6_address 2001:41d0:a:26ba::1 +489D94333DF66D57FFE34D9D59CC2D97E2CB0053.orport6_port 9001 +4CC9CC9195EC38645B699A33307058624F660CCF.address 51.254.101.242 +4CC9CC9195EC38645B699A33307058624F660CCF.or_port 9001 +4CC9CC9195EC38645B699A33307058624F660CCF.dir_port 9002 +4CC9CC9195EC38645B699A33307058624F660CCF.nickname devsum +4CC9CC9195EC38645B699A33307058624F660CCF.has_extrainfo false +4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.address 108.53.208.157 
+4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.or_port 443 +4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.dir_port 80 +4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.nickname Binnacle +4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.has_extrainfo true +50586E25BE067FD1F739998550EDDCB1A14CA5B2.address 212.51.134.123 +50586E25BE067FD1F739998550EDDCB1A14CA5B2.or_port 9001 +50586E25BE067FD1F739998550EDDCB1A14CA5B2.dir_port 9030 +50586E25BE067FD1F739998550EDDCB1A14CA5B2.nickname Jans +50586E25BE067FD1F739998550EDDCB1A14CA5B2.has_extrainfo false +51E1CF613FD6F9F11FE24743C91D6F9981807D82.address 81.7.16.182 +51E1CF613FD6F9F11FE24743C91D6F9981807D82.or_port 443 +51E1CF613FD6F9F11FE24743C91D6F9981807D82.dir_port 80 +51E1CF613FD6F9F11FE24743C91D6F9981807D82.nickname torpidsDEisppro3 +51E1CF613FD6F9F11FE24743C91D6F9981807D82.has_extrainfo false +51E1CF613FD6F9F11FE24743C91D6F9981807D82.orport6_address 2a02:180:1:1::517:10b6 +51E1CF613FD6F9F11FE24743C91D6F9981807D82.orport6_port 993 +52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.address 85.25.159.65 +52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.or_port 80 +52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.dir_port 995 +52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.nickname BeastieJoy63 +52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.has_extrainfo false +587E0A9552E4274B251F29B5B2673D38442EE4BF.address 95.130.12.119 +587E0A9552E4274B251F29B5B2673D38442EE4BF.or_port 443 +587E0A9552E4274B251F29B5B2673D38442EE4BF.dir_port 80 +587E0A9552E4274B251F29B5B2673D38442EE4BF.nickname Nuath +587E0A9552E4274B251F29B5B2673D38442EE4BF.has_extrainfo false +58ED9C9C35E433EE58764D62892B4FFD518A3CD0.address 185.21.100.50 +58ED9C9C35E433EE58764D62892B4FFD518A3CD0.or_port 9001 +58ED9C9C35E433EE58764D62892B4FFD518A3CD0.dir_port 9030 +58ED9C9C35E433EE58764D62892B4FFD518A3CD0.nickname SamAAdams2 +58ED9C9C35E433EE58764D62892B4FFD518A3CD0.has_extrainfo false +58ED9C9C35E433EE58764D62892B4FFD518A3CD0.orport6_address 2a00:1158:2:cd00:0:74:6f:72 +58ED9C9C35E433EE58764D62892B4FFD518A3CD0.orport6_port 443 
+5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.address 172.98.193.43 +5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.or_port 443 +5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.dir_port 80 +5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.nickname Backplane +5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.has_extrainfo false +5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.address 199.249.223.74 +5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.or_port 443 +5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.dir_port 80 +5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.nickname QuintexAirVPN7 +5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.has_extrainfo false +616081EC829593AF4232550DE6FFAA1D75B37A90.address 95.128.43.164 +616081EC829593AF4232550DE6FFAA1D75B37A90.or_port 443 +616081EC829593AF4232550DE6FFAA1D75B37A90.dir_port 80 +616081EC829593AF4232550DE6FFAA1D75B37A90.nickname AquaRayTerminus +616081EC829593AF4232550DE6FFAA1D75B37A90.has_extrainfo false +616081EC829593AF4232550DE6FFAA1D75B37A90.orport6_address 2a02:ec0:209:10::4 +616081EC829593AF4232550DE6FFAA1D75B37A90.orport6_port 443 +68F175CCABE727AA2D2309BCD8789499CEE36ED7.address 163.172.139.104 +68F175CCABE727AA2D2309BCD8789499CEE36ED7.or_port 443 +68F175CCABE727AA2D2309BCD8789499CEE36ED7.dir_port 8080 +68F175CCABE727AA2D2309BCD8789499CEE36ED7.nickname Pichincha +68F175CCABE727AA2D2309BCD8789499CEE36ED7.has_extrainfo false +6A7551EEE18F78A9813096E82BF84F740D32B911.address 85.214.62.48 +6A7551EEE18F78A9813096E82BF84F740D32B911.or_port 443 +6A7551EEE18F78A9813096E82BF84F740D32B911.dir_port 80 +6A7551EEE18F78A9813096E82BF84F740D32B911.nickname TorMachine +6A7551EEE18F78A9813096E82BF84F740D32B911.has_extrainfo false +6EF897645B79B6CB35E853B32506375014DE3621.address 80.127.137.19 +6EF897645B79B6CB35E853B32506375014DE3621.or_port 443 +6EF897645B79B6CB35E853B32506375014DE3621.dir_port 80 +6EF897645B79B6CB35E853B32506375014DE3621.nickname d6relay +6EF897645B79B6CB35E853B32506375014DE3621.has_extrainfo false +6EF897645B79B6CB35E853B32506375014DE3621.orport6_address 2001:981:47c1:1::6 
+6EF897645B79B6CB35E853B32506375014DE3621.orport6_port 443 +72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.address 85.235.250.88 +72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.or_port 443 +72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.dir_port 80 +72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.nickname TykRelay01 +72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.has_extrainfo false +7600680249A22080ECC6173FBBF64D6FCF330A61.address 81.7.14.31 +7600680249A22080ECC6173FBBF64D6FCF330A61.or_port 443 +7600680249A22080ECC6173FBBF64D6FCF330A61.dir_port 9001 +7600680249A22080ECC6173FBBF64D6FCF330A61.nickname Ichotolot62 +7600680249A22080ECC6173FBBF64D6FCF330A61.has_extrainfo false +763C9556602BD6207771A7A3D958091D44C43228.address 134.119.36.135 +763C9556602BD6207771A7A3D958091D44C43228.or_port 443 +763C9556602BD6207771A7A3D958091D44C43228.dir_port 80 +763C9556602BD6207771A7A3D958091D44C43228.nickname torpidsDEdomainf2 +763C9556602BD6207771A7A3D958091D44C43228.has_extrainfo false +763C9556602BD6207771A7A3D958091D44C43228.orport6_address 2a00:1158:3::2a8 +763C9556602BD6207771A7A3D958091D44C43228.orport6_port 993 +774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.address 188.166.133.133 +774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.or_port 9001 +774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.dir_port 9030 +774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.nickname dropsy +774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.has_extrainfo false +774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.orport6_address 2a03:b0c0:2:d0::26c0:1 +774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.orport6_port 9001 +775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.address 5.196.23.64 +775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.or_port 9001 +775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.dir_port 9030 +775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.nickname Aerodynamik01 +775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.has_extrainfo false +789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.address 81.30.158.213 +789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.or_port 9001 +789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.dir_port 
9030 +789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.nickname dumpster +789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.has_extrainfo false +789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.orport6_address 2001:4ba0:cafe:e84::1 +789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.orport6_port 9001 +78E2BE744A53631B4AAB781468E94C52AB73968B.address 104.200.20.46 +78E2BE744A53631B4AAB781468E94C52AB73968B.or_port 9001 +78E2BE744A53631B4AAB781468E94C52AB73968B.dir_port 80 +78E2BE744A53631B4AAB781468E94C52AB73968B.nickname bynumlawtor +78E2BE744A53631B4AAB781468E94C52AB73968B.has_extrainfo false +79E169B25E4C7CE99584F6ED06F379478F23E2B8.address 62.210.129.246 +79E169B25E4C7CE99584F6ED06F379478F23E2B8.or_port 443 +79E169B25E4C7CE99584F6ED06F379478F23E2B8.dir_port 80 +79E169B25E4C7CE99584F6ED06F379478F23E2B8.nickname MilesPrower +79E169B25E4C7CE99584F6ED06F379478F23E2B8.has_extrainfo false +7A32C9519D80CA458FC8B034A28F5F6815649A98.address 82.223.21.74 +7A32C9519D80CA458FC8B034A28F5F6815649A98.or_port 9001 +7A32C9519D80CA458FC8B034A28F5F6815649A98.dir_port 9030 +7A32C9519D80CA458FC8B034A28F5F6815649A98.nickname silentrocket +7A32C9519D80CA458FC8B034A28F5F6815649A98.has_extrainfo false +7A32C9519D80CA458FC8B034A28F5F6815649A98.orport6_address 2001:470:53e0::cafe +7A32C9519D80CA458FC8B034A28F5F6815649A98.orport6_port 9050 +7BB70F8585DFC27E75D692970C0EEB0F22983A63.address 51.254.136.195 +7BB70F8585DFC27E75D692970C0EEB0F22983A63.or_port 443 +7BB70F8585DFC27E75D692970C0EEB0F22983A63.dir_port 80 +7BB70F8585DFC27E75D692970C0EEB0F22983A63.nickname torproxy02 +7BB70F8585DFC27E75D692970C0EEB0F22983A63.has_extrainfo false +7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.address 77.247.181.162 +7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.or_port 443 +7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.dir_port 80 +7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.nickname sofia +7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.has_extrainfo false +7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.address 185.100.84.82 
+7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.or_port 443 +7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.dir_port 80 +7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.nickname saveyourprivacyexit +7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.has_extrainfo false +7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.address 199.249.223.69 +7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.or_port 443 +7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.dir_port 80 +7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.nickname Quintex20 +7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.has_extrainfo false +80AAF8D5956A43C197104CEF2550CD42D165C6FB.address 193.11.114.45 +80AAF8D5956A43C197104CEF2550CD42D165C6FB.or_port 9002 +80AAF8D5956A43C197104CEF2550CD42D165C6FB.dir_port 9031 +80AAF8D5956A43C197104CEF2550CD42D165C6FB.nickname mdfnet2 +80AAF8D5956A43C197104CEF2550CD42D165C6FB.has_extrainfo false +8456DFA94161CDD99E480C2A2992C366C6564410.address 62.210.254.132 +8456DFA94161CDD99E480C2A2992C366C6564410.or_port 443 +8456DFA94161CDD99E480C2A2992C366C6564410.dir_port 80 +8456DFA94161CDD99E480C2A2992C366C6564410.nickname turingmachine +8456DFA94161CDD99E480C2A2992C366C6564410.has_extrainfo false +855BC2DABE24C861CD887DB9B2E950424B49FC34.address 85.230.184.93 +855BC2DABE24C861CD887DB9B2E950424B49FC34.or_port 443 +855BC2DABE24C861CD887DB9B2E950424B49FC34.dir_port 9030 +855BC2DABE24C861CD887DB9B2E950424B49FC34.nickname Logforme +855BC2DABE24C861CD887DB9B2E950424B49FC34.has_extrainfo false +8567AD0A6369ED08527A8A8533A5162AC00F7678.address 72.52.75.27 +8567AD0A6369ED08527A8A8533A5162AC00F7678.or_port 9001 +8567AD0A6369ED08527A8A8533A5162AC00F7678.dir_port 9030 +8567AD0A6369ED08527A8A8533A5162AC00F7678.nickname piecoopdotnet +8567AD0A6369ED08527A8A8533A5162AC00F7678.has_extrainfo false +86C281AD135058238D7A337D546C902BE8505DDE.address 185.96.88.29 +86C281AD135058238D7A337D546C902BE8505DDE.or_port 443 +86C281AD135058238D7A337D546C902BE8505DDE.dir_port 80 +86C281AD135058238D7A337D546C902BE8505DDE.nickname TykRelay05 
+86C281AD135058238D7A337D546C902BE8505DDE.has_extrainfo false +88487BDD980BF6E72092EE690E8C51C0AA4A538C.address 176.10.104.243 +88487BDD980BF6E72092EE690E8C51C0AA4A538C.or_port 443 +88487BDD980BF6E72092EE690E8C51C0AA4A538C.dir_port 80 +88487BDD980BF6E72092EE690E8C51C0AA4A538C.nickname DigiGesTor2e1 +88487BDD980BF6E72092EE690E8C51C0AA4A538C.has_extrainfo false +8C00FA7369A7A308F6A137600F0FA07990D9D451.address 163.172.194.53 +8C00FA7369A7A308F6A137600F0FA07990D9D451.or_port 9001 +8C00FA7369A7A308F6A137600F0FA07990D9D451.dir_port 9030 +8C00FA7369A7A308F6A137600F0FA07990D9D451.nickname GrmmlLitavis +8C00FA7369A7A308F6A137600F0FA07990D9D451.has_extrainfo false +8C00FA7369A7A308F6A137600F0FA07990D9D451.orport6_address 2001:bc8:225f:142:6c69:7461:7669:73 +8C00FA7369A7A308F6A137600F0FA07990D9D451.orport6_port 9001 +8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.address 5.189.169.190 +8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.or_port 8080 +8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.dir_port 8030 +8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.nickname thanatosDE +8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.has_extrainfo false +9007C1D8E4F03D506A4A011B907A9E8D04E3C605.address 151.80.42.103 +9007C1D8E4F03D506A4A011B907A9E8D04E3C605.or_port 9001 +9007C1D8E4F03D506A4A011B907A9E8D04E3C605.dir_port 9030 +9007C1D8E4F03D506A4A011B907A9E8D04E3C605.nickname matlink +9007C1D8E4F03D506A4A011B907A9E8D04E3C605.has_extrainfo false +9007C1D8E4F03D506A4A011B907A9E8D04E3C605.orport6_address 2001:41d0:e:f67::114 +9007C1D8E4F03D506A4A011B907A9E8D04E3C605.orport6_port 9001 +91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.address 37.187.20.59 +91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.or_port 443 +91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.dir_port 80 +91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.nickname torpidsFRovh +91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.has_extrainfo false +91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.orport6_address 2001:41d0:a:143b::1 +91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.orport6_port 993 
+9285B22F7953D7874604EEE2B470609AD81C74E9.address 62.138.7.171 +9285B22F7953D7874604EEE2B470609AD81C74E9.or_port 8001 +9285B22F7953D7874604EEE2B470609AD81C74E9.dir_port 8030 +9285B22F7953D7874604EEE2B470609AD81C74E9.nickname 0x3d005 +9285B22F7953D7874604EEE2B470609AD81C74E9.has_extrainfo false +92CFD9565B24646CAC2D172D3DB503D69E777B8A.address 178.16.208.57 +92CFD9565B24646CAC2D172D3DB503D69E777B8A.or_port 443 +92CFD9565B24646CAC2D172D3DB503D69E777B8A.dir_port 80 +92CFD9565B24646CAC2D172D3DB503D69E777B8A.nickname bakunin +92CFD9565B24646CAC2D172D3DB503D69E777B8A.has_extrainfo false +92CFD9565B24646CAC2D172D3DB503D69E777B8A.orport6_address 2a00:1c20:4089:1234:7825:2c5d:1ecd:c66f +92CFD9565B24646CAC2D172D3DB503D69E777B8A.orport6_port 443 +92ECC9E0E2AF81BB954719B189AC362E254AD4A5.address 91.219.237.244 +92ECC9E0E2AF81BB954719B189AC362E254AD4A5.or_port 443 +92ECC9E0E2AF81BB954719B189AC362E254AD4A5.dir_port 80 +92ECC9E0E2AF81BB954719B189AC362E254AD4A5.nickname lewwerDuarUesSlaav +92ECC9E0E2AF81BB954719B189AC362E254AD4A5.has_extrainfo false +9772EFB535397C942C3AB8804FB35CFFAD012438.address 37.153.1.10 +9772EFB535397C942C3AB8804FB35CFFAD012438.or_port 9001 +9772EFB535397C942C3AB8804FB35CFFAD012438.dir_port 9030 +9772EFB535397C942C3AB8804FB35CFFAD012438.nickname smallsweatnode +9772EFB535397C942C3AB8804FB35CFFAD012438.has_extrainfo false +998BF3ED7F70E33D1C307247B9626D9E7573C438.address 163.172.223.200 +998BF3ED7F70E33D1C307247B9626D9E7573C438.or_port 443 +998BF3ED7F70E33D1C307247B9626D9E7573C438.dir_port 80 +998BF3ED7F70E33D1C307247B9626D9E7573C438.nickname Outfall2 +998BF3ED7F70E33D1C307247B9626D9E7573C438.has_extrainfo false +9A0D54D3A6D2E0767596BF1515E6162A75B3293F.address 91.229.20.27 +9A0D54D3A6D2E0767596BF1515E6162A75B3293F.or_port 9001 +9A0D54D3A6D2E0767596BF1515E6162A75B3293F.dir_port 9030 +9A0D54D3A6D2E0767596BF1515E6162A75B3293F.nickname gordonkeybag +9A0D54D3A6D2E0767596BF1515E6162A75B3293F.has_extrainfo false +9A68B85A02318F4E7E87F2828039FBD5D75B0142.address 
66.111.2.20 +9A68B85A02318F4E7E87F2828039FBD5D75B0142.or_port 9001 +9A68B85A02318F4E7E87F2828039FBD5D75B0142.dir_port 9030 +9A68B85A02318F4E7E87F2828039FBD5D75B0142.nickname NYCBUG0 +9A68B85A02318F4E7E87F2828039FBD5D75B0142.has_extrainfo false +9B31F1F1C1554F9FFB3455911F82E818EF7C7883.address 185.100.86.128 +9B31F1F1C1554F9FFB3455911F82E818EF7C7883.or_port 9001 +9B31F1F1C1554F9FFB3455911F82E818EF7C7883.dir_port 9030 +9B31F1F1C1554F9FFB3455911F82E818EF7C7883.nickname TorExitFinland +9B31F1F1C1554F9FFB3455911F82E818EF7C7883.has_extrainfo false +9EC5E097663862DF861A18C32B37C5F82284B27D.address 146.185.177.103 +9EC5E097663862DF861A18C32B37C5F82284B27D.or_port 9030 +9EC5E097663862DF861A18C32B37C5F82284B27D.dir_port 80 +9EC5E097663862DF861A18C32B37C5F82284B27D.nickname Winter +9EC5E097663862DF861A18C32B37C5F82284B27D.has_extrainfo false +9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.address 199.249.223.64 +9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.or_port 443 +9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.dir_port 80 +9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.nickname Quintex15 +9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.has_extrainfo false +9F7D6E6420183C2B76D3CE99624EBC98A21A967E.address 46.28.110.244 +9F7D6E6420183C2B76D3CE99624EBC98A21A967E.or_port 443 +9F7D6E6420183C2B76D3CE99624EBC98A21A967E.dir_port 80 +9F7D6E6420183C2B76D3CE99624EBC98A21A967E.nickname Nivrim +9F7D6E6420183C2B76D3CE99624EBC98A21A967E.has_extrainfo false +9FBEB75E8BC142565F12CBBE078D63310236A334.address 91.121.84.137 +9FBEB75E8BC142565F12CBBE078D63310236A334.or_port 4052 +9FBEB75E8BC142565F12CBBE078D63310236A334.dir_port 4952 +9FBEB75E8BC142565F12CBBE078D63310236A334.nickname lindon +9FBEB75E8BC142565F12CBBE078D63310236A334.has_extrainfo false +A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.address 46.165.230.5 +A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.or_port 443 +A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.dir_port 80 +A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.nickname Dhalgren 
+A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.has_extrainfo true +A10C4F666D27364036B562823E5830BC448E046A.address 171.25.193.77 +A10C4F666D27364036B562823E5830BC448E046A.or_port 443 +A10C4F666D27364036B562823E5830BC448E046A.dir_port 80 +A10C4F666D27364036B562823E5830BC448E046A.nickname DFRI1 +A10C4F666D27364036B562823E5830BC448E046A.has_extrainfo false +A10C4F666D27364036B562823E5830BC448E046A.orport6_address 2001:67c:289c:3::77 +A10C4F666D27364036B562823E5830BC448E046A.orport6_port 443 +A2E6BB5C391CD46B38C55B4329C35304540771F1.address 81.7.3.67 +A2E6BB5C391CD46B38C55B4329C35304540771F1.or_port 443 +A2E6BB5C391CD46B38C55B4329C35304540771F1.dir_port 993 +A2E6BB5C391CD46B38C55B4329C35304540771F1.nickname BeastieJoy62 +A2E6BB5C391CD46B38C55B4329C35304540771F1.has_extrainfo false +A478E421F83194C114F41E94F95999672AED51FE.address 171.25.193.78 +A478E421F83194C114F41E94F95999672AED51FE.or_port 443 +A478E421F83194C114F41E94F95999672AED51FE.dir_port 80 +A478E421F83194C114F41E94F95999672AED51FE.nickname DFRI4 +A478E421F83194C114F41E94F95999672AED51FE.has_extrainfo false +A478E421F83194C114F41E94F95999672AED51FE.orport6_address 2001:67c:289c:3::78 +A478E421F83194C114F41E94F95999672AED51FE.orport6_port 443 +A4C98CEA3F34E05299417E9F885A642C88EF6029.address 178.16.208.58 +A4C98CEA3F34E05299417E9F885A642C88EF6029.or_port 443 +A4C98CEA3F34E05299417E9F885A642C88EF6029.dir_port 80 +A4C98CEA3F34E05299417E9F885A642C88EF6029.nickname jaures2 +A4C98CEA3F34E05299417E9F885A642C88EF6029.has_extrainfo false +A4C98CEA3F34E05299417E9F885A642C88EF6029.orport6_address 2a00:1c20:4089:1234:cdae:1b3e:cc38:3d45 +A4C98CEA3F34E05299417E9F885A642C88EF6029.orport6_port 443 +A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.address 163.172.149.122 +A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.or_port 443 +A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.dir_port 80 +A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.nickname niij03 +A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.has_extrainfo false 
+AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.address 195.154.164.243 +AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.or_port 443 +AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.dir_port 80 +AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.nickname torpidsFRonline3 +AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.has_extrainfo false +ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.address 86.59.119.88 +ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.or_port 443 +ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.dir_port 80 +ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.nickname ph3x +ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.has_extrainfo false +ACDD9E85A05B127BA010466C13C8C47212E8A38F.address 185.129.62.62 +ACDD9E85A05B127BA010466C13C8C47212E8A38F.or_port 9001 +ACDD9E85A05B127BA010466C13C8C47212E8A38F.dir_port 9030 +ACDD9E85A05B127BA010466C13C8C47212E8A38F.nickname kramse +ACDD9E85A05B127BA010466C13C8C47212E8A38F.has_extrainfo false +ACDD9E85A05B127BA010466C13C8C47212E8A38F.orport6_address 2a06:d380:0:3700::62 +ACDD9E85A05B127BA010466C13C8C47212E8A38F.orport6_port 9001 +AD19490C7DBB26D3A68EFC824F67E69B0A96E601.address 188.40.128.246 +AD19490C7DBB26D3A68EFC824F67E69B0A96E601.or_port 9001 +AD19490C7DBB26D3A68EFC824F67E69B0A96E601.dir_port 9030 +AD19490C7DBB26D3A68EFC824F67E69B0A96E601.nickname sputnik +AD19490C7DBB26D3A68EFC824F67E69B0A96E601.has_extrainfo false +AD19490C7DBB26D3A68EFC824F67E69B0A96E601.orport6_address 2a01:4f8:221:1ac1:dead:beef:7005:9001 +AD19490C7DBB26D3A68EFC824F67E69B0A96E601.orport6_port 9001 +B0279A521375F3CB2AE210BDBFC645FDD2E1973A.address 176.126.252.11 +B0279A521375F3CB2AE210BDBFC645FDD2E1973A.or_port 9001 +B0279A521375F3CB2AE210BDBFC645FDD2E1973A.dir_port 443 +B0279A521375F3CB2AE210BDBFC645FDD2E1973A.nickname chulak +B0279A521375F3CB2AE210BDBFC645FDD2E1973A.has_extrainfo true +B0279A521375F3CB2AE210BDBFC645FDD2E1973A.orport6_address 2a02:59e0:0:7::11 +B0279A521375F3CB2AE210BDBFC645FDD2E1973A.orport6_port 9003 +B0553175AADB0501E5A61FC61CEA3970BE130FF2.address 5.9.147.226 
+B0553175AADB0501E5A61FC61CEA3970BE130FF2.or_port 9001 +B0553175AADB0501E5A61FC61CEA3970BE130FF2.dir_port 9030 +B0553175AADB0501E5A61FC61CEA3970BE130FF2.nickname zwiubel +B0553175AADB0501E5A61FC61CEA3970BE130FF2.has_extrainfo false +B0553175AADB0501E5A61FC61CEA3970BE130FF2.orport6_address 2a01:4f8:190:30e1::2 +B0553175AADB0501E5A61FC61CEA3970BE130FF2.orport6_port 9001 +B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.address 178.17.174.14 +B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.or_port 9001 +B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.dir_port 9030 +B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.nickname TorExitMoldova +B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.has_extrainfo false +B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.address 199.249.223.40 +B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.or_port 443 +B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.dir_port 80 +B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.nickname Quintex31 +B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.has_extrainfo false +B143D439B72D239A419F8DCE07B8A8EB1B486FA7.address 212.129.62.232 +B143D439B72D239A419F8DCE07B8A8EB1B486FA7.or_port 443 +B143D439B72D239A419F8DCE07B8A8EB1B486FA7.dir_port 80 +B143D439B72D239A419F8DCE07B8A8EB1B486FA7.nickname wardsback +B143D439B72D239A419F8DCE07B8A8EB1B486FA7.has_extrainfo false +B291D30517D23299AD7CEE3E60DFE60D0E3A4664.address 136.243.214.137 +B291D30517D23299AD7CEE3E60DFE60D0E3A4664.or_port 443 +B291D30517D23299AD7CEE3E60DFE60D0E3A4664.dir_port 80 +B291D30517D23299AD7CEE3E60DFE60D0E3A4664.nickname TorKIT +B291D30517D23299AD7CEE3E60DFE60D0E3A4664.has_extrainfo false +B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.address 212.47.233.86 +B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.or_port 9001 +B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.dir_port 9030 +B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.nickname netimanmu +B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.has_extrainfo false +B5212DB685A2A0FCFBAE425738E478D12361710D.address 93.115.97.242 +B5212DB685A2A0FCFBAE425738E478D12361710D.or_port 9001 
+B5212DB685A2A0FCFBAE425738E478D12361710D.dir_port 9030 +B5212DB685A2A0FCFBAE425738E478D12361710D.nickname firstor +B5212DB685A2A0FCFBAE425738E478D12361710D.has_extrainfo false +B6904ADD4C0D10CDA7179E051962350A69A63243.address 81.2.209.10 +B6904ADD4C0D10CDA7179E051962350A69A63243.or_port 80 +B6904ADD4C0D10CDA7179E051962350A69A63243.dir_port 443 +B6904ADD4C0D10CDA7179E051962350A69A63243.nickname torzabehlice +B6904ADD4C0D10CDA7179E051962350A69A63243.has_extrainfo false +B6904ADD4C0D10CDA7179E051962350A69A63243.orport6_address 2001:15e8:201:1::d10a +B6904ADD4C0D10CDA7179E051962350A69A63243.orport6_port 80 +B83DC1558F0D34353BB992EF93AFEAFDB226A73E.address 193.11.114.46 +B83DC1558F0D34353BB992EF93AFEAFDB226A73E.or_port 9003 +B83DC1558F0D34353BB992EF93AFEAFDB226A73E.dir_port 9032 +B83DC1558F0D34353BB992EF93AFEAFDB226A73E.nickname mdfnet3 +B83DC1558F0D34353BB992EF93AFEAFDB226A73E.has_extrainfo false +B86137AE9681701901C6720E55C16805B46BD8E3.address 81.7.11.186 +B86137AE9681701901C6720E55C16805B46BD8E3.or_port 443 +B86137AE9681701901C6720E55C16805B46BD8E3.dir_port 1080 +B86137AE9681701901C6720E55C16805B46BD8E3.nickname BeastieJoy60 +B86137AE9681701901C6720E55C16805B46BD8E3.has_extrainfo false +BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.address 197.231.221.211 +BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.or_port 443 +BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.dir_port 9030 +BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.nickname IPredator +BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.has_extrainfo false +BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.address 198.96.155.3 +BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.or_port 5001 +BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.dir_port 8080 +BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.nickname gurgle +BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.has_extrainfo false +BCEF908195805E03E92CCFE669C48738E556B9C5.address 128.199.55.207 +BCEF908195805E03E92CCFE669C48738E556B9C5.or_port 9001 +BCEF908195805E03E92CCFE669C48738E556B9C5.dir_port 9030 
+BCEF908195805E03E92CCFE669C48738E556B9C5.nickname EldritchReaper +BCEF908195805E03E92CCFE669C48738E556B9C5.has_extrainfo false +BCEF908195805E03E92CCFE669C48738E556B9C5.orport6_address 2a03:b0c0:2:d0::158:3001 +BCEF908195805E03E92CCFE669C48738E556B9C5.orport6_port 9001 +BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.address 213.141.138.174 +BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.or_port 9001 +BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.dir_port 9030 +BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.nickname Schakalium +BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.has_extrainfo false +BF735F669481EE1CCC348F0731551C933D1E2278.address 104.192.5.248 +BF735F669481EE1CCC348F0731551C933D1E2278.or_port 9001 +BF735F669481EE1CCC348F0731551C933D1E2278.dir_port 9030 +BF735F669481EE1CCC348F0731551C933D1E2278.nickname Freeway11 +BF735F669481EE1CCC348F0731551C933D1E2278.has_extrainfo false +C2AAB088555850FC434E68943F551072042B85F1.address 31.185.104.21 +C2AAB088555850FC434E68943F551072042B85F1.or_port 443 +C2AAB088555850FC434E68943F551072042B85F1.dir_port 80 +C2AAB088555850FC434E68943F551072042B85F1.nickname Digitalcourage3ip3 +C2AAB088555850FC434E68943F551072042B85F1.has_extrainfo false +C37BC191AC389179674578C3E6944E925FE186C2.address 213.239.217.18 +C37BC191AC389179674578C3E6944E925FE186C2.or_port 1337 +C37BC191AC389179674578C3E6944E925FE186C2.dir_port 1338 +C37BC191AC389179674578C3E6944E925FE186C2.nickname xzdsb +C37BC191AC389179674578C3E6944E925FE186C2.has_extrainfo false +C37BC191AC389179674578C3E6944E925FE186C2.orport6_address 2a01:4f8:a0:746a:101:1:1:1 +C37BC191AC389179674578C3E6944E925FE186C2.orport6_port 1337 +C414F28FD2BEC1553024299B31D4E726BEB8E788.address 188.138.112.60 +C414F28FD2BEC1553024299B31D4E726BEB8E788.or_port 1521 +C414F28FD2BEC1553024299B31D4E726BEB8E788.dir_port 1433 +C414F28FD2BEC1553024299B31D4E726BEB8E788.nickname zebra620 +C414F28FD2BEC1553024299B31D4E726BEB8E788.has_extrainfo false +C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.address 199.249.223.66 
+C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.or_port 443 +C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.dir_port 80 +C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.nickname Quintex17 +C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.has_extrainfo false +CE47F0356D86CF0A1A2008D97623216D560FB0A8.address 85.25.213.211 +CE47F0356D86CF0A1A2008D97623216D560FB0A8.or_port 80 +CE47F0356D86CF0A1A2008D97623216D560FB0A8.dir_port 465 +CE47F0356D86CF0A1A2008D97623216D560FB0A8.nickname BeastieJoy61 +CE47F0356D86CF0A1A2008D97623216D560FB0A8.has_extrainfo false +CED527EAC230E7B56E5B363F839671829C3BA01B.address 51.15.13.245 +CED527EAC230E7B56E5B363F839671829C3BA01B.or_port 9001 +CED527EAC230E7B56E5B363F839671829C3BA01B.dir_port 9030 +CED527EAC230E7B56E5B363F839671829C3BA01B.nickname 0x3d006 +CED527EAC230E7B56E5B363F839671829C3BA01B.has_extrainfo false +D30E9D4D639068611D6D96861C95C2099140B805.address 46.38.237.221 +D30E9D4D639068611D6D96861C95C2099140B805.or_port 9001 +D30E9D4D639068611D6D96861C95C2099140B805.dir_port 9030 +D30E9D4D639068611D6D96861C95C2099140B805.nickname mine +D30E9D4D639068611D6D96861C95C2099140B805.has_extrainfo false +D3E5EDDBE5159388704D6785BE51930AAFACEC6F.address 31.171.155.108 +D3E5EDDBE5159388704D6785BE51930AAFACEC6F.or_port 9001 +D3E5EDDBE5159388704D6785BE51930AAFACEC6F.dir_port 9030 +D3E5EDDBE5159388704D6785BE51930AAFACEC6F.nickname TorNodeAlbania +D3E5EDDBE5159388704D6785BE51930AAFACEC6F.has_extrainfo false +D64366987CB39F61AD21DBCF8142FA0577B92811.address 37.221.162.226 +D64366987CB39F61AD21DBCF8142FA0577B92811.or_port 9001 +D64366987CB39F61AD21DBCF8142FA0577B92811.dir_port 9030 +D64366987CB39F61AD21DBCF8142FA0577B92811.nickname kasperskytor01 +D64366987CB39F61AD21DBCF8142FA0577B92811.has_extrainfo false +D760C5B436E42F93D77EF2D969157EEA14F9B39C.address 46.101.169.151 +D760C5B436E42F93D77EF2D969157EEA14F9B39C.or_port 9001 +D760C5B436E42F93D77EF2D969157EEA14F9B39C.dir_port 9030 +D760C5B436E42F93D77EF2D969157EEA14F9B39C.nickname DanWin1210 
+D760C5B436E42F93D77EF2D969157EEA14F9B39C.has_extrainfo false +D760C5B436E42F93D77EF2D969157EEA14F9B39C.orport6_address 2a03:b0c0:3:d0::74f:a001 +D760C5B436E42F93D77EF2D969157EEA14F9B39C.orport6_port 9001 +D8B7A3A6542AA54D0946B9DC0257C53B6C376679.address 85.10.201.47 +D8B7A3A6542AA54D0946B9DC0257C53B6C376679.or_port 9001 +D8B7A3A6542AA54D0946B9DC0257C53B6C376679.dir_port 9030 +D8B7A3A6542AA54D0946B9DC0257C53B6C376679.nickname sif +D8B7A3A6542AA54D0946B9DC0257C53B6C376679.has_extrainfo false +D8B7A3A6542AA54D0946B9DC0257C53B6C376679.orport6_address 2a01:4f8:a0:43eb::beef +D8B7A3A6542AA54D0946B9DC0257C53B6C376679.orport6_port 9001 +DAA39FC00B196B353C2A271459C305C429AF09E4.address 193.35.52.53 +DAA39FC00B196B353C2A271459C305C429AF09E4.or_port 9001 +DAA39FC00B196B353C2A271459C305C429AF09E4.dir_port 9030 +DAA39FC00B196B353C2A271459C305C429AF09E4.nickname Arne +DAA39FC00B196B353C2A271459C305C429AF09E4.has_extrainfo false +DD823AFB415380A802DCAEB9461AE637604107FB.address 178.33.183.251 +DD823AFB415380A802DCAEB9461AE637604107FB.or_port 443 +DD823AFB415380A802DCAEB9461AE637604107FB.dir_port 80 +DD823AFB415380A802DCAEB9461AE637604107FB.nickname grenouille +DD823AFB415380A802DCAEB9461AE637604107FB.has_extrainfo false +DD823AFB415380A802DCAEB9461AE637604107FB.orport6_address 2001:41d0:2:a683::251 +DD823AFB415380A802DCAEB9461AE637604107FB.orport6_port 443 +DD8BD7307017407FCC36F8D04A688F74A0774C02.address 171.25.193.20 +DD8BD7307017407FCC36F8D04A688F74A0774C02.or_port 443 +DD8BD7307017407FCC36F8D04A688F74A0774C02.dir_port 80 +DD8BD7307017407FCC36F8D04A688F74A0774C02.nickname DFRI0 +DD8BD7307017407FCC36F8D04A688F74A0774C02.has_extrainfo false +DD8BD7307017407FCC36F8D04A688F74A0774C02.orport6_address 2001:67c:289c::20 +DD8BD7307017407FCC36F8D04A688F74A0774C02.orport6_port 443 +DED6892FF89DBD737BA689698A171B2392EB3E82.address 92.222.38.67 +DED6892FF89DBD737BA689698A171B2392EB3E82.or_port 443 +DED6892FF89DBD737BA689698A171B2392EB3E82.dir_port 80 
+DED6892FF89DBD737BA689698A171B2392EB3E82.nickname ThorExit +DED6892FF89DBD737BA689698A171B2392EB3E82.has_extrainfo false +E3DB2E354B883B59E8DC56B3E7A353DDFD457812.address 166.70.207.2 +E3DB2E354B883B59E8DC56B3E7A353DDFD457812.or_port 9001 +E3DB2E354B883B59E8DC56B3E7A353DDFD457812.dir_port 9030 +E3DB2E354B883B59E8DC56B3E7A353DDFD457812.nickname xmission +E3DB2E354B883B59E8DC56B3E7A353DDFD457812.has_extrainfo false +E480D577F58E782A5BC4FA6F49A6650E9389302F.address 199.249.223.43 +E480D577F58E782A5BC4FA6F49A6650E9389302F.or_port 443 +E480D577F58E782A5BC4FA6F49A6650E9389302F.dir_port 80 +E480D577F58E782A5BC4FA6F49A6650E9389302F.nickname Quintex34 +E480D577F58E782A5BC4FA6F49A6650E9389302F.has_extrainfo false +E589316576A399C511A9781A73DA4545640B479D.address 46.252.26.2 +E589316576A399C511A9781A73DA4545640B479D.or_port 49991 +E589316576A399C511A9781A73DA4545640B479D.dir_port 45212 +E589316576A399C511A9781A73DA4545640B479D.nickname marlen +E589316576A399C511A9781A73DA4545640B479D.has_extrainfo false +E781F4EC69671B3F1864AE2753E0890351506329.address 176.31.180.157 +E781F4EC69671B3F1864AE2753E0890351506329.or_port 22 +E781F4EC69671B3F1864AE2753E0890351506329.dir_port 143 +E781F4EC69671B3F1864AE2753E0890351506329.nickname armbrust +E781F4EC69671B3F1864AE2753E0890351506329.has_extrainfo false +E781F4EC69671B3F1864AE2753E0890351506329.orport6_address 2001:41d0:8:eb9d::1 +E781F4EC69671B3F1864AE2753E0890351506329.orport6_port 22 +E81EF60A73B3809F8964F73766B01BAA0A171E20.address 212.47.244.38 +E81EF60A73B3809F8964F73766B01BAA0A171E20.or_port 443 +E81EF60A73B3809F8964F73766B01BAA0A171E20.dir_port 8080 +E81EF60A73B3809F8964F73766B01BAA0A171E20.nickname Chimborazo +E81EF60A73B3809F8964F73766B01BAA0A171E20.has_extrainfo false +EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.address 217.182.75.181 +EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.or_port 9001 +EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.dir_port 9030 +EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.nickname Aerodynamik02 
+EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.has_extrainfo false +F4263275CF54A6836EE7BD527B1328836A6F06E1.address 37.187.102.108 +F4263275CF54A6836EE7BD527B1328836A6F06E1.or_port 443 +F4263275CF54A6836EE7BD527B1328836A6F06E1.dir_port 80 +F4263275CF54A6836EE7BD527B1328836A6F06E1.nickname EvilMoe +F4263275CF54A6836EE7BD527B1328836A6F06E1.has_extrainfo false +F4263275CF54A6836EE7BD527B1328836A6F06E1.orport6_address 2001:41d0:a:266c::1 +F4263275CF54A6836EE7BD527B1328836A6F06E1.orport6_port 443 +F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.address 46.28.109.231 +F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.or_port 9001 +F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.dir_port 9030 +F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.nickname wedostor +F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.has_extrainfo false +F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.orport6_address 2a02:2b88:2:1::4205:1 +F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.orport6_port 9001 +F93D8F37E35C390BCAD9F9069E13085B745EC216.address 185.96.180.29 +F93D8F37E35C390BCAD9F9069E13085B745EC216.or_port 443 +F93D8F37E35C390BCAD9F9069E13085B745EC216.dir_port 80 +F93D8F37E35C390BCAD9F9069E13085B745EC216.nickname TykRelay06 +F93D8F37E35C390BCAD9F9069E13085B745EC216.has_extrainfo false +FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.address 86.59.119.83 +FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.or_port 443 +FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.dir_port 80 +FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.nickname ph3x +FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.has_extrainfo false +FE296180018833AF03A8EACD5894A614623D3F76.address 149.56.45.200 +FE296180018833AF03A8EACD5894A614623D3F76.or_port 9001 +FE296180018833AF03A8EACD5894A614623D3F76.dir_port 9030 +FE296180018833AF03A8EACD5894A614623D3F76.nickname PiotrTorpotkinOne +FE296180018833AF03A8EACD5894A614623D3F76.has_extrainfo false +FE296180018833AF03A8EACD5894A614623D3F76.orport6_address 2607:5300:201:3000::17d3 +FE296180018833AF03A8EACD5894A614623D3F76.orport6_port 9002 diff --git 
a/Shared/lib/python3.4/site-packages/stem/cached_manual.sqlite b/Shared/lib/python3.4/site-packages/stem/cached_manual.sqlite new file mode 100644 index 0000000..9fe8c7a Binary files /dev/null and b/Shared/lib/python3.4/site-packages/stem/cached_manual.sqlite differ diff --git a/Shared/lib/python3.4/site-packages/stem/client/__init__.py b/Shared/lib/python3.4/site-packages/stem/client/__init__.py new file mode 100644 index 0000000..8e8bb11 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/client/__init__.py @@ -0,0 +1,287 @@ +# Copyright 2018, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Interaction with a Tor relay's ORPort. :class:`~stem.client.Relay` is +a wrapper for :class:`~stem.socket.RelaySocket`, much the same way as +:class:`~stem.control.Controller` provides higher level functions for +:class:`~stem.socket.ControlSocket`. + +.. versionadded:: 1.7.0 + +:: + + Relay - Connection with a tor relay's ORPort. + | +- connect - Establishes a connection with a relay. + | + |- is_alive - reports if our connection is open or closed + |- connection_time - time when we last connected or disconnected + |- close - shuts down our connection + | + +- create_circuit - establishes a new circuit + + Circuit - Circuit we've established through a relay. + |- send - sends a message through this circuit + +- close - closes this circuit +""" + +import hashlib +import threading + +import stem +import stem.client.cell +import stem.socket +import stem.util.connection + +from stem.client.datatype import ZERO, LinkProtocol, Address, KDF, split + +__all__ = [ + 'cell', + 'datatype', +] + +DEFAULT_LINK_PROTOCOLS = (3, 4, 5) + + +class Relay(object): + """ + Connection with a Tor relay's ORPort. 
+ + :var int link_protocol: link protocol version we established + """ + + def __init__(self, orport, link_protocol): + self.link_protocol = LinkProtocol(link_protocol) + self._orport = orport + self._orport_lock = threading.RLock() + self._circuits = {} + + @staticmethod + def connect(address, port, link_protocols = DEFAULT_LINK_PROTOCOLS): + """ + Establishes a connection with the given ORPort. + + :param str address: ip address of the relay + :param int port: ORPort of the relay + :param tuple link_protocols: acceptable link protocol versions + + :raises: + * **ValueError** if address or port are invalid + * :class:`stem.SocketError` if we're unable to establish a connection + """ + + relay_addr = Address(address) + + if not stem.util.connection.is_valid_port(port): + raise ValueError("'%s' isn't a valid port" % port) + elif not link_protocols: + raise ValueError("Connection can't be established without a link protocol.") + + try: + conn = stem.socket.RelaySocket(address, port) + except stem.SocketError as exc: + if 'Connection refused' in str(exc): + raise stem.SocketError("Failed to connect to %s:%i. Maybe it isn't an ORPort?" % (address, port)) + + # If not an ORPort (for instance, mistakenly connecting to a ControlPort + # instead) we'll likely fail during SSL negotiation. This can result + # in a variety of responses so normalizing what we can... + # + # Debian 9.5: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:661) + # Ubuntu 16.04: [SSL: UNKNOWN_PROTOCOL] unknown protocol (_ssl.c:590) + # Ubuntu 12.04: [Errno 1] _ssl.c:504: error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol + + if 'unknown protocol' in str(exc) or 'wrong version number' in str(exc): + raise stem.SocketError("Failed to SSL authenticate to %s:%i. Maybe it isn't an ORPort?" 
% (address, port)) + + raise + + # To negotiate our link protocol the first VERSIONS cell is expected to use + # a circuit ID field size from protocol version 1-3 for backward + # compatibility... + # + # The first VERSIONS cell, and any cells sent before the + # first VERSIONS cell, always have CIRCID_LEN == 2 for backward + # compatibility. + + conn.send(stem.client.cell.VersionsCell(link_protocols).pack(2)) + response = conn.recv() + + # Link negotiation ends right away if we lack a common protocol + # version. (#25139) + + if not response: + conn.close() + raise stem.SocketError('Unable to establish a common link protocol with %s:%i' % (address, port)) + + versions_reply = stem.client.cell.Cell.pop(response, 2)[0] + common_protocols = set(link_protocols).intersection(versions_reply.versions) + + if not common_protocols: + conn.close() + raise stem.SocketError('Unable to find a common link protocol. We support %s but %s:%i supports %s.' % (', '.join(link_protocols), address, port, ', '.join(versions_reply.versions))) + + # Establishing connections requires sending a NETINFO, but including our + # address is optional. We can revisit including it when we have a usecase + # where it would help. + + link_protocol = max(common_protocols) + conn.send(stem.client.cell.NetinfoCell(relay_addr, []).pack(link_protocol)) + + return Relay(conn, link_protocol) + + def is_alive(self): + """ + Checks if our socket is currently connected. This is a pass-through for our + socket's :func:`~stem.socket.BaseSocket.is_alive` method. + + :returns: **bool** that's **True** if our socket is connected and **False** otherwise + """ + + return self._orport.is_alive() + + def connection_time(self): + """ + Provides the unix timestamp for when our socket was either connected or + disconnected. That is to say, the time we connected if we're currently + connected and the time we disconnected if we're not connected. 
+ + :returns: **float** for when we last connected or disconnected, zero if + we've never connected + """ + + return self._orport.connection_time() + + def close(self): + """ + Closes our socket connection. This is a pass-through for our socket's + :func:`~stem.socket.BaseSocket.close` method. + """ + + with self._orport_lock: + return self._orport.close() + + def create_circuit(self): + """ + Establishes a new circuit. + """ + + with self._orport_lock: + circ_id = max(self._circuits) + 1 if self._circuits else self.link_protocol.first_circ_id + + create_fast_cell = stem.client.cell.CreateFastCell(circ_id) + self._orport.send(create_fast_cell.pack(self.link_protocol)) + + response = stem.client.cell.Cell.unpack(self._orport.recv(), self.link_protocol) + created_fast_cells = filter(lambda cell: isinstance(cell, stem.client.cell.CreatedFastCell), response) + + if not created_fast_cells: + raise ValueError('We should get a CREATED_FAST response from a CREATE_FAST request') + + created_fast_cell = list(created_fast_cells)[0] + kdf = KDF.from_value(create_fast_cell.key_material + created_fast_cell.key_material) + + if created_fast_cell.derivative_key != kdf.key_hash: + raise ValueError('Remote failed to prove that it knows our shared key') + + circ = Circuit(self, circ_id, kdf) + self._circuits[circ.id] = circ + + return circ + + def __iter__(self): + with self._orport_lock: + for circ in self._circuits.values(): + yield circ + + def __enter__(self): + return self + + def __exit__(self, exit_type, value, traceback): + self.close() + + +class Circuit(object): + """ + Circuit through which requests can be made of a `Tor relay's ORPort + `_. 
+ + :var stem.client.Relay relay: relay through which this circuit has been established + :var int id: circuit id + :var hashlib.sha1 forward_digest: digest for forward integrity check + :var hashlib.sha1 backward_digest: digest for backward integrity check + :var bytes forward_key: forward encryption key + :var bytes backward_key: backward encryption key + """ + + def __init__(self, relay, circ_id, kdf): + if not stem.prereq.is_crypto_available(): + raise ImportError('Circuit construction requires the cryptography module') + + from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + from cryptography.hazmat.backends import default_backend + + ctr = modes.CTR(ZERO * (algorithms.AES.block_size // 8)) + + self.relay = relay + self.id = circ_id + self.forward_digest = hashlib.sha1(kdf.forward_digest) + self.backward_digest = hashlib.sha1(kdf.backward_digest) + self.forward_key = Cipher(algorithms.AES(kdf.forward_key), ctr, default_backend()).encryptor() + self.backward_key = Cipher(algorithms.AES(kdf.backward_key), ctr, default_backend()).decryptor() + + def send(self, command, data = '', stream_id = 0): + """ + Sends a message over the circuit. + + :param stem.client.datatype.RelayCommand command: command to be issued + :param bytes data: message payload + :param int stream_id: specific stream this concerns + + :returns: **list** of :class:`~stem.client.cell.RelayCell` responses + """ + + with self.relay._orport_lock: + # Encrypt and send the cell. Our digest/key only updates if the cell is + # successfully sent. + + cell = stem.client.cell.RelayCell(self.id, command, data, stream_id = stream_id) + payload, forward_key, forward_digest = cell.encrypt(self.relay.link_protocol, self.forward_key, self.forward_digest) + self.relay._orport.send(payload) + + self.forward_digest = forward_digest + self.forward_key = forward_key + + # Decrypt relay cells received in response. Again, our digest/key only + # updates when handled successfully. 
+ + reply = self.relay._orport.recv() + reply_cells = [] + + if len(reply) % self.relay.link_protocol.fixed_cell_length != 0: + raise stem.ProtocolError('Circuit response should be a series of RELAY cells, but received an unexpected size for a response: %i' % len(reply)) + + while reply: + encrypted_cell, reply = split(reply, self.relay.link_protocol.fixed_cell_length) + decrypted_cell, backward_key, backward_digest = stem.client.cell.RelayCell.decrypt(self.relay.link_protocol, encrypted_cell, self.backward_key, self.backward_digest) + + if self.id != decrypted_cell.circ_id: + raise stem.ProtocolError('Response should be for circuit id %i, not %i' % (self.id, decrypted_cell.circ_id)) + + self.backward_digest = backward_digest + self.backward_key = backward_key + + reply_cells.append(decrypted_cell) + + return reply_cells + + def close(self): + with self.relay._orport_lock: + self.relay._orport.send(stem.client.cell.DestroyCell(self.id).pack(self.relay.link_protocol)) + del self.relay._circuits[self.id] + + def __enter__(self): + return self + + def __exit__(self, exit_type, value, traceback): + self.close() diff --git a/Shared/lib/python3.4/site-packages/stem/client/cell.py b/Shared/lib/python3.4/site-packages/stem/client/cell.py new file mode 100644 index 0000000..91dec14 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/client/cell.py @@ -0,0 +1,859 @@ +# Copyright 2018, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Messages communicated over a Tor relay's ORPort. + +.. versionadded:: 1.7.0 + +**Module Overview:** + +:: + + Cell - Base class for ORPort messages. + |- CircuitCell - Circuit management. + | |- CreateCell - Create a circuit. (section 5.1) + | |- CreatedCell - Acknowledge create. (section 5.1) + | |- RelayCell - End-to-end data. (section 6.1) + | |- DestroyCell - Stop using a circuit. (section 5.4) + | |- CreateFastCell - Create a circuit, no PK. 
(section 5.1) + | |- CreatedFastCell - Circuit created, no PK. (section 5.1) + | |- RelayEarlyCell - End-to-end data; limited. (section 5.6) + | |- Create2Cell - Extended CREATE cell. (section 5.1) + | +- Created2Cell - Extended CREATED cell. (section 5.1) + | + |- PaddingCell - Padding negotiation. (section 7.2) + |- VersionsCell - Negotiate proto version. (section 4) + |- NetinfoCell - Time and address info. (section 4.5) + |- PaddingNegotiateCell - Padding negotiation. (section 7.2) + |- VPaddingCell - Variable-length padding. (section 7.2) + |- CertsCell - Relay certificates. (section 4.2) + |- AuthChallengeCell - Challenge value. (section 4.3) + |- AuthenticateCell - Client authentication. (section 4.5) + |- AuthorizeCell - Client authorization. (not yet used) + | + |- pack - encodes cell into bytes + |- unpack - decodes series of cells + +- pop - decodes cell with remainder +""" + +import copy +import datetime +import inspect +import os +import sys + +import stem.util + +from stem import UNDEFINED +from stem.client.datatype import HASH_LEN, ZERO, LinkProtocol, Address, Certificate, CloseReason, RelayCommand, Size, split +from stem.util import datetime_to_unix, str_tools + +FIXED_PAYLOAD_LEN = 509 # PAYLOAD_LEN, per tor-spec section 0.2 +AUTH_CHALLENGE_SIZE = 32 +RELAY_DIGEST_SIZE = Size.LONG + +STREAM_ID_REQUIRED = ( + RelayCommand.BEGIN, + RelayCommand.DATA, + RelayCommand.END, + RelayCommand.CONNECTED, + RelayCommand.RESOLVE, + RelayCommand.RESOLVED, + RelayCommand.BEGIN_DIR, +) + +STREAM_ID_DISALLOWED = ( + RelayCommand.EXTEND, + RelayCommand.EXTENDED, + RelayCommand.TRUNCATE, + RelayCommand.TRUNCATED, + RelayCommand.DROP, + RelayCommand.EXTEND2, + RelayCommand.EXTENDED2, +) + + +class Cell(object): + """ + Metadata for ORPort cells. + + Unused padding are **not** used in equality checks or hashing. If two cells + differ only in their *unused* attribute they are functionally equal. 
+ + The following cell types explicitly don't have *unused* content: + * PaddingCell (we consider all content part of payload) + * VersionsCell (all content is unpacked and treated as a version specification) + * VPaddingCell (we consider all content part of payload) + + :var bytes unused: unused filler that padded the cell to the expected size + """ + + NAME = 'UNKNOWN' + VALUE = -1 + IS_FIXED_SIZE = False + + def __init__(self, unused = b''): + super(Cell, self).__init__() + self.unused = unused + + @staticmethod + def by_name(name): + """ + Provides cell attributes by its name. + + :param str name: cell command to fetch + + :raises: **ValueError** if cell type is invalid + """ + + for _, cls in inspect.getmembers(sys.modules[__name__]): + if name == getattr(cls, 'NAME', UNDEFINED): + return cls + + raise ValueError("'%s' isn't a valid cell type" % name) + + @staticmethod + def by_value(value): + """ + Provides cell attributes by its value. + + :param int value: cell value to fetch + + :raises: **ValueError** if cell type is invalid + """ + + for _, cls in inspect.getmembers(sys.modules[__name__]): + if value == getattr(cls, 'VALUE', UNDEFINED): + return cls + + raise ValueError("'%s' isn't a valid cell value" % value) + + def pack(self, link_protocol): + raise NotImplementedError('Packing not yet implemented for %s cells' % type(self).NAME) + + @staticmethod + def unpack(content, link_protocol): + """ + Unpacks all cells from a response. + + :param bytes content: payload to decode + :param int link_protocol: link protocol version + + :returns: :class:`~stem.client.cell.Cell` generator + + :raises: + * ValueError if content is malformed + * NotImplementedError if unable to unpack any of the cell types + """ + + while content: + cell, content = Cell.pop(content, link_protocol) + yield cell + + @staticmethod + def pop(content, link_protocol): + """ + Unpacks the first cell. 
+ + :param bytes content: payload to decode + :param int link_protocol: link protocol version + + :returns: (:class:`~stem.client.cell.Cell`, remainder) tuple + + :raises: + * ValueError if content is malformed + * NotImplementedError if unable to unpack this cell type + """ + + link_protocol = LinkProtocol(link_protocol) + + circ_id, content = link_protocol.circ_id_size.pop(content) + command, content = Size.CHAR.pop(content) + cls = Cell.by_value(command) + + if cls.IS_FIXED_SIZE: + payload_len = FIXED_PAYLOAD_LEN + else: + payload_len, content = Size.SHORT.pop(content) + + if len(content) < payload_len: + raise ValueError('%s cell should have a payload of %i bytes, but only had %i' % (cls.NAME, payload_len, len(content))) + + payload, content = split(content, payload_len) + return cls._unpack(payload, circ_id, link_protocol), content + + @classmethod + def _pack(cls, link_protocol, payload, unused = b'', circ_id = None): + """ + Provides bytes that can be used on the wire for these cell attributes. + Format of a properly packed cell depends on if it's fixed or variable + sized... 
+ + :: + + Fixed: [ CircuitID ][ Command ][ Payload ][ Padding ] + Variable: [ CircuitID ][ Command ][ Size ][ Payload ] + + :param str name: cell command + :param int link_protocol: link protocol version + :param bytes payload: cell payload + :param int circ_id: circuit id, if a CircuitCell + + :returns: **bytes** with the encoded payload + + :raises: **ValueError** if cell type invalid or payload makes cell too large + """ + + if issubclass(cls, CircuitCell): + if circ_id is None: + raise ValueError('%s cells require a circuit identifier' % cls.NAME) + elif circ_id < 1: + raise ValueError('Circuit identifiers must a positive integer, not %s' % circ_id) + else: + if circ_id is not None: + raise ValueError('%s cells should not specify a circuit identifier' % cls.NAME) + + circ_id = 0 # cell doesn't concern a circuit, default field to zero + + link_protocol = LinkProtocol(link_protocol) + + cell = bytearray() + cell += link_protocol.circ_id_size.pack(circ_id) + cell += Size.CHAR.pack(cls.VALUE) + cell += b'' if cls.IS_FIXED_SIZE else Size.SHORT.pack(len(payload) + len(unused)) + cell += payload + + # include the unused portion (typically from unpacking) + cell += unused + + # pad fixed sized cells to the required length + + if cls.IS_FIXED_SIZE: + if len(cell) > link_protocol.fixed_cell_length: + raise ValueError('Cell of type %s is too large (%i bytes), must not be more than %i. Check payload size (was %i bytes)' % (cls.NAME, len(cell), link_protocol.fixed_cell_length, len(payload))) + + cell += ZERO * (link_protocol.fixed_cell_length - len(cell)) + + return bytes(cell) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + """ + Subclass implementation for unpacking cell content. 
+ + :param bytes content: payload to decode + :param stem.client.datatype.LinkProtocol link_protocol: link protocol version + :param int circ_id: circuit id cell is for + + :returns: instance of this cell type + + :raises: **ValueError** if content is malformed + """ + + raise NotImplementedError('Unpacking not yet implemented for %s cells' % cls.NAME) + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, Cell) else False + + def __ne__(self, other): + return not self == other + + +class CircuitCell(Cell): + """ + Cell concerning circuits. + + :var int circ_id: circuit id + """ + + def __init__(self, circ_id, unused = b''): + super(CircuitCell, self).__init__(unused) + self.circ_id = circ_id + + +class PaddingCell(Cell): + """ + Randomized content to either keep activity going on a circuit. + + :var bytes payload: randomized payload + """ + + NAME = 'PADDING' + VALUE = 0 + IS_FIXED_SIZE = True + + def __init__(self, payload = None): + if not payload: + payload = os.urandom(FIXED_PAYLOAD_LEN) + elif len(payload) != FIXED_PAYLOAD_LEN: + raise ValueError('Padding payload should be %i bytes, but was %i' % (FIXED_PAYLOAD_LEN, len(payload))) + + super(PaddingCell, self).__init__() + self.payload = payload + + def pack(self, link_protocol): + return PaddingCell._pack(link_protocol, self.payload) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + return PaddingCell(content) + + def __hash__(self): + return stem.util._hash_attr(self, 'payload', cache = True) + + +class CreateCell(CircuitCell): + NAME = 'CREATE' + VALUE = 1 + IS_FIXED_SIZE = True + + def __init__(self): + super(CreateCell, self).__init__() # TODO: implement + + +class CreatedCell(CircuitCell): + NAME = 'CREATED' + VALUE = 2 + IS_FIXED_SIZE = True + + def __init__(self): + super(CreatedCell, self).__init__() # TODO: implement + + +class RelayCell(CircuitCell): + """ + Command concerning a relay circuit. 
+ + Our 'recognized' attribute provides a cheap (but incomplete) check for if our + cell payload is encrypted. If non-zero our payload *IS* encrypted, but if + zero we're *PROBABLY* fully decrypted. This uncertainty is because encrypted + cells have a small chance of coincidently producing zero for this value as + well. + + :var stem.client.RelayCommand command: command to be issued + :var int command_int: integer value of our command + :var bytes data: payload of the cell + :var int recognized: non-zero if payload is encrypted + :var int digest: running digest held with the relay + :var int stream_id: specific stream this concerns + """ + + NAME = 'RELAY' + VALUE = 3 + IS_FIXED_SIZE = True + + def __init__(self, circ_id, command, data, digest = 0, stream_id = 0, recognized = 0, unused = b''): + if 'HASH' in str(type(digest)): + # Unfortunately hashlib generates from a dynamic private class so + # isinstance() isn't such a great option. With python2/python3 the + # name is 'hashlib.HASH' whereas PyPy calls it just 'HASH'. 
+ + digest_packed = digest.digest()[:RELAY_DIGEST_SIZE.size] + digest = RELAY_DIGEST_SIZE.unpack(digest_packed) + elif stem.util._is_str(digest): + digest_packed = digest[:RELAY_DIGEST_SIZE.size] + digest = RELAY_DIGEST_SIZE.unpack(digest_packed) + elif stem.util._is_int(digest): + pass + else: + raise ValueError('RELAY cell digest must be a hash, string, or int but was a %s' % type(digest).__name__) + + super(RelayCell, self).__init__(circ_id, unused) + self.command, self.command_int = RelayCommand.get(command) + self.recognized = recognized + self.stream_id = stream_id + self.digest = digest + self.data = str_tools._to_bytes(data) + + if digest == 0: + if not stream_id and self.command in STREAM_ID_REQUIRED: + raise ValueError('%s relay cells require a stream id' % self.command) + elif stream_id and self.command in STREAM_ID_DISALLOWED: + raise ValueError('%s relay cells concern the circuit itself and cannot have a stream id' % self.command) + + def pack(self, link_protocol): + payload = bytearray() + payload += Size.CHAR.pack(self.command_int) + payload += Size.SHORT.pack(self.recognized) + payload += Size.SHORT.pack(self.stream_id) + payload += Size.LONG.pack(self.digest) + payload += Size.SHORT.pack(len(self.data)) + payload += self.data + + return RelayCell._pack(link_protocol, bytes(payload), self.unused, self.circ_id) + + @staticmethod + def decrypt(link_protocol, content, key, digest): + """ + Decrypts content as a relay cell addressed to us. This provides back a + tuple of the form... 
+ + :: + + (cell (RelayCell), new_key (CipherContext), new_digest (HASH)) + + :param int link_protocol: link protocol version + :param bytes content: cell content to be decrypted + :param cryptography.hazmat.primitives.ciphers.CipherContext key: + key established with the relay we received this cell from + :param HASH digest: running digest held with the relay + + :returns: **tuple** with our decrypted cell and updated key/digest + + :raises: :class:`stem.ProtocolError` if content doesn't belong to a relay + cell + """ + + new_key = copy.copy(key) + new_digest = digest.copy() + + if len(content) != link_protocol.fixed_cell_length: + raise stem.ProtocolError('RELAY cells should be %i bytes, but received %i' % (link_protocol.fixed_cell_length, len(content))) + + circ_id, content = link_protocol.circ_id_size.pop(content) + command, encrypted_payload = Size.CHAR.pop(content) + + if command != RelayCell.VALUE: + raise stem.ProtocolError('Cannot decrypt as a RELAY cell. This had command %i instead.' % command) + + payload = new_key.update(encrypted_payload) + + cell = RelayCell._unpack(payload, circ_id, link_protocol) + + # TODO: Implement our decryption digest. It is used to support relaying + # within multi-hop circuits. On first glance this should go something + # like... + # + # # Our updated digest is calculated based on this cell with a blanked + # # digest field. + # + # digest_cell = RelayCell(self.circ_id, self.command, self.data, 0, self.stream_id, self.recognized, self.unused) + # new_digest.update(digest_cell.pack(link_protocol)) + # + # is_encrypted == cell.recognized != 0 or self.digest == new_digest + # + # ... or something like that. Until we attempt to support relaying this is + # both moot and difficult to exercise in order to ensure we get it right. + + return cell, new_key, new_digest + + def encrypt(self, link_protocol, key, digest): + """ + Encrypts our cell content to be sent with the given key. This provides back + a tuple of the form... 
+ + :: + + (payload (bytes), new_key (CipherContext), new_digest (HASH)) + + :param int link_protocol: link protocol version + :param cryptography.hazmat.primitives.ciphers.CipherContext key: + key established with the relay we're sending this cell to + :param HASH digest: running digest held with the relay + + :returns: **tuple** with our encrypted payload and updated key/digest + """ + + new_key = copy.copy(key) + new_digest = digest.copy() + + # Digests are computed from our payload, not including our header's circuit + # id (2 or 4 bytes) and command (1 byte). + + header_size = link_protocol.circ_id_size.size + 1 + payload_without_digest = self.pack(link_protocol)[header_size:] + new_digest.update(payload_without_digest) + + # Pack a copy of ourselves with our newly calculated digest, and encrypt + # the payload. Header remains plaintext. + + cell = RelayCell(self.circ_id, self.command, self.data, new_digest, self.stream_id, self.recognized, self.unused) + header, payload = split(cell.pack(link_protocol), header_size) + + return header + new_key.update(payload), new_key, new_digest + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + command, content = Size.CHAR.pop(content) + recognized, content = Size.SHORT.pop(content) # 'recognized' field + stream_id, content = Size.SHORT.pop(content) + digest, content = Size.LONG.pop(content) + data_len, content = Size.SHORT.pop(content) + data, unused = split(content, data_len) + + if len(data) != data_len: + raise ValueError('%s cell said it had %i bytes of data, but only had %i' % (cls.NAME, data_len, len(data))) + + return RelayCell(circ_id, command, data, digest, stream_id, recognized, unused) + + def __hash__(self): + return stem.util._hash_attr(self, 'command_int', 'stream_id', 'digest', 'data', cache = True) + + +class DestroyCell(CircuitCell): + """ + Closes the given circuit. 
+ + :var stem.client.CloseReason reason: reason the circuit is being closed + :var int reason_int: integer value of our closure reason + """ + + NAME = 'DESTROY' + VALUE = 4 + IS_FIXED_SIZE = True + + def __init__(self, circ_id, reason = CloseReason.NONE, unused = b''): + super(DestroyCell, self).__init__(circ_id, unused) + self.reason, self.reason_int = CloseReason.get(reason) + + def pack(self, link_protocol): + return DestroyCell._pack(link_protocol, Size.CHAR.pack(self.reason_int), self.unused, self.circ_id) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + reason, unused = Size.CHAR.pop(content) + return DestroyCell(circ_id, reason, unused) + + def __hash__(self): + return stem.util._hash_attr(self, 'circ_id', 'reason_int', cache = True) + + +class CreateFastCell(CircuitCell): + """ + Create a circuit with our first hop. This is lighter weight than further hops + because we've already established the relay's identity and secret key. + + :var bytes key_material: randomized key material + """ + + NAME = 'CREATE_FAST' + VALUE = 5 + IS_FIXED_SIZE = True + + def __init__(self, circ_id, key_material = None, unused = b''): + if not key_material: + key_material = os.urandom(HASH_LEN) + elif len(key_material) != HASH_LEN: + raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material))) + + super(CreateFastCell, self).__init__(circ_id, unused) + self.key_material = key_material + + def pack(self, link_protocol): + return CreateFastCell._pack(link_protocol, self.key_material, self.unused, self.circ_id) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + key_material, unused = split(content, HASH_LEN) + + if len(key_material) != HASH_LEN: + raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material))) + + return CreateFastCell(circ_id, key_material, unused) + + def __hash__(self): + return stem.util._hash_attr(self, 'circ_id', 'key_material', cache = True) + + 
+class CreatedFastCell(CircuitCell): + """ + CREATE_FAST reply. + + :var bytes key_material: randomized key material + :var bytes derivative_key: hash proving the relay knows our shared key + """ + + NAME = 'CREATED_FAST' + VALUE = 6 + IS_FIXED_SIZE = True + + def __init__(self, circ_id, derivative_key, key_material = None, unused = b''): + if not key_material: + key_material = os.urandom(HASH_LEN) + elif len(key_material) != HASH_LEN: + raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material))) + + if len(derivative_key) != HASH_LEN: + raise ValueError('Derivatived key should be %i bytes, but was %i' % (HASH_LEN, len(derivative_key))) + + super(CreatedFastCell, self).__init__(circ_id, unused) + self.key_material = key_material + self.derivative_key = derivative_key + + def pack(self, link_protocol): + return CreatedFastCell._pack(link_protocol, self.key_material + self.derivative_key, self.unused, self.circ_id) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + if len(content) < HASH_LEN * 2: + raise ValueError('Key material and derivatived key should be %i bytes, but was %i' % (HASH_LEN * 2, len(content))) + + key_material, content = split(content, HASH_LEN) + derivative_key, content = split(content, HASH_LEN) + + return CreatedFastCell(circ_id, derivative_key, key_material, content) + + def __hash__(self): + return stem.util._hash_attr(self, 'circ_id', 'derivative_key', 'key_material', cache = True) + + +class VersionsCell(Cell): + """ + Link version negotiation cell. 
+ + :var list versions: link versions + """ + + NAME = 'VERSIONS' + VALUE = 7 + IS_FIXED_SIZE = False + + def __init__(self, versions): + super(VersionsCell, self).__init__() + self.versions = versions + + def pack(self, link_protocol): + payload = b''.join([Size.SHORT.pack(v) for v in self.versions]) + return VersionsCell._pack(link_protocol, payload) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + link_protocols = [] + + while content: + version, content = Size.SHORT.pop(content) + link_protocols.append(version) + + return VersionsCell(link_protocols) + + def __hash__(self): + return stem.util._hash_attr(self, 'versions', cache = True) + + +class NetinfoCell(Cell): + """ + Information relays exchange about each other. + + :var datetime timestamp: current time + :var stem.client.Address receiver_address: receiver's OR address + :var list sender_addresses: sender's OR addresses + """ + + NAME = 'NETINFO' + VALUE = 8 + IS_FIXED_SIZE = True + + def __init__(self, receiver_address, sender_addresses, timestamp = None, unused = b''): + super(NetinfoCell, self).__init__(unused) + self.timestamp = timestamp if timestamp else datetime.datetime.now() + self.receiver_address = receiver_address + self.sender_addresses = sender_addresses + + def pack(self, link_protocol): + payload = bytearray() + payload += Size.LONG.pack(int(datetime_to_unix(self.timestamp))) + payload += self.receiver_address.pack() + payload += Size.CHAR.pack(len(self.sender_addresses)) + + for addr in self.sender_addresses: + payload += addr.pack() + + return NetinfoCell._pack(link_protocol, bytes(payload), self.unused) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + timestamp, content = Size.LONG.pop(content) + receiver_address, content = Address.pop(content) + + sender_addresses = [] + sender_addr_count, content = Size.CHAR.pop(content) + + for i in range(sender_addr_count): + addr, content = Address.pop(content) + sender_addresses.append(addr) + + return 
NetinfoCell(receiver_address, sender_addresses, datetime.datetime.utcfromtimestamp(timestamp), unused = content) + + def __hash__(self): + return stem.util._hash_attr(self, 'timestamp', 'receiver_address', 'sender_addresses', cache = True) + + +class RelayEarlyCell(CircuitCell): + NAME = 'RELAY_EARLY' + VALUE = 9 + IS_FIXED_SIZE = True + + def __init__(self): + super(RelayEarlyCell, self).__init__() # TODO: implement + + +class Create2Cell(CircuitCell): + NAME = 'CREATE2' + VALUE = 10 + IS_FIXED_SIZE = True + + def __init__(self): + super(Create2Cell, self).__init__() # TODO: implement + + +class Created2Cell(Cell): + NAME = 'CREATED2' + VALUE = 11 + IS_FIXED_SIZE = True + + def __init__(self): + super(Created2Cell, self).__init__() # TODO: implement + + +class PaddingNegotiateCell(Cell): + NAME = 'PADDING_NEGOTIATE' + VALUE = 12 + IS_FIXED_SIZE = True + + def __init__(self): + super(PaddingNegotiateCell, self).__init__() # TODO: implement + + +class VPaddingCell(Cell): + """ + Variable length randomized content to either keep activity going on a circuit. 
+ + :var bytes payload: randomized payload + """ + + NAME = 'VPADDING' + VALUE = 128 + IS_FIXED_SIZE = False + + def __init__(self, size = None, payload = None): + if size is None and payload is None: + raise ValueError('VPaddingCell constructor must specify payload or size') + elif size is not None and size < 0: + raise ValueError('VPaddingCell size (%s) cannot be negative' % size) + elif size is not None and payload is not None and size != len(payload): + raise ValueError('VPaddingCell constructor specified both a size of %i bytes and payload of %i bytes' % (size, len(payload))) + + super(VPaddingCell, self).__init__() + self.payload = payload if payload is not None else os.urandom(size) + + def pack(self, link_protocol): + return VPaddingCell._pack(link_protocol, self.payload) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + return VPaddingCell(payload = content) + + def __hash__(self): + return stem.util._hash_attr(self, 'payload', cache = True) + + +class CertsCell(Cell): + """ + Certificate held by the relay we're communicating with. 
+ + :var list certificates: :class:`~stem.client.Certificate` of the relay + """ + + NAME = 'CERTS' + VALUE = 129 + IS_FIXED_SIZE = False + + def __init__(self, certs, unused = b''): + super(CertsCell, self).__init__(unused) + self.certificates = certs + + def pack(self, link_protocol): + return CertsCell._pack(link_protocol, Size.CHAR.pack(len(self.certificates)) + b''.join([cert.pack() for cert in self.certificates]), self.unused) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + cert_count, content = Size.CHAR.pop(content) + certs = [] + + for i in range(cert_count): + if not content: + raise ValueError('CERTS cell indicates it should have %i certificates, but only contained %i' % (cert_count, len(certs))) + + cert, content = Certificate.pop(content) + certs.append(cert) + + return CertsCell(certs, unused = content) + + def __hash__(self): + return stem.util._hash_attr(self, 'certificates', cache = True) + + +class AuthChallengeCell(Cell): + """ + First step of the authentication handshake. 
+ + :var bytes challenge: random bytes for us to sign to authenticate + :var list methods: authentication methods supported by the relay we're + communicating with + """ + + NAME = 'AUTH_CHALLENGE' + VALUE = 130 + IS_FIXED_SIZE = False + + def __init__(self, methods, challenge = None, unused = b''): + if not challenge: + challenge = os.urandom(AUTH_CHALLENGE_SIZE) + elif len(challenge) != AUTH_CHALLENGE_SIZE: + raise ValueError('AUTH_CHALLENGE must be %i bytes, but was %i' % (AUTH_CHALLENGE_SIZE, len(challenge))) + + super(AuthChallengeCell, self).__init__(unused) + self.challenge = challenge + self.methods = methods + + def pack(self, link_protocol): + payload = bytearray() + payload += self.challenge + payload += Size.SHORT.pack(len(self.methods)) + + for method in self.methods: + payload += Size.SHORT.pack(method) + + return AuthChallengeCell._pack(link_protocol, bytes(payload), self.unused) + + @classmethod + def _unpack(cls, content, circ_id, link_protocol): + min_size = AUTH_CHALLENGE_SIZE + Size.SHORT.size + if len(content) < min_size: + raise ValueError('AUTH_CHALLENGE payload should be at least %i bytes, but was %i' % (min_size, len(content))) + + challenge, content = split(content, AUTH_CHALLENGE_SIZE) + method_count, content = Size.SHORT.pop(content) + + if len(content) < method_count * Size.SHORT.size: + raise ValueError('AUTH_CHALLENGE should have %i methods, but only had %i bytes for it' % (method_count, len(content))) + + methods = [] + + for i in range(method_count): + method, content = Size.SHORT.pop(content) + methods.append(method) + + return AuthChallengeCell(methods, challenge, unused = content) + + def __hash__(self): + return stem.util._hash_attr(self, 'challenge', 'methods', cache = True) + + +class AuthenticateCell(Cell): + NAME = 'AUTHENTICATE' + VALUE = 131 + IS_FIXED_SIZE = False + + def __init__(self): + super(AuthenticateCell, self).__init__() # TODO: implement + + +class AuthorizeCell(Cell): + NAME = 'AUTHORIZE' + VALUE = 132 + 
IS_FIXED_SIZE = False + + def __init__(self): + super(AuthorizeCell, self).__init__() # TODO: implement diff --git a/Shared/lib/python3.4/site-packages/stem/client/datatype.py b/Shared/lib/python3.4/site-packages/stem/client/datatype.py new file mode 100644 index 0000000..de67fcd --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/client/datatype.py @@ -0,0 +1,558 @@ +# Copyright 2018, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Support for `Tor's ORPort protocol +`_. + +**This module only consists of low level components, and is not intended for +users.** See our :class:`~stem.client.Relay` the API you probably want. + +.. versionadded:: 1.7.0 + +:: + + split - splits bytes into substrings + + LinkProtocol - ORPort protocol version. + + Field - Packable and unpackable datatype. + |- Size - Field of a static size. + |- Address - Relay address. + |- Certificate - Relay certificate. + | + |- pack - encodes content + |- unpack - decodes content + +- pop - decodes content with remainder + + KDF - KDF-TOR derivatived attributes + +- from_value - parses key material + +.. data:: AddrType (enum) + + Form an address takes. + + ===================== =========== + AddressType Description + ===================== =========== + **HOSTNAME** relay hostname + **IPv4** IPv4 address + **IPv6** IPv6 address + **ERROR_TRANSIENT** temporarily error retrieving address + **ERROR_PERMANENT** permanent error retrieving address + **UNKNOWN** unrecognized address type + ===================== =========== + +.. data:: RelayCommand (enum) + + Command concerning streams and circuits we've established with a relay. + Commands have two characteristics... 
+ + * **forward/backward**: **forward** commands are issued from the orgin, + whereas **backward** come from the relay + + * **stream/circuit**: **steam** commands concern an individual steam, whereas + **circuit** concern the entire circuit we've established with a relay + + ===================== =========== + RelayCommand Description + ===================== =========== + **BEGIN** begin a stream (**forward**, **stream**) + **DATA** transmit data (**forward/backward**, **stream**) + **END** end a stream (**forward/backward**, **stream**) + **CONNECTED** BEGIN reply (**backward**, **stream**) + **SENDME** ready to accept more cells (**forward/backward**, **stream/circuit**) + **EXTEND** extend the circuit through another relay (**forward**, **circuit**) + **EXTENDED** EXTEND reply (**backward**, **circuit**) + **TRUNCATE** remove last circuit hop (**forward**, **circuit**) + **TRUNCATED** TRUNCATE reply (**backward**, **circuit**) + **DROP** ignorable no-op (**forward/backward**, **circuit**) + **RESOLVE** request DNS resolution (**forward**, **stream**) + **RESOLVED** RESOLVE reply (**backward**, **stream**) + **BEGIN_DIR** request descriptor (**forward**, **steam**) + **EXTEND2** ntor EXTEND request (**forward**, **circuit**) + **EXTENDED2** EXTEND2 reply (**backward**, **circuit**) + **UNKNOWN** unrecognized command + ===================== =========== + +.. data:: CertType (enum) + + Relay certificate type. + + ===================== =========== + CertType Description + ===================== =========== + **LINK** link key certificate certified by RSA1024 identity + **IDENTITY** RSA1024 Identity certificate + **AUTHENTICATE** RSA1024 AUTHENTICATE cell link certificate + **UNKNOWN** unrecognized certificate type + ===================== =========== + +.. data:: CloseReason (enum) + + Reason a relay is closed. 
+ + ===================== =========== + CloseReason Description + ===================== =========== + **NONE** no reason given + **PROTOCOL** tor protocol violation + **INTERNAL** internal error + **REQUESTED** client sent a TRUNCATE command + **HIBERNATING** relay suspended, trying to save bandwidth + **RESOURCELIMIT** out of memory, sockets, or circuit IDs + **CONNECTFAILED** unable to reach relay + **OR_IDENTITY** connected, but its OR identity was not as expected + **OR_CONN_CLOSED** connection that was carrying this circuit died + **FINISHED** circuit has expired for being dirty or old + **TIMEOUT** circuit construction took too long + **DESTROYED** circuit was destroyed without a client TRUNCATE + **NOSUCHSERVICE** request was for an unknown hidden service + **UNKNOWN** unrecognized reason + ===================== =========== +""" + +import collections +import hashlib +import struct + +import stem.client.cell +import stem.prereq +import stem.util +import stem.util.connection +import stem.util.enum + +ZERO = b'\x00' +HASH_LEN = 20 +KEY_LEN = 16 + + +class _IntegerEnum(stem.util.enum.Enum): + """ + Integer backed enumeration. Enumerations of this type always have an implicit + **UNKNOWN** value for integer values that lack a mapping. + """ + + def __init__(self, *args): + self._enum_to_int = {} + self._int_to_enum = {} + parent_args = [] + + for entry in args: + if len(entry) == 2: + enum, int_val = entry + str_val = enum + elif len(entry) == 3: + enum, str_val, int_val = entry + else: + raise ValueError('IntegerEnums can only be constructed with two or three value tuples: %s' % repr(entry)) + + self._enum_to_int[str_val] = int_val + self._int_to_enum[int_val] = str_val + parent_args.append((enum, str_val)) + + parent_args.append(('UNKNOWN', 'UNKNOWN')) + super(_IntegerEnum, self).__init__(*parent_args) + + def get(self, val): + """ + Provides the (enum, int_value) tuple for a given value. 
+ """ + + if stem.util._is_int(val): + return self._int_to_enum.get(val, self.UNKNOWN), val + elif val in self: + return val, self._enum_to_int.get(val, val) + else: + raise ValueError("Invalid enumeration '%s', options are %s" % (val, ', '.join(self))) + + +AddrType = _IntegerEnum( + ('HOSTNAME', 0), + ('IPv4', 4), + ('IPv6', 6), + ('ERROR_TRANSIENT', 16), + ('ERROR_PERMANENT', 17), +) + +RelayCommand = _IntegerEnum( + ('BEGIN', 'RELAY_BEGIN', 1), + ('DATA', 'RELAY_DATA', 2), + ('END', 'RELAY_END', 3), + ('CONNECTED', 'RELAY_CONNECTED', 4), + ('SENDME', 'RELAY_SENDME', 5), + ('EXTEND', 'RELAY_EXTEND', 6), + ('EXTENDED', 'RELAY_EXTENDED', 7), + ('TRUNCATE', 'RELAY_TRUNCATE', 8), + ('TRUNCATED', 'RELAY_TRUNCATED', 9), + ('DROP', 'RELAY_DROP', 10), + ('RESOLVE', 'RELAY_RESOLVE', 11), + ('RESOLVED', 'RELAY_RESOLVED', 12), + ('BEGIN_DIR', 'RELAY_BEGIN_DIR', 13), + ('EXTEND2', 'RELAY_EXTEND2', 14), + ('EXTENDED2', 'RELAY_EXTENDED2', 15), +) + +CertType = _IntegerEnum( + ('LINK', 1), + ('IDENTITY', 2), + ('AUTHENTICATE', 3), +) + +CloseReason = _IntegerEnum( + ('NONE', 0), + ('PROTOCOL', 1), + ('INTERNAL', 2), + ('REQUESTED', 3), + ('HIBERNATING', 4), + ('RESOURCELIMIT', 5), + ('CONNECTFAILED', 6), + ('OR_IDENTITY', 7), + ('OR_CONN_CLOSED', 8), + ('FINISHED', 9), + ('TIMEOUT', 10), + ('DESTROYED', 11), + ('NOSUCHSERVICE', 12), +) + + +def split(content, size): + """ + Simple split of bytes into two substrings. + + :param bytes content: string to split + :param int size: index to split the string on + + :returns: two value tuple with the split bytes + """ + + return content[:size], content[size:] + + +class LinkProtocol(int): + """ + Constants that vary by our link protocol version. 
+ + :var int version: link protocol version + :var stem.client.datatype.Size circ_id_size: circuit identifier field size + :var int fixed_cell_length: size of cells with a fixed length + :var int first_circ_id: When creating circuits we pick an unused identifier + from a range that's determined by our link protocol. + """ + + def __new__(cls, version): + if isinstance(version, LinkProtocol): + return version # already a LinkProtocol + + protocol = int.__new__(cls, version) + protocol.version = version + protocol.circ_id_size = Size.LONG if version > 3 else Size.SHORT + protocol.first_circ_id = 0x80000000 if version > 3 else 0x01 + + cell_header_size = protocol.circ_id_size.size + 1 # circuit id (2 or 4 bytes) + command (1 byte) + protocol.fixed_cell_length = cell_header_size + stem.client.cell.FIXED_PAYLOAD_LEN + + return protocol + + def __hash__(self): + # All LinkProtocol attributes can be derived from our version, so that's + # all we need in our hash. Offsetting by our type so we don't hash conflict + # with ints. + + return self.version * hash(str(type(self))) + + def __eq__(self, other): + if isinstance(other, int): + return self.version == other + elif isinstance(other, LinkProtocol): + return hash(self) == hash(other) + else: + return False + + def __ne__(self, other): + return not self == other + + def __int__(self): + return self.version + + +class Field(object): + """ + Packable and unpackable datatype. + """ + + def pack(self): + """ + Encodes field into bytes. + + :returns: **bytes** that can be communicated over Tor's ORPort + + :raises: **ValueError** if incorrect type or size + """ + + raise NotImplementedError('Not yet available') + + @classmethod + def unpack(cls, packed): + """ + Decodes bytes into a field of this type. 
+ + :param bytes packed: content to decode + + :returns: instance of this class + + :raises: **ValueError** if packed data is malformed + """ + + unpacked, remainder = cls.pop(packed) + + if remainder: + raise ValueError('%s is the wrong size for a %s field' % (repr(packed), cls.__name__)) + + return unpacked + + @staticmethod + def pop(packed): + """ + Decodes bytes as this field type, providing it and the remainder. + + :param bytes packed: content to decode + + :returns: tuple of the form (unpacked, remainder) + + :raises: **ValueError** if packed data is malformed + """ + + raise NotImplementedError('Not yet available') + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, Field) else False + + def __ne__(self, other): + return not self == other + + +class Size(Field): + """ + Unsigned `struct.pack format + ` for + network-order fields. + + ==================== =========== + Pack Description + ==================== =========== + CHAR Unsigned char (1 byte) + SHORT Unsigned short (2 bytes) + LONG Unsigned long (4 bytes) + LONG_LONG Unsigned long long (8 bytes) + ==================== =========== + """ + + def __init__(self, name, size, pack_format): + self.name = name + self.size = size + self.format = pack_format + + @staticmethod + def pop(packed): + raise NotImplementedError("Use our constant's unpack() and pop() instead") + + def pack(self, content): + # TODO: Python 2.6's struct module behaves a little differently in a couple + # respsects... + # + # * Invalid types raise a TypeError rather than a struct.error. + # + # * Negative values are happily packed despite being unsigned fields with + # a message printed to stdout (!) that says... 
+ # + # stem/client/datatype.py:362: DeprecationWarning: struct integer overflow masking is deprecated + # packed = struct.pack(self.format, content) + # stem/client/datatype.py:362: DeprecationWarning: 'B' format requires 0 <= number <= 255 + # packed = struct.pack(self.format, content) + # + # Rather than adjust this method to account for these differences doing + # duplicate upfront checks just for python 2.6. When we drop 2.6 support + # this can obviously be dropped. + + if stem.prereq._is_python_26(): + if not stem.util._is_int(content): + raise ValueError('Size.pack encodes an integer, but was a %s' % type(content).__name__) + elif content < 0: + raise ValueError('Packed values must be positive (attempted to pack %i as a %s)' % (content, self.name)) + + try: + packed = struct.pack(self.format, content) + except struct.error: + if not stem.util._is_int(content): + raise ValueError('Size.pack encodes an integer, but was a %s' % type(content).__name__) + elif content < 0: + raise ValueError('Packed values must be positive (attempted to pack %i as a %s)' % (content, self.name)) + else: + raise # some other struct exception + + if self.size != len(packed): + raise ValueError('%s is the wrong size for a %s field' % (repr(packed), self.name)) + + return packed + + def unpack(self, packed): + if self.size != len(packed): + raise ValueError('%s is the wrong size for a %s field' % (repr(packed), self.name)) + + return struct.unpack(self.format, packed)[0] + + def pop(self, packed): + to_unpack, remainder = split(packed, self.size) + + return self.unpack(to_unpack), remainder + + def __hash__(self): + return stem.util._hash_attr(self, 'name', 'size', 'format', cache = True) + + +class Address(Field): + """ + Relay address. 
+ + :var stem.client.AddrType type: address type + :var int type_int: integer value of the address type + :var unicode value: address value + :var bytes value_bin: encoded address value + """ + + def __init__(self, value, addr_type = None): + if addr_type is None: + if stem.util.connection.is_valid_ipv4_address(value): + addr_type = AddrType.IPv4 + elif stem.util.connection.is_valid_ipv6_address(value): + addr_type = AddrType.IPv6 + else: + raise ValueError("'%s' isn't an IPv4 or IPv6 address" % value) + + self.type, self.type_int = AddrType.get(addr_type) + + if self.type == AddrType.IPv4: + if stem.util.connection.is_valid_ipv4_address(value): + self.value = value + self.value_bin = b''.join([Size.CHAR.pack(int(v)) for v in value.split('.')]) + else: + if len(value) != 4: + raise ValueError('Packed IPv4 addresses should be four bytes, but was: %s' % repr(value)) + + self.value = '.'.join([str(Size.CHAR.unpack(value[i:i + 1])) for i in range(4)]) + self.value_bin = value + elif self.type == AddrType.IPv6: + if stem.util.connection.is_valid_ipv6_address(value): + self.value = stem.util.connection.expand_ipv6_address(value).lower() + self.value_bin = b''.join([Size.SHORT.pack(int(v, 16)) for v in self.value.split(':')]) + else: + if len(value) != 16: + raise ValueError('Packed IPv6 addresses should be sixteen bytes, but was: %s' % repr(value)) + + self.value = ':'.join(['%04x' % Size.SHORT.unpack(value[i * 2:(i + 1) * 2]) for i in range(8)]) + self.value_bin = value + else: + # The spec doesn't really tell us what form to expect errors to be. For + # now just leaving the value unset so we can fill it in later when we + # know what would be most useful. 
+ + self.value = None + self.value_bin = value + + def pack(self): + cell = bytearray() + cell += Size.CHAR.pack(self.type_int) + cell += Size.CHAR.pack(len(self.value_bin)) + cell += self.value_bin + return bytes(cell) + + @staticmethod + def pop(content): + addr_type, content = Size.CHAR.pop(content) + addr_length, content = Size.CHAR.pop(content) + + if len(content) < addr_length: + raise ValueError('Address specified a payload of %i bytes, but only had %i' % (addr_length, len(content))) + + addr_value, content = split(content, addr_length) + + return Address(addr_value, addr_type), content + + def __hash__(self): + return stem.util._hash_attr(self, 'type_int', 'value_bin', cache = True) + + +class Certificate(Field): + """ + Relay certificate as defined in tor-spec section 4.2. + + :var stem.client.CertType type: certificate type + :var int type_int: integer value of the certificate type + :var bytes value: certificate value + """ + + def __init__(self, cert_type, value): + self.type, self.type_int = CertType.get(cert_type) + self.value = value + + def pack(self): + cell = bytearray() + cell += Size.CHAR.pack(self.type_int) + cell += Size.SHORT.pack(len(self.value)) + cell += self.value + return bytes(cell) + + @staticmethod + def pop(content): + cert_type, content = Size.CHAR.pop(content) + cert_size, content = Size.SHORT.pop(content) + + if cert_size > len(content): + raise ValueError('CERTS cell should have a certificate with %i bytes, but only had %i remaining' % (cert_size, len(content))) + + cert_bytes, content = split(content, cert_size) + return Certificate(cert_type, cert_bytes), content + + def __hash__(self): + return stem.util._hash_attr(self, 'type_int', 'value') + + +class KDF(collections.namedtuple('KDF', ['key_hash', 'forward_digest', 'backward_digest', 'forward_key', 'backward_key'])): + """ + Computed KDF-TOR derived values for TAP, CREATE_FAST handshakes, and hidden + service protocols as defined tor-spec section 5.2.1. 
+ + :var bytes key_hash: hash that proves knowledge of our shared key + :var bytes forward_digest: forward digest hash seed + :var bytes backward_digest: backward digest hash seed + :var bytes forward_key: forward encryption key + :var bytes backward_key: backward encryption key + """ + + @staticmethod + def from_value(key_material): + # Derived key material, as per... + # + # K = H(K0 | [00]) | H(K0 | [01]) | H(K0 | [02]) | ... + + derived_key = b'' + counter = 0 + + while len(derived_key) < KEY_LEN * 2 + HASH_LEN * 3: + derived_key += hashlib.sha1(key_material + Size.CHAR.pack(counter)).digest() + counter += 1 + + key_hash, derived_key = split(derived_key, HASH_LEN) + forward_digest, derived_key = split(derived_key, HASH_LEN) + backward_digest, derived_key = split(derived_key, HASH_LEN) + forward_key, derived_key = split(derived_key, KEY_LEN) + backward_key, derived_key = split(derived_key, KEY_LEN) + + return KDF(key_hash, forward_digest, backward_digest, forward_key, backward_key) + + +setattr(Size, 'CHAR', Size('CHAR', 1, '!B')) +setattr(Size, 'SHORT', Size('SHORT', 2, '!H')) +setattr(Size, 'LONG', Size('LONG', 4, '!L')) +setattr(Size, 'LONG_LONG', Size('LONG_LONG', 8, '!Q')) diff --git a/Shared/lib/python3.4/site-packages/stem/connection.py b/Shared/lib/python3.4/site-packages/stem/connection.py index fb85225..16882c7 100644 --- a/Shared/lib/python3.4/site-packages/stem/connection.py +++ b/Shared/lib/python3.4/site-packages/stem/connection.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -198,8 +198,14 @@ CONNECT_MESSAGES = { 'wrong_socket_type': WRONG_SOCKET_TYPE_MSG.strip(), } +COMMON_TOR_COMMANDS = ( + 'tor', + 'tor.real', # TBB command ran + '/usr/local/bin/tor', # FreeBSD expands the whole path, this is the default location +) -def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/control', 
password = None, password_prompt = False, chroot_path = None, controller = stem.control.Controller): + +def connect(control_port = ('127.0.0.1', 'default'), control_socket = '/var/run/tor/control', password = None, password_prompt = False, chroot_path = None, controller = stem.control.Controller): """ Convenience function for quickly getting a control connection. This is very handy for debugging or CLI setup, handling setup and prompting for a password @@ -214,8 +220,15 @@ def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/c details of how this works. Messages and details of this function's behavior could change in the future. + If the **port** is **'default'** then this checks on both 9051 (default for + relays) and 9151 (default for the Tor Browser). This default may change in + the future. + .. versionadded:: 1.2.0 + .. versionchanged:: 1.5.0 + Use both port 9051 and 9151 by default. + :param tuple contol_port: address and port tuple, for instance **('127.0.0.1', 9051)** :param str path: path where the control socket is located :param str password: passphrase to authenticate to the socket @@ -238,7 +251,7 @@ def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/c raise ValueError('The control_port argument for connect() should be an (address, port) tuple.') elif not stem.util.connection.is_valid_ipv4_address(control_port[0]): raise ValueError("'%s' isn't a vaid IPv4 address" % control_port[0]) - elif not stem.util.connection.is_valid_port(control_port[1]): + elif control_port[1] != 'default' and not stem.util.connection.is_valid_port(control_port[1]): raise ValueError("'%s' isn't a valid port" % control_port[1]) control_connection, error_msg = None, '' @@ -256,19 +269,20 @@ def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/c address, port = control_port try: - control_connection = stem.socket.ControlPort(address, port) + if port == 'default': + control_connection = 
_connection_for_default_port(address) + else: + control_connection = stem.socket.ControlPort(address, int(port)) except stem.SocketError as exc: error_msg = CONNECT_MESSAGES['unable_to_use_port'].format(address = address, port = port, error = exc) # If unable to connect to either a control socket or port then finally fail # out. If we only attempted to connect to one of them then provide the error # output from that. Otherwise we provide a more generic error message. - # - # We check for a 'tor.real' process name because that's what TBB uses. if not control_connection: if control_socket and control_port: - is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real') + is_tor_running = stem.util.system.is_running(COMMON_TOR_COMMANDS) error_msg = CONNECT_MESSAGES['no_control_port'] if is_tor_running else CONNECT_MESSAGES['tor_isnt_running'] print(error_msg) @@ -361,7 +375,7 @@ def _connect_auth(control_socket, password, password_prompt, chroot_path, contro return controller(control_socket, is_authenticated = True) except IncorrectSocketType: if isinstance(control_socket, stem.socket.ControlPort): - print(CONNECT_MESSAGES['wrong_port_type'].format(port = control_socket.get_port())) + print(CONNECT_MESSAGES['wrong_port_type'].format(port = control_socket.port)) else: print(CONNECT_MESSAGES['wrong_socket_type']) @@ -574,6 +588,9 @@ def authenticate(controller, password = None, chroot_path = None, protocolinfo_r else: authenticate_cookie(controller, cookie_path, False) + if isinstance(controller, stem.control.BaseController): + controller._post_authentication() + return # success! 
except OpenAuthRejected as exc: auth_exceptions.append(exc) @@ -655,7 +672,7 @@ def authenticate_none(controller, suppress_ctl_errors = True): pass if not suppress_ctl_errors: - raise exc + raise else: raise OpenAuthRejected('Socket failed (%s)' % exc) @@ -725,7 +742,7 @@ def authenticate_password(controller, password, suppress_ctl_errors = True): pass if not suppress_ctl_errors: - raise exc + raise else: raise PasswordAuthRejected('Socket failed (%s)' % exc) @@ -815,7 +832,7 @@ def authenticate_cookie(controller, cookie_path, suppress_ctl_errors = True): pass if not suppress_ctl_errors: - raise exc + raise else: raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, False) @@ -912,7 +929,7 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True) pass if not suppress_ctl_errors: - raise exc + raise else: raise AuthChallengeFailed('Socket failed (%s)' % exc, cookie_path, True) @@ -920,7 +937,7 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True) stem.response.convert('AUTHCHALLENGE', authchallenge_response) except stem.ProtocolError as exc: if not suppress_ctl_errors: - raise exc + raise else: raise AuthChallengeFailed('Unable to parse AUTHCHALLENGE response: %s' % exc, cookie_path) @@ -944,7 +961,7 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True) pass if not suppress_ctl_errors: - raise exc + raise else: raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, True, auth_response) @@ -972,11 +989,6 @@ def get_protocolinfo(controller): the tor process running on it. If the socket is already closed then it is first reconnected. - According to the control spec the cookie_file is an absolute path. However, - this often is not the case (especially for the Tor Browser Bundle). If the - path is relative then we'll make an attempt (which may not work) to correct - this (:trac:`1101`). 
- This can authenticate to either a :class:`~stem.control.BaseController` or :class:`~stem.socket.ControlSocket`. @@ -1008,27 +1020,6 @@ def get_protocolinfo(controller): raise stem.SocketError(exc) stem.response.convert('PROTOCOLINFO', protocolinfo_response) - - # attempt to expand relative cookie paths - - if protocolinfo_response.cookie_path: - _expand_cookie_path(protocolinfo_response, stem.util.system.pid_by_name, 'tor') - - # attempt to expand relative cookie paths via the control port or socket file - - if isinstance(controller, stem.socket.ControlSocket): - control_socket = controller - else: - control_socket = controller.get_socket() - - if isinstance(control_socket, stem.socket.ControlPort): - if control_socket.get_address() == '127.0.0.1': - pid_method = stem.util.system.pid_by_port - _expand_cookie_path(protocolinfo_response, pid_method, control_socket.get_port()) - elif isinstance(control_socket, stem.socket.ControlSocketFile): - pid_method = stem.util.system.pid_by_open_file - _expand_cookie_path(protocolinfo_response, pid_method, control_socket.get_socket_path()) - return protocolinfo_response @@ -1045,6 +1036,28 @@ def _msg(controller, message): return controller.msg(message) +def _connection_for_default_port(address): + """ + Attempts to provide a controller connection for either port 9051 (default for + relays) or 9151 (default for Tor Browser). If both fail then this raises the + exception for port 9051. + + :param str address: address to connect to + + :returns: :class:`~stem.socket.ControlPort` for the controller conneciton + + :raises: :class:`stem.SocketError` if we're unable to establish a connection + """ + + try: + return stem.socket.ControlPort(address, 9051) + except stem.SocketError as exc: + try: + return stem.socket.ControlPort(address, 9151) + except stem.SocketError: + raise exc + + def _read_cookie(cookie_path, is_safecookie): """ Provides the contents of a given cookie file. 
@@ -1087,40 +1100,6 @@ def _read_cookie(cookie_path, is_safecookie): raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie) -def _expand_cookie_path(protocolinfo_response, pid_resolver, pid_resolution_arg): - """ - Attempts to expand a relative cookie path with the given pid resolver. This - leaves the cookie_path alone if it's already absolute, **None**, or the - system calls fail. - """ - - cookie_path = protocolinfo_response.cookie_path - if cookie_path and not os.path.isabs(cookie_path): - try: - tor_pid = pid_resolver(pid_resolution_arg) - - if not tor_pid: - raise IOError('pid lookup failed') - - tor_cwd = stem.util.system.cwd(tor_pid) - - if not tor_cwd: - raise IOError('cwd lookup failed') - - cookie_path = stem.util.system.expand_path(cookie_path, tor_cwd) - except IOError as exc: - resolver_labels = { - stem.util.system.pid_by_name: ' by name', - stem.util.system.pid_by_port: ' by port', - stem.util.system.pid_by_open_file: ' by socket file', - } - - pid_resolver_label = resolver_labels.get(pid_resolver, '') - log.debug('unable to expand relative tor cookie path%s: %s' % (pid_resolver_label, exc)) - - protocolinfo_response.cookie_path = cookie_path - - class AuthenticationFailure(Exception): """ Base error for authentication failures. 
@@ -1265,7 +1244,9 @@ class NoAuthCookie(MissingAuthInfo): super(NoAuthCookie, self).__init__(message) self.is_safecookie = is_safecookie + # authentication exceptions ordered as per the authenticate function's pydocs + AUTHENTICATE_EXCEPTIONS = ( IncorrectSocketType, UnrecognizedAuthMethods, diff --git a/Shared/lib/python3.4/site-packages/stem/control.py b/Shared/lib/python3.4/site-packages/stem/control.py index 657f559..2e89378 100644 --- a/Shared/lib/python3.4/site-packages/stem/control.py +++ b/Shared/lib/python3.4/site-packages/stem/control.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -66,11 +66,14 @@ If you're fine with allowing your script to raise exceptions then this can be mo :: + event_description - brief description of a tor event type + Controller - General controller class intended for direct use | |- from_port - Provides a Controller based on a port connection. | +- from_socket_file - Provides a Controller based on a socket file connection. 
| |- authenticate - authenticates this controller with tor + |- reconnect - reconnects and authenticates to socket | |- get_info - issues a GETINFO query for a parameter |- get_version - provides our tor version @@ -81,17 +84,19 @@ If you're fine with allowing your script to raise exceptions then this can be mo |- get_protocolinfo - information about the controller interface |- get_user - provides the user tor is running as |- get_pid - provides the pid of our tor process + |- is_user_traffic_allowed - checks if we send or receive direct user traffic | |- get_microdescriptor - querying the microdescriptor for a relay |- get_microdescriptors - provides all currently available microdescriptors |- get_server_descriptor - querying the server descriptor for a relay |- get_server_descriptors - provides all currently available server descriptors |- get_network_status - querying the router status entry for a relay - |- get_network_statuses - provides all preently available router status entries + |- get_network_statuses - provides all presently available router status entries |- get_hidden_service_descriptor - queries the given hidden service descriptor | |- get_conf - gets the value of a configuration option |- get_conf_map - gets the values of multiple configuration options + |- is_set - determines if an option differs from its default |- set_conf - sets the value of a configuration option |- reset_conf - reverts configuration options to their default values |- set_options - sets or resets the values of multiple configuration options @@ -148,8 +153,7 @@ If you're fine with allowing your script to raise exceptions then this can be mo |- get_socket - provides the socket used for control communication |- get_latest_heartbeat - timestamp for when we last heard from tor |- add_status_listener - notifies a callback of changes in our status - |- remove_status_listener - prevents further notification of status changes - +- __enter__ / __exit__ - manages socket connection + +- 
remove_status_listener - prevents further notification of status changes .. data:: State (enum) @@ -176,6 +180,11 @@ If you're fine with allowing your script to raise exceptions then this can be mo Enums are mapped to :class:`~stem.response.events.Event` subclasses as follows... + .. deprecated:: 1.6.0 + + Tor dropped EventType.AUTHDIR_NEWDESCS as of version 0.3.2.1. + (:spec:`6e887ba`) + ======================= =========== EventType Event Class ======================= =========== @@ -197,6 +206,7 @@ If you're fine with allowing your script to raise exceptions then this can be mo **HS_DESC** :class:`stem.response.events.HSDescEvent` **HS_DESC_CONTENT** :class:`stem.response.events.HSDescContentEvent` **INFO** :class:`stem.response.events.LogEvent` + **NETWORK_LIVENESS** :class:`stem.response.events.NetworkLivenessEvent` **NEWCONSENSUS** :class:`stem.response.events.NewConsensusEvent` **NEWDESC** :class:`stem.response.events.NewDescEvent` **NOTICE** :class:`stem.response.events.LogEvent` @@ -246,11 +256,10 @@ except ImportError: from stem.util.ordereddict import OrderedDict try: + # Added in 3.x import queue - from io import StringIO except ImportError: import Queue as queue - from StringIO import StringIO import stem.descriptor.microdescriptor import stem.descriptor.reader @@ -260,6 +269,8 @@ import stem.exit_policy import stem.response import stem.response.events import stem.socket +import stem.util +import stem.util.conf import stem.util.connection import stem.util.enum import stem.util.str_tools @@ -267,9 +278,16 @@ import stem.util.system import stem.util.tor_tools import stem.version -from stem import UNDEFINED, CircStatus, Signal, str_type +from stem import UNDEFINED, CircStatus, Signal from stem.util import log +# When closing the controller we attempt to finish processing enqueued events, +# but if it takes longer than this we terminate. 
+ +EVENTS_LISTENING_TIMEOUT = 0.1 + +MALFORMED_EVENTS = 'MALFORMED_EVENTS' + # state changes a control socket can have State = stem.util.enum.Enum('INIT', 'RESET', 'CLOSED') @@ -293,6 +311,7 @@ EventType = stem.util.enum.UppercaseEnum( 'HS_DESC', 'HS_DESC_CONTENT', 'INFO', + 'NETWORK_LIVENESS', 'NEWCONSENSUS', 'NEWDESC', 'NOTICE', @@ -319,6 +338,28 @@ Listener = stem.util.enum.UppercaseEnum( 'CONTROL', ) +# torrc options that cannot be changed once tor's running + +IMMUTABLE_CONFIG_OPTIONS = set(map(stem.util.str_tools._to_unicode, map(str.lower, ( + 'AccelDir', + 'AccelName', + 'DataDirectory', + 'DisableAllSwap', + 'DisableDebuggerAttachment', + 'HardwareAccel', + 'HiddenServiceNonAnonymousMode', + 'HiddenServiceSingleHopMode', + 'KeepBindCapabilities', + 'PidFile', + 'RunAsDaemon', + 'Sandbox', + 'SyslogIdentityTag', + 'TokenBucketRefillInterval', + 'User', +)))) + +LOG_CACHE_FETCHES = True # provide trace level logging for cache hits + # Configuration options that are fetched by a special key. The keys are # lowercase to make case insensitive lookups easier. @@ -333,6 +374,7 @@ MAPPED_CONFIG_KEYS = { # unchangeable GETINFO parameters CACHEABLE_GETINFO_PARAMS = ( + 'address', 'version', 'config-file', 'exit-policy/default', @@ -343,6 +385,11 @@ CACHEABLE_GETINFO_PARAMS = ( 'events/names', 'features/names', 'process/descriptor-limit', + 'status/version/current', +) + +CACHEABLE_GETINFO_PARAMS_UNTIL_SETCONF = ( + 'accounting/enabled', ) # GETCONF parameters we shouldn't cache. This includes hidden service @@ -357,33 +404,55 @@ UNCACHEABLE_GETCONF_PARAMS = ( 'hiddenserviceauthorizeclient', ) -# number of sequential attempts before we decide that the Tor geoip database -# is unavailable -GEOIP_FAILURE_THRESHOLD = 5 - SERVER_DESCRIPTORS_UNSUPPORTED = "Tor is currently not configured to retrieve \ server descriptors. As of Tor version 0.2.3.25 it downloads microdescriptors \ instead unless you set 'UseMicrodescriptors 0' in your torrc." 
-AccountingStats = collections.namedtuple('AccountingStats', [ - 'retrieved', - 'status', - 'interval_end', - 'time_until_reset', - 'read_bytes', - 'read_bytes_left', - 'read_limit', - 'written_bytes', - 'write_bytes_left', - 'write_limit', -]) +EVENT_DESCRIPTIONS = None -CreateHiddenServiceOutput = collections.namedtuple('CreateHiddenServiceOutput', [ - 'path', - 'hostname', - 'hostname_for_client', - 'config', -]) + +class AccountingStats(collections.namedtuple('AccountingStats', ['retrieved', 'status', 'interval_end', 'time_until_reset', 'read_bytes', 'read_bytes_left', 'read_limit', 'written_bytes', 'write_bytes_left', 'write_limit'])): + """ + Accounting information, determining the limits where our relay suspends + itself. + + :var float retrieved: unix timestamp for when this was fetched + :var str status: hibernation status of 'awake', 'soft', or 'hard' + :var datetime interval_end: time when our limits reset + :var int time_until_reset: seconds until our limits reset + :var int read_bytes: number of bytes we've read relaying + :var int read_bytes_left: number of bytes we can read until we suspend + :var int read_limit: reading threshold where we suspend + :var int written_bytes: number of bytes we've written relaying + :var int write_bytes_left: number of bytes we can write until we suspend + :var int write_limit: writing threshold where we suspend + """ + + +class UserTrafficAllowed(collections.namedtuple('UserTrafficAllowed', ['inbound', 'outbound'])): + """ + Indicates if we're likely to be servicing direct user traffic or not. + + :var bool inbound: if **True** we're likely providing guard or bridge connnections + :var bool outbound: if **True** we're likely providng exit connections + """ + + +class CreateHiddenServiceOutput(collections.namedtuple('CreateHiddenServiceOutput', ['path', 'hostname', 'hostname_for_client', 'config'])): + """ + Attributes of a hidden service we've created. 
+ + Both the **hostnames** and **hostname_for_client** attributes can only be + provided if we're able to read the hidden service directory. If the method + was called with **client_names** then we may provide the + **hostname_for_client**, and otherwise can provide the **hostnames**. + + :var str path: hidden service directory + :var str hostname: content of the hostname file if available + :var dict hostname_for_client: mapping of client names to their onion address + if available + :var dict config: tor's new hidden service configuration + """ def with_default(yields = False): @@ -407,11 +476,11 @@ def with_default(yields = False): def wrapped(self, *args, **kwargs): try: return func(self, *args, **kwargs) - except Exception as exc: + except: default = get_default(func, args, kwargs) if default == UNDEFINED: - raise exc + raise else: return default else: @@ -420,11 +489,11 @@ def with_default(yields = False): try: for val in func(self, *args, **kwargs): yield val - except Exception as exc: + except: default = get_default(func, args, kwargs) if default == UNDEFINED: - raise exc + raise else: if default is not None: for val in default: @@ -435,6 +504,32 @@ def with_default(yields = False): return decorator +def event_description(event): + """ + Provides a description for Tor events. 
+ + :param str event: the event for which a description is needed + + :returns: **str** The event description or **None** if this is an event name + we don't have a description for + """ + + global EVENT_DESCRIPTIONS + + if EVENT_DESCRIPTIONS is None: + config = stem.util.conf.Config() + config_path = os.path.join(os.path.dirname(__file__), 'settings.cfg') + + try: + config.load(config_path) + EVENT_DESCRIPTIONS = dict([(key.lower()[18:], config.get_value(key)) for key in config.keys() if key.startswith('event.description.')]) + except Exception as exc: + log.warn("BUG: stem failed to load its internal manual information from '%s': %s" % (config_path, exc)) + return None + + return EVENT_DESCRIPTIONS.get(event.lower()) + + class BaseController(object): """ Controller for the tor process. This is a minimal base class for other @@ -562,27 +657,20 @@ class BaseController(object): if isinstance(response, stem.ControllerError): raise response else: - # I really, really don't like putting hooks into this method, but - # this is the most reliable method I can think of for taking actions - # immediately after successfully authenticating to a connection. - - if message.upper().startswith('AUTHENTICATE'): - self._post_authentication() - return response - except stem.SocketClosed as exc: + except stem.SocketClosed: # If the recv() thread caused the SocketClosed then we could still be # in the process of closing. Calling close() here so that we can # provide an assurance to the caller that when we raise a SocketClosed # exception we are shut down afterward for realz. self.close() - raise exc + raise def is_alive(self): """ Checks if our socket is currently connected. This is a pass-through for our - socket's :func:`~stem.socket.ControlSocket.is_alive` method. + socket's :func:`~stem.socket.BaseSocket.is_alive` method. 
:returns: **bool** that's **True** if our socket is connected and **False** otherwise """ @@ -622,10 +710,7 @@ class BaseController(object): and **False** otherwise """ - if self.is_alive(): - return self._is_authenticated - - return False + return self._is_authenticated if self.is_alive() else False def connect(self): """ @@ -640,7 +725,7 @@ class BaseController(object): def close(self): """ Closes our socket connection. This is a pass-through for our socket's - :func:`~stem.socket.ControlSocket.close` method. + :func:`~stem.socket.BaseSocket.close` method. """ self._socket.close() @@ -809,10 +894,9 @@ class BaseController(object): for listener, spawn in self._status_listeners: if spawn: - name = '%s notification' % state args = (self, state, change_timestamp) - notice_thread = threading.Thread(target = listener, args = args, name = name) + notice_thread = threading.Thread(target = listener, args = args, name = '%s notification' % state) notice_thread.setDaemon(True) notice_thread.start() self._state_change_threads.append(notice_thread) @@ -830,12 +914,12 @@ class BaseController(object): with self._socket._get_send_lock(): if not self._reader_thread or not self._reader_thread.is_alive(): - self._reader_thread = threading.Thread(target = self._reader_loop, name = 'Tor Listener') + self._reader_thread = threading.Thread(target = self._reader_loop, name = 'Tor listener') self._reader_thread.setDaemon(True) self._reader_thread.start() if not self._event_thread or not self._event_thread.is_alive(): - self._event_thread = threading.Thread(target = self._event_loop, name = 'Event Notifier') + self._event_thread = threading.Thread(target = self._event_loop, name = 'Event notifier') self._event_thread.setDaemon(True) self._event_thread.start() @@ -876,10 +960,20 @@ class BaseController(object): socket. 
""" + socket_closed_at = None + while True: try: event_message = self._event_queue.get_nowait() self._handle_event(event_message) + + # Attempt to finish processing enqueued events when our controller closes + + if not self.is_alive(): + if not socket_closed_at: + socket_closed_at = time.time() + elif time.time() - socket_closed_at > EVENTS_LISTENING_TIMEOUT: + break except queue.Empty: if not self.is_alive(): break @@ -890,15 +984,22 @@ class BaseController(object): class Controller(BaseController): """ - Communicates with a control socket. This is built on top of the + Connection with Tor's control socket. This is built on top of the BaseController and provides a more user friendly API for library users. """ @staticmethod - def from_port(address = '127.0.0.1', port = 9051): + def from_port(address = '127.0.0.1', port = 'default'): """ Constructs a :class:`~stem.socket.ControlPort` based Controller. + If the **port** is **'default'** then this checks on both 9051 (default + for relays) and 9151 (default for the Tor Browser). This default may change + in the future. + + .. versionchanged:: 1.5.0 + Use both port 9051 and 9151 by default. 
+ :param str address: ip address of the controller :param int port: port number of the controller @@ -907,12 +1008,18 @@ class Controller(BaseController): :raises: :class:`stem.SocketError` if we're unable to establish a connection """ + import stem.connection + if not stem.util.connection.is_valid_ipv4_address(address): raise ValueError('Invalid IP address: %s' % address) - elif not stem.util.connection.is_valid_port(port): + elif port != 'default' and not stem.util.connection.is_valid_port(port): raise ValueError('Invalid port: %s' % port) - control_port = stem.socket.ControlPort(address, port) + if port == 'default': + control_port = stem.connection._connection_for_default_port(address) + else: + control_port = stem.socket.ControlPort(address, port) + return Controller(control_port) @staticmethod @@ -941,11 +1048,11 @@ class Controller(BaseController): self._event_listeners = {} self._event_listeners_lock = threading.RLock() - - # number of sequential 'GETINFO ip-to-country/*' lookups that have failed - - self._geoip_failure_count = 0 self._enabled_features = [] + self._is_geoip_unavailable = None + + self._last_address_exc = None + self._last_fingerprint_exc = None super(Controller, self).__init__(control_socket, is_authenticated) @@ -958,27 +1065,29 @@ class Controller(BaseController): def _confchanged_listener(event): if self.is_caching_enabled(): - self._set_cache(dict((k, None) for k in event.config), 'getconf') + to_cache_changed = dict((k.lower(), v) for k, v in event.changed.items()) + to_cache_unset = dict((k.lower(), []) for k in event.unset) # [] represents None value in cache - if 'exitpolicy' in event.config.keys(): - self._set_cache({'exitpolicy': None}) + to_cache = {} + to_cache.update(to_cache_changed) + to_cache.update(to_cache_unset) + + self._set_cache(to_cache, 'getconf') + + self._confchanged_cache_invalidation(to_cache) self.add_event_listener(_confchanged_listener, EventType.CONF_CHANGED) - def connect(self): - super(Controller, 
self).connect() - self.clear_cache() + def _address_changed_listener(event): + if event.action in ('EXTERNAL_ADDRESS', 'DNS_USELESS'): + self._set_cache({'exit_policy': None}) + self._set_cache({'address': None}, 'getinfo') + self._last_address_exc = None + + self.add_event_listener(_address_changed_listener, EventType.STATUS_SERVER) def close(self): - # making a best-effort attempt to quit before detaching the socket - if self.is_alive(): - try: - self.msg('QUIT') - except: - pass - - self.clear_cache() - + self.clear_cache() super(Controller, self).close() def authenticate(self, *args, **kwargs): @@ -990,6 +1099,22 @@ class Controller(BaseController): import stem.connection stem.connection.authenticate(self, *args, **kwargs) + def reconnect(self, *args, **kwargs): + """ + Reconnects and authenticates to our control socket. + + .. versionadded:: 1.5.0 + + :raises: + * :class:`stem.SocketError` if unable to re-establish socket + * :class:`stem.connection.AuthenticationFailure` if unable to authenticate + """ + + with self._msg_lock: + self.connect() + self.clear_cache() + self.authenticate(*args, **kwargs) + @with_default() def get_info(self, params, default = UNDEFINED, get_bytes = False): """ @@ -1003,6 +1128,10 @@ class Controller(BaseController): .. versionchanged:: 1.1.0 Added the get_bytes argument. + .. versionchanged:: 1.7.0 + Errors commonly provided a :class:`stem.ProtocolError` when we should + raise a :class:`stem.OperationFailed`. 
+ :param str,list params: GETINFO option or options to be queried :param object default: response if the query fails :param bool get_bytes: provides **bytes** values rather than a **str** under python 3.x @@ -1019,14 +1148,13 @@ class Controller(BaseController): provided a default response * :class:`stem.InvalidArguments` if the 'params' requested was invalid - * :class:`stem.ProtocolError` if the geoip database is known to be - unavailable + * :class:`stem.ProtocolError` if the geoip database is unavailable """ start_time = time.time() reply = {} - if isinstance(params, (bytes, str_type)): + if stem.util._is_str(params): is_multiple = False params = set([params]) else: @@ -1036,6 +1164,14 @@ class Controller(BaseController): is_multiple = True params = set(params) + for param in params: + if param.startswith('ip-to-country/') and param != 'ip-to-country/0.0.0.0' and self.is_geoip_unavailable(): + raise stem.ProtocolError('Tor geoip database is unavailable') + elif param == 'address' and self._last_address_exc: + raise self._last_address_exc # we already know we can't resolve an address + elif param == 'fingerprint' and self._last_fingerprint_exc and self.get_conf('ORPort', None) is None: + raise self._last_fingerprint_exc # we already know we're not a relay + # check for cached results from_cache = [param.lower() for param in params] @@ -1046,15 +1182,10 @@ class Controller(BaseController): reply[user_expected_key] = cached_results[key] params.remove(user_expected_key) - for param in params: - if param.startswith('ip-to-country/') and self.is_geoip_unavailable(): - # the geoip database already looks to be unavailable - abort the request - - raise stem.ProtocolError('Tor geoip database is unavailable') - # if everything was cached then short circuit making the query if not params: - log.trace('GETINFO %s (cache fetch)' % ' '.join(reply.keys())) + if LOG_CACHE_FETCHES: + log.trace('GETINFO %s (cache fetch)' % ' '.join(reply.keys())) if is_multiple: return reply @@ 
-1079,15 +1210,19 @@ class Controller(BaseController): for key, value in response.entries.items(): key = key.lower() # make case insensitive - if key in CACHEABLE_GETINFO_PARAMS: + if key in CACHEABLE_GETINFO_PARAMS or key in CACHEABLE_GETINFO_PARAMS_UNTIL_SETCONF: to_cache[key] = value elif key.startswith('ip-to-country/'): - # both cache-able and means that we should reset the geoip failure count to_cache[key] = value - self._geoip_failure_count = -1 self._set_cache(to_cache, 'getinfo') + if 'address' in params: + self._last_address_exc = None + + if 'fingerprint' in params: + self._last_fingerprint_exc = None + log.debug('GETINFO %s (runtime: %0.4f)' % (' '.join(params), time.time() - start_time)) if is_multiple: @@ -1095,22 +1230,14 @@ class Controller(BaseController): else: return list(reply.values())[0] except stem.ControllerError as exc: - # bump geoip failure count if... - # * we're caching results - # * this was soley a geoip lookup - # * we've never had a successful geoip lookup (failure count isn't -1) + if 'address' in params: + self._last_address_exc = exc - is_geoip_request = len(params) == 1 and list(params)[0].startswith('ip-to-country/') - - if is_geoip_request and self.is_caching_enabled() and self._geoip_failure_count != -1: - self._geoip_failure_count += 1 - - if self.is_geoip_unavailable(): - log.warn("Tor's geoip database is unavailable.") + if 'fingerprint' in params: + self._last_fingerprint_exc = exc log.debug('GETINFO %s (failed: %s)' % (' '.join(params), exc)) - - raise exc + raise @with_default() def get_version(self, default = UNDEFINED): @@ -1135,7 +1262,8 @@ class Controller(BaseController): version = self._get_cache('version') if not version: - version = stem.version.Version(self.get_info('version')) + version_str = self.get_info('version') + version = stem.version.Version(version_str[4:] if version_str.startswith('Tor ') else version_str) self._set_cache({'version': version}) return version @@ -1145,8 +1273,12 @@ class 
Controller(BaseController): """ get_exit_policy(default = UNDEFINED) - Effective ExitPolicy for our relay. This accounts for - ExitPolicyRejectPrivate and default policies. + Effective ExitPolicy for our relay. + + .. versionchanged:: 1.7.0 + Policies retrieved through 'GETINFO exit-policy/full' rather than + parsing the user's torrc entries. This should be more reliable for + some edge cases. (:trac:`25739`) :param object default: response if the query fails @@ -1160,24 +1292,46 @@ class Controller(BaseController): An exception is only raised if we weren't provided a default response. """ - with self._msg_lock: - config_policy = self._get_cache('exit_policy') + policy = self._get_cache('exit_policy') - if not config_policy: - policy = [] + if not policy: + try: + policy = stem.exit_policy.ExitPolicy(*self.get_info('exit-policy/full').splitlines()) + self._set_cache({'exit_policy': policy}) + except stem.OperationFailed: + # There's a few situations where 'GETINFO exit-policy/full' will fail, + # most commonly... + # + # * Error 551: Descriptor still rebuilding - not ready yet + # + # Tor hasn't yet finished making our server descriptor. This often + # arises when tor has first started. + # + # * Error 552: Not running in server mode + # + # We're not configured to be a relay (no ORPort), or haven't yet + # been able to determine our externally facing IP address. + # + # When these arise best we can do is infer our policy from the torrc. + # Skipping caching so we'll retry GETINFO policy resolution next time + # we're called. 
+ + rules = [] + + if self.get_conf('ExitRelay') == '0': + rules.append('reject *:*') if self.get_conf('ExitPolicyRejectPrivate') == '1': - policy.append('reject private:*') + rules.append('reject private:*') for policy_line in self.get_conf('ExitPolicy', multiple = True): - policy += policy_line.split(',') + rules += policy_line.split(',') - policy += self.get_info('exit-policy/default').split(',') + rules += self.get_info('exit-policy/default').split(',') - config_policy = stem.exit_policy.get_config_policy(policy, self.get_info('address', None)) - self._set_cache({'exit_policy': config_policy}) + policy = stem.exit_policy.get_config_policy(rules, self.get_info('address', None)) - return config_policy + return policy @with_default() def get_ports(self, listener_type, default = UNDEFINED): @@ -1202,7 +1356,19 @@ class Controller(BaseController): and no default was provided """ - return [port for (addr, port) in self.get_listeners(listener_type) if addr == '127.0.0.1'] + def is_localhost(address): + if stem.util.connection.is_valid_ipv4_address(address): + return address == '0.0.0.0' or address.startswith('127.') + elif stem.util.connection.is_valid_ipv6_address(address): + return stem.util.connection.expand_ipv6_address(address) in ( + '0000:0000:0000:0000:0000:0000:0000:0000', + '0000:0000:0000:0000:0000:0000:0000:0001', + ) + else: + log.info("Request for %s ports got an address that's neither IPv4 or IPv6: %s" % (listener_type, address)) + return False + + return [port for (addr, port) in self.get_listeners(listener_type) if is_localhost(addr)] @with_default() def get_listeners(self, listener_type, default = UNDEFINED): @@ -1216,6 +1382,9 @@ class Controller(BaseController): .. versionadded:: 1.2.0 + .. versionchanged:: 1.5.0 + Recognize listeners with IPv6 addresses. 
+ :param stem.control.Listener listener_type: connection type being handled by the listeners we return :param object default: response if the query fails @@ -1227,72 +1396,85 @@ class Controller(BaseController): and no default was provided """ - proxy_addrs = [] - query = 'net/listeners/%s' % listener_type.lower() + listeners = self._get_cache(listener_type, 'listeners') - try: - for listener in self.get_info(query).split(): - if not (listener.startswith('"') and listener.endswith('"')): - raise stem.ProtocolError("'GETINFO %s' responses are expected to be quoted: %s" % (query, listener)) - elif ':' not in listener: - raise stem.ProtocolError("'GETINFO %s' had a listener without a colon: %s" % (query, listener)) + if listeners is None: + proxy_addrs = [] + query = 'net/listeners/%s' % listener_type.lower() - listener = listener[1:-1] # strip quotes - addr, port = listener.split(':') + try: + for listener in self.get_info(query).split(): + if not (listener.startswith('"') and listener.endswith('"')): + raise stem.ProtocolError("'GETINFO %s' responses are expected to be quoted: %s" % (query, listener)) + elif ':' not in listener: + raise stem.ProtocolError("'GETINFO %s' had a listener without a colon: %s" % (query, listener)) - # Skip unix sockets, for instance... - # - # GETINFO net/listeners/control - # 250-net/listeners/control="unix:/tmp/tor/socket" - # 250 OK + listener = listener[1:-1] # strip quotes + addr, port = listener.rsplit(':', 1) - if addr == 'unix': - continue + # Skip unix sockets, for instance... + # + # GETINFO net/listeners/control + # 250-net/listeners/control="unix:/tmp/tor/socket" + # 250 OK - proxy_addrs.append((addr, port)) - except stem.InvalidArguments: - # Tor version is old (pre-tor-0.2.2.26-beta), use get_conf() instead. - # Some options (like the ORPort) can have optional attributes after the - # actual port number. 
+ if addr == 'unix': + continue - port_option = { - Listener.OR: 'ORPort', - Listener.DIR: 'DirPort', - Listener.SOCKS: 'SocksPort', - Listener.TRANS: 'TransPort', - Listener.NATD: 'NatdPort', - Listener.DNS: 'DNSPort', - Listener.CONTROL: 'ControlPort', - }[listener_type] + if addr.startswith('[') and addr.endswith(']'): + addr = addr[1:-1] # unbracket ipv6 address - listener_option = { - Listener.OR: 'ORListenAddress', - Listener.DIR: 'DirListenAddress', - Listener.SOCKS: 'SocksListenAddress', - Listener.TRANS: 'TransListenAddress', - Listener.NATD: 'NatdListenAddress', - Listener.DNS: 'DNSListenAddress', - Listener.CONTROL: 'ControlListenAddress', - }[listener_type] - - port_value = self.get_conf(port_option).split()[0] - - for listener in self.get_conf(listener_option, multiple = True): - if ':' in listener: - addr, port = listener.split(':') proxy_addrs.append((addr, port)) - else: - proxy_addrs.append((listener, port_value)) + except stem.InvalidArguments: + # Tor version is old (pre-tor-0.2.2.26-beta), use get_conf() instead. + # Some options (like the ORPort) can have optional attributes after the + # actual port number. 
- # validate that address/ports are valid, and convert ports to ints + port_option = { + Listener.OR: 'ORPort', + Listener.DIR: 'DirPort', + Listener.SOCKS: 'SocksPort', + Listener.TRANS: 'TransPort', + Listener.NATD: 'NatdPort', + Listener.DNS: 'DNSPort', + Listener.CONTROL: 'ControlPort', + }[listener_type] - for addr, port in proxy_addrs: - if not stem.util.connection.is_valid_ipv4_address(addr): - raise stem.ProtocolError('Invalid address for a %s listener: %s' % (listener_type, addr)) - elif not stem.util.connection.is_valid_port(port): - raise stem.ProtocolError('Invalid port for a %s listener: %s' % (listener_type, port)) + listener_option = { + Listener.OR: 'ORListenAddress', + Listener.DIR: 'DirListenAddress', + Listener.SOCKS: 'SocksListenAddress', + Listener.TRANS: 'TransListenAddress', + Listener.NATD: 'NatdListenAddress', + Listener.DNS: 'DNSListenAddress', + Listener.CONTROL: 'ControlListenAddress', + }[listener_type] - return [(addr, int(port)) for (addr, port) in proxy_addrs] + port_value = self.get_conf(port_option).split()[0] + + for listener in self.get_conf(listener_option, multiple = True): + if ':' in listener: + addr, port = listener.rsplit(':', 1) + + if addr.startswith('[') and addr.endswith(']'): + addr = addr[1:-1] # unbracket ipv6 address + + proxy_addrs.append((addr, port)) + else: + proxy_addrs.append((listener, port_value)) + + # validate that address/ports are valid, and convert ports to ints + + for addr, port in proxy_addrs: + if not stem.util.connection.is_valid_ipv4_address(addr) and not stem.util.connection.is_valid_ipv6_address(addr): + raise stem.ProtocolError('Invalid address for a %s listener: %s' % (listener_type, addr)) + elif not stem.util.connection.is_valid_port(port): + raise stem.ProtocolError('Invalid port for a %s listener: %s' % (listener_type, port)) + + listeners = [(addr, int(port)) for (addr, port) in proxy_addrs] + self._set_cache({listener_type: listeners}, 'listeners') + + return listeners @with_default() 
def get_accounting_stats(self, default = UNDEFINED): @@ -1300,25 +1482,13 @@ class Controller(BaseController): get_accounting_stats(default = UNDEFINED) Provides stats related to our relaying limitations if AccountingMax was set - in our torrc. This provides a **namedtuple** with the following - attributes... - - * retrieved (float) - unix timestamp for when this was fetched - * status (str) - hibernation status of 'awake', 'soft', or 'hard' - * interval_end (datetime) - * time_until_reset (int) - seconds until our limits reset - * read_bytes (int) - * read_bytes_left (int) - * read_limit (int) - * written_bytes (int) - * write_bytes_left (int) - * write_limit (int) + in our torrc. .. versionadded:: 1.3.0 :param object default: response if the query fails - :returns: **namedtuple** with our accounting stats + :returns: :class:`~stem.control.AccountingStats` with our accounting stats :raises: :class:`stem.ControllerError` if unable to determine the listeners and no default was provided @@ -1469,9 +1639,9 @@ class Controller(BaseController): control_socket = self.get_socket() if isinstance(control_socket, stem.socket.ControlPort): - pid = stem.util.system.pid_by_port(control_socket.get_port()) + pid = stem.util.system.pid_by_port(control_socket.port) elif isinstance(control_socket, stem.socket.ControlSocketFile): - pid = stem.util.system.pid_by_open_file(control_socket.get_socket_path()) + pid = stem.util.system.pid_by_open_file(control_socket.path) if pid: self._set_cache({'pid': pid}) @@ -1479,6 +1649,47 @@ class Controller(BaseController): else: raise ValueError("Unable to resolve tor's pid" if self.is_localhost() else "Tor isn't running locally") + def is_user_traffic_allowed(self): + """ + Checks if we're likely to service direct user traffic. This essentially + boils down to... + + * If we're a bridge or guard relay, inbound connections are possibly from + users. + + * If our exit policy allows traffic then output connections are possibly + from users. 
+ + Note the word 'likely'. These is a decent guess in practice, but not always + correct. For instance, information about which flags we have are only + fetched periodically. + + This method is intended to help you avoid eavesdropping on user traffic. + Monitoring user connections is not only unethical, but likely a violation + of wiretapping laws. + + .. versionadded:: 1.5.0 + + :returns: :class:`~stem.cotroller.UserTrafficAllowed` with **inbound** and + **outbound** boolean attributes to indicate if we're likely servicing + direct user traffic + """ + + inbound_allowed, outbound_allowed = False, False + + if self.get_conf('BridgeRelay', None) == '1': + inbound_allowed = True + + if self.get_conf('ORPort', None): + if not inbound_allowed: + consensus_entry = self.get_network_status(default = None) + inbound_allowed = consensus_entry and 'Guard' in consensus_entry.flags + + exit_policy = self.get_exit_policy(None) + outbound_allowed = exit_policy and exit_policy.is_exiting_allowed() + + return UserTrafficAllowed(inbound_allowed, outbound_allowed) + @with_default() def get_microdescriptor(self, relay = None, default = UNDEFINED): """ @@ -1530,7 +1741,7 @@ class Controller(BaseController): if str(exc).startswith('GETINFO request contained unrecognized keywords:'): raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay) else: - raise exc + raise if not desc_content: raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') @@ -1545,9 +1756,10 @@ class Controller(BaseController): Provides an iterator for all of the microdescriptors that tor currently knows about. - **Tor does not expose this information via the control protocol** - (:trac:`8323`). Until it does this reads the microdescriptors from disk, - and hence won't work remotely or if we lack read permissions. + Prior to Tor 0.3.5.1 this information was not available via the control + protocol. 
When connected to prior versions we read the microdescriptors + directly from disk instead, which will not work remotely or if our process + lacks read permissions. :param list default: items to provide if the query fails @@ -1559,27 +1771,38 @@ class Controller(BaseController): default was provided """ - try: - data_directory = self.get_conf('DataDirectory') - except stem.ControllerError as exc: - raise stem.OperationFailed(message = 'Unable to determine the data directory (%s)' % exc) + if self.get_version() >= stem.version.Requirement.GETINFO_MICRODESCRIPTORS: + desc_content = self.get_info('md/all', get_bytes = True) - cached_descriptor_path = os.path.join(data_directory, 'cached-microdescs') - - if not os.path.exists(data_directory): - raise stem.OperationFailed(message = "Data directory reported by tor doesn't exist (%s)" % data_directory) - elif not os.path.exists(cached_descriptor_path): - raise stem.OperationFailed(message = "Data directory doens't contain cached microescriptors (%s)" % cached_descriptor_path) - - with stem.descriptor.reader.DescriptorReader([cached_descriptor_path]) as reader: - for desc in reader: - # It shouldn't be possible for these to be something other than - # microdescriptors but as the saying goes: trust but verify. 
- - if not isinstance(desc, stem.descriptor.microdescriptor.Microdescriptor): - raise stem.OperationFailed(message = 'BUG: Descriptor reader provided non-microdescriptor content (%s)' % type(desc)) + if not desc_content: + raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') + for desc in stem.descriptor.microdescriptor._parse_file(io.BytesIO(desc_content)): yield desc + else: + # TODO: remove when tor versions that require this are obsolete + + try: + data_directory = self.get_conf('DataDirectory') + except stem.ControllerError as exc: + raise stem.OperationFailed(message = 'Unable to determine the data directory (%s)' % exc) + + cached_descriptor_path = os.path.join(data_directory, 'cached-microdescs') + + if not os.path.exists(data_directory): + raise stem.OperationFailed(message = "Data directory reported by tor doesn't exist (%s)" % data_directory) + elif not os.path.exists(cached_descriptor_path): + raise stem.OperationFailed(message = "Data directory doesn't contain cached microdescriptors (%s)" % cached_descriptor_path) + + with stem.descriptor.reader.DescriptorReader([cached_descriptor_path]) as reader: + for desc in reader: + # It shouldn't be possible for these to be something other than + # microdescriptors but as the saying goes: trust but verify. 
+ + if not isinstance(desc, stem.descriptor.microdescriptor.Microdescriptor): + raise stem.OperationFailed(message = 'BUG: Descriptor reader provided non-microdescriptor content (%s)' % type(desc)) + + yield desc @with_default() def get_server_descriptor(self, relay = None, default = UNDEFINED): @@ -1638,17 +1861,17 @@ class Controller(BaseController): if str(exc).startswith('GETINFO request contained unrecognized keywords:'): raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay) else: - raise exc + raise if not desc_content: raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') return stem.descriptor.server_descriptor.RelayDescriptor(desc_content) - except Exception as exc: + except: if not self._is_server_descriptors_available(): raise ValueError(SERVER_DESCRIPTORS_UNSUPPORTED) - raise exc + raise @with_default(yields = True) def get_server_descriptors(self, default = UNDEFINED): @@ -1694,6 +1917,11 @@ class Controller(BaseController): Checks to see if tor server descriptors should be available or not. """ + # TODO: Replace with a 'GETINFO desc/download-enabled' request when they're + # widely available... + # + # https://gitweb.torproject.org/torspec.git/commit/?id=378699c + return self.get_version() < stem.version.Requirement.MICRODESCRIPTOR_IS_DEFAULT or \ self.get_conf('UseMicrodescriptors', None) == '0' @@ -1706,17 +1934,6 @@ class Controller(BaseController): or nickname. If the relay identifier could be either a fingerprint *or* nickname then it's queried as a fingerprint. - This provides - :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` - instances if tor is using microdescriptors... - - :: - - controller.get_conf('UseMicrodescriptors', '0') == '1' - - ... and :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` - otherwise. - If no **relay** is provided then this defaults to ourselves. 
Remember that this requires that we've retrieved our own descriptor from remote authorities so this both won't be available for newly started relays and @@ -1728,7 +1945,7 @@ class Controller(BaseController): :param str relay: fingerprint or nickname of the relay to be queried :param object default: response if the query fails - :returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` + :returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the given relay :raises: @@ -1760,15 +1977,12 @@ class Controller(BaseController): if str(exc).startswith('GETINFO request contained unrecognized keywords:'): raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay) else: - raise exc + raise if not desc_content: raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') - if self.get_conf('UseMicrodescriptors', '0') == '1': - return stem.descriptor.router_status_entry.RouterStatusEntryMicroV3(desc_content) - else: - return stem.descriptor.router_status_entry.RouterStatusEntryV3(desc_content) + return stem.descriptor.router_status_entry.RouterStatusEntryV3(desc_content) @with_default(yields = True) def get_network_statuses(self, default = UNDEFINED): @@ -1778,21 +1992,10 @@ class Controller(BaseController): Provides an iterator for all of the router status entries that tor currently knows about. - This provides - :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` - instances if tor is using microdescriptors... - - :: - - controller.get_conf('UseMicrodescriptors', '0') == '1' - - ... and :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` - otherwise. 
- :param list default: items to provide if the query fails :returns: iterates over - :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` for + :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for relays in the tor network :raises: :class:`stem.ControllerError` if unable to query tor and no @@ -1804,11 +2007,6 @@ class Controller(BaseController): # # https://trac.torproject.org/8248 - if self.get_conf('UseMicrodescriptors', '0') == '1': - desc_class = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3 - else: - desc_class = stem.descriptor.router_status_entry.RouterStatusEntryV3 - desc_content = self.get_info('ns/all', get_bytes = True) if not desc_content: @@ -1816,15 +2014,15 @@ class Controller(BaseController): desc_iterator = stem.descriptor.router_status_entry._parse_file( io.BytesIO(desc_content), - True, - entry_class = desc_class, + False, + entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3, ) for desc in desc_iterator: yield desc @with_default() - def get_hidden_service_descriptor(self, address, default = UNDEFINED, servers = None, await_result = True): + def get_hidden_service_descriptor(self, address, default = UNDEFINED, servers = None, await_result = True, timeout = None): """ get_hidden_service_descriptor(address, default = UNDEFINED, servers = None, await_result = True) @@ -1835,11 +2033,17 @@ class Controller(BaseController): If **await_result** is **True** then this blocks until we either receive the descriptor or the request fails. If **False** this returns right away. + **This method only supports v2 hidden services, not v3.** (:trac:`25417`) + .. versionadded:: 1.4.0 + .. versionchanged:: 1.7.0 + Added the timeout argument. 
+ :param str address: address of the hidden service descriptor, the '.onion' suffix is optional :param object default: response if the query fails :param list servers: requrest the descriptor from these specific servers + :param float timeout: seconds to wait when **await_result** is **True** :returns: :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor` for the given service if **await_result** is **True**, or **None** otherwise @@ -1847,6 +2051,7 @@ class Controller(BaseController): :raises: * :class:`stem.DescriptorUnavailable` if **await_result** is **True** and unable to provide a descriptor for the given service + * :class:`stem.Timeout` if **timeout** was reached * :class:`stem.ControllerError` if unable to query the descriptor * **ValueError** if **address** doesn't conform with the pattern of a hidden service address @@ -1865,6 +2070,7 @@ class Controller(BaseController): hs_desc_queue, hs_desc_listener = queue.Queue(), None hs_desc_content_queue, hs_desc_content_listener = queue.Queue(), None + start_time = time.time() if await_result: def hs_desc_listener(event): @@ -1880,7 +2086,7 @@ class Controller(BaseController): request = 'HSFETCH %s' % address if servers: - request += ' '.join(['SERVER=%s' % s for s in servers]) + request += ' ' + ' '.join(['SERVER=%s' % s for s in servers]) response = self.msg(request) stem.response.convert('SINGLELINE', response) @@ -1892,7 +2098,7 @@ class Controller(BaseController): return None # not waiting, so nothing to provide back else: while True: - event = hs_desc_content_queue.get() + event = _get_with_timeout(hs_desc_content_queue, timeout, start_time) if event.address == address: if event.descriptor: @@ -1901,7 +2107,7 @@ class Controller(BaseController): # no descriptor, looking through HS_DESC to figure out why while True: - event = hs_desc_queue.get() + event = _get_with_timeout(hs_desc_queue, timeout, start_time) if event.address == address and event.action == stem.HSDescAction.FAILED: if 
event.reason == stem.HSDescReason.NOT_FOUND: @@ -1917,6 +2123,8 @@ class Controller(BaseController): def get_conf(self, param, default = UNDEFINED, multiple = False): """ + get_conf(param, default = UNDEFINED, multiple = False) + Queries the current value for a configuration option. Some configuration options (like the ExitPolicy) can have multiple values. This provides a **list** with all of the values if **multiple** is **True**. Otherwise this @@ -1964,6 +2172,8 @@ class Controller(BaseController): def get_conf_map(self, params, default = UNDEFINED, multiple = True): """ + get_conf_map(params, default = UNDEFINED, multiple = True) + Similar to :func:`~stem.control.Controller.get_conf` but queries multiple configuration options, providing back a mapping of those options to their values. @@ -1983,6 +2193,9 @@ class Controller(BaseController): **HiddenServiceOptions** was the only option that falls into the third category. + **Note:** HiddenServiceOptions are best retrieved via the + :func:`~stem.control.Controller.get_hidden_service_conf` method instead. 
+ :param str,list params: configuration option(s) to be queried :param object default: value for the mappings if the configuration option is either undefined or the query fails @@ -2008,7 +2221,7 @@ class Controller(BaseController): start_time = time.time() reply = {} - if isinstance(params, (bytes, str_type)): + if stem.util._is_str(params): params = [params] # remove strings which contain only whitespace @@ -2032,7 +2245,9 @@ class Controller(BaseController): # if everything was cached then short circuit making the query if not lookup_params: - log.trace('GETCONF %s (cache fetch)' % ' '.join(reply.keys())) + if LOG_CACHE_FETCHES: + log.trace('GETCONF %s (cache fetch)' % ' '.join(reply.keys())) + return self._get_conf_dict_to_response(reply, default, multiple) try: @@ -2043,10 +2258,6 @@ class Controller(BaseController): if self.is_caching_enabled(): to_cache = dict((k.lower(), v) for k, v in response.entries.items()) - for key in UNCACHEABLE_GETCONF_PARAMS: - if key in to_cache: - del to_cache[key] - self._set_cache(to_cache, 'getconf') # Maps the entries back to the parameters that the user requested so the @@ -2075,7 +2286,7 @@ class Controller(BaseController): if default != UNDEFINED: return dict((param, default) for param in params) else: - raise exc + raise def _get_conf_dict_to_response(self, config_dict, default, multiple): """ @@ -2098,6 +2309,54 @@ class Controller(BaseController): return return_dict + @with_default() + def is_set(self, param, default = UNDEFINED): + """ + is_set(param, default = UNDEFINED) + + Checks if a configuration option differs from its default or not. + + .. 
versionadded:: 1.5.0 + + :param str param: configuration option to check + :param object default: response if the query fails + + :returns: **True** if option differs from its default and **False** + otherwise + + :raises: :class:`stem.ControllerError` if the call fails and we weren't + provided a default response + """ + + return param in self._get_custom_options() + + def _get_custom_options(self): + result = self._get_cache('get_custom_options') + + if not result: + config_lines = self.get_info('config-text').splitlines() + + # Tor provides some config options even if they haven't been set... + # + # https://trac.torproject.org/projects/tor/ticket/2362 + # https://trac.torproject.org/projects/tor/ticket/17909 + + default_lines = ( + 'Log notice stdout', + 'Log notice file /var/log/tor/log', + 'DataDirectory /home/%s/.tor' % self.get_user('undefined'), + 'HiddenServiceStatistics 0', + ) + + for line in default_lines: + if line in config_lines: + config_lines.remove(line) + + result = dict([line.split(' ', 1) for line in config_lines]) + self._set_cache({'get_custom_options': result}) + + return result + def set_conf(self, param, value): """ Changes the value of a tor configuration option. 
Our value can be any of @@ -2180,10 +2439,12 @@ class Controller(BaseController): for param, value in params: if isinstance(value, str): query_comp.append('%s="%s"' % (param, value.strip())) - elif value: + elif isinstance(value, collections.Iterable): query_comp.extend(['%s="%s"' % (param, val.strip()) for val in value]) - else: + elif not value: query_comp.append(param) + else: + raise ValueError('Cannot set %s to %s since the value was a %s but we only accept strings' % (param, value, type(value).__name__)) query = ' '.join(query_comp) response = self.msg(query) @@ -2193,22 +2454,16 @@ class Controller(BaseController): log.debug('%s (runtime: %0.4f)' % (query, time.time() - start_time)) if self.is_caching_enabled(): - to_cache = {} - - for param, value in params: - param = param.lower() - - if isinstance(value, (bytes, str_type)): - value = [value] - - to_cache[param] = value - - if param == 'exitpolicy': - self._set_cache({'exitpolicy': None}) - + # clear cache for params; the CONF_CHANGED event will set cache for changes + to_cache = dict((k.lower(), None) for k, v in params) self._set_cache(to_cache, 'getconf') + self._confchanged_cache_invalidation(dict(params)) else: log.debug('%s (failed, code: %s, message: %s)' % (query, response.code, response.message)) + immutable_params = [k for k, v in params if stem.util.str_tools._to_unicode(k).lower() in IMMUTABLE_CONFIG_OPTIONS] + + if immutable_params: + raise stem.InvalidArguments(message = "%s cannot be changed while tor's running" % ', '.join(sorted(immutable_params)), arguments = immutable_params) if response.code == '552': if response.message.startswith("Unrecognized option: Unknown option '"): @@ -2256,6 +2511,14 @@ class Controller(BaseController): provided a default response """ + service_dir_map = self._get_cache('hidden_service_conf') + + if service_dir_map is not None: + if LOG_CACHE_FETCHES: + log.trace('GETCONF HiddenServiceOptions (cache fetch)') + + return service_dir_map + start_time = time.time() 
try: @@ -2265,7 +2528,7 @@ class Controller(BaseController): (time.time() - start_time)) except stem.ControllerError as exc: log.debug('GETCONF HiddenServiceOptions (failed: %s)' % exc) - raise exc + raise service_dir_map = OrderedDict() directory = None @@ -2292,7 +2555,7 @@ class Controller(BaseController): if target.isdigit(): target_port = target else: - target_address, target_port = target.split(':') + target_address, target_port = target.rsplit(':', 1) if not stem.util.connection.is_valid_port(port): raise stem.ProtocolError('GETCONF provided an invalid HiddenServicePort port (%s): %s' % (port, content)) @@ -2305,6 +2568,7 @@ class Controller(BaseController): else: service_dir_map[directory][k] = v + self._set_cache({'hidden_service_conf': service_dir_map}) return service_dir_map def set_hidden_service_conf(self, conf): @@ -2375,26 +2639,14 @@ class Controller(BaseController): def create_hidden_service(self, path, port, target_address = None, target_port = None, auth_type = None, client_names = None): """ Create a new hidden service. If the directory is already present, a - new port is added. This provides a **namedtuple** of the following... - - * path (str) - hidden service directory - - * hostname (str) - Content of the hostname file, if no **client_names** - are provided this is the onion address of the service. This is only - retrieved if we can read the hidden service directory. - - * hostname_for_client (dict) - mapping of client names to their onion - address, this is only set if the **client_names** was provided and we - can read the hidden service directory - - * config (dict) - tor's new hidden service configuration + new port is added. Our *.onion address is fetched by reading the hidden service directory. However, this directory is only readable by the tor user, so if unavailable the **hostname** will be **None**. - **As of Tor 0.2.7.1 there's two ways for creating hidden services. 
This is - no longer the recommended method.** Rather, try using + **As of Tor 0.2.7.1 there's two ways for creating hidden services, and this + method is no longer recommended.** Rather, try using :func:`~stem.control.Controller.create_ephemeral_hidden_service` instead. .. versionadded:: 1.3.0 @@ -2410,7 +2662,8 @@ class Controller(BaseController): :param str auth_type: authentication type: basic, stealth or None to disable auth :param list client_names: client names (1-16 characters "A-Za-z0-9+-_") - :returns: **CreateHiddenServiceOutput** if we create or update a hidden service, **None** otherwise + :returns: :class:`~stem.cotroller.CreateHiddenServiceOutput` if we create + or update a hidden service, **None** otherwise :raises: :class:`stem.ControllerError` if the call fails """ @@ -2439,6 +2692,17 @@ class Controller(BaseController): hsac = "%s %s" % (auth_type, ','.join(client_names)) conf[path]['HiddenServiceAuthorizeClient'] = hsac + # Tor 0.3.5 changes its default for HS creation from v2 to v3. This is + # fine, but there's a couple options that are incompatible with v3. If + # creating a service with one of those we should explicitly create a v2 + # service instead. + # + # https://trac.torproject.org/projects/tor/ticket/27446 + + for path in conf: + if 'HiddenServiceAuthorizeClient' in conf[path] or 'RendPostPeriod' in conf[path]: + conf[path]['HiddenServiceVersion'] = '2' + self.set_hidden_service_conf(conf) hostname, hostname_for_client = None, {} @@ -2541,6 +2805,10 @@ class Controller(BaseController): .. versionadded:: 1.4.0 + .. versionchanged:: 1.6.0 + Tor change caused this to start providing empty strings if unset + (:trac:`21329`). 
+ :param object default: response if the query fails :param bool our_services: include services created with this controller that weren't flagged as 'detached' @@ -2563,19 +2831,24 @@ class Controller(BaseController): try: result += self.get_info('onions/current').split('\n') except stem.ProtocolError as exc: + # TODO: Tor's behavior around this was changed in Feb 2017, we should + # drop it when all versions that did this are deprecated... + # + # https://trac.torproject.org/projects/tor/ticket/21329 + if 'No onion services of the specified type.' not in str(exc): - raise exc + raise if detached: try: result += self.get_info('onions/detached').split('\n') except stem.ProtocolError as exc: if 'No onion services of the specified type.' not in str(exc): - raise exc + raise - return result + return [r for r in result if r] # drop any empty responses (GETINFO is blank if unset) - def create_ephemeral_hidden_service(self, ports, key_type = 'NEW', key_content = 'BEST', discard_key = False, detached = False, await_publication = False): + def create_ephemeral_hidden_service(self, ports, key_type = 'NEW', key_content = 'BEST', discard_key = False, detached = False, await_publication = False, timeout = None, basic_auth = None, max_streams = None): """ Creates a new hidden service. Unlike :func:`~stem.control.Controller.create_hidden_service` this style of @@ -2600,30 +2873,88 @@ class Controller(BaseController): create_ephemeral_hidden_service({80: 80, 443: '173.194.33.133:443'}) + If **basic_auth** is provided this service will require basic + authentication to access. This means users must set HidServAuth in their + torrc with credentials to access it. + + **basic_auth** is a mapping of usernames to their credentials. If the + credential is **None** one is generated and returned as part of the + response. For instance, only bob can access using the given newly generated + credentials... 
+ + :: + + >>> response = controller.create_ephemeral_hidden_service(80, basic_auth = {'bob': None}) + >>> print(response.client_auth) + {'bob': 'nKwfvVPmTNr2k2pG0pzV4g'} + + ... while both alice and bob can access with existing credentials in the + following... + + :: + + controller.create_ephemeral_hidden_service(80, basic_auth = { + 'alice': 'l4BT016McqV2Oail+Bwe6w', + 'bob': 'vGnNRpWYiMBFTWD2gbBlcA', + }) + + To create a **version 3** service simply specify **ED25519-V3** as the + our key type, and to create a **version 2** service use **RSA1024**. The + default version of newly created hidden services is based on the + **HiddenServiceVersion** value in your torrc... + + :: + + response = controller.create_ephemeral_hidden_service( + 80, + key_content = 'ED25519-V3', + await_publication = True, + ) + + print('service established at %s.onion' % response.service_id) + .. versionadded:: 1.4.0 + .. versionchanged:: 1.5.0 + Added the basic_auth argument. + + .. versionchanged:: 1.5.0 + Added support for non-anonymous services. To do so set + 'HiddenServiceSingleHopMode 1' and 'HiddenServiceNonAnonymousMode 1' in + your torrc. + + .. versionchanged:: 1.7.0 + Added the timeout and max_streams arguments. 
+ :param int,list,dict ports: hidden service port(s) or mapping of hidden service ports to their targets :param str key_type: type of key being provided, generates a new key if - 'NEW' (options are: **NEW** and **RSA1024**) + 'NEW' (options are: **NEW**, **RSA1024**, and **ED25519-V3**) :param str key_content: key for the service to use or type of key to be - generated (options when **key_type** is **NEW** are **BEST** and - **RSA1024**) + generated (options when **key_type** is **NEW** are **BEST**, + **RSA1024**, and **ED25519-V3**) :param bool discard_key: avoid providing the key back in our response :param bool detached: continue this hidden service even after this control connection is closed if **True** :param bool await_publication: blocks until our descriptor is successfully published if **True** + :param float timeout: seconds to wait when **await_result** is **True** + :param dict basic_auth: required user credentials to access this service + :param int max_streams: maximum number of streams the hidden service will + accept, unlimited if zero or not set :returns: :class:`~stem.response.add_onion.AddOnionResponse` with the response - :raises: :class:`stem.ControllerError` if the call fails + :raises: + * :class:`stem.ControllerError` if the call fails + * :class:`stem.Timeout` if **timeout** was reached """ if self.get_version() < stem.version.Requirement.ADD_ONION: raise stem.UnsatisfiableRequest(message = 'Ephemeral hidden services were added in tor version %s' % stem.version.Requirement.ADD_ONION) hs_desc_queue, hs_desc_listener = queue.Queue(), None + start_time = time.time() if await_publication: def hs_desc_listener(event): @@ -2641,9 +2972,28 @@ class Controller(BaseController): if detached: flags.append('Detach') + if basic_auth is not None: + if self.get_version() < stem.version.Requirement.ADD_ONION_BASIC_AUTH: + raise stem.UnsatisfiableRequest(message = 'Basic authentication support was added to ADD_ONION in tor version %s' % 
stem.version.Requirement.ADD_ONION_BASIC_AUTH) + + flags.append('BasicAuth') + + if max_streams is not None: + if self.get_version() < stem.version.Requirement.ADD_ONION_MAX_STREAMS: + raise stem.UnsatisfiableRequest(message = 'Limitation of the maximum number of streams to accept was added to ADD_ONION in tor version %s' % stem.version.Requirement.ADD_ONION_MAX_STREAMS) + + flags.append('MaxStreamsCloseCircuit') + + if self.get_version() >= stem.version.Requirement.ADD_ONION_NON_ANONYMOUS: + if self.get_conf('HiddenServiceSingleHopMode', None) == '1' and self.get_conf('HiddenServiceNonAnonymousMode', None) == '1': + flags.append('NonAnonymous') + if flags: request += ' Flags=%s' % ','.join(flags) + if max_streams is not None: + request += ' MaxStreams=%s' % max_streams + if isinstance(ports, int): request += ' Port=%s' % ports elif isinstance(ports, list): @@ -2655,6 +3005,13 @@ class Controller(BaseController): else: raise ValueError("The 'ports' argument of create_ephemeral_hidden_service() needs to be an int, list, or dict") + if basic_auth is not None: + for client_name, client_blob in basic_auth.items(): + if client_blob: + request += ' ClientAuth=%s:%s' % (client_name, client_blob) + else: + request += ' ClientAuth=%s' % client_name + response = self.msg(request) stem.response.convert('ADD_ONION', response) @@ -2669,7 +3026,7 @@ class Controller(BaseController): try: while True: - event = hs_desc_queue.get() + event = _get_with_timeout(hs_desc_queue, timeout, start_time) if event.action == stem.HSDescAction.UPLOAD and event.address == response.service_id: directories_uploaded_to.append(event.directory_fingerprint) @@ -2736,6 +3093,13 @@ class Controller(BaseController): If a new control connection is initialized then this listener will be reattached. + If tor emits a malformed event it can be received by listening for the + stem.control.MALFORMED_EVENTS constant. + + .. 
versionchanged:: 1.7.0 + Listener exceptions and malformed events no longer break further event + processing. Added the **MALFORMED_EVENTS** constant. + :param functor listener: function to be called when an event is received :param stem.control.EventType events: event types to be listened for @@ -2800,7 +3164,12 @@ class Controller(BaseController): :returns: cached value corresponding to key or **None** if the key wasn't found """ - return self._get_cache_map([param], namespace).get(param, None) + with self._cache_lock: + if not self.is_caching_enabled(): + return None + + cache_key = '%s.%s' % (namespace, param) if namespace else param + return self._request_cache.get(cache_key, None) def _get_cache_map(self, params, namespace = None): """ @@ -2817,10 +3186,7 @@ class Controller(BaseController): if self.is_caching_enabled(): for param in params: - if namespace: - cache_key = '%s.%s' % (namespace, param) - else: - cache_key = param + cache_key = '%s.%s' % (namespace, param) if namespace else param if cache_key in self._request_cache: cached_values[param] = self._request_cache[cache_key] @@ -2840,6 +3206,23 @@ class Controller(BaseController): if not self.is_caching_enabled(): return + # if params is None then clear the namespace + + if params is None and namespace: + for cache_key in list(self._request_cache.keys()): + if cache_key.startswith('%s.' 
% namespace): + del self._request_cache[cache_key] + + return + + # remove uncacheable items + if namespace == 'getconf': + # shallow copy before edit so as not to change it for the caller + params = params.copy() + for key in UNCACHEABLE_GETCONF_PARAMS: + if key in params: + del params[key] + for key, value in list(params.items()): if namespace: cache_key = '%s.%s' % (namespace, key) @@ -2847,11 +3230,35 @@ class Controller(BaseController): cache_key = key if value is None: - if cache_key in self._request_cache: + if cache_key in list(self._request_cache.keys()): del self._request_cache[cache_key] else: self._request_cache[cache_key] = value + def _confchanged_cache_invalidation(self, params): + """ + Drops dependent portions of the cache when configuration changes. + + :param dict params: **dict** of 'config_key => value' pairs for configs + that changed. The entries' values are currently unused. + """ + + with self._cache_lock: + if not self.is_caching_enabled(): + return + + if any('hidden' in param.lower() for param in params.keys()): + self._set_cache({'hidden_service_conf': None}) + + # reset any getinfo parameters that can be changed by a SETCONF + + self._set_cache(dict([(k.lower(), None) for k in CACHEABLE_GETINFO_PARAMS_UNTIL_SETCONF]), 'getinfo') + self._set_cache(None, 'listeners') + + self._set_cache({'get_custom_options': None}) + + self._set_cache({'exit_policy': None}) # numerous options can change our policy + def is_caching_enabled(self): """ **True** if caching has been enabled, **False** otherwise. 
@@ -2881,7 +3288,7 @@ class Controller(BaseController): with self._cache_lock: self._request_cache = {} self._last_newnym = 0.0 - self._geoip_failure_count = 0 + self._is_geoip_unavailable = None def load_conf(self, configtext): """ @@ -2903,17 +3310,26 @@ class Controller(BaseController): elif not response.is_ok(): raise stem.ProtocolError('+LOADCONF Received unexpected response\n%s' % str(response)) - def save_conf(self): + def save_conf(self, force = False): """ Saves the current configuration options into the active torrc file. + .. versionchanged:: 1.6.0 + Added the force argument. + + :param bool force: overwrite the configuration even if it includes a + '%include' clause, this is ignored if tor doesn't support it + :raises: * :class:`stem.ControllerError` if the call fails * :class:`stem.OperationFailed` if the client is unable to save the configuration file """ - response = self.msg('SAVECONF') + if self.get_version() < stem.version.Requirement.SAVECONF_FORCE: + force = False + + response = self.msg('SAVECONF FORCE' if force else 'SAVECONF') stem.response.convert('SINGLELINE', response) if response.is_ok(): @@ -2973,7 +3389,7 @@ class Controller(BaseController): * :class:`stem.InvalidArguments` if features passed were invalid """ - if isinstance(features, (bytes, str_type)): + if stem.util._is_str(features): features = [features] response = self.msg('USEFEATURE %s' % ' '.join(features)) @@ -3035,29 +3451,35 @@ class Controller(BaseController): response = self.get_info('circuit-status') for circ in response.splitlines(): - circ_message = stem.socket.recv_message(StringIO('650 CIRC ' + circ + '\r\n')) + circ_message = stem.socket.recv_message(io.BytesIO(stem.util.str_tools._to_bytes('650 CIRC %s\r\n' % circ))) stem.response.convert('EVENT', circ_message, arrived_at = 0) circuits.append(circ_message) return circuits - def new_circuit(self, path = None, purpose = 'general', await_build = False): + def new_circuit(self, path = None, purpose = 'general', 
await_build = False, timeout = None): """ Requests a new circuit. If the path isn't provided, one is automatically selected. + .. versionchanged:: 1.7.0 + Added the timeout argument. + :param list,str path: one or more relays to make a circuit through :param str purpose: 'general' or 'controller' :param bool await_build: blocks until the circuit is built if **True** + :param float timeout: seconds to wait when **await_build** is **True** :returns: str of the circuit id of the newly created circuit - :raises: :class:`stem.ControllerError` if the call fails + :raises: + * :class:`stem.ControllerError` if the call fails + * :class:`stem.Timeout` if **timeout** was reached """ - return self.extend_circuit('0', path, purpose, await_build) + return self.extend_circuit('0', path, purpose, await_build, timeout) - def extend_circuit(self, circuit_id = '0', path = None, purpose = 'general', await_build = False): + def extend_circuit(self, circuit_id = '0', path = None, purpose = 'general', await_build = False, timeout = None): """ Either requests the creation of a new circuit or extends an existing one. @@ -3077,11 +3499,15 @@ class Controller(BaseController): 20 EXTENDED $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$649F2D0ACF418F7CFC6539AB2257EB2D5297BAFA=Eskimo BUILD_FLAGS=NEED_CAPACITY PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:51:11.433755 19 BUILT $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$30BAB8EE7606CBD12F3CC269AE976E0153E7A58D=Pascal1,$2765D8A8C4BBA3F89585A9FFE0E8575615880BEB=Anthracite PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:50:56.969938 + .. versionchanged:: 1.7.0 + Added the timeout argument. 
+ :param str circuit_id: id of a circuit to be extended :param list,str path: one or more relays to make a circuit through, this is required if the circuit id is non-zero :param str purpose: 'general' or 'controller' :param bool await_build: blocks until the circuit is built if **True** + :param float timeout: seconds to wait when **await_build** is **True** :returns: str of the circuit id of the created or extended circuit @@ -3089,6 +3515,7 @@ class Controller(BaseController): * :class:`stem.InvalidRequest` if one of the parameters were invalid * :class:`stem.CircuitExtensionFailed` if we were waiting for the circuit to build but it failed + * :class:`stem.Timeout` if **timeout** was reached * :class:`stem.ControllerError` if the call fails """ @@ -3097,6 +3524,7 @@ class Controller(BaseController): # we then can't get the failure if it can't be created. circ_queue, circ_listener = queue.Queue(), None + start_time = time.time() if await_build: def circ_listener(event): @@ -3116,7 +3544,7 @@ class Controller(BaseController): args = [circuit_id] - if isinstance(path, (bytes, str_type)): + if stem.util._is_str(path): path = [path] if path: @@ -3140,7 +3568,7 @@ class Controller(BaseController): if await_build: while True: - circ = circ_queue.get() + circ = _get_with_timeout(circ_queue, timeout, start_time) if circ.id == new_circuit: if circ.status == CircStatus.BUILT: @@ -3218,7 +3646,7 @@ class Controller(BaseController): response = self.get_info('stream-status') for stream in response.splitlines(): - message = stem.socket.recv_message(StringIO('650 STREAM ' + stream + '\r\n')) + message = stem.socket.recv_message(io.BytesIO(stem.util.str_tools._to_bytes('650 STREAM %s\r\n' % stream))) stem.response.convert('EVENT', message, arrived_at = 0) streams.append(message) @@ -3294,7 +3722,9 @@ class Controller(BaseController): :param stem.Signal signal: type of signal to be sent - :raises: :class:`stem.InvalidArguments` if signal provided wasn't recognized + :raises: + * 
:class:`stem.ControllerError` if sending the signal failed + * :class:`stem.InvalidArguments` if signal provided wasn't recognized """ response = self.msg('SIGNAL %s' % signal) @@ -3380,18 +3810,34 @@ class Controller(BaseController): def is_geoip_unavailable(self): """ - Provides **True** if we've concluded hat our geoip database is unavailable, - **False** otherwise. This is determined by having our 'GETINFO - ip-to-country/\*' lookups fail so this will default to **False** if we - aren't making those queries. + Provides **True** if tor's geoip database is unavailable, **False** + otherwise. - Geoip failures will be untracked if caching is disabled. + .. versionchanged:: 1.6.0 + No longer requires previously failed GETINFO requests to determine this. - :returns: **bool** to indicate if we've concluded our geoip database to be - unavailable or not + .. deprecated:: 1.6.0 + This is available as of Tor 0.3.2.1 through the following instead... + + :: + + controller.get_info('ip-to-country/ipv4-available', 0) == '1' + + :returns: **bool** indicating if we've determined tor's geoip database to + be unavailable or not """ - return self._geoip_failure_count >= GEOIP_FAILURE_THRESHOLD + if self._is_geoip_unavailable is None: + try: + self.get_info('ip-to-country/0.0.0.0') + self._is_geoip_unavailable = False + except stem.ControllerError as exc: + if 'GeoIP data not loaded' in str(exc): + self._is_geoip_unavailable = True + else: + return False # unexpected issue, fail open and don't cache + + return self._is_geoip_unavailable def map_address(self, mapping): """ @@ -3471,13 +3917,21 @@ class Controller(BaseController): log.warn('We were unable assert ownership of tor through TAKEOWNERSHIP, despite being configured to be the owning process through __OwningControllerProcess. 
(%s)' % response) def _handle_event(self, event_message): - stem.response.convert('EVENT', event_message, arrived_at = time.time()) + try: + stem.response.convert('EVENT', event_message, arrived_at = time.time()) + event_type = event_message.type + except stem.ProtocolError as exc: + log.error('Tor sent a malformed event (%s): %s' % (exc, event_message)) + event_type = MALFORMED_EVENTS with self._event_listeners_lock: - for event_type, event_listeners in list(self._event_listeners.items()): - if event_type == event_message.type: + for listener_type, event_listeners in list(self._event_listeners.items()): + if listener_type == event_type: for listener in event_listeners: - listener(event_message) + try: + listener(event_message) + except Exception as exc: + log.warn('Event listener raised an uncaught exception (%s): %s' % (exc, event_message)) def _attach_listeners(self): """ @@ -3629,3 +4083,22 @@ def _case_insensitive_lookup(entries, key, default = UNDEFINED): return entry raise ValueError("key '%s' doesn't exist in dict: %s" % (key, entries)) + + +def _get_with_timeout(event_queue, timeout, start_time): + """ + Pulls an item from a queue with a given timeout. 
+ """ + + if timeout: + time_left = timeout - (time.time() - start_time) + + if time_left <= 0: + raise stem.Timeout('Reached our %0.1f second timeout' % timeout) + + try: + return event_queue.get(True, time_left) + except queue.Empty: + raise stem.Timeout('Reached our %0.1f second timeout' % timeout) + else: + return event_queue.get() diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py b/Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py index 1ebe578..a986014 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -9,6 +9,8 @@ Package for parsing and processing descriptor data. :: parse_file - Parses the descriptors in a file. + create - Creates a new custom descriptor. + create_signing_key - Cretes a signing key that can be used for creating descriptors. Descriptor - Common parent for all descriptor file types. |- get_path - location of the descriptor on disk if it came from a file @@ -27,6 +29,24 @@ Package for parsing and processing descriptor data. and upfront runtime. However, if read time and memory aren't a concern then **DOCUMENT** can provide you with a fully populated document. + Handlers don't change the fact that most methods that provide + descriptors return an iterator. In the case of **DOCUMENT** and + **BARE_DOCUMENT** that iterator would have just a single item - + the document itself. + + Simple way to handle this is to call **next()** to get the iterator's one and + only value... 
+ + :: + + import stem.descriptor.remote + from stem.descriptor import DocumentHandler + + consensus = next(stem.descriptor.remote.get_consensus( + document_handler = DocumentHandler.BARE_DOCUMENT, + ) + + =================== =========== DocumentHandler Description =================== =========== @@ -36,6 +56,29 @@ Package for parsing and processing descriptor data. =================== =========== """ +import base64 +import codecs +import collections +import copy +import hashlib +import os +import random +import re +import string +import tarfile + +import stem.prereq +import stem.util +import stem.util.enum +import stem.util.str_tools +import stem.util.system + +try: + # added in python 2.7 + from collections import OrderedDict +except ImportError: + from stem.util.ordereddict import OrderedDict + __all__ = [ 'export', 'reader', @@ -50,26 +93,12 @@ __all__ = [ 'Descriptor', ] -import base64 -import codecs -import copy -import hashlib -import os -import re -import tarfile +UNSEEKABLE_MSG = """\ +File object isn't seekable. Try wrapping it with a BytesIO instead... 
-import stem.prereq -import stem.util.enum -import stem.util.str_tools -import stem.util.system - -from stem import str_type - -try: - # added in python 2.7 - from collections import OrderedDict -except ImportError: - from stem.util.ordereddict import OrderedDict + content = my_file.read() + parsed_descriptors = stem.descriptor.parse_file(io.BytesIO(content)) +""" KEYWORD_CHAR = 'a-zA-Z0-9-' WHITESPACE = ' \t' @@ -77,6 +106,17 @@ KEYWORD_LINE = re.compile('^([%s]+)(?:[%s]+(.*))?$' % (KEYWORD_CHAR, WHITESPACE) SPECIFIC_KEYWORD_LINE = '^(%%s)(?:[%s]+(.*))?$' % WHITESPACE PGP_BLOCK_START = re.compile('^-----BEGIN ([%s%s]+)-----$' % (KEYWORD_CHAR, WHITESPACE)) PGP_BLOCK_END = '-----END %s-----' +EMPTY_COLLECTION = ([], {}, set()) + +DIGEST_TYPE_INFO = b'\x00\x01' +DIGEST_PADDING = b'\xFF' +DIGEST_SEPARATOR = b'\x00' + +CRYPTO_BLOB = """ +MIGJAoGBAJv5IIWQ+WDWYUdyA/0L8qbIkEVH/cwryZWoIaPAzINfrw1WfNZGtBmg +skFtXhOHHqTRN4GPPrZsAIUOQGzQtGb66IQgT4tO/pj+P6QmSCCdTfhvGfgTCsC+ +WPi4Fl2qryzTb3QO5r5x7T8OsG2IBUET1bLQzmtbC560SYR49IvVAgMBAAE= +""" DocumentHandler = stem.util.enum.UppercaseEnum( 'ENTRIES', @@ -85,7 +125,19 @@ DocumentHandler = stem.util.enum.UppercaseEnum( ) -def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, **kwargs): +class SigningKey(collections.namedtuple('SigningKey', ['private', 'public', 'public_digest'])): + """ + Key used by relays to sign their server and extrainfo descriptors. + + .. 
versionadded:: 1.6.0 + + :var cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private: private key + :var cryptography.hazmat.backends.openssl.rsa._RSAPublicKey public: public key + :var bytes public_digest: block that can be used for the a server descrptor's 'signing-key' field + """ + + +def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, normalize_newlines = None, **kwargs): """ Simple function to read the descriptor contents from a file, providing an iterator for its :class:`~stem.descriptor.__init__.Descriptor` contents. @@ -94,7 +146,7 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume tries to determine the descriptor type based on the following... * The @type annotation on the first line. These are generally only found in - the `CollecTor archives `_. + the `CollecTor archives `_. * The filename if it matches something from tor's data directory. For instance, tor's 'cached-descriptors' contains server descriptors. 
@@ -138,11 +190,13 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume my_descriptor_file = open(descriptor_path, 'rb') :param str,file,tarfile descriptor_file: path or opened file with the descriptor contents - :param str descriptor_type: `descriptor type `_, this is guessed if not provided + :param str descriptor_type: `descriptor type `_, this is guessed if not provided :param bool validate: checks the validity of the descriptor's content if **True**, skips these checks otherwise :param stem.descriptor.__init__.DocumentHandler document_handler: method in which to parse the :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` + :param bool normalize_newlines: converts windows newlines (CRLF), this is the + default when reading data directories on windows :param dict kwargs: additional arguments for the descriptor constructor :returns: iterator for :class:`~stem.descriptor.__init__.Descriptor` instances in the file @@ -157,7 +211,7 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume handler = None - if isinstance(descriptor_file, (bytes, str_type)): + if stem.util._is_str(descriptor_file): if stem.util.system.is_tarfile(descriptor_file): handler = _parse_file_for_tar_path else: @@ -171,6 +225,16 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume return + # Not all files are seekable. If unseekable then advising the user. + # + # Python 3.x adds an io.seekable() method, but not an option with python 2.x + # so using an experimental call to tell() to determine this. + + try: + descriptor_file.tell() + except IOError: + raise IOError(UNSEEKABLE_MSG) + # The tor descriptor specifications do not provide a reliable method for # identifying a descriptor file's type and version so we need to guess # based on its filename. 
Metrics descriptors, however, can be identified @@ -186,47 +250,48 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume descriptor_path = getattr(descriptor_file, 'name', None) filename = '' if descriptor_path is None else os.path.basename(descriptor_file.name) - file_parser = None - if descriptor_type is not None: - descriptor_type_match = re.match('^(\S+) (\d+).(\d+)$', descriptor_type) + def parse(descriptor_file): + if normalize_newlines: + descriptor_file = NewlineNormalizer(descriptor_file) - if descriptor_type_match: - desc_type, major_version, minor_version = descriptor_type_match.groups() - file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs) + if descriptor_type is not None: + descriptor_type_match = re.match('^(\S+) (\d+).(\d+)$', descriptor_type) + + if descriptor_type_match: + desc_type, major_version, minor_version = descriptor_type_match.groups() + return _parse_metrics_file(desc_type, int(major_version), int(minor_version), descriptor_file, validate, document_handler, **kwargs) + else: + raise ValueError("The descriptor_type must be of the form ' .'") + elif metrics_header_match: + # Metrics descriptor handling + + desc_type, major_version, minor_version = metrics_header_match.groups() + return _parse_metrics_file(desc_type, int(major_version), int(minor_version), descriptor_file, validate, document_handler, **kwargs) else: - raise ValueError("The descriptor_type must be of the form ' .'") - elif metrics_header_match: - # Metrics descriptor handling + # Cached descriptor handling. These contain multiple descriptors per file. - desc_type, major_version, minor_version = metrics_header_match.groups() - file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs) - else: - # Cached descriptor handling. These contain multiple descriptors per file. 
+ if normalize_newlines is None and stem.util.system.is_windows(): + descriptor_file = NewlineNormalizer(descriptor_file) - if filename == 'cached-descriptors' or filename == 'cached-descriptors.new': - file_parser = lambda f: stem.descriptor.server_descriptor._parse_file(f, validate = validate, **kwargs) - elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new': - file_parser = lambda f: stem.descriptor.extrainfo_descriptor._parse_file(f, validate = validate, **kwargs) - elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new': - file_parser = lambda f: stem.descriptor.microdescriptor._parse_file(f, validate = validate, **kwargs) - elif filename == 'cached-consensus': - file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, validate = validate, document_handler = document_handler, **kwargs) - elif filename == 'cached-microdesc-consensus': - file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs) + if filename == 'cached-descriptors' or filename == 'cached-descriptors.new': + return stem.descriptor.server_descriptor._parse_file(descriptor_file, validate = validate, **kwargs) + elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new': + return stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, validate = validate, **kwargs) + elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new': + return stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs) + elif filename == 'cached-consensus': + return stem.descriptor.networkstatus._parse_file(descriptor_file, validate = validate, document_handler = document_handler, **kwargs) + elif filename == 'cached-microdesc-consensus': + return stem.descriptor.networkstatus._parse_file(descriptor_file, is_microdescriptor = True, validate = validate, document_handler = document_handler, 
**kwargs) + else: + raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line)) - if file_parser: - for desc in file_parser(descriptor_file): - if descriptor_path is not None: - desc._set_path(os.path.abspath(descriptor_path)) + for desc in parse(descriptor_file): + if descriptor_path is not None: + desc._set_path(os.path.abspath(descriptor_path)) - yield desc - - return - - # Not recognized as a descriptor file. - - raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line)) + yield desc def _parse_file_for_path(descriptor_file, *args, **kwargs): @@ -253,6 +318,9 @@ def _parse_file_for_tarfile(descriptor_file, *args, **kwargs): if tar_entry.isfile(): entry = descriptor_file.extractfile(tar_entry) + if tar_entry.size == 0: + continue + try: for desc in parse_file(entry, *args, **kwargs): desc._set_archive_path(entry.name) @@ -320,6 +388,78 @@ def _parse_metrics_file(descriptor_type, major_version, minor_version, descripto raise TypeError("Unrecognized metrics descriptor format. type: '%s', version: '%i.%i'" % (descriptor_type, major_version, minor_version)) +def _descriptor_content(attr = None, exclude = (), header_template = (), footer_template = ()): + """ + Constructs a minimal descriptor with the given attributes. The content we + provide back is of the form... + + * header_template (with matching attr filled in) + * unused attr entries + * footer_template (with matching attr filled in) + + So for instance... + + :: + + _descriptor_content( + attr = {'nickname': 'caerSidi', 'contact': 'atagar'}, + header_template = ( + ('nickname', 'foobar'), + ('fingerprint', '12345'), + ), + ) + + ... would result in... 
+ + :: + + nickname caerSidi + fingerprint 12345 + contact atagar + + :param dict attr: keyword/value mappings to be included in the descriptor + :param list exclude: mandatory keywords to exclude from the descriptor + :param tuple header_template: key/value pairs for mandatory fields before unrecognized content + :param tuple footer_template: key/value pairs for mandatory fields after unrecognized content + + :returns: bytes with the requested descriptor content + """ + + header_content, footer_content = [], [] + attr = {} if attr is None else OrderedDict(attr) # shallow copy since we're destructive + + for content, template in ((header_content, header_template), + (footer_content, footer_template)): + for keyword, value in template: + if keyword in exclude: + continue + + value = stem.util.str_tools._to_unicode(attr.pop(keyword, value)) + + if value is None: + continue + elif isinstance(value, (tuple, list)): + for v in value: + content.append('%s %s' % (keyword, v)) + elif value == '': + content.append(keyword) + elif value.startswith('\n'): + # some values like crypto follow the line instead + content.append('%s%s' % (keyword, value)) + else: + content.append('%s %s' % (keyword, value)) + + remainder = [] + + for k, v in attr.items(): + if isinstance(v, (tuple, list)): + remainder += ['%s %s' % (k, entry) for entry in v] + else: + remainder.append('%s %s' % (k, v)) + + return stem.util.str_tools._to_bytes('\n'.join(header_content + remainder + footer_content)) + + def _value(line, entries): return entries[line][0][0] @@ -328,13 +468,18 @@ def _values(line, entries): return [entry[0] for entry in entries[line]] -def _parse_simple_line(keyword, attribute): +def _parse_simple_line(keyword, attribute, func = None): def _parse(descriptor, entries): - setattr(descriptor, attribute, _value(keyword, entries)) + value = _value(keyword, entries) + setattr(descriptor, attribute, func(value) if func else value) return _parse +def _parse_if_present(keyword, attribute): + 
return lambda descriptor, entries: setattr(descriptor, attribute, keyword in entries) + + def _parse_bytes_line(keyword, attribute): def _parse(descriptor, entries): line_match = re.search(stem.util.str_tools._to_bytes('^(opt )?%s(?:[%s]+(.*))?$' % (keyword, WHITESPACE)), descriptor.get_bytes(), re.MULTILINE) @@ -377,6 +522,37 @@ def _parse_forty_character_hex(keyword, attribute): return _parse +def _parse_protocol_line(keyword, attribute): + def _parse(descriptor, entries): + # parses 'protocol' entries like: Cons=1-2 Desc=1-2 DirCache=1 HSDir=1 + + value = _value(keyword, entries) + protocols = OrderedDict() + + for k, v in _mappings_for(keyword, value): + versions = [] + + if not v: + continue + + for entry in v.split(','): + if '-' in entry: + min_value, max_value = entry.split('-', 1) + else: + min_value = max_value = entry + + if not min_value.isdigit() or not max_value.isdigit(): + raise ValueError('Protocol values should be a number or number range, but was: %s %s' % (keyword, value)) + + versions += range(int(min_value), int(max_value) + 1) + + protocols[k] = versions + + setattr(descriptor, attribute, protocols) + + return _parse + + def _parse_key_block(keyword, attribute, expected_block_type, value_attribute = None): def _parse(descriptor, entries): value, block_type, block_contents = entries[keyword][0] @@ -392,6 +568,48 @@ def _parse_key_block(keyword, attribute, expected_block_type, value_attribute = return _parse +def _mappings_for(keyword, value, require_value = False, divider = ' '): + """ + Parses an attribute as a series of 'key=value' mappings. Unlike _parse_* + functions this is a helper, returning the attribute value rather than setting + a descriptor field. This way parsers can perform additional validations. 
+ + :param str keyword: descriptor field being parsed + :param str value: 'attribute => values' mappings to parse + :param str divider: separator between the key/value mappings + :param bool require_value: validates that values are not empty + + :returns: **generator** with the key/value of the map attribute + + :raises: **ValueError** if descriptor content is invalid + """ + + if value is None: + return # no descripoter value to process + elif value == '': + return # descriptor field was present, but blank + + for entry in value.split(divider): + if '=' not in entry: + raise ValueError("'%s' should be a series of 'key=value' pairs but was: %s" % (keyword, value)) + + k, v = entry.split('=', 1) + + if require_value and not v: + raise ValueError("'%s' line's %s mapping had a blank value: %s" % (keyword, k, value)) + + yield k, v + + +def _copy(default): + if default is None or isinstance(default, (bool, stem.exit_policy.ExitPolicy)): + return default # immutable + elif default in EMPTY_COLLECTION: + return type(default)() # collection construction tad faster than copy + else: + return copy.copy(default) + + class Descriptor(object): """ Common parent for all types of descriptors. @@ -408,6 +626,55 @@ class Descriptor(object): self._entries = {} self._unrecognized_lines = [] + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + """ + Creates descriptor content with the given attributes. Mandatory fields are + filled with dummy information unless data is supplied. This doesn't yet + create a valid signature. + + .. 
versionadded:: 1.6.0 + + :param dict attr: keyword/value mappings to be included in the descriptor + :param list exclude: mandatory keywords to exclude from the descriptor, this + results in an invalid descriptor + :param bool sign: includes cryptographic signatures and digests if True + + :returns: **str** with the content of a descriptor + + :raises: + * **ImportError** if cryptography is unavailable and sign is True + * **NotImplementedError** if not implemented for this descriptor type + """ + + raise NotImplementedError("The create and content methods haven't been implemented for %s" % cls.__name__) + + @classmethod + def create(cls, attr = None, exclude = (), validate = True, sign = False): + """ + Creates a descriptor with the given attributes. Mandatory fields are filled + with dummy information unless data is supplied. This doesn't yet create a + valid signature. + + .. versionadded:: 1.6.0 + + :param dict attr: keyword/value mappings to be included in the descriptor + :param list exclude: mandatory keywords to exclude from the descriptor, this + results in an invalid descriptor + :param bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :param bool sign: includes cryptographic signatures and digests if True + + :returns: :class:`~stem.descriptor.Descriptor` subclass + + :raises: + * **ValueError** if the contents is malformed and validate is True + * **ImportError** if cryptography is unavailable and sign is True + * **NotImplementedError** if not implemented for this descriptor type + """ + + return cls(cls.content(attr, exclude, sign), validate = validate) + def get_path(self): """ Provides the absolute path that we loaded this descriptor from. 
@@ -471,12 +738,6 @@ class Descriptor(object): if parser_for_line is None: parser_for_line = self.PARSER_FOR_LINE - # set defaults - - for attr in self.ATTRIBUTES: - if not hasattr(self, attr): - setattr(self, attr, copy.copy(self.ATTRIBUTES[attr][0])) - for keyword, values in list(entries.items()): try: if keyword in parser_for_line: @@ -489,9 +750,9 @@ class Descriptor(object): line += '\n%s' % block_contents self._unrecognized_lines.append(line) - except ValueError as exc: + except ValueError: if validate: - raise exc + raise def _set_path(self, path): self._path = path @@ -515,28 +776,25 @@ class Descriptor(object): """ if not stem.prereq.is_crypto_available(): - raise ValueError('Generating the signed digest requires pycrypto') + raise ValueError('Generating the signed digest requires the cryptography module') - from Crypto.Util import asn1 - from Crypto.Util.number import bytes_to_long, long_to_bytes + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives.serialization import load_der_public_key + from cryptography.utils import int_to_bytes, int_from_bytes - # get the ASN.1 sequence - - seq = asn1.DerSequence() - seq.decode(_bytes_for_block(signing_key)) - modulus, public_exponent = seq[0], seq[1] + key = load_der_public_key(_bytes_for_block(signing_key), default_backend()) + modulus = key.public_numbers().n + public_exponent = key.public_numbers().e sig_as_bytes = _bytes_for_block(signature) - sig_as_long = bytes_to_long(sig_as_bytes) # convert signature to an int - blocksize = 128 # block size will always be 128 for a 1024 bit key + sig_as_long = int_from_bytes(sig_as_bytes, byteorder='big') # convert signature to an int + blocksize = len(sig_as_bytes) # 256B for NetworkStatusDocuments, 128B for others # use the public exponent[e] & the modulus[n] to decrypt the int - decrypted_int = pow(sig_as_long, public_exponent, modulus) # convert the int to a byte array - - decrypted_bytes = long_to_bytes(decrypted_int, 
blocksize) + decrypted_bytes = int_to_bytes(decrypted_int, blocksize) ############################################################################ # The decrypted bytes should have a structure exactly along these lines. @@ -551,7 +809,7 @@ class Descriptor(object): ############################################################################ try: - if decrypted_bytes.index(b'\x00\x01') != 0: + if decrypted_bytes.index(DIGEST_TYPE_INFO) != 0: raise ValueError('Verification failed, identifier missing') except ValueError: raise ValueError('Verification failed, malformed data') @@ -560,7 +818,7 @@ class Descriptor(object): identifier_offset = 2 # find the separator - seperator_index = decrypted_bytes.index(b'\x00', identifier_offset) + seperator_index = decrypted_bytes.index(DIGEST_SEPARATOR, identifier_offset) except ValueError: raise ValueError('Verification failed, seperator not found') @@ -594,19 +852,38 @@ class Descriptor(object): return stem.util.str_tools._to_unicode(digest_hash.hexdigest().upper()) def __getattr__(self, name): - # If attribute isn't already present we might be lazy loading it... + # We can't use standard hasattr() since it calls this function, recursing. + # Doing so works since it stops recursing after several dozen iterations + # (not sure why), but horrible in terms of performance. - if self._lazy_loading and name in self.ATTRIBUTES: + def has_attr(attr): + try: + super(Descriptor, self).__getattribute__(attr) + return True + except: + return False + + # If an attribute we should have isn't present it means either... + # + # a. we still need to lazy load this + # b. 
we read the whole descriptor but it wasn't present, so needs the default + + if name in self.ATTRIBUTES and not has_attr(name): default, parsing_function = self.ATTRIBUTES[name] - try: - parsing_function(self, self._entries) - except (ValueError, KeyError): + if self._lazy_loading: try: - # despite having a validation failure check to see if we set something - return super(Descriptor, self).__getattribute__(name) - except AttributeError: - setattr(self, name, copy.copy(default)) + parsing_function(self, self._entries) + except (ValueError, KeyError): + # Set defaults for anything the parsing function should've covered. + # Despite having a validation failure some attributes might be set in + # which case we keep them. + + for attr_name, (attr_default, attr_parser) in self.ATTRIBUTES.items(): + if parsing_function == attr_parser and not has_attr(attr_name): + setattr(self, attr_name, _copy(attr_default)) + else: + setattr(self, name, _copy(default)) return super(Descriptor, self).__getattribute__(name) @@ -617,6 +894,31 @@ class Descriptor(object): return self._raw_contents +class NewlineNormalizer(object): + """ + File wrapper that normalizes CRLF line endings. 
+ """ + + def __init__(self, wrapped_file): + self._wrapped_file = wrapped_file + self.name = getattr(wrapped_file, 'name', None) + + def read(self, *args): + return self._wrapped_file.read(*args).replace(b'\r\n', b'\n') + + def readline(self, *args): + return self._wrapped_file.readline(*args).replace(b'\r\n', b'\n') + + def readlines(self, *args): + return [line.rstrip(b'\r') for line in self._wrapped_file.readlines(*args)] + + def seek(self, *args): + return self._wrapped_file.seek(*args) + + def tell(self, *args): + return self._wrapped_file.tell(*args) + + def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_first = False, skip = False, end_position = None, include_ending_keyword = False): """ Reads from the descriptor file until we get to one of the given keywords or reach the @@ -636,23 +938,17 @@ def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_fi **True** """ - if skip: - content = None - content_append = lambda x: None - else: - content = [] - content_append = content.append - + content = None if skip else [] ending_keyword = None - if isinstance(keywords, (bytes, str_type)): + if stem.util._is_str(keywords): keywords = (keywords,) if ignore_first: first_line = descriptor_file.readline() - if first_line: - content_append(first_line) + if first_line and content is not None: + content.append(first_line) keyword_match = re.compile(SPECIFIC_KEYWORD_LINE % '|'.join(keywords)) @@ -674,12 +970,12 @@ def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_fi if not inclusive: descriptor_file.seek(last_position) - else: - content_append(line) + elif content is not None: + content.append(line) break - else: - content_append(line) + elif content is not None: + content.append(line) if include_ending_keyword: return (content, ending_keyword) @@ -741,7 +1037,109 @@ def _get_pseudo_pgp_block(remaining_contents): return None -def _get_descriptor_components(raw_contents, validate, extra_keywords = 
()): +def create_signing_key(private_key = None): + """ + Serializes a signing key if we have one. Otherwise this creates a new signing + key we can use to create descriptors. + + .. versionadded:: 1.6.0 + + :param cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private_key: private key + + :returns: :class:`~stem.descriptor.__init__.SigningKey` that can be used to + create descriptors + + :raises: **ImportError** if the cryptography module is unavailable + """ + + if not stem.prereq.is_crypto_available(): + raise ImportError('Signing requires the cryptography module') + + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import serialization + from cryptography.hazmat.primitives.asymmetric import rsa + + if private_key is None: + private_key = rsa.generate_private_key( + public_exponent = 65537, + key_size = 1024, + backend = default_backend(), + ) + + # When signing the cryptography module includes a constant indicating + # the hash algorithm used. Tor doesn't. This causes signature + # validation failures and unfortunately cryptography have no nice way + # of excluding these so we need to mock out part of their internals... + # + # https://github.com/pyca/cryptography/issues/3713 + + def no_op(*args, **kwargs): + return 1 + + private_key._backend._lib.EVP_PKEY_CTX_set_signature_md = no_op + private_key._backend.openssl_assert = no_op + + public_key = private_key.public_key() + public_digest = b'\n' + public_key.public_bytes( + encoding = serialization.Encoding.PEM, + format = serialization.PublicFormat.PKCS1, + ).strip() + + return SigningKey(private_key, public_key, public_digest) + + +def _append_router_signature(content, private_key): + """ + Appends a router signature to a server or extrainfo descriptor. 
+ + :param bytes content: descriptor content up through 'router-signature\\n' + :param cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private_key: + private relay signing key + + :returns: **bytes** with the signed descriptor content + """ + + if not stem.prereq.is_crypto_available(): + raise ImportError('Signing requires the cryptography module') + + from cryptography.hazmat.primitives import hashes + from cryptography.hazmat.primitives.asymmetric import padding + + signature = base64.b64encode(private_key.sign(content, padding.PKCS1v15(), hashes.SHA1())) + return content + b'\n'.join([b'-----BEGIN SIGNATURE-----'] + stem.util.str_tools._split_by_length(signature, 64) + [b'-----END SIGNATURE-----\n']) + + +def _random_nickname(): + return ('Unnamed%i' % random.randint(0, 100000000000000))[:19] + + +def _random_fingerprint(): + return ('%040x' % random.randrange(16 ** 40)).upper() + + +def _random_ipv4_address(): + return '%i.%i.%i.%i' % (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) + + +def _random_date(): + return '%i-%02i-%02i %02i:%02i:%02i' % (random.randint(2000, 2015), random.randint(1, 12), random.randint(1, 20), random.randint(0, 23), random.randint(0, 59), random.randint(0, 59)) + + +def _random_crypto_blob(block_type = None): + """ + Provides a random string that can be used for crypto blocks. + """ + + random_base64 = stem.util.str_tools._to_unicode(base64.b64encode(os.urandom(140))) + crypto_blob = '\n'.join(stem.util.str_tools._split_by_length(random_base64, 64)) + + if block_type: + return '\n-----BEGIN %s-----\n%s\n-----END %s-----' % (block_type, crypto_blob, block_type) + else: + return crypto_blob + + +def _descriptor_components(raw_contents, validate, extra_keywords = (), non_ascii_fields = ()): """ Initial breakup of the server descriptor contents to make parsing easier. 
@@ -760,6 +1158,7 @@ def _get_descriptor_components(raw_contents, validate, extra_keywords = ()): True, skips these checks otherwise :param list extra_keywords: entity keywords to put into a separate listing with ordering intact + :param list non_ascii_fields: fields containing non-ascii content :returns: **collections.OrderedDict** with the 'keyword => (value, pgp key) entries' @@ -815,11 +1214,18 @@ def _get_descriptor_components(raw_contents, validate, extra_keywords = ()): block_type, block_contents = block_attr else: block_type, block_contents = None, None - except ValueError as exc: + except ValueError: if not validate: continue - raise exc + raise + + if validate and keyword not in non_ascii_fields: + try: + value.encode('ascii') + except UnicodeError: + replaced = ''.join([(char if char in string.printable else '?') for char in value]) + raise ValueError("'%s' line had non-ascii content: %s" % (keyword, replaced)) if keyword in extra_keywords: extra_entries.append('%s %s' % (keyword, value)) @@ -831,6 +1237,7 @@ def _get_descriptor_components(raw_contents, validate, extra_keywords = ()): else: return entries + # importing at the end to avoid circular dependencies on our Descriptor class import stem.descriptor.server_descriptor diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/certificate.py b/Shared/lib/python3.4/site-packages/stem/descriptor/certificate.py new file mode 100644 index 0000000..0c4796a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/certificate.py @@ -0,0 +1,271 @@ +# Copyright 2017-2018, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parsing for `Tor Ed25519 certificates +`_, which are +used to validate the key used to sign server descriptors. + +.. 
versionadded:: 1.6.0 + +**Module Overview:** + +:: + + Ed25519Certificate - Ed25519 signing key certificate + | +- Ed25519CertificateV1 - version 1 Ed25519 certificate + | |- is_expired - checks if certificate is presently expired + | +- validate - validates signature of a server descriptor + | + +- parse - reads base64 encoded certificate data + + Ed25519Extension - extension included within an Ed25519Certificate + +.. data:: CertType (enum) + + Purpose of Ed25519 certificate. As new certificate versions are added this + enumeration will expand. + + ============== =========== + CertType Description + ============== =========== + **SIGNING** signing a signing key with an identity key + **LINK_CERT** TLS link certificate signed with ed25519 signing key + **AUTH** authentication key signed with ed25519 signing key + ============== =========== + +.. data:: ExtensionType (enum) + + Recognized extension types. + + ==================== =========== + ExtensionType Description + ==================== =========== + **HAS_SIGNING_KEY** includes key used to sign the certificate + ==================== =========== + +.. data:: ExtensionFlag (enum) + + Flags that can be assigned to Ed25519 certificate extensions.
+ + ====================== =========== + ExtensionFlag Description + ====================== =========== + **AFFECTS_VALIDATION** extension affects whether the certificate is valid + **UNKNOWN** extension includes flags not yet recognized by stem + ====================== =========== +""" + +import base64 +import binascii +import collections +import datetime +import hashlib + +import stem.prereq +import stem.util.enum +import stem.util.str_tools + +ED25519_HEADER_LENGTH = 40 +ED25519_SIGNATURE_LENGTH = 64 +ED25519_ROUTER_SIGNATURE_PREFIX = b'Tor router descriptor signature v1' + +CertType = stem.util.enum.UppercaseEnum('SIGNING', 'LINK_CERT', 'AUTH') +ExtensionType = stem.util.enum.Enum(('HAS_SIGNING_KEY', 4),) +ExtensionFlag = stem.util.enum.UppercaseEnum('AFFECTS_VALIDATION', 'UNKNOWN') + + +class Ed25519Extension(collections.namedtuple('Ed25519Extension', ['type', 'flags', 'flag_int', 'data'])): + """ + Extension within an Ed25519 certificate. + + :var int type: extension type + :var list flags: extension attribute flags + :var int flag_int: integer encoding of the extension attribute flags + :var bytes data: data the extension concerns + """ + + +class Ed25519Certificate(object): + """ + Base class for an Ed25519 certificate. + + :var int version: certificate format version + :var str encoded: base64 encoded ed25519 certificate + """ + + def __init__(self, version, encoded): + self.version = version + self.encoded = encoded + + @staticmethod + def parse(content): + """ + Parses the given base64 encoded data as an Ed25519 certificate. 
+ + :param str content: base64 encoded certificate + + :returns: :class:`~stem.descriptor.certificate.Ed25519Certificate` subclass + for the given certificate + + :raises: **ValueError** if content is malformed + """ + + try: + decoded = base64.b64decode(stem.util.str_tools._to_bytes(content)) + + if not decoded: + raise TypeError('empty') + except (TypeError, binascii.Error) as exc: + raise ValueError("Ed25519 certificate wasn't propoerly base64 encoded (%s):\n%s" % (exc, content)) + + version = stem.util.str_tools._to_int(decoded[0:1]) + + if version == 1: + return Ed25519CertificateV1(version, content, decoded) + else: + raise ValueError('Ed25519 certificate is version %i. Parser presently only supports version 1.' % version) + + +class Ed25519CertificateV1(Ed25519Certificate): + """ + Version 1 Ed25519 certificate, which are used for signing tor server + descriptors. + + :var CertType type: certificate purpose + :var datetime expiration: expiration of the certificate + :var int key_type: format of the key + :var bytes key: key content + :var list extensions: :class:`~stem.descriptor.certificate.Ed25519Extension` in this certificate + :var bytes signature: certificate signature + """ + + def __init__(self, version, encoded, decoded): + super(Ed25519CertificateV1, self).__init__(version, encoded) + + if len(decoded) < ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH: + raise ValueError('Ed25519 certificate was %i bytes, but should be at least %i' % (len(decoded), ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH)) + + cert_type = stem.util.str_tools._to_int(decoded[1:2]) + + if cert_type in (0, 1, 2, 3): + raise ValueError('Ed25519 certificate cannot have a type of %i. This is reserved to avoid conflicts with tor CERTS cells.'
% cert_type) + elif cert_type == 4: + self.type = CertType.SIGNING + elif cert_type == 5: + self.type = CertType.LINK_CERT + elif cert_type == 6: + self.type = CertType.AUTH + elif cert_type == 7: + raise ValueError('Ed25519 certificate cannot have a type of 7. This is reserved for RSA identity cross-certification.') + else: + raise ValueError("BUG: Ed25519 certificate type is decoded from one byte. It shouldn't be possible to have a value of %i." % cert_type) + + # expiration time is in hours since epoch + try: + self.expiration = datetime.datetime.utcfromtimestamp(stem.util.str_tools._to_int(decoded[2:6]) * 3600) + except ValueError as exc: + raise ValueError('Invalid expiration timestamp (%s): %s' % (exc, stem.util.str_tools._to_int(decoded[2:6]) * 3600)) + + self.key_type = stem.util.str_tools._to_int(decoded[6:7]) + self.key = decoded[7:39] + self.signature = decoded[-ED25519_SIGNATURE_LENGTH:] + + self.extensions = [] + extension_count = stem.util.str_tools._to_int(decoded[39:40]) + remaining_data = decoded[40:-ED25519_SIGNATURE_LENGTH] + + for i in range(extension_count): + if len(remaining_data) < 4: + raise ValueError('Ed25519 extension is missing header field data') + + extension_length = stem.util.str_tools._to_int(remaining_data[:2]) + extension_type = stem.util.str_tools._to_int(remaining_data[2:3]) + extension_flags = stem.util.str_tools._to_int(remaining_data[3:4]) + extension_data = remaining_data[4:4 + extension_length] + + if extension_length != len(extension_data): + raise ValueError("Ed25519 extension is truncated. It should have %i bytes of data but there's only %i." 
% (extension_length, len(extension_data))) + + flags, remaining_flags = [], extension_flags + + if remaining_flags % 2 == 1: + flags.append(ExtensionFlag.AFFECTS_VALIDATION) + remaining_flags -= 1 + + if remaining_flags: + flags.append(ExtensionFlag.UNKNOWN) + + if extension_type == ExtensionType.HAS_SIGNING_KEY and len(extension_data) != 32: + raise ValueError('Ed25519 HAS_SIGNING_KEY extension must be 32 bytes, but was %i.' % len(extension_data)) + + self.extensions.append(Ed25519Extension(extension_type, flags, extension_flags, extension_data)) + remaining_data = remaining_data[4 + extension_length:] + + if remaining_data: + raise ValueError('Ed25519 certificate had %i bytes of unused extension data' % len(remaining_data)) + + def is_expired(self): + """ + Checks if this certificate is presently expired or not. + + :returns: **True** if the certificate has expired, **False** otherwise + """ + + return datetime.datetime.now() > self.expiration + + def validate(self, server_descriptor): + """ + Validates our signing key and that the given descriptor content matches its + Ed25519 signature. 
+ + :param stem.descriptor.server_descriptor.Ed25519 server_descriptor: relay + server descriptor to validate + + :raises: + * **ValueError** if signing key or descriptor are invalid + * **ImportError** if pynacl module is unavailable + """ + + if not stem.prereq._is_pynacl_available(): + raise ImportError('Certificate validation requires the pynacl module') + + import nacl.signing + import nacl.encoding + from nacl.exceptions import BadSignatureError + + descriptor_content = server_descriptor.get_bytes() + signing_key = None + + if server_descriptor.ed25519_master_key: + signing_key = nacl.signing.VerifyKey(stem.util.str_tools._to_bytes(server_descriptor.ed25519_master_key) + b'=', encoder = nacl.encoding.Base64Encoder) + else: + for extension in self.extensions: + if extension.type == ExtensionType.HAS_SIGNING_KEY: + signing_key = nacl.signing.VerifyKey(extension.data) + break + + if not signing_key: + raise ValueError('Server descriptor missing an ed25519 signing key') + + try: + signing_key.verify(base64.b64decode(stem.util.str_tools._to_bytes(self.encoded))[:-ED25519_SIGNATURE_LENGTH], self.signature) + except BadSignatureError as exc: + raise ValueError('Ed25519KeyCertificate signing key is invalid (%s)' % exc) + + # ed25519 signature validates descriptor content up until the signature itself + + if b'router-sig-ed25519 ' not in descriptor_content: + raise ValueError("Descriptor doesn't have a router-sig-ed25519 entry.") + + signed_content = descriptor_content[:descriptor_content.index(b'router-sig-ed25519 ') + 19] + descriptor_sha256_digest = hashlib.sha256(ED25519_ROUTER_SIGNATURE_PREFIX + signed_content).digest() + + missing_padding = len(server_descriptor.ed25519_signature) % 4 + signature_bytes = base64.b64decode(stem.util.str_tools._to_bytes(server_descriptor.ed25519_signature) + b'=' * missing_padding) + + try: + verify_key = nacl.signing.VerifyKey(self.key) + verify_key.verify(descriptor_sha256_digest, signature_bytes) + except BadSignatureError as 
exc: + raise ValueError('Descriptor Ed25519 certificate signature invalid (%s)' % exc) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/export.py b/Shared/lib/python3.4/site-packages/stem/descriptor/export.py index f90a607..c565bfa 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/export.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/export.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -10,6 +10,11 @@ Toolkit for exporting descriptors to other formats. export_csv - Exports descriptors to a CSV export_csv_file - Writes exported CSV output to a file + +.. deprecated:: 1.7.0 + + This module will likely be removed in Stem 2.0 due to lack of usage. If you + use this module please `let me know `_. """ import csv @@ -98,7 +103,7 @@ def export_csv_file(output_file, descriptors, included_fields = (), excluded_fie writer = csv.DictWriter(output_file, included_fields, dialect = _ExportDialect(), extrasaction='ignore') - if header and stem.prereq.is_python_27(): + if header and not stem.prereq._is_python_26(): writer.writeheader() for desc in descriptors: diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/extrainfo_descriptor.py b/Shared/lib/python3.4/site-packages/stem/descriptor/extrainfo_descriptor.py index 607bbbe..485b306 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/extrainfo_descriptor.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/extrainfo_descriptor.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -7,7 +7,7 @@ their server descriptor is published and have a similar format. However, unlike server descriptors these don't contain information that Tor clients require to function and as such aren't fetched by default.
-Defined in section 2.2 of the `dir-spec +Defined in section 2.1.2 of the `dir-spec `_, extra-info descriptors contain interesting but non-vital information such as usage statistics. Tor clients cannot request these documents for bridges. @@ -19,8 +19,7 @@ Extra-info descriptors are available from a few sources... * control port via 'GETINFO extra-info/digest/\*' queries * the 'cached-extrainfo' file in tor's data directory -* Archived descriptors provided by CollecTor - (https://collector.torproject.org/). +* Archived descriptors provided by `CollecTor `_. * Directory authorities and mirrors via their DirPort. @@ -72,6 +71,7 @@ import functools import hashlib import re +import stem.prereq import stem.util.connection import stem.util.enum import stem.util.str_tools @@ -79,19 +79,27 @@ import stem.util.str_tools from stem.descriptor import ( PGP_BLOCK_END, Descriptor, + create_signing_key, + _descriptor_content, _read_until_keywords, - _get_descriptor_components, + _descriptor_components, _value, _values, + _parse_simple_line, _parse_timestamp_line, _parse_forty_character_hex, _parse_key_block, + _mappings_for, + _append_router_signature, + _random_nickname, + _random_fingerprint, + _random_date, + _random_crypto_blob, ) -try: - # added in python 3.2 +if stem.prereq._is_lru_cache_available(): from functools import lru_cache -except ImportError: +else: from stem.util.lru_cache import lru_cache # known statuses for dirreq-v2-resp and dirreq-v3-resp... 
@@ -154,7 +162,6 @@ SINGLE_FIELDS = ( 'exit-streams-opened', ) - _timestamp_re = re.compile('^(.*) \(([0-9]+) s\)( .*)?$') _locale_re = re.compile('^[a-zA-Z0-9\?]{2}$') @@ -280,14 +287,15 @@ def _parse_transport_line(descriptor, entries): raise ValueError("Transport line's address:port entry is missing a colon: transport %s" % value) name = value_comp[0] - address, port_str = value_comp[1].split(':', 1) + address, port_str = value_comp[1].rsplit(':', 1) if not stem.util.connection.is_valid_ipv4_address(address) or \ - stem.util.connection.is_valid_ipv6_address(address): + stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True): raise ValueError('Transport line has a malformed address: transport %s' % value) elif not stem.util.connection.is_valid_port(port_str): raise ValueError('Transport line has a malformed port: transport %s' % value) + address.lstrip('[').rstrip(']') port = int(port_str) args = value_comp[2:] if len(value_comp) >= 3 else [] @@ -309,6 +317,21 @@ def _parse_cell_circuits_per_decline_line(descriptor, entries): descriptor.cell_circuits_per_decile = int(value) +def _parse_padding_counts_line(descriptor, entries): + # "padding-counts" YYYY-MM-DD HH:MM:SS (NSEC s) key=val key=val... 
+ + value = _value('padding-counts', entries) + timestamp, interval, remainder = _parse_timestamp_and_interval('padding-counts', value) + counts = {} + + for k, v in _mappings_for('padding-counts', remainder, require_value = True): + counts[k] = int(v) if v.isdigit() else v + + setattr(descriptor, 'padding_counts_end', timestamp) + setattr(descriptor, 'padding_counts_interval', interval) + setattr(descriptor, 'padding_counts', counts) + + def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr, descriptor, entries): value = _value(keyword, entries) @@ -319,22 +342,15 @@ def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr key_set = DirResponse if is_response_stats else DirStat key_type = 'STATUS' if is_response_stats else 'STAT' - error_msg = '%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value) - if value: - for entry in value.split(','): - if '=' not in entry: - raise ValueError(error_msg) + for status, count in _mappings_for(keyword, value, divider = ','): + if not count.isdigit(): + raise ValueError('%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value)) - status, count = entry.split('=', 1) - - if count.isdigit(): - if status in key_set: - recognized_counts[status] = int(count) - else: - unrecognized_counts[status] = int(count) - else: - raise ValueError(error_msg) + if status in key_set: + recognized_counts[status] = int(count) + else: + unrecognized_counts[status] = int(count) setattr(descriptor, recognized_counts_attr, recognized_counts) setattr(descriptor, unrecognized_counts_attr, unrecognized_counts) @@ -423,22 +439,13 @@ def _parse_port_count_line(keyword, attribute, descriptor, entries): # "" port=N,port=N,... 
value, port_mappings = _value(keyword, entries), {} - error_msg = 'Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value) - if value: - for entry in value.split(','): - if '=' not in entry: - raise ValueError(error_msg) + for port, stat in _mappings_for(keyword, value, divider = ','): + if (port != 'other' and not stem.util.connection.is_valid_port(port)) or not stat.isdigit(): + raise ValueError('Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value)) - port, stat = entry.split('=', 1) - - if (port == 'other' or stem.util.connection.is_valid_port(port)) and stat.isdigit(): - if port != 'other': - port = int(port) - - port_mappings[port] = int(stat) - else: - raise ValueError(error_msg) + port = int(port) if port.isdigit() else port + port_mappings[port] = int(stat) setattr(descriptor, attribute, port_mappings) @@ -453,19 +460,12 @@ def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries): # ??,"Unknown" value, locale_usage = _value(keyword, entries), {} - error_msg = 'Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value) - if value: - for entry in value.split(','): - if '=' not in entry: - raise ValueError(error_msg) + for locale, count in _mappings_for(keyword, value, divider = ','): + if not _locale_re.match(locale) or not count.isdigit(): + raise ValueError('Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value)) - locale, count = entry.split('=', 1) - - if _locale_re.match(locale) and count.isdigit(): - locale_usage[locale] = int(count) - else: - raise ValueError(error_msg) + locale_usage[locale] = int(count) setattr(descriptor, attribute, locale_usage) @@ -473,17 +473,11 @@ def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries): def _parse_bridge_ip_versions_line(descriptor, entries): value, ip_versions = _value('bridge-ip-versions', entries), {} - if value: - for entry in value.split(','): - if '=' not in entry: 
- raise stem.ProtocolError("The bridge-ip-versions should be a comma separated listing of '=' mappings: bridge-ip-versions %s" % value) + for protocol, count in _mappings_for('bridge-ip-versions', value, divider = ','): + if not count.isdigit(): + raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value)) - protocol, count = entry.split('=', 1) - - if not count.isdigit(): - raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value)) - - ip_versions[protocol] = int(count) + ip_versions[protocol] = int(count) descriptor.ip_versions = ip_versions @@ -491,17 +485,11 @@ def _parse_bridge_ip_versions_line(descriptor, entries): def _parse_bridge_ip_transports_line(descriptor, entries): value, ip_transports = _value('bridge-ip-transports', entries), {} - if value: - for entry in value.split(','): - if '=' not in entry: - raise stem.ProtocolError("The bridge-ip-transports should be a comma separated listing of '=' mappings: bridge-ip-transports %s" % value) + for protocol, count in _mappings_for('bridge-ip-transports', value, divider = ','): + if not count.isdigit(): + raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value)) - protocol, count = entry.split('=', 1) - - if not count.isdigit(): - raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value)) - - ip_transports[protocol] = int(count) + ip_transports[protocol] = int(count) descriptor.ip_transports = ip_transports @@ -511,28 +499,30 @@ def _parse_hs_stats(keyword, stat_attribute, extra_attribute, descriptor, entrie value, stat, extra = _value(keyword, entries), None, {} - if value is not None: - value_comp = value.split() - - if not value_comp: - raise ValueError("'%s' line was blank" % keyword) + if value is None: + pass # not in the descriptor + elif value == '': + raise ValueError("'%s' line was blank" % keyword) + else: + if 
' ' in value: + stat_value, remainder = value.split(' ', 1) + else: + stat_value, remainder = value, None try: - stat = int(value_comp[0]) + stat = int(stat_value) except ValueError: - raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, value_comp[0], keyword, value)) + raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, stat_value, keyword, value)) - for entry in value_comp[1:]: - if '=' not in entry: - raise ValueError('Entries after the stat in %s lines should only be key=val entries: %s %s' % (keyword, keyword, value)) - - key, val = entry.split('=', 1) + for key, val in _mappings_for(keyword, remainder): extra[key] = val setattr(descriptor, stat_attribute, stat) setattr(descriptor, extra_attribute, extra) +_parse_identity_ed25519_line = _parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT') +_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash') _parse_geoip_db_digest_line = _parse_forty_character_hex('geoip-db-digest', 'geoip_db_digest') _parse_geoip6_db_digest_line = _parse_forty_character_hex('geoip6-db-digest', 'geoip6_db_digest') _parse_dirreq_v2_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-resp', 'dir_v2_responses', 'dir_v2_responses_unknown') @@ -570,6 +560,8 @@ _parse_dirreq_v3_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirr _parse_geoip_client_origins_line = functools.partial(_parse_geoip_to_count_line, 'geoip-client-origins', 'geoip_client_origins') _parse_entry_ips_line = functools.partial(_parse_geoip_to_count_line, 'entry-ips', 'entry_ips') _parse_bridge_ips_line = functools.partial(_parse_geoip_to_count_line, 'bridge-ips', 'bridge_ips') +_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature') +_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256') _parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest') 
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE') @@ -673,6 +665,12 @@ class ExtraInfoDescriptor(Descriptor): :var int hs_dir_onions_seen: rounded count of the identities seen :var int hs_dir_onions_seen_attr: **\*** attributes provided for the hs_dir_onions_seen + **Padding Count Attributes:** + + :var dict padding_counts: **\*** padding parameters + :var datetime padding_counts_end: end of the period when padding data is being collected + :var int padding_counts_interval: length in seconds of the interval + **Bridge Attributes:** :var datetime bridge_stats_end: end of the period when stats were gathered @@ -689,6 +687,10 @@ class ExtraInfoDescriptor(Descriptor): .. versionchanged:: 1.4.0 Added the hs_stats_end, hs_rend_cells, hs_rend_cells_attr, hs_dir_onions_seen, and hs_dir_onions_seen_attr attributes. + + .. versionchanged:: 1.6.0 + Added the padding_counts, padding_counts_end, and padding_counts_interval + attributes. """ ATTRIBUTES = { @@ -766,6 +768,10 @@ class ExtraInfoDescriptor(Descriptor): 'hs_dir_onions_seen': (None, _parse_hidden_service_dir_onions_seen_line), 'hs_dir_onions_seen_attr': ({}, _parse_hidden_service_dir_onions_seen_line), + 'padding_counts': ({}, _parse_padding_counts_line), + 'padding_counts_end': (None, _parse_padding_counts_line), + 'padding_counts_interval': (None, _parse_padding_counts_line), + 'bridge_stats_end': (None, _parse_bridge_stats_end_line), 'bridge_stats_interval': (None, _parse_bridge_stats_end_line), 'bridge_ips': (None, _parse_bridge_ips_line), @@ -811,6 +817,7 @@ class ExtraInfoDescriptor(Descriptor): 'hidserv-stats-end': _parse_hidden_service_stats_end_line, 'hidserv-rend-relayed-cells': _parse_hidden_service_rend_relayed_cells_line, 'hidserv-dir-onions-seen': _parse_hidden_service_dir_onions_seen_line, + 'padding-counts': _parse_padding_counts_line, 'dirreq-v2-ips': _parse_dirreq_v2_ips_line, 'dirreq-v3-ips': _parse_dirreq_v3_ips_line, 'dirreq-v2-reqs': 
_parse_dirreq_v2_reqs_line, @@ -836,7 +843,7 @@ class ExtraInfoDescriptor(Descriptor): """ super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load = not validate) - entries = _get_descriptor_components(raw_contents, validate) + entries = _descriptor_components(raw_contents, validate) if validate: for keyword in self._required_fields(): @@ -886,19 +893,56 @@ class RelayExtraInfoDescriptor(ExtraInfoDescriptor): 'GETINFO extra-info/digest/\*', cached descriptors, and metrics (`specification `_). + :var ed25519_certificate str: base64 encoded ed25519 certificate + :var ed25519_signature str: signature of this document using ed25519 :var str signature: **\*** signature for this extrainfo descriptor **\*** attribute is required when we're parsed with validation + + .. versionchanged:: 1.5.0 + Added the ed25519_certificate and ed25519_signature attributes. """ ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{ + 'ed25519_certificate': (None, _parse_identity_ed25519_line), + 'ed25519_signature': (None, _parse_router_sig_ed25519_line), 'signature': (None, _parse_router_signature_line), }) PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{ + 'identity-ed25519': _parse_identity_ed25519_line, + 'router-sig-ed25519': _parse_router_sig_ed25519_line, 'router-signature': _parse_router_signature_line, }) + @classmethod + def content(cls, attr = None, exclude = (), sign = False, signing_key = None): + base_header = ( + ('extra-info', '%s %s' % (_random_nickname(), _random_fingerprint())), + ('published', _random_date()), + ) + + if signing_key: + sign = True + + if sign: + if attr and 'router-signature' in attr: + raise ValueError('Cannot sign the descriptor if a router-signature has been provided') + + if signing_key is None: + signing_key = create_signing_key() + + content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n' + return _append_router_signature(content, signing_key.private) + else: + return _descriptor_content(attr, 
exclude, base_header, ( + ('router-signature', _random_crypto_blob('SIGNATURE')), + )) + + @classmethod + def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None): + return cls(cls.content(attr, exclude, sign, signing_key), validate = validate) + @lru_cache() def digest(self): # our digest is calculated from everything except our signature @@ -910,17 +954,39 @@ class RelayExtraInfoDescriptor(ExtraInfoDescriptor): class BridgeExtraInfoDescriptor(ExtraInfoDescriptor): """ Bridge extra-info descriptor (`bridge descriptor specification - `_) + `_) + + :var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519 + :var str router_digest_sha256: sha256 digest of this document + + .. versionchanged:: 1.5.0 + Added the ed25519_certificate_hash and router_digest_sha256 attributes. """ ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{ + 'ed25519_certificate_hash': (None, _parse_master_key_ed25519_line), + 'router_digest_sha256': (None, _parse_router_digest_sha256_line), '_digest': (None, _parse_router_digest_line), }) PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{ + 'master-key-ed25519': _parse_master_key_ed25519_line, + 'router-digest-sha256': _parse_router_digest_sha256_line, 'router-digest': _parse_router_digest_line, }) + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('extra-info', 'ec2bridgereaac65a3 %s' % _random_fingerprint()), + ('published', _random_date()), + ), ( + ('router-digest', _random_fingerprint()), + )) + def digest(self): return self._digest diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/hidden_service_descriptor.py b/Shared/lib/python3.4/site-packages/stem/descriptor/hidden_service_descriptor.py index 31a99cc..83618dd 100644 --- 
a/Shared/lib/python3.4/site-packages/stem/descriptor/hidden_service_descriptor.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/hidden_service_descriptor.py @@ -1,4 +1,4 @@ -# Copyright 2015, Damian Johnson and The Tor Project +# Copyright 2015-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -9,6 +9,9 @@ Unlike other descriptor types these describe a hidden service rather than a relay. They're created by the service, and can only be fetched via relays with the HSDir flag. +These are only available through the Controller's +:func:`~stem.control.get_hidden_service_descriptor` method. + **Module Overview:** :: @@ -18,34 +21,34 @@ the HSDir flag. .. versionadded:: 1.4.0 """ -# TODO: Add a description for how to retrieve them when tor supports that -# (#14847) and then update #15009. - import base64 import binascii import collections import hashlib import io +import stem.prereq import stem.util.connection import stem.util.str_tools from stem.descriptor import ( PGP_BLOCK_END, Descriptor, - _get_descriptor_components, + _descriptor_content, + _descriptor_components, _read_until_keywords, _bytes_for_block, _value, _parse_simple_line, _parse_timestamp_line, _parse_key_block, + _random_date, + _random_crypto_blob, ) -try: - # added in python 3.2 +if stem.prereq._is_lru_cache_available(): from functools import lru_cache -except ImportError: +else: from stem.util.lru_cache import lru_cache REQUIRED_FIELDS = ( @@ -80,7 +83,17 @@ SINGLE_INTRODUCTION_POINT_FIELDS = [ BASIC_AUTH = 1 STEALTH_AUTH = 2 -IntroductionPoint = collections.namedtuple('IntroductionPoints', INTRODUCTION_POINTS_ATTR.keys()) + +class IntroductionPoints(collections.namedtuple('IntroductionPoints', INTRODUCTION_POINTS_ATTR.keys())): + """ + :var str identifier: hash of this introduction point's identity key + :var str address: address of this introduction point + :var int port: port where this introduction point is listening + :var str onion_key: public key 
for communicating with this introduction point + :var str service_key: public key for communicating with this hidden service + :var list intro_authentication: tuples of the form (auth_type, auth_data) for + establishing a connection + """ class DecryptionFailure(Exception): @@ -153,25 +166,13 @@ def _parse_introduction_points_line(descriptor, entries): raise ValueError("'introduction-points' should be followed by a MESSAGE block, but was a %s" % block_type) descriptor.introduction_points_encoded = block_contents + descriptor.introduction_points_auth = [] # field was never implemented in tor (#15190) try: - decoded_field = _bytes_for_block(block_contents) + descriptor.introduction_points_content = _bytes_for_block(block_contents) except TypeError: raise ValueError("'introduction-points' isn't base64 encoded content:\n%s" % block_contents) - auth_types = [] - - while decoded_field.startswith(b'service-authentication ') and b'\n' in decoded_field: - auth_line, decoded_field = decoded_field.split(b'\n', 1) - auth_line_comp = auth_line.split(b' ') - - if len(auth_line_comp) < 3: - raise ValueError("Within introduction-points we expected 'service-authentication [auth_type] [auth_data]', but had '%s'" % auth_line) - - auth_types.append((auth_line_comp[1], auth_line_comp[2])) - - descriptor.introduction_points_auth = auth_types - descriptor.introduction_points_content = decoded_field _parse_rendezvous_service_descriptor_line = _parse_simple_line('rendezvous-service-descriptor', 'descriptor_id') _parse_permanent_key_line = _parse_key_block('permanent-key', 'permanent_key', 'RSA PUBLIC KEY') @@ -194,6 +195,7 @@ class HiddenServiceDescriptor(Descriptor): :var str introduction_points_encoded: raw introduction points blob :var list introduction_points_auth: **\*** tuples of the form (auth_method, auth_data) for our introduction_points_content + (**deprecated**, always **[]**) :var bytes introduction_points_content: decoded introduction-points content without authentication 
data, if using cookie authentication this is encrypted @@ -201,6 +203,14 @@ class HiddenServiceDescriptor(Descriptor): **\*** attribute is either required when we're parsed with validation or has a default value, others are left as **None** if undefined + + .. versionchanged:: 1.6.0 + Moved from the deprecated `pycrypto + `_ module to `cryptography + `_ for validating signatures. + + .. versionchanged:: 1.6.0 + Added the **skip_crypto_validation** constructor argument. """ ATTRIBUTES = { @@ -227,9 +237,30 @@ class HiddenServiceDescriptor(Descriptor): 'signature': _parse_signature_line, } - def __init__(self, raw_contents, validate = False): + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('rendezvous-service-descriptor', 'y3olqqblqw2gbh6phimfuiroechjjafa'), + ('version', '2'), + ('permanent-key', _random_crypto_blob('RSA PUBLIC KEY')), + ('secret-id-part', 'e24kgecavwsznj7gpbktqsiwgvngsf4e'), + ('publication-time', _random_date()), + ('protocol-versions', '2,3'), + ('introduction-points', '\n-----BEGIN MESSAGE-----\n-----END MESSAGE-----'), + ), ( + ('signature', _random_crypto_blob('SIGNATURE')), + )) + + @classmethod + def create(cls, attr = None, exclude = (), validate = True, sign = False): + return cls(cls.content(attr, exclude, sign), validate = validate, skip_crypto_validation = not sign) + + def __init__(self, raw_contents, validate = False, skip_crypto_validation = False): super(HiddenServiceDescriptor, self).__init__(raw_contents, lazy_load = not validate) - entries = _get_descriptor_components(raw_contents, validate) + entries = _descriptor_components(raw_contents, validate, non_ascii_fields = ('introduction-points')) if validate: for keyword in REQUIRED_FIELDS: @@ -245,7 +276,7 @@ class HiddenServiceDescriptor(Descriptor): self._parse(entries, validate) - if 
stem.prereq.is_crypto_available(): + if not skip_crypto_validation and stem.prereq.is_crypto_available(): signed_digest = self._digest_for_signature(self.permanent_key, self.signature) content_digest = self._digest_for_content(b'rendezvous-service-descriptor ', b'\nsignature\n') @@ -257,21 +288,9 @@ class HiddenServiceDescriptor(Descriptor): @lru_cache() def introduction_points(self, authentication_cookie = None): """ - Provided this service's introduction points. This provides a list of - IntroductionPoint instances, which have the following attributes... + Provided this service's introduction points. - * **identifier** (str): hash of this introduction point's identity key - * **address** (str): address of this introduction point - * **port** (int): port where this introduction point is listening - * **onion_key** (str): public key for communicating with this introduction point - * **service_key** (str): public key for communicating with this hidden service - * **intro_authentication** (list): tuples of the form (auth_type, auth_data) - for establishing a connection - - :param str authentication_cookie: cookie to decrypt the introduction-points - if it's encrypted - - :returns: **list** of IntroductionPoints instances + :returns: **list** of :class:`~stem.descriptor.hidden_service_descriptor.IntroductionPoints` :raises: * **ValueError** if the our introduction-points is malformed @@ -284,7 +303,7 @@ class HiddenServiceDescriptor(Descriptor): return [] elif authentication_cookie: if not stem.prereq.is_crypto_available(): - raise DecryptionFailure('Decrypting introduction-points requires pycrypto') + raise DecryptionFailure('Decrypting introduction-points requires the cryptography module') try: missing_padding = len(authentication_cookie) % 4 @@ -310,9 +329,8 @@ class HiddenServiceDescriptor(Descriptor): @staticmethod def _decrypt_basic_auth(content, authentication_cookie): - from Crypto.Cipher import AES - from Crypto.Util import Counter - from Crypto.Util.number 
import bytes_to_long + from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + from cryptography.hazmat.backends import default_backend try: client_blocks = int(binascii.hexlify(content[1:2]), 16) @@ -336,15 +354,15 @@ class HiddenServiceDescriptor(Descriptor): # try decrypting the session key - counter = Counter.new(128, initial_value = 0) - cipher = AES.new(authentication_cookie, AES.MODE_CTR, counter = counter) - session_key = cipher.decrypt(encrypted_session_key) + cipher = Cipher(algorithms.AES(authentication_cookie), modes.CTR(b'\x00' * len(iv)), default_backend()) + decryptor = cipher.decryptor() + session_key = decryptor.update(encrypted_session_key) + decryptor.finalize() # attempt to decrypt the intro points with the session key - counter = Counter.new(128, initial_value = bytes_to_long(iv)) - cipher = AES.new(session_key, AES.MODE_CTR, counter = counter) - decrypted = cipher.decrypt(encrypted) + cipher = Cipher(algorithms.AES(session_key), modes.CTR(iv), default_backend()) + decryptor = cipher.decryptor() + decrypted = decryptor.update(encrypted) + decryptor.finalize() # check if the decryption looks correct @@ -355,22 +373,20 @@ class HiddenServiceDescriptor(Descriptor): @staticmethod def _decrypt_stealth_auth(content, authentication_cookie): - from Crypto.Cipher import AES - from Crypto.Util import Counter - from Crypto.Util.number import bytes_to_long + from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + from cryptography.hazmat.backends import default_backend # byte 1 = authentication type, 2-17 = input vector, 18 on = encrypted content - iv, encrypted = content[1:17], content[17:] - counter = Counter.new(128, initial_value = bytes_to_long(iv)) - cipher = AES.new(authentication_cookie, AES.MODE_CTR, counter = counter) + cipher = Cipher(algorithms.AES(authentication_cookie), modes.CTR(iv), default_backend()) + decryptor = cipher.decryptor() - return cipher.decrypt(encrypted) + return 
decryptor.update(encrypted) + decryptor.finalize() @staticmethod def _parse_introduction_points(content): """ - Provides the parsed list of IntroductionPoint for the unencrypted content. + Provides the parsed list of IntroductionPoints for the unencrypted content. """ introduction_points = [] @@ -383,7 +399,7 @@ class HiddenServiceDescriptor(Descriptor): break # reached the end attr = dict(INTRODUCTION_POINTS_ATTR) - entries = _get_descriptor_components(content, False) + entries = _descriptor_components(content, False) for keyword, values in list(entries.items()): value, block_type, block_contents = values[0] @@ -417,6 +433,6 @@ class HiddenServiceDescriptor(Descriptor): auth_type, auth_data = auth_value.split(' ')[:2] auth_entries.append((auth_type, auth_data)) - introduction_points.append(IntroductionPoint(**attr)) + introduction_points.append(IntroductionPoints(**attr)) return introduction_points diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/microdescriptor.py b/Shared/lib/python3.4/site-packages/stem/descriptor/microdescriptor.py index ffbec43..731e845 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/microdescriptor.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/microdescriptor.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015, Damian Johnson and The Tor Project +# Copyright 2013-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -19,7 +19,7 @@ corresponding router status entry. For added fun as of this writing the controller doesn't even surface those router status entries (:trac:`7953`). -For instance, here's an example that prints the nickname and fignerprints of +For instance, here's an example that prints the nickname and fingerprints of the exit relays. :: @@ -67,14 +67,18 @@ Doing the same is trivial with server descriptors... 
import hashlib import stem.exit_policy +import stem.prereq from stem.descriptor import ( Descriptor, - _get_descriptor_components, + _descriptor_content, + _descriptor_components, _read_until_keywords, - _value, + _values, _parse_simple_line, + _parse_protocol_line, _parse_key_block, + _random_crypto_blob, ) from stem.descriptor.router_status_entry import ( @@ -82,10 +86,9 @@ from stem.descriptor.router_status_entry import ( _parse_p_line, ) -try: - # added in python 3.2 +if stem.prereq._is_lru_cache_available(): from functools import lru_cache -except ImportError: +else: from stem.util.lru_cache import lru_cache REQUIRED_FIELDS = ( @@ -98,6 +101,7 @@ SINGLE_FIELDS = ( 'family', 'p', 'p6', + 'pr', ) @@ -159,21 +163,35 @@ def _parse_file(descriptor_file, validate = False, **kwargs): def _parse_id_line(descriptor, entries): - value = _value('id', entries) - value_comp = value.split() + identities = {} - if len(value_comp) >= 2: - descriptor.identifier_type = value_comp[0] - descriptor.identifier = value_comp[1] - else: - raise ValueError("'id' lines should contain both the key type and digest: id %s" % value) + for entry in _values('id', entries): + entry_comp = entry.split() + + if len(entry_comp) >= 2: + key_type, key_value = entry_comp[0], entry_comp[1] + + if key_type in identities: + raise ValueError("There can only be one 'id' line per a key type, but '%s' appeared multiple times" % key_type) + + descriptor.identifier_type = key_type + descriptor.identifier = key_value + identities[key_type] = key_value + else: + raise ValueError("'id' lines should contain both the key type and digest: id %s" % entry) + + descriptor.identifiers = identities + + +def _parse_digest(descriptor, entries): + setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper()) -_parse_digest = lambda descriptor, entries: setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper()) _parse_onion_key_line = _parse_key_block('onion-key', 
'onion_key', 'RSA PUBLIC KEY') _parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key') -_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', _value('family', entries).split(' ')) -_parse_p6_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('p6', entries))) +_parse_family_line = _parse_simple_line('family', 'family', func = lambda v: v.split(' ')) +_parse_p6_line = _parse_simple_line('p6', 'exit_policy_v6', func = lambda v: stem.exit_policy.MicroExitPolicy(v)) +_parse_pr_line = _parse_protocol_line('pr', 'protocols') class Microdescriptor(Descriptor): @@ -192,13 +210,27 @@ class Microdescriptor(Descriptor): :var list family: **\*** nicknames or fingerprints of declared family :var stem.exit_policy.MicroExitPolicy exit_policy: **\*** relay's exit policy :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6 - :var str identifier_type: identity digest key type - :var str identifier: base64 encoded identity digest, this is only used for collision prevention (:trac:`11743`) + :var hash identifiers: mapping of key types (like rsa1024 or ed25519) to + their base64 encoded identity, this is only used for collision prevention + (:trac:`11743`) + :var dict protocols: mapping of protocols to their supported versions + + :var str identifier: base64 encoded identity digest (**deprecated**, use + identifiers instead) + :var str identifier_type: identity digest key type (**deprecated**, use + identifiers instead) **\*** attribute is required when we're parsed with validation .. versionchanged:: 1.1.0 Added the identifier and identifier_type attributes. + + .. versionchanged:: 1.5.0 + Added the identifiers attribute, and deprecated identifier and + identifier_type since the field can now appear multiple times. + + .. versionchanged:: 1.6.0 + Added the protocols attribute. 
""" ATTRIBUTES = { @@ -208,8 +240,10 @@ class Microdescriptor(Descriptor): 'family': ([], _parse_family_line), 'exit_policy': (stem.exit_policy.MicroExitPolicy('reject 1-65535'), _parse_p_line), 'exit_policy_v6': (None, _parse_p6_line), - 'identifier_type': (None, _parse_id_line), - 'identifier': (None, _parse_id_line), + 'identifier_type': (None, _parse_id_line), # deprecated in favor of identifiers + 'identifier': (None, _parse_id_line), # deprecated in favor of identifiers + 'identifiers': ({}, _parse_id_line), + 'protocols': ({}, _parse_pr_line), 'digest': (None, _parse_digest), } @@ -220,13 +254,23 @@ class Microdescriptor(Descriptor): 'family': _parse_family_line, 'p': _parse_p_line, 'p6': _parse_p6_line, + 'pr': _parse_pr_line, 'id': _parse_id_line, } + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('onion-key', _random_crypto_blob('RSA PUBLIC KEY')), + )) + def __init__(self, raw_contents, validate = False, annotations = None): super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate) self._annotation_lines = annotations if annotations else [] - entries = _get_descriptor_components(raw_contents, validate) + entries = _descriptor_components(raw_contents, validate) if validate: self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper() @@ -307,6 +351,9 @@ class Microdescriptor(Descriptor): def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py b/Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py index a162e2e..57098e8 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py +++ 
b/Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py @@ -1,14 +1,15 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ Parsing for Tor network status documents. This supports both the v2 and v3 -dir-spec. Documents can be obtained from a few sources... +`dir-spec `_. +Documents can be obtained from a few sources... * The 'cached-consensus' file in Tor's data directory. -* Archived descriptors provided by CollecTor - (https://collector.torproject.org/). +* Archived descriptors provided by `CollecTor + `_. * Directory authorities and mirrors via their DirPort. @@ -19,6 +20,10 @@ dir-spec. Documents can be obtained from a few sources... * list of :class:`stem.descriptor.router_status_entry.RouterStatusEntry` * document footer +**For a great graphical overview see** `Jordan Wright's chart describing the +anatomy of the consensus +`_. + Of these, the router status entry section can be quite large (on the order of hundreds of kilobytes). As such we provide a couple of methods for reading network status documents through :func:`~stem.descriptor.__init__.parse_file`. @@ -47,16 +52,6 @@ For more information see :func:`~stem.descriptor.__init__.DocumentHandler`... KeyCertificate - Certificate used to authenticate an authority DocumentSignature - Signature of a document by a directory authority DirectoryAuthority - Directory authority as defined in a v3 network status document - - -.. data:: PackageVersion - - Latest recommended version of a package that's available. 
- - :var str name: name of the package - :var str version: latest recommended version - :var str url: package's url - :var dict digests: mapping of digest types to their value """ import collections @@ -71,13 +66,22 @@ from stem.descriptor import ( PGP_BLOCK_END, Descriptor, DocumentHandler, - _get_descriptor_components, + _descriptor_content, + _descriptor_components, _read_until_keywords, _value, _parse_simple_line, + _parse_if_present, _parse_timestamp_line, _parse_forty_character_hex, + _parse_protocol_line, _parse_key_block, + _mappings_for, + _random_nickname, + _random_fingerprint, + _random_ipv4_address, + _random_date, + _random_crypto_blob, ) from stem.descriptor.router_status_entry import ( @@ -86,13 +90,6 @@ from stem.descriptor.router_status_entry import ( RouterStatusEntryMicroV3, ) -PackageVersion = collections.namedtuple('PackageVersion', [ - 'name', - 'version', - 'url', - 'digests', -]) - # Version 2 network status document fields, tuples of the form... # (keyword, is_mandatory) @@ -130,6 +127,15 @@ HEADER_STATUS_DOCUMENT_FIELDS = ( ('package', True, True, False), ('known-flags', True, True, True), ('flag-thresholds', True, False, False), + ('shared-rand-participate', True, False, False), + ('shared-rand-commit', True, False, False), + ('shared-rand-previous-value', True, True, False), + ('shared-rand-current-value', True, True, False), + ('bandwidth-file-headers', True, False, False), + ('recommended-client-protocols', True, True, False), + ('recommended-relay-protocols', True, True, False), + ('required-client-protocols', True, True, False), + ('required-relay-protocols', True, True, False), ('params', True, True, False), ) @@ -139,9 +145,6 @@ FOOTER_STATUS_DOCUMENT_FIELDS = ( ('directory-signature', True, True, True), ) -HEADER_FIELDS = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS] -FOOTER_FIELDS = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS] - AUTH_START = 'dir-source' ROUTERS_START = 'r' FOOTER_START = 'directory-footer' @@ 
-159,8 +162,17 @@ DEFAULT_PARAMS = { 'cbttestfreq': 60, 'cbtmintimeout': 2000, 'cbtinitialtimeout': 60000, + 'cbtlearntimeout': 180, + 'cbtmaxopencircs': 10, + 'UseOptimisticData': 1, 'Support022HiddenServices': 1, 'usecreatefast': 1, + 'max-consensuses-age-to-cache-for-diff': 72, + 'try-diff-for-consensus-newer-than': 72, + 'onion-key-rotation-days': 28, + 'onion-key-grace-period-days': 7, + 'hs_service_max_rdv_failures': 2, + 'circ_max_cell_queue_size': 50000, } # KeyCertificate fields, tuple is of the form... @@ -197,6 +209,8 @@ PARAM_RANGE = { 'cbtclosequantile': (MIN_PARAM, 99), 'cbttestfreq': (1, MAX_PARAM), 'cbtmintimeout': (500, MAX_PARAM), + 'cbtlearntimeout': (10, 60000), + 'cbtmaxopencircs': (0, 14), 'UseOptimisticData': (0, 1), 'Support022HiddenServices': (0, 1), 'usecreatefast': (0, 1), @@ -207,9 +221,40 @@ PARAM_RANGE = { 'GuardLifetime': (2592000, 157766400), # min: 30 days, max: 1826 days 'NumNTorsPerTAP': (1, 100000), 'AllowNonearlyExtend': (0, 1), + 'AuthDirNumSRVAgreements': (1, MAX_PARAM), + 'max-consensuses-age-to-cache-for-diff': (0, 8192), + 'try-diff-for-consensus-newer-than': (0, 8192), + 'onion-key-rotation-days': (1, 90), + 'onion-key-grace-period-days': (1, 90), # max is the highest onion-key-rotation-days + 'hs_service_max_rdv_failures': (1, 10), + 'circ_max_cell_queue_size': (1000, 4294967295), } +class PackageVersion(collections.namedtuple('PackageVersion', ['name', 'version', 'url', 'digests'])): + """ + Latest recommended version of a package that's available. + + :var str name: name of the package + :var str version: latest recommended version + :var str url: package's url + :var dict digests: mapping of digest types to their value + """ + + +class SharedRandomnessCommitment(collections.namedtuple('SharedRandomnessCommitment', ['version', 'algorithm', 'identity', 'commit', 'reveal'])): + """ + Directory authority's commitment for generating the next shared random value. 
+ + :var int version: shared randomness protocol version + :var str algorithm: hash algorithm used to make the commitment + :var str identity: authority's sha1 identity fingerprint + :var str commit: base64 encoded commitment hash to the shared random value + :var str reveal: base64 encoded commitment to the shared random value, + **None** of not provided + """ + + def _parse_file(document_file, document_type = None, validate = False, is_microdescriptor = False, document_handler = DocumentHandler.ENTRIES, **kwargs): """ Parses a network status and iterates over the RouterStatusEntry in it. The @@ -361,10 +406,10 @@ _parse_network_status_version_line = _parse_version_line('network-status-version _parse_fingerprint_line = _parse_forty_character_hex('fingerprint', 'fingerprint') _parse_contact_line = _parse_simple_line('contact', 'contact') _parse_dir_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY') -_parse_client_versions_line = lambda descriptor, entries: setattr(descriptor, 'client_versions', _value('client-versions', entries).split(',')) -_parse_server_versions_line = lambda descriptor, entries: setattr(descriptor, 'server_versions', _value('server-versions', entries).split(',')) +_parse_client_versions_line = _parse_simple_line('client-versions', 'client_versions', func = lambda v: v.split(',')) +_parse_server_versions_line = _parse_simple_line('server-versions', 'server_versions', func = lambda v: v.split(',')) _parse_published_line = _parse_timestamp_line('published', 'published') -_parse_dir_options_line = lambda descriptor, entries: setattr(descriptor, 'options', _value('dir-options', entries).split()) +_parse_dir_options_line = _parse_simple_line('dir-options', 'options', func = lambda v: v.split()) _parse_directory_signature_line = _parse_key_block('directory-signature', 'signature', 'SIGNATURE', value_attribute = 'signing_authority') @@ -428,6 +473,22 @@ class NetworkStatusDocumentV2(NetworkStatusDocument): 
'directory-signature': _parse_directory_signature_line, } + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('network-status-version', '2'), + ('dir-source', '%s %s 80' % (_random_ipv4_address(), _random_ipv4_address())), + ('fingerprint', _random_fingerprint()), + ('contact', 'arma at mit dot edu'), + ('published', _random_date()), + ('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')), + ), ( + ('directory-signature', 'moria2' + _random_crypto_blob('SIGNATURE')), + )) + def __init__(self, raw_content, validate = False): super(NetworkStatusDocumentV2, self).__init__(raw_content, lazy_load = not validate) @@ -450,7 +511,7 @@ class NetworkStatusDocumentV2(NetworkStatusDocument): self.routers = dict((desc.fingerprint, desc) for desc in router_iter) - entries = _get_descriptor_components(document_content + b'\n' + document_file.read(), validate) + entries = _descriptor_components(document_content + b'\n' + document_file.read(), validate) if validate: self._check_constraints(entries) @@ -582,26 +643,20 @@ def _parse_header_flag_thresholds_line(descriptor, entries): value, thresholds = _value('flag-thresholds', entries).strip(), {} - if value: - for entry in value.split(' '): - if '=' not in entry: - raise ValueError("Network status document's 'flag-thresholds' line is expected to be space separated key=value mappings, got: flag-thresholds %s" % value) + for key, val in _mappings_for('flag-thresholds', value): + try: + if val.endswith('%'): + # opting for string manipulation rather than just + # 'float(entry_value) / 100' because floating point arithmetic + # will lose precision - entry_key, entry_value = entry.split('=', 1) - - try: - if entry_value.endswith('%'): - # opting for string manipulation rather than just - # 'float(entry_value) / 100' because floating point arithmetic - # will lose precision 
- - thresholds[entry_key] = float('0.' + entry_value[:-1].replace('.', '', 1)) - elif '.' in entry_value: - thresholds[entry_key] = float(entry_value) - else: - thresholds[entry_key] = int(entry_value) - except ValueError: - raise ValueError("Network status document's 'flag-thresholds' line is expected to have float values, got: flag-thresholds %s" % value) + thresholds[key] = float('0.' + val[:-1].replace('.', '', 1)) + elif '.' in val: + thresholds[key] = float(val) + else: + thresholds[key] = int(val) + except ValueError: + raise ValueError("Network status document's 'flag-thresholds' line is expected to have float values, got: flag-thresholds %s" % value) descriptor.flag_thresholds = thresholds @@ -617,11 +672,6 @@ def _parse_header_parameters_line(descriptor, entries): value = _value('params', entries) - # should only appear in consensus-method 7 or later - - if not descriptor.meets_consensus_method(7): - raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later") - if value != '': descriptor.params = _parse_int_mappings('params', value, True) descriptor._check_params_constraints() @@ -661,7 +711,7 @@ def _parse_package_line(descriptor, entries): package_versions = [] for value, _, _ in entries['package']: - value_comp = value.split() + value_comp = value.split(' ', 3) if len(value_comp) < 3: raise ValueError("'package' must at least have a 'PackageName Version URL': %s" % value) @@ -669,33 +719,98 @@ def _parse_package_line(descriptor, entries): name, version, url = value_comp[:3] digests = {} - for digest_entry in value_comp[3:]: - if '=' not in digest_entry: - raise ValueError("'package' digest entries should be 'key=value' pairs: %s" % value) - - key, value = digest_entry.split('=', 1) - digests[key] = value + if len(value_comp) == 4: + for key, val in _mappings_for('package', value_comp[3]): + digests[key] = val package_versions.append(PackageVersion(name, version, url, digests)) descriptor.packages = 
package_versions +def _parsed_shared_rand_commit(descriptor, entries): + # "shared-rand-commit" Version AlgName Identity Commit [Reveal] + + commitments = [] + + for value, _, _ in entries['shared-rand-commit']: + value_comp = value.split() + + if len(value_comp) < 4: + raise ValueError("'shared-rand-commit' must at least have a 'Version AlgName Identity Commit': %s" % value) + + version, algorithm, identity, commit = value_comp[:4] + reveal = value_comp[4] if len(value_comp) >= 5 else None + + if not version.isdigit(): + raise ValueError("The version on our 'shared-rand-commit' line wasn't an integer: %s" % value) + + commitments.append(SharedRandomnessCommitment(int(version), algorithm, identity, commit, reveal)) + + descriptor.shared_randomness_commitments = commitments + + +def _parse_shared_rand_previous_value(descriptor, entries): + # "shared-rand-previous-value" NumReveals Value + + value = _value('shared-rand-previous-value', entries) + value_comp = value.split(' ') + + if len(value_comp) == 2 and value_comp[0].isdigit(): + descriptor.shared_randomness_previous_reveal_count = int(value_comp[0]) + descriptor.shared_randomness_previous_value = value_comp[1] + else: + raise ValueError("A network status document's 'shared-rand-previous-value' line must be a pair of values, the first an integer but was '%s'" % value) + + +def _parse_shared_rand_current_value(descriptor, entries): + # "shared-rand-current-value" NumReveals Value + + value = _value('shared-rand-current-value', entries) + value_comp = value.split(' ') + + if len(value_comp) == 2 and value_comp[0].isdigit(): + descriptor.shared_randomness_current_reveal_count = int(value_comp[0]) + descriptor.shared_randomness_current_value = value_comp[1] + else: + raise ValueError("A network status document's 'shared-rand-current-value' line must be a pair of values, the first an integer but was '%s'" % value) + + +def _parse_bandwidth_file_headers(descriptor, entries): + # "bandwidth-file-headers" KeyValues + # 
KeyValues ::= "" | KeyValue | KeyValues SP KeyValue + # KeyValue ::= Keyword '=' Value + # Value ::= ArgumentChar+ + + value = _value('bandwidth-file-headers', entries) + results = {} + + for key, val in _mappings_for('bandwidth-file-headers', value): + results[key] = val + + descriptor.bandwidth_file_headers = results + + _parse_header_valid_after_line = _parse_timestamp_line('valid-after', 'valid_after') _parse_header_fresh_until_line = _parse_timestamp_line('fresh-until', 'fresh_until') _parse_header_valid_until_line = _parse_timestamp_line('valid-until', 'valid_until') _parse_header_client_versions_line = _parse_versions_line('client-versions', 'client_versions') _parse_header_server_versions_line = _parse_versions_line('server-versions', 'server_versions') -_parse_header_known_flags_line = lambda descriptor, entries: setattr(descriptor, 'known_flags', [entry for entry in _value('known-flags', entries).split(' ') if entry]) -_parse_footer_bandwidth_weights_line = lambda descriptor, entries: setattr(descriptor, 'bandwidth_weights', _parse_int_mappings('bandwidth-weights', _value('bandwidth-weights', entries), True)) +_parse_header_known_flags_line = _parse_simple_line('known-flags', 'known_flags', func = lambda v: [entry for entry in v.split(' ') if entry]) +_parse_footer_bandwidth_weights_line = _parse_simple_line('bandwidth-weights', 'bandwidth_weights', func = lambda v: _parse_int_mappings('bandwidth-weights', v, True)) +_parse_shared_rand_participate_line = _parse_if_present('shared-rand-participate', 'is_shared_randomness_participate') +_parse_recommended_client_protocols_line = _parse_protocol_line('recommended-client-protocols', 'recommended_client_protocols') +_parse_recommended_relay_protocols_line = _parse_protocol_line('recommended-relay-protocols', 'recommended_relay_protocols') +_parse_required_client_protocols_line = _parse_protocol_line('required-client-protocols', 'required_client_protocols') +_parse_required_relay_protocols_line = 
_parse_protocol_line('required-relay-protocols', 'required_relay_protocols') class NetworkStatusDocumentV3(NetworkStatusDocument): """ Version 3 network status document. This could be either a vote or consensus. - :var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` - contained in the document + :var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` + mapping for relays contained in the document :var int version: **\*** document version :var str version_flavor: **\*** flavor associated with the document (such as 'microdesc') @@ -725,17 +840,59 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): :var int consensus_method: method version used to generate this consensus :var dict bandwidth_weights: dict of weight(str) => value(int) mappings + :var int shared_randomness_current_reveal_count: number of commitments + used to generate the current shared random value + :var str shared_randomness_current_value: base64 encoded current shared + random value + + :var int shared_randomness_previous_reveal_count: number of commitments + used to generate the last shared random value + :var str shared_randomness_previous_value: base64 encoded last shared random + value + **Vote Attributes:** :var list consensus_methods: list of ints for the supported method versions :var datetime published: time when the document was published :var dict flag_thresholds: **\*** mapping of internal performance thresholds used while making the vote, values are **ints** or **floats** + :var dict recommended_client_protocols: recommended protocols for clients + :var dict recommended_relay_protocols: recommended protocols for relays + :var dict required_client_protocols: required protocols for clients + :var dict required_relay_protocols: required protocols for relays + :var dict bandwidth_file_headers: headers from the bandwidth authority that + generated this vote + **\*** attribute is either required when we're parsed 
with validation or has a default value, others are left as None if undefined .. versionchanged:: 1.4.0 Added the packages attribute. + + .. versionchanged:: 1.5.0 + Added the is_shared_randomness_participate, shared_randomness_commitments, + shared_randomness_previous_reveal_count, + shared_randomness_previous_value, + shared_randomness_current_reveal_count, and + shared_randomness_current_value attributes. + + .. versionchanged:: 1.6.0 + Added the recommended_client_protocols, recommended_relay_protocols, + required_client_protocols, and required_relay_protocols attributes. + + .. versionchanged:: 1.6.0 + The is_shared_randomness_participate and shared_randomness_commitments + were misdocumented in the tor spec and as such never set. They're now an + attribute of votes in the **directory_authorities**. + + .. versionchanged:: 1.7.0 + The shared_randomness_current_reveal_count and + shared_randomness_previous_reveal_count attributes were undocumented and + not provided properly if retrieved before their shred_randomness_*_value + counterpart. + + .. versionchanged:: 1.7.0 + Added the bandwidth_file_headers attributbute. 
""" ATTRIBUTES = { @@ -757,7 +914,16 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): 'packages': ([], _parse_package_line), 'known_flags': ([], _parse_header_known_flags_line), 'flag_thresholds': ({}, _parse_header_flag_thresholds_line), + 'recommended_client_protocols': ({}, _parse_recommended_client_protocols_line), + 'recommended_relay_protocols': ({}, _parse_recommended_relay_protocols_line), + 'required_client_protocols': ({}, _parse_required_client_protocols_line), + 'required_relay_protocols': ({}, _parse_required_relay_protocols_line), 'params': ({}, _parse_header_parameters_line), + 'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value), + 'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value), + 'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value), + 'shared_randomness_current_value': (None, _parse_shared_rand_current_value), + 'bandwidth_file_headers': ({}, _parse_bandwidth_file_headers), 'signatures': ([], _parse_footer_directory_signature_line), 'bandwidth_weights': ({}, _parse_footer_bandwidth_weights_line), @@ -778,7 +944,14 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): 'package': _parse_package_line, 'known-flags': _parse_header_known_flags_line, 'flag-thresholds': _parse_header_flag_thresholds_line, + 'recommended-client-protocols': _parse_recommended_client_protocols_line, + 'recommended-relay-protocols': _parse_recommended_relay_protocols_line, + 'required-client-protocols': _parse_required_client_protocols_line, + 'required-relay-protocols': _parse_required_relay_protocols_line, 'params': _parse_header_parameters_line, + 'shared-rand-previous-value': _parse_shared_rand_previous_value, + 'shared-rand-current-value': _parse_shared_rand_current_value, + 'bandwidth-file-headers': _parse_bandwidth_file_headers, } FOOTER_PARSER_FOR_LINE = { @@ -787,6 +960,85 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): 'directory-signature': 
_parse_footer_directory_signature_line, } + @classmethod + def content(cls, attr = None, exclude = (), sign = False, authorities = None, routers = None): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + attr = {} if attr is None else dict(attr) + is_vote = attr.get('vote-status') == 'vote' + + if is_vote: + extra_defaults = {'consensus-methods': '1 9', 'published': _random_date()} + else: + extra_defaults = {'consensus-method': '9'} + + if is_vote and authorities is None: + authorities = [DirectoryAuthority.create(is_vote = is_vote)] + + for k, v in extra_defaults.items(): + if exclude and k in exclude: + continue # explicitly excluding this field + elif k not in attr: + attr[k] = v + + desc_content = _descriptor_content(attr, exclude, ( + ('network-status-version', '3'), + ('vote-status', 'consensus'), + ('consensus-methods', None), + ('consensus-method', None), + ('published', None), + ('valid-after', _random_date()), + ('fresh-until', _random_date()), + ('valid-until', _random_date()), + ('voting-delay', '300 300'), + ('client-versions', None), + ('server-versions', None), + ('package', None), + ('known-flags', 'Authority BadExit Exit Fast Guard HSDir Named Running Stable Unnamed V2Dir Valid'), + ('params', None), + ), ( + ('directory-footer', ''), + ('bandwidth-weights', None), + ('directory-signature', '%s %s%s' % (_random_fingerprint(), _random_fingerprint(), _random_crypto_blob('SIGNATURE'))), + )) + + # inject the authorities and/or routers between the header and footer + + if authorities: + if b'directory-footer' in desc_content: + footer_div = desc_content.find(b'\ndirectory-footer') + 1 + elif b'directory-signature' in desc_content: + footer_div = desc_content.find(b'\ndirectory-signature') + 1 + else: + if routers: + desc_content += b'\n' + + footer_div = len(desc_content) + 1 + + authority_content = stem.util.str_tools._to_bytes('\n'.join([str(a) for a in authorities]) + '\n') + desc_content = 
desc_content[:footer_div] + authority_content + desc_content[footer_div:] + + if routers: + if b'directory-footer' in desc_content: + footer_div = desc_content.find(b'\ndirectory-footer') + 1 + elif b'directory-signature' in desc_content: + footer_div = desc_content.find(b'\ndirectory-signature') + 1 + else: + if routers: + desc_content += b'\n' + + footer_div = len(desc_content) + 1 + + router_content = stem.util.str_tools._to_bytes('\n'.join([str(r) for r in routers]) + '\n') + desc_content = desc_content[:footer_div] + router_content + desc_content[footer_div:] + + return desc_content + + @classmethod + def create(cls, attr = None, exclude = (), validate = True, sign = False, authorities = None, routers = None): + return cls(cls.content(attr, exclude, sign, authorities, routers), validate = validate) + def __init__(self, raw_content, validate = False, default_params = True): """ Parse a v3 network status document. @@ -802,6 +1054,13 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): super(NetworkStatusDocumentV3, self).__init__(raw_content, lazy_load = not validate) document_file = io.BytesIO(raw_content) + # TODO: Tor misdocumented these as being in the header rather than the + # authority section. As such these have never been set but we need the + # attributes for stem 1.5 compatability. Drop these in 2.0. + + self.is_shared_randomness_participate = False + self.shared_randomness_commitments = [] + self._default_params = default_params self._header(document_file, validate) @@ -829,6 +1088,39 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): self.routers = dict((desc.fingerprint, desc) for desc in router_iter) self._footer(document_file, validate) + def validate_signatures(self, key_certs): + """ + Validates we're properly signed by the signing certificates. + + .. 
versionadded:: 1.6.0 + + :param list key_certs: :class:`~stem.descriptor.networkstatus.KeyCertificates` + to validate the consensus against + + :raises: **ValueError** if an insufficient number of valid signatures are present. + """ + + # sha1 hash of the body and header + + local_digest = self._digest_for_content(b'network-status-version', b'directory-signature ') + + valid_digests, total_digests = 0, 0 + required_digests = len(self.signatures) / 2.0 + signing_keys = dict([(cert.fingerprint, cert.signing_key) for cert in key_certs]) + + for sig in self.signatures: + if sig.identity not in signing_keys: + continue + + signed_digest = self._digest_for_signature(signing_keys[sig.identity], sig.signature) + total_digests += 1 + + if signed_digest == local_digest: + valid_digests += 1 + + if valid_digests < required_digests: + raise ValueError('Network Status Document has %i valid signatures out of %i total, needed %i' % (valid_digests, total_digests, required_digests)) + def get_unrecognized_lines(self): if self._lazy_loading: self._parse(self._header_entries, False, parser_for_line = self.HEADER_PARSER_FOR_LINE) @@ -863,13 +1155,14 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): def _header(self, document_file, validate): content = bytes.join(b'', _read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file)) - entries = _get_descriptor_components(content, validate) + entries = _descriptor_components(content, validate) + header_fields = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS] if validate: # all known header fields can only appear once except for keyword, values in list(entries.items()): - if len(values) > 1 and keyword in HEADER_FIELDS and keyword != 'package': + if len(values) > 1 and keyword in header_fields and keyword != 'package' and keyword != 'shared-rand-commit': raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values))) if self._default_params: @@ -877,8 +1170,12 @@ 
class NetworkStatusDocumentV3(NetworkStatusDocument): self._parse(entries, validate, parser_for_line = self.HEADER_PARSER_FOR_LINE) + # should only appear in consensus-method 7 or later + + if not self.meets_consensus_method(7) and 'params' in list(entries.keys()): + raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later") + _check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS) - _check_for_misordered_fields(entries, HEADER_FIELDS) # default consensus_method and consensus_methods based on if we're a consensus or vote @@ -891,14 +1188,15 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): self._entries.update(entries) def _footer(self, document_file, validate): - entries = _get_descriptor_components(document_file.read(), validate) + entries = _descriptor_components(document_file.read(), validate) + footer_fields = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS] if validate: for keyword, values in list(entries.items()): # all known footer fields can only appear once except... 
# * 'directory-signature' in a consensus - if len(values) > 1 and keyword in FOOTER_FIELDS: + if len(values) > 1 and keyword in footer_fields: if not (keyword == 'directory-signature' and self.is_consensus): raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values))) @@ -917,7 +1215,6 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): raise ValueError("Network status document's footer should start with a 'directory-signature' line prior to consensus-method 9") _check_for_missing_and_disallowed_fields(self, entries, FOOTER_STATUS_DOCUMENT_FIELDS) - _check_for_misordered_fields(entries, FOOTER_FIELDS) else: self._footer_entries = entries self._entries.update(entries) @@ -946,6 +1243,9 @@ class NetworkStatusDocumentV3(NetworkStatusDocument): def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) @@ -986,71 +1286,32 @@ def _check_for_missing_and_disallowed_fields(document, entries, fields): raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields)) -def _check_for_misordered_fields(entries, expected): - """ - To be valid a network status document's fiends need to appear in a specific - order. Checks that known fields appear in that order (unrecognized fields - are ignored). - - :param dict entries: ordered keyword/value mappings of the header or footer - :param list expected: ordered list of expected fields (either - **HEADER_FIELDS** or **FOOTER_FIELDS**) - - :raises: **ValueError** if entries aren't properly ordered - """ - - # Earlier validation has ensured that our fields either belong to our - # document type or are unknown. Remove the unknown fields since they - # reflect a spec change and can appear anywhere in the document. 
- - actual = [field for field in entries.keys() if field in expected] - - # Narrow the expected to just what we have. If the lists then match then the - # order's valid. - - expected = [field for field in expected if field in actual] - - if actual != expected: - actual_label = ', '.join(actual) - expected_label = ', '.join(expected) - raise ValueError("The fields in a section of the document are misordered. It should be '%s' but was '%s'" % (actual_label, expected_label)) - - def _parse_int_mappings(keyword, value, validate): # Parse a series of 'key=value' entries, checking the following: # - values are integers # - keys are sorted in lexical order results, seen_keys = {}, [] - for entry in value.split(' '): + error_template = "Unable to parse network status document's '%s' line (%%s): %s'" % (keyword, value) + + for key, val in _mappings_for(keyword, value): + if validate: + # parameters should be in ascending order by their key + for prior_key in seen_keys: + if prior_key > key: + raise ValueError(error_template % 'parameters must be sorted by their key') + try: - if '=' not in entry: - raise ValueError("must only have 'key=value' entries") + # the int() function accepts things like '+123', but we don't want to - entry_key, entry_value = entry.split('=', 1) + if val.startswith('+'): + raise ValueError() - try: - # the int() function accepts things like '+123', but we don't want to - if entry_value.startswith('+'): - raise ValueError() + results[key] = int(val) + except ValueError: + raise ValueError(error_template % ("'%s' is a non-numeric value" % val)) - entry_value = int(entry_value) - except ValueError: - raise ValueError("'%s' is a non-numeric value" % entry_value) - - if validate: - # parameters should be in ascending order by their key - for prior_key in seen_keys: - if prior_key > entry_key: - raise ValueError('parameters must be sorted by their key') - - results[entry_key] = entry_value - seen_keys.append(entry_key) - except ValueError as exc: - if not 
validate: - continue - - raise ValueError("Unable to parse network status document's '%s' line (%s): %s'" % (keyword, exc, value)) + seen_keys.append(key) return results @@ -1120,11 +1381,31 @@ class DirectoryAuthority(Descriptor): :var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\*** authority's key certificate + :var bool is_shared_randomness_participate: **\*** **True** if this authority + participates in establishing a shared random value, **False** otherwise + :var list shared_randomness_commitments: **\*** list of + :data:`~stem.descriptor.networkstatus.SharedRandomnessCommitment` entries + :var int shared_randomness_previous_reveal_count: number of commitments + used to generate the last shared random value + :var str shared_randomness_previous_value: base64 encoded last shared random + value + :var int shared_randomness_current_reveal_count: number of commitments + used to generate the current shared random value + :var str shared_randomness_current_value: base64 encoded current shared + random value + **\*** mandatory attribute .. versionchanged:: 1.4.0 Renamed our 'fingerprint' attribute to 'v3ident' (prior attribute exists for backward compatability, but is deprecated). + + .. versionchanged:: 1.6.0 + Added the is_shared_randomness_participate, shared_randomness_commitments, + shared_randomness_previous_reveal_count, + shared_randomness_previous_value, + shared_randomness_current_reveal_count, and + shared_randomness_current_value attributes. 
""" ATTRIBUTES = { @@ -1138,6 +1419,12 @@ class DirectoryAuthority(Descriptor): 'contact': (None, _parse_contact_line), 'vote_digest': (None, _parse_vote_digest_line), 'legacy_dir_key': (None, _parse_legacy_dir_key_line), + 'is_shared_randomness_participate': (False, _parse_shared_rand_participate_line), + 'shared_randomness_commitments': ([], _parsed_shared_rand_commit), + 'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value), + 'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value), + 'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value), + 'shared_randomness_current_value': (None, _parse_shared_rand_current_value), } PARSER_FOR_LINE = { @@ -1145,8 +1432,38 @@ class DirectoryAuthority(Descriptor): 'contact': _parse_contact_line, 'legacy-dir-key': _parse_legacy_dir_key_line, 'vote-digest': _parse_vote_digest_line, + 'shared-rand-participate': _parse_shared_rand_participate_line, + 'shared-rand-commit': _parsed_shared_rand_commit, + 'shared-rand-previous-value': _parse_shared_rand_previous_value, + 'shared-rand-current-value': _parse_shared_rand_current_value, } + @classmethod + def content(cls, attr = None, exclude = (), sign = False, is_vote = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + attr = {} if attr is None else dict(attr) + + # include mandatory 'vote-digest' if a consensus + + if not is_vote and not ('vote-digest' in attr or (exclude and 'vote-digest' in exclude)): + attr['vote-digest'] = _random_fingerprint() + + content = _descriptor_content(attr, exclude, ( + ('dir-source', '%s %s no.place.com %s 9030 9090' % (_random_nickname(), _random_fingerprint(), _random_ipv4_address())), + ('contact', 'Mike Perry '), + )) + + if is_vote: + content += b'\n' + KeyCertificate.content() + + return content + + @classmethod + def create(cls, attr = None, exclude = (), validate = True, sign = False, is_vote = False): + return 
cls(cls.content(attr, exclude, sign, is_vote), validate = validate, is_vote = is_vote) + def __init__(self, raw_content, validate = False, is_vote = False): """ Parse a directory authority entry in a v3 network status document. @@ -1171,7 +1488,7 @@ class DirectoryAuthority(Descriptor): else: self.key_certificate = None - entries = _get_descriptor_components(content, validate) + entries = _descriptor_components(content, validate) if validate and 'dir-source' != list(entries.keys())[0]: raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content)) @@ -1233,9 +1550,15 @@ class DirectoryAuthority(Descriptor): return method(str(self).strip(), str(other).strip()) + def __hash__(self): + return hash(str(self).strip()) + def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) @@ -1251,7 +1574,7 @@ def _parse_dir_address_line(descriptor, entries): if ':' not in value: raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: dir-address %s" % value) - address, dirport = value.split(':', 1) + address, dirport = value.rsplit(':', 1) if not stem.util.connection.is_valid_ipv4_address(address): raise ValueError("Key certificate's address isn't a valid IPv4 address: dir-address %s" % value) @@ -1315,9 +1638,25 @@ class KeyCertificate(Descriptor): 'dir-key-certification': _parse_dir_key_certification_line, } + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('dir-key-certificate-version', '3'), + ('fingerprint', _random_fingerprint()), + ('dir-key-published', _random_date()), + ('dir-key-expires', _random_date()), + ('dir-identity-key', _random_crypto_blob('RSA PUBLIC KEY')), + ('dir-signing-key', 
_random_crypto_blob('RSA PUBLIC KEY')), + ), ( + ('dir-key-certification', _random_crypto_blob('SIGNATURE')), + )) + def __init__(self, raw_content, validate = False): super(KeyCertificate, self).__init__(raw_content, lazy_load = not validate) - entries = _get_descriptor_components(raw_content, validate) + entries = _descriptor_components(raw_content, validate) if validate: if 'dir-key-certificate-version' != list(entries.keys())[0]: @@ -1346,9 +1685,15 @@ class KeyCertificate(Descriptor): return method(str(self).strip(), str(other).strip()) + def __hash__(self): + return hash(str(self).strip()) + def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) @@ -1395,9 +1740,15 @@ class DocumentSignature(object): return method(True, True) # we're equal + def __hash__(self): + return hash(str(self).strip()) + def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) @@ -1410,8 +1761,8 @@ class BridgeNetworkStatusDocument(NetworkStatusDocument): Network status document containing bridges. This is only available through the metrics site. 
- :var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` - contained in the document + :var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` + mapping for relays contained in the document :var datetime published: time when the document was published """ diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/reader.py b/Shared/lib/python3.4/site-packages/stem/descriptor/reader.py index a96406d..9889751 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/reader.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/reader.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -89,10 +89,10 @@ except ImportError: import stem.descriptor import stem.prereq +import stem.util +import stem.util.str_tools import stem.util.system -from stem import str_type - # flag to indicate when the reader thread is out of descriptor files to read FINISHED = 'DONE' @@ -179,9 +179,9 @@ def load_processed_files(path): processed_files = {} - with open(path) as input_file: + with open(path, 'rb') as input_file: for line in input_file.readlines(): - line = line.strip() + line = stem.util.str_tools._to_unicode(line.strip()) if not line: continue # skip blank lines @@ -218,6 +218,7 @@ def save_processed_files(path, processed_files): """ # makes the parent directory if it doesn't already exist + try: path_dir = os.path.dirname(path) @@ -264,10 +265,7 @@ class DescriptorReader(object): """ def __init__(self, target, validate = False, follow_links = False, buffer_size = 100, persistence_path = None, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs): - if isinstance(target, (bytes, str_type)): - self._targets = [target] - else: - self._targets = target + self._targets = [target] if stem.util._is_str(target) else target # expand any relative paths we 
got @@ -388,7 +386,7 @@ class DescriptorReader(object): raise ValueError('Already running, you need to call stop() first') else: self._is_stopped.clear() - self._reader_thread = threading.Thread(target = self._read_descriptor_files, name='Descriptor Reader') + self._reader_thread = threading.Thread(target = self._read_descriptor_files, name='Descriptor reader') self._reader_thread.setDaemon(True) self._reader_thread.start() @@ -514,7 +512,7 @@ class DescriptorReader(object): self._unreturned_descriptors.put(desc) self._iter_notice.set() - except TypeError as exc: + except TypeError: self._notify_skip_listeners(target, UnrecognizedType(mime_type)) except ValueError as exc: self._notify_skip_listeners(target, ParsingFailure(exc)) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/remote.py b/Shared/lib/python3.4/site-packages/stem/descriptor/remote.py index 4d3423d..dbae3cd 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/remote.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/remote.py @@ -1,11 +1,22 @@ -# Copyright 2013-2015, Damian Johnson and The Tor Project +# Copyright 2013-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ Module for remotely retrieving descriptors from directory authorities and -mirrors. This is most easily done through the +mirrors. This is the simplest method for getting current tor descriptor +information... + +:: + + import stem.descriptor.remote + + for desc in stem.descriptor.remote.get_server_descriptors(): + if desc.exit_policy.is_exiting_allowed(): + print(' %s (%s)' % (desc.nickname, desc.fingerprint)) + +More custom downloading behavior can be done through the :class:`~stem.descriptor.remote.DescriptorDownloader` class, which issues -:class:`~stem.descriptor.remote.Query` instances to get you the descriptor +:class:`~stem.descriptor.remote.Query` instances to get you descriptor content. For example... :: @@ -19,32 +30,25 @@ content. For example... 
query = downloader.get_server_descriptors() - print 'Exit Relays:' + print('Exit Relays:') try: for desc in query.run(): if desc.exit_policy.is_exiting_allowed(): - print ' %s (%s)' % (desc.nickname, desc.fingerprint) + print(' %s (%s)' % (desc.nickname, desc.fingerprint)) print - print 'Query took %0.2f seconds' % query.runtime + print('Query took %0.2f seconds' % query.runtime) except Exception as exc: - print 'Unable to retrieve the server descriptors: %s' % exc - -If you don't care about errors then you can also simply iterate over the query -itself... + print('Unable to retrieve the server descriptors: %s' % exc) :: - for desc in downloader.get_server_descriptors(): - if desc.exit_policy.is_exiting_allowed(): - print ' %s (%s)' % (desc.nickname, desc.fingerprint) - -:: - - get_authorities - Provides tor directory information. - - DirectoryAuthority - Information about a tor directory authority. + get_instance - Provides a singleton DescriptorDownloader used for... + |- their_server_descriptor - provides the server descriptor of the relay we download from + |- get_server_descriptors - provides present server descriptors + |- get_extrainfo_descriptors - provides present extrainfo descriptors + +- get_consensus - provides the present consensus or router status entries Query - Asynchronous request to download tor descriptors |- start - issues the query if it isn't already running @@ -52,9 +56,9 @@ itself... 
DescriptorDownloader - Configurable class for issuing queries |- use_directory_mirrors - use directory mirrors to download future descriptors + |- their_server_descriptor - provides the server descriptor of the relay we download from |- get_server_descriptors - provides present server descriptors |- get_extrainfo_descriptors - provides present extrainfo descriptors - |- get_microdescriptors - provides present microdescriptors |- get_consensus - provides the present consensus or router status entries |- get_key_certificates - provides present authority key certificates +- query - request an arbitrary descriptor resource @@ -70,6 +74,21 @@ itself... Maximum number of microdescriptors that can requested at a time by their hashes. + +.. data:: Compression (enum) + + Compression when downloading descriptors. + + .. versionadded:: 1.7.0 + + =============== =========== + Compression Description + =============== =========== + **PLAINTEXT** Uncompressed data. + **GZIP** `GZip compression `_. + **ZSTD** `Zstandard compression `_, this requires the `zstandard module `_. + **LZMA** `LZMA compression `_, this requires the 'lzma module `_. 
+ =============== =========== """ import io @@ -79,44 +98,109 @@ import threading import time import zlib -try: - import urllib.request as urllib -except ImportError: - import urllib2 as urllib - +import stem +import stem.client import stem.descriptor +import stem.directory +import stem.prereq +import stem.util.enum -from stem import Flag -from stem.util import log +from stem.client.datatype import RelayCommand +from stem.util import log, str_tools + +try: + # account for urllib's change between python 2.x and 3.x + import urllib.request as urllib +except ImportError: + import urllib2 as urllib + +Compression = stem.util.enum.Enum( + ('PLAINTEXT', 'identity'), + ('GZIP', 'gzip'), # can also be 'deflate' + ('ZSTD', 'x-zstd'), + ('LZMA', 'x-tor-lzma'), +) # Tor has a limited number of descriptors we can fetch explicitly by their # fingerprint or hashes due to a limit on the url length by squid proxies. MAX_FINGERPRINTS = 96 -MAX_MICRODESCRIPTOR_HASHES = 92 +MAX_MICRODESCRIPTOR_HASHES = 90 -# We commonly only want authorities that vote in the consensus, and hence have -# a v3ident. - -HAS_V3IDENT = lambda auth: auth.v3ident is not None +SINGLETON_DOWNLOADER = None -def _guess_descriptor_type(resource): - # Attempts to determine the descriptor type based on the resource url. This - # raises a ValueError if the resource isn't recognized. +def get_instance(): + """ + Provides the singleton :class:`~stem.descriptor.remote.DescriptorDownloader` + used for the following functions... 
- if resource.startswith('/tor/server/'): - return 'server-descriptor 1.0' - elif resource.startswith('/tor/extra/'): - return 'extra-info 1.0' - elif resource.startswith('/tor/micro/'): - return 'microdescriptor 1.0' - elif resource.startswith('/tor/status-vote/'): - return 'network-status-consensus-3 1.0' - elif resource.startswith('/tor/keys/'): - return 'dir-key-certificate-3 1.0' - else: - raise ValueError("Unable to determine the descriptor type for '%s'" % resource) + * :func:`stem.descriptor.remote.get_server_descriptors` + * :func:`stem.descriptor.remote.get_extrainfo_descriptors` + * :func:`stem.descriptor.remote.get_consensus` + + .. versionadded:: 1.5.0 + + :returns: singleton :class:`~stem.descriptor.remote.DescriptorDownloader` instance + """ + + global SINGLETON_DOWNLOADER + + if SINGLETON_DOWNLOADER is None: + SINGLETON_DOWNLOADER = DescriptorDownloader() + + return SINGLETON_DOWNLOADER + + +def their_server_descriptor(**query_args): + """ + Provides the server descriptor of the relay we're downloading from. + + .. versionadded:: 1.7.0 + + :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the server descriptors + """ + + return get_instance().their_server_descriptor(**query_args) + + +def get_server_descriptors(fingerprints = None, **query_args): + """ + Shorthand for + :func:`~stem.descriptor.remote.DescriptorDownloader.get_server_descriptors` + on our singleton instance. + + .. versionadded:: 1.5.0 + """ + + return get_instance().get_server_descriptors(fingerprints, **query_args) + + +def get_extrainfo_descriptors(fingerprints = None, **query_args): + """ + Shorthand for + :func:`~stem.descriptor.remote.DescriptorDownloader.get_extrainfo_descriptors` + on our singleton instance. + + .. 
versionadded:: 1.5.0 + """ + + return get_instance().get_extrainfo_descriptors(fingerprints, **query_args) + + +def get_consensus(authority_v3ident = None, microdescriptor = False, **query_args): + """ + Shorthand for + :func:`~stem.descriptor.remote.DescriptorDownloader.get_consensus` + on our singleton instance. + + .. versionadded:: 1.5.0 + """ + + return get_instance().get_consensus(authority_v3ident, microdescriptor, **query_args) class Query(object): @@ -136,27 +220,27 @@ class Query(object): from stem.descriptor.remote import Query query = Query( - '/tor/server/all.z', + '/tor/server/all', block = True, timeout = 30, ) - print 'Current relays:' + print('Current relays:') if not query.error: for desc in query: - print desc.fingerprint + print(desc.fingerprint) else: - print 'Unable to retrieve the server descriptors: %s' % query.error + print('Unable to retrieve the server descriptors: %s' % query.error) ... while iterating fails silently... :: - print 'Current relays:' + print('Current relays:') - for desc in Query('/tor/server/all.z', 'server-descriptor 1.0'): - print desc.fingerprint + for desc in Query('/tor/server/all', 'server-descriptor 1.0'): + print(desc.fingerprint) In either case exceptions are available via our 'error' attribute. @@ -165,30 +249,64 @@ class Query(object): `_). Commonly useful ones include... 
- ===================================== =========== - Resource Description - ===================================== =========== - /tor/server/all.z all present server descriptors - /tor/server/fp/++.z server descriptors with the given fingerprints - /tor/extra/all.z all present extrainfo descriptors - /tor/extra/fp/++.z extrainfo descriptors with the given fingerprints - /tor/micro/d/-.z microdescriptors with the given hashes - /tor/status-vote/current/consensus.z present consensus - /tor/keys/all.z key certificates for the authorities - /tor/keys/fp/+.z key certificates for specific authorities - ===================================== =========== + =============================================== =========== + Resource Description + =============================================== =========== + /tor/server/all all present server descriptors + /tor/server/fp/++ server descriptors with the given fingerprints + /tor/extra/all all present extrainfo descriptors + /tor/extra/fp/++ extrainfo descriptors with the given fingerprints + /tor/micro/d/- microdescriptors with the given hashes + /tor/status-vote/current/consensus present consensus + /tor/status-vote/current/consensus-microdesc present microdescriptor consensus + /tor/keys/all key certificates for the authorities + /tor/keys/fp/+ key certificates for specific authorities + =============================================== =========== - The '.z' suffix can be excluded to get a plaintext rather than compressed - response. Compression is handled transparently, so this shouldn't matter to - the caller. + **ZSTD** compression requires `zstandard + `_, and **LZMA** requires the `lzma + module `_. - :var str resource: resource being fetched, such as '/tor/server/all.z' + For legacy reasons if our resource has a '.z' suffix then our **compression** + argument is overwritten with Compression.GZIP. + + .. versionchanged:: 1.7.0 + Added support for downloading from ORPorts. + + .. 
versionchanged:: 1.7.0 + Added the compression argument. + + .. versionchanged:: 1.7.0 + Added the reply_headers attribute. + + The class this provides changed between Python versions. In python2 + this was called httplib.HTTPMessage, whereas in python3 the class was + renamed to http.client.HTTPMessage. + + .. versionchanged:: 1.7.0 + Endpoints are now expected to be :class:`~stem.DirPort` or + :class:`~stem.ORPort` instances. Usage of tuples for this + argument is deprecated and will be removed in the future. + + .. versionchanged:: 1.7.0 + Avoid downloading from tor26. This directory authority throttles its + DirPort to such an extent that requests either time out or take on the + order of minutes. + + .. versionchanged:: 1.7.0 + Avoid downloading from Bifroest. This is the bridge authority so it + doesn't vote in the consensus, and apparently times out frequently. + + :var str resource: resource being fetched, such as '/tor/server/all' :var str descriptor_type: type of descriptors being fetched (for options see :func:`~stem.descriptor.__init__.parse_file`), this is guessed from the resource if **None** - :var list endpoints: (address, dirport) tuples of the authority or mirror - we're querying, this uses authorities if undefined + :var list endpoints: :class:`~stem.DirPort` or :class:`~stem.ORPort` of the + authority or mirror we're querying, this uses authorities if undefined + :var list compression: list of :data:`stem.descriptor.remote.Compression` + we're willing to accept, when none are mutually supported downloads fall + back to Compression.PLAINTEXT :var int retries: number of times to attempt the request if downloading it fails :var bool fall_back_to_authority: when retrying request issues the last @@ -197,11 +315,10 @@ class Query(object): :var str content: downloaded descriptor content :var Exception error: exception if a problem occured :var bool is_done: flag that indicates if our request has finished - :var str download_url: last url used to 
download the descriptor, this is - unset until we've actually made a download attempt :var float start_time: unix timestamp when we first started running - :var float timeout: duration before we'll time out our request + :var http.client.HTTPMessage reply_headers: headers provided in the response, + **None** if we haven't yet made our request :var float runtime: time our query took, this is **None** if it's not yet finished @@ -211,23 +328,58 @@ class Query(object): which to parse a :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` :var dict kwargs: additional arguments for the descriptor constructor + Following are only applicable when downloading from a + :class:`~stem.DirPort`... + + :var float timeout: duration before we'll time out our request + :var str download_url: last url used to download the descriptor, this is + unset until we've actually made a download attempt + :param bool start: start making the request when constructed (default is **True**) :param bool block: only return after the request has been completed, this is the same as running **query.run(True)** (default is **False**) """ - def __init__(self, resource, descriptor_type = None, endpoints = None, retries = 2, fall_back_to_authority = False, timeout = None, start = True, block = False, validate = False, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs): + def __init__(self, resource, descriptor_type = None, endpoints = None, compression = None, retries = 2, fall_back_to_authority = False, timeout = None, start = True, block = False, validate = False, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs): if not resource.startswith('/'): raise ValueError("Resources should start with a '/': %s" % resource) - self.resource = resource + if resource.endswith('.z'): + compression = [Compression.GZIP] + resource = resource[:-2] + elif compression is None: + compression = [Compression.PLAINTEXT] + else: + if isinstance(compression, str): + compression = 
[compression] # caller provided only a single option + + if Compression.ZSTD in compression and not stem.prereq.is_zstd_available(): + compression.remove(Compression.ZSTD) + + if Compression.LZMA in compression and not stem.prereq.is_lzma_available(): + compression.remove(Compression.LZMA) + + if not compression: + compression = [Compression.PLAINTEXT] if descriptor_type: self.descriptor_type = descriptor_type else: self.descriptor_type = _guess_descriptor_type(resource) - self.endpoints = endpoints if endpoints else [] + self.endpoints = [] + + if endpoints: + for endpoint in endpoints: + if isinstance(endpoint, tuple) and len(endpoint) == 2: + self.endpoints.append(stem.DirPort(endpoint[0], endpoint[1])) # TODO: remove this in stem 2.0 + elif isinstance(endpoint, (stem.ORPort, stem.DirPort)): + self.endpoints.append(endpoint) + else: + raise ValueError("Endpoints must be an stem.ORPort, stem.DirPort, or two value tuple. '%s' is a %s." % (endpoint, type(endpoint).__name__)) + + self.resource = resource + self.compression = compression self.retries = retries self.fall_back_to_authority = fall_back_to_authority @@ -242,6 +394,7 @@ class Query(object): self.validate = validate self.document_handler = document_handler + self.reply_headers = None self.kwargs = kwargs self._downloader_thread = None @@ -261,9 +414,9 @@ class Query(object): with self._downloader_thread_lock: if self._downloader_thread is None: self._downloader_thread = threading.Thread( - name = 'Descriptor Query', + name = 'Descriptor query', target = self._download_descriptors, - args = (self.retries,) + args = (self.retries, self.timeout) ) self._downloader_thread.setDaemon(True) @@ -332,10 +485,10 @@ class Query(object): for desc in self._run(True): yield desc - def _pick_url(self, use_authority = False): + def _pick_endpoint(self, use_authority = False): """ - Provides a url that can be queried. If we have multiple endpoints then one - will be picked randomly. + Provides an endpoint to query. 
If we have multiple endpoints then one + is picked at random. :param bool use_authority: ignores our endpoints and uses a directory authority instead @@ -344,34 +497,35 @@ class Query(object): """ if use_authority or not self.endpoints: - authority = random.choice(filter(HAS_V3IDENT, get_authorities().values())) - address, dirport = authority.address, authority.dir_port + picked = random.choice([auth for auth in stem.directory.Authority.from_cache().values() if auth.nickname not in ('tor26', 'Bifroest')]) + return stem.DirPort(picked.address, picked.dir_port) else: - address, dirport = random.choice(self.endpoints) + return random.choice(self.endpoints) - return 'http://%s:%i/%s' % (address, dirport, self.resource.lstrip('/')) - - def _download_descriptors(self, retries): + def _download_descriptors(self, retries, timeout): try: - use_authority = retries == 0 and self.fall_back_to_authority - self.download_url = self._pick_url(use_authority) - self.start_time = time.time() - response = urllib.urlopen(self.download_url, timeout = self.timeout).read() + endpoint = self._pick_endpoint(use_authority = retries == 0 and self.fall_back_to_authority) - if self.download_url.endswith('.z'): - response = zlib.decompress(response) - - self.content = response.strip() + if isinstance(endpoint, stem.ORPort): + self.content, self.reply_headers = _download_from_orport(endpoint, self.compression, self.resource) + elif isinstance(endpoint, stem.DirPort): + self.download_url = 'http://%s:%i/%s' % (endpoint.address, endpoint.port, self.resource.lstrip('/')) + self.content, self.reply_headers = _download_from_dirport(self.download_url, self.compression, timeout) + else: + raise ValueError("BUG: endpoints can only be ORPorts or DirPorts, '%s' was a %s" % (endpoint, type(endpoint).__name__)) self.runtime = time.time() - self.start_time log.trace("Descriptors retrieved from '%s' in %0.2fs" % (self.download_url, self.runtime)) except: exc = sys.exc_info()[1] - if retries > 0: + if timeout 
is not None: + timeout -= time.time() - self.start_time + + if retries > 0 and (timeout is None or timeout > 0): log.debug("Unable to download descriptors from '%s' (%i retries remaining): %s" % (self.download_url, retries, exc)) - return self._download_descriptors(retries - 1) + return self._download_descriptors(retries - 1, timeout) else: log.debug("Unable to download descriptors from '%s': %s" % (self.download_url, exc)) self.error = exc @@ -394,8 +548,8 @@ class DescriptorDownloader(object): def __init__(self, use_mirrors = False, **default_args): self._default_args = default_args - authorities = filter(HAS_V3IDENT, get_authorities().values()) - self._endpoints = [(auth.address, auth.dir_port) for auth in authorities] + directories = list(stem.directory.Authority.from_cache().values()) + self._endpoints = [(directory.address, directory.dir_port) for directory in directories] if use_mirrors: try: @@ -416,13 +570,13 @@ class DescriptorDownloader(object): :raises: **Exception** if unable to determine the directory mirrors """ - authorities = filter(HAS_V3IDENT, get_authorities().values()) - new_endpoints = set([(auth.address, auth.dir_port) for auth in authorities]) + directories = stem.directory.Authority.from_cache().values() + new_endpoints = set([(directory.address, directory.dir_port) for directory in directories]) consensus = list(self.get_consensus(document_handler = stem.descriptor.DocumentHandler.DOCUMENT).run())[0] for desc in consensus.routers.values(): - if Flag.V2DIR in desc.flags: + if stem.Flag.V2DIR in desc.flags: new_endpoints.add((desc.address, desc.dir_port)) # we need our endpoints to be a list rather than set for random.choice() @@ -431,11 +585,25 @@ class DescriptorDownloader(object): return consensus + def their_server_descriptor(self, **query_args): + """ + Provides the server descriptor of the relay we're downloading from. + + .. 
versionadded:: 1.7.0 + + :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the server descriptors + """ + + return self.query('/tor/server/authority', **query_args) + def get_server_descriptors(self, fingerprints = None, **query_args): """ Provides the server descriptors with the given fingerprints. If no - fingerprints are provided then this returns all descriptors in the present - consensus. + fingerprints are provided then this returns all descriptors known + by the relay. :param str,list fingerprints: fingerprint or list of fingerprints to be retrieved, gets all descriptors if **None** @@ -448,7 +616,7 @@ class DescriptorDownloader(object): fingerprints (this is due to a limit on the url length by squid proxies). """ - resource = '/tor/server/all.z' + resource = '/tor/server/all' if isinstance(fingerprints, str): fingerprints = [fingerprints] @@ -457,7 +625,7 @@ class DescriptorDownloader(object): if len(fingerprints) > MAX_FINGERPRINTS: raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS) - resource = '/tor/server/fp/%s.z' % '+'.join(fingerprints) + resource = '/tor/server/fp/%s' % '+'.join(fingerprints) return self.query(resource, **query_args) @@ -478,7 +646,7 @@ class DescriptorDownloader(object): fingerprints (this is due to a limit on the url length by squid proxies). 
""" - resource = '/tor/extra/all.z' + resource = '/tor/extra/all' if isinstance(fingerprints, str): fingerprints = [fingerprints] @@ -487,10 +655,12 @@ class DescriptorDownloader(object): if len(fingerprints) > MAX_FINGERPRINTS: raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS) - resource = '/tor/extra/fp/%s.z' % '+'.join(fingerprints) + resource = '/tor/extra/fp/%s' % '+'.join(fingerprints) return self.query(resource, **query_args) + # TODO: drop in stem 2.x + def get_microdescriptors(self, hashes, **query_args): """ Provides the microdescriptors with the given hashes. To get these see the @@ -499,6 +669,10 @@ class DescriptorDownloader(object): that these are only provided via a microdescriptor consensus (such as 'cached-microdesc-consensus' in your data directory). + .. deprecated:: 1.5.0 + This function has never worked, as it was never implemented in tor + (:trac:`9271`). + :param str,list hashes: microdescriptor hash or list of hashes to be retrieved :param query_args: additional arguments for the @@ -516,16 +690,21 @@ class DescriptorDownloader(object): if len(hashes) > MAX_MICRODESCRIPTOR_HASHES: raise ValueError('Unable to request more than %i microdescriptors at a time by their hashes' % MAX_MICRODESCRIPTOR_HASHES) - return self.query('/tor/micro/d/%s.z' % '-'.join(hashes), **query_args) + return self.query('/tor/micro/d/%s' % '-'.join(hashes), **query_args) - def get_consensus(self, authority_v3ident = None, **query_args): + def get_consensus(self, authority_v3ident = None, microdescriptor = False, **query_args): """ Provides the present router status entries. + .. versionchanged:: 1.5.0 + Added the microdescriptor argument. + :param str authority_v3ident: fingerprint of the authority key for which to get the consensus, see `'v3ident' in tor's config.c - `_ + `_ for the values. 
+ :param bool microdescriptor: provides the microdescriptor consensus if + **True**, standard consensus otherwise :param query_args: additional arguments for the :class:`~stem.descriptor.remote.Query` constructor @@ -533,18 +712,31 @@ class DescriptorDownloader(object): entries """ - resource = '/tor/status-vote/current/consensus' + if microdescriptor: + resource = '/tor/status-vote/current/consensus-microdesc' + else: + resource = '/tor/status-vote/current/consensus' if authority_v3ident: resource += '/%s' % authority_v3ident - return self.query(resource + '.z', **query_args) + consensus_query = self.query(resource, **query_args) + + # if we're performing validation then check that it's signed by the + # authority key certificates + + if consensus_query.validate and consensus_query.document_handler == stem.descriptor.DocumentHandler.DOCUMENT and stem.prereq.is_crypto_available(): + consensus = list(consensus_query.run())[0] + key_certs = self.get_key_certificates(**query_args).run() + consensus.validate_signatures(key_certs) + + return consensus_query def get_vote(self, authority, **query_args): """ Provides the present vote for a given directory authority. - :param stem.descriptor.remote.DirectoryAuthority authority: authority for which to retrieve a vote for + :param stem.directory.Authority authority: authority for which to retrieve a vote for :param query_args: additional arguments for the :class:`~stem.descriptor.remote.Query` constructor @@ -557,7 +749,7 @@ class DescriptorDownloader(object): if 'endpoint' not in query_args: query_args['endpoints'] = [(authority.address, authority.dir_port)] - return self.query(resource + '.z', **query_args) + return self.query(resource, **query_args) def get_key_certificates(self, authority_v3idents = None, **query_args): """ @@ -579,7 +771,7 @@ class DescriptorDownloader(object): squid proxies). 
""" - resource = '/tor/keys/all.z' + resource = '/tor/keys/all' if isinstance(authority_v3idents, str): authority_v3idents = [authority_v3idents] @@ -588,7 +780,7 @@ class DescriptorDownloader(object): if len(authority_v3idents) > MAX_FINGERPRINTS: raise ValueError('Unable to request more than %i key certificates at a time by their identity fingerprints' % MAX_FINGERPRINTS) - resource = '/tor/keys/fp/%s.z' % '+'.join(authority_v3idents) + resource = '/tor/keys/fp/%s' % '+'.join(authority_v3idents) return self.query(resource, **query_args) @@ -596,7 +788,11 @@ class DescriptorDownloader(object): """ Issues a request for the given resource. - :param str resource: resource being fetched, such as '/tor/server/all.z' + .. versionchanged:: 1.7.0 + The **fall_back_to_authority** default when using this method is now + **False**, like the :class:`~stem.descriptor.Query` class. + + :param str resource: resource being fetched, such as '/tor/server/all' :param query_args: additional arguments for the :class:`~stem.descriptor.remote.Query` constructor @@ -612,166 +808,178 @@ class DescriptorDownloader(object): if 'endpoints' not in args: args['endpoints'] = self._endpoints - if 'fall_back_to_authority' not in args: - args['fall_back_to_authority'] = True - - return Query( - resource, - **args - ) + return Query(resource, **args) -class DirectoryAuthority(object): +def _download_from_orport(endpoint, compression, resource): """ - Tor directory authority, a special type of relay `hardcoded into tor - `_ - that enumerates the other relays within the network. + Downloads descriptors from the given orport. Payload is just like an http + response (headers and all)... - At a very high level tor works as follows... + :: - 1. A volunteer starts up a new tor relay, during which it sends a `server - descriptor `_ to each of the directory - authorities. 
+ HTTP/1.0 200 OK + Date: Mon, 23 Apr 2018 18:43:47 GMT + Content-Type: text/plain + X-Your-Address-Is: 216.161.254.25 + Content-Encoding: identity + Expires: Wed, 25 Apr 2018 18:43:47 GMT - 2. Each hour the directory authorities make a `vote `_ - that says who they think the active relays are in the network and some - attributes about them. + router dannenberg 193.23.244.244 443 0 80 + identity-ed25519 + ... rest of the descriptor content... - 3. The directory authorities send each other their votes, and compile that - into the `consensus `_. This document is very similar - to the votes, the only difference being that the majority of the - authorities agree upon and sign this document. The idividual relay entries - in the vote or consensus is called `router status entries - `_. + :param stem.ORPort endpoint: endpoint to download from + :param list compression: compression methods for the request + :param str resource: descriptor resource to download - 4. Tor clients (people using the service) download the consensus from one of - the authorities or a mirror to determine the active relays within the - network. They in turn use this to construct their circuits and use the - network. + :returns: two value tuple of the form (data, reply_headers) - .. versionchanged:: 1.3.0 - Added the is_bandwidth_authority attribute. 
- - :var str nickname: nickname of the authority - :var str address: IP address of the authority, currently they're all IPv4 but - this may not always be the case - :var int or_port: port on which the relay services relay traffic - :var int dir_port: port on which directory information is available - :var str fingerprint: relay fingerprint - :var str v3ident: identity key fingerprint used to sign votes and consensus + :raises: + * :class:`stem.ProtocolError` if not a valid descriptor response + * :class:`stem.SocketError` if unable to establish a connection """ - def __init__(self, nickname = None, address = None, or_port = None, dir_port = None, is_bandwidth_authority = False, fingerprint = None, v3ident = None): - self.nickname = nickname - self.address = address - self.or_port = or_port - self.dir_port = dir_port - self.is_bandwidth_authority = is_bandwidth_authority - self.fingerprint = fingerprint - self.v3ident = v3ident + link_protocols = endpoint.link_protocols if endpoint.link_protocols else [3] + + with stem.client.Relay.connect(endpoint.address, endpoint.port, link_protocols) as relay: + with relay.create_circuit() as circ: + request = '\r\n'.join(( + 'GET %s HTTP/1.0' % resource, + 'Accept-Encoding: %s' % ', '.join(compression), + 'User-Agent: %s' % stem.USER_AGENT, + )) + '\r\n\r\n' + + circ.send(RelayCommand.BEGIN_DIR, stream_id = 1) + response = b''.join([cell.data for cell in circ.send(RelayCommand.DATA, request, stream_id = 1)]) + first_line, data = response.split(b'\r\n', 1) + header_data, body_data = data.split(b'\r\n\r\n', 1) + + if not first_line.startswith(b'HTTP/1.0 2'): + raise stem.ProtocolError("Response should begin with HTTP success, but was '%s'" % str_tools._to_unicode(first_line)) + + headers = {} + + for line in str_tools._to_unicode(header_data).splitlines(): + if ': ' not in line: + raise stem.ProtocolError("'%s' is not a HTTP header:\n\n%s" % line) + + key, value = line.split(': ', 1) + headers[key] = value + + return 
_decompress(body_data, headers.get('Content-Encoding')), headers -DIRECTORY_AUTHORITIES = { - 'moria1': DirectoryAuthority( - nickname = 'moria1', - address = '128.31.0.39', - or_port = 9101, - dir_port = 9131, - is_bandwidth_authority = True, - fingerprint = '9695DFC35FFEB861329B9F1AB04C46397020CE31', - v3ident = 'D586D18309DED4CD6D57C18FDB97EFA96D330566', - ), - 'tor26': DirectoryAuthority( - nickname = 'tor26', - address = '86.59.21.38', - or_port = 443, - dir_port = 80, - is_bandwidth_authority = True, - fingerprint = '847B1F850344D7876491A54892F904934E4EB85D', - v3ident = '14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4', - ), - 'dizum': DirectoryAuthority( - nickname = 'dizum', - address = '194.109.206.212', - or_port = 443, - dir_port = 80, - is_bandwidth_authority = False, - fingerprint = '7EA6EAD6FD83083C538F44038BBFA077587DD755', - v3ident = 'E8A9C45EDE6D711294FADF8E7951F4DE6CA56B58', - ), - 'Tonga': DirectoryAuthority( - nickname = 'Tonga', - address = '82.94.251.203', - or_port = 443, - dir_port = 80, - is_bandwidth_authority = False, - fingerprint = '4A0CCD2DDC7995083D73F5D667100C8A5831F16D', - v3ident = None, # does not vote in the consensus - ), - 'gabelmoo': DirectoryAuthority( - nickname = 'gabelmoo', - address = '131.188.40.189', - or_port = 443, - dir_port = 80, - is_bandwidth_authority = True, - fingerprint = 'F2044413DAC2E02E3D6BCF4735A19BCA1DE97281', - v3ident = 'ED03BB616EB2F60BEC80151114BB25CEF515B226', - ), - 'dannenberg': DirectoryAuthority( - nickname = 'dannenberg', - address = '193.23.244.244', - or_port = 443, - dir_port = 80, - is_bandwidth_authority = False, - fingerprint = '7BE683E65D48141321C5ED92F075C55364AC7123', - v3ident = '585769C78764D58426B8B52B6651A5A71137189A', - ), - 'urras': DirectoryAuthority( - nickname = 'urras', - address = '208.83.223.34', - or_port = 80, - dir_port = 443, - is_bandwidth_authority = False, - fingerprint = '0AD3FA884D18F89EEA2D89C019379E0E7FD94417', - v3ident = '80550987E1D626E3EBA5E5E75A458DE0626D088C', - 
), - 'maatuska': DirectoryAuthority( - nickname = 'maatuska', - address = '171.25.193.9', - or_port = 80, - dir_port = 443, - is_bandwidth_authority = True, - fingerprint = 'BD6A829255CB08E66FBE7D3748363586E46B3810', - v3ident = '49015F787433103580E3B66A1707A00E60F2D15B', - ), - 'Faravahar': DirectoryAuthority( - nickname = 'Faravahar', - address = '154.35.175.225', - or_port = 443, - dir_port = 80, - is_bandwidth_authority = False, - fingerprint = 'CF6D0AAFB385BE71B8E111FC5CFF4B47923733BC', - v3ident = 'EFCBE720AB3A82B99F9E953CD5BF50F7EEFC7B97', - ), - 'longclaw': DirectoryAuthority( - nickname = 'longclaw', - address = '199.254.238.52', - or_port = 443, - dir_port = 80, - is_bandwidth_authority = True, - fingerprint = '74A910646BCEEFBCD2E874FC1DC997430F968145', - v3ident = '23D15D965BC35114467363C165C4F724B64B4F66', - ), -} +def _download_from_dirport(url, compression, timeout): + """ + Downloads descriptors from the given url. + + :param str url: dirport url from which to download from + :param list compression: compression methods for the request + :param float timeout: duration before we'll time out our request + + :returns: two value tuple of the form (data, reply_headers) + + :raises: + * **socket.timeout** if our request timed out + * **urllib2.URLError** for most request failures + """ + + response = urllib.urlopen( + urllib.Request( + url, + headers = { + 'Accept-Encoding': ', '.join(compression), + 'User-Agent': stem.USER_AGENT, + } + ), + timeout = timeout, + ) + + return _decompress(response.read(), response.headers.get('Content-Encoding')), response.headers + + +def _decompress(data, encoding): + """ + Decompresses descriptor data. + + Tor doesn't include compression headers. As such when using gzip we + need to include '32' for automatic header detection... + + https://stackoverflow.com/questions/3122145/zlib-error-error-3-while-decompressing-incorrect-header-check/22310760#22310760 + + ... and with zstd we need to use the streaming API. 
+ + :param bytes data: data we received + :param str encoding: 'Content-Encoding' header of the response + + :raises: + * **ValueError** if encoding is unrecognized + * **ImportError** if missing the decompression module + """ + + if encoding == Compression.PLAINTEXT: + return data.strip() + elif encoding in (Compression.GZIP, 'deflate'): + return zlib.decompress(data, zlib.MAX_WBITS | 32).strip() + elif encoding == Compression.ZSTD: + if not stem.prereq.is_zstd_available(): + raise ImportError('Decompressing zstd data requires https://pypi.python.org/pypi/zstandard') + + import zstd + output_buffer = io.BytesIO() + + with zstd.ZstdDecompressor().write_to(output_buffer) as decompressor: + decompressor.write(data) + + return output_buffer.getvalue().strip() + elif encoding == Compression.LZMA: + if not stem.prereq.is_lzma_available(): + raise ImportError('Decompressing lzma data requires https://docs.python.org/3/library/lzma.html') + + import lzma + return lzma.decompress(data).strip() + else: + raise ValueError("'%s' isn't a recognized type of encoding" % encoding) + + +def _guess_descriptor_type(resource): + # Attempts to determine the descriptor type based on the resource url. This + # raises a ValueError if the resource isn't recognized. + + if resource.startswith('/tor/server/'): + return 'server-descriptor 1.0' + elif resource.startswith('/tor/extra/'): + return 'extra-info 1.0' + elif resource.startswith('/tor/micro/'): + return 'microdescriptor 1.0' + elif resource.startswith('/tor/status-vote/current/consensus-microdesc'): + return 'network-status-microdesc-consensus-3 1.0' + elif resource.startswith('/tor/status-vote/'): + return 'network-status-consensus-3 1.0' + elif resource.startswith('/tor/keys/'): + return 'dir-key-certificate-3 1.0' + else: + raise ValueError("Unable to determine the descriptor type for '%s'" % resource) def get_authorities(): """ - Provides the Tor directory authority information as of **Tor on 11/21/14**. 
- The directory information hardcoded into Tor and occasionally changes, so the - information this provides might not necessarily match your version of tor. + Provides cached Tor directory authority information. The directory + information hardcoded into Tor and occasionally changes, so the information + this provides might not necessarily match your version of tor. - :returns: dict of str nicknames to :class:`~stem.descriptor.remote.DirectoryAuthority` instances + .. deprecated:: 1.7.0 + Use stem.directory.Authority.from_cache() instead. + + :returns: **dict** of **str** nicknames to :class:`~stem.directory.Authority` instances """ - return dict(DIRECTORY_AUTHORITIES) + return DirectoryAuthority.from_cache() + + +# TODO: drop aliases in stem 2.0 + +Directory = stem.directory.Directory +DirectoryAuthority = stem.directory.Authority +FallbackDirectory = stem.directory.Fallback diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/router_status_entry.py b/Shared/lib/python3.4/site-packages/stem/descriptor/router_status_entry.py index 561f855..b4cd506 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/router_status_entry.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/router_status_entry.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -29,12 +29,19 @@ import stem.util.str_tools from stem.descriptor import ( KEYWORD_LINE, Descriptor, + _descriptor_content, _value, _values, - _get_descriptor_components, + _descriptor_components, + _parse_protocol_line, _read_until_keywords, + _random_nickname, + _random_ipv4_address, + _random_date, ) +_parse_pr_line = _parse_protocol_line('pr', 'protocols') + def _parse_file(document_file, validate, entry_class, entry_keyword = 'r', start_position = None, end_position = None, section_end_keywords = (), extra_args = ()): """ @@ -166,17 +173,12 @@ def 
_parse_a_line(descriptor, entries): raise ValueError("%s 'a' line must be of the form '[address]:[ports]': a %s" % (descriptor._name(), value)) address, port = value.rsplit(':', 1) - is_ipv6 = address.startswith('[') and address.endswith(']') - if is_ipv6: - address = address[1:-1] # remove brackets - - if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or - (is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))): + if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True): raise ValueError("%s 'a' line must start with an IPv6 address: a %s" % (descriptor._name(), value)) if stem.util.connection.is_valid_port(port): - or_addresses.append((address, int(port), is_ipv6)) + or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True))) else: raise ValueError("%s 'a' line had an invalid port (%s): a %s" % (descriptor._name(), port, value)) @@ -228,6 +230,11 @@ def _parse_w_line(descriptor, entries): elif not w_comp[0].startswith('Bandwidth='): raise ValueError("%s 'w' line needs to start with a 'Bandwidth=' entry: w %s" % (descriptor._name(), value)) + bandwidth = None + measured = None + is_unmeasured = False + unrecognized_bandwidth_entries = [] + for w_entry in w_comp: if '=' in w_entry: w_key, w_value = w_entry.split('=', 1) @@ -238,25 +245,33 @@ def _parse_w_line(descriptor, entries): if not (w_value and w_value.isdigit()): raise ValueError("%s 'Bandwidth=' entry needs to have a numeric value: w %s" % (descriptor._name(), value)) - descriptor.bandwidth = int(w_value) + bandwidth = int(w_value) elif w_key == 'Measured': if not (w_value and w_value.isdigit()): raise ValueError("%s 'Measured=' entry needs to have a numeric value: w %s" % (descriptor._name(), value)) - descriptor.measured = int(w_value) + measured = int(w_value) elif w_key == 'Unmeasured': if w_value != 
'1': raise ValueError("%s 'Unmeasured=' should only have the value of '1': w %s" % (descriptor._name(), value)) - descriptor.is_unmeasured = True + is_unmeasured = True else: - descriptor.unrecognized_bandwidth_entries.append(w_entry) + unrecognized_bandwidth_entries.append(w_entry) + + descriptor.bandwidth = bandwidth + descriptor.measured = measured + descriptor.is_unmeasured = is_unmeasured + descriptor.unrecognized_bandwidth_entries = unrecognized_bandwidth_entries def _parse_p_line(descriptor, entries): # "p" ("accept" / "reject") PortList - # p reject 1-65535 - # example: p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001 + # + # examples: + # + # p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001 + # p reject 1-65535 value = _value('p', entries) @@ -266,6 +281,29 @@ def _parse_p_line(descriptor, entries): raise ValueError('%s exit policy is malformed (%s): p %s' % (descriptor._name(), exc, value)) +def _parse_id_line(descriptor, entries): + # "id" "ed25519" ed25519-identity + # + # examples: + # + # id ed25519 none + # id ed25519 8RH34kO07Pp+XYwzdoATVyCibIvmbslUjRkAm7J4IA8 + + value = _value('id', entries) + + if value: + if descriptor.document and not descriptor.document.is_vote: + raise ValueError("%s 'id' line should only appear in votes: id %s" % (descriptor._name(), value)) + + value_comp = value.split() + + if len(value_comp) >= 2: + descriptor.identifier_type = value_comp[0] + descriptor.identifier = value_comp[1] + else: + raise ValueError("'id' lines should contain both the key type and digest: id %s" % value) + + def _parse_m_line(descriptor, entries): # "m" methods 1*(algorithm "=" digest) # example: m 8,9,10,11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs @@ -333,7 +371,7 @@ def _base64_to_hex(identity, check_if_fingerprint = True): except (TypeError, binascii.Error): raise ValueError("Unable to decode identity string '%s'" % identity) - fingerprint = binascii.b2a_hex(identity_decoded).upper() + fingerprint = 
binascii.hexlify(identity_decoded).upper() if stem.prereq.is_python_3(): fingerprint = stem.util.str_tools._to_unicode(fingerprint) @@ -400,7 +438,7 @@ class RouterStatusEntry(Descriptor): super(RouterStatusEntry, self).__init__(content, lazy_load = not validate) self.document = document - entries = _get_descriptor_components(content, validate) + entries = _descriptor_components(content, validate) if validate: for keyword in self._required_fields(): @@ -445,9 +483,15 @@ class RouterStatusEntry(Descriptor): return method(str(self).strip(), str(other).strip()) + def __hash__(self): + return hash(str(self).strip()) + def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) @@ -470,6 +514,15 @@ class RouterStatusEntryV2(RouterStatusEntry): 'digest': (None, _parse_r_line), }) + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('r', '%s p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE %s %s 9001 0' % (_random_nickname(), _random_date(), _random_ipv4_address())), + )) + def _name(self, is_plural = False): return 'Router status entries (v2)' if is_plural else 'Router status entry (v2)' @@ -485,9 +538,15 @@ class RouterStatusEntryV2(RouterStatusEntry): return method(str(self).strip(), str(other).strip()) + def __hash__(self): + return hash(str(self).strip()) + def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) @@ -502,18 +561,21 @@ class RouterStatusEntryV3(RouterStatusEntry): :var list or_addresses: **\*** relay's OR addresses, this is a tuple listing of the form (address (**str**), port (**int**), is_ipv6 
(**bool**)) + :var str identifier_type: identity digest key type + :var str identifier: base64 encoded identity digest :var str digest: **\*** router's upper-case hex digest - :var int bandwidth: bandwidth claimed by the relay (in kb/s) - :var int measured: bandwidth measured to be available by the relay, this is a + :var int bandwidth: bandwidth measured to be available by the relay, this is a unit-less heuristic generated by the Bandwidth authoritites to weight relay selection - :var bool is_unmeasured: bandwidth measurement isn't based on three or more + :var int measured: *bandwidth* vote provided by a bandwidth authority + :var bool is_unmeasured: *bandwidth* measurement isn't based on three or more measurements :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting information that isn't yet recognized :var stem.exit_policy.MicroExitPolicy exit_policy: router's exit policy + :var dict protocols: mapping of protocols to their supported versions :var list microdescriptor_hashes: **\*** tuples of two values, the list of consensus methods for generating a set of digests and the 'algorithm => @@ -521,11 +583,19 @@ class RouterStatusEntryV3(RouterStatusEntry): **\*** attribute is either required when we're parsed with validation or has a default value, others are left as **None** if undefined + + .. versionchanged:: 1.5.0 + Added the identifier and identifier_type attributes. + + .. versionchanged:: 1.6.0 + Added the protocols attribute. 
""" ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{ 'digest': (None, _parse_r_line), 'or_addresses': ([], _parse_a_line), + 'identifier_type': (None, _parse_id_line), + 'identifier': (None, _parse_id_line), 'bandwidth': (None, _parse_w_line), 'measured': (None, _parse_w_line), @@ -533,6 +603,7 @@ class RouterStatusEntryV3(RouterStatusEntry): 'unrecognized_bandwidth_entries': ([], _parse_w_line), 'exit_policy': (None, _parse_p_line), + 'protocols': ({}, _parse_pr_line), 'microdescriptor_hashes': ([], _parse_m_line), }) @@ -540,9 +611,21 @@ class RouterStatusEntryV3(RouterStatusEntry): 'a': _parse_a_line, 'w': _parse_w_line, 'p': _parse_p_line, + 'pr': _parse_pr_line, + 'id': _parse_id_line, 'm': _parse_m_line, }) + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('r', '%s p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE %s %s 9001 0' % (_random_nickname(), _random_date(), _random_ipv4_address())), + ('s', 'Fast Named Running Stable Valid'), + )) + def _name(self, is_plural = False): return 'Router status entries (v3)' if is_plural else 'Router status entry (v3)' @@ -550,7 +633,7 @@ class RouterStatusEntryV3(RouterStatusEntry): return ('r', 's') def _single_fields(self): - return ('r', 's', 'v', 'w', 'p') + return ('r', 's', 'v', 'w', 'p', 'pr') def _compare(self, other, method): if not isinstance(other, RouterStatusEntryV3): @@ -558,9 +641,15 @@ class RouterStatusEntryV3(RouterStatusEntry): return method(str(self).strip(), str(other).strip()) + def __hash__(self): + return hash(str(self).strip()) + def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) @@ -573,33 +662,57 @@ class RouterStatusEntryMicroV3(RouterStatusEntry): Information about an 
individual router stored within a microdescriptor flavored network status document. + :var list or_addresses: **\*** relay's OR addresses, this is a tuple listing + of the form (address (**str**), port (**int**), is_ipv6 (**bool**)) :var int bandwidth: bandwidth claimed by the relay (in kb/s) :var int measured: bandwidth measured to be available by the relay :var bool is_unmeasured: bandwidth measurement isn't based on three or more measurements :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting information that isn't yet recognized + :var dict protocols: mapping of protocols to their supported versions :var str digest: **\*** router's hex encoded digest of our corresponding microdescriptor + .. versionchanged:: 1.6.0 + Added the protocols attribute. + + .. versionchanged:: 1.7.0 + Added the or_addresses attribute. + **\*** attribute is either required when we're parsed with validation or has a default value, others are left as **None** if undefined """ ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{ + 'or_addresses': ([], _parse_a_line), 'bandwidth': (None, _parse_w_line), 'measured': (None, _parse_w_line), 'is_unmeasured': (False, _parse_w_line), 'unrecognized_bandwidth_entries': ([], _parse_w_line), + 'protocols': ({}, _parse_pr_line), 'digest': (None, _parse_microdescriptor_m_line), }) PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{ + 'a': _parse_a_line, 'w': _parse_w_line, 'm': _parse_microdescriptor_m_line, + 'pr': _parse_pr_line, }) + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('r', '%s ARIJF2zbqirB9IwsW0mQznccWww %s %s 9001 9030' % (_random_nickname(), _random_date(), _random_ipv4_address())), + ('m', 'aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70'), + ('s', 'Fast Guard HSDir Named Running Stable V2Dir Valid'), + )) + def _name(self, is_plural = False): 
return 'Router status entries (micro v3)' if is_plural else 'Router status entry (micro v3)' @@ -607,7 +720,7 @@ class RouterStatusEntryMicroV3(RouterStatusEntry): return ('r', 's', 'm') def _single_fields(self): - return ('r', 's', 'v', 'w', 'm') + return ('r', 's', 'v', 'w', 'm', 'pr') def _compare(self, other, method): if not isinstance(other, RouterStatusEntryMicroV3): @@ -615,9 +728,15 @@ class RouterStatusEntryMicroV3(RouterStatusEntry): return method(str(self).strip(), str(other).strip()) + def __hash__(self): + return hash(str(self).strip()) + def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/server_descriptor.py b/Shared/lib/python3.4/site-packages/stem/descriptor/server_descriptor.py index b375066..d212459 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/server_descriptor.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/server_descriptor.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -10,8 +10,7 @@ etc). This information is provided from a few sources... * The 'cached-descriptors' file in Tor's data directory. -* Archived descriptors provided by CollecTor - (https://collector.torproject.org/). +* Archived descriptors provided by `CollecTor `_. * Directory authorities and mirrors via their DirPort. @@ -21,6 +20,7 @@ etc). This information is provided from a few sources... ServerDescriptor - Tor server descriptor. |- RelayDescriptor - Server descriptor for a relay. + | +- make_router_status_entry - Creates a router status entry for this descriptor. | |- BridgeDescriptor - Scrubbed server descriptor for a bridge. 
| |- is_scrubbed - checks if our content has been properly scrubbed @@ -29,41 +29,69 @@ etc). This information is provided from a few sources... |- digest - calculates the upper-case hex digest value for our content |- get_annotations - dictionary of content prior to the descriptor entry +- get_annotation_lines - lines that provided the annotations + +.. data:: BridgeDistribution (enum) + + Preferred method of distributing this relay if a bridge. + + .. versionadded:: 1.6.0 + + ===================== =========== + BridgeDistribution Description + ===================== =========== + **ANY** No proference, BridgeDB will pick how the bridge is distributed. + **HTTPS** Provided via the `web interface `_. + **EMAIL** Provided in response to emails to bridges@torproject.org. + **MOAT** Provided in interactive menus within Tor Browser. + **HYPHAE** Provided via a cryptographic invitation-based system. + ===================== =========== """ +import base64 +import binascii import functools import hashlib import re +import stem.descriptor.certificate import stem.descriptor.extrainfo_descriptor import stem.exit_policy import stem.prereq import stem.util.connection +import stem.util.enum import stem.util.str_tools import stem.util.tor_tools import stem.version -from stem import str_type +from stem.descriptor.router_status_entry import RouterStatusEntryV3 from stem.descriptor import ( PGP_BLOCK_END, Descriptor, - _get_descriptor_components, + create_signing_key, + _descriptor_content, + _descriptor_components, _read_until_keywords, _bytes_for_block, _value, _values, _parse_simple_line, + _parse_if_present, _parse_bytes_line, _parse_timestamp_line, _parse_forty_character_hex, + _parse_protocol_line, _parse_key_block, + _append_router_signature, + _random_nickname, + _random_ipv4_address, + _random_date, + _random_crypto_blob, ) -try: - # added in python 3.2 +if stem.prereq._is_lru_cache_available(): from functools import lru_cache -except ImportError: +else: from 
stem.util.lru_cache import lru_cache # relay descriptors must have exactly one of the following @@ -78,6 +106,8 @@ REQUIRED_FIELDS = ( # optional entries that can appear at most once SINGLE_FIELDS = ( + 'identity-ed25519', + 'master-key-ed25519', 'platform', 'fingerprint', 'hibernating', @@ -86,17 +116,36 @@ SINGLE_FIELDS = ( 'read-history', 'write-history', 'eventdns', + 'bridge-distribution-request', 'family', 'caches-extra-info', 'extra-info-digest', 'hidden-service-dir', 'protocols', 'allow-single-hop-exits', + 'tunnelled-dir-server', + 'proto', + 'onion-key-crosscert', 'ntor-onion-key', + 'ntor-onion-key-crosscert', + 'router-sig-ed25519', +) + +BridgeDistribution = stem.util.enum.Enum( + ('ANY', 'any'), + ('HTTPS', 'https'), + ('EMAIL', 'email'), + ('MOAT', 'moat'), + ('HYPHAE', 'hyphae'), ) DEFAULT_IPV6_EXIT_POLICY = stem.exit_policy.MicroExitPolicy('reject 1-65535') REJECT_ALL_POLICY = stem.exit_policy.ExitPolicy('reject *:*') +DEFAULT_BRIDGE_DISTRIBUTION = 'any' + + +def _truncated_b64encode(content): + return stem.util.str_tools._to_unicode(base64.b64encode(content).rstrip(b'=')) def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs): @@ -265,6 +314,17 @@ def _parse_fingerprint_line(descriptor, entries): descriptor.fingerprint = fingerprint +def _parse_extrainfo_digest_line(descriptor, entries): + value = _value('extra-info-digest', entries) + digest_comp = value.split(' ') + + if not stem.util.tor_tools.is_hex_digits(digest_comp[0], 40): + raise ValueError('extra-info-digest should be 40 hex characters: %s' % digest_comp[0]) + + descriptor.extra_info_digest = digest_comp[0] + descriptor.extra_info_sha256_digest = digest_comp[1] if len(digest_comp) >= 2 else None + + def _parse_hibernating_line(descriptor, entries): # "hibernating" 0|1 (in practice only set if one) @@ -276,15 +336,6 @@ def _parse_hibernating_line(descriptor, entries): descriptor.hibernating = value == '1' -def _parse_hidden_service_dir_line(descriptor, entries): - 
value = _value('hidden-service-dir', entries) - - if value: - descriptor.hidden_service_dir = value.split(' ') - else: - descriptor.hidden_service_dir = ['2'] - - def _parse_uptime_line(descriptor, entries): # We need to be tolerant of negative uptimes to accommodate a past tor # bug... @@ -328,19 +379,14 @@ def _parse_or_address_line(descriptor, entries): raise ValueError('or-address line missing a colon: %s' % line) address, port = entry.rsplit(':', 1) - is_ipv6 = address.startswith('[') and address.endswith(']') - if is_ipv6: - address = address[1:-1] # remove brackets - - if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or - (is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))): + if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True): raise ValueError('or-address line has a malformed address: %s' % line) if not stem.util.connection.is_valid_port(port): raise ValueError('or-address line has a malformed port: %s' % line) - or_addresses.append((address, int(port), is_ipv6)) + or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True))) descriptor.or_addresses = or_addresses @@ -364,7 +410,7 @@ def _parse_history_line(keyword, history_end_attribute, history_interval_attribu def _parse_exit_policy(descriptor, entries): if hasattr(descriptor, '_unparsed_exit_policy'): - if descriptor._unparsed_exit_policy == [str_type('reject *:*')]: + if descriptor._unparsed_exit_policy and stem.util.str_tools._to_unicode(descriptor._unparsed_exit_policy[0]) == 'reject *:*': descriptor.exit_policy = REJECT_ALL_POLICY else: descriptor.exit_policy = stem.exit_policy.ExitPolicy(*descriptor._unparsed_exit_policy) @@ -372,20 +418,39 @@ def _parse_exit_policy(descriptor, entries): del descriptor._unparsed_exit_policy +def _parse_identity_ed25519_line(descriptor, entries): + 
_parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT')(descriptor, entries) + + if descriptor.ed25519_certificate: + cert_lines = descriptor.ed25519_certificate.split('\n') + + if cert_lines[0] == '-----BEGIN ED25519 CERT-----' and cert_lines[-1] == '-----END ED25519 CERT-----': + descriptor.certificate = stem.descriptor.certificate.Ed25519Certificate.parse(''.join(cert_lines[1:-1])) + + +_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_master_key') +_parse_master_key_ed25519_for_hash_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash') _parse_contact_line = _parse_bytes_line('contact', 'contact') _parse_published_line = _parse_timestamp_line('published', 'published') -_parse_extrainfo_digest_line = _parse_forty_character_hex('extra-info-digest', 'extra_info_digest') _parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values') _parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values') -_parse_ipv6_policy_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('ipv6-policy', entries))) -_parse_allow_single_hop_exits_line = lambda descriptor, entries: setattr(descriptor, 'allow_single_hop_exits', 'allow_single_hop_exits' in entries) -_parse_caches_extra_info_line = lambda descriptor, entries: setattr(descriptor, 'extra_info_cache', 'extra_info_cache' in entries) -_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', set(_value('family', entries).split(' '))) -_parse_eventdns_line = lambda descriptor, entries: setattr(descriptor, 'eventdns', _value('eventdns', entries) == '1') +_parse_ipv6_policy_line = _parse_simple_line('ipv6-policy', 'exit_policy_v6', func = lambda v: stem.exit_policy.MicroExitPolicy(v)) 
+_parse_allow_single_hop_exits_line = _parse_if_present('allow-single-hop-exits', 'allow_single_hop_exits') +_parse_tunneled_dir_server_line = _parse_if_present('tunnelled-dir-server', 'allow_tunneled_dir_requests') +_parse_proto_line = _parse_protocol_line('proto', 'protocols') +_parse_hidden_service_dir_line = _parse_if_present('hidden-service-dir', 'is_hidden_service_dir') +_parse_caches_extra_info_line = _parse_if_present('caches-extra-info', 'extra_info_cache') +_parse_bridge_distribution_request_line = _parse_simple_line('bridge-distribution-request', 'bridge_distribution') +_parse_family_line = _parse_simple_line('family', 'family', func = lambda v: set(v.split(' '))) +_parse_eventdns_line = _parse_simple_line('eventdns', 'eventdns', func = lambda v: v == '1') _parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY') +_parse_onion_key_crosscert_line = _parse_key_block('onion-key-crosscert', 'onion_key_crosscert', 'CROSSCERT') _parse_signing_key_line = _parse_key_block('signing-key', 'signing_key', 'RSA PUBLIC KEY') _parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE') _parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key') +_parse_ntor_onion_key_crosscert_line = _parse_key_block('ntor-onion-key-crosscert', 'ntor_onion_key_crosscert', 'ED25519 CERT', 'ntor_onion_key_crosscert_sign') +_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature') +_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256') _parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest') @@ -399,7 +464,7 @@ class ServerDescriptor(Descriptor): :var str address: **\*** IPv4 address of the relay :var int or_port: **\*** port used for relaying - :var int socks_port: **\*** port used as client (deprecated, always **None**) + :var int socks_port: **\*** port used as client (**deprecated**, always 
**None**) :var int dir_port: **\*** port used for descriptor mirroring :var bytes platform: line with operating system and tor version @@ -409,6 +474,8 @@ class ServerDescriptor(Descriptor): :var bytes contact: contact information :var stem.exit_policy.ExitPolicy exit_policy: **\*** stated exit policy :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6 + :var BridgeDistribution bridge_distribution: **\*** preferred method of providing this relay's + address if a bridge :var set family: **\*** nicknames or fingerprints of declared family :var int average_bandwidth: **\*** average rate it's willing to relay in bytes/s @@ -417,16 +484,23 @@ class ServerDescriptor(Descriptor): :var list link_protocols: link protocols supported by the relay :var list circuit_protocols: circuit protocols supported by the relay + :var bool is_hidden_service_dir: **\*** indicates if the relay serves hidden + service descriptors :var bool hibernating: **\*** hibernating when published :var bool allow_single_hop_exits: **\*** flag if single hop exiting is allowed + :var bool allow_tunneled_dir_requests: **\*** flag if tunneled directory + requests are accepted :var bool extra_info_cache: **\*** flag if a mirror for extra-info documents :var str extra_info_digest: upper-case hex encoded digest of our extra-info document - :var bool eventdns: flag for evdns backend (deprecated, always unset) + :var str extra_info_sha256_digest: base64 encoded sha256 digest of our extra-info document + :var bool eventdns: flag for evdns backend (**deprecated**, always unset) + :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol :var list or_addresses: **\*** alternative for our address/or_port attributes, each entry is a tuple of the form (address (**str**), port (**int**), is_ipv6 (**bool**)) + :var dict protocols: mapping of protocols to their supported versions - Deprecated, moved to extra-info descriptor... 
+ **Deprecated**, moved to extra-info descriptor... :var datetime read_history_end: end of the sampling interval :var int read_history_interval: seconds per interval @@ -438,6 +512,20 @@ class ServerDescriptor(Descriptor): **\*** attribute is either required when we're parsed with validation or has a default value, others are left as **None** if undefined + + .. versionchanged:: 1.5.0 + Added the allow_tunneled_dir_requests attribute. + + .. versionchanged:: 1.6.0 + Added the extra_info_sha256_digest, protocols, and bridge_distribution + attributes. + + .. versionchanged:: 1.7.0 + Added the is_hidden_service_dir attribute. + + .. versionchanged:: 1.7.0 + Deprecated the hidden_service_dir field, it's never been populated + (:spec:`43c2f78`). This field will be removed in Stem 2.0. """ ATTRIBUTES = { @@ -457,6 +545,7 @@ class ServerDescriptor(Descriptor): 'operating_system': (None, _parse_platform_line), 'uptime': (None, _parse_uptime_line), 'exit_policy_v6': (DEFAULT_IPV6_EXIT_POLICY, _parse_ipv6_policy_line), + 'bridge_distribution': (DEFAULT_BRIDGE_DISTRIBUTION, _parse_bridge_distribution_request_line), 'family': (set(), _parse_family_line), 'average_bandwidth': (None, _parse_bandwidth_line), @@ -465,12 +554,16 @@ class ServerDescriptor(Descriptor): 'link_protocols': (None, _parse_protocols_line), 'circuit_protocols': (None, _parse_protocols_line), + 'is_hidden_service_dir': (False, _parse_hidden_service_dir_line), 'hibernating': (False, _parse_hibernating_line), 'allow_single_hop_exits': (False, _parse_allow_single_hop_exits_line), + 'allow_tunneled_dir_requests': (False, _parse_tunneled_dir_server_line), + 'protocols': ({}, _parse_proto_line), 'extra_info_cache': (False, _parse_caches_extra_info_line), 'extra_info_digest': (None, _parse_extrainfo_digest_line), - 'hidden_service_dir': (None, _parse_hidden_service_dir_line), + 'extra_info_sha256_digest': (None, _parse_extrainfo_digest_line), 'eventdns': (None, _parse_eventdns_line), + 'ntor_onion_key': (None, 
_parse_ntor_onion_key_line), 'or_addresses': ([], _parse_or_address_line), 'read_history_end': (None, _parse_read_history_line), @@ -494,12 +587,16 @@ class ServerDescriptor(Descriptor): 'hidden-service-dir': _parse_hidden_service_dir_line, 'uptime': _parse_uptime_line, 'protocols': _parse_protocols_line, + 'ntor-onion-key': _parse_ntor_onion_key_line, 'or-address': _parse_or_address_line, 'read-history': _parse_read_history_line, 'write-history': _parse_write_history_line, 'ipv6-policy': _parse_ipv6_policy_line, 'allow-single-hop-exits': _parse_allow_single_hop_exits_line, + 'tunnelled-dir-server': _parse_tunneled_dir_server_line, + 'proto': _parse_proto_line, 'caches-extra-info': _parse_caches_extra_info_line, + 'bridge-distribution-request': _parse_bridge_distribution_request_line, 'family': _parse_family_line, 'eventdns': _parse_eventdns_line, } @@ -533,7 +630,13 @@ class ServerDescriptor(Descriptor): # influences the resulting exit policy, but for everything else the order # does not matter so breaking it into key / value pairs. - entries, self._unparsed_exit_policy = _get_descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, ('accept', 'reject')) + entries, self._unparsed_exit_policy = _descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, extra_keywords = ('accept', 'reject'), non_ascii_fields = ('contact', 'platform')) + + # TODO: Remove the following field in Stem 2.0. It has never been populated... 
+ # + # https://gitweb.torproject.org/torspec.git/commit/?id=43c2f78 + + self.hidden_service_dir = ['2'] if validate: self._parse(entries, validate) @@ -624,6 +727,12 @@ class ServerDescriptor(Descriptor): if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]: raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword) + if 'identity-ed25519' in entries.keys(): + if 'router-sig-ed25519' not in entries.keys(): + raise ValueError('Descriptor must have router-sig-ed25519 entry to accompany identity-ed25519') + elif 'router-sig-ed25519' not in list(entries.keys())[-2:]: + raise ValueError("Descriptor must have 'router-sig-ed25519' as the next-to-last entry") + if not self.exit_policy: raise ValueError("Descriptor must have at least one 'accept' or 'reject' entry") @@ -648,29 +757,68 @@ class RelayDescriptor(ServerDescriptor): Server descriptor (`descriptor specification `_) + :var stem.certificate.Ed25519Certificate certificate: ed25519 certificate + :var str ed25519_certificate: base64 encoded ed25519 certificate + :var str ed25519_master_key: base64 encoded master key for our ed25519 certificate + :var str ed25519_signature: signature of this document using ed25519 + :var str onion_key: **\*** key used to encrypt EXTEND cells - :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol + :var str onion_key_crosscert: signature generated using the onion_key + :var str ntor_onion_key_crosscert: signature generated using the ntor-onion-key + :var str ntor_onion_key_crosscert_sign: sign of the corresponding ed25519 public key :var str signing_key: **\*** relay's long-term identity key :var str signature: **\*** signature for this descriptor **\*** attribute is required when we're parsed with validation + + .. versionchanged:: 1.5.0 + Added the ed25519_certificate, ed25519_master_key, ed25519_signature, + onion_key_crosscert, ntor_onion_key_crosscert, and + ntor_onion_key_crosscert_sign attributes. + + .. 
versionchanged:: 1.6.0 + Moved from the deprecated `pycrypto + `_ module to `cryptography + `_ for validating signatures. + + .. versionchanged:: 1.6.0 + Added the certificate attribute. + + .. deprecated:: 1.6.0 + Our **ed25519_certificate** is deprecated in favor of our new + **certificate** attribute. The base64 encoded certificate is available via + the certificate's **encoded** attribute. + + .. versionchanged:: 1.6.0 + Added the **skip_crypto_validation** constructor argument. """ ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{ + 'certificate': (None, _parse_identity_ed25519_line), + 'ed25519_certificate': (None, _parse_identity_ed25519_line), + 'ed25519_master_key': (None, _parse_master_key_ed25519_line), + 'ed25519_signature': (None, _parse_router_sig_ed25519_line), + 'onion_key': (None, _parse_onion_key_line), - 'ntor_onion_key': (None, _parse_ntor_onion_key_line), + 'onion_key_crosscert': (None, _parse_onion_key_crosscert_line), + 'ntor_onion_key_crosscert': (None, _parse_ntor_onion_key_crosscert_line), + 'ntor_onion_key_crosscert_sign': (None, _parse_ntor_onion_key_crosscert_line), 'signing_key': (None, _parse_signing_key_line), 'signature': (None, _parse_router_signature_line), }) PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{ + 'identity-ed25519': _parse_identity_ed25519_line, + 'master-key-ed25519': _parse_master_key_ed25519_line, + 'router-sig-ed25519': _parse_router_sig_ed25519_line, 'onion-key': _parse_onion_key_line, - 'ntor-onion-key': _parse_ntor_onion_key_line, + 'onion-key-crosscert': _parse_onion_key_crosscert_line, + 'ntor-onion-key-crosscert': _parse_ntor_onion_key_crosscert_line, 'signing-key': _parse_signing_key_line, 'router-signature': _parse_router_signature_line, }) - def __init__(self, raw_contents, validate = False, annotations = None): + def __init__(self, raw_contents, validate = False, annotations = None, skip_crypto_validation = False): super(RelayDescriptor, self).__init__(raw_contents, validate, annotations) 
if validate: @@ -680,12 +828,65 @@ class RelayDescriptor(ServerDescriptor): if key_hash != self.fingerprint.lower(): raise ValueError('Fingerprint does not match the hash of our signing key (fingerprint: %s, signing key hash: %s)' % (self.fingerprint.lower(), key_hash)) - if stem.prereq.is_crypto_available(): + if not skip_crypto_validation and stem.prereq.is_crypto_available(): signed_digest = self._digest_for_signature(self.signing_key, self.signature) if signed_digest != self.digest(): raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, self.digest())) + if self.onion_key_crosscert and stem.prereq.is_crypto_available(): + onion_key_crosscert_digest = self._digest_for_signature(self.onion_key, self.onion_key_crosscert) + + if onion_key_crosscert_digest != self._onion_key_crosscert_digest(): + raise ValueError('Decrypted onion-key-crosscert digest does not match local digest (calculated: %s, local: %s)' % (onion_key_crosscert_digest, self._onion_key_crosscert_digest())) + + if stem.prereq._is_pynacl_available() and self.certificate: + self.certificate.validate(self) + + @classmethod + def content(cls, attr = None, exclude = (), sign = False, signing_key = None): + if signing_key: + sign = True + + if attr is None: + attr = {} + + base_header = ( + ('router', '%s %s 9001 0 0' % (_random_nickname(), _random_ipv4_address())), + ('published', _random_date()), + ('bandwidth', '153600 256000 104590'), + ('reject', '*:*'), + ('onion-key', _random_crypto_blob('RSA PUBLIC KEY')), + ('signing-key', _random_crypto_blob('RSA PUBLIC KEY')), + ) + + if sign: + if attr and 'signing-key' in attr: + raise ValueError('Cannot sign the descriptor if a signing-key has been provided') + elif attr and 'router-signature' in attr: + raise ValueError('Cannot sign the descriptor if a router-signature has been provided') + + if signing_key is None: + signing_key = create_signing_key() + + if 'fingerprint' not in attr: + fingerprint = 
hashlib.sha1(_bytes_for_block(stem.util.str_tools._to_unicode(signing_key.public_digest.strip()))).hexdigest().upper() + attr['fingerprint'] = ' '.join(stem.util.str_tools._split_by_length(fingerprint, 4)) + + attr['signing-key'] = signing_key.public_digest + + content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n' + return _append_router_signature(content, signing_key.private) + else: + return _descriptor_content(attr, exclude, base_header, ( + ('router-sig-ed25519', None), + ('router-signature', _random_crypto_blob('SIGNATURE')), + )) + + @classmethod + def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None): + return cls(cls.content(attr, exclude, sign, signing_key), validate = validate, skip_crypto_validation = not sign) + @lru_cache() def digest(self): """ @@ -693,23 +894,88 @@ class RelayDescriptor(ServerDescriptor): :returns: the digest string encoded in uppercase hex - :raises: ValueError if the digest canot be calculated + :raises: ValueError if the digest cannot be calculated """ return self._digest_for_content(b'router ', b'\nrouter-signature\n') + def make_router_status_entry(self): + """ + Provides a RouterStatusEntryV3 for this descriptor content. + + .. versionadded:: 1.6.0 + + :returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` + that would be in the consensus + """ + + if not self.fingerprint: + raise ValueError('Server descriptor lacks a fingerprint. 
This is an optional field, but required to make a router status entry.') + + attr = { + 'r': ' '.join([ + self.nickname, + _truncated_b64encode(binascii.unhexlify(stem.util.str_tools._to_bytes(self.fingerprint))), + _truncated_b64encode(binascii.unhexlify(stem.util.str_tools._to_bytes(self.digest()))), + self.published.strftime('%Y-%m-%d %H:%M:%S'), + self.address, + str(self.or_port), + str(self.dir_port) if self.dir_port else '0', + ]), + 'w': 'Bandwidth=%i' % self.average_bandwidth, + 'p': self.exit_policy.summary().replace(', ', ','), + } + + if self.tor_version: + attr['v'] = 'Tor %s' % self.tor_version + + if self.or_addresses: + attr['a'] = ['%s:%s' % (addr, port) for addr, port, _ in self.or_addresses] + + if self.certificate: + attr['id'] = 'ed25519 %s' % _truncated_b64encode(self.certificate.key) + + return RouterStatusEntryV3.create(attr) + + @lru_cache() + def _onion_key_crosscert_digest(self): + """ + Provides the digest of the onion-key-crosscert data. This consists of the + RSA identity key sha1 and ed25519 identity key. 
+ + :returns: **unicode** digest encoded in uppercase hex + + :raises: ValueError if the digest cannot be calculated + """ + + signing_key_digest = hashlib.sha1(_bytes_for_block(self.signing_key)).digest() + data = signing_key_digest + base64.b64decode(stem.util.str_tools._to_bytes(self.ed25519_master_key) + b'=') + return stem.util.str_tools._to_unicode(binascii.hexlify(data).upper()) + def _compare(self, other, method): if not isinstance(other, RelayDescriptor): return False return method(str(self).strip(), str(other).strip()) + def _check_constraints(self, entries): + super(RelayDescriptor, self)._check_constraints(entries) + + if self.ed25519_certificate: + if not self.onion_key_crosscert: + raise ValueError("Descriptor must have a 'onion-key-crosscert' when identity-ed25519 is present") + elif not self.ed25519_signature: + raise ValueError("Descriptor must have a 'router-sig-ed25519' when identity-ed25519 is present") + def __hash__(self): return hash(str(self).strip()) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) @@ -720,17 +986,42 @@ class RelayDescriptor(ServerDescriptor): class BridgeDescriptor(ServerDescriptor): """ Bridge descriptor (`bridge descriptor specification - `_) + `_) + + :var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519 + :var str router_digest_sha256: sha256 digest of this document + + .. versionchanged:: 1.5.0 + Added the ed25519_certificate_hash and router_digest_sha256 attributes. + Also added ntor_onion_key (previously this only belonged to unsanitized + descriptors). 
""" ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{ + 'ed25519_certificate_hash': (None, _parse_master_key_ed25519_for_hash_line), + 'router_digest_sha256': (None, _parse_router_digest_sha256_line), '_digest': (None, _parse_router_digest_line), }) PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{ + 'master-key-ed25519': _parse_master_key_ed25519_for_hash_line, + 'router-digest-sha256': _parse_router_digest_sha256_line, 'router-digest': _parse_router_digest_line, }) + @classmethod + def content(cls, attr = None, exclude = (), sign = False): + if sign: + raise NotImplementedError('Signing of %s not implemented' % cls.__name__) + + return _descriptor_content(attr, exclude, ( + ('router', '%s %s 9001 0 0' % (_random_nickname(), _random_ipv4_address())), + ('router-digest', '006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4'), + ('published', _random_date()), + ('bandwidth', '409600 819200 5120'), + ('reject', '*:*'), + )) + def digest(self): return self._digest @@ -738,7 +1029,7 @@ class BridgeDescriptor(ServerDescriptor): """ Checks if we've been properly scrubbed in accordance with the `bridge descriptor specification - `_. + `_. Validation is a moving target so this may not be fully up to date. 
:returns: **True** if we're scrubbed, **False** otherwise @@ -815,6 +1106,9 @@ class BridgeDescriptor(ServerDescriptor): def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __lt__(self, other): return self._compare(other, lambda s, o: s < o) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py b/Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py index 75a252b..b573b79 100644 --- a/Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py @@ -1,4 +1,4 @@ -# Copyright 2013-2015, Damian Johnson and The Tor Project +# Copyright 2013-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -17,7 +17,7 @@ import stem.util.tor_tools from stem.descriptor import ( Descriptor, _read_until_keywords, - _get_descriptor_components, + _descriptor_components, ) @@ -63,7 +63,7 @@ class TorDNSEL(Descriptor): def __init__(self, raw_contents, validate): super(TorDNSEL, self).__init__(raw_contents) raw_contents = stem.util.str_tools._to_unicode(raw_contents) - entries = _get_descriptor_components(raw_contents, validate) + entries = _descriptor_components(raw_contents, validate) self.fingerprint = None self.published = None diff --git a/Shared/lib/python3.4/site-packages/stem/directory.py b/Shared/lib/python3.4/site-packages/stem/directory.py new file mode 100644 index 0000000..1782fbf --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/directory.py @@ -0,0 +1,659 @@ +# Copyright 2018, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Directories that provide `relay descriptor information +<../tutorials/mirror_mirror_on_the_wall.html>`_. At a very high level tor works +as follows... + +1. Volunteer starts a new tor relay, during which it sends a `server + descriptor `_ to each of the directory + authorities. + +2. 
Each hour the directory authorities make a `vote + `_ that says who they think the active + relays are in the network and some attributes about them. + +3. The directory authorities send each other their votes, and compile that + into the `consensus `_. This document is very + similar to the votes, the only difference being that the majority of the + authorities agree upon and sign this document. The idividual relay entries + in the vote or consensus is called `router status entries + `_. + +4. Tor clients (people using the service) download the consensus from an + authority, fallback, or other mirror to determine who the active relays in + the network are. They then use this to construct circuits and use the + network. + +:: + + Directory - Relay we can retrieve descriptor information from + | |- from_cache - Provides cached information bundled with Stem. + | +- from_remote - Downloads the latest directory information from tor. + | + |- Authority - Tor directory authority + +- Fallback - Mirrors that can be used instead of the authorities + +.. 
versionadded:: 1.7.0 +""" + +import os +import re + +import stem.util +import stem.util.conf + +from stem.util import connection, str_tools, tor_tools + +try: + # added in python 2.7 + from collections import OrderedDict +except ImportError: + from stem.util.ordereddict import OrderedDict + +try: + # account for urllib's change between python 2.x and 3.x + import urllib.request as urllib +except ImportError: + import urllib2 as urllib + +GITWEB_AUTHORITY_URL = 'https://gitweb.torproject.org/tor.git/plain/src/app/config/auth_dirs.inc' +GITWEB_FALLBACK_URL = 'https://gitweb.torproject.org/tor.git/plain/src/app/config/fallback_dirs.inc' +FALLBACK_CACHE_PATH = os.path.join(os.path.dirname(__file__), 'cached_fallbacks.cfg') + +AUTHORITY_NAME = re.compile('"(\S+) orport=(\d+) .*"') +AUTHORITY_V3IDENT = re.compile('"v3ident=([\dA-F]{40}) "') +AUTHORITY_IPV6 = re.compile('"ipv6=\[([\da-f:]+)\]:(\d+) "') +AUTHORITY_ADDR = re.compile('"([\d\.]+):(\d+) ([\dA-F ]{49})",') + +FALLBACK_DIV = '/* ===== */' +FALLBACK_MAPPING = re.compile('/\*\s+(\S+)=(\S*)\s+\*/') + +FALLBACK_ADDR = re.compile('"([\d\.]+):(\d+) orport=(\d+) id=([\dA-F]{40}).*') +FALLBACK_NICKNAME = re.compile('/\* nickname=(\S+) \*/') +FALLBACK_EXTRAINFO = re.compile('/\* extrainfo=([0-1]) \*/') +FALLBACK_IPV6 = re.compile('" ipv6=\[([\da-f:]+)\]:(\d+)"') + + +def _match_with(lines, regexes, required = None): + """ + Scans the given content against a series of regex matchers, providing back a + mapping of regexes to their capture groups. This maping is with the value if + the regex has just a single capture group, and a tuple otherwise. 
+ + :param list lines: text to parse + :param list regexes: regexes to match against + :param list required: matches that must be in the content + + :returns: **dict** mapping matchers against their capture groups + + :raises: **ValueError** if a required match is not present + """ + + matches = {} + + for line in lines: + for matcher in regexes: + m = matcher.search(str_tools._to_unicode(line)) + + if m: + match_groups = m.groups() + matches[matcher] = match_groups if len(match_groups) > 1 else match_groups[0] + + if required: + for required_matcher in required: + if required_matcher not in matches: + raise ValueError('Failed to parse mandatory data from:\n\n%s' % '\n'.join(lines)) + + return matches + + +def _directory_entries(lines, pop_section_func, regexes, required = None): + next_section = pop_section_func(lines) + + while next_section: + yield _match_with(next_section, regexes, required) + next_section = pop_section_func(lines) + + +class Directory(object): + """ + Relay we can contact for descriptor information. + + Our :func:`~stem.directory.Directory.from_cache` and + :func:`~stem.directory.Directory.from_remote` functions key off a + different identifier based on our subclass... + + * :class:`~stem.directory.Authority` keys off the nickname. + * :class:`~stem.directory.Fallback` keys off fingerprints. + + This is because authorities are highly static and canonically known by their + names, whereas fallbacks vary more and don't necessarily have a nickname to + key off of. 
+ + :var str address: IPv4 address of the directory + :var int or_port: port on which the relay services relay traffic + :var int dir_port: port on which directory information is available + :var str fingerprint: relay fingerprint + :var str nickname: relay nickname + :var str orport_v6: **(address, port)** tuple for the directory's IPv6 + ORPort, or **None** if it doesn't have one + """ + + def __init__(self, address, or_port, dir_port, fingerprint, nickname, orport_v6): + identifier = '%s (%s)' % (fingerprint, nickname) if nickname else fingerprint + + if not connection.is_valid_ipv4_address(address): + raise ValueError('%s has an invalid IPv4 address: %s' % (identifier, address)) + elif not connection.is_valid_port(or_port): + raise ValueError('%s has an invalid ORPort: %s' % (identifier, or_port)) + elif not connection.is_valid_port(dir_port): + raise ValueError('%s has an invalid DirPort: %s' % (identifier, dir_port)) + elif not tor_tools.is_valid_fingerprint(fingerprint): + raise ValueError('%s has an invalid fingerprint: %s' % (identifier, fingerprint)) + elif nickname and not tor_tools.is_valid_nickname(nickname): + raise ValueError('%s has an invalid nickname: %s' % (fingerprint, nickname)) + + if orport_v6: + if not isinstance(orport_v6, tuple) or len(orport_v6) != 2: + raise ValueError('%s orport_v6 should be a two value tuple: %s' % (identifier, str(orport_v6))) + elif not connection.is_valid_ipv6_address(orport_v6[0]): + raise ValueError('%s has an invalid IPv6 address: %s' % (identifier, orport_v6[0])) + elif not connection.is_valid_port(orport_v6[1]): + raise ValueError('%s has an invalid IPv6 port: %s' % (identifier, orport_v6[1])) + + self.address = address + self.or_port = int(or_port) + self.dir_port = int(dir_port) + self.fingerprint = fingerprint + self.nickname = nickname + self.orport_v6 = (orport_v6[0], int(orport_v6[1])) if orport_v6 else None + + @staticmethod + def from_cache(): + """ + Provides cached Tor directory information. 
This information is hardcoded + into Tor and occasionally changes, so the information provided by this + method may not necessarily match the latest version of tor. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 1.7.0 + Support added to the :class:`~stem.directory.Authority` class. + + :returns: **dict** of **str** identifiers to + :class:`~stem.directory.Directory` instances + """ + + raise NotImplementedError('Unsupported Operation: this should be implemented by the Directory subclass') + + @staticmethod + def from_remote(timeout = 60): + """ + Reads and parses tor's directory data `from gitweb.torproject.org `_. + Note that while convenient, this reliance on GitWeb means you should alway + call with a fallback, such as... + + :: + + try: + authorities = stem.directory.Authority.from_remote() + except IOError: + authorities = stem.directory.Authority.from_cache() + + .. versionadded:: 1.5.0 + + .. versionchanged:: 1.7.0 + Support added to the :class:`~stem.directory.Authority` class. + + :param int timeout: seconds to wait before timing out the request + + :returns: **dict** of **str** identifiers to their + :class:`~stem.directory.Directory` + + :raises: **IOError** if unable to retrieve the fallback directories + """ + + raise NotImplementedError('Unsupported Operation: this should be implemented by the Directory subclass') + + def __hash__(self): + return stem.util._hash_attr(self, 'address', 'or_port', 'dir_port', 'fingerprint', 'nickname', 'orport_v6') + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, Directory) else False + + def __ne__(self, other): + return not self == other + + +class Authority(Directory): + """ + Tor directory authority, a special type of relay `hardcoded into tor + `_ + to enumerate the relays in the network. + + .. versionchanged:: 1.3.0 + Added the is_bandwidth_authority attribute. + + .. versionchanged:: 1.7.0 + Added the orport_v6 attribute. + + .. 
deprecated:: 1.7.0 + The is_bandwidth_authority attribute is deprecated and will be removed in + the future. + + :var str v3ident: identity key fingerprint used to sign votes and consensus + """ + + def __init__(self, address = None, or_port = None, dir_port = None, fingerprint = None, nickname = None, orport_v6 = None, v3ident = None, is_bandwidth_authority = False): + super(Authority, self).__init__(address, or_port, dir_port, fingerprint, nickname, orport_v6) + + if v3ident and not tor_tools.is_valid_fingerprint(v3ident): + identifier = '%s (%s)' % (fingerprint, nickname) if nickname else fingerprint + raise ValueError('%s has an invalid v3ident: %s' % (identifier, v3ident)) + + self.v3ident = v3ident + self.is_bandwidth_authority = is_bandwidth_authority + + @staticmethod + def from_cache(): + return dict(DIRECTORY_AUTHORITIES) + + @staticmethod + def from_remote(timeout = 60): + try: + lines = str_tools._to_unicode(urllib.urlopen(GITWEB_AUTHORITY_URL, timeout = timeout).read()).splitlines() + except Exception as exc: + raise IOError("Unable to download tor's directory authorities from %s: %s" % (GITWEB_AUTHORITY_URL, exc)) + + if not lines: + raise IOError('%s did not have any content' % GITWEB_AUTHORITY_URL) + + # Entries look like... 
+ # + # "moria1 orport=9101 " + # "v3ident=D586D18309DED4CD6D57C18FDB97EFA96D330566 " + # "128.31.0.39:9131 9695 DFC3 5FFE B861 329B 9F1A B04C 4639 7020 CE31", + + try: + results = {} + + for matches in _directory_entries(lines, Authority._pop_section, (AUTHORITY_NAME, AUTHORITY_V3IDENT, AUTHORITY_IPV6, AUTHORITY_ADDR), required = (AUTHORITY_NAME, AUTHORITY_ADDR)): + nickname, or_port = matches.get(AUTHORITY_NAME) + address, dir_port, fingerprint = matches.get(AUTHORITY_ADDR) + + results[nickname] = Authority( + address = address, + or_port = or_port, + dir_port = dir_port, + fingerprint = fingerprint.replace(' ', ''), + nickname = nickname, + orport_v6 = matches.get(AUTHORITY_IPV6), + v3ident = matches.get(AUTHORITY_V3IDENT), + ) + except ValueError as exc: + raise IOError(str(exc)) + + return results + + @staticmethod + def _pop_section(lines): + """ + Provides the next authority entry. + """ + + section_lines = [] + + if lines: + section_lines.append(lines.pop(0)) + + while lines and lines[0].startswith(' '): + section_lines.append(lines.pop(0)) + + return section_lines + + def __hash__(self): + return stem.util._hash_attr(self, 'v3ident', 'is_bandwidth_authority', parent = Directory, cache = True) + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, Authority) else False + + def __ne__(self, other): + return not self == other + + +class Fallback(Directory): + """ + Particularly stable relays tor can instead of authorities when + bootstrapping. These relays are `hardcoded in tor + `_. + + For example, the following checks the performance of tor's fallback directories... 
+ + :: + + import time + from stem.descriptor.remote import get_consensus + from stem.directory import Fallback + + for fallback in Fallback.from_cache().values(): + start = time.time() + get_consensus(endpoints = [(fallback.address, fallback.dir_port)]).run() + print('Downloading the consensus took %0.2f from %s' % (time.time() - start, fallback.fingerprint)) + + :: + + % python example.py + Downloading the consensus took 5.07 from 0AD3FA884D18F89EEA2D89C019379E0E7FD94417 + Downloading the consensus took 3.59 from C871C91489886D5E2E94C13EA1A5FDC4B6DC5204 + Downloading the consensus took 4.16 from 74A910646BCEEFBCD2E874FC1DC997430F968145 + ... + + .. versionadded:: 1.5.0 + + .. versionchanged:: 1.7.0 + Added the has_extrainfo and header attributes which are part of + the `second version of the fallback directories + `_. + + :var bool has_extrainfo: **True** if the relay should be able to provide + extrainfo descriptors, **False** otherwise. + :var collections.OrderedDict header: metadata about the fallback directory file this originated from + """ + + def __init__(self, address = None, or_port = None, dir_port = None, fingerprint = None, nickname = None, has_extrainfo = False, orport_v6 = None, header = None): + super(Fallback, self).__init__(address, or_port, dir_port, fingerprint, nickname, orport_v6) + self.has_extrainfo = has_extrainfo + self.header = OrderedDict(header) if header else OrderedDict() + + @staticmethod + def from_cache(path = FALLBACK_CACHE_PATH): + conf = stem.util.conf.Config() + conf.load(path) + headers = OrderedDict([(k.split('.', 1)[1], conf.get(k)) for k in conf.keys() if k.startswith('header.')]) + + results = {} + + for fingerprint in set([key.split('.')[0] for key in conf.keys()]): + if fingerprint in ('tor_commit', 'stem_commit', 'header'): + continue + + attr = {} + + for attr_name in ('address', 'or_port', 'dir_port', 'nickname', 'has_extrainfo', 'orport6_address', 'orport6_port'): + key = '%s.%s' % (fingerprint, attr_name) + 
attr[attr_name] = conf.get(key) + + if not attr[attr_name] and attr_name not in ('nickname', 'has_extrainfo', 'orport6_address', 'orport6_port'): + raise IOError("'%s' is missing from %s" % (key, FALLBACK_CACHE_PATH)) + + if attr['orport6_address'] and attr['orport6_port']: + orport_v6 = (attr['orport6_address'], int(attr['orport6_port'])) + else: + orport_v6 = None + + results[fingerprint] = Fallback( + address = attr['address'], + or_port = int(attr['or_port']), + dir_port = int(attr['dir_port']), + fingerprint = fingerprint, + nickname = attr['nickname'], + has_extrainfo = attr['has_extrainfo'] == 'true', + orport_v6 = orport_v6, + header = headers, + ) + + return results + + @staticmethod + def from_remote(timeout = 60): + try: + lines = str_tools._to_unicode(urllib.urlopen(GITWEB_FALLBACK_URL, timeout = timeout).read()).splitlines() + except Exception as exc: + raise IOError("Unable to download tor's fallback directories from %s: %s" % (GITWEB_FALLBACK_URL, exc)) + + if not lines: + raise IOError('%s did not have any content' % GITWEB_FALLBACK_URL) + elif lines[0] != '/* type=fallback */': + raise IOError('%s does not have a type field indicating it is fallback directory metadata' % GITWEB_FALLBACK_URL) + + # header metadata + + header = {} + + for line in Fallback._pop_section(lines): + mapping = FALLBACK_MAPPING.match(line) + + if mapping: + header[mapping.group(1)] = mapping.group(2) + else: + raise IOError('Malformed fallback directory header line: %s' % line) + + Fallback._pop_section(lines) # skip human readable comments + + # Entries look like... 
+ # + # "5.9.110.236:9030 orport=9001 id=0756B7CD4DFC8182BE23143FAC0642F515182CEB" + # " ipv6=[2a01:4f8:162:51e2::2]:9001" + # /* nickname=rueckgrat */ + # /* extrainfo=1 */ + + try: + results = {} + + for matches in _directory_entries(lines, Fallback._pop_section, (FALLBACK_ADDR, FALLBACK_NICKNAME, FALLBACK_EXTRAINFO, FALLBACK_IPV6), required = (FALLBACK_ADDR,)): + address, dir_port, or_port, fingerprint = matches[FALLBACK_ADDR] + + results[fingerprint] = Fallback( + address = address, + or_port = int(or_port), + dir_port = int(dir_port), + fingerprint = fingerprint, + nickname = matches.get(FALLBACK_NICKNAME), + has_extrainfo = matches.get(FALLBACK_EXTRAINFO) == '1', + orport_v6 = matches.get(FALLBACK_IPV6), + header = header, + ) + except ValueError as exc: + raise IOError(str(exc)) + + return results + + @staticmethod + def _pop_section(lines): + """ + Provides lines up through the next divider. This excludes lines with just a + comma since they're an artifact of these being C strings. + """ + + section_lines = [] + + if lines: + line = lines.pop(0) + + while lines and line != FALLBACK_DIV: + if line.strip() != ',': + section_lines.append(line) + + line = lines.pop(0) + + return section_lines + + @staticmethod + def _write(fallbacks, tor_commit, stem_commit, headers, path = FALLBACK_CACHE_PATH): + """ + Persists fallback directories to a location in a way that can be read by + from_cache(). 
+ + :param dict fallbacks: mapping of fingerprints to their fallback directory + :param str tor_commit: tor commit the fallbacks came from + :param str stem_commit: stem commit the fallbacks came from + :param dict headers: metadata about the file these came from + :param str path: location fallbacks will be persisted to + """ + + conf = stem.util.conf.Config() + conf.set('tor_commit', tor_commit) + conf.set('stem_commit', stem_commit) + + for k, v in headers.items(): + conf.set('header.%s' % k, v) + + for directory in sorted(fallbacks.values(), key = lambda x: x.fingerprint): + fingerprint = directory.fingerprint + conf.set('%s.address' % fingerprint, directory.address) + conf.set('%s.or_port' % fingerprint, str(directory.or_port)) + conf.set('%s.dir_port' % fingerprint, str(directory.dir_port)) + conf.set('%s.nickname' % fingerprint, directory.nickname) + conf.set('%s.has_extrainfo' % fingerprint, 'true' if directory.has_extrainfo else 'false') + + if directory.orport_v6: + conf.set('%s.orport6_address' % fingerprint, str(directory.orport_v6[0])) + conf.set('%s.orport6_port' % fingerprint, str(directory.orport_v6[1])) + + conf.save(path) + + def __hash__(self): + return stem.util._hash_attr(self, 'has_extrainfo', 'header', parent = Directory, cache = True) + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, Fallback) else False + + def __ne__(self, other): + return not self == other + + +def _fallback_directory_differences(previous_directories, new_directories): + """ + Provides a description of how fallback directories differ. 
+ """ + + lines = [] + + added_fp = set(new_directories.keys()).difference(previous_directories.keys()) + removed_fp = set(previous_directories.keys()).difference(new_directories.keys()) + + for fp in added_fp: + directory = new_directories[fp] + orport_v6 = '%s:%s' % directory.orport_v6 if directory.orport_v6 else '[none]' + + lines += [ + '* Added %s as a new fallback directory:' % directory.fingerprint, + ' address: %s' % directory.address, + ' or_port: %s' % directory.or_port, + ' dir_port: %s' % directory.dir_port, + ' nickname: %s' % directory.nickname, + ' has_extrainfo: %s' % directory.has_extrainfo, + ' orport_v6: %s' % orport_v6, + '', + ] + + for fp in removed_fp: + lines.append('* Removed %s as a fallback directory' % fp) + + for fp in new_directories: + if fp in added_fp or fp in removed_fp: + continue # already discussed these + + previous_directory = previous_directories[fp] + new_directory = new_directories[fp] + + if previous_directory != new_directory: + for attr in ('address', 'or_port', 'dir_port', 'fingerprint', 'orport_v6'): + old_attr = getattr(previous_directory, attr) + new_attr = getattr(new_directory, attr) + + if old_attr != new_attr: + lines.append('* Changed the %s of %s from %s to %s' % (attr, fp, old_attr, new_attr)) + + return '\n'.join(lines) + + +DIRECTORY_AUTHORITIES = { + 'moria1': Authority( + nickname = 'moria1', + address = '128.31.0.39', + or_port = 9101, + dir_port = 9131, + fingerprint = '9695DFC35FFEB861329B9F1AB04C46397020CE31', + v3ident = 'D586D18309DED4CD6D57C18FDB97EFA96D330566', + ), + 'tor26': Authority( + nickname = 'tor26', + address = '86.59.21.38', + or_port = 443, + dir_port = 80, + fingerprint = '847B1F850344D7876491A54892F904934E4EB85D', + orport_v6 = ('2001:858:2:2:aabb:0:563b:1526', 443), + v3ident = '14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4', + ), + 'dizum': Authority( + nickname = 'dizum', + address = '194.109.206.212', + or_port = 443, + dir_port = 80, + fingerprint = 
'7EA6EAD6FD83083C538F44038BBFA077587DD755', + v3ident = 'E8A9C45EDE6D711294FADF8E7951F4DE6CA56B58', + ), + 'gabelmoo': Authority( + nickname = 'gabelmoo', + address = '131.188.40.189', + or_port = 443, + dir_port = 80, + fingerprint = 'F2044413DAC2E02E3D6BCF4735A19BCA1DE97281', + orport_v6 = ('2001:638:a000:4140::ffff:189', 443), + v3ident = 'ED03BB616EB2F60BEC80151114BB25CEF515B226', + ), + 'dannenberg': Authority( + nickname = 'dannenberg', + address = '193.23.244.244', + or_port = 443, + dir_port = 80, + orport_v6 = ('2001:678:558:1000::244', 443), + fingerprint = '7BE683E65D48141321C5ED92F075C55364AC7123', + v3ident = '0232AF901C31A04EE9848595AF9BB7620D4C5B2E', + ), + 'maatuska': Authority( + nickname = 'maatuska', + address = '171.25.193.9', + or_port = 80, + dir_port = 443, + fingerprint = 'BD6A829255CB08E66FBE7D3748363586E46B3810', + orport_v6 = ('2001:67c:289c::9', 80), + v3ident = '49015F787433103580E3B66A1707A00E60F2D15B', + ), + 'Faravahar': Authority( + nickname = 'Faravahar', + address = '154.35.175.225', + or_port = 443, + dir_port = 80, + fingerprint = 'CF6D0AAFB385BE71B8E111FC5CFF4B47923733BC', + v3ident = 'EFCBE720AB3A82B99F9E953CD5BF50F7EEFC7B97', + ), + 'longclaw': Authority( + nickname = 'longclaw', + address = '199.58.81.140', + or_port = 443, + dir_port = 80, + fingerprint = '74A910646BCEEFBCD2E874FC1DC997430F968145', + v3ident = '23D15D965BC35114467363C165C4F724B64B4F66', + ), + 'bastet': Authority( + nickname = 'bastet', + address = '204.13.164.118', + or_port = 443, + dir_port = 80, + fingerprint = '24E2F139121D4394C54B5BCC368B3B411857C413', + orport_v6 = ('2620:13:4000:6000::1000:118', 443), + v3ident = '27102BC123E7AF1D4741AE047E160C91ADC76B21', + ), + 'Serge': Authority( + nickname = 'Serge', + address = '66.111.2.131', + or_port = 9001, + dir_port = 9030, + fingerprint = 'BA44A889E64B93FAA2B114E02C2A279A8555C533', + v3ident = None, # does not vote in the consensus + ), +} diff --git 
a/Shared/lib/python3.4/site-packages/stem/exit_policy.py b/Shared/lib/python3.4/site-packages/stem/exit_policy.py index 62b9a12..0ed5e58 100644 --- a/Shared/lib/python3.4/site-packages/stem/exit_policy.py +++ b/Shared/lib/python3.4/site-packages/stem/exit_policy.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -67,20 +67,19 @@ exiting to a destination is permissible or not. For instance... from __future__ import absolute_import +import re import socket import zlib import stem.prereq +import stem.util import stem.util.connection import stem.util.enum import stem.util.str_tools -from stem import str_type - -try: - # added in python 3.2 +if stem.prereq._is_lru_cache_available(): from functools import lru_cache -except ImportError: +else: from stem.util.lru_cache import lru_cache AddressType = stem.util.enum.Enum(('WILDCARD', 'Wildcard'), ('IPv4', 'IPv4'), ('IPv6', 'IPv6')) @@ -109,6 +108,12 @@ def get_config_policy(rules, ip_address = None): * ports being optional * the 'private' keyword + .. deprecated:: 1.7.0 + + Tor's torrc parameters lack a formal spec, making it difficult for this + method to be reliable. Callers are encouraged to move to + :func:`~stem.control.Controller.get_exit_policy` instead. 
+ :param str,list rules: comma separated rules or list to be converted :param str ip_address: this relay's IP address for the 'private' policy if it's present, this defaults to the local address @@ -118,10 +123,12 @@ def get_config_policy(rules, ip_address = None): :raises: **ValueError** if input isn't a valid tor exit policy """ - if ip_address and not (stem.util.connection.is_valid_ipv4_address(ip_address) or stem.util.connection.is_valid_ipv6_address(ip_address)): + if ip_address and not (stem.util.connection.is_valid_ipv4_address(ip_address) or stem.util.connection.is_valid_ipv6_address(ip_address, allow_brackets = True)): raise ValueError("%s isn't a valid IP address" % ip_address) + elif ip_address and stem.util.connection.is_valid_ipv6_address(ip_address, allow_brackets = True) and not (ip_address[0] == '[' and ip_address[-1] == ']'): + ip_address = '[%s]' % ip_address # ExitPolicy validation expects IPv6 addresses to be bracketed - if isinstance(rules, (bytes, str_type)): + if stem.util._is_str(rules): rules = rules.split(',') result = [] @@ -132,12 +139,12 @@ def get_config_policy(rules, ip_address = None): if not rule: continue - if ':' not in rule: + if not re.search(':[\d\-\*]+$', rule): rule = '%s:*' % rule if 'private' in rule: acceptance = rule.split(' ', 1)[0] - port = rule.split(':', 1)[1] + port = rule.rsplit(':', 1)[1] addresses = list(PRIVATE_ADDRESSES) if ip_address: @@ -153,12 +160,6 @@ def get_config_policy(rules, ip_address = None): else: result.append(ExitPolicyRule(rule)) - # torrc policies can apply to IPv4 or IPv6, so we need to make sure /0 - # addresses aren't treated as being a full wildcard - - for rule in result: - rule._submask_wildcard = False - return ExitPolicy(*result) @@ -169,10 +170,10 @@ def _flag_private_rules(rules): series of rules exactly matching it. 
""" - matches = [] + matches = [] # find all possible starting indexes for i, rule in enumerate(rules): - if i + len(PRIVATE_ADDRESSES) + 1 > len(rules): + if i + len(PRIVATE_ADDRESSES) > len(rules): break rule_str = '%s/%s' % (rule.address, rule.get_masked_bits()) @@ -184,32 +185,35 @@ def _flag_private_rules(rules): # To match the private policy the following must all be true... # # * series of addresses and bit masks match PRIVATE_ADDRESSES - # * all rules have the same port range and acceptance + # * all rules have the same port range # * all rules have the same acceptance (all accept or reject entries) + # + # The last rule is dynamically based on the relay's public address. It may + # not be present if get_config_policy() created this policy and we couldn't + # resolve our address. - rule_set = rules[start_index:start_index + len(PRIVATE_ADDRESSES) + 1] + last_index = start_index + len(PRIVATE_ADDRESSES) + rule_set = rules[start_index:last_index] + last_rule = rules[last_index] if len(rules) > last_index else None is_match = True min_port, max_port = rule_set[0].min_port, rule_set[0].max_port is_accept = rule_set[0].is_accept - for i, rule in enumerate(rule_set[:-1]): + for i, rule in enumerate(rule_set): rule_str = '%s/%s' % (rule.address, rule.get_masked_bits()) if rule_str != PRIVATE_ADDRESSES[i] or rule.min_port != min_port or rule.max_port != max_port or rule.is_accept != is_accept: is_match = False break - # The last rule is for the relay's public address, so it's dynamic. 
- - last_rule = rule_set[-1] - - if last_rule.is_address_wildcard() or last_rule.min_port != min_port or last_rule.max_port != max_port or last_rule.is_accept != is_accept: - is_match = False if is_match: for rule in rule_set: rule._is_private = True + if last_rule and not last_rule.is_address_wildcard() and last_rule.min_port == min_port and last_rule.max_port == max_port and last_rule.is_accept == is_accept: + last_rule._is_private = True + def _flag_default_rules(rules): """ @@ -238,7 +242,7 @@ class ExitPolicy(object): # sanity check the types for rule in rules: - if not isinstance(rule, (bytes, str_type, ExitPolicyRule)): + if not stem.util._is_str(rule) and not isinstance(rule, ExitPolicyRule): raise TypeError('Exit policy rules can only contain strings or ExitPolicyRules, got a %s (%s)' % (type(rule), rules)) # Unparsed representation of the rules we were constructed with. Our @@ -249,7 +253,7 @@ class ExitPolicy(object): is_all_str = True for rule in rules: - if not isinstance(rule, (bytes, str_type)): + if not stem.util._is_str(rule): is_all_str = False if rules and is_all_str: @@ -282,6 +286,9 @@ class ExitPolicy(object): :returns: **True** if exiting to this destination is allowed, **False** otherwise """ + if not self.is_exiting_allowed(): + return False + for rule in self._get_rules(): if rule.is_match(address, port, strict): return rule.is_accept @@ -458,7 +465,10 @@ class ExitPolicy(object): if isinstance(rule, bytes): rule = stem.util.str_tools._to_unicode(rule) - if isinstance(rule, str_type): + if stem.util._is_str(rule): + if not rule.strip(): + continue + rule = ExitPolicyRule(rule.strip()) if rule.is_accept: @@ -522,10 +532,10 @@ class ExitPolicy(object): return self._hash def __eq__(self, other): - if isinstance(other, ExitPolicy): - return self._get_rules() == list(other) - else: - return False + return hash(self) == hash(other) if isinstance(other, ExitPolicy) else False + + def __ne__(self, other): + return not self == other class 
MicroExitPolicy(ExitPolicy): @@ -575,10 +585,10 @@ class MicroExitPolicy(ExitPolicy): policy = policy[6:] - if not policy.startswith(' ') or (len(policy) - 1 != len(policy.lstrip())): + if not policy.startswith(' '): raise ValueError('A microdescriptor exit policy should have a space separating accept/reject from its port list: %s' % self._policy) - policy = policy[1:] + policy = policy.lstrip() # convert our port list into MicroExitPolicyRule rules = [] @@ -605,10 +615,10 @@ class MicroExitPolicy(ExitPolicy): return hash(str(self)) def __eq__(self, other): - if isinstance(other, MicroExitPolicy): - return str(self) == str(other) - else: - return False + return hash(self) == hash(other) if isinstance(other, MicroExitPolicy) else False + + def __ne__(self, other): + return not self == other class ExitPolicyRule(object): @@ -626,6 +636,9 @@ class ExitPolicyRule(object): This should be treated as an immutable object. + .. versionchanged:: 1.5.0 + Support for 'accept6/reject6' entries and '\*4/6' wildcards. 
+ :var bool is_accept: indicates if exiting is allowed or disallowed :var str address: address that this rule is for @@ -639,24 +652,27 @@ class ExitPolicyRule(object): """ def __init__(self, rule): - # policy ::= "accept" exitpattern | "reject" exitpattern + # policy ::= "accept[6]" exitpattern | "reject[6]" exitpattern # exitpattern ::= addrspec ":" portspec - if rule.startswith('accept'): - self.is_accept = True - elif rule.startswith('reject'): - self.is_accept = False + rule = stem.util.str_tools._to_unicode(rule) + + self.is_accept = rule.startswith('accept') + is_ipv6_only = rule.startswith('accept6') or rule.startswith('reject6') + + if rule.startswith('accept6') or rule.startswith('reject6'): + exitpattern = rule[7:] + elif rule.startswith('accept') or rule.startswith('reject'): + exitpattern = rule[6:] else: - raise ValueError("An exit policy must start with either 'accept' or 'reject': %s" % rule) + raise ValueError("An exit policy must start with either 'accept[6]' or 'reject[6]': %s" % rule) - exitpattern = rule[6:] - - if not exitpattern.startswith(' ') or (len(exitpattern) - 1 != len(exitpattern.lstrip())): + if not exitpattern.startswith(' '): raise ValueError('An exit policy should have a space separating its accept/reject from the exit pattern: %s' % rule) - exitpattern = exitpattern[1:] + exitpattern = exitpattern.lstrip() - if ':' not in exitpattern: + if ':' not in exitpattern or ']' in exitpattern.rsplit(':', 1)[1]: raise ValueError("An exitpattern must be of the form 'addrspec:portspec': %s" % rule) self.address = None @@ -671,15 +687,18 @@ class ExitPolicyRule(object): self._mask = None + # Malformed exit policies are rejected, but there's an exception where it's + # just skipped: when an accept6/reject6 rule has an IPv4 address... + # + # "Using an IPv4 address with accept6 or reject6 is ignored and generates + # a warning." 
+ + self._skip_rule = False + addrspec, portspec = exitpattern.rsplit(':', 1) - self._apply_addrspec(rule, addrspec) + self._apply_addrspec(rule, addrspec, is_ipv6_only) self._apply_portspec(rule, portspec) - # If true then a submask of /0 is treated by is_address_wildcard() as being - # a wildcard. - - self._submask_wildcard = True - # Flags to indicate if this rule seems to be expanded from the 'private' # keyword or tor's default policy suffix. @@ -688,20 +707,14 @@ class ExitPolicyRule(object): def is_address_wildcard(self): """ - **True** if we'll match against any address, **False** otherwise. + **True** if we'll match against **any** address, **False** otherwise. - Note that if this policy can apply to both IPv4 and IPv6 then this is - different from being for a /0 (since, for instance, 0.0.0.0/0 wouldn't - match against an IPv6 address). That said, /0 addresses are highly unusual - and most things citing exit policies are IPv4 specific anyway, making this - moot. + Note that this is different than \*4, \*6, or '/0' address which are + wildcards for only either IPv4 or IPv6. 
:returns: **bool** for if our address matching is a wildcard """ - if self._submask_wildcard and self.get_masked_bits() == 0: - return True - return self._address_type == _address_type_to_int(AddressType.WILDCARD) def is_port_wildcard(self): @@ -729,6 +742,9 @@ class ExitPolicyRule(object): :raises: **ValueError** if provided with a malformed address or port """ + if self._skip_rule: + return False + # validate our input and check if the argument doesn't match our address type if address is not None: @@ -764,7 +780,7 @@ class ExitPolicyRule(object): if address is None: fuzzy_match = True else: - comparison_addr_bin = int(stem.util.connection._get_address_binary(address), 2) + comparison_addr_bin = stem.util.connection.address_to_int(address) comparison_addr_bin &= self._get_mask_bin() if self._get_address_bin() != comparison_addr_bin: @@ -800,8 +816,8 @@ class ExitPolicyRule(object): :returns: str of our subnet mask for the address (ex. '255.255.255.0') """ - # Lazy loading our mask because it very infrequently requested. There's - # no reason to usually usse memory for it. + # Lazy loading our mask because it is very infrequently requested. There's + # no reason to usually use memory for it. 
if not self._mask: address_type = self.get_address_type() @@ -896,41 +912,30 @@ class ExitPolicyRule(object): return label - def __hash__(self): - if self._hash is None: - my_hash = 0 - - for attr in ('is_accept', 'address', 'min_port', 'max_port'): - my_hash *= 1024 - - attr_value = getattr(self, attr) - - if attr_value is not None: - my_hash += hash(attr_value) - - my_hash *= 1024 - my_hash += hash(self.get_mask(False)) - - self._hash = my_hash - - return self._hash - @lru_cache() def _get_mask_bin(self): # provides an integer representation of our mask - return int(stem.util.connection._get_address_binary(self.get_mask(False)), 2) + return int(stem.util.connection._address_to_binary(self.get_mask(False)), 2) @lru_cache() def _get_address_bin(self): # provides an integer representation of our address - return int(stem.util.connection._get_address_binary(self.address), 2) & self._get_mask_bin() + return stem.util.connection.address_to_int(self.address) & self._get_mask_bin() - def _apply_addrspec(self, rule, addrspec): + def _apply_addrspec(self, rule, addrspec, is_ipv6_only): # Parses the addrspec... # addrspec ::= "*" | ip4spec | ip6spec + # Expand IPv4 and IPv6 specific wildcards into /0 entries so we have one + # fewer bizarre special case headaches to deal with. 
+ + if addrspec == '*4': + addrspec = '0.0.0.0/0' + elif addrspec == '*6' or (addrspec == '*' and is_ipv6_only): + addrspec = '[0000:0000:0000:0000:0000:0000:0000:0000]/0' + if '/' in addrspec: self.address, addr_extra = addrspec.split('/', 1) else: @@ -945,6 +950,9 @@ class ExitPolicyRule(object): # ip4mask ::= an IPv4 mask in dotted-quad format # num_ip4_bits ::= an integer between 0 and 32 + if is_ipv6_only: + self._skip_rule = True + self._address_type = _address_type_to_int(AddressType.IPv4) if addr_extra is None: @@ -985,7 +993,7 @@ class ExitPolicyRule(object): else: raise ValueError("The '%s' isn't a number of bits: %s" % (addr_extra, rule)) else: - raise ValueError("Address isn't a wildcard, IPv4, or IPv6 address: %s" % rule) + raise ValueError("'%s' isn't a wildcard, IPv4, or IPv6 address: %s" % (addrspec, rule)) def _apply_portspec(self, rule, portspec): # Parses the portspec... @@ -1018,16 +1026,17 @@ class ExitPolicyRule(object): else: raise ValueError("Port value isn't a wildcard, integer, or range: %s" % rule) - def __eq__(self, other): - if isinstance(other, ExitPolicyRule): - # Our string representation encompasses our effective policy. Technically - # this isn't quite right since our rule attribute may differ (ie, 'accept - # 0.0.0.0/0' == 'accept 0.0.0.0/0.0.0.0' will be True), but these - # policies are effectively equivalent. 
+ def __hash__(self): + if self._hash is None: + self._hash = stem.util._hash_attr(self, 'is_accept', 'address', 'min_port', 'max_port') * 1024 + hash(self.get_mask(False)) - return hash(self) == hash(other) - else: - return False + return self._hash + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, ExitPolicyRule) else False + + def __ne__(self, other): + return not self == other def _address_type_to_int(address_type): @@ -1048,7 +1057,7 @@ class MicroExitPolicyRule(ExitPolicyRule): self.address = None # wildcard address self.min_port = min_port self.max_port = max_port - self._hash = None + self._skip_rule = False def is_address_wildcard(self): return True @@ -1063,20 +1072,13 @@ class MicroExitPolicyRule(ExitPolicyRule): return None def __hash__(self): - if self._hash is None: - my_hash = 0 + return stem.util._hash_attr(self, 'is_accept', 'min_port', 'max_port', cache = True) - for attr in ('is_accept', 'min_port', 'max_port'): - my_hash *= 1024 + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, MicroExitPolicyRule) else False - attr_value = getattr(self, attr) - - if attr_value is not None: - my_hash += hash(attr_value) - - self._hash = my_hash - - return self._hash + def __ne__(self, other): + return not self == other DEFAULT_POLICY_RULES = tuple([ExitPolicyRule(rule) for rule in ( diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py b/Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py index cf69d63..131ecd6 100644 --- a/Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015, Damian Johnson and The Tor Project +# Copyright 2015-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -6,13 +6,6 @@ Interactive interpreter for interacting with Tor directly. 
This adds usability features such as tab completion, history, and IRC-style functions (like /help). """ -__all__ = [ - 'arguments', - 'autocomplete', - 'commands', - 'help', -] - import os import sys @@ -26,13 +19,20 @@ import stem.util.term from stem.util.term import Attr, Color, format +__all__ = [ + 'arguments', + 'autocomplete', + 'commands', + 'help', +] + PROMPT = format('>>> ', Color.GREEN, Attr.BOLD, Attr.READLINE_ESCAPE) -STANDARD_OUTPUT = (Color.BLUE, ) -BOLD_OUTPUT = (Color.BLUE, Attr.BOLD) -HEADER_OUTPUT = (Color.GREEN, ) -HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD) -ERROR_OUTPUT = (Attr.BOLD, Color.RED) +STANDARD_OUTPUT = (Color.BLUE, Attr.LINES) +BOLD_OUTPUT = (Color.BLUE, Attr.BOLD, Attr.LINES) +HEADER_OUTPUT = (Color.GREEN, Attr.LINES) +HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD, Attr.LINES) +ERROR_OUTPUT = (Attr.BOLD, Color.RED, Attr.LINES) settings_path = os.path.join(os.path.dirname(__file__), 'settings.cfg') uses_settings = stem.util.conf.uses_settings('stem_interpreter', settings_path) @@ -60,7 +60,7 @@ def main(): print(stem.interpreter.arguments.get_help()) sys.exit() - if args.disable_color: + if args.disable_color or not sys.stdout.isatty(): global PROMPT stem.util.term.DISABLE_COLOR_SUPPORT = True PROMPT = '>>> ' @@ -72,22 +72,30 @@ def main(): is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real') if not is_tor_running: - if not stem.util.system.is_available('tor'): + if args.tor_path == 'tor' and not stem.util.system.is_available('tor'): print(format(msg('msg.tor_unavailable'), *ERROR_OUTPUT)) sys.exit(1) else: - print(format(msg('msg.starting_tor'), *HEADER_OUTPUT)) + if not args.run_cmd and not args.run_path: + print(format(msg('msg.starting_tor'), *HEADER_OUTPUT)) - stem.process.launch_tor_with_config( - config = { - 'SocksPort': '0', - 'ControlPort': str(args.control_port), - 'CookieAuthentication': '1', - 'ExitPolicy': 'reject *:*', - }, - completion_percent = 5, - take_ownership = True, - 
) + control_port = '9051' if args.control_port == 'default' else str(args.control_port) + + try: + stem.process.launch_tor_with_config( + config = { + 'SocksPort': '0', + 'ControlPort': control_port, + 'CookieAuthentication': '1', + 'ExitPolicy': 'reject *:*', + }, + tor_cmd = args.tor_path, + completion_percent = 5, + take_ownership = True, + ) + except OSError as exc: + print(format(msg('msg.unable_to_start_tor', error = exc), *ERROR_OUTPUT)) + sys.exit(1) control_port = (args.control_address, args.control_port) control_socket = args.control_socket @@ -115,27 +123,64 @@ def main(): readline.set_completer(autocompleter.complete) readline.set_completer_delims('\n') - interpreter = stem.interpreter.commands.ControlInterpretor(controller) + interpreter = stem.interpreter.commands.ControlInterpreter(controller) + showed_close_confirmation = False - for line in msg('msg.startup_banner').splitlines(): - line_format = HEADER_BOLD_OUTPUT if line.startswith(' ') else HEADER_OUTPUT - print(format(line, *line_format)) + if args.run_cmd: + if args.run_cmd.upper().startswith('SETEVENTS '): + # TODO: we can use a lambda here when dropping python 2.x support, but + # until then print's status as a keyword prevents it from being used in + # lambdas - print('') + def handle_event(event_message): + print(format(str(event_message), *STANDARD_OUTPUT)) - while True: + controller._handle_event = handle_event + + if sys.stdout.isatty(): + events = args.run_cmd.upper().split(' ', 1)[1] + print(format('Listening to %s events. Press any key to quit.\n' % events, *HEADER_BOLD_OUTPUT)) + + controller.msg(args.run_cmd) + + try: + raw_input() + except (KeyboardInterrupt, stem.SocketClosed): + pass + else: + interpreter.run_command(args.run_cmd, print_response = True) + elif args.run_path: try: - prompt = '... 
' if interpreter.is_multiline_context else PROMPT + for line in open(args.run_path).readlines(): + interpreter.run_command(line.strip(), print_response = True) + except IOError as exc: + print(format(msg('msg.unable_to_read_file', path = args.run_path, error = exc), *ERROR_OUTPUT)) + sys.exit(1) - if stem.prereq.is_python_3(): - user_input = input(prompt) - else: - user_input = raw_input(prompt) + else: + for line in msg('msg.startup_banner').splitlines(): + line_format = HEADER_BOLD_OUTPUT if line.startswith(' ') else HEADER_OUTPUT + print(format(line, *line_format)) - response = interpreter.run_command(user_input) + print('') - if response is not None: - print(response) - except (KeyboardInterrupt, EOFError, stem.SocketClosed) as exc: - print('') # move cursor to the following line - break + while True: + try: + prompt = '... ' if interpreter.is_multiline_context else PROMPT + user_input = input(prompt) if stem.prereq.is_python_3() else raw_input(prompt) + interpreter.run_command(user_input, print_response = True) + except stem.SocketClosed: + if showed_close_confirmation: + print(format('Unable to run tor commands. The control connection has been closed.', *ERROR_OUTPUT)) + else: + prompt = format("Tor's control port has closed. Do you want to continue this interpreter? 
(y/n) ", *HEADER_BOLD_OUTPUT) + user_input = input(prompt) if stem.prereq.is_python_3() else raw_input(prompt) + print('') # blank line + + if user_input.lower() in ('y', 'yes'): + showed_close_confirmation = True + else: + break + except (KeyboardInterrupt, EOFError, stem.SocketClosed): + print('') # move cursor to the following line + break diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/arguments.py b/Shared/lib/python3.4/site-packages/stem/interpreter/arguments.py index eadd043..db0dd48 100644 --- a/Shared/lib/python3.4/site-packages/stem/interpreter/arguments.py +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/arguments.py @@ -1,4 +1,4 @@ -# Copyright 2015, Damian Johnson and The Tor Project +# Copyright 2015-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -7,22 +7,26 @@ Commandline argument parsing for our interpreter prompt. import collections import getopt +import os import stem.interpreter import stem.util.connection DEFAULT_ARGS = { 'control_address': '127.0.0.1', - 'control_port': 9051, + 'control_port': 'default', 'user_provided_port': False, 'control_socket': '/var/run/tor/control', 'user_provided_socket': False, + 'tor_path': 'tor', + 'run_cmd': None, + 'run_path': None, 'disable_color': False, 'print_help': False, } OPT = 'i:s:h' -OPT_EXPANDED = ['interface=', 'socket=', 'no-color', 'help'] +OPT_EXPANDED = ['interface=', 'socket=', 'tor=', 'run=', 'no-color', 'help'] def parse(argv): @@ -50,7 +54,7 @@ def parse(argv): for opt, arg in recognized_args: if opt in ('-i', '--interface'): if ':' in arg: - address, port = arg.split(':', 1) + address, port = arg.rsplit(':', 1) else: address, port = None, arg @@ -68,6 +72,13 @@ def parse(argv): elif opt in ('-s', '--socket'): args['control_socket'] = arg args['user_provided_socket'] = True + elif opt in ('--tor'): + args['tor_path'] = arg + elif opt in ('--run'): + if os.path.exists(arg): + args['run_path'] = arg + else: + args['run_cmd'] = arg 
elif opt == '--no-color': args['disable_color'] = True elif opt in ('-h', '--help'): diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/autocomplete.py b/Shared/lib/python3.4/site-packages/stem/interpreter/autocomplete.py index a6d940d..6401e22 100644 --- a/Shared/lib/python3.4/site-packages/stem/interpreter/autocomplete.py +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/autocomplete.py @@ -1,16 +1,17 @@ -# Copyright 2014-2015, Damian Johnson and The Tor Project +# Copyright 2014-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ Tab completion for our interpreter prompt. """ +import stem.prereq + from stem.interpreter import uses_settings -try: - # added in python 3.2 +if stem.prereq._is_lru_cache_available(): from functools import lru_cache -except ImportError: +else: from stem.util.lru_cache import lru_cache diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/commands.py b/Shared/lib/python3.4/site-packages/stem/interpreter/commands.py index 4047517..a281c08 100644 --- a/Shared/lib/python3.4/site-packages/stem/interpreter/commands.py +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/commands.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015, Damian Johnson and The Tor Project +# Copyright 2014-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -6,7 +6,9 @@ Handles making requests and formatting the responses. 
""" import code +import contextlib import socket +import sys import stem import stem.control @@ -19,6 +21,13 @@ import stem.util.tor_tools from stem.interpreter import STANDARD_OUTPUT, BOLD_OUTPUT, ERROR_OUTPUT, uses_settings, msg from stem.util.term import format +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO + +MAX_EVENTS = 100 + def _get_fingerprint(arg, controller): """ @@ -51,7 +60,7 @@ def _get_fingerprint(arg, controller): raise ValueError("Unable to find a relay with the nickname of '%s'" % arg) elif ':' in arg or stem.util.connection.is_valid_ipv4_address(arg): if ':' in arg: - address, port = arg.split(':', 1) + address, port = arg.rsplit(':', 1) if not stem.util.connection.is_valid_ipv4_address(address): raise ValueError("'%s' isn't a valid IPv4 address" % address) @@ -84,7 +93,18 @@ def _get_fingerprint(arg, controller): raise ValueError("'%s' isn't a fingerprint, nickname, or IP address" % arg) -class ControlInterpretor(code.InteractiveConsole): +@contextlib.contextmanager +def redirect(stdout, stderr): + original = sys.stdout, sys.stderr + sys.stdout, sys.stderr = stdout, stderr + + try: + yield + finally: + sys.stdout, sys.stderr = original + + +class ControlInterpreter(code.InteractiveConsole): """ Handles issuing requests and providing nicely formed responses, with support for special irc style subcommands. 
@@ -115,7 +135,10 @@ class ControlInterpretor(code.InteractiveConsole): def handle_event_wrapper(event_message): handle_event_real(event_message) - self._received_events.append(event_message) + self._received_events.insert(0, event_message) + + if len(self._received_events) > MAX_EVENTS: + self._received_events.pop() self._controller._handle_event = handle_event_wrapper @@ -276,13 +299,14 @@ class ControlInterpretor(code.InteractiveConsole): return format(response, *STANDARD_OUTPUT) @uses_settings - def run_command(self, command, config): + def run_command(self, command, config, print_response = False): """ Runs the given command. Requests starting with a '/' are special commands to the interpreter, and anything else is sent to the control port. :param stem.control.Controller controller: tor control connection :param str command: command to be processed + :param bool print_response: prints the response to stdout if true :returns: **list** out output lines, each line being a list of (msg, format) tuples @@ -290,12 +314,9 @@ class ControlInterpretor(code.InteractiveConsole): :raises: **stem.SocketClosed** if the control connection has been severed """ - if not self._controller.is_alive(): - raise stem.SocketClosed() - # Commands fall into three categories: # - # * Interpretor commands. These start with a '/'. + # * Interpreter commands. These start with a '/'. # # * Controller commands stem knows how to handle. 
We use our Controller's # methods for these to take advantage of caching and present nicer @@ -338,17 +359,25 @@ class ControlInterpretor(code.InteractiveConsole): is_tor_command = cmd in config.get('help.usage', {}) and cmd.lower() != 'events' if self._run_python_commands and not is_tor_command: - self.is_multiline_context = code.InteractiveConsole.push(self, command) - return + console_output = StringIO() + + with redirect(console_output, console_output): + self.is_multiline_context = code.InteractiveConsole.push(self, command) + + output = console_output.getvalue().strip() else: try: output = format(self._controller.msg(command).raw_content().strip(), *STANDARD_OUTPUT) except stem.ControllerError as exc: if isinstance(exc, stem.SocketClosed): - raise exc + raise else: output = format(str(exc), *ERROR_OUTPUT) - output += '\n' # give ourselves an extra line before the next prompt + if output: + output += '\n' # give ourselves an extra line before the next prompt + + if print_response: + print(output) return output diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/help.py b/Shared/lib/python3.4/site-packages/stem/interpreter/help.py index 83db0b1..20962c4 100644 --- a/Shared/lib/python3.4/site-packages/stem/interpreter/help.py +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/help.py @@ -1,10 +1,12 @@ -# Copyright 2014-2015, Damian Johnson and The Tor Project +# Copyright 2014-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ Provides our /help responses. 
""" +import stem.prereq + from stem.interpreter import ( STANDARD_OUTPUT, BOLD_OUTPUT, @@ -15,10 +17,9 @@ from stem.interpreter import ( from stem.util.term import format -try: - # added in python 3.2 +if stem.prereq._is_lru_cache_available(): from functools import lru_cache -except ImportError: +else: from stem.util.lru_cache import lru_cache diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg b/Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg index 1bacf1c..af96d59 100644 --- a/Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg @@ -17,6 +17,8 @@ msg.help | -i, --interface [ADDRESS:]PORT change control interface from {address}:{port} | -s, --socket SOCKET_PATH attach using unix domain socket if present, | SOCKET_PATH defaults to: {socket} +| --tor PATH tor binary if tor isn't already running +| --run executes the given command or file of commands | --no-color disables colorized output | -h, --help presents this help | @@ -41,6 +43,8 @@ msg.startup_banner | msg.tor_unavailable Tor isn't running and the command currently isn't in your PATH. +msg.unable_to_start_tor Unable to start tor: {error} +msg.unable_to_read_file Unable to read {path}: {error} msg.starting_tor |Tor isn't running. Starting a temporary Tor instance for our interpreter to @@ -57,7 +61,7 @@ msg.starting_tor # Response for the '/help' command without any arguments. 
help.general -|Interpretor commands include: +|Interpreter commands include: | /help - provides information for interpreter and tor commands | /events - prints events that we've received | /info - general information for a relay @@ -319,7 +323,9 @@ autocomplete AUTHCHALLENGE autocomplete DROPGUARDS autocomplete ADD_ONION NEW:BEST autocomplete ADD_ONION NEW:RSA1024 +autocomplete ADD_ONION NEW:ED25519-V3 autocomplete ADD_ONION RSA1024: +autocomplete ADD_ONION ED25519-V3: autocomplete DEL_ONION autocomplete HSFETCH autocomplete HSPOST diff --git a/Shared/lib/python3.4/site-packages/stem/manual.py b/Shared/lib/python3.4/site-packages/stem/manual.py new file mode 100644 index 0000000..7b956aa --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/manual.py @@ -0,0 +1,810 @@ +# Copyright 2015-2018, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Information available about Tor from `its manual +`_. This provides three +methods of getting this information... + +* :func:`~stem.manual.Manual.from_cache` provides manual content bundled with + Stem. This is the fastest and most reliable method but only as up-to-date as + Stem's release. + +* :func:`~stem.manual.Manual.from_man` reads Tor's local man page for + information about it. + +* :func:`~stem.manual.Manual.from_remote` fetches the latest manual information + remotely. This is the slowest and least reliable method but provides the most + recent information about Tor. + +Manual information includes arguments, signals, and probably most usefully the +torrc configuration options. For example, say we want a little script that told +us what our torrc options do... + +.. literalinclude:: /_static/example/manual_config_options.py + :language: python + +| + +.. image:: /_static/manual_output.png + +| + +**Module Overview:** + +:: + + query - performs a query on our cached sqlite manual information + is_important - Indicates if a configuration option is of particularly common importance. 
+ download_man_page - Downloads tor's latest man page. + + Manual - Information about Tor available from its manual. + | |- from_cache - Provides manual information cached with Stem. + | |- from_man - Retrieves manual information from its man page. + | +- from_remote - Retrieves manual information remotely from tor's latest manual. + | + +- save - writes the manual contents to a given location + +.. versionadded:: 1.5.0 +""" + +import os +import shutil +import sys +import tempfile + +import stem.prereq +import stem.util +import stem.util.conf +import stem.util.enum +import stem.util.log +import stem.util.system + +try: + # added in python 2.7 + from collections import OrderedDict +except ImportError: + from stem.util.ordereddict import OrderedDict + +if stem.prereq._is_lru_cache_available(): + from functools import lru_cache +else: + from stem.util.lru_cache import lru_cache + +try: + # account for urllib's change between python 2.x and 3.x + import urllib.request as urllib +except ImportError: + import urllib2 as urllib + +Category = stem.util.enum.Enum('GENERAL', 'CLIENT', 'RELAY', 'DIRECTORY', 'AUTHORITY', 'HIDDEN_SERVICE', 'DENIAL_OF_SERVICE', 'TESTING', 'UNKNOWN') +GITWEB_MANUAL_URL = 'https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt' +CACHE_PATH = os.path.join(os.path.dirname(__file__), 'cached_manual.sqlite') +DATABASE = None # cache database connections +HAS_ENCODING_ARG = not stem.util.system.is_mac() and not stem.util.system.is_bsd() and not stem.util.system.is_slackware() + +SCHEMA_VERSION = 1 # version of our scheme, bump this if you change the following +SCHEMA = ( + 'CREATE TABLE schema(version INTEGER)', + 'INSERT INTO schema(version) VALUES (%i)' % SCHEMA_VERSION, + + 'CREATE TABLE metadata(name TEXT, synopsis TEXT, description TEXT, man_commit TEXT, stem_commit TEXT)', + 'CREATE TABLE commandline(name TEXT PRIMARY KEY, description TEXT)', + 'CREATE TABLE signals(name TEXT PRIMARY KEY, description TEXT)', + 'CREATE TABLE files(name TEXT 
PRIMARY KEY, description TEXT)', + 'CREATE TABLE torrc(key TEXT PRIMARY KEY, name TEXT, category TEXT, usage TEXT, summary TEXT, description TEXT, position INTEGER)', +) + +CATEGORY_SECTIONS = OrderedDict(( + ('GENERAL OPTIONS', Category.GENERAL), + ('CLIENT OPTIONS', Category.CLIENT), + ('SERVER OPTIONS', Category.RELAY), + ('DIRECTORY SERVER OPTIONS', Category.DIRECTORY), + ('DIRECTORY AUTHORITY SERVER OPTIONS', Category.AUTHORITY), + ('HIDDEN SERVICE OPTIONS', Category.HIDDEN_SERVICE), + ('DENIAL OF SERVICE MITIGATION OPTIONS', Category.DENIAL_OF_SERVICE), + ('TESTING NETWORK OPTIONS', Category.TESTING), +)) + + +class SchemaMismatch(IOError): + """ + Database schema doesn't match what Stem supports. + + .. versionadded:: 1.6.0 + + :var int database_schema: schema of the database + :var tuple supported_schemas: schemas library supports + """ + + def __init__(self, message, database_schema, library_schema): + super(SchemaMismatch, self).__init__(message) + self.database_schema = database_schema + self.library_schema = library_schema + + +def query(query, *param): + """ + Performs the given query on our sqlite manual cache. This database should + be treated as being read-only. File permissions generally enforce this, and + in the future will be enforced by this function as well. + + :: + + >>> import stem.manual + >>> print(stem.manual.query('SELECT description FROM torrc WHERE key=?', 'CONTROLSOCKET').fetchone()[0]) + Like ControlPort, but listens on a Unix domain socket, rather than a TCP socket. 0 disables ControlSocket. (Unix and Unix-like systems only.) (Default: 0) + + .. 
versionadded:: 1.6.0 + + :param str query: query to run on the cache + :param list param: query parameters + + :returns: :class:`sqlite3.Cursor` with the query results + + :raises: + * **ImportError** if the sqlite3 module is unavailable + * **sqlite3.OperationalError** if query fails + """ + + if not stem.prereq.is_sqlite_available(): + raise ImportError('Querying requires the sqlite3 module') + + import sqlite3 + + # The only reason to explicitly close the sqlite connection is to ensure + # transactions are committed. Since we're only using read-only access this + # doesn't matter, and can allow interpreter shutdown to do the needful. + # + # TODO: When we only support python 3.4+ we can use sqlite's uri argument + # to enforce a read-only connection... + # + # https://docs.python.org/3/library/sqlite3.html#sqlite3.connect + + global DATABASE + + if DATABASE is None: + DATABASE = sqlite3.connect(CACHE_PATH) + + return DATABASE.execute(query, param) + + +class ConfigOption(object): + """ + Tor configuration attribute found in its torrc. 
+ + :var str name: name of the configuration option + :var stem.manual.Category category: category the config option was listed + under, this is Category.UNKNOWN if we didn't recognize the category + :var str usage: arguments accepted by the option + :var str summary: brief description of what the option does + :var str description: longer manual description with details + """ + + def __init__(self, name, category = Category.UNKNOWN, usage = '', summary = '', description = ''): + self.name = name + self.category = category + self.usage = usage + self.summary = summary + self.description = description + + def __hash__(self): + return stem.util._hash_attr(self, 'name', 'category', 'usage', 'summary', 'description', cache = True) + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, ConfigOption) else False + + def __ne__(self, other): + return not self == other + + +@lru_cache() +def _config(lowercase = True): + """ + Provides a dictionary for our settings.cfg. This has a couple categories... 
+ + * manual.important (list) - configuration options considered to be important + * manual.summary.* (str) - summary descriptions of config options + + :param bool lowercase: uses lowercase keys if **True** to allow for case + insensitive lookups + """ + + config = stem.util.conf.Config() + config_path = os.path.join(os.path.dirname(__file__), 'settings.cfg') + + try: + config.load(config_path) + config_dict = dict([(key.lower() if lowercase else key, config.get_value(key)) for key in config.keys() if key.startswith('manual.summary.')]) + config_dict['manual.important'] = [name.lower() if lowercase else name for name in config.get_value('manual.important', [], multiple = True)] + return config_dict + except Exception as exc: + stem.util.log.warn("BUG: stem failed to load its internal manual information from '%s': %s" % (config_path, exc)) + return {} + + +def _manual_differences(previous_manual, new_manual): + """ + Provides a description of how two manuals differ. + """ + + lines = [] + + for attr in ('name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options'): + previous_attr = getattr(previous_manual, attr) + new_attr = getattr(new_manual, attr) + + if previous_attr != new_attr: + lines.append("* Manual's %s attribute changed\n" % attr) + + if attr in ('name', 'synopsis', 'description'): + lines.append(' Previously...\n\n%s\n' % previous_attr) + lines.append(' Updating to...\n\n%s' % new_attr) + elif attr == 'config_options': + for config_name, config_attr in new_attr.items(): + previous = previous_attr.get(config_name) + + if previous is None: + lines.append(' adding new config option => %s' % config_name) + elif config_attr != previous: + for attr in ('name', 'category', 'usage', 'summary', 'description'): + if getattr(config_attr, attr) != getattr(previous, attr): + lines.append(' modified %s (%s) => %s' % (config_name, attr, getattr(config_attr, attr))) + + for config_name in 
set(previous_attr.keys()).difference(new_attr.keys()): + lines.append(' removing config option => %s' % config_name) + else: + added_items = set(new_attr.items()).difference(previous_attr.items()) + removed_items = set(previous_attr.items()).difference(new_attr.items()) + + for added_item in added_items: + lines.append(' adding %s => %s' % added_item) + + for removed_item in removed_items: + lines.append(' removing %s => %s' % removed_item) + + lines.append('\n') + + return '\n'.join(lines) + + +def is_important(option): + """ + Indicates if a configuration option of particularly common importance or not. + + :param str option: tor configuration option to check + + :returns: **bool** that's **True** if this is an important option and + **False** otherwise + """ + + return option.lower() in _config()['manual.important'] + + +def download_man_page(path = None, file_handle = None, url = GITWEB_MANUAL_URL, timeout = 20): + """ + Downloads tor's latest man page from `gitweb.torproject.org + `_. This method is + both slow and unreliable - please see the warnings on + :func:`~stem.manual.Manual.from_remote`. 
+ + :param str path: path to save tor's man page to + :param file file_handle: file handler to save tor's man page to + :param str url: url to download tor's asciidoc manual from + :param int timeout: seconds to wait before timing out the request + + :raises: **IOError** if unable to retrieve the manual + """ + + if not path and not file_handle: + raise ValueError("Either the path or file_handle we're saving to must be provided") + elif not stem.util.system.is_available('a2x'): + raise IOError('We require a2x from asciidoc to provide a man page') + + dirpath = tempfile.mkdtemp() + asciidoc_path = os.path.join(dirpath, 'tor.1.txt') + manual_path = os.path.join(dirpath, 'tor.1') + + try: + try: + with open(asciidoc_path, 'wb') as asciidoc_file: + request = urllib.urlopen(url, timeout = timeout) + shutil.copyfileobj(request, asciidoc_file) + except: + exc = sys.exc_info()[1] + raise IOError("Unable to download tor's manual from %s to %s: %s" % (url, asciidoc_path, exc)) + + try: + stem.util.system.call('a2x -f manpage %s' % asciidoc_path) + + if not os.path.exists(manual_path): + raise OSError('no man page was generated') + except stem.util.system.CallError as exc: + raise IOError("Unable to run '%s': %s" % (exc.command, exc.stderr)) + + if path: + try: + path_dir = os.path.dirname(path) + + if not os.path.exists(path_dir): + os.makedirs(path_dir) + + shutil.copyfile(manual_path, path) + except OSError as exc: + raise IOError(exc) + + if file_handle: + with open(manual_path, 'rb') as manual_file: + shutil.copyfileobj(manual_file, file_handle) + file_handle.flush() + finally: + shutil.rmtree(dirpath) + + +class Manual(object): + """ + Parsed tor man page. Tor makes no guarantees about its man page format so + this may not always be compatible. If not you can use the cached manual + information stored with Stem. + + This does not include every bit of information from the tor manual. For + instance, I've excluded the 'THE CONFIGURATION FILE FORMAT' section. 
If + there's a part you'd find useful then `file an issue + `_ and we can + add it. + + :var str name: brief description of the tor command + :var str synopsis: brief tor command usage + :var str description: general description of what tor does + + :var collections.OrderedDict commandline_options: mapping of commandline arguments to their descripton + :var collections.OrderedDict signals: mapping of signals tor accepts to their description + :var collections.OrderedDict files: mapping of file paths to their description + + :var collections.OrderedDict config_options: :class:`~stem.manual.ConfigOption` tuples for tor configuration options + + :var str man_commit: latest tor commit editing the man page when this + information was cached + :var str stem_commit: stem commit to cache this manual information + """ + + def __init__(self, name, synopsis, description, commandline_options, signals, files, config_options): + self.name = name + self.synopsis = synopsis + self.description = description + self.commandline_options = OrderedDict(commandline_options) + self.signals = OrderedDict(signals) + self.files = OrderedDict(files) + self.config_options = OrderedDict(config_options) + self.man_commit = None + self.stem_commit = None + self.schema = None + + @staticmethod + def from_cache(path = None): + """ + Provides manual information cached with Stem. Unlike + :func:`~stem.manual.Manual.from_man` and + :func:`~stem.manual.Manual.from_remote` this doesn't have any system + requirements, and is faster too. Only drawback is that this manual + content is only as up to date as the Stem release we're using. + + .. versionchanged:: 1.6.0 + Added support for sqlite cache. Support for + :class:`~stem.util.conf.Config` caches will be dropped in Stem 2.x. 
+ + :param str path: cached manual content to read, if not provided this uses + the bundled manual information + + :returns: :class:`~stem.manual.Manual` with our bundled manual information + + :raises: + * **ImportError** if cache is sqlite and the sqlite3 module is + unavailable + * **IOError** if a **path** was provided and we were unable to read + it or the schema is out of date + """ + + # TODO: drop _from_config_cache() with stem 2.x + + if path is None: + path = CACHE_PATH + + if path is not None and path.endswith('.sqlite'): + return Manual._from_sqlite_cache(path) + else: + return Manual._from_config_cache(path) + + @staticmethod + def _from_sqlite_cache(path): + if not stem.prereq.is_sqlite_available(): + raise ImportError('Reading a sqlite cache requires the sqlite3 module') + + import sqlite3 + + if not os.path.exists(path): + raise IOError("%s doesn't exist" % path) + + with sqlite3.connect(path) as conn: + try: + schema = conn.execute('SELECT version FROM schema').fetchone()[0] + + if schema != SCHEMA_VERSION: + raise SchemaMismatch("Stem's current manual schema version is %s, but %s was version %s" % (SCHEMA_VERSION, path, schema), schema, (SCHEMA_VERSION,)) + + name, synopsis, description, man_commit, stem_commit = conn.execute('SELECT name, synopsis, description, man_commit, stem_commit FROM metadata').fetchone() + except sqlite3.OperationalError as exc: + raise IOError('Failed to read database metadata from %s: %s' % (path, exc)) + + commandline = dict(conn.execute('SELECT name, description FROM commandline').fetchall()) + signals = dict(conn.execute('SELECT name, description FROM signals').fetchall()) + files = dict(conn.execute('SELECT name, description FROM files').fetchall()) + + config_options = OrderedDict() + + for entry in conn.execute('SELECT name, category, usage, summary, description FROM torrc ORDER BY position').fetchall(): + option, category, usage, summary, option_description = entry + config_options[option] = ConfigOption(option, 
category, usage, summary, option_description) + + manual = Manual(name, synopsis, description, commandline, signals, files, config_options) + manual.man_commit = man_commit + manual.stem_commit = stem_commit + manual.schema = schema + + return manual + + @staticmethod + def _from_config_cache(path): + conf = stem.util.conf.Config() + conf.load(path, commenting = False) + + config_options = OrderedDict() + + for key in conf.keys(): + if key.startswith('config_options.'): + key = key.split('.')[1] + + if key not in config_options: + config_options[key] = ConfigOption( + conf.get('config_options.%s.name' % key, ''), + conf.get('config_options.%s.category' % key, ''), + conf.get('config_options.%s.usage' % key, ''), + conf.get('config_options.%s.summary' % key, ''), + conf.get('config_options.%s.description' % key, '') + ) + + manual = Manual( + conf.get('name', ''), + conf.get('synopsis', ''), + conf.get('description', ''), + conf.get('commandline_options', OrderedDict()), + conf.get('signals', OrderedDict()), + conf.get('files', OrderedDict()), + config_options, + ) + + manual.man_commit = conf.get('man_commit', None) + manual.stem_commit = conf.get('stem_commit', None) + + return manual + + @staticmethod + def from_man(man_path = 'tor'): + """ + Reads and parses a given man page. + + On OSX the man command doesn't have an '--encoding' argument so its results + may not quite match other platforms. For instance, it normalizes long + dashes into '--'. 
+ + :param str man_path: path argument for 'man', for example you might want + '/path/to/tor/doc/tor.1' to read from tor's git repository + + :returns: :class:`~stem.manual.Manual` for the system's man page + + :raises: **IOError** if unable to retrieve the manual + """ + + man_cmd = 'man %s -P cat %s' % ('--encoding=ascii' if HAS_ENCODING_ARG else '', man_path) + + try: + man_output = stem.util.system.call(man_cmd, env = {'MANWIDTH': '10000000'}) + except OSError as exc: + raise IOError("Unable to run '%s': %s" % (man_cmd, exc)) + + categories, config_options = _get_categories(man_output), OrderedDict() + + for category_header, category_enum in CATEGORY_SECTIONS.items(): + _add_config_options(config_options, category_enum, categories.get(category_header, [])) + + for category in categories: + if category.endswith(' OPTIONS') and category not in CATEGORY_SECTIONS and category not in ('COMMAND-LINE OPTIONS', 'NON-PERSISTENT OPTIONS'): + _add_config_options(config_options, Category.UNKNOWN, categories.get(category, [])) + + return Manual( + _join_lines(categories.get('NAME', [])), + _join_lines(categories.get('SYNOPSIS', [])), + _join_lines(categories.get('DESCRIPTION', [])), + _get_indented_descriptions(categories.get('COMMAND-LINE OPTIONS', [])), + _get_indented_descriptions(categories.get('SIGNALS', [])), + _get_indented_descriptions(categories.get('FILES', [])), + config_options, + ) + + @staticmethod + def from_remote(timeout = 60): + """ + Reads and parses the latest tor man page `from gitweb.torproject.org + `_. Note that + while convenient, this reliance on GitWeb means you should alway call with + a fallback, such as... + + :: + + try: + manual = stem.manual.from_remote() + except IOError: + manual = stem.manual.from_cache() + + In addition to our GitWeb dependency this requires 'a2x' which is part of + `asciidoc `_ and... isn't quick. + Personally this takes ~7.41s, breaking down for me as follows... 
+ + * 1.67s to download tor.1.txt + * 5.57s to convert the asciidoc to a man page + * 0.17s for stem to read and parse the manual + + :param int timeout: seconds to wait before timing out the request + + :returns: latest :class:`~stem.manual.Manual` available for tor + + :raises: **IOError** if unable to retrieve the manual + """ + + with tempfile.NamedTemporaryFile() as tmp: + download_man_page(file_handle = tmp, timeout = timeout) + return Manual.from_man(tmp.name) + + def save(self, path): + """ + Persists the manual content to a given location. + + .. versionchanged:: 1.6.0 + Added support for sqlite cache. Support for + :class:`~stem.util.conf.Config` caches will be dropped in Stem 2.x. + + :param str path: path to save our manual content to + + :raises: + * **ImportError** if saving as sqlite and the sqlite3 module is + unavailable + * **IOError** if unsuccessful + """ + + # TODO: drop _save_as_config() with stem 2.x + + if path.endswith('.sqlite'): + return self._save_as_sqlite(path) + else: + return self._save_as_config(path) + + def _save_as_sqlite(self, path): + if not stem.prereq.is_sqlite_available(): + raise ImportError('Saving a sqlite cache requires the sqlite3 module') + + import sqlite3 + tmp_path = path + '.new' + + if os.path.exists(tmp_path): + os.remove(tmp_path) + + with sqlite3.connect(tmp_path) as conn: + for cmd in SCHEMA: + conn.execute(cmd) + + conn.execute('INSERT INTO metadata(name, synopsis, description, man_commit, stem_commit) VALUES (?,?,?,?,?)', (self.name, self.synopsis, self.description, self.man_commit, self.stem_commit)) + + for k, v in self.commandline_options.items(): + conn.execute('INSERT INTO commandline(name, description) VALUES (?,?)', (k, v)) + + for k, v in self.signals.items(): + conn.execute('INSERT INTO signals(name, description) VALUES (?,?)', (k, v)) + + for k, v in self.files.items(): + conn.execute('INSERT INTO files(name, description) VALUES (?,?)', (k, v)) + + for i, v in 
enumerate(self.config_options.values()): + conn.execute('INSERT INTO torrc(key, name, category, usage, summary, description, position) VALUES (?,?,?,?,?,?,?)', (v.name.upper(), v.name, v.category, v.usage, v.summary, v.description, i)) + + if os.path.exists(path): + os.remove(path) + + os.rename(tmp_path, path) + + def _save_as_config(self, path): + conf = stem.util.conf.Config() + conf.set('name', self.name) + conf.set('synopsis', self.synopsis) + conf.set('description', self.description) + + if self.man_commit: + conf.set('man_commit', self.man_commit) + + if self.stem_commit: + conf.set('stem_commit', self.stem_commit) + + for k, v in self.commandline_options.items(): + conf.set('commandline_options', '%s => %s' % (k, v), overwrite = False) + + for k, v in self.signals.items(): + conf.set('signals', '%s => %s' % (k, v), overwrite = False) + + for k, v in self.files.items(): + conf.set('files', '%s => %s' % (k, v), overwrite = False) + + for k, v in self.config_options.items(): + conf.set('config_options.%s.category' % k, v.category) + conf.set('config_options.%s.name' % k, v.name) + conf.set('config_options.%s.usage' % k, v.usage) + conf.set('config_options.%s.summary' % k, v.summary) + conf.set('config_options.%s.description' % k, v.description) + + conf.save(path) + + def __hash__(self): + return stem.util._hash_attr(self, 'name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options', cache = True) + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, Manual) else False + + def __ne__(self, other): + return not self == other + + +def _get_categories(content): + """ + The man page is headers followed by an indented section. First pass gets + the mapping of category titles to their lines. 
+ """ + + # skip header and footer lines + + if content and 'TOR(1)' in content[0]: + content = content[1:] + + if content and content[-1].startswith('Tor'): + content = content[:-1] + + categories = OrderedDict() + category, lines = None, [] + + for line in content: + # replace non-ascii characters + # + # \u2019 - smart single quote + # \u2014 - extra long dash + # \xb7 - centered dot + + char_for = chr if stem.prereq.is_python_3() else unichr + line = line.replace(char_for(0x2019), "'").replace(char_for(0x2014), '-').replace(char_for(0xb7), '*') + + if line and not line.startswith(' '): + if category: + if lines and lines[-1] == '': + lines = lines[:-1] # sections end with an extra empty line + + categories[category] = lines + + category, lines = line.strip(), [] + else: + if line.startswith(' '): + line = line[7:] # contents of a section have a seven space indentation + + lines.append(line) + + if category: + categories[category] = lines + + return categories + + +def _get_indented_descriptions(lines): + """ + Parses the commandline argument and signal sections. These are options + followed by an indented description. For example... + + :: + + -f FILE + Specify a new configuration file to contain further Tor configuration + options OR pass - to make Tor read its configuration from standard + input. (Default: /usr/local/etc/tor/torrc, or $HOME/.torrc if that file + is not found) + + There can be additional paragraphs not related to any particular argument but + ignoring those. + """ + + options, last_arg = OrderedDict(), None + + for line in lines: + if line and not line.startswith(' '): + options[line], last_arg = [], line + elif last_arg and line.startswith(' '): + options[last_arg].append(line[4:]) + + return dict([(arg, ' '.join(desc_lines)) for arg, desc_lines in options.items() if desc_lines]) + + +def _add_config_options(config_options, category, lines): + """ + Parses a section of tor configuration options. 
These have usage information, + followed by an indented description. For instance... + + :: + + ConnLimit NUM + The minimum number of file descriptors that must be available to the + Tor process before it will start. Tor will ask the OS for as many file + descriptors as the OS will allow (you can find this by "ulimit -H -n"). + If this number is less than ConnLimit, then Tor will refuse to start. + + + You probably don't need to adjust this. It has no effect on Windows + since that platform lacks getrlimit(). (Default: 1000) + """ + + last_option, usage, description = None, None, [] + + # Drop the section description. Each ends with a paragraph saying 'The + # following options...'. + + desc_paragraph_index = None + + for i, line in enumerate(lines): + if 'The following options' in line: + desc_paragraph_index = i + break + + if desc_paragraph_index is not None: + lines = lines[desc_paragraph_index:] # trim to the description paragrah + lines = lines[lines.index(''):] # drop the paragraph + + for line in lines: + if line and not line.startswith(' '): + if last_option: + summary = _config().get('manual.summary.%s' % last_option.lower(), '') + config_options[last_option] = ConfigOption(last_option, category, usage, summary, _join_lines(description).strip()) + + if ' ' in line: + last_option, usage = line.split(' ', 1) + else: + last_option, usage = line, '' + + description = [] + else: + if line.startswith(' '): + line = line[4:] + + description.append(line) + + if last_option: + summary = _config().get('manual.summary.%s' % last_option.lower(), '') + config_options[last_option] = ConfigOption(last_option, category, usage, summary, _join_lines(description).strip()) + + +def _join_lines(lines): + """ + Simple join, except we want empty lines to still provide a newline. 
+ """ + + result = [] + + for line in lines: + if not line: + if result and result[-1] != '\n': + result.append('\n') + else: + result.append(line + '\n') + + return ''.join(result).strip() diff --git a/Shared/lib/python3.4/site-packages/stem/prereq.py b/Shared/lib/python3.4/site-packages/stem/prereq.py index 5ed87ef..ab2acba 100644 --- a/Shared/lib/python3.4/site-packages/stem/prereq.py +++ b/Shared/lib/python3.4/site-packages/stem/prereq.py @@ -1,34 +1,35 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ Checks for stem dependencies. We require python 2.6 or greater (including the -3.x series). Other requirements for complete functionality are... +3.x series), but note we'll be bumping our requirements to python 2.7 in stem +2.0. Other requirements for complete functionality are... -* pycrypto module +* cryptography module * validating descriptor signature integrity :: check_requirements - checks for minimum requirements for running stem - - is_python_27 - checks if python 2.7 or later is available is_python_3 - checks if python 3.0 or later is available - - is_crypto_available - checks if the pycrypto module is available + is_sqlite_available - checks if the sqlite3 module is available + is_crypto_available - checks if the cryptography module is available + is_zstd_available - checks if the zstd module is available + is_lzma_available - checks if the lzma module is available + is_mock_available - checks if the mock module is available """ +import functools import inspect +import platform import sys -try: - # added in python 3.2 - from functools import lru_cache -except ImportError: - from stem.util.lru_cache import lru_cache - -CRYPTO_UNAVAILABLE = "Unable to import the pycrypto module. Because of this we'll be unable to verify descriptor signature integrity. 
You can get pycrypto from: https://www.dlitz.net/software/pycrypto/" +CRYPTO_UNAVAILABLE = "Unable to import the cryptography module. Because of this we'll be unable to verify descriptor signature integrity. You can get cryptography from: https://pypi.python.org/pypi/cryptography" +ZSTD_UNAVAILABLE = 'ZSTD compression requires the zstandard module (https://pypi.python.org/pypi/zstandard)' +LZMA_UNAVAILABLE = 'LZMA compression requires the lzma module (https://docs.python.org/3/library/lzma.html)' +PYNACL_UNAVAILABLE = "Unable to import the pynacl module. Because of this we'll be unable to verify descriptor ed25519 certificate integrity. You can get pynacl from https://pypi.python.org/pypi/PyNaCl/" def check_requirements(): @@ -46,10 +47,26 @@ def check_requirements(): raise ImportError('stem requires python version 2.6 or greater') +def _is_python_26(): + """ + Checks if we're running python 2.6. This isn't for users as it'll be removed + in stem 2.0 (when python 2.6 support goes away). + + :returns: **True** if we're running python 2.6, **False** otherwise + """ + + major_version, minor_version = sys.version_info[0:2] + + return major_version == 2 and minor_version == 6 + + def is_python_27(): """ Checks if we're running python 2.7 or above (including the 3.x series). + .. deprecated:: 1.5.0 + Function lacks much utility and will be eventually removed. + :returns: **True** if we meet this requirement and **False** otherwise """ @@ -68,28 +85,103 @@ def is_python_3(): return sys.version_info[0] == 3 -@lru_cache() -def is_crypto_available(): +def is_pypy(): """ - Checks if the pycrypto functions we use are available. This is used for - verifying relay descriptor signatures. + Checks if we're running PyPy. - :returns: **True** if we can use pycrypto and **False** otherwise + .. 
versionadded:: 1.7.0 + + :returns: **True** if running pypy, **False** otherwise """ - from stem.util import log + return platform.python_implementation() == 'PyPy' + + +def is_sqlite_available(): + """ + Checks if the sqlite3 module is available. Usually this is built in, but some + platforms such as FreeBSD and Gentoo exclude it by default. + + .. versionadded:: 1.6.0 + + :returns: **True** if we can use the sqlite3 module and **False** otherwise + """ try: - from Crypto.PublicKey import RSA - from Crypto.Util import asn1 - from Crypto.Util.number import long_to_bytes + import sqlite3 return True except ImportError: + return False + + +def is_crypto_available(): + """ + Checks if the cryptography functions we use are available. This is used for + verifying relay descriptor signatures. + + :returns: **True** if we can use the cryptography module and **False** + otherwise + """ + + try: + from cryptography.utils import int_from_bytes, int_to_bytes + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives.asymmetric import rsa + from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes + from cryptography.hazmat.primitives.serialization import load_der_public_key + + if not hasattr(rsa.RSAPrivateKey, 'sign'): + raise ImportError() + + return True + except ImportError: + from stem.util import log log.log_once('stem.prereq.is_crypto_available', log.INFO, CRYPTO_UNAVAILABLE) return False -@lru_cache() +def is_zstd_available(): + """ + Checks if the `zstd module `_ is + available. + + .. versionadded:: 1.7.0 + + :returns: **True** if we can use the zstd module and **False** otherwise + """ + + try: + # Unfortunately the zstandard module uses the same namespace as another + # zstd module (https://pypi.python.org/pypi/zstd), so we need to + # differentiate them. 
+ + import zstd + return hasattr(zstd, 'ZstdDecompressor') + except ImportError: + from stem.util import log + log.log_once('stem.prereq.is_zstd_available', log.INFO, ZSTD_UNAVAILABLE) + return False + + +def is_lzma_available(): + """ + Checks if the `lzma module `_ is + available. This was added as a builtin in Python 3.3. + + .. versionadded:: 1.7.0 + + :returns: **True** if we can use the lzma module and **False** otherwise + """ + + try: + import lzma + return True + except ImportError: + from stem.util import log + log.log_once('stem.prereq.is_lzma_available', log.INFO, LZMA_UNAVAILABLE) + return False + + def is_mock_available(): """ Checks if the mock module is available. In python 3.3 and up it is a builtin @@ -130,3 +222,37 @@ def is_mock_available(): return True except ImportError: return False + + +def _is_lru_cache_available(): + """ + Functools added lru_cache to the standard library in Python 3.2. Prior to + this using a bundled implementation. We're also using this with Python 3.5 + due to a buggy implementation. (:trac:`26412`) + """ + + major_version, minor_version = sys.version_info[0:2] + + if major_version == 3 and minor_version == 5: + return False + else: + return hasattr(functools, 'lru_cache') + + +def _is_pynacl_available(): + """ + Checks if the pynacl functions we use are available. This is used for + verifying ed25519 certificates in relay descriptor signatures. 
+ + :returns: **True** if we can use pynacl and **False** otherwise + """ + + from stem.util import log + + try: + from nacl import encoding + from nacl import signing + return True + except ImportError: + log.log_once('stem.prereq._is_pynacl_available', log.INFO, PYNACL_UNAVAILABLE) + return False diff --git a/Shared/lib/python3.4/site-packages/stem/process.py b/Shared/lib/python3.4/site-packages/stem/process.py index b8b1f92..e2b36ec 100644 --- a/Shared/lib/python3.4/site-packages/stem/process.py +++ b/Shared/lib/python3.4/site-packages/stem/process.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -23,6 +23,7 @@ import re import signal import subprocess import tempfile +import threading import stem.prereq import stem.util.str_tools @@ -33,7 +34,7 @@ NO_TORRC = '' DEFAULT_INIT_TIMEOUT = 90 -def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, stdin = None): +def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True, stdin = None): """ Initializes a tor process. This blocks until initialization completes or we error out. @@ -47,8 +48,14 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce default, but if you have a 'Log' entry in your torrc then you'll also need 'Log NOTICE stdout'. - Note: The timeout argument does not work on Windows, and relies on the global - state of the signal module. + Note: The timeout argument does not work on Windows or when outside the + main thread, and relies on the global state of the signal module. + + .. versionchanged:: 1.6.0 + Allowing the timeout argument to be a float. + + .. 
versionchanged:: 1.7.0 + Added the **close_output** argument. :param str tor_cmd: command for starting tor :param list args: additional arguments for tor @@ -62,6 +69,8 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce :param bool take_ownership: asserts ownership over the tor process so it aborts if this python process terminates or a :class:`~stem.control.Controller` we establish to it disconnects + :param bool close_output: closes tor's stdout and stderr streams when + bootstrapping is complete if true :param str stdin: content to provide on stdin :returns: **subprocess.Popen** instance for the tor subprocess @@ -71,6 +80,14 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce """ if stem.util.system.is_windows(): + if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT: + raise OSError('You cannot launch tor with a timeout on Windows') + + timeout = None + elif threading.current_thread().__class__.__name__ != '_MainThread': + if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT: + raise OSError('Launching tor with a timeout can only be done in the main thread') + timeout = None # sanity check that we got a tor binary @@ -105,27 +122,26 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce if take_ownership: runtime_args += ['__OwningControllerProcess', str(os.getpid())] - tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE) - - if stdin: - tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin)) - tor_process.stdin.close() - - if timeout: - def timeout_handler(signum, frame): - # terminates the uninitialized tor process and raise on timeout - - tor_process.kill() - raise OSError('reached a %i second timeout without success' % timeout) - - signal.signal(signal.SIGALRM, timeout_handler) - signal.alarm(timeout) - - bootstrap_line = re.compile('Bootstrapped ([0-9]+)%: ') - problem_line = 
re.compile('\[(warn|err)\] (.*)$') - last_problem = 'Timed out' + tor_process = None try: + tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE) + + if stdin: + tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin)) + tor_process.stdin.close() + + if timeout: + def timeout_handler(signum, frame): + raise OSError('reached a %i second timeout without success' % timeout) + + signal.signal(signal.SIGALRM, timeout_handler) + signal.setitimer(signal.ITIMER_REAL, timeout) + + bootstrap_line = re.compile('Bootstrapped ([0-9]+)%: ') + problem_line = re.compile('\[(warn|err)\] (.*)$') + last_problem = 'Timed out' + while True: # Tor's stdout will be read as ASCII bytes. This is fine for python 2, but # in python 3 that means it'll mismatch with other operations (for instance @@ -139,7 +155,6 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce # this will provide empty results if the process is terminated if not init_line: - tor_process.kill() # ... 
but best make sure raise OSError('Process terminated: %s' % last_problem) # provide the caller with the initialization message if they want it @@ -162,12 +177,22 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce msg = msg.split(': ')[-1].strip() last_problem = msg + except: + if tor_process: + tor_process.kill() # don't leave a lingering process + tor_process.wait() + + raise finally: if timeout: signal.alarm(0) # stop alarm - tor_process.stdout.close() - tor_process.stderr.close() + if tor_process and close_output: + if tor_process.stdout: + tor_process.stdout.close() + + if tor_process.stderr: + tor_process.stderr.close() if temp_file: try: @@ -176,7 +201,7 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce pass -def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False): +def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True): """ Initializes a tor process, like :func:`~stem.process.launch_tor`, but with a customized configuration. This writes a temporary torrc to disk, launches @@ -196,6 +221,9 @@ def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, in }, ) + .. versionchanged:: 1.7.0 + Added the **close_output** argument. 
+ :param dict config: configuration options, such as "{'ControlPort': '9051'}", values can either be a **str** or **list of str** if for multiple values :param str tor_cmd: command for starting tor @@ -208,6 +236,8 @@ def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, in :param bool take_ownership: asserts ownership over the tor process so it aborts if this python process terminates or a :class:`~stem.control.Controller` we establish to it disconnects + :param bool close_output: closes tor's stdout and stderr streams when + bootstrapping is complete if true :returns: **subprocess.Popen** instance for the tor subprocess @@ -252,7 +282,7 @@ def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, in config_str += '%s %s\n' % (key, value) if use_stdin: - return launch_tor(tor_cmd, ['-f', '-'], None, completion_percent, init_msg_handler, timeout, take_ownership, stdin = config_str) + return launch_tor(tor_cmd, ['-f', '-'], None, completion_percent, init_msg_handler, timeout, take_ownership, close_output, stdin = config_str) else: torrc_descriptor, torrc_path = tempfile.mkstemp(prefix = 'torrc-', text = True) diff --git a/Shared/lib/python3.4/site-packages/stem/response/__init__.py b/Shared/lib/python3.4/site-packages/stem/response/__init__.py index df534a9..96436b4 100644 --- a/Shared/lib/python3.4/site-packages/stem/response/__init__.py +++ b/Shared/lib/python3.4/site-packages/stem/response/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -16,9 +16,7 @@ Parses replies from the control socket. 
|- from_str - provides a ControlMessage for the given string |- is_ok - response had a 250 status |- content - provides the parsed message content - |- raw_content - unparsed socket data - |- __str__ - content stripped of protocol formatting - +- __iter__ - ControlLine entries for the content of the message + +- raw_content - unparsed socket data ControlLine - String subclass with methods for parsing controller responses. |- remainder - provides the unparsed content @@ -30,6 +28,15 @@ Parses replies from the control socket. +- pop_mapping - removes and returns the next entry as a KEY=VALUE mapping """ +import codecs +import io +import re +import threading + +import stem.socket +import stem.util +import stem.util.str_tools + __all__ = [ 'add_onion', 'events', @@ -43,28 +50,8 @@ __all__ = [ 'SingleLineResponse', ] -import re -import threading - -try: - from StringIO import StringIO -except ImportError: - from io import StringIO - -import stem.socket - KEY_ARG = re.compile('^(\S+)=') -# Escape sequences from the 'esc_for_log' function of tor's 'common/util.c'. -# It's hard to tell what controller functions use this in practice, but direct -# users are... 
-# - 'COOKIEFILE' field of PROTOCOLINFO responses -# - logged messages about bugs -# - the 'getinfo_helper_listeners' function of control.c - -CONTROL_ESCAPES = {r'\\': '\\', r'\"': '\"', r'\'': '\'', - r'\r': '\r', r'\n': '\n', r'\t': '\t'} - def convert(response_type, message, **kwargs): """ @@ -76,12 +63,13 @@ def convert(response_type, message, **kwargs): =================== ===== response_type Class =================== ===== - **GETINFO** :class:`stem.response.getinfo.GetInfoResponse` - **GETCONF** :class:`stem.response.getconf.GetConfResponse` - **MAPADDRESS** :class:`stem.response.mapaddress.MapAddressResponse` - **EVENT** :class:`stem.response.events.Event` subclass - **PROTOCOLINFO** :class:`stem.response.protocolinfo.ProtocolInfoResponse` + **ADD_ONION** :class:`stem.response.add_onion.AddOnionResponse` **AUTHCHALLENGE** :class:`stem.response.authchallenge.AuthChallengeResponse` + **EVENT** :class:`stem.response.events.Event` subclass + **GETCONF** :class:`stem.response.getconf.GetConfResponse` + **GETINFO** :class:`stem.response.getinfo.GetInfoResponse` + **MAPADDRESS** :class:`stem.response.mapaddress.MapAddressResponse` + **PROTOCOLINFO** :class:`stem.response.protocolinfo.ProtocolInfoResponse` **SINGLELINE** :class:`stem.response.SingleLineResponse` =================== ===== @@ -119,11 +107,11 @@ def convert(response_type, message, **kwargs): 'ADD_ONION': stem.response.add_onion.AddOnionResponse, 'AUTHCHALLENGE': stem.response.authchallenge.AuthChallengeResponse, 'EVENT': stem.response.events.Event, - 'GETINFO': stem.response.getinfo.GetInfoResponse, 'GETCONF': stem.response.getconf.GetConfResponse, + 'GETINFO': stem.response.getinfo.GetInfoResponse, 'MAPADDRESS': stem.response.mapaddress.MapAddressResponse, - 'SINGLELINE': SingleLineResponse, 'PROTOCOLINFO': stem.response.protocolinfo.ProtocolInfoResponse, + 'SINGLELINE': SingleLineResponse, } try: @@ -140,23 +128,37 @@ class ControlMessage(object): Message from the control socket. 
This is iterable and can be stringified for individual message components stripped of protocol formatting. Messages are never empty. + + .. versionchanged:: 1.7.0 + Implemented equality and hashing. """ @staticmethod - def from_str(content, msg_type = None, **kwargs): + def from_str(content, msg_type = None, normalize = False, **kwargs): """ Provides a ControlMessage for the given content. .. versionadded:: 1.1.0 + .. versionchanged:: 1.6.0 + Added the normalize argument. + :param str content: message to construct the message from :param str msg_type: type of tor reply to parse the content as + :param bool normalize: ensures expected carriage return and ending newline + are present :param kwargs: optional keyword arguments to be passed to the parser method :returns: stem.response.ControlMessage instance """ - msg = stem.socket.recv_message(StringIO(content)) + if normalize: + if not content.endswith('\n'): + content += '\n' + + content = re.sub('([\r]?)\n', '\r\n', content) + + msg = stem.socket.recv_message(io.BytesIO(stem.util.str_tools._to_bytes(content))) if msg_type is not None: convert(msg_type, msg, **kwargs) @@ -169,6 +171,8 @@ class ControlMessage(object): self._parsed_content = parsed_content self._raw_content = raw_content + self._str = None + self._hash = stem.util._hash_attr(self, '_raw_content') def is_ok(self): """ @@ -245,7 +249,10 @@ class ControlMessage(object): formatting. """ - return '\n'.join(list(self)) + if self._str is None: + self._str = '\n'.join(list(self)) + + return self._str def __iter__(self): """ @@ -295,6 +302,15 @@ class ControlMessage(object): return ControlLine(content) + def __hash__(self): + return self._hash + + def __eq__(self, other): + return hash(self) == hash(other) if isinstance(other, ControlMessage) else False + + def __ne__(self, other): + return not self == other + class ControlLine(str): """ @@ -336,7 +352,7 @@ class ControlLine(str): """ Checks if our next entry is a quoted value or not. 
- :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + :param bool escaped: unescapes the string :returns: **True** if the next entry can be parsed as a quoted value, **False** otherwise """ @@ -350,7 +366,7 @@ class ControlLine(str): :param str key: checks that the key matches this value, skipping the check if **None** :param bool quoted: checks that the mapping is to a quoted value - :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + :param bool escaped: unescapes the string :returns: **True** if the next entry can be parsed as a key=value mapping, **False** otherwise @@ -408,7 +424,7 @@ class ControlLine(str): "this has a \\" and \\\\ in it" :param bool quoted: parses the next entry as a quoted value, removing the quotes - :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + :param bool escaped: unescapes the string :returns: **str** of the next space separated entry @@ -418,17 +434,21 @@ class ControlLine(str): """ with self._remainder_lock: - next_entry, remainder = _parse_entry(self._remainder, quoted, escaped) + next_entry, remainder = _parse_entry(self._remainder, quoted, escaped, False) self._remainder = remainder return next_entry - def pop_mapping(self, quoted = False, escaped = False): + def pop_mapping(self, quoted = False, escaped = False, get_bytes = False): """ Parses the next space separated entry as a KEY=VALUE mapping, removing it and the space from our remaining content. + .. versionchanged:: 1.6.0 + Added the get_bytes argument. 
+ :param bool quoted: parses the value as being quoted, removing the quotes - :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + :param bool escaped: unescapes the string + :param bool get_bytes: provides **bytes** for the **value** rather than a **str** :returns: **tuple** of the form (key, value) @@ -450,18 +470,18 @@ class ControlLine(str): key = key_match.groups()[0] remainder = self._remainder[key_match.end():] - next_entry, remainder = _parse_entry(remainder, quoted, escaped) + next_entry, remainder = _parse_entry(remainder, quoted, escaped, get_bytes) self._remainder = remainder return (key, next_entry) -def _parse_entry(line, quoted, escaped): +def _parse_entry(line, quoted, escaped, get_bytes): """ Parses the next entry from the given space separated content. :param str line: content to be parsed :param bool quoted: parses the next entry as a quoted value, removing the quotes - :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + :param bool escaped: unescapes the string :returns: **tuple** of the form (entry, remainder) @@ -491,7 +511,26 @@ def _parse_entry(line, quoted, escaped): next_entry, remainder = remainder, '' if escaped: - next_entry = _unescape(next_entry) + # Tor does escaping in its 'esc_for_log' function of 'common/util.c'. It's + # hard to tell what controller functions use this in practice, but direct + # users are... + # + # * 'COOKIEFILE' field of PROTOCOLINFO responses + # * logged messages about bugs + # * the 'getinfo_helper_listeners' function of control.c + # + # Ideally we'd use "next_entry.decode('string_escape')" but it was removed + # in python 3.x and 'unicode_escape' isn't quite the same... 
+ # + # https://stackoverflow.com/questions/14820429/how-do-i-decodestring-escape-in-python3 + + next_entry = codecs.escape_decode(next_entry)[0] + + if stem.prereq.is_python_3() and not get_bytes: + next_entry = stem.util.str_tools._to_unicode(next_entry) # normalize back to str + + if get_bytes: + next_entry = stem.util.str_tools._to_bytes(next_entry) return (next_entry, remainder.lstrip()) @@ -501,7 +540,7 @@ def _get_quote_indices(line, escaped): Provides the indices of the next two quotes in the given content. :param str line: content to be parsed - :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + :param bool escaped: unescapes the string :returns: **tuple** of two ints, indices being -1 if a quote doesn't exist """ @@ -522,34 +561,6 @@ def _get_quote_indices(line, escaped): return tuple(indices) -def _unescape(entry): - # Unescapes the given string with the mappings in CONTROL_ESCAPES. - # - # This can't be a simple series of str.replace() calls because replacements - # need to be excluded from consideration for further unescaping. For - # instance, '\\t' should be converted to '\t' rather than a tab. - - def _pop_with_unescape(entry): - # Pop either the first character or the escape sequence conversion the - # entry starts with. This provides a tuple of... - # - # (unescaped prefix, remaining entry) - - for esc_sequence, replacement in CONTROL_ESCAPES.items(): - if entry.startswith(esc_sequence): - return (replacement, entry[len(esc_sequence):]) - - return (entry[0], entry[1:]) - - result = [] - - while entry: - prefix, entry = _pop_with_unescape(entry) - result.append(prefix) - - return ''.join(result) - - class SingleLineResponse(ControlMessage): """ Reply to a request that performs an action rather than querying data. 
These diff --git a/Shared/lib/python3.4/site-packages/stem/response/add_onion.py b/Shared/lib/python3.4/site-packages/stem/response/add_onion.py index 1472668..60e77ae 100644 --- a/Shared/lib/python3.4/site-packages/stem/response/add_onion.py +++ b/Shared/lib/python3.4/site-packages/stem/response/add_onion.py @@ -1,4 +1,4 @@ -# Copyright 2015, Damian Johnson and The Tor Project +# Copyright 2015-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information import stem.response @@ -12,17 +12,20 @@ class AddOnionResponse(stem.response.ControlMessage): :var str private_key: base64 encoded hidden service private key :var str private_key_type: crypto used to generate the hidden service private key (such as RSA1024) + :var dict client_auth: newly generated client credentials the service accepts """ def _parse_message(self): # Example: # 250-ServiceID=gfzprpioee3hoppz # 250-PrivateKey=RSA1024:MIICXgIBAAKBgQDZvYVxv... + # 250-ClientAuth=bob:l4BT016McqV2Oail+Bwe6w # 250 OK self.service_id = None self.private_key = None self.private_key_type = None + self.client_auth = {} if not self.is_ok(): raise stem.ProtocolError("ADD_ONION response didn't have an OK status: %s" % self) @@ -41,3 +44,9 @@ class AddOnionResponse(stem.response.ControlMessage): raise stem.ProtocolError("ADD_ONION PrivateKey lines should be of the form 'PrivateKey=[type]:[key]: %s" % self) self.private_key_type, self.private_key = value.split(':', 1) + elif key == 'ClientAuth': + if ':' not in value: + raise stem.ProtocolError("ADD_ONION ClientAuth lines should be of the form 'ClientAuth=[username]:[credential]: %s" % self) + + username, credential = value.split(':', 1) + self.client_auth[username] = credential diff --git a/Shared/lib/python3.4/site-packages/stem/response/authchallenge.py b/Shared/lib/python3.4/site-packages/stem/response/authchallenge.py index 60f3997..4364798 100644 --- a/Shared/lib/python3.4/site-packages/stem/response/authchallenge.py +++ 
b/Shared/lib/python3.4/site-packages/stem/response/authchallenge.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information import binascii @@ -41,7 +41,7 @@ class AuthChallengeResponse(stem.response.ControlMessage): if not stem.util.tor_tools.is_hex_digits(value, 64): raise stem.ProtocolError('SERVERHASH has an invalid value: %s' % value) - self.server_hash = binascii.a2b_hex(stem.util.str_tools._to_bytes(value)) + self.server_hash = binascii.unhexlify(stem.util.str_tools._to_bytes(value)) else: raise stem.ProtocolError('Missing SERVERHASH mapping: %s' % line) @@ -51,6 +51,6 @@ class AuthChallengeResponse(stem.response.ControlMessage): if not stem.util.tor_tools.is_hex_digits(value, 64): raise stem.ProtocolError('SERVERNONCE has an invalid value: %s' % value) - self.server_nonce = binascii.a2b_hex(stem.util.str_tools._to_bytes(value)) + self.server_nonce = binascii.unhexlify(stem.util.str_tools._to_bytes(value)) else: raise stem.ProtocolError('Missing SERVERNONCE mapping: %s' % line) diff --git a/Shared/lib/python3.4/site-packages/stem/response/events.py b/Shared/lib/python3.4/site-packages/stem/response/events.py index 9c38649..64ed250 100644 --- a/Shared/lib/python3.4/site-packages/stem/response/events.py +++ b/Shared/lib/python3.4/site-packages/stem/response/events.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information import io @@ -8,10 +8,11 @@ import time import stem import stem.control import stem.descriptor.router_status_entry +import stem.prereq import stem.response +import stem.util import stem.version -from stem import str_type, int_type from stem.util import connection, log, str_tools, tor_tools # Matches keyword=value arguments. 
This can't be a simple "(.*)=(.*)" pattern @@ -21,6 +22,11 @@ from stem.util import connection, log, str_tools, tor_tools KW_ARG = re.compile('^(.*) ([A-Za-z0-9_]+)=(\S*)$') QUOTED_KW_ARG = re.compile('^(.*) ([A-Za-z0-9_]+)="(.*)"$') CELL_TYPE = re.compile('^[a-z0-9_]+$') +PARSE_NEWCONSENSUS_EVENTS = True + +# TODO: We can remove the following when we drop python2.6 support. + +INT_TYPE = int if stem.prereq.is_python_3() else long class Event(stem.response.ControlMessage): @@ -65,6 +71,9 @@ class Event(stem.response.ControlMessage): self._parse() + def __hash__(self): + return stem.util._hash_attr(self, 'arrived_at', parent = stem.response.ControlMessage, cache = True) + def _parse_standard_attr(self): """ Most events are of the form... @@ -126,6 +135,25 @@ class Event(stem.response.ControlMessage): for controller_attr_name, attr_name in self._KEYWORD_ARGS.items(): setattr(self, attr_name, self.keyword_args.get(controller_attr_name)) + def _iso_timestamp(self, timestamp): + """ + Parses an iso timestamp (ISOTime2Frac in the control-spec). + + :param str timestamp: timestamp to parse + + :returns: **datetime** with the parsed timestamp + + :raises: :class:`stem.ProtocolError` if timestamp is malformed + """ + + if timestamp is None: + return None + + try: + return str_tools._parse_iso_timestamp(timestamp) + except ValueError as exc: + raise stem.ProtocolError('Unable to parse timestamp (%s): %s' % (exc, self)) + # method overwritten by our subclasses for special handling that they do def _parse(self): pass @@ -142,7 +170,7 @@ class Event(stem.response.ControlMessage): attr_values = getattr(self, attr) if attr_values: - if isinstance(attr_values, (bytes, str_type)): + if stem.util._is_str(attr_values): attr_values = [attr_values] for value in attr_values: @@ -163,7 +191,7 @@ class AddrMapEvent(Event): Added the cached attribute. 
:var str hostname: address being resolved - :var str destination: destionation of the resolution, this is usually an ip, + :var str destination: destination of the resolution, this is usually an ip, but could be a hostname if TrackHostExits is enabled or **NONE** if the resolution failed :var datetime expiry: expiration time of the resolution in local time @@ -212,7 +240,11 @@ class AuthDirNewDescEvent(Event): descriptors. The descriptor type contained within this event is unspecified so the descriptor contents are left unparsed. - The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha. + The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha and + removed in 0.3.2.1-alpha. (:spec:`6e887ba`) + + .. deprecated:: 1.6.0 + Tor dropped this event as of version 0.3.2.1. (:spec:`6e887ba`) :var stem.AuthDescriptorAction action: what is being done with the descriptor :var str message: explanation of why we chose this action @@ -245,8 +277,8 @@ class BandwidthEvent(Event): The BW event was one of the first Control Protocol V1 events and was introduced in tor version 0.1.1.1-alpha. 
- :var long read: bytes received by tor that second - :var long written: bytes sent by tor that second + :var int read: bytes received by tor that second + :var int written: bytes sent by tor that second """ _POSITIONAL_ARGS = ('read', 'written') @@ -259,8 +291,8 @@ class BandwidthEvent(Event): elif not self.read.isdigit() or not self.written.isdigit(): raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self) - self.read = int_type(self.read) - self.written = int_type(self.written) + self.read = INT_TYPE(self.read) + self.written = INT_TYPE(self.written) class BuildTimeoutSetEvent(Event): @@ -365,16 +397,11 @@ class CircuitEvent(Event): def _parse(self): self.path = tuple(stem.control._parse_circ_path(self.path)) + self.created = self._iso_timestamp(self.created) if self.build_flags is not None: self.build_flags = tuple(self.build_flags.split(',')) - if self.created is not None: - try: - self.created = str_tools._parse_iso_timestamp(self.created) - except ValueError as exc: - raise stem.ProtocolError('Unable to parse create date (%s): %s' % (exc, self)) - if not tor_tools.is_valid_circuit_id(self.id): raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) @@ -386,35 +413,15 @@ class CircuitEvent(Event): self._log_if_unrecognized('remote_reason', stem.CircClosureReason) def _compare(self, other, method): + # sorting circuit events by their identifier + if not isinstance(other, CircuitEvent): return False - for attr in ('id', 'status', 'path', 'build_flags', 'purpose', 'hs_state', 'rend_query', 'created', 'reason', 'remote_reason', 'socks_username', 'socks_port'): - my_attr = getattr(self, attr) - other_attr = getattr(other, attr) + my_id = getattr(self, 'id') + their_id = getattr(other, 'id') - # Our id attribute is technically a string, but Tor conventionally uses - # ints. 
Attempt to handle as ints if that's the case so we get numeric - # ordering. - - if attr == 'id' and my_attr and other_attr: - if my_attr.isdigit() and other_attr.isdigit(): - my_attr = int(my_attr) - other_attr = int(other_attr) - - if my_attr is None: - my_attr = '' - - if other_attr is None: - other_attr = '' - - if my_attr != other_attr: - return method(my_attr, other_attr) - - return True - - def __eq__(self, other): - return self._compare(other, lambda s, o: s == o) + return method(my_id, their_id) if my_id != their_id else method(hash(self), hash(other)) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) @@ -458,16 +465,11 @@ class CircMinorEvent(Event): def _parse(self): self.path = tuple(stem.control._parse_circ_path(self.path)) + self.created = self._iso_timestamp(self.created) if self.build_flags is not None: self.build_flags = tuple(self.build_flags.split(',')) - if self.created is not None: - try: - self.created = str_tools._parse_iso_timestamp(self.created) - except ValueError as exc: - raise stem.ProtocolError('Unable to parse create date (%s): %s' % (exc, self)) - if not tor_tools.is_valid_circuit_id(self.id): raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) @@ -545,15 +547,26 @@ class ConfChangedEvent(Event): The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha. - :var dict config: mapping of configuration options to their new values - (**None** if the option is being unset) + .. deprecated:: 1.7.0 + Deprecated the *config* attribute. Some tor configuration options (like + ExitPolicy) can have multiple values, so a simple 'str => str' mapping + meant that we only provided the last. + + .. versionchanged:: 1.7.0 + Added the changed and unset attributes. 
+ + :var dict changed: mapping of configuration options to a list of their new + values + :var list unset: configuration options that have been unset """ _SKIP_PARSING = True _VERSION_ADDED = stem.version.Requirement.EVENT_CONF_CHANGED def _parse(self): - self.config = {} + self.changed = {} + self.unset = [] + self.config = {} # TODO: remove in stem 2.0 # Skip first and last line since they're the header and footer. For # instance... @@ -567,8 +580,10 @@ class ConfChangedEvent(Event): for line in str(self).splitlines()[1:-1]: if '=' in line: key, value = line.split('=', 1) + self.changed.setdefault(key, []).append(value) else: key, value = line, None + self.unset.append(key) self.config[key] = value @@ -630,6 +645,12 @@ class HSDescEvent(Event): .. versionchanged:: 1.3.0 Added the reason attribute. + .. versionchanged:: 1.5.0 + Added the replica attribute. + + .. versionchanged:: 1.7.0 + Added the index attribute. + :var stem.HSDescAction action: what is happening with the descriptor :var str address: hidden service address :var stem.HSAuth authentication: service's authentication method @@ -638,21 +659,30 @@ class HSDescEvent(Event): :var str directory_nickname: hidden service directory's nickname if it was provided :var str descriptor_id: descriptor identifier :var stem.HSDescReason reason: reason the descriptor failed to be fetched + :var int replica: replica number the descriptor involves + :var str index: computed index of the HSDir the descriptor was uploaded to or fetched from """ _VERSION_ADDED = stem.version.Requirement.EVENT_HS_DESC _POSITIONAL_ARGS = ('action', 'address', 'authentication', 'directory', 'descriptor_id') - _KEYWORD_ARGS = {'REASON': 'reason'} + _KEYWORD_ARGS = {'REASON': 'reason', 'REPLICA': 'replica', 'HSDIR_INDEX': 'index'} def _parse(self): self.directory_fingerprint = None self.directory_nickname = None - try: - self.directory_fingerprint, self.directory_nickname = \ - stem.control._parse_circ_entry(self.directory) - except 
stem.ProtocolError: - raise stem.ProtocolError("HS_DESC's directory doesn't match a ServerSpec: %s" % self) + if self.directory != 'UNKNOWN': + try: + self.directory_fingerprint, self.directory_nickname = \ + stem.control._parse_circ_entry(self.directory) + except stem.ProtocolError: + raise stem.ProtocolError("HS_DESC's directory doesn't match a ServerSpec: %s" % self) + + if self.replica is not None: + if not self.replica.isdigit(): + raise stem.ProtocolError('HS_DESC event got a non-numeric replica count (%s): %s' % (self.replica, self)) + + self.replica = int(self.replica) self._log_if_unrecognized('action', stem.HSDescAction) self._log_if_unrecognized('authentication', stem.HSAuth) @@ -744,11 +774,27 @@ class NetworkStatusEvent(Event): self.desc = list(stem.descriptor.router_status_entry._parse_file( io.BytesIO(str_tools._to_bytes(content)), - True, + False, entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3, )) +class NetworkLivenessEvent(Event): + """ + Event for when the network becomes reachable or unreachable. + + The NETWORK_LIVENESS event was introduced in tor version 0.2.7.2-alpha. + + .. versionadded:: 1.5.0 + + :var str status: status of the network ('UP', 'DOWN', or possibly other + statuses in the future) + """ + + _VERSION_ADDED = stem.version.Requirement.EVENT_NETWORK_LIVENESS + _POSITIONAL_ARGS = ('status',) + + class NewConsensusEvent(Event): """ Event for when we have a new consensus. This is similar to @@ -758,6 +804,19 @@ class NewConsensusEvent(Event): The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha. + .. versionchanged:: 1.6.0 + Added the consensus_content attribute. + + .. deprecated:: 1.6.0 + In Stem 2.0 we'll remove the desc attribute, so this event only provides + the unparsed consensus. Callers can then parse it if they'd like. To drop + parsing before then you can set... 
+ + :: + + stem.response.events.PARSE_NEWCONSENSUS_EVENTS = False + + :var str consensus_content: consensus content :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors """ @@ -765,16 +824,19 @@ class NewConsensusEvent(Event): _VERSION_ADDED = stem.version.Requirement.EVENT_NEWCONSENSUS def _parse(self): - content = str(self).lstrip('NEWCONSENSUS\n').rstrip('\nOK') + self.consensus_content = str(self).lstrip('NEWCONSENSUS\n').rstrip('\nOK') # TODO: For stem 2.0.0 consider changing 'desc' to 'descriptors' to match # our other events. - self.desc = list(stem.descriptor.router_status_entry._parse_file( - io.BytesIO(str_tools._to_bytes(content)), - True, - entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3, - )) + if PARSE_NEWCONSENSUS_EVENTS: + self.desc = list(stem.descriptor.router_status_entry._parse_file( + io.BytesIO(str_tools._to_bytes(self.consensus_content)), + False, + entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3, + )) + else: + self.desc = None class NewDescEvent(Event): @@ -846,7 +908,7 @@ class ORConnEvent(Event): if ':' not in self.endpoint: raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self) - address, port = self.endpoint.split(':', 1) + address, port = self.endpoint.rsplit(':', 1) if not connection.is_valid_port(port): raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self) @@ -993,7 +1055,7 @@ class StreamEvent(Event): if ':' not in self.source_addr: raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self) - address, port = self.source_addr.split(':', 1) + address, port = self.source_addr.rsplit(':', 1) if not connection.is_valid_port(port, allow_zero = True): raise stem.ProtocolError("Source location's port is invalid: %s" % self) @@ -1018,12 +1080,16 @@ class StreamBwEvent(Event): The STREAM_BW event was introduced in tor version 0.1.2.8-beta. + .. 
versionchanged:: 1.6.0 + Added the time attribute. + :var str id: stream identifier - :var long written: bytes sent by the application - :var long read: bytes received by the application + :var int written: bytes sent by the application + :var int read: bytes received by the application + :var datetime time: time when the measurement was recorded """ - _POSITIONAL_ARGS = ('id', 'written', 'read') + _POSITIONAL_ARGS = ('id', 'written', 'read', 'time') _VERSION_ADDED = stem.version.Requirement.EVENT_STREAM_BW def _parse(self): @@ -1036,8 +1102,9 @@ class StreamBwEvent(Event): elif not self.read.isdigit() or not self.written.isdigit(): raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self) - self.read = int_type(self.read) - self.written = int_type(self.written) + self.read = INT_TYPE(self.read) + self.written = INT_TYPE(self.written) + self.time = self._iso_timestamp(self.time) class TransportLaunchedEvent(Event): @@ -1081,15 +1148,19 @@ class ConnectionBandwidthEvent(Event): .. versionadded:: 1.2.0 + .. versionchanged:: 1.6.0 + Renamed 'type' attribute to 'conn_type' so it wouldn't be override parent + class attribute with the same name. 
+ :var str id: connection identifier - :var stem.ConnectionType type: connection type - :var long read: bytes received by tor that second - :var long written: bytes sent by tor that second + :var stem.ConnectionType conn_type: connection type + :var int read: bytes received by tor that second + :var int written: bytes sent by tor that second """ _KEYWORD_ARGS = { 'ID': 'id', - 'TYPE': 'type', + 'TYPE': 'conn_type', 'READ': 'read', 'WRITTEN': 'written', } @@ -1099,8 +1170,8 @@ class ConnectionBandwidthEvent(Event): def _parse(self): if not self.id: raise stem.ProtocolError('CONN_BW event is missing its id') - elif not self.type: - raise stem.ProtocolError('CONN_BW event is missing its type') + elif not self.conn_type: + raise stem.ProtocolError('CONN_BW event is missing its connection type') elif not self.read: raise stem.ProtocolError('CONN_BW event is missing its read value') elif not self.written: @@ -1110,10 +1181,10 @@ class ConnectionBandwidthEvent(Event): elif not tor_tools.is_valid_connection_id(self.id): raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) - self.read = int_type(self.read) - self.written = int_type(self.written) + self.read = INT_TYPE(self.read) + self.written = INT_TYPE(self.written) - self._log_if_unrecognized('type', stem.ConnectionType) + self._log_if_unrecognized('conn_type', stem.ConnectionType) class CircuitBandwidthEvent(Event): @@ -1125,15 +1196,32 @@ class CircuitBandwidthEvent(Event): .. versionadded:: 1.2.0 + .. versionchanged:: 1.6.0 + Added the time attribute. + + .. versionchanged:: 1.7.0 + Added the delivered_read, delivered_written, overhead_read, and + overhead_written attributes. 
+ :var str id: circuit identifier - :var long read: bytes received by tor that second - :var long written: bytes sent by tor that second + :var int read: bytes received by tor that second + :var int written: bytes sent by tor that second + :var int delivered_read: user payload received by tor that second + :var int delivered_written: user payload sent by tor that second + :var int overhead_read: padding so read cells will have a fixed length + :var int overhead_written: padding so written cells will have a fixed length + :var datetime time: time when the measurement was recorded """ _KEYWORD_ARGS = { 'ID': 'id', 'READ': 'read', 'WRITTEN': 'written', + 'DELIVERED_READ': 'delivered_read', + 'DELIVERED_WRITTEN': 'delivered_written', + 'OVERHEAD_READ': 'overhead_read', + 'OVERHEAD_WRITTEN': 'overhead_written', + 'TIME': 'time', } _VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_BW @@ -1145,13 +1233,28 @@ class CircuitBandwidthEvent(Event): raise stem.ProtocolError('CIRC_BW event is missing its read value') elif not self.written: raise stem.ProtocolError('CIRC_BW event is missing its written value') - elif not self.read.isdigit() or not self.written.isdigit(): - raise stem.ProtocolError("A CIRC_BW event's bytes sent and received should be a positive numeric value, received: %s" % self) + elif not self.read.isdigit(): + raise stem.ProtocolError("A CIRC_BW event's bytes received should be a positive numeric value, received: %s" % self) + elif not self.written.isdigit(): + raise stem.ProtocolError("A CIRC_BW event's bytes sent should be a positive numeric value, received: %s" % self) + elif self.delivered_read and not self.delivered_read.isdigit(): + raise stem.ProtocolError("A CIRC_BW event's delivered bytes received should be a positive numeric value, received: %s" % self) + elif self.delivered_written and not self.delivered_written.isdigit(): + raise stem.ProtocolError("A CIRC_BW event's delivered bytes sent should be a positive numeric value, received: %s" % self) 
+ elif self.overhead_read and not self.overhead_read.isdigit(): + raise stem.ProtocolError("A CIRC_BW event's overhead bytes received should be a positive numeric value, received: %s" % self) + elif self.overhead_written and not self.overhead_written.isdigit(): + raise stem.ProtocolError("A CIRC_BW event's overhead bytes sent should be a positive numeric value, received: %s" % self) elif not tor_tools.is_valid_circuit_id(self.id): raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) - self.read = int_type(self.read) - self.written = int_type(self.written) + self.time = self._iso_timestamp(self.time) + + for attr in ('read', 'written', 'delivered_read', 'delivered_written', 'overhead_read', 'overhead_written'): + value = getattr(self, attr) + + if value: + setattr(self, attr, INT_TYPE(value)) class CellStatsEvent(Event): @@ -1280,7 +1383,7 @@ def _parse_cell_type_mapping(mapping): if ':' not in entry: raise stem.ProtocolError("Mappings are expected to be of the form 'key:value', got '%s': %s" % (entry, mapping)) - key, value = entry.split(':', 1) + key, value = entry.rsplit(':', 1) if not CELL_TYPE.match(key): raise stem.ProtocolError("Key had invalid characters, got '%s': %s" % (key, mapping)) @@ -1311,6 +1414,7 @@ EVENT_TYPE_TO_CLASS = { 'HS_DESC': HSDescEvent, 'HS_DESC_CONTENT': HSDescContentEvent, 'INFO': LogEvent, + 'NETWORK_LIVENESS': NetworkLivenessEvent, 'NEWCONSENSUS': NewConsensusEvent, 'NEWDESC': NewDescEvent, 'NOTICE': LogEvent, diff --git a/Shared/lib/python3.4/site-packages/stem/response/getconf.py b/Shared/lib/python3.4/site-packages/stem/response/getconf.py index ce14553..0c62ba5 100644 --- a/Shared/lib/python3.4/site-packages/stem/response/getconf.py +++ b/Shared/lib/python3.4/site-packages/stem/response/getconf.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing 
information import stem.response @@ -48,6 +48,14 @@ class GetConfResponse(stem.response.ControlMessage): else: key, value = (line.pop(), None) + # Tor's CommaList and RouterList have a bug where they map to an empty + # string when undefined rather than None... + # + # https://trac.torproject.org/projects/tor/ticket/18263 + + if value == '': + value = None + if key not in self.entries: self.entries[key] = [] diff --git a/Shared/lib/python3.4/site-packages/stem/response/getinfo.py b/Shared/lib/python3.4/site-packages/stem/response/getinfo.py index 0798593..00c77b9 100644 --- a/Shared/lib/python3.4/site-packages/stem/response/getinfo.py +++ b/Shared/lib/python3.4/site-packages/stem/response/getinfo.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information import stem.response @@ -30,12 +30,20 @@ class GetInfoResponse(stem.response.ControlMessage): if not self.is_ok() or not remaining_lines.pop() == b'OK': unrecognized_keywords = [] + error_code, error_msg = None, None + for code, _, line in self.content(): + if code != '250': + error_code = code + error_msg = line + if code == '552' and line.startswith('Unrecognized key "') and line.endswith('"'): unrecognized_keywords.append(line[18:-1]) if unrecognized_keywords: raise stem.InvalidArguments('552', 'GETINFO request contained unrecognized keywords: %s\n' % ', '.join(unrecognized_keywords), unrecognized_keywords) + elif error_code: + raise stem.OperationFailed(error_code, error_msg) else: raise stem.ProtocolError("GETINFO response didn't have an OK status:\n%s" % self) diff --git a/Shared/lib/python3.4/site-packages/stem/response/mapaddress.py b/Shared/lib/python3.4/site-packages/stem/response/mapaddress.py index 5d2b418..417449b 100644 --- a/Shared/lib/python3.4/site-packages/stem/response/mapaddress.py +++ b/Shared/lib/python3.4/site-packages/stem/response/mapaddress.py @@ -1,4 +1,4 @@ -# Copyright 
2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information import stem.response diff --git a/Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py b/Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py index 97201de..bf55931 100644 --- a/Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py +++ b/Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py @@ -1,9 +1,13 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information +import sys + +import stem.prereq import stem.response import stem.socket import stem.version +import stem.util.str_tools from stem.connection import AuthMethod from stem.util import log @@ -101,8 +105,12 @@ class ProtocolInfoResponse(stem.response.ControlMessage): auth_methods.append(AuthMethod.UNKNOWN) # parse optional COOKIEFILE mapping (quoted and can have escapes) + if line.is_next_mapping('COOKIEFILE', True, True): - self.cookie_path = line.pop_mapping(True, True)[1] + self.cookie_path = line.pop_mapping(True, True, get_bytes = True)[1].decode(sys.getfilesystemencoding()) + + if stem.prereq.is_python_3(): + self.cookie_path = stem.util.str_tools._to_unicode(self.cookie_path) # normalize back to str elif line_type == 'VERSION': # Line format: # VersionLine = "250-VERSION" SP "Tor=" TorVersion OptArguments CRLF diff --git a/Shared/lib/python3.4/site-packages/stem/settings.cfg b/Shared/lib/python3.4/site-packages/stem/settings.cfg new file mode 100644 index 0000000..c64541c --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/settings.cfg @@ -0,0 +1,413 @@ +################################################################################ +# +# Information related to tor configuration options and events... +# +# * manual.important Most commonly used configuration options. 
+# * manual.summary Short summary describing the option. +# * event.description Descriptions for the events. +# +################################################################################ + +manual.important BandwidthRate +manual.important BandwidthBurst +manual.important RelayBandwidthRate +manual.important RelayBandwidthBurst +manual.important ControlPort +manual.important HashedControlPassword +manual.important CookieAuthentication +manual.important DataDirectory +manual.important Log +manual.important RunAsDaemon +manual.important User + +manual.important Bridge +manual.important ExcludeNodes +manual.important MaxCircuitDirtiness +manual.important SocksPort +manual.important UseBridges + +manual.important BridgeRelay +manual.important ContactInfo +manual.important ExitPolicy +manual.important MyFamily +manual.important Nickname +manual.important ORPort +manual.important AccountingMax +manual.important AccountingStart + +manual.important DirPortFrontPage +manual.important DirPort + +manual.important HiddenServiceDir +manual.important HiddenServicePort + +# General Config Options + +manual.summary.BandwidthRate Average bandwidth usage limit +manual.summary.BandwidthBurst Maximum bandwidth usage limit +manual.summary.MaxAdvertisedBandwidth Limit for the bandwidth we advertise as being available for relaying +manual.summary.RelayBandwidthRate Average bandwidth usage limit for relaying +manual.summary.RelayBandwidthBurst Maximum bandwidth usage limit for relaying +manual.summary.PerConnBWRate Average relayed bandwidth limit per connection +manual.summary.PerConnBWBurst Maximum relayed bandwidth limit per connection +manual.summary.ClientTransportPlugin Proxy when establishing bridge connections +manual.summary.ServerTransportPlugin Proxy when servicing bridge connections +manual.summary.ServerTransportListenAddr Endpoint for bridge's pluggable transport proxy +manual.summary.ServerTransportOptions Additional arguments for bridge's proxy 
+manual.summary.ExtORPort Endpoint for extended ORPort connections +manual.summary.ExtORPortCookieAuthFile Location of the ExtORPort's authentication cookie +manual.summary.ExtORPortCookieAuthFileGroupReadable Group read permissions for the ExtORPort's authentication cookie +manual.summary.ConnLimit Minimum number of file descriptors for Tor to start +manual.summary.DisableNetwork Don't accept non-controller connections +manual.summary.ConstrainedSockets Shrinks sockets to ConstrainedSockSize +manual.summary.ConstrainedSockSize Limit for the received and transmit buffers of sockets +manual.summary.ControlPort Port providing access to tor controllers (nyx, vidalia, etc) +manual.summary.ControlSocket Socket providing controller access +manual.summary.ControlSocketsGroupWritable Group read permissions for the control socket +manual.summary.HashedControlPassword Hash of the password for authenticating to the control port +manual.summary.CookieAuthentication If set, authenticates controllers via a cookie +manual.summary.CookieAuthFile Location of the authentication cookie +manual.summary.CookieAuthFileGroupReadable Group read permissions for the authentication cookie +manual.summary.ControlPortWriteToFile Path for a file tor writes containing its control port +manual.summary.ControlPortFileGroupReadable Group read permissions for the control port file +manual.summary.DataDirectory Location for storing runtime data (state, keys, etc) +manual.summary.DataDirectoryGroupReadable Group read permissions for the data directory +manual.summary.CacheDirectory Directory where information is cached +manual.summary.CacheDirectoryGroupReadable Group read permissions for the cache directory +manual.summary.FallbackDir Fallback when unable to retrieve descriptor information +manual.summary.UseDefaultFallbackDirs Use hard-coded fallback directory authorities when needed +manual.summary.DirAuthority Alternative directory authorities +manual.summary.DirAuthorityFallbackRate Rate at which 
to use fallback directory +manual.summary.AlternateDirAuthority Alternative directory authorities (consensus only) +manual.summary.AlternateBridgeAuthority Alternative directory authorities (bridges only) +manual.summary.DisableAllSwap Locks all allocated memory so they can't be paged out +manual.summary.DisableDebuggerAttachment Limit information applications can retrieve about the process +manual.summary.FetchDirInfoEarly Keeps consensus information up to date, even if unnecessary +manual.summary.FetchDirInfoExtraEarly Updates consensus information when it's first available +manual.summary.FetchHidServDescriptors Toggles if hidden service descriptors are fetched automatically or not +manual.summary.FetchServerDescriptors Toggles if the consensus is fetched automatically or not +manual.summary.FetchUselessDescriptors Toggles if relay descriptors are fetched when they aren't strictly necessary +manual.summary.HTTPProxy HTTP proxy for connecting to tor +manual.summary.HTTPProxyAuthenticator Authentication credentials for HTTPProxy +manual.summary.HTTPSProxy SSL proxy for connecting to tor +manual.summary.HTTPSProxyAuthenticator Authentication credentials for HTTPSProxy +manual.summary.Sandbox Run within a syscall sandbox +manual.summary.Socks4Proxy SOCKS 4 proxy for connecting to tor +manual.summary.Socks5Proxy SOCKS 5 for connecting to tor +manual.summary.Socks5ProxyUsername Username for connecting to the Socks5Proxy +manual.summary.Socks5ProxyPassword Password for connecting to the Socks5Proxy +manual.summary.UnixSocksGroupWritable Group write permissions for the socks socket +manual.summary.KeepalivePeriod Rate at which to send keepalive packets +manual.summary.Log Runlevels and location for tor logging +manual.summary.LogMessageDomains Includes a domain when logging messages +manual.summary.MaxUnparseableDescSizeToLog Size of the dedicated log for unparseable descriptors +manual.summary.OutboundBindAddress Sets the IP used for connecting to tor 
+manual.summary.OutboundBindAddressOR Make outbound non-exit connections originate from this address +manual.summary.OutboundBindAddressExit Make outbound exit connections originate from this address +manual.summary.PidFile Path for a file tor writes containing its process id +manual.summary.ProtocolWarnings Toggles if protocol errors give warnings or not +manual.summary.RunAsDaemon Toggles if tor runs as a daemon process +manual.summary.LogTimeGranularity limits granularity of log message timestamps +manual.summary.TruncateLogFile Overwrites log file rather than appending when restarted +manual.summary.SyslogIdentityTag Tag logs appended to the syslog as being from tor +manual.summary.AndroidIdentityTag Tag when logging to android subsystem +manual.summary.SafeLogging Toggles if logs are scrubbed of sensitive information +manual.summary.User UID for the process when started +manual.summary.KeepBindCapabilities Retain permission for binding to low valued ports +manual.summary.HardwareAccel Toggles if tor attempts to use hardware acceleration +manual.summary.AccelName OpenSSL engine name for crypto acceleration +manual.summary.AccelDir Crypto acceleration library path +manual.summary.AvoidDiskWrites Toggles if tor avoids frequently writing to disk +manual.summary.CircuitPriorityHalflife Overwrite method for prioritizing traffic among relayed connections +manual.summary.CountPrivateBandwidth Applies rate limiting to private IP addresses +manual.summary.ExtendByEd25519ID Include Ed25519 identifier when extending circuits +manual.summary.NoExec Prevents any launch of other executables +manual.summary.Schedulers Scheduling algorithm by which to send outbound data +manual.summary.KISTSchedRunInterval Scheduling interval if using KIST +manual.summary.KISTSockBufSizeFactor Multiplier for per-socket limit if using KIST + +# Client Config Options + +manual.summary.Bridge Available bridges +manual.summary.LearnCircuitBuildTimeout Toggles adaptive timeouts for circuit creation 
+manual.summary.CircuitBuildTimeout Initial timeout for circuit creation +manual.summary.CircuitsAvailableTimeout Time to keep circuits open and unused for +manual.summary.CircuitStreamTimeout Timeout for shifting streams among circuits +manual.summary.ClientOnly Ensures that we aren't used as a relay or directory mirror +manual.summary.ConnectionPadding Pad traffic to help prevent correlation attacks +manual.summary.ReducedConnectionPadding Reduce padding and increase circuit cycling for low bandidth connections +manual.summary.ExcludeNodes Relays or locales never to be used in circuits +manual.summary.ExcludeExitNodes Relays or locales never to be used for exits +manual.summary.GeoIPExcludeUnknown Don't use relays with an unknown locale in circuits +manual.summary.ExitNodes Preferred final hop for circuits +manual.summary.EntryNodes Preferred first hops for circuits +manual.summary.StrictNodes Never uses notes outside of Entry/ExitNodes +manual.summary.FascistFirewall Only make outbound connections on FirewallPorts +manual.summary.FirewallPorts Ports used by FascistFirewall +manual.summary.HidServAuth Authentication credentials for connecting to a hidden service +manual.summary.ClientOnionAuthDir Path containing hidden service authorization files +manual.summary.ReachableAddresses Rules for bypassing the local firewall +manual.summary.ReachableDirAddresses Rules for bypassing the local firewall (directory fetches) +manual.summary.ReachableORAddresses Rules for bypassing the local firewall (OR connections) +manual.summary.LongLivedPorts Ports requiring highly reliable relays +manual.summary.MapAddress Alias mappings for address requests +manual.summary.NewCircuitPeriod Period for considering the creation of new circuits +manual.summary.MaxCircuitDirtiness Duration for reusing constructed circuits +manual.summary.MaxClientCircuitsPending Number of circuits that can be in construction at once +manual.summary.NodeFamily Define relays as belonging to a family 
+manual.summary.EnforceDistinctSubnets Prevent use of multiple relays from the same subnet on a circuit +manual.summary.SocksPort Port for using tor as a Socks proxy +manual.summary.SocksPolicy Access policy for the pocks port +manual.summary.SocksTimeout Time until idle or unestablished socks connections are closed +manual.summary.TokenBucketRefillInterval Frequency at which exhausted connections are checked for new traffic +manual.summary.TrackHostExits Maintains use of the same exit whenever connecting to this destination +manual.summary.TrackHostExitsExpire Time until use of an exit for tracking expires +manual.summary.UpdateBridgesFromAuthority Toggles fetching bridge descriptors from the authorities +manual.summary.UseBridges Make use of configured bridges +manual.summary.UseEntryGuards Use guard relays for first hop +manual.summary.GuardfractionFile File containing information with duration of our guards +manual.summary.UseGuardFraction Take guardfraction into account for path selection +manual.summary.NumEntryGuards Pool size of guard relays we'll select from +manual.summary.NumPrimaryGuards Pool size of strongly preferred guard relays we'll select from +manual.summary.NumDirectoryGuards Pool size of directory guards we'll select from +manual.summary.GuardLifetime Minimum time to keep entry guards +manual.summary.SafeSocks Toggles rejecting unsafe variants of the socks protocol +manual.summary.TestSocks Provide notices for if socks connections are of the safe or unsafe variants +manual.summary.VirtualAddrNetworkIPv4 IPv4 address range to use when needing a virtual address +manual.summary.VirtualAddrNetworkIPv6 IPv6 address range to use when needing a virtual address +manual.summary.AllowNonRFC953Hostnames Toggles blocking invalid characters in hostname resolution +manual.summary.HTTPTunnelPort Port on which to allow 'HTTP CONNECT' connections +manual.summary.TransPort Port for transparent proxying if the OS supports it +manual.summary.TransProxyType Proxy 
type to be used +manual.summary.NATDPort Port for forwarding ipfw NATD connections +manual.summary.AutomapHostsOnResolve Map addresses ending with special suffixes to virtual addresses +manual.summary.AutomapHostsSuffixes Address suffixes recognized by AutomapHostsOnResolve +manual.summary.DNSPort Port from which DNS responses are fetched instead of tor +manual.summary.ClientDNSRejectInternalAddresses Disregards anonymous DNS responses for internal addresses +manual.summary.ClientRejectInternalAddresses Disables use of Tor for internal connections +manual.summary.DownloadExtraInfo Toggles fetching of extra information about relays +manual.summary.WarnPlaintextPorts Toggles warnings for using risky ports +manual.summary.RejectPlaintextPorts Prevents connections on risky ports +manual.summary.OptimisticData Use exits without confirmation that prior connections succeeded +manual.summary.HSLayer2Nodes permissible relays for the second hop of HS circuits +manual.summary.HSLayer3Nodes permissible relays for the third hop of HS circuits +manual.summary.UseMicrodescriptors Retrieve microdescriptors rather than server descriptors +manual.summary.PathBiasCircThreshold Number of circuits through a guard before applying bias checks +manual.summary.PathBiasNoticeRate Fraction of circuits that must succeed before logging a notice +manual.summary.PathBiasWarnRate Fraction of circuits that must succeed before logging a warning +manual.summary.PathBiasExtremeRate Fraction of circuits that must succeed before logging an error +manual.summary.PathBiasDropGuards Drop guards failing to establish circuits +manual.summary.PathBiasScaleThreshold Circuits through a guard before scaling past observations down +manual.summary.PathBiasUseThreshold Number of streams through a circuit before applying bias checks +manual.summary.PathBiasNoticeUseRate Fraction of streams that must succeed before logging a notice +manual.summary.PathBiasExtremeUseRate Fraction of streams that must succeed before 
logging an error +manual.summary.PathBiasScaleUseThreshold Streams through a circuit before scaling past observations down +manual.summary.ClientUseIPv4 Allow IPv4 connections to guards and fetching consensus +manual.summary.ClientUseIPv6 Allow IPv6 connections to guards and fetching consensus +manual.summary.ClientPreferIPv6DirPort Perfer relays with IPv6 when fetching consensus +manual.summary.ClientPreferIPv6ORPort Prefer a guard's IPv6 rather than IPv4 endpoint +manual.summary.PathsNeededToBuildCircuits Portion of relays to require information for before making circuits +manual.summary.ClientBootstrapConsensusAuthorityDownloadInitialDelay Delay when bootstrapping before downloading descriptors from authorities +manual.summary.ClientBootstrapConsensusFallbackDownloadInitialDelay Delay when bootstrapping before downloading descriptors from fallbacks +manual.summary.ClientBootstrapConsensusAuthorityOnlyDownloadInitialDelay Delay when bootstrapping before downloading descriptors from authorities if fallbacks disabled +manual.summary.ClientBootstrapConsensusMaxInProgressTries Descriptor documents that can be downloaded in parallel +manual.summary.ClientBootstrapConsensusMaxInProgressTries Number of consensus download requests to allow in-flight at once + +# Server Config Options + +manual.summary.Address Overwrites address others will use to reach this relay +manual.summary.AssumeReachable Skips reachability test at startup +manual.summary.BridgeRelay Act as a bridge +manual.summary.BridgeDistribution Distribution method BrideDB should provide our address by +manual.summary.ContactInfo Contact information for this relay +manual.summary.ExitRelay Allow relaying of exit traffic +manual.summary.ExitPolicy Traffic destinations that can exit from this relay +manual.summary.ExitPolicyRejectPrivate Prevent exiting on the local network +manual.summary.ExitPolicyRejectLocalInterfaces More extensive prevention of exiting on the local network +manual.summary.ReducedExitPolicy 
Customized reduced exit policy +manual.summary.IPv6Exit Allow clients to use us for IPv6 traffic +manual.summary.MaxOnionQueueDelay Duration to reject new onionskins if we have more than we can process +manual.summary.MyFamily Other relays this operator administers +manual.summary.Nickname Identifier for this relay +manual.summary.NumCPUs Number of processes spawned for decryption +manual.summary.ORPort Port used to accept relay traffic +manual.summary.PublishServerDescriptor Types of descriptors published +manual.summary.ShutdownWaitLength Delay before quitting after receiving a SIGINT signal +manual.summary.SSLKeyLifetime Lifetime for our link certificate +manual.summary.HeartbeatPeriod Rate at which an INFO level heartbeat message is sent +manual.summary.MainloopStats Include development information from the main loop with heartbeats +manual.summary.AccountingMax Amount of traffic before hibernating +manual.summary.AccountingRule Method to determine when the accounting limit is reached +manual.summary.AccountingStart Duration of an accounting period +manual.summary.RefuseUnknownExits Prevents relays not in the consensus from using us as an exit +manual.summary.ServerDNSResolvConfFile Overriding resolver config for DNS queries we provide +manual.summary.ServerDNSAllowBrokenConfig Toggles if we persist despite configuration parsing errors or not +manual.summary.ServerDNSSearchDomains Toggles if our DNS queries search for addresses in the local domain +manual.summary.ServerDNSDetectHijacking Toggles testing for DNS hijacking +manual.summary.ServerDNSTestAddresses Addresses to test to see if valid DNS queries are being hijacked +manual.summary.ServerDNSAllowNonRFC953Hostnames Toggles if we reject DNS queries with invalid characters +manual.summary.BridgeRecordUsageByCountry Tracks geoip information on bridge usage +manual.summary.ServerDNSRandomizeCase Toggles DNS query case randomization +manual.summary.GeoIPFile Path to file containing IPv4 geoip information 
+manual.summary.GeoIPv6File Path to file containing IPv6 geoip information +manual.summary.CellStatistics Toggles storing circuit queue duration to disk +manual.summary.PaddingStatistics Toggles storing padding counts +manual.summary.DirReqStatistics Toggles storing network status counts and performance to disk +manual.summary.EntryStatistics Toggles storing client connection counts to disk +manual.summary.ExitPortStatistics Toggles storing traffic and port usage data to disk +manual.summary.ConnDirectionStatistics Toggles storing connection use to disk +manual.summary.HiddenServiceStatistics Toggles storing hidden service stats to disk +manual.summary.ExtraInfoStatistics Publishes statistic data in the extra-info documents +manual.summary.ExtendAllowPrivateAddresses Allow circuits to be extended to the local network +manual.summary.MaxMemInQueues Threshold at which tor will terminate circuits to avoid running out of memory +manual.summary.DisableOOSCheck Don't close connections when running out of sockets +manual.summary.SigningKeyLifetime Duration the Ed25519 signing key is valid for +manual.summary.OfflineMasterKey Don't generate the master secret key +manual.summary.KeyDirectory Directory where secret keys reside +manual.summary.KeyDirectoryGroupReadable Group read permissions for the secret key directory + +# Directory Server Options + +manual.summary.DirPortFrontPage Publish this html file on the DirPort +manual.summary.DirPort Port for directory connections +manual.summary.DirPolicy Access policy for the DirPort +manual.summary.DirCache Provide cached descriptor information to other tor users +manual.summary.MaxConsensusAgeForDiffs Time to generate consensus caches for + +# Directory Authority Server Options + +manual.summary.AuthoritativeDirectory Act as a directory authority +manual.summary.V3AuthoritativeDirectory Generates a version 3 consensus +manual.summary.VersioningAuthoritativeDirectory Provides opinions on recommended versions of tor 
+manual.summary.RecommendedVersions Suggested versions of tor +manual.summary.RecommendedPackages Suggested versions of applications other than tor +manual.summary.RecommendedClientVersions Tor versions believed to be safe for clients +manual.summary.BridgeAuthoritativeDir Acts as a bridge authority +manual.summary.MinUptimeHidServDirectoryV2 Required uptime before accepting hidden service directory +manual.summary.RecommendedServerVersions Tor versions believed to be safe for relays +manual.summary.ConsensusParams Params entry of the networkstatus vote +manual.summary.DirAllowPrivateAddresses Toggles allowing arbitrary input or non-public IPs in descriptors +manual.summary.AuthDirBadExit Relays to be flagged as bad exits +manual.summary.AuthDirInvalid Relays from which the valid flag is withheld +manual.summary.AuthDirReject Relays to be dropped from the consensus +manual.summary.AuthDirBadExitCCs Countries for which to flag all relays as bad exits +manual.summary.AuthDirInvalidCCs Countries for which the valid flag is withheld +manual.summary.AuthDirRejectCCs Countries for which relays aren't accepted into the consensus +manual.summary.AuthDirListBadExits Toggles if we provide an opinion on bad exits +manual.summary.AuthDirMaxServersPerAddr Limit on the number of relays accepted per ip +manual.summary.AuthDirFastGuarantee Advertised rate at which the Fast flag is granted +manual.summary.AuthDirGuardBWGuarantee Advertised rate necessary to be a guard +manual.summary.AuthDirPinKeys Don't accept descriptors with conflicting identity keypairs +manual.summary.AuthDirSharedRandomness Participates in shared randomness voting +manual.summary.AuthDirTestEd25519LinkKeys Require proper Ed25519 key for the Running flag +manual.summary.BridgePassword Password for requesting bridge information +manual.summary.V3AuthVotingInterval Consensus voting interval +manual.summary.V3AuthVoteDelay Wait time to collect votes of other authorities +manual.summary.V3AuthDistDelay Wait time 
to collect the signatures of other authorities +manual.summary.V3AuthNIntervalsValid Number of voting intervals a consensus is valid for +manual.summary.V3BandwidthsFile Path to a file containing measured relay bandwidths +manual.summary.V3AuthUseLegacyKey Signs consensus with both the current and legacy keys +manual.summary.RephistTrackTime Discards old, unchanged reliability information +manual.summary.AuthDirHasIPv6Connectivity Descriptors can be retrieved over the authority's IPv6 ORPort +manual.summary.MinMeasuredBWsForAuthToIgnoreAdvertised Total measured value before advertised bandwidths are treated as unreliable + +# Hidden Service Options + +manual.summary.HiddenServiceDir Directory contents for the hidden service +manual.summary.HiddenServicePort Port the hidden service is provided on +manual.summary.PublishHidServDescriptors Toggles automated publishing of the hidden service to the rendezvous directory +manual.summary.HiddenServiceVersion Version for published hidden service descriptors +manual.summary.HiddenServiceAuthorizeClient Restricts access to the hidden service +manual.summary.HiddenServiceAllowUnknownPorts Allow rendezvous circuits on unrecognized ports +manual.summary.HiddenServiceExportCircuitID Exposes incoming client circuits via the given protocol +manual.summary.HiddenServiceMaxStreams Maximum streams per rendezvous circuit +manual.summary.HiddenServiceMaxStreamsCloseCircuit Closes rendezvous circuits that exceed the maximum number of streams +manual.summary.RendPostPeriod Period at which the rendezvous service descriptors are refreshed +manual.summary.HiddenServiceDirGroupReadable Group read permissions for the hidden service directory +manual.summary.HiddenServiceNumIntroductionPoints Number of introduction points the hidden service will have +manual.summary.HiddenServiceSingleHopMode Allow non-anonymous single hop hidden services +manual.summary.HiddenServiceNonAnonymousMode Enables HiddenServiceSingleHopMode to be set + +# DoS 
Mitigation Options + +manual.summary.DoSCircuitCreationEnabled Enables circuit creation DoS mitigation +manual.summary.DoSCircuitCreationMinConnections Connection rate when clients are a suspected DoS +manual.summary.DoSCircuitCreationRate Acceptable rate for circuit creation +manual.summary.DoSCircuitCreationBurst Accept burst of circuit creation up to this rate +manual.summary.DoSCircuitCreationDefenseType Method for mitigating circuit creation DoS +manual.summary.DoSCircuitCreationDefenseTimePeriod Duration of DoS mitigation +manual.summary.DoSConnectionEnabled Enables connection DoS mitigation +manual.summary.DoSConnectionMaxConcurrentCount Acceptable number of connections +manual.summary.DoSConnectionDefenseType Method for mitigating connection DoS +manual.summary.DoSRefuseSingleHopClientRendezvous Prevent establishment of single hop rendezvous points + +# Testing Network Options + +manual.summary.TestingTorNetwork Overrides other options to be a testing network +manual.summary.TestingV3AuthInitialVotingInterval Overrides V3AuthVotingInterval for the first consensus +manual.summary.TestingV3AuthInitialVoteDelay Overrides TestingV3AuthInitialVoteDelay for the first consensus +manual.summary.TestingV3AuthInitialDistDelay Overrides TestingV3AuthInitialDistDelay for the first consensus +manual.summary.TestingV3AuthVotingStartOffset Offset for the point at which the authority votes +manual.summary.TestingAuthDirTimeToLearnReachability Delay until opinions are given about which relays are running or not +manual.summary.TestingEstimatedDescriptorPropagationTime Delay before clients attempt to fetch descriptors from directory caches +manual.summary.TestingMinFastFlagThreshold Minimum value for the Fast flag +manual.summary.TestingServerDownloadInitialDelay Delay before downloading resources for relaying +manual.summary.TestingClientDownloadInitialDelay Delay before downloading resources for client usage +manual.summary.TestingServerConsensusDownloadInitialDelay Delay 
before downloading descriptors for relaying +manual.summary.TestingClientConsensusDownloadInitialDelay Delay before downloading descriptors for client usage +manual.summary.TestingBridgeDownloadInitialDelay Delay before downloading bridge descriptors +manual.summary.TestingBridgeBootstrapDownloadInitialDelay Delay before downloading bridge descriptors when first started +manual.summary.TestingClientMaxIntervalWithoutRequest Maximum time to wait to batch requests for missing descriptors +manual.summary.TestingDirConnectionMaxStall Duration to let directory connections stall before timing out +manual.summary.TestingDirAuthVoteExit Relays to give the Exit flag to +manual.summary.TestingDirAuthVoteExitIsStrict Only grant the Exit flag to relays listed by TestingDirAuthVoteExit +manual.summary.TestingDirAuthVoteGuard Relays to give the Guard flag to +manual.summary.TestingDirAuthVoteGuardIsStrict Only grant the Guard flag to relays listed by TestingDirAuthVoteGuard +manual.summary.TestingDirAuthVoteHSDir Relays to give the HSDir flag to +manual.summary.TestingDirAuthVoteHSDirIsStrict Only grant the HSDir flag to relays listed by TestingDirAuthVoteHSDir +manual.summary.TestingEnableConnBwEvent Allow controllers to request CONN_BW events +manual.summary.TestingEnableCellStatsEvent Allow controllers to request CELL_STATS events +manual.summary.TestingMinExitFlagThreshold Lower bound for assigning the Exit flag +manual.summary.TestingLinkCertLifetime Duration of our ed25519 certificate +manual.summary.TestingAuthKeyLifetime Duration for our ed25519 signing key +manual.summary.TestingLinkKeySlop Time before expiration that we replace our ed25519 link key +manual.summary.TestingAuthKeySlop Time before expiration that we replace our ed25519 authentication key +manual.summary.TestingSigningKeySlop Time before expiration that we replace our ed25519 signing key + +# Brief description of tor events + +event.description.debug Logging at the debug runlevel. 
This is low level, high volume information about tor's internals that generally isn't useful to users. +event.description.info Logging at the info runlevel. This is low level information of important internal processes. +event.description.notice Logging at the notice runlevel. This runlevel and above are shown to users by default, and includes general information the user should be aware of. +event.description.warn Logging at the warning runlevel. These are problems the user should be aware of. +event.description.err Logging at the error runlevel. These are critical issues that may prevent tor from working properly. + +event.description.addrmap New address mapping for our DNS cache. +event.description.authdir_newdescs Indicates we just received a new descriptor. This is only used by directory authorities. +event.description.buildtimeout_set Indicates the timeout value for a circuit has changed. +event.description.bw Event emitted every second with the bytes sent and received by tor. +event.description.cell_stats Event emitted every second with the count of the number of cell types per circuit. +event.description.circ Indicates that a circuit we've established through the tor network has been created, changed, or closed. +event.description.circ_bw Event emitted every second with the bytes sent and received on a per-circuit basis. +event.description.circ_minor Minor changes to our circuits, such as reuse of existing circuits for a different purpose. +event.description.clients_seen Periodic summary of the countries we've seen users connect from. This is only used by bridge relays. +event.description.conf_changed Indicates that our torrc configuration has changed. This could be in response to a SETCONF or RELOAD signal. +event.description.conn_bw Event emitted every second with the byytes sent and received on a per-connection basis. +event.description.descchanged Indicates that our descriptor has changed. 
+event.description.guard Indicates that the set of relays we use for our initial connection into the tor network (guards) have changed. +event.description.hs_desc Received a hidden service descriptor that wasn't yet cached. +event.description.hs_desc_content Content of a hidden service descriptor we've fetched. +event.description.network_liveness Emitted when the network becomes reachable or unreachable. +event.description.newconsensus Received a new hourly consensus of relays in the tor network. +event.description.newdesc Indicates that a new descriptor is available. +event.description.ns Consensus information for an individual relay has changed. This could be due to receiving a new consensus or tor locally decides a relay is up or down. +event.description.orconn Change in our connections as a relay. +event.description.signal Indicates that tor has received and acted upon a signal being sent to its process. +event.description.status_client Notification of a change in tor's state as a client (ie user). +event.description.status_general Notification of a change in tor's state. +event.description.status_server Notification of a change in tor's state as a relay. +event.description.stream Communication over a circuit we've established. For instance, Firefox making a connection through tor. +event.description.stream_bw Event emitted every second with the bytes sent and received for a specific stream. +event.description.tb_empty Statistics for when token buckets are refilled. This is only used when TestingTorNetwork is set. +event.description.transport_launched Emitted when a pluggable transport is launched. 
+ diff --git a/Shared/lib/python3.4/site-packages/stem/socket.py b/Shared/lib/python3.4/site-packages/stem/socket.py index 8024098..3da24c2 100644 --- a/Shared/lib/python3.4/site-packages/stem/socket.py +++ b/Shared/lib/python3.4/site-packages/stem/socket.py @@ -1,8 +1,8 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ -Supports communication with sockets speaking the Tor control protocol. This +Supports communication with sockets speaking Tor protocols. This allows us to send messages as basic strings, and receive responses as :class:`~stem.response.ControlMessage` instances. @@ -46,18 +46,21 @@ Tor... :: - ControlSocket - Socket wrapper that speaks the tor control protocol. - |- ControlPort - Control connection via a port. - | |- get_address - provides the ip address of our socket - | +- get_port - provides the port of our socket + BaseSocket - Thread safe socket. + |- RelaySocket - Socket for a relay's ORPort. + | |- send - sends a message to the socket + | +- recv - receives a response from the socket | - |- ControlSocketFile - Control connection via a local file socket. - | +- get_socket_path - provides the path of the socket we connect to + |- ControlSocket - Socket wrapper that speaks the tor control protocol. + | |- ControlPort - Control connection via a port. + | |- ControlSocketFile - Control connection via a local file socket. 
+ | | + | |- send - sends a message to the socket + | +- recv - receives a ControlMessage from the socket | - |- send - sends a message to the socket - |- recv - receives a ControlMessage from the socket |- is_alive - reports if the socket is known to be closed |- is_localhost - returns if the socket is for the local system or not + |- connection_time - timestamp when socket last connected or disconnected |- connect - connects a new socket |- close - shuts down the socket +- __enter__ / __exit__ - manages socket connection @@ -71,6 +74,7 @@ from __future__ import absolute_import import re import socket +import ssl import threading import time @@ -80,15 +84,21 @@ import stem.util.str_tools from stem.util import log +MESSAGE_PREFIX = re.compile(b'^[a-zA-Z0-9]{3}[-+ ]') +ERROR_MSG = 'Error while receiving a control message (%s): %s' -class ControlSocket(object): +# lines to limit our trace logging to, you can disable this by setting it to None + +TRUNCATE_LOGS = 10 + +# maximum number of bytes to read at a time from a relay socket + +MAX_READ_BUFFER_LEN = 10 * 1024 * 1024 + + +class BaseSocket(object): """ - Wrapper for a socket connection that speaks the Tor control protocol. To the - better part this transparently handles the formatting for sending and - receiving complete messages. All methods are thread safe. - - Callers should not instantiate this class directly, but rather use subclasses - which are expected to implement the **_make_socket()** method. + Thread safe socket, providing common socket functionality. """ def __init__(self): @@ -103,95 +113,21 @@ class ControlSocket(object): self._send_lock = threading.RLock() self._recv_lock = threading.RLock() - def send(self, message, raw = False): - """ - Formats and sends a message to the control socket. For more information see - the :func:`~stem.socket.send_message` function. 
- - :param str message: message to be formatted and sent to the socket - :param bool raw: leaves the message formatting untouched, passing it to the socket as-is - - :raises: - * :class:`stem.SocketError` if a problem arises in using the socket - * :class:`stem.SocketClosed` if the socket is known to be shut down - """ - - with self._send_lock: - try: - if not self.is_alive(): - raise stem.SocketClosed() - - send_message(self._socket_file, message, raw) - except stem.SocketClosed as exc: - # if send_message raises a SocketClosed then we should properly shut - # everything down - - if self.is_alive(): - self.close() - - raise exc - - def recv(self): - """ - Receives a message from the control socket, blocking until we've received - one. For more information see the :func:`~stem.socket.recv_message` function. - - :returns: :class:`~stem.response.ControlMessage` for the message received - - :raises: - * :class:`stem.ProtocolError` the content from the socket is malformed - * :class:`stem.SocketClosed` if the socket closes before we receive a complete message - """ - - with self._recv_lock: - try: - # makes a temporary reference to the _socket_file because connect() - # and close() may set or unset it - - socket_file = self._socket_file - - if not socket_file: - raise stem.SocketClosed() - - return recv_message(socket_file) - except stem.SocketClosed as exc: - # If recv_message raises a SocketClosed then we should properly shut - # everything down. However, there's a couple cases where this will - # cause deadlock... - # - # * this socketClosed was *caused by* a close() call, which is joining - # on our thread - # - # * a send() call that's currently in flight is about to call close(), - # also attempting to join on us - # - # To resolve this we make a non-blocking call to acquire the send lock. - # If we get it then great, we can close safely. If not then one of the - # above are in progress and we leave the close to them. 
- - if self.is_alive(): - if self._send_lock.acquire(False): - self.close() - self._send_lock.release() - - raise exc - def is_alive(self): """ Checks if the socket is known to be closed. We won't be aware if it is until we either use it or have explicitily shut it down. In practice a socket derived from a port knows about its disconnection - after a failed :func:`~stem.socket.ControlSocket.recv` call. Socket file - derived connections know after either a - :func:`~stem.socket.ControlSocket.send` or - :func:`~stem.socket.ControlSocket.recv`. + after failing to receive data, whereas socket file derived connections + know after either sending or receiving data. This means that to have reliable detection for when we're disconnected you need to continually pull from the socket (which is part of what the :class:`~stem.control.BaseController` does). - :returns: **bool** that's **True** if our socket is connected and **False** otherwise + :returns: **bool** that's **True** if our socket is connected and **False** + otherwise """ return self._is_alive @@ -200,7 +136,8 @@ class ControlSocket(object): """ Returns if the connection is for the local system or not. - :returns: **bool** that's **True** if the connection is for the local host and **False** otherwise + :returns: **bool** that's **True** if the connection is for the local host + and **False** otherwise """ return False @@ -295,12 +232,78 @@ class ControlSocket(object): if is_change: self._close() + def _send(self, message, handler): + """ + Send message in a thread safe manner. Handler is expected to be of the form... 
+ + :: + + my_handler(socket, socket_file, message) + """ + + with self._send_lock: + try: + if not self.is_alive(): + raise stem.SocketClosed() + + handler(self._socket, self._socket_file, message) + except stem.SocketClosed: + # if send_message raises a SocketClosed then we should properly shut + # everything down + + if self.is_alive(): + self.close() + + raise + + def _recv(self, handler): + """ + Receives a message in a thread safe manner. Handler is expected to be of the form... + + :: + + my_handler(socket, socket_file) + """ + + with self._recv_lock: + try: + # makes a temporary reference to the _socket_file because connect() + # and close() may set or unset it + + my_socket, my_socket_file = self._socket, self._socket_file + + if not my_socket or not my_socket_file: + raise stem.SocketClosed() + + return handler(my_socket, my_socket_file) + except stem.SocketClosed: + # If recv_message raises a SocketClosed then we should properly shut + # everything down. However, there's a couple cases where this will + # cause deadlock... + # + # * This SocketClosed was *caused by* a close() call, which is joining + # on our thread. + # + # * A send() call that's currently in flight is about to call close(), + # also attempting to join on us. + # + # To resolve this we make a non-blocking call to acquire the send lock. + # If we get it then great, we can close safely. If not then one of the + # above are in progress and we leave the close to them. + + if self.is_alive(): + if self._send_lock.acquire(False): + self.close() + self._send_lock.release() + + raise + def _get_send_lock(self): """ The send lock is useful to classes that interact with us at a deep level because it's used to lock :func:`stem.socket.ControlSocket.connect` / - :func:`stem.socket.ControlSocket.close`, and by extension our - :func:`stem.socket.ControlSocket.is_alive` state changes. + :func:`stem.socket.BaseSocket.close`, and by extension our + :func:`stem.socket.BaseSocket.is_alive` state changes. 
:returns: **threading.RLock** that governs sending messages to our socket and state changes @@ -339,13 +342,135 @@ class ControlSocket(object): * **NotImplementedError** if not implemented by a subclass """ - raise NotImplementedError('Unsupported Operation: this should be implemented by the ControlSocket subclass') + raise NotImplementedError('Unsupported Operation: this should be implemented by the BaseSocket subclass') + + +class RelaySocket(BaseSocket): + """ + `Link-level connection + `_ to a Tor + relay. + + .. versionadded:: 1.7.0 + + :var str address: address our socket connects to + :var int port: ORPort our socket connects to + """ + + def __init__(self, address = '127.0.0.1', port = 9050, connect = True): + """ + RelaySocket constructor. + + :param str address: ip address of the relay + :param int port: orport of the relay + :param bool connect: connects to the socket if True, leaves it unconnected otherwise + + :raises: :class:`stem.SocketError` if connect is **True** and we're + unable to establish a connection + """ + + super(RelaySocket, self).__init__() + self.address = address + self.port = port + + if connect: + self.connect() + + def send(self, message): + """ + Sends a message to the relay's ORPort. + + :param str message: message to be formatted and sent to the socket + + :raises: + * :class:`stem.SocketError` if a problem arises in using the socket + * :class:`stem.SocketClosed` if the socket is known to be shut down + """ + + self._send(message, lambda s, sf, msg: _write_to_socket(sf, msg)) + + def recv(self): + """ + Receives a message from the relay. + + :returns: bytes for the message received + + :raises: + * :class:`stem.ProtocolError` the content from the socket is malformed + * :class:`stem.SocketClosed` if the socket closes before we receive a complete message + """ + + # TODO: Is MAX_READ_BUFFER_LEN defined in the spec? Not sure where it came + # from. 
+ + return self._recv(lambda s, sf: s.recv(MAX_READ_BUFFER_LEN)) + + def is_localhost(self): + return self.address == '127.0.0.1' + + def _make_socket(self): + try: + relay_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + relay_socket.connect((self.address, self.port)) + return ssl.wrap_socket(relay_socket) + except socket.error as exc: + raise stem.SocketError(exc) + + +class ControlSocket(BaseSocket): + """ + Wrapper for a socket connection that speaks the Tor control protocol. To the + better part this transparently handles the formatting for sending and + receiving complete messages. + + Callers should not instantiate this class directly, but rather use subclasses + which are expected to implement the **_make_socket()** method. + """ + + def __init__(self): + super(ControlSocket, self).__init__() + + def send(self, message): + """ + Formats and sends a message to the control socket. For more information see + the :func:`~stem.socket.send_message` function. + + .. deprecated:: 1.7.0 + The **raw** argument was unhelpful and be removed. Use + :func:`stem.socket.send_message` if you need this level of control + instead. + + :param str message: message to be formatted and sent to the socket + + :raises: + * :class:`stem.SocketError` if a problem arises in using the socket + * :class:`stem.SocketClosed` if the socket is known to be shut down + """ + + self._send(message, lambda s, sf, msg: send_message(sf, msg)) + + def recv(self): + """ + Receives a message from the control socket, blocking until we've received + one. For more information see the :func:`~stem.socket.recv_message` function. 
+ + :returns: :class:`~stem.response.ControlMessage` for the message received + + :raises: + * :class:`stem.ProtocolError` the content from the socket is malformed + * :class:`stem.SocketClosed` if the socket closes before we receive a complete message + """ + + return self._recv(lambda s, sf: recv_message(sf)) class ControlPort(ControlSocket): """ Control connection to tor. For more information see tor's ControlPort torrc option. + + :var str address: address our socket connects to + :var int port: ControlPort our socket connects to """ def __init__(self, address = '127.0.0.1', port = 9051, connect = True): @@ -361,8 +486,8 @@ class ControlPort(ControlSocket): """ super(ControlPort, self).__init__() - self._control_addr = address - self._control_port = port + self.address = address + self.port = port if connect: self.connect() @@ -371,27 +496,33 @@ class ControlPort(ControlSocket): """ Provides the ip address our socket connects to. + .. deprecated:: 1.7.0 + Use the **address** attribute instead. + :returns: str with the ip address of our socket """ - return self._control_addr + return self.address def get_port(self): """ Provides the port our socket connects to. + .. deprecated:: 1.7.0 + Use the **port** attribute instead. + :returns: int with the port of our socket """ - return self._control_port + return self.port def is_localhost(self): - return self._control_addr == '127.0.0.1' + return self.address == '127.0.0.1' def _make_socket(self): try: control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - control_socket.connect((self._control_addr, self._control_port)) + control_socket.connect((self.address, self.port)) return control_socket except socket.error as exc: raise stem.SocketError(exc) @@ -401,6 +532,8 @@ class ControlSocketFile(ControlSocket): """ Control connection to tor. For more information see tor's ControlSocket torrc option. 
+ + :var str path: filesystem path of the socket we connect to """ def __init__(self, path = '/var/run/tor/control', connect = True): @@ -415,7 +548,7 @@ class ControlSocketFile(ControlSocket): """ super(ControlSocketFile, self).__init__() - self._socket_path = path + self.path = path if connect: self.connect() @@ -424,10 +557,13 @@ class ControlSocketFile(ControlSocket): """ Provides the path our socket connects to. + .. deprecated:: 1.7.0 + Use the **path** attribute instead. + :returns: str with the path for our control socket """ - return self._socket_path + return self.path def is_localhost(self): return True @@ -435,7 +571,7 @@ class ControlSocketFile(ControlSocket): def _make_socket(self): try: control_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - control_socket.connect(self._socket_path) + control_socket.connect(self.path) return control_socket except socket.error as exc: raise stem.SocketError(exc) @@ -476,14 +612,20 @@ def send_message(control_file, message, raw = False): if not raw: message = send_formatting(message) - try: - control_file.write(stem.util.str_tools._to_bytes(message)) - control_file.flush() + _write_to_socket(control_file, message) + if log.is_tracing(): log_message = message.replace('\r\n', '\n').rstrip() - log.trace('Sent to tor:\n' + log_message) + msg_div = '\n' if '\n' in log_message else ' ' + log.trace('Sent to tor:%s%s' % (msg_div, log_message)) + + +def _write_to_socket(socket_file, message): + try: + socket_file.write(stem.util.str_tools._to_bytes(message)) + socket_file.flush() except socket.error as exc: - log.info('Failed to send message: %s' % exc) + log.info('Failed to send: %s' % exc) # When sending there doesn't seem to be a reliable method for # distinguishing between failures from a disconnect verses other things. 
@@ -497,7 +639,7 @@ def send_message(control_file, message, raw = False): # if the control_file has been closed then flush will receive: # AttributeError: 'NoneType' object has no attribute 'sendall' - log.info('Failed to send message: file has been closed') + log.info('Failed to send: file has been closed') raise stem.SocketClosed('file has been closed') @@ -517,22 +659,16 @@ def recv_message(control_file): a complete message """ - parsed_content, raw_content = [], b'' - logging_prefix = 'Error while receiving a control message (%s): ' + parsed_content, raw_content, first_line = None, None, True while True: try: - # From a real socket readline() would always provide bytes, but during - # tests we might be given a StringIO in which case it's unicode under - # python 3.x. - - line = stem.util.str_tools._to_bytes(control_file.readline()) + line = control_file.readline() except AttributeError: # if the control_file has been closed then we will receive: # AttributeError: 'NoneType' object has no attribute 'recv' - prefix = logging_prefix % 'SocketClosed' - log.info(prefix + 'socket file has been closed') + log.info(ERROR_MSG % ('SocketClosed', 'socket file has been closed')) raise stem.SocketClosed('socket file has been closed') except (socket.error, ValueError) as exc: # When disconnected we get... @@ -543,70 +679,67 @@ def recv_message(control_file): # Python 3: # ValueError: I/O operation on closed file. - prefix = logging_prefix % 'SocketClosed' - log.info(prefix + 'received exception "%s"' % exc) + log.info(ERROR_MSG % ('SocketClosed', 'received exception "%s"' % exc)) raise stem.SocketClosed(exc) - raw_content += line - # Parses the tor control lines. These are of the form... 
# \r\n - if len(line) == 0: + if not line: # if the socket is disconnected then the readline() method will provide # empty content - prefix = logging_prefix % 'SocketClosed' - log.info(prefix + 'empty socket content') + log.info(ERROR_MSG % ('SocketClosed', 'empty socket content')) raise stem.SocketClosed('Received empty socket content.') - elif len(line) < 4: - prefix = logging_prefix % 'ProtocolError' - log.info(prefix + 'line too short, "%s"' % log.escape(line)) - raise stem.ProtocolError('Badly formatted reply line: too short') - elif not re.match(b'^[a-zA-Z0-9]{3}[-+ ]', line): - prefix = logging_prefix % 'ProtocolError' - log.info(prefix + 'malformed status code/divider, "%s"' % log.escape(line)) + elif not MESSAGE_PREFIX.match(line): + log.info(ERROR_MSG % ('ProtocolError', 'malformed status code/divider, "%s"' % log.escape(line))) raise stem.ProtocolError('Badly formatted reply line: beginning is malformed') elif not line.endswith(b'\r\n'): - prefix = logging_prefix % 'ProtocolError' - log.info(prefix + 'no CRLF linebreak, "%s"' % log.escape(line)) + log.info(ERROR_MSG % ('ProtocolError', 'no CRLF linebreak, "%s"' % log.escape(line))) raise stem.ProtocolError('All lines should end with CRLF') - line = line[:-2] # strips off the CRLF - status_code, divider, content = line[:3], line[3:4], line[4:] + status_code, divider, content = line[:3], line[3:4], line[4:-2] # strip CRLF off content if stem.prereq.is_python_3(): status_code = stem.util.str_tools._to_unicode(status_code) divider = stem.util.str_tools._to_unicode(divider) + # Most controller responses are single lines, in which case we don't need + # so much overhead. 
+ + if first_line: + if divider == ' ': + _log_trace(line) + return stem.response.ControlMessage([(status_code, divider, content)], line) + else: + parsed_content, raw_content, first_line = [], bytearray(), False + + raw_content += line + if divider == '-': # mid-reply line, keep pulling for more content parsed_content.append((status_code, divider, content)) elif divider == ' ': # end of the message, return the message parsed_content.append((status_code, divider, content)) - - log_message = raw_content.replace(b'\r\n', b'\n').rstrip() - log.trace('Received from tor:\n' + stem.util.str_tools._to_unicode(log_message)) - - return stem.response.ControlMessage(parsed_content, raw_content) + _log_trace(bytes(raw_content)) + return stem.response.ControlMessage(parsed_content, bytes(raw_content)) elif divider == '+': # data entry, all of the following lines belong to the content until we # get a line with just a period + content_block = bytearray(content) + while True: try: - line = stem.util.str_tools._to_bytes(control_file.readline()) + line = control_file.readline() + raw_content += line except socket.error as exc: - prefix = logging_prefix % 'SocketClosed' - log.info(prefix + 'received an exception while mid-way through a data reply (exception: "%s", read content: "%s")' % (exc, log.escape(raw_content))) + log.info(ERROR_MSG % ('SocketClosed', 'received an exception while mid-way through a data reply (exception: "%s", read content: "%s")' % (exc, log.escape(bytes(raw_content))))) raise stem.SocketClosed(exc) - raw_content += line - if not line.endswith(b'\r\n'): - prefix = logging_prefix % 'ProtocolError' - log.info(prefix + 'CRLF linebreaks missing from a data reply, "%s"' % log.escape(raw_content)) + log.info(ERROR_MSG % ('ProtocolError', 'CRLF linebreaks missing from a data reply, "%s"' % log.escape(bytes(raw_content)))) raise stem.ProtocolError('All lines should end with CRLF') elif line == b'.\r\n': break # data block termination @@ -619,18 +752,17 @@ def 
recv_message(control_file): if line.startswith(b'..'): line = line[1:] - # appends to previous content, using a newline rather than CRLF - # separator (more conventional for multi-line string content outside - # the windows world) + content_block += b'\n' + line - content += b'\n' + line + # joins the content using a newline rather than CRLF separator (more + # conventional for multi-line string content outside the windows world) - parsed_content.append((status_code, divider, content)) + parsed_content.append((status_code, divider, bytes(content_block))) else: # this should never be reached due to the prefix regex, but might as well # be safe... - prefix = logging_prefix % 'ProtocolError' - log.warn(prefix + "\"%s\" isn't a recognized divider type" % divider) + + log.warn(ERROR_MSG % ('ProtocolError', "\"%s\" isn't a recognized divider type" % divider)) raise stem.ProtocolError("Unrecognized divider type '%s': %s" % (divider, stem.util.str_tools._to_unicode(line))) @@ -661,3 +793,19 @@ def send_formatting(message): return '+%s\r\n.\r\n' % message.replace('\n', '\r\n') else: return message + '\r\n' + + +def _log_trace(response): + if not log.is_tracing(): + return + + log_message = stem.util.str_tools._to_unicode(response.replace(b'\r\n', b'\n').rstrip()) + log_message_lines = log_message.split('\n') + + if TRUNCATE_LOGS and len(log_message_lines) > TRUNCATE_LOGS: + log_message = '\n'.join(log_message_lines[:TRUNCATE_LOGS] + ['... %i more lines...' 
% (len(log_message_lines) - TRUNCATE_LOGS)]) + + if len(log_message_lines) > 2: + log.trace('Received from tor:\n%s' % log_message) + else: + log.trace('Received from tor: %s' % log_message.replace('\n', '\\n')) diff --git a/Shared/lib/python3.4/site-packages/stem/util/__init__.py b/Shared/lib/python3.4/site-packages/stem/util/__init__.py index 1c18df3..fba506e 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/__init__.py +++ b/Shared/lib/python3.4/site-packages/stem/util/__init__.py @@ -1,10 +1,14 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ Utility functions used by the stem library. """ +import datetime + +import stem.prereq + __all__ = [ 'conf', 'connection', @@ -17,4 +21,135 @@ __all__ = [ 'term', 'test_tools', 'tor_tools', + 'datetime_to_unix', ] + +# Beginning with Stem 1.7 we take attribute types into account when hashing +# and checking equality. That is to say, if two Stem classes' attributes are +# the same but use different types we no longer consider them to be equal. +# For example... +# +# s1 = Schedule(classes = ['Math', 'Art', 'PE']) +# s2 = Schedule(classes = ('Math', 'Art', 'PE')) +# +# Prior to Stem 1.7 s1 and s2 would be equal, but afterward unless Stem's +# construcotr normalizes the types they won't. +# +# This change in behavior is the right thing to do but carries some risk, so +# we provide the following constant to revert to legacy behavior. If you find +# yourself using it them please let me know (https://www.atagar.com/contact/) +# since this flag will go away in the future. + +HASH_TYPES = True + + +def _hash_value(val): + if not HASH_TYPES: + my_hash = 0 + else: + # TODO: I hate doing this but until Python 2.x support is dropped we + # can't readily be strict about bytes vs unicode for attributes. 
This + # is because test assertions often use strings, and normalizing this + # would require wrapping most with to_unicode() calls. + # + # This hack will go away when we drop Python 2.x support. + + if _is_str(val): + my_hash = hash('str') + else: + # Hashing common builtins (ints, bools, etc) provide consistant values but many others vary their value on interpreter invokation. + + my_hash = hash(str(type(val))) + + if isinstance(val, (tuple, list)): + for v in val: + my_hash = (my_hash * 1024) + hash(v) + elif isinstance(val, dict): + for k in sorted(val.keys()): + my_hash = (my_hash * 2048) + (hash(k) * 1024) + hash(val[k]) + else: + my_hash += hash(val) + + return my_hash + + +def _is_str(val): + """ + Check if a value is a string. This will be removed when we no longer provide + backward compatibility for the Python 2.x series. + + :param object val: value to be checked + + :returns: **True** if the value is some form of string (unicode or bytes), + and **False** otherwise + """ + + if stem.prereq.is_python_3(): + return isinstance(val, (bytes, str)) + else: + return isinstance(val, (bytes, unicode)) + + +def _is_int(val): + """ + Check if a value is an integer. This will be removed when we no longer + provide backward compatibility for the Python 2.x series. + + :param object val: value to be checked + + :returns: **True** if the value is some form of integer (int or long), + and **False** otherwise + """ + + if stem.prereq.is_python_3(): + return isinstance(val, int) + else: + return isinstance(val, (int, long)) + + +def datetime_to_unix(timestamp): + """ + Converts a utc datetime object to a unix timestamp. + + .. 
versionadded:: 1.5.0 + + :param datetime timestamp: timestamp to be converted + + :returns: **float** for the unix timestamp of the given datetime object + """ + + if stem.prereq._is_python_26(): + delta = (timestamp - datetime.datetime(1970, 1, 1)) + return delta.days * 86400 + delta.seconds + else: + return (timestamp - datetime.datetime(1970, 1, 1)).total_seconds() + + +def _hash_attr(obj, *attributes, **kwargs): + """ + Provide a hash value for the given set of attributes. + + :param Object obj: object to be hashed + :param list attributes: attribute names to take into account + :param bool cache: persists hash in a '_cached_hash' object attribute + :param class parent: include parent's hash value + """ + + is_cached = kwargs.get('cache', False) + parent_class = kwargs.get('parent', None) + cached_hash = getattr(obj, '_cached_hash', None) + + if is_cached and cached_hash is not None: + return cached_hash + + my_hash = parent_class.__hash__(obj) if parent_class else 0 + my_hash = my_hash * 1024 + hash(str(type(obj))) + + for attr in attributes: + val = getattr(obj, attr) + my_hash = my_hash * 1024 + _hash_value(val) + + if is_cached: + setattr(obj, '_cached_hash', my_hash) + + return my_hash diff --git a/Shared/lib/python3.4/site-packages/stem/util/conf.py b/Shared/lib/python3.4/site-packages/stem/util/conf.py index 6b0efd7..999d819 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/conf.py +++ b/Shared/lib/python3.4/site-packages/stem/util/conf.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -161,6 +161,8 @@ import inspect import os import threading +import stem.prereq + from stem.util import log try: @@ -213,9 +215,8 @@ def config_dict(handle, conf_mappings, handler = None): For more information about how we convert types see our :func:`~stem.util.conf.Config.get` method. 
- **The dictionary you get from this is manged by the - :class:`~stem.util.conf.Config` class and should be treated as being - read-only.** + **The dictionary you get from this is manged by the Config class and should + be treated as being read-only.** :param str handle: unique identifier for a config instance :param dict conf_mappings: config key/value mappings used as our defaults @@ -274,15 +275,15 @@ def uses_settings(handle, path, lazy_load = True): config = get_config(handle) - if not lazy_load and not config.get('settings_loaded', False): + if not lazy_load and not config._settings_loaded: config.load(path) - config.set('settings_loaded', 'true') + config._settings_loaded = True def decorator(func): def wrapped(*args, **kwargs): - if lazy_load and not config.get('settings_loaded', False): + if lazy_load and not config._settings_loaded: config.load(path) - config.set('settings_loaded', 'true') + config._settings_loaded = True if 'config' in inspect.getargspec(func).args: return func(*args, config = config, **kwargs) @@ -446,11 +447,14 @@ class Config(object): # # Information for what values fail to load and why are reported to # 'stem.util.log'. + + .. versionchanged:: 1.7.0 + Class can now be used as a dictionary. """ def __init__(self): self._path = None # location we last loaded from or saved to - self._contents = {} # configuration key/value pairs + self._contents = OrderedDict() # configuration key/value pairs self._listeners = [] # functors to be notified of config changes # used for accessing _contents @@ -459,7 +463,10 @@ class Config(object): # keys that have been requested (used to provide unused config contents) self._requested_keys = set() - def load(self, path = None): + # flag to support lazy loading in uses_settings() + self._settings_loaded = False + + def load(self, path = None, commenting = True): """ Reads in the contents of the given path, adding its configuration values to our current contents. 
If the path is a directory then this loads each @@ -468,8 +475,16 @@ class Config(object): .. versionchanged:: 1.3.0 Added support for directories. + .. versionchanged:: 1.3.0 + Added the **commenting** argument. + + .. versionchanged:: 1.6.0 + Avoid loading vim swap files. + :param str path: file or directory path to be loaded, this uses the last loaded path if not provided + :param bool commenting: ignore line content after a '#' if **True**, read + otherwise :raises: * **IOError** if we fail to read the file (it doesn't exist, insufficient @@ -485,6 +500,9 @@ class Config(object): if os.path.isdir(self._path): for root, dirnames, filenames in os.walk(self._path): for filename in filenames: + if filename.endswith('.swp'): + continue # vim swap file + self.load(os.path.join(root, filename)) return @@ -497,7 +515,7 @@ class Config(object): line = read_contents.pop(0) # strips any commenting or excess whitespace - comment_start = line.find('#') + comment_start = line.find('#') if commenting else -1 if comment_start != -1: line = line[:comment_start] @@ -506,14 +524,10 @@ class Config(object): # parse the key/value pair if line: - try: + if ' ' in line: key, value = line.split(' ', 1) - value = value.strip() - except ValueError: - log.debug("Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''" % (line, line)) - key, value = line, '' - - if not value: + self.set(key, value.strip(), False) + else: # this might be a multi-line entry, try processing it as such multiline_buffer = [] @@ -523,10 +537,9 @@ class Config(object): multiline_buffer.append(content) if multiline_buffer: - self.set(key, '\n'.join(multiline_buffer), False) - continue - - self.set(key, value, False) + self.set(line, '\n'.join(multiline_buffer), False) + else: + self.set(line, '', False) # default to a key => '' mapping def save(self, path = None): """ @@ -535,7 +548,9 @@ class Config(object): :param str path: location to be saved to - :raises: **ValueError** if no 
path was provided and we've never been provided one + :raises: + * **IOError** if we fail to save the file (insufficient permissions, etc) + * **ValueError** if no path was provided and we've never been provided one """ if path: @@ -544,8 +559,11 @@ class Config(object): raise ValueError('Unable to save configuration: no path provided') with self._contents_lock: + if not os.path.exists(os.path.dirname(self._path)): + os.makedirs(os.path.dirname(self._path)) + with open(self._path, 'w') as output_file: - for entry_key in sorted(self.keys()): + for entry_key in self.keys(): for entry_value in self.get_value(entry_key, multiple = True): # check for multi line entries if '\n' in entry_value: @@ -612,6 +630,9 @@ class Config(object): Appends the given key/value configuration mapping, behaving the same as if we'd loaded this from a configuration file. + .. versionchanged:: 1.5.0 + Allow removal of values by overwriting with a **None** value. + :param str key: key for the configuration mapping :param str,list value: value we're setting the mapping to :param bool overwrite: replaces the previous value if **True**, otherwise @@ -619,7 +640,14 @@ class Config(object): """ with self._contents_lock: - if isinstance(value, str): + unicode_type = str if stem.prereq.is_python_3() else unicode + + if value is None: + if overwrite and key in self._contents: + del self._contents[key] + else: + pass # no value so this is a no-op + elif isinstance(value, (bytes, unicode_type)): if not overwrite and key in self._contents: self._contents[key].append(value) else: @@ -636,7 +664,7 @@ class Config(object): for listener in self._listeners: listener(self, key) else: - raise ValueError("Config.set() only accepts str, list, or tuple. Provided value was a '%s'" % type(value)) + raise ValueError("Config.set() only accepts str (bytes or unicode), list, or tuple. 
Provided value was a '%s'" % type(value)) def get(self, key, default = None): """ @@ -743,3 +771,7 @@ class Config(object): message_id = 'stem.util.conf.missing_config_key_%s' % key log.log_once(message_id, log.TRACE, "config entry '%s' not found, defaulting to '%s'" % (key, default)) return default + + def __getitem__(self, key): + with self._contents_lock: + return self._contents[key] diff --git a/Shared/lib/python3.4/site-packages/stem/util/connection.py b/Shared/lib/python3.4/site-packages/stem/util/connection.py index 88d70d5..4ac8b2e 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/connection.py +++ b/Shared/lib/python3.4/site-packages/stem/util/connection.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -17,6 +17,8 @@ Connection and networking based utility functions. is_valid_port - checks if something is a valid representation for a port is_private_address - checks if an IPv4 address belongs to a private range or not + address_to_int - provides an integer representation of an IP address + expand_ipv6_address - provides an IPv6 address with its collapsed portions expanded get_mask_ipv4 - provides the mask representation for a given number of bits get_mask_ipv6 - provides the IPv6 mask representation for a given number of bits @@ -26,9 +28,17 @@ Connection and networking based utility functions. Method for resolving a process' connections. .. versionadded:: 1.1.0 + .. versionchanged:: 1.4.0 Added **NETSTAT_WINDOWS**. + .. versionchanged:: 1.6.0 + Added **BSD_FSTAT**. + + .. deprecated:: 1.6.0 + The SOCKSTAT connection resolver is proving to be unreliable + (:trac:`23057`), and will be dropped in the 2.0.0 release unless fixed. + ==================== =========== Resolver Description ==================== =========== @@ -37,9 +47,10 @@ Connection and networking based utility functions. 
**NETSTAT_WINDOWS** netstat command under Windows **SS** ss command **LSOF** lsof command - **SOCKSTAT** sockstat command under *nix + **SOCKSTAT** sockstat command under \*nix **BSD_SOCKSTAT** sockstat command under FreeBSD **BSD_PROCSTAT** procstat command under FreeBSD + **BSD_FSTAT** fstat command under OpenBSD ==================== =========== """ @@ -50,11 +61,11 @@ import os import platform import re +import stem.util import stem.util.proc import stem.util.system -from stem import str_type -from stem.util import conf, enum, log +from stem.util import conf, enum, log, str_tools # Connection resolution is risky to log about since it's highly likely to # contain sensitive information. That said, it's also difficult to get right in @@ -71,17 +82,10 @@ Resolver = enum.Enum( ('LSOF', 'lsof'), ('SOCKSTAT', 'sockstat'), ('BSD_SOCKSTAT', 'sockstat (bsd)'), - ('BSD_PROCSTAT', 'procstat (bsd)') + ('BSD_PROCSTAT', 'procstat (bsd)'), + ('BSD_FSTAT', 'fstat (bsd)') ) -Connection = collections.namedtuple('Connection', [ - 'local_address', - 'local_port', - 'remote_address', - 'remote_port', - 'protocol', -]) - FULL_IPv4_MASK = '255.255.255.255' FULL_IPv6_MASK = 'FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF' @@ -92,8 +96,8 @@ PORT_USES = None # port number => description RESOLVER_COMMAND = { Resolver.PROC: '', - # -n = prevents dns lookups, -p = include process - Resolver.NETSTAT: 'netstat -np', + # -n = prevents dns lookups, -p = include process, -W = don't crop addresses (needed for ipv6) + Resolver.NETSTAT: 'netstat -npW', # -a = show all TCP/UDP connections, -n = numeric addresses and ports, -o = include pid Resolver.NETSTAT_WINDOWS: 'netstat -ano', @@ -112,62 +116,97 @@ RESOLVER_COMMAND = { # -f = process pid Resolver.BSD_PROCSTAT: 'procstat -f {pid}', + + # -p = process pid + Resolver.BSD_FSTAT: 'fstat -p {pid}', } RESOLVER_FILTER = { Resolver.PROC: '', # tcp 0 586 192.168.0.1:44284 38.229.79.2:443 ESTABLISHED 15843/tor - Resolver.NETSTAT: 
'^{protocol}\s+.*\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED\s+{pid}/{name}\s*$', + Resolver.NETSTAT: '^{protocol}\s+.*\s+{local}\s+{remote}\s+ESTABLISHED\s+{pid}/{name}\s*$', # tcp 586 192.168.0.1:44284 38.229.79.2:443 ESTABLISHED 15843 - Resolver.NETSTAT_WINDOWS: '^\s*{protocol}\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED\s+{pid}\s*$', + Resolver.NETSTAT_WINDOWS: '^\s*{protocol}\s+{local}\s+{remote}\s+ESTABLISHED\s+{pid}\s*$', # tcp ESTAB 0 0 192.168.0.20:44415 38.229.79.2:443 users:(("tor",15843,9)) - Resolver.SS: '^{protocol}\s+ESTAB\s+.*\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+users:\(\("{name}",{pid},[0-9]+\)\)$', + Resolver.SS: '^{protocol}\s+ESTAB\s+.*\s+{local}\s+{remote}\s+users:\(\("{name}",(?:pid=)?{pid},(?:fd=)?[0-9]+\)\)$', # tor 3873 atagar 45u IPv4 40994 0t0 TCP 10.243.55.20:45724->194.154.227.109:9001 (ESTABLISHED) - Resolver.LSOF: '^{name}\s+{pid}\s+.*\s+{protocol}\s+{local_address}:{local_port}->{remote_address}:{remote_port} \(ESTABLISHED\)$', + Resolver.LSOF: '^{name}\s+{pid}\s+.*\s+{protocol}\s+{local}->{remote} \(ESTABLISHED\)$', # atagar tor 15843 tcp4 192.168.0.20:44092 68.169.35.102:443 ESTABLISHED - Resolver.SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+{protocol}4\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED$', + Resolver.SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+{protocol}4\s+{local}\s+{remote}\s+ESTABLISHED$', # _tor tor 4397 12 tcp4 172.27.72.202:54011 127.0.0.1:9001 - Resolver.BSD_SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+\S+\s+{protocol}4\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}$', + Resolver.BSD_SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+\S+\s+{protocol}4\s+{local}\s+{remote}$', # 3561 tor 4 s - rw---n-- 2 0 TCP 10.0.0.2:9050 10.0.0.1:22370 - Resolver.BSD_PROCSTAT: '^\s*{pid}\s+{name}\s+.*\s+{protocol}\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}$', + Resolver.BSD_PROCSTAT: 
'^\s*{pid}\s+{name}\s+.*\s+{protocol}\s+{local}\s+{remote}$', + + # _tor tor 15843 20* internet stream tcp 0x0 192.168.1.100:36174 --> 4.3.2.1:443 + Resolver.BSD_FSTAT: '^\S+\s+{name}\s+{pid}\s+.*\s+{protocol}\s+\S+\s+{local}\s+[-<]-[->]\s+{remote}$', } -def get_connections(resolver, process_pid = None, process_name = None): +class Connection(collections.namedtuple('Connection', ['local_address', 'local_port', 'remote_address', 'remote_port', 'protocol', 'is_ipv6'])): + """ + Network connection information. + + .. versionchanged:: 1.5.0 + Added the **is_ipv6** attribute. + + :var str local_address: ip address the connection originates from + :var int local_port: port the connection originates from + :var str remote_address: destionation ip address + :var int remote_port: destination port + :var str protocol: protocol of the connection ('tcp', 'udp', etc) + :var bool is_ipv6: addresses are ipv6 if true, and ipv4 otherwise + """ + + +def get_connections(resolver = None, process_pid = None, process_name = None): """ Retrieves a list of the current connections for a given process. This - provides a list of Connection instances, which have five attributes... - - * **local_address** (str) - * **local_port** (int) - * **remote_address** (str) - * **remote_port** (int) - * **protocol** (str, generally either 'tcp' or 'udp') + provides a list of :class:`~stem.util.connection.Connection`. Note that + addresses may be IPv4 *or* IPv6 depending on what the platform supports. .. versionadded:: 1.1.0 - :param Resolver resolver: method of connection resolution to use + .. versionchanged:: 1.5.0 + Made our resolver argument optional. + + .. versionchanged:: 1.5.0 + IPv6 support when resolving via proc, netstat, lsof, or ss. 
+ + :param Resolver resolver: method of connection resolution to use, if not + provided then one is picked from among those that should likely be + available for the system :param int process_pid: pid of the process to retrieve :param str process_name: name of the process to retrieve - :returns: **list** of Connection instances + :returns: **list** of :class:`~stem.util.connection.Connection` instances :raises: - * **ValueError** if using **Resolver.PROC** or **Resolver.BSD_PROCSTAT** - and the process_pid wasn't provided + * **ValueError** if neither a process_pid nor process_name is provided * **IOError** if no connections are available or resolution fails (generally they're indistinguishable). The common causes are the command being unavailable or permissions. """ + if not resolver: + available_resolvers = system_resolvers() + + if available_resolvers: + resolver = available_resolvers[0] + else: + raise IOError('Unable to determine a connection resolver') + + if not process_pid and not process_name: + raise ValueError('You must provide a pid or process name to provide connections for') + def _log(msg): if LOG_CONNECTION_RESOLUTION: log.debug(msg) @@ -181,14 +220,20 @@ def get_connections(resolver, process_pid = None, process_name = None): except ValueError: raise ValueError('Process pid was non-numeric: %s' % process_pid) - if process_pid is None and process_name and resolver == Resolver.NETSTAT_WINDOWS: - process_pid = stem.util.system.pid_by_name(process_name) + if process_pid is None: + all_pids = stem.util.system.pid_by_name(process_name, True) - if process_pid is None and resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT): - raise ValueError('%s resolution requires a pid' % resolver) + if len(all_pids) == 0: + if resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT): + raise IOError("Unable to determine the pid of '%s'. %s requires the pid to provide the connections." 
% (process_name, resolver)) + elif len(all_pids) == 1: + process_pid = all_pids[0] + else: + if resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT): + raise IOError("There's multiple processes named '%s'. %s requires a single pid to provide the connections." % (process_name, resolver)) if resolver == Resolver.PROC: - return [Connection(*conn) for conn in stem.util.proc.connections(process_pid)] + return stem.util.proc.connections(pid = process_pid) resolver_command = RESOLVER_COMMAND[resolver].format(pid = process_pid) @@ -199,10 +244,8 @@ def get_connections(resolver, process_pid = None, process_name = None): resolver_regex_str = RESOLVER_FILTER[resolver].format( protocol = '(?P\S+)', - local_address = '(?P[0-9.]+)', - local_port = '(?P[0-9]+)', - remote_address = '(?P[0-9.]+)', - remote_port = '(?P[0-9]+)', + local = '(?P[\[\]0-9a-f.:]+)', + remote = '(?P[\[\]0-9a-f.:]+)', pid = process_pid if process_pid else '[0-9]*', name = process_name if process_name else '\S*', ) @@ -213,28 +256,41 @@ def get_connections(resolver, process_pid = None, process_name = None): connections = [] resolver_regex = re.compile(resolver_regex_str) + def _parse_address_str(addr_type, addr_str, line): + addr, port = addr_str.rsplit(':', 1) + + if not is_valid_ipv4_address(addr) and not is_valid_ipv6_address(addr, allow_brackets = True): + _log('Invalid %s address (%s): %s' % (addr_type, addr, line)) + return None, None + elif not is_valid_port(port): + _log('Invalid %s port (%s): %s' % (addr_type, port, line)) + return None, None + else: + _log('Valid %s:%s: %s' % (addr, port, line)) + return addr.lstrip('[').rstrip(']'), int(port) + for line in results: match = resolver_regex.match(line) if match: attr = match.groupdict() - local_addr = attr['local_address'] - local_port = int(attr['local_port']) - remote_addr = attr['remote_address'] - remote_port = int(attr['remote_port']) + + local_addr, local_port = _parse_address_str('local', attr['local'], line) + 
remote_addr, remote_port = _parse_address_str('remote', attr['remote'], line) + + if not (local_addr and local_port and remote_addr and remote_port): + continue # missing or malformed field + protocol = attr['protocol'].lower() - if remote_addr == '0.0.0.0': - continue # procstat response for unestablished connections + if protocol == 'tcp6': + protocol = 'tcp' - if not (is_valid_ipv4_address(local_addr) and is_valid_ipv4_address(remote_addr)): - _log('Invalid address (%s or %s): %s' % (local_addr, remote_addr, line)) - elif not (is_valid_port(local_port) and is_valid_port(remote_port)): - _log('Invalid port (%s or %s): %s' % (local_port, remote_port, line)) - elif protocol not in ('tcp', 'udp'): + if protocol not in ('tcp', 'udp'): _log('Unrecognized protocol (%s): %s' % (protocol, line)) + continue - conn = Connection(local_addr, local_port, remote_addr, remote_port, protocol) + conn = Connection(local_addr, local_port, remote_addr, remote_port, protocol, is_valid_ipv6_address(local_addr)) connections.append(conn) _log(str(conn)) @@ -261,6 +317,7 @@ def system_resolvers(system = None): :returns: **list** of :data:`~stem.util.connection.Resolver` instances available on this platform """ + if system is None: if stem.util.system.is_gentoo(): system = 'Gentoo' @@ -269,8 +326,10 @@ def system_resolvers(system = None): if system == 'Windows': resolvers = [Resolver.NETSTAT_WINDOWS] - elif system in ('Darwin', 'OpenBSD'): + elif system == 'Darwin': resolvers = [Resolver.LSOF] + elif system == 'OpenBSD': + resolvers = [Resolver.BSD_FSTAT] elif system == 'FreeBSD': # Netstat is available, but lacks a '-p' equivalent so we can't associate # the results to processes. 
The platform also has a ss command, but it @@ -349,7 +408,9 @@ def is_valid_ipv4_address(address): :returns: **True** if input is a valid IPv4 address, **False** otherwise """ - if not isinstance(address, (bytes, str_type)): + if isinstance(address, bytes): + address = str_tools._to_unicode(address) + elif not stem.util._is_str(address): return False # checks if theres four period separated values @@ -377,10 +438,31 @@ def is_valid_ipv6_address(address, allow_brackets = False): :returns: **True** if input is a valid IPv6 address, **False** otherwise """ + if isinstance(address, bytes): + address = str_tools._to_unicode(address) + elif not stem.util._is_str(address): + return False + if allow_brackets: if address.startswith('[') and address.endswith(']'): address = address[1:-1] + if address.count('.') == 3: + # Likely an ipv4-mapped portion. Check that its vaild, then replace with a + # filler. + + ipv4_start = address.rfind(':', 0, address.find('.')) + 1 + ipv4_end = address.find(':', ipv4_start + 1) + + if ipv4_end == -1: + ipv4_end = None # don't crop the last character + + if not is_valid_ipv4_address(address[ipv4_start:ipv4_end]): + return False + + addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, 'ff:ff', address[ipv4_end + 1:] if ipv4_end else None] + address = ':'.join(filter(None, addr_comp)) + # addresses are made up of eight colon separated groups of four hex digits # with leading zeros being optional # https://en.wikipedia.org/wiki/IPv6#Address_format @@ -469,6 +551,24 @@ def is_private_address(address): return False +def address_to_int(address): + """ + Provides an integer representation of a IPv4 or IPv6 address that can be used + for sorting. + + .. versionadded:: 1.5.0 + + :param str address: IPv4 or IPv6 address + + :returns: **int** representation of the address + """ + + # TODO: Could be neat to also use this for serialization if we also had an + # int_to_address() function. 
+ + return int(_address_to_binary(address), 2) + + def expand_ipv6_address(address): """ Expands abbreviated IPv6 addresses to their full colon separated hex format. @@ -482,6 +582,9 @@ def expand_ipv6_address(address): >>> expand_ipv6_address('::') '0000:0000:0000:0000:0000:0000:0000:0000' + >>> expand_ipv6_address('::ffff:5.9.158.75') + '0000:0000:0000:0000:0000:ffff:0509:9e4b' + :param str address: IPv6 address to be expanded :raises: **ValueError** if the address can't be expanded due to being malformed @@ -490,6 +593,25 @@ def expand_ipv6_address(address): if not is_valid_ipv6_address(address): raise ValueError("'%s' isn't a valid IPv6 address" % address) + # expand ipv4-mapped portions of addresses + if address.count('.') == 3: + ipv4_start = address.rfind(':', 0, address.find('.')) + 1 + ipv4_end = address.find(':', ipv4_start + 1) + + if ipv4_end == -1: + ipv4_end = None # don't crop the last character + + # Converts ipv4 address to its hex ipv6 representation. For instance... + # + # '5.9.158.75' => '0509:9e4b' + + ipv4_bin = _address_to_binary(address[ipv4_start:ipv4_end]) + groupings = [ipv4_bin[16 * i:16 * (i + 1)] for i in range(2)] + ipv6_snippet = ':'.join(['%04x' % int(group, 2) for group in groupings]) + + addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, ipv6_snippet, address[ipv4_end + 1:] if ipv4_end else None] + address = ':'.join(filter(None, addr_comp)) + # expands collapsed groupings, there can only be a single '::' in a valid # address if '::' in address: @@ -577,7 +699,7 @@ def _get_masked_bits(mask): raise ValueError("'%s' is an invalid subnet mask" % mask) # converts octets to binary representation - mask_bin = _get_address_binary(mask) + mask_bin = _address_to_binary(mask) mask_match = re.match('^(1*)(0*)$', mask_bin) if mask_match: @@ -599,7 +721,7 @@ def _get_binary(value, bits): return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)]) -def _get_address_binary(address): +def 
_address_to_binary(address): """ Provides the binary value for an IPv4 or IPv6 address. @@ -644,6 +766,7 @@ def _cryptovariables_equal(x, y): _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) == _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y)) + # TODO: drop with stem 2.x # We renamed our methods to drop a redundant 'get_*' prefix, so alias the old # names for backward compatability. diff --git a/Shared/lib/python3.4/site-packages/stem/util/enum.py b/Shared/lib/python3.4/site-packages/stem/util/enum.py index 5cf81b8..00835ca 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/enum.py +++ b/Shared/lib/python3.4/site-packages/stem/util/enum.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -40,7 +40,7 @@ constructed as simple type listings... +- __iter__ - iterator over our enum keys """ -from stem import str_type +import stem.util def UppercaseEnum(*args): @@ -76,7 +76,7 @@ class Enum(object): keys, values = [], [] for entry in args: - if isinstance(entry, (bytes, str_type)): + if stem.util._is_str(entry): key, val = entry, _to_camel_case(entry) elif isinstance(entry, tuple) and len(entry) == 2: key, val = entry diff --git a/Shared/lib/python3.4/site-packages/stem/util/log.py b/Shared/lib/python3.4/site-packages/stem/util/log.py index 4154706..0b2de90 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/log.py +++ b/Shared/lib/python3.4/site-packages/stem/util/log.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -88,9 +88,13 @@ DEDUPLICATION_MESSAGE_IDS = set() class _NullHandler(logging.Handler): + def __init__(self): + logging.Handler.__init__(self, level = logging.FATAL + 5) # disable logging + def emit(self, record): pass + if not LOGGER.handlers: 
LOGGER.addHandler(_NullHandler()) @@ -99,7 +103,7 @@ def get_logger(): """ Provides the stem logger. - :return: **logging.Logger** for stem + :returns: **logging.Logger** for stem """ return LOGGER @@ -118,6 +122,22 @@ def logging_level(runlevel): return logging.FATAL + 5 +def is_tracing(): + """ + Checks if we're logging at the trace runlevel. + + .. versionadded:: 1.6.0 + + :returns: **True** if we're logging at the trace runlevel and **False** otherwise + """ + + for handler in get_logger().handlers: + if handler.level <= logging_level(TRACE): + return True + + return False + + def escape(message): """ Escapes specific sequences for logging (newlines, tabs, carriage returns). If @@ -199,8 +219,8 @@ class LogBuffer(logging.Handler): Basic log handler that listens for stem events and stores them so they can be read later. Log entries are cleared as they are read. - .. versionchanged:: 1.4.0 - Added the yield_records argument. + .. versionchanged:: 1.4.0 + Added the yield_records argument. 
""" def __init__(self, runlevel, yield_records = False): diff --git a/Shared/lib/python3.4/site-packages/stem/util/ports.cfg b/Shared/lib/python3.4/site-packages/stem/util/ports.cfg index 8b7829e..ebef834 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/ports.cfg +++ b/Shared/lib/python3.4/site-packages/stem/util/ports.cfg @@ -310,4 +310,5 @@ port 19638 => Ensim port 23399 => Skype port 30301 => BitTorrent port 33434 => traceroute +port 50002 => Electrum Bitcoin SSL diff --git a/Shared/lib/python3.4/site-packages/stem/util/proc.py b/Shared/lib/python3.4/site-packages/stem/util/proc.py index e4a826e..7fcfd1b 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/proc.py +++ b/Shared/lib/python3.4/site-packages/stem/util/proc.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -54,14 +54,23 @@ import socket import sys import time +import stem.prereq +import stem.util.connection import stem.util.enum +import stem.util.str_tools from stem.util import log try: - # added in python 3.2 - from functools import lru_cache + # unavailable on windows (#19823) + import pwd + IS_PWD_AVAILABLE = True except ImportError: + IS_PWD_AVAILABLE = False + +if stem.prereq._is_lru_cache_available(): + from functools import lru_cache +else: from stem.util.lru_cache import lru_cache # os.sysconf is only defined on unix @@ -70,6 +79,9 @@ try: except AttributeError: CLOCK_TICKS = None +IS_LITTLE_ENDIAN = sys.byteorder == 'little' +ENCODED_ADDR = {} # cache of encoded ips to their decoded version + Stat = stem.util.enum.Enum( ('COMMAND', 'command'), ('CPU_UTIME', 'utime'), ('CPU_STIME', 'stime'), ('START_TIME', 'start time') @@ -324,38 +336,110 @@ def file_descriptors_used(pid): raise IOError('Unable to check number of file descriptors used: %s' % exc) -def connections(pid): +def connections(pid = None, user = None): """ - Queries connection related 
information from the proc contents. This provides - similar results to netstat, lsof, sockstat, and other connection resolution - utilities (though the lookup is far quicker). + Queries connections from the proc contents. This matches netstat, lsof, and + friends but is much faster. If no **pid** or **user** are provided this + provides all present connections. - :param int pid: process id of the process to be queried + :param int pid: pid to provide connections for + :param str user: username to look up connections for - :returns: A listing of connection tuples of the form **[(local_ipAddr1, - local_port1, foreign_ipAddr1, foreign_port1, protocol), ...]** (addresses - and protocols are strings and ports are ints) + :returns: **list** of :class:`~stem.util.connection.Connection` instances :raises: **IOError** if it can't be determined """ + start_time, conn = time.time(), [] + + if pid: + parameter = 'connections for pid %s' % pid + + try: + pid = int(pid) + + if pid < 0: + raise IOError("Process pids can't be negative: %s" % pid) + except (ValueError, TypeError): + raise IOError('Process pid was non-numeric: %s' % pid) + elif user: + parameter = 'connections for user %s' % user + else: + parameter = 'all connections' + try: - pid = int(pid) + if not IS_PWD_AVAILABLE: + raise IOError("This requires python's pwd module, which is unavailable on Windows.") - if pid < 0: - raise IOError("Process pids can't be negative: %s" % pid) - except (ValueError, TypeError): - raise IOError('Process pid was non-numeric: %s' % pid) + inodes = _inodes_for_sockets(pid) if pid else set() + process_uid = stem.util.str_tools._to_bytes(str(pwd.getpwnam(user).pw_uid)) if user else None - if pid == 0: - return [] + for proc_file_path in ('/proc/net/tcp', '/proc/net/tcp6', '/proc/net/udp', '/proc/net/udp6'): + if proc_file_path.endswith('6') and not os.path.exists(proc_file_path): + continue # ipv6 proc contents are optional - # fetches the inode numbers for socket file descriptors + 
protocol = proc_file_path[10:].rstrip('6') # 'tcp' or 'udp' + is_ipv6 = proc_file_path.endswith('6') - start_time, parameter = time.time(), 'process connections' - inodes = [] + try: + with open(proc_file_path, 'rb') as proc_file: + proc_file.readline() # skip the first line - for fd in os.listdir('/proc/%s/fd' % pid): + for line in proc_file: + _, l_dst, r_dst, status, _, _, _, uid, _, inode = line.split()[:10] + + if inodes and inode not in inodes: + continue + elif process_uid and uid != process_uid: + continue + elif protocol == 'tcp' and status != b'01': + continue # skip tcp connections that aren't yet established + + div = l_dst.find(b':') + l_addr = _unpack_addr(l_dst[:div]) + l_port = int(l_dst[div + 1:], 16) + + div = r_dst.find(b':') + r_addr = _unpack_addr(r_dst[:div]) + r_port = int(r_dst[div + 1:], 16) + + if r_addr == '0.0.0.0' or r_addr == '0000:0000:0000:0000:0000:0000': + continue # no address + elif l_port == 0 or r_port == 0: + continue # no port + + conn.append(stem.util.connection.Connection(l_addr, l_port, r_addr, r_port, protocol, is_ipv6)) + except IOError as exc: + raise IOError("unable to read '%s': %s" % (proc_file_path, exc)) + except Exception as exc: + raise IOError("unable to parse '%s': %s" % (proc_file_path, exc)) + + _log_runtime(parameter, '/proc/net/[tcp|udp]', start_time) + return conn + except IOError as exc: + _log_failure(parameter, exc) + raise + + +def _inodes_for_sockets(pid): + """ + Provides inodes in use by a process for its sockets. 
+ + :param int pid: process id of the process to be queried + + :returns: **set** with inodes for its sockets + + :raises: **IOError** if it can't be determined + """ + + inodes = set() + + try: + fd_contents = os.listdir('/proc/%s/fd' % pid) + except OSError as exc: + raise IOError('Unable to read our file descriptors: %s' % exc) + + for fd in fd_contents: fd_path = '/proc/%s/fd/%s' % (pid, fd) try: @@ -364,57 +448,18 @@ def connections(pid): fd_name = os.readlink(fd_path) if fd_name.startswith('socket:['): - inodes.append(fd_name[8:-1]) + inodes.add(stem.util.str_tools._to_bytes(fd_name[8:-1])) except OSError as exc: if not os.path.exists(fd_path): continue # descriptors may shift while we're in the middle of iterating over them # most likely couldn't be read due to permissions - exc = IOError('unable to determine file descriptor destination (%s): %s' % (exc, fd_path)) - _log_failure(parameter, exc) - raise exc + raise IOError('unable to determine file descriptor destination (%s): %s' % (exc, fd_path)) - if not inodes: - # unable to fetch any connections for this process - return [] - - # check for the connection information from the /proc/net contents - - conn = [] - - for proc_file_path in ('/proc/net/tcp', '/proc/net/udp'): - try: - proc_file = open(proc_file_path) - proc_file.readline() # skip the first line - - for line in proc_file: - _, l_addr, f_addr, status, _, _, _, _, _, inode = line.split()[:10] - - if inode in inodes: - # if a tcp connection, skip if it isn't yet established - if proc_file_path.endswith('/tcp') and status != '01': - continue - - local_ip, local_port = _decode_proc_address_encoding(l_addr) - foreign_ip, foreign_port = _decode_proc_address_encoding(f_addr) - protocol = proc_file_path[10:] - conn.append((local_ip, local_port, foreign_ip, foreign_port, protocol)) - - proc_file.close() - except IOError as exc: - exc = IOError("unable to read '%s': %s" % (proc_file_path, exc)) - _log_failure(parameter, exc) - raise exc - except Exception 
as exc: - exc = IOError("unable to parse '%s': %s" % (proc_file_path, exc)) - _log_failure(parameter, exc) - raise exc - - _log_runtime(parameter, '/proc/net/[tcp|udp]', start_time) - return conn + return inodes -def _decode_proc_address_encoding(addr): +def _unpack_addr(addr): """ Translates an address entry in the /proc/net/* contents to a human readable form (`reference `_, @@ -422,35 +467,40 @@ def _decode_proc_address_encoding(addr): :: - "0500000A:0016" -> ("10.0.0.5", 22) + "0500000A" -> "10.0.0.5" + "F804012A4A5190010000000002000000" -> "2a01:4f8:190:514a::2" :param str addr: proc address entry to be decoded - :returns: **tuple** of the form **(addr, port)**, with addr as a string and port an int + :returns: **str** of the decoded address """ - ip, port = addr.split(':') + if addr not in ENCODED_ADDR: + if len(addr) == 8: + # IPv4 address + decoded = base64.b16decode(addr)[::-1] if IS_LITTLE_ENDIAN else base64.b16decode(addr) + ENCODED_ADDR[addr] = socket.inet_ntop(socket.AF_INET, decoded) + else: + # IPv6 address - # the port is represented as a two-byte hexadecimal number - port = int(port, 16) + if IS_LITTLE_ENDIAN: + # Group into eight characters, then invert in pairs... + # + # https://trac.torproject.org/projects/tor/ticket/18079#comment:24 - if sys.version_info >= (3,): - ip = ip.encode('ascii') + inverted = [] - # The IPv4 address portion is a little-endian four-byte hexadecimal number. - # That is, the least significant byte is listed first, so we need to reverse - # the order of the bytes to convert it to an IP address. - # - # This needs to account for the endian ordering as per... 
- # http://code.google.com/p/psutil/issues/detail?id=201 - # https://trac.torproject.org/projects/tor/ticket/4777 + for i in range(4): + grouping = addr[8 * i:8 * (i + 1)] + inverted += [grouping[2 * i:2 * (i + 1)] for i in range(4)][::-1] - if sys.byteorder == 'little': - ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip)[::-1]) - else: - ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip)) + encoded = b''.join(inverted) + else: + encoded = addr - return (ip, port) + ENCODED_ADDR[addr] = stem.util.connection.expand_ipv6_address(socket.inet_ntop(socket.AF_INET6, base64.b16decode(encoded))) + + return ENCODED_ADDR[addr] def _is_float(*value): @@ -508,7 +558,7 @@ def _get_lines(file_path, line_prefixes, parameter): return results except IOError as exc: _log_failure(parameter, exc) - raise exc + raise def _log_runtime(parameter, proc_location, start_time): @@ -534,6 +584,7 @@ def _log_failure(parameter, exc): log.debug('proc call failed (%s): %s' % (parameter, exc)) + # TODO: drop with stem 2.x # We renamed our methods to drop a redundant 'get_*' prefix, so alias the old # names for backward compatability. diff --git a/Shared/lib/python3.4/site-packages/stem/util/str_tools.py b/Shared/lib/python3.4/site-packages/stem/util/str_tools.py index 497564e..0f31fb2 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/str_tools.py +++ b/Shared/lib/python3.4/site-packages/stem/util/str_tools.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -27,10 +27,9 @@ import re import sys import stem.prereq +import stem.util import stem.util.enum -from stem import str_type - # label conversion tuples of the form... 
# (bits / bytes / seconds, short label, long label) @@ -75,13 +74,13 @@ if stem.prereq.is_python_3(): return msg else: def _to_bytes_impl(msg): - if msg is not None and isinstance(msg, str_type): + if msg is not None and isinstance(msg, unicode): return codecs.latin_1_encode(msg, 'replace')[0] else: return msg def _to_unicode_impl(msg): - if msg is not None and not isinstance(msg, str_type): + if msg is not None and not isinstance(msg, unicode): return msg.decode('utf-8', 'replace') else: return msg @@ -117,6 +116,22 @@ def _to_unicode(msg): return _to_unicode_impl(msg) +def _to_int(msg): + """ + Serializes a string to a number. + + :param str msg: string to be serialized + + :returns: **int** representation of the string + """ + + if stem.prereq.is_python_3() and isinstance(msg, bytes): + # iterating over bytes in python3 provides ints rather than characters + return sum([pow(256, (len(msg) - i - 1)) * c for (i, c) in enumerate(msg)]) + else: + return sum([pow(256, (len(msg) - i - 1)) * ord(c) for (i, c) in enumerate(msg)]) + + def _to_camel_case(label, divider = '_', joiner = ' '): """ Converts the given string to camel case, ie: @@ -145,6 +160,24 @@ def _to_camel_case(label, divider = '_', joiner = ' '): return joiner.join(words) +def _split_by_length(msg, size): + """ + Splits a string into a list of strings up to the given size. + + :: + + >>> _split_by_length('hello', 2) + ['he', 'll', 'o'] + + :param str msg: string to split + :param int size: number of characters to chunk into + + :returns: **list** with chunked string components + """ + + return [msg[i:i + size] for i in range(0, len(msg), size)] + + # This needs to be defined after _to_camel_case() to avoid a circular # dependency with the enum module. 
@@ -210,6 +243,9 @@ def crop(msg, size, min_word_length = 4, min_crop = 0, ending = Ending.ELLIPSE, # ellipse, and cropping words requires an extra space for hyphens if ending == Ending.ELLIPSE: + if size < 3: + return ('', msg) if get_remainder else '' + size -= 3 elif min_word_length and ending == Ending.HYPHEN: min_word_length += 1 @@ -262,7 +298,7 @@ def crop(msg, size, min_word_length = 4, min_crop = 0, ending = Ending.ELLIPSE, return (return_msg, remainder) if get_remainder else return_msg -def size_label(byte_count, decimal = 0, is_long = False, is_bytes = True): +def size_label(byte_count, decimal = 0, is_long = False, is_bytes = True, round = False): """ Converts a number of bytes into a human readable label in its most significant units. For instance, 7500 bytes would return "7 KB". If the @@ -281,18 +317,22 @@ def size_label(byte_count, decimal = 0, is_long = False, is_bytes = True): >>> size_label(1050, 3, True) '1.025 Kilobytes' + .. versionchanged:: 1.6.0 + Added round argument. 
+ :param int byte_count: number of bytes to be converted :param int decimal: number of decimal digits to be included :param bool is_long: expands units label :param bool is_bytes: provides units in bytes if **True**, bits otherwise + :param bool round: rounds normally if **True**, otherwise rounds down :returns: **str** with human readable representation of the size """ if is_bytes: - return _get_label(SIZE_UNITS_BYTES, byte_count, decimal, is_long) + return _get_label(SIZE_UNITS_BYTES, byte_count, decimal, is_long, round) else: - return _get_label(SIZE_UNITS_BITS, byte_count, decimal, is_long) + return _get_label(SIZE_UNITS_BITS, byte_count, decimal, is_long, round) def time_label(seconds, decimal = 0, is_long = False): @@ -456,7 +496,7 @@ def _parse_timestamp(entry): :raises: **ValueError** if the timestamp is malformed """ - if not isinstance(entry, (str, str_type)): + if not stem.util._is_str(entry): raise ValueError('parse_timestamp() input must be a str, got a %s' % type(entry)) try: @@ -482,7 +522,7 @@ def _parse_iso_timestamp(entry): :raises: **ValueError** if the timestamp is malformed """ - if not isinstance(entry, (str, str_type)): + if not stem.util._is_str(entry): raise ValueError('parse_iso_timestamp() input must be a str, got a %s' % type(entry)) # based after suggestions from... 
@@ -496,7 +536,7 @@ def _parse_iso_timestamp(entry): if len(microseconds) != 6 or not microseconds.isdigit(): raise ValueError("timestamp's microseconds should be six digits") - if timestamp_str[10] == 'T': + if len(timestamp_str) > 10 and timestamp_str[10] == 'T': timestamp_str = timestamp_str[:10] + ' ' + timestamp_str[11:] else: raise ValueError("timestamp didn't contain delimeter 'T' between date and time") @@ -505,7 +545,7 @@ def _parse_iso_timestamp(entry): return timestamp + datetime.timedelta(microseconds = int(microseconds)) -def _get_label(units, count, decimal, is_long): +def _get_label(units, count, decimal, is_long, round = False): """ Provides label corresponding to units of the highest significance in the provided set. This rounds down (ie, integer truncation after visible units). @@ -515,6 +555,7 @@ def _get_label(units, count, decimal, is_long): :param int count: number of base units being converted :param int decimal: decimal precision of label :param bool is_long: uses the long label if **True**, short label otherwise + :param bool round: rounds normally if **True**, otherwise rounds down """ # formatted string for the requested number of digits @@ -529,10 +570,12 @@ def _get_label(units, count, decimal, is_long): for count_per_unit, short_label, long_label in units: if count >= count_per_unit: - # Rounding down with a '%f' is a little clunky. Reducing the count so - # it'll divide evenly as the rounded down value. + if not round: + # Rounding down with a '%f' is a little clunky. Reducing the count so + # it'll divide evenly as the rounded down value. 
+ + count -= count % (count_per_unit / (10 ** decimal)) - count -= count % (count_per_unit / (10 ** decimal)) count_label = label_format % (count / count_per_unit) if is_long: @@ -548,6 +591,7 @@ def _get_label(units, count, decimal, is_long): else: return count_label + short_label + # TODO: drop with stem 2.x # We renamed our methods to drop a redundant 'get_*' prefix, so alias the old # names for backward compatability. diff --git a/Shared/lib/python3.4/site-packages/stem/util/system.py b/Shared/lib/python3.4/site-packages/stem/util/system.py index aa13cb9..3c03bc4 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/system.py +++ b/Shared/lib/python3.4/site-packages/stem/util/system.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -10,6 +10,10 @@ best-effort, providing **None** if the lookup fails. Dropped the get_* prefix from several function names. The old names still work, but are deprecated aliases. +.. versionchanged:: 1.5.0 + Added the **SYSTEM_CALL_TIME** global, which tracks total time spent making + system commands. + **Module Overview:** :: @@ -17,16 +21,19 @@ best-effort, providing **None** if the lookup fails. 
is_windows - checks if we're running on windows is_mac - checks if we're running on a mac is_gentoo - checks if we're running on gentoo + is_slackware - checks if we're running on slackware is_bsd - checks if we're running on the bsd family of operating systems is_available - determines if a command is available on this system is_running - determines if a given process is running + size_of - provides the memory usage of an object call - runs the given system command and provides back the results name_by_pid - gets the name for a process by the given pid pid_by_name - gets the pid for a process by the given name pid_by_port - gets the pid for a process listening to a given port pid_by_open_file - gets the pid for the process with an open file + pids_by_user - provides processes owned by a user cwd - provides the current working directory for a given process user - provides the user a process is running under start_time - provides the unix timestamp when the process started @@ -40,25 +47,63 @@ best-effort, providing **None** if the lookup fails. get_process_name - provides our process' name set_process_name - changes our process' name + +.. data:: Status (enum) + + State of a subprocess. + + .. 
versionadded:: 1.6.0 + + ==================== =========== + Status Description + ==================== =========== + PENDING not yet started + RUNNING currently being performed + DONE completed successfully + FAILED failed with an exception + ==================== =========== """ +import collections import ctypes import ctypes.util -import distutils.spawn +import itertools import mimetypes +import multiprocessing import os import platform import re import subprocess +import sys import tarfile +import threading import time +import stem.prereq +import stem.util +import stem.util.enum import stem.util.proc import stem.util.str_tools -from stem import UNDEFINED, str_type +from stem import UNDEFINED from stem.util import log +State = stem.util.enum.UppercaseEnum( + 'PENDING', + 'RUNNING', + 'DONE', + 'FAILED', +) + +SIZE_RECURSES = { + tuple: iter, + list: iter, + collections.deque: iter, + dict: lambda d: itertools.chain.from_iterable(d.items()), + set: iter, + frozenset: iter, +} + # Mapping of commands to if they're available or not. CMD_AVAILABLE_CACHE = {} @@ -84,6 +129,8 @@ GET_PID_BY_PORT_NETSTAT = 'netstat -npltu' GET_PID_BY_PORT_SOCKSTAT = 'sockstat -4l -P tcp -p %s' GET_PID_BY_PORT_LSOF = 'lsof -wnP -iTCP -sTCP:LISTEN' GET_PID_BY_FILE_LSOF = 'lsof -tw %s' +GET_PIDS_BY_USER_LINUX = 'ps -o pid -u %s' +GET_PIDS_BY_USER_BSD = 'ps -o pid -U %s' GET_CWD_PWDX = 'pwdx %s' GET_CWD_LSOF = 'lsof -a -p %s -d cwd -Fn' GET_BSD_JAIL_ID_PS = 'ps -p %s -o jid' @@ -125,6 +172,143 @@ _PROCESS_NAME = None _MAX_NAME_LENGTH = -1 +# Tracks total time spent shelling out to other commands like 'ps' and +# 'netstat', so we can account for it as part of our cpu time along with +# os.times(). + +SYSTEM_CALL_TIME = 0.0 +SYSTEM_CALL_TIME_LOCK = threading.RLock() + + +class CallError(OSError): + """ + Error response when making a system call. This is an **OSError** subclass + with additional information about the process. 
Depending on the nature of the + error not all of these attributes will be available. + + :var str msg: exception string + :var str command: command that was ran + :var int exit_status: exit code of the process + :var float runtime: time the command took to run + :var str stdout: stdout of the process + :var str stderr: stderr of the process + """ + + def __init__(self, msg, command, exit_status, runtime, stdout, stderr): + self.msg = msg + self.command = command + self.exit_status = exit_status + self.runtime = runtime + self.stdout = stdout + self.stderr = stderr + + def __str__(self): + return self.msg + + +class CallTimeoutError(CallError): + """ + Error response when making a system call that has timed out. + + .. versionadded:: 1.6.0 + + :var float timeout: time we waited + """ + + def __init__(self, msg, command, exit_status, runtime, stdout, stderr, timeout): + super(CallTimeoutError, self).__init__(msg, command, exit_status, runtime, stdout, stderr) + self.timeout = timeout + + +class DaemonTask(object): + """ + Invokes the given function in a subprocess, returning the value. + + .. versionadded:: 1.6.0 + + :var function runner: function to be invoked by the subprocess + :var tuple args: arguments to provide to the subprocess + :var int priority: subprocess nice priority + + :var stem.util.system.State status: state of the subprocess + :var float runtime: seconds subprocess took to complete + :var object result: return value of subprocess if successful + :var exception error: exception raised by subprocess if it failed + """ + + def __init__(self, runner, args = None, priority = 15, start = False): + self.runner = runner + self.args = args + self.priority = priority + + self.status = State.PENDING + self.runtime = None + self.result = None + self.error = None + + self._process = None + self._pipe = None + + if start: + self.run() + + def run(self): + """ + Invokes the task if it hasn't already been started. If it has this is a + no-op. 
+ """ + + if self.status == State.PENDING: + self._pipe, child_pipe = multiprocessing.Pipe() + self._process = multiprocessing.Process(target = DaemonTask._run_wrapper, args = (child_pipe, self.priority, self.runner, self.args)) + self._process.start() + self.status = State.RUNNING + + def join(self): + """ + Provides the result of the daemon task. If still running this blocks until + the task is completed. + + :returns: response of the function we ran + + :raises: exception raised by the function if it failed with one + """ + + if self.status == State.PENDING: + self.run() + + if self.status == State.RUNNING: + self._process.join() + response = self._pipe.recv() + + self.status = response[0] + self.runtime = response[1] + + if self.status == State.DONE: + self.result = response[2] + elif self.status == State.FAILED: + self.error = response[2] + + if self.status == State.DONE: + return self.result + elif self.status == State.FAILED: + raise self.error + else: + raise RuntimeError('BUG: unexpected status from daemon task, %s' % self.status) + + @staticmethod + def _run_wrapper(conn, priority, runner, args): + start_time = time.time() + os.nice(priority) + + try: + result = runner(*args) if args else runner() + conn.send((State.DONE, time.time() - start_time, result)) + except Exception as exc: + conn.send((State.FAILED, time.time() - start_time, exc)) + finally: + conn.close() + def is_windows(): """ @@ -156,6 +340,16 @@ def is_gentoo(): return os.path.exists('/etc/gentoo-release') +def is_slackware(): + """ + Checks if we are running on a Slackware system. + + :returns: **bool** to indicate if we're on a Slackware system + """ + + return os.path.exists('/etc/slackware-version') + + def is_bsd(): """ Checks if we are within the BSD family of operating systems. 
This currently @@ -164,7 +358,7 @@ def is_bsd(): :returns: **bool** to indicate if we're on a BSD OS """ - return platform.system() in ('Darwin', 'FreeBSD', 'OpenBSD') + return platform.system() in ('Darwin', 'FreeBSD', 'OpenBSD', 'NetBSD') def is_available(command, cached=True): @@ -188,27 +382,49 @@ def is_available(command, cached=True): command = command.split(' ')[0] if command in SHELL_COMMANDS: - # we can't actually look it up, so hope the shell really provides it... - - return True + return True # we can't actually look it up, so hope the shell really provides it... elif cached and command in CMD_AVAILABLE_CACHE: return CMD_AVAILABLE_CACHE[command] - else: - cmd_exists = distutils.spawn.find_executable(command) is not None - CMD_AVAILABLE_CACHE[command] = cmd_exists - return cmd_exists + elif 'PATH' not in os.environ: + return False # lacking a path will cause find_executable() to internally fail + + cmd_exists = False + + for path in os.environ['PATH'].split(os.pathsep): + cmd_path = os.path.join(path, command) + + if is_windows(): + cmd_path += '.exe' + + if os.path.exists(cmd_path) and os.access(cmd_path, os.X_OK): + cmd_exists = True + break + + CMD_AVAILABLE_CACHE[command] = cmd_exists + return cmd_exists def is_running(command): """ - Checks for if a process with a given name is running or not. + Checks for if a process with a given name or pid is running. - :param str command: process name to be checked + .. versionchanged:: 1.6.0 + Added support for list and pid arguments. + + :param str,list,int command: process name if a str, multiple process names if + a list, or pid if an int to be checked :returns: **True** if the process is running, **False** if it's not among ps results, and **None** if ps can't be queried """ + if isinstance(command, int): + try: + os.kill(command, 0) + return True + except OSError: + return False + # Linux and the BSD families have different variants of ps. 
Guess based on # the is_bsd() check which to try first, then fall back to the other. # @@ -236,12 +452,63 @@ def is_running(command): command_listing = call(secondary_resolver, None) if command_listing: - command_listing = map(str_type.strip, command_listing) - return command in command_listing + command_listing = [c.strip() for c in command_listing] + + if stem.util._is_str(command): + command = [command] + + for cmd in command: + if cmd in command_listing: + return True + + return False return None +def size_of(obj, exclude = None): + """ + Provides the `approximate memory usage of an object + `_. This can recurse tuples, + lists, deques, dicts, and sets. To teach this function to inspect additional + object types expand SIZE_RECURSES... + + :: + + stem.util.system.SIZE_RECURSES[SomeClass] = SomeClass.get_elements + + .. versionadded:: 1.6.0 + + :param object obj: object to provide the size of + :param set exclude: object ids to exclude from size estimation + + :returns: **int** with the size of the object in bytes + + :raises: **NotImplementedError** if using PyPy + """ + + if stem.prereq.is_pypy(): + raise NotImplementedError('PyPy does not implement sys.getsizeof()') + + if exclude is None: + exclude = set() + elif id(obj) in exclude: + return 0 + + try: + size = sys.getsizeof(obj) + except TypeError: + size = sys.getsizeof(0) # estimate if object lacks a __sizeof__ + + exclude.add(id(obj)) + + if type(obj) in SIZE_RECURSES: + for entry in SIZE_RECURSES[type(obj)](obj): + size += size_of(entry, exclude) + + return size + + def name_by_pid(pid): """ Attempts to determine the name a given process is running under (not @@ -614,6 +881,38 @@ def pid_by_open_file(path): return None # all queries failed +def pids_by_user(user): + """ + Provides processes owned by a given user. + + .. 
versionadded:: 1.5.0 + + :param str user: user to look up processes for + + :returns: **list** with the process ids, **None** if it can't be determined + """ + + # example output: + # atagar@odin:~$ ps -o pid -u avahi + # PID + # 914 + # 915 + + if is_available('ps'): + if is_bsd(): + results = call(GET_PIDS_BY_USER_BSD % user, None) + else: + results = call(GET_PIDS_BY_USER_LINUX % user, None) + + if results: + try: + return list(map(int, results[1:])) + except ValueError: + pass + + return None + + def cwd(pid): """ Provides the working directory of the given process. @@ -668,8 +967,8 @@ def cwd(pid): if is_available('lsof'): results = call(GET_CWD_LSOF % pid, []) - if len(results) == 2 and results[1].startswith('n/'): - lsof_result = results[1][1:].strip() + if len(results) >= 2 and results[-1].startswith('n/'): + lsof_result = results[-1][1:].strip() # If we lack read permissions for the cwd then it returns... # p2683 @@ -765,7 +1064,7 @@ def tail(target, lines = None): """ if isinstance(target, str): - with open(target) as target_file: + with open(target, 'rb') as target_file: for line in tail(target_file, lines): yield line @@ -777,13 +1076,13 @@ def tail(target, lines = None): target.seek(0, 2) # go to the end of the file block_end_byte = target.tell() block_number = -1 - content = '' + content = b'' while (lines is None or lines > 0) and block_end_byte > 0: if (block_end_byte - BLOCK_SIZE > 0): # read the last block we haven't yet read target.seek(block_number * BLOCK_SIZE, 2) - content, completed_lines = (target.read(BLOCK_SIZE) + content).split('\n', 1) + content, completed_lines = (target.read(BLOCK_SIZE) + content).split(b'\n', 1) else: # reached the start of the file, just read what's left target.seek(0, 0) @@ -794,7 +1093,7 @@ def tail(target, lines = None): if lines is not None: lines -= 1 - yield line + yield stem.util.str_tools._to_unicode(line) block_end_byte -= BLOCK_SIZE block_number -= 1 @@ -951,63 +1250,105 @@ def files_with_suffix(base_path, 
suffix): yield os.path.join(root, filename) -def call(command, default = UNDEFINED, ignore_exit_status = False): +def call(command, default = UNDEFINED, ignore_exit_status = False, timeout = None, cwd = None, env = None): """ + call(command, default = UNDEFINED, ignore_exit_status = False) + Issues a command in a subprocess, blocking until completion and returning the results. This is not actually ran in a shell so pipes and other shell syntax are not permitted. + .. versionchanged:: 1.5.0 + Providing additional information upon failure by raising a CallError. This + is a subclass of OSError, providing backward compatibility. + + .. versionchanged:: 1.5.0 + Added env argument. + + .. versionchanged:: 1.6.0 + Added timeout and cwd arguments. + :param str,list command: command to be issued :param object default: response if the query fails :param bool ignore_exit_status: reports failure if our command's exit status was non-zero + :param float timeout: maximum seconds to wait, blocks indefinitely if + **None** + :param dict env: environment variables :returns: **list** with the lines of output from the command - :raises: **OSError** if this fails and no default was provided + :raises: + * **CallError** if this fails and no default was provided + * **CallTimeoutError** if the timeout is reached without a default """ + # TODO: in stem 2.x return a struct with stdout, stderr, and runtime instead + + global SYSTEM_CALL_TIME + if isinstance(command, str): command_list = command.split(' ') else: - command_list = command + command_list = list(map(str, command)) + + exit_status, runtime, stdout, stderr = None, None, None, None + start_time = time.time() try: is_shell_command = command_list[0] in SHELL_COMMANDS - start_time = time.time() - process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command) + process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command, 
cwd = cwd, env = env) + + if timeout: + while process.poll() is None: + if time.time() - start_time > timeout: + raise CallTimeoutError("Process didn't finish after %0.1f seconds" % timeout, ' '.join(command_list), None, timeout, '', '', timeout) + + time.sleep(0.001) stdout, stderr = process.communicate() stdout, stderr = stdout.strip(), stderr.strip() runtime = time.time() - start_time log.debug('System call: %s (runtime: %0.2f)' % (command, runtime)) - trace_prefix = 'Received from system (%s)' % command - if stdout and stderr: - log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr)) - elif stdout: - log.trace(trace_prefix + ', stdout:\n%s' % stdout) - elif stderr: - log.trace(trace_prefix + ', stderr:\n%s' % stderr) + if log.is_tracing(): + trace_prefix = 'Received from system (%s)' % command - exit_code = process.poll() + if stdout and stderr: + log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr)) + elif stdout: + log.trace(trace_prefix + ', stdout:\n%s' % stdout) + elif stderr: + log.trace(trace_prefix + ', stderr:\n%s' % stderr) - if not ignore_exit_status and exit_code != 0: - raise OSError('%s returned exit status %i' % (command, exit_code)) + exit_status = process.poll() + + if not ignore_exit_status and exit_status != 0: + raise OSError('%s returned exit status %i' % (command, exit_status)) if stdout: return stdout.decode('utf-8', 'replace').splitlines() else: return [] + except CallTimeoutError: + log.debug('System call (timeout): %s (after %0.4fs)' % (command, timeout)) + + if default != UNDEFINED: + return default + else: + raise except OSError as exc: log.debug('System call (failed): %s (error: %s)' % (command, exc)) if default != UNDEFINED: return default else: - raise exc + raise CallError(str(exc), ' '.join(command_list), exit_status, runtime, stdout, stderr) + finally: + with SYSTEM_CALL_TIME_LOCK: + SYSTEM_CALL_TIME += time.time() - start_time def get_process_name(): @@ -1150,7 +1491,7 @@ def 
_set_proc_title(process_name): libc = ctypes.CDLL(ctypes.util.find_library('c')) name_buffer = ctypes.create_string_buffer(len(process_name) + 1) - name_buffer.value = process_name + name_buffer.value = process_name.encode() try: libc.setproctitle(ctypes.byref(name_buffer)) diff --git a/Shared/lib/python3.4/site-packages/stem/util/term.py b/Shared/lib/python3.4/site-packages/stem/util/term.py index bb110ea..b4acd61 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/term.py +++ b/Shared/lib/python3.4/site-packages/stem/util/term.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -8,12 +8,13 @@ Utilities for working with the terminal. :: + encoding - provides the ANSI escape sequence for a terminal attribute format - wrap text with ANSI for the given colors or attributes .. data:: Color (enum) .. data:: BgColor (enum) - Enumerations for foreground or background terminal color. + Foreground or background terminal colors. =========== =========== Color Description @@ -30,15 +31,19 @@ Utilities for working with the terminal. .. data:: Attr (enum) - Enumerations of terminal text attributes. + Terminal text attributes. + + .. versionchanged:: 1.5.0 + Added the LINES attribute. 
=================== =========== Attr Description =================== =========== **BOLD** heavy typeface - **HILIGHT** inverted foreground and background + **HIGHLIGHT** inverted foreground and background **UNDERLINE** underlined text **READLINE_ESCAPE** wrap encodings in `RL_PROMPT_START_IGNORE and RL_PROMPT_END_IGNORE sequences `_ + **LINES** formats lines individually =================== =========== """ @@ -54,17 +59,52 @@ DISABLE_COLOR_SUPPORT = False Color = stem.util.enum.Enum(*TERM_COLORS) BgColor = stem.util.enum.Enum(*['BG_' + color for color in TERM_COLORS]) -Attr = stem.util.enum.Enum('BOLD', 'UNDERLINE', 'HILIGHT', 'READLINE_ESCAPE') +Attr = stem.util.enum.Enum('BOLD', 'UNDERLINE', 'HIGHLIGHT', 'READLINE_ESCAPE', 'LINES') # mappings of terminal attribute enums to their ANSI escape encoding FG_ENCODING = dict([(list(Color)[i], str(30 + i)) for i in range(8)]) BG_ENCODING = dict([(list(BgColor)[i], str(40 + i)) for i in range(8)]) -ATTR_ENCODING = {Attr.BOLD: '1', Attr.UNDERLINE: '4', Attr.HILIGHT: '7'} +ATTR_ENCODING = {Attr.BOLD: '1', Attr.UNDERLINE: '4', Attr.HIGHLIGHT: '7'} CSI = '\x1B[%sm' RESET = CSI % '0' +def encoding(*attrs): + """ + Provides the ANSI escape sequence for these terminal color or attributes. + + .. versionadded:: 1.5.0 + + :param list attr: :data:`~stem.util.terminal.Color`, + :data:`~stem.util.terminal.BgColor`, or :data:`~stem.util.terminal.Attr` to + provide an ecoding for + + :returns: **str** of the ANSI escape sequence, **None** no attributes are + recognized + """ + + term_encodings = [] + + for attr in attrs: + # TODO: Account for an earlier misspelled attribute. This should be dropped + # in Stem. 2.0.x. 
+ + if attr == 'HILIGHT': + attr = 'HIGHLIGHT' + + attr = stem.util.str_tools._to_camel_case(attr) + term_encoding = FG_ENCODING.get(attr, None) + term_encoding = BG_ENCODING.get(attr, term_encoding) + term_encoding = ATTR_ENCODING.get(attr, term_encoding) + + if term_encoding: + term_encodings.append(term_encoding) + + if term_encodings: + return CSI % ';'.join(term_encodings) + + def format(msg, *attr): """ Simple terminal text formatting using `ANSI escape sequences @@ -75,38 +115,39 @@ def format(msg, *attr): * `termcolor `_ * `colorama `_ + .. versionchanged:: 1.6.0 + Normalized return value to be unicode to better support python 2/3 + compatibility. + :param str msg: string to be formatted :param str attr: text attributes, this can be :data:`~stem.util.term.Color`, :data:`~stem.util.term.BgColor`, or :data:`~stem.util.term.Attr` enums and are case insensitive (so strings like 'red' are fine) - :returns: **str** wrapped with ANSI escape encodings, starting with the given + :returns: **unicode** wrapped with ANSI escape encodings, starting with the given attributes and ending with a reset """ + msg = stem.util.str_tools._to_unicode(msg) + if DISABLE_COLOR_SUPPORT: return msg + if Attr.LINES in attr: + attr = list(attr) + attr.remove(Attr.LINES) + lines = [format(line, *attr) for line in msg.split('\n')] + return '\n'.join(lines) + # if we have reset sequences in the message then apply our attributes # after each of them if RESET in msg: return ''.join([format(comp, *attr) for comp in msg.split(RESET)]) - encodings = [] - - for text_attr in attr: - text_attr, encoding = stem.util.str_tools._to_camel_case(text_attr), None - encoding = FG_ENCODING.get(text_attr, encoding) - encoding = BG_ENCODING.get(text_attr, encoding) - encoding = ATTR_ENCODING.get(text_attr, encoding) - - if encoding: - encodings.append(encoding) - - if encodings: - prefix, suffix = CSI % ';'.join(encodings), RESET + prefix, suffix = encoding(*attr), RESET + if prefix: if Attr.READLINE_ESCAPE 
in attr: prefix = '\001%s\002' % prefix suffix = '\001%s\002' % suffix diff --git a/Shared/lib/python3.4/site-packages/stem/util/test_tools.py b/Shared/lib/python3.4/site-packages/stem/util/test_tools.py index d6a81c4..21e4a89 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/test_tools.py +++ b/Shared/lib/python3.4/site-packages/stem/util/test_tools.py @@ -1,46 +1,329 @@ -# Copyright 2015, Damian Johnson and The Tor Project +# Copyright 2015-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ Helper functions for testing. +Our **stylistic_issues**, **pyflakes_issues**, and **type_check_issues** +respect a 'exclude_paths' in our test config, excluding any absolute paths +matching those regexes. Issue strings can start or end with an asterisk +to match just against the prefix or suffix. For instance... + +:: + + exclude_paths .*/stem/test/data/.* + .. versionadded:: 1.2.0 :: + TimedTestRunner - test runner that tracks test runtimes + test_runtimes - provides runtime of tests excuted through TimedTestRunners clean_orphaned_pyc - delete *.pyc files without corresponding *.py is_pyflakes_available - checks if pyflakes is available - is_pep8_available - checks if pep8 is available + is_pycodestyle_available - checks if pycodestyle is available - stylistic_issues - checks for PEP8 and other stylistic issues pyflakes_issues - static checks for problems via pyflakes + stylistic_issues - checks for PEP8 and other stylistic issues """ import collections import linecache +import multiprocessing import os import re +import threading +import time +import traceback +import unittest +import stem.prereq import stem.util.conf +import stem.util.enum import stem.util.system CONFIG = stem.util.conf.config_dict('test', { - 'pep8.ignore': [], + 'pep8.ignore': [], # TODO: drop with stem 2.x, legacy alias for pycodestyle.ignore + 'pycodestyle.ignore': [], 'pyflakes.ignore': [], 'exclude_paths': [], }) -Issue = collections.namedtuple('Issue', [ - 
'line_number', - 'message', - 'line', -]) +TEST_RUNTIMES = {} +ASYNC_TESTS = {} + +AsyncStatus = stem.util.enum.UppercaseEnum('PENDING', 'RUNNING', 'FINISHED') +AsyncResult = collections.namedtuple('AsyncResult', 'type msg') + +# TODO: Providing a copy of SkipTest that works with python 2.6. This will be +# dropped when we remove python 2.6 support. + +if stem.prereq._is_python_26(): + class SkipTest(Exception): + 'Notes that the test was skipped.' +else: + SkipTest = unittest.case.SkipTest + + +def assert_equal(expected, actual, msg = None): + """ + Function form of a TestCase's assertEqual. + + .. versionadded:: 1.6.0 + + :param object expected: expected value + :param object actual: actual value + :param str msg: message if assertion fails + + :raises: **AssertionError** if values aren't equal + """ + + if expected != actual: + raise AssertionError("Expected '%s' but was '%s'" % (expected, actual) if msg is None else msg) + + +def assert_in(expected, actual, msg = None): + """ + Asserts that a given value is within this content. + + .. versionadded:: 1.6.0 + + :param object expected: expected value + :param object actual: actual value + :param str msg: message if assertion fails + + :raises: **AssertionError** if the expected value isn't in the actual + """ + + if expected not in actual: + raise AssertionError("Expected '%s' to be within '%s'" % (expected, actual) if msg is None else msg) + + +def skip(msg): + """ + Function form of a TestCase's skipTest. + + .. versionadded:: 1.6.0 + + :param str msg: reason test is being skipped + + :raises: **unittest.case.SkipTest** for this reason + """ + + raise SkipTest(msg) + + +def asynchronous(func): + test = stem.util.test_tools.AsyncTest(func) + ASYNC_TESTS[test.name] = test + return test.method + + +class AsyncTest(object): + """ + Test that's run asychronously. These are functions (no self reference) + performed like the following... 
+ + :: + + class MyTest(unittest.TestCase): + @staticmethod + def run_tests(): + MyTest.test_addition = stem.util.test_tools.AsyncTest(MyTest.test_addition).method + + @staticmethod + def test_addition(): + if 1 + 1 != 2: + raise AssertionError('tisk, tisk') + + MyTest.run() + + .. versionadded:: 1.6.0 + """ + + def __init__(self, runner, args = None, threaded = False): + self.name = '%s.%s' % (runner.__module__, runner.__name__) + + self._runner = runner + self._runner_args = args + self._threaded = threaded + + self.method = lambda test: self.result(test) # method that can be mixed into TestCases + + self._process = None + self._process_pipe = None + self._process_lock = threading.RLock() + + self._result = None + self._status = AsyncStatus.PENDING + + def run(self, *runner_args, **kwargs): + if stem.prereq._is_python_26(): + return # not supported under python 2.6 + + def _wrapper(conn, runner, args): + os.nice(12) + + try: + runner(*args) if args else runner() + conn.send(AsyncResult('success', None)) + except AssertionError as exc: + conn.send(AsyncResult('failure', str(exc))) + except SkipTest as exc: + conn.send(AsyncResult('skipped', str(exc))) + except: + conn.send(AsyncResult('error', traceback.format_exc())) + finally: + conn.close() + + with self._process_lock: + if self._status == AsyncStatus.PENDING: + if runner_args: + self._runner_args = runner_args + + if 'threaded' in kwargs: + self._threaded = kwargs['threaded'] + + self._process_pipe, child_pipe = multiprocessing.Pipe() + + if self._threaded: + self._process = threading.Thread( + target = _wrapper, + args = (child_pipe, self._runner, self._runner_args), + name = 'Background test of %s' % self.name, + ) + + self._process.setDaemon(True) + else: + self._process = multiprocessing.Process(target = _wrapper, args = (child_pipe, self._runner, self._runner_args)) + + self._process.start() + self._status = AsyncStatus.RUNNING + + def pid(self): + with self._process_lock: + return self._process.pid if 
(self._process and not self._threaded) else None + + def join(self): + self.result(None) + + def result(self, test): + if stem.prereq._is_python_26(): + return # not supported under python 2.6 + + with self._process_lock: + if self._status == AsyncStatus.PENDING: + self.run() + + if self._status == AsyncStatus.RUNNING: + self._result = self._process_pipe.recv() + self._process.join() + self._status = AsyncStatus.FINISHED + + if test and self._result.type == 'failure': + test.fail(self._result.msg) + elif test and self._result.type == 'error': + test.fail(self._result.msg) + elif test and self._result.type == 'skipped': + test.skipTest(self._result.msg) + + +class Issue(collections.namedtuple('Issue', ['line_number', 'message', 'line'])): + """ + Issue encountered by pyflakes or pycodestyle. + + :var int line_number: line number the issue occured on + :var str message: description of the issue + :var str line: content of the line the issue is about + """ + + +class TimedTestRunner(unittest.TextTestRunner): + """ + Test runner that tracks the runtime of individual tests. When tests are run + with this their runtimes are made available through + :func:`stem.util.test_tools.test_runtimes`. + + .. 
versionadded:: 1.6.0 + """ + + def run(self, test): + for t in test._tests: + original_type = type(t) + + class _TestWrapper(original_type): + def run(self, result = None): + start_time = time.time() + result = super(type(self), self).run(result) + TEST_RUNTIMES[self.id()] = time.time() - start_time + return result + + # TODO: remove and drop unnecessary 'returns' when dropping python 2.6 + # support + + def skipTest(self, message): + if not stem.prereq._is_python_26(): + return super(original_type, self).skipTest(message) + + # TODO: remove when dropping python 2.6 support + + def assertItemsEqual(self, expected, actual): + if stem.prereq._is_python_26(): + self.assertEqual(set(expected), set(actual)) + else: + return super(original_type, self).assertItemsEqual(expected, actual) + + def assertRaisesWith(self, exc_type, exc_msg, func, *args, **kwargs): + """ + Asserts the given invokation raises the expected excepiton. This is + similar to unittest's assertRaises and assertRaisesRegexp, but checks + for an exact match. + + This method is **not** being vended to external users and may be + changed without notice. If you want this method to be part of our + vended API then please let us know. 
+ """ + + return self.assertRaisesRegexp(exc_type, '^%s$' % re.escape(exc_msg), func, *args, **kwargs) + + def assertRaisesRegexp(self, exc_type, exc_msg, func, *args, **kwargs): + if stem.prereq._is_python_26(): + try: + func(*args, **kwargs) + self.fail('Expected a %s to be raised but nothing was' % exc_type) + except exc_type as exc: + self.assertTrue(re.search(exc_msg, str(exc), re.MULTILINE)) + else: + return super(original_type, self).assertRaisesRegexp(exc_type, exc_msg, func, *args, **kwargs) + + def id(self): + return '%s.%s.%s' % (original_type.__module__, original_type.__name__, self._testMethodName) + + def __str__(self): + return '%s (%s.%s)' % (self._testMethodName, original_type.__module__, original_type.__name__) + + t.__class__ = _TestWrapper + + return super(TimedTestRunner, self).run(test) + + +def test_runtimes(): + """ + Provides the runtimes of tests executed through TimedTestRunners. + + :returns: **dict** of fully qualified test names to floats for the runtime in + seconds + + .. versionadded:: 1.6.0 + """ + + return dict(TEST_RUNTIMES) def clean_orphaned_pyc(paths): """ - Deletes any file with a *.pyc extention without a corresponding *.py. This + Deletes any file with a \*.pyc extention without a corresponding \*.py. This helps to address a common gotcha when deleting python files... * You delete module 'foo.py' and run the tests to ensure that you haven't @@ -90,50 +373,46 @@ def is_pyflakes_available(): :returns: **True** if we can use pyflakes and **False** otherwise """ - try: - import pyflakes.api - import pyflakes.reporter - return True - except ImportError: - return False + return _module_exists('pyflakes.api') and _module_exists('pyflakes.reporter') -def is_pep8_available(): +def is_pycodestyle_available(): """ - Checks if pep8 is availalbe. + Checks if pycodestyle is availalbe. 
- :returns: **True** if we can use pep8 and **False** otherwise + :returns: **True** if we can use pycodestyle and **False** otherwise """ - try: - import pep8 - - if not hasattr(pep8, 'BaseReport'): - raise ImportError() - - return True - except ImportError: + if _module_exists('pycodestyle'): + import pycodestyle + elif _module_exists('pep8'): + import pep8 as pycodestyle + else: return False + return hasattr(pycodestyle, 'BaseReport') -def stylistic_issues(paths, check_two_space_indents = False, check_newlines = False, check_trailing_whitespace = False, check_exception_keyword = False, prefer_single_quotes = False): + +def stylistic_issues(paths, check_newlines = False, check_exception_keyword = False, prefer_single_quotes = False): """ Checks for stylistic issues that are an issue according to the parts of PEP8 - we conform to. You can suppress PEP8 issues by making a 'test' configuration - that sets 'pep8.ignore'. + we conform to. You can suppress pycodestyle issues by making a 'test' + configuration that sets 'pycodestyle.ignore'. For example, with a 'test/settings.cfg' of... :: - # PEP8 compliance issues that we're ignoreing... + # pycodestyle compliance issues that we're ignoreing... # # * E111 and E121 four space indentations # * E501 line is over 79 characters - pep8.ignore E111 - pep8.ignore E121 - pep8.ignore E501 + pycodestyle.ignore E111 + pycodestyle.ignore E121 + pycodestyle.ignore E501 + + pycodestyle.ignore run_tests.py => E402: import stem.util.enum ... you can then run tests with... @@ -146,9 +425,6 @@ def stylistic_issues(paths, check_two_space_indents = False, check_newlines = Fa issues = stylistic_issues('my_project') - If a 'exclude_paths' was set in our test config then we exclude any absolute - paths matching those regexes. - .. versionchanged:: 1.3.0 Renamed from get_stylistic_issues() to stylistic_issues(). The old name still works as an alias, but will be dropped in Stem version 2.0.0. 
@@ -160,89 +436,106 @@ def stylistic_issues(paths, check_two_space_indents = False, check_newlines = Fa .. versionchanged:: 1.4.0 Added the prefer_single_quotes option. + .. versionchanged:: 1.6.0 + Changed 'pycodestyle.ignore' code snippets to only need to match against + the prefix. + :param list paths: paths to search for stylistic issues - :param bool check_two_space_indents: check for two space indentations and - that no tabs snuck in :param bool check_newlines: check that we have standard newlines (\\n), not windows (\\r\\n) nor classic mac (\\r) - :param bool check_trailing_whitespace: check that our lines don't end with - trailing whitespace :param bool check_exception_keyword: checks that we're using 'as' for exceptions rather than a comma :param bool prefer_single_quotes: standardize on using single rather than double quotes for strings, when reasonable - :returns: **dict** of the form ``path => [(line_number, message)...]`` + :returns: dict of paths list of :class:`stem.util.test_tools.Issue` instances """ issues = {} - if is_pep8_available(): - import pep8 + ignore_rules = [] + ignore_for_file = [] - class StyleReport(pep8.BaseReport): - def __init__(self, options): - super(StyleReport, self).__init__(options) + for rule in CONFIG['pycodestyle.ignore'] + CONFIG['pep8.ignore']: + if '=>' in rule: + path, rule_entry = rule.split('=>', 1) + + if ':' in rule_entry: + rule, code = rule_entry.split(':', 1) + ignore_for_file.append((path.strip(), rule.strip(), code.strip())) + else: + ignore_rules.append(rule) + + def is_ignored(path, rule, code): + for ignored_path, ignored_rule, ignored_code in ignore_for_file: + if path.endswith(ignored_path) and ignored_rule == rule and code.strip().startswith(ignored_code): + return True + + return False + + if is_pycodestyle_available(): + if _module_exists('pep8'): + import pep8 as pycodestyle + else: + import pycodestyle + + class StyleReport(pycodestyle.BaseReport): + def init_file(self, filename, lines, expected, 
line_offset): + super(StyleReport, self).init_file(filename, lines, expected, line_offset) + + if not check_newlines and not check_exception_keyword and not prefer_single_quotes: + return + + is_block_comment = False + + for index, line in enumerate(lines): + content = line.split('#', 1)[0].strip() + + if check_newlines and '\r' in line: + issues.setdefault(filename, []).append(Issue(index + 1, 'contains a windows newline', line)) + + if not content: + continue # blank line + + if '"""' in content: + is_block_comment = not is_block_comment + + if check_exception_keyword and content.startswith('except') and content.endswith(', exc:'): + # Python 2.6 - 2.7 supports two forms for exceptions... + # + # except ValueError, exc: + # except ValueError as exc: + # + # The former is the old method and no longer supported in python 3 + # going forward. + + # TODO: This check only works if the exception variable is called + # 'exc'. We should generalize this via a regex so other names work + # too. + + issues.setdefault(filename, []).append(Issue(index + 1, "except clause should use 'as', not comma", line)) + + if prefer_single_quotes and not is_block_comment: + if '"' in content and "'" not in content and '"""' not in content and not content.endswith('\\'): + # Checking if the line already has any single quotes since that + # usually means double quotes are preferable for the content (for + # instance "I'm hungry"). Also checking for '\' at the end since + # that can indicate a multi-line string. 
+ + issues.setdefault(filename, []).append(Issue(index + 1, 'use single rather than double quotes', line)) def error(self, line_number, offset, text, check): code = super(StyleReport, self).error(line_number, offset, text, check) if code: - issues.setdefault(self.filename, []).append(Issue(line_number, '%s %s' % (code, text), text)) + line = linecache.getline(self.filename, line_number) - style_checker = pep8.StyleGuide(ignore = CONFIG['pep8.ignore'], reporter = StyleReport) + if not is_ignored(self.filename, code, line): + issues.setdefault(self.filename, []).append(Issue(line_number, text, line)) + + style_checker = pycodestyle.StyleGuide(ignore = ignore_rules, reporter = StyleReport) style_checker.check_files(list(_python_files(paths))) - if check_two_space_indents or check_newlines or check_trailing_whitespace or check_exception_keyword: - for path in _python_files(paths): - with open(path) as f: - file_contents = f.read() - - lines = file_contents.split('\n') - is_block_comment = False - - for index, line in enumerate(lines): - whitespace, content = re.match('^(\s*)(.*)$', line).groups() - - # TODO: This does not check that block indentations are two spaces - # because differentiating source from string blocks ("""foo""") is more - # of a pita than I want to deal with right now. - - if '"""' in content: - is_block_comment = not is_block_comment - - if check_two_space_indents and '\t' in whitespace: - issues.setdefault(path, []).append(Issue(index + 1, 'indentation has a tab', line)) - elif check_newlines and '\r' in content: - issues.setdefault(path, []).append(Issue(index + 1, 'contains a windows newline', line)) - elif check_trailing_whitespace and content != content.rstrip(): - issues.setdefault(path, []).append(Issue(index + 1, 'line has trailing whitespace', line)) - elif check_exception_keyword and content.lstrip().startswith('except') and content.endswith(', exc:'): - # Python 2.6 - 2.7 supports two forms for exceptions... 
- # - # except ValueError, exc: - # except ValueError as exc: - # - # The former is the old method and no longer supported in python 3 - # going forward. - - # TODO: This check only works if the exception variable is called - # 'exc'. We should generalize this via a regex so other names work - # too. - - issues.setdefault(path, []).append(Issue(index + 1, "except clause should use 'as', not comma", line)) - - if prefer_single_quotes and line and not is_block_comment: - content = line.strip().split('#', 1)[0] - - if '"' in content and "'" not in content and '"""' not in content and not content.endswith('\\'): - # Checking if the line already has any single quotes since that - # usually means double quotes are preferable for the content (for - # instance "I'm hungry"). Also checking for '\' at the end since - # that can indicate a multi-line string. - - issues.setdefault(path, []).append(Issue(index + 1, "use single rather than double quotes", line)) - return issues @@ -254,10 +547,7 @@ def pyflakes_issues(paths): :: pyflakes.ignore stem/util/test_tools.py => 'pyflakes' imported but unused - pyflakes.ignore stem/util/test_tools.py => 'pep8' imported but unused - - If a 'exclude_paths' was set in our test config then we exclude any absolute - paths matching those regexes. + pyflakes.ignore stem/util/test_tools.py => 'pycodestyle' imported but unused .. versionchanged:: 1.3.0 Renamed from get_pyflakes_issues() to pyflakes_issues(). The old name @@ -267,9 +557,12 @@ def pyflakes_issues(paths): Changing tuples in return value to be namedtuple instances, and adding the line that had the issue. + .. versionchanged:: 1.5.0 + Support matching against prefix or suffix issue strings. + :param list paths: paths to search for problems - :returns: dict of the form ``path => [(line_number, message)...]`` + :returns: dict of paths list of :class:`stem.util.test_tools.Issue` instances """ issues = {} @@ -300,15 +593,24 @@ def pyflakes_issues(paths): # path ends with any of them. 
for ignored_path, ignored_issues in self._ignored_issues.items(): - if path.endswith(ignored_path) and issue in ignored_issues: - return True + if path.endswith(ignored_path): + if issue in ignored_issues: + return True + + for prefix in [i[:1] for i in ignored_issues if i.endswith('*')]: + if issue.startswith(prefix): + return True + + for suffix in [i[1:] for i in ignored_issues if i.startswith('*')]: + if issue.endswith(suffix): + return True return False def _register_issue(self, path, line_number, issue, line): if not self._is_ignored(path, issue): if path and line_number and not line: - line = linecache.getline(path, line_number) + line = linecache.getline(path, line_number).strip() issues.setdefault(path, []).append(Issue(line_number, issue, line)) @@ -320,6 +622,22 @@ def pyflakes_issues(paths): return issues +def _module_exists(module_name): + """ + Checks if a module exists. + + :param str module_name: module to check existance of + + :returns: **True** if module exists and **False** otherwise + """ + + try: + __import__(module_name) + return True + except ImportError: + return False + + def _python_files(paths): for path in paths: for file_path in stem.util.system.files_with_suffix(path, '.py'): @@ -333,9 +651,12 @@ def _python_files(paths): if not skip: yield file_path + # TODO: drop with stem 2.x # We renamed our methods to drop a redundant 'get_*' prefix, so alias the old -# names for backward compatability. +# names for backward compatability, and account for pep8 being renamed to +# pycodestyle. 
get_stylistic_issues = stylistic_issues get_pyflakes_issues = pyflakes_issues +is_pep8_available = is_pycodestyle_available diff --git a/Shared/lib/python3.4/site-packages/stem/util/tor_tools.py b/Shared/lib/python3.4/site-packages/stem/util/tor_tools.py index 01c29ee..b3db371 100644 --- a/Shared/lib/python3.4/site-packages/stem/util/tor_tools.py +++ b/Shared/lib/python3.4/site-packages/stem/util/tor_tools.py @@ -1,4 +1,4 @@ -# Copyright 2012-2015, Damian Johnson and The Tor Project +# Copyright 2012-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -21,6 +21,8 @@ Miscellaneous utility functions for working with tor. import re +import stem.util.str_tools + # The control-spec defines the following as... # # Fingerprint = "$" 40*HEXDIG @@ -54,6 +56,9 @@ def is_valid_fingerprint(entry, check_prefix = False): :returns: **True** if the string could be a relay fingerprint, **False** otherwise """ + if isinstance(entry, bytes): + entry = stem.util.str_tools._to_unicode(entry) + try: if check_prefix: if not entry or entry[0] != '$': @@ -75,6 +80,9 @@ def is_valid_nickname(entry): :returns: **True** if the string could be a nickname, **False** otherwise """ + if isinstance(entry, bytes): + entry = stem.util.str_tools._to_unicode(entry) + try: return bool(NICKNAME_PATTERN.match(entry)) except TypeError: @@ -88,6 +96,9 @@ def is_valid_circuit_id(entry): :returns: **True** if the string could be a circuit id, **False** otherwise """ + if isinstance(entry, bytes): + entry = stem.util.str_tools._to_unicode(entry) + try: return bool(CIRC_ID_PATTERN.match(entry)) except TypeError: @@ -124,6 +135,9 @@ def is_valid_hidden_service_address(entry): :returns: **True** if the string could be a hidden service address, **False** otherwise """ + if isinstance(entry, bytes): + entry = stem.util.str_tools._to_unicode(entry) + try: return bool(HS_ADDRESS_PATTERN.match(entry)) except TypeError: diff --git a/Shared/lib/python3.4/site-packages/stem/version.py 
b/Shared/lib/python3.4/site-packages/stem/version.py index 1182bfc..979bcf9 100644 --- a/Shared/lib/python3.4/site-packages/stem/version.py +++ b/Shared/lib/python3.4/site-packages/stem/version.py @@ -1,4 +1,4 @@ -# Copyright 2011-2015, Damian Johnson and The Tor Project +# Copyright 2011-2018, Damian Johnson and The Tor Project # See LICENSE for licensing information """ @@ -26,10 +26,16 @@ easily parsed and compared, for instance... Enumerations for the version requirements of features. + .. deprecated:: 1.6.0 + Requirement entries belonging to tor versions which have been obsolete for + at least six months will be removed when we break backward compatibility + in the 2.x stem release. + ===================================== =========== Requirement Description ===================================== =========== **AUTH_SAFECOOKIE** SAFECOOKIE authentication method + **DESCRIPTOR_COMPRESSION** `Expanded compression support for ZSTD and LZMA `_ **DROPGUARDS** DROPGUARDS requests **EVENT_AUTHDIR_NEWDESCS** AUTHDIR_NEWDESC events **EVENT_BUILDTIMEOUT_SET** BUILDTIMEOUT_SET events @@ -39,6 +45,7 @@ easily parsed and compared, for instance... **EVENT_DESCCHANGED** DESCCHANGED events **EVENT_GUARD** GUARD events **EVENT_HS_DESC_CONTENT** HS_DESC_CONTENT events + **EVENT_NETWORK_LIVENESS** NETWORK_LIVENESS events **EVENT_NEWCONSENSUS** NEWCONSENSUS events **EVENT_NS** NS events **EVENT_SIGNAL** SIGNAL events @@ -54,11 +61,18 @@ easily parsed and compared, for instance... 
**FEATURE_EXTENDED_EVENTS** 'EXTENDED_EVENTS' optional feature **FEATURE_VERBOSE_NAMES** 'VERBOSE_NAMES' optional feature **GETINFO_CONFIG_TEXT** 'GETINFO config-text' query + **GETINFO_GEOIP_AVAILABLE** 'GETINFO ip-to-country/ipv4-available' query and its ipv6 counterpart + **GETINFO_MICRODESCRIPTORS** 'GETINFO md/all' query + **HIDDEN_SERVICE_V3** Support for v3 hidden services **HSFETCH** HSFETCH requests **HSPOST** HSPOST requests **ADD_ONION** ADD_ONION and DEL_ONION requests + **ADD_ONION_BASIC_AUTH** ADD_ONION supports basic authentication + **ADD_ONION_NON_ANONYMOUS** ADD_ONION supports non-anonymous mode + **ADD_ONION_MAX_STREAMS** ADD_ONION support for MaxStreamsCloseCircuit **LOADCONF** LOADCONF requests **MICRODESCRIPTOR_IS_DEFAULT** Tor gets microdescriptors by default rather than server descriptors + **SAVECONF_FORCE** Added the 'FORCE' flag to SAVECONF **TAKEOWNERSHIP** TAKEOWNERSHIP requests **TORRC_CONTROL_SOCKET** 'ControlSocket ' config option **TORRC_PORT_FORWARDING** 'PortForwarding' config option @@ -70,18 +84,21 @@ easily parsed and compared, for instance... import os import re +import stem.prereq +import stem.util import stem.util.enum import stem.util.system -try: - # added in python 3.2 +if stem.prereq._is_lru_cache_available(): from functools import lru_cache -except ImportError: +else: from stem.util.lru_cache import lru_cache # cache for the get_system_tor_version function VERSION_CACHE = {} +VERSION_PATTERN = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?(( \(\S*\))*)$') + def get_system_tor_version(tor_cmd = 'tor'): """ @@ -107,7 +124,7 @@ def get_system_tor_version(tor_cmd = 'tor'): if os.path.isabs(tor_cmd): exc = "Unable to check tor's version. '%s' doesn't exist." % tor_cmd else: - exc = "Unable to run '%s'. Mabye tor isn't in your PATH?" % version_cmd + exc = "Unable to run '%s'. Maybe tor isn't in your PATH?" 
% version_cmd raise IOError(exc) @@ -144,13 +161,17 @@ class Version(object): `_, such as "0.1.4" or "0.2.2.23-alpha (git-7dcd105be34a4f44)". + .. versionchanged:: 1.6.0 + Added all_extra parameter. + :var int major: major version :var int minor: minor version :var int micro: micro version :var int patch: patch level (**None** if undefined) :var str status: status tag such as 'alpha' or 'beta-dev' (**None** if undefined) - :var str extra: extra information without its parentheses such as + :var str extra: first extra information without its parentheses such as 'git-8be6058d8f31e578' (**None** if undefined) + :var list all_extra: all extra information entries, without their parentheses :var str git_commit: git commit id (**None** if it wasn't provided) :param str version_str: version to be parsed @@ -160,11 +181,10 @@ class Version(object): def __init__(self, version_str): self.version_str = version_str - version_parts = re.match(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?( \(\S*\))?$', version_str) - self._hash = None + version_parts = VERSION_PATTERN.match(version_str) if version_parts: - major, minor, micro, patch, status, extra = version_parts.groups() + major, minor, micro, patch, status, extra_str, _ = version_parts.groups() # The patch and status matches are optional (may be None) and have an extra # proceeding period or dash if they exist. Stripping those off. 
@@ -175,20 +195,19 @@ class Version(object): if status: status = status[1:] - if extra: - extra = extra[2:-1] - self.major = int(major) self.minor = int(minor) self.micro = int(micro) self.patch = patch self.status = status - self.extra = extra + self.all_extra = [entry[1:-1] for entry in extra_str.strip().split()] if extra_str else [] + self.extra = self.all_extra[0] if self.all_extra else None + self.git_commit = None - if extra and re.match('^git-[0-9a-f]{16}$', extra): - self.git_commit = extra[4:] - else: - self.git_commit = None + for extra in self.all_extra: + if extra and re.match('^git-[0-9a-f]{16}$', extra): + self.git_commit = extra[4:] + break else: raise ValueError("'%s' isn't a properly formatted tor version" % version_str) @@ -230,9 +249,15 @@ class Version(object): return method(my_status, other_status) + def __hash__(self): + return stem.util._hash_attr(self, 'major', 'minor', 'micro', 'patch', 'status', cache = True) + def __eq__(self, other): return self._compare(other, lambda s, o: s == o) + def __ne__(self, other): + return not self == other + def __gt__(self, other): """ Checks if this version meets the requirements for a given feature. 
We can @@ -259,22 +284,6 @@ class Version(object): return self._compare(other, lambda s, o: s >= o) - def __hash__(self): - if self._hash is None: - my_hash = 0 - - for attr in ('major', 'minor', 'micro', 'patch', 'status'): - my_hash *= 1024 - - attr_value = getattr(self, attr) - - if attr_value is not None: - my_hash += hash(attr_value) - - self._hash = my_hash - - return self._hash - class _VersionRequirements(object): """ @@ -324,21 +333,24 @@ class _VersionRequirements(object): :param bool to_inclusive: if comparison is inclusive with the ending version """ - if from_inclusive and to_inclusive: - new_rule = lambda v: from_version <= v <= to_version - elif from_inclusive: - new_rule = lambda v: from_version <= v < to_version - else: - new_rule = lambda v: from_version < v < to_version + def new_rule(v): + if from_inclusive and to_inclusive: + return from_version <= v <= to_version + elif from_inclusive: + return from_version <= v < to_version + else: + return from_version < v < to_version self.rules.append(new_rule) + safecookie_req = _VersionRequirements() safecookie_req.in_range(Version('0.2.2.36'), Version('0.2.3.0')) safecookie_req.greater_than(Version('0.2.3.13')) Requirement = stem.util.enum.Enum( ('AUTH_SAFECOOKIE', safecookie_req), + ('DESCRIPTOR_COMPRESSION', Version('0.3.1.1-alpha')), ('DROPGUARDS', Version('0.2.5.1-alpha')), ('EVENT_AUTHDIR_NEWDESCS', Version('0.1.1.10-alpha')), ('EVENT_BUILDTIMEOUT_SET', Version('0.2.2.7-alpha')), @@ -349,6 +361,7 @@ Requirement = stem.util.enum.Enum( ('EVENT_GUARD', Version('0.1.2.5-alpha')), ('EVENT_HS_DESC_CONTENT', Version('0.2.7.1-alpha')), ('EVENT_NS', Version('0.1.2.3-alpha')), + ('EVENT_NETWORK_LIVENESS', Version('0.2.7.2-alpha')), ('EVENT_NEWCONSENSUS', Version('0.2.1.13-alpha')), ('EVENT_SIGNAL', Version('0.2.3.1-alpha')), ('EVENT_STATUS', Version('0.1.2.3-alpha')), @@ -363,11 +376,18 @@ Requirement = stem.util.enum.Enum( ('FEATURE_EXTENDED_EVENTS', Version('0.2.2.1-alpha')), ('FEATURE_VERBOSE_NAMES', 
Version('0.2.2.1-alpha')), ('GETINFO_CONFIG_TEXT', Version('0.2.2.7-alpha')), + ('GETINFO_GEOIP_AVAILABLE', Version('0.3.2.1-alpha')), + ('GETINFO_MICRODESCRIPTORS', Version('0.3.5.1-alpha')), + ('HIDDEN_SERVICE_V3', Version('0.3.3.1-alpha')), ('HSFETCH', Version('0.2.7.1-alpha')), ('HSPOST', Version('0.2.7.1-alpha')), ('ADD_ONION', Version('0.2.7.1-alpha')), + ('ADD_ONION_BASIC_AUTH', Version('0.2.9.1-alpha')), + ('ADD_ONION_NON_ANONYMOUS', Version('0.2.9.3-alpha')), + ('ADD_ONION_MAX_STREAMS', Version('0.2.7.2-alpha')), ('LOADCONF', Version('0.2.1.1')), ('MICRODESCRIPTOR_IS_DEFAULT', Version('0.2.3.3')), + ('SAVECONF_FORCE', Version('0.3.1.1-alpha')), ('TAKEOWNERSHIP', Version('0.2.2.28-beta')), ('TORRC_CONTROL_SOCKET', Version('0.2.0.30')), ('TORRC_PORT_FORWARDING', Version('0.2.3.1-alpha')), diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/DESCRIPTION.rst deleted file mode 100644 index 2de5e77..0000000 --- a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,46 +0,0 @@ -Tornado Web Server -================== - -`Tornado `_ is a Python web framework and -asynchronous networking library, originally developed at `FriendFeed -`_. By using non-blocking network I/O, Tornado -can scale to tens of thousands of open connections, making it ideal for -`long polling `_, -`WebSockets `_, and other -applications that require a long-lived connection to each user. - -Hello, world ------------- - -Here is a simple "Hello, world" example web app for Tornado: - -.. 
code-block:: python - - import tornado.ioloop - import tornado.web - - class MainHandler(tornado.web.RequestHandler): - def get(self): - self.write("Hello, world") - - def make_app(): - return tornado.web.Application([ - (r"/", MainHandler), - ]) - - if __name__ == "__main__": - app = make_app() - app.listen(8888) - tornado.ioloop.IOLoop.current().start() - -This example does not use any of Tornado's asynchronous features; for -that see this `simple chat room -`_. - -Documentation -------------- - -Documentation and links to additional resources are available at -http://www.tornadoweb.org - - diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/METADATA b/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/METADATA deleted file mode 100644 index 0abc729..0000000 --- a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/METADATA +++ /dev/null @@ -1,67 +0,0 @@ -Metadata-Version: 2.0 -Name: tornado -Version: 4.3 -Summary: Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed. 
-Home-page: http://www.tornadoweb.org/ -Author: Facebook -Author-email: python-tornado@googlegroups.com -License: http://www.apache.org/licenses/LICENSE-2.0 -Platform: UNKNOWN -Classifier: License :: OSI Approved :: Apache Software License -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Requires-Dist: backports-abc (>=0.4) - -Tornado Web Server -================== - -`Tornado `_ is a Python web framework and -asynchronous networking library, originally developed at `FriendFeed -`_. By using non-blocking network I/O, Tornado -can scale to tens of thousands of open connections, making it ideal for -`long polling `_, -`WebSockets `_, and other -applications that require a long-lived connection to each user. - -Hello, world ------------- - -Here is a simple "Hello, world" example web app for Tornado: - -.. code-block:: python - - import tornado.ioloop - import tornado.web - - class MainHandler(tornado.web.RequestHandler): - def get(self): - self.write("Hello, world") - - def make_app(): - return tornado.web.Application([ - (r"/", MainHandler), - ]) - - if __name__ == "__main__": - app = make_app() - app.listen(8888) - tornado.ioloop.IOLoop.current().start() - -This example does not use any of Tornado's asynchronous features; for -that see this `simple chat room -`_. 
- -Documentation -------------- - -Documentation and links to additional resources are available at -http://www.tornadoweb.org - - diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/RECORD b/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/RECORD deleted file mode 100644 index d17b3a0..0000000 --- a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/RECORD +++ /dev/null @@ -1,177 +0,0 @@ -tornado/__init__.py,sha256=HzntEtmc_UmjIxX1ex-gMYFrhx_KfnlBEF1F9Ygnxt8,1122 -tornado/_locale_data.py,sha256=OQnYIsIdZngKZZMaSQBB-cgsnFNPIqYjg2YiOOJnjqc,5467 -tornado/auth.py,sha256=hnI3xYwtFbKox-suAbNDNXB63lT908KIBAtP2qHrW-s,46438 -tornado/autoreload.py,sha256=XknVuJH24kh9KWnAo7OOWGCYaAVsIZ5uIqFR6CggzxA,12840 -tornado/concurrent.py,sha256=SLiP_WSfS_gMh8ztaS2jj-Ok7Cs9SAY10u56ZndAQU0,18531 -tornado/curl_httpclient.py,sha256=hII9S4oBsXiP2UB7-2SZowpSAHUsQFQ7noDyAclBLks,22169 -tornado/escape.py,sha256=EwuAvQswS5RA7kHH0NvMhKBN_lD61rJt-8-7VBMCtl8,14441 -tornado/gen.py,sha256=JYo9046bQp3gzCYnuAgyQjxvi1p535-pGaxJW5YGs3A,44093 -tornado/http1connection.py,sha256=7rthe50chhnX5LqEEYlIGrgepPogBUoJUdb3Xib8XD8,30869 -tornado/httpclient.py,sha256=B9fCu0HCNAdYWmK6e102xtxm6PpBuNxB1pzp5n3CsGo,26738 -tornado/httpserver.py,sha256=dKlOTLf96yeaF9OdDSNRJDdlaPSRnju1UluysF8dV70,11915 -tornado/httputil.py,sha256=fjV1940-zr_jIOe4atQ6NAZplrZwdhRgZl-e6s_PDm4,28827 -tornado/ioloop.py,sha256=Tv_Jf0ceZGSHO92Ftq7VmXtZWi68aT_31MGMLifAqL0,41265 -tornado/iostream.py,sha256=zX63NPYEEGMEohVC4NZlwISUAlk8VAGUJdGgO3pnpCc,65135 -tornado/locale.py,sha256=NDj-I6Rhu1E4mV_2LWQ3ZSX3qHc1l3auAuGxp-mzbGw,20306 -tornado/locks.py,sha256=bYBT44H8vvS7KuGz_vo0CZm8e9nyII0IQdBALZA-cME,15234 -tornado/log.py,sha256=C63TfBPnOKW17a30cC9Et8GO7hUuBb4kuyV82TycorI,10920 -tornado/netutil.py,sha256=HtD2ipcj86dvU4MN8-iQRMsPIELlyWFu1VHEk-hb01o,20437 -tornado/options.py,sha256=5Y7RRuMvq86IJMYEWi1HJjVKMbyB-CEBgNHMsaguOeI,20852 -tornado/process.py,sha256=9Dh0oAik9PUFGSnSR4aXaq_4h6pxpkrNjwJTqS9b0_A,12300 
-tornado/queues.py,sha256=TOxKUoxhsR16dP9VllUnNoAdd-w1ZtEe1DLJNX0BEXo,10013 -tornado/simple_httpclient.py,sha256=o5SDq6EHkAVp7Wfk9fnZKJQ4Zs25gHFcQXlzIDcZLOE,23983 -tornado/speedups.cpython-34m.so,sha256=yXxsKqC3kNmxunYcy4M1qM94K6iqhYJb-XhqBcZCPwM,17664 -tornado/stack_context.py,sha256=RJ2E4-UKbXPzvd0Yw4b9H9C2YUWK2vyKPf7NeB7F_zQ,13174 -tornado/tcpclient.py,sha256=BsGu5ZKUw3zRbiDB9NSgUS3j1HBr1GWjobiMMsUudU0,6802 -tornado/tcpserver.py,sha256=CYpGxMUb8XleB3zU-v_GfC00y3wUY-QyyJjbNvDsdG8,11530 -tornado/template.py,sha256=sW8BV8-iB8sNPP3l7vSqnIORJj7cRznQr_uHiz9XbpQ,35327 -tornado/testing.py,sha256=msKN3yNBOVxFEgFjzH0Xh3PF6acJI7SuQzz1W2XHO10,27632 -tornado/util.py,sha256=N2ArRO-Cu12Do3N87jAIphpKKO7IN8LodA5TTlaP2FA,13605 -tornado/web.py,sha256=x24_Dgwy79_3_l1u7PmgSzHKngzIZSVrxMc1nreSBOU,128870 -tornado/websocket.py,sha256=_e3vGGw8_F9B6JEuXmJj9Vj3rKmnzHfaaS-5FFEGc0g,40820 -tornado/wsgi.py,sha256=QrcwvkV3k8q3xELSm5063u2Xa6QGL04YEMZGHWjV-4I,13436 -tornado/platform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -tornado/platform/asyncio.py,sha256=vgpSdRSSxjcxhtwAg86ZafEGCiSkCv_0CD4B_RB1aOE,7648 -tornado/platform/auto.py,sha256=fhttLe4JKJr1t_EyuEKGECMkVUe9l66KQuWKSvMebqc,1726 -tornado/platform/caresresolver.py,sha256=Xts9gsEcmcj5yttldPz_eUEcXA7W2c1FojC1An8fQNM,3092 -tornado/platform/common.py,sha256=TK4xziS1z30HFXvhRgxnrGGvTgVd9j_Qkp18DvuPiF4,3403 -tornado/platform/epoll.py,sha256=Oo7uFFJ3LAa-ZSjG8kDlQTLCfASMTqfPPA3aeBjwZgw,934 -tornado/platform/interface.py,sha256=757vw4dGAlErxWMwTw8h_STcOGjp6Ih4eJenGk8zdEY,2244 -tornado/platform/kqueue.py,sha256=-GKz9Bba4tKlpc7o0OnP2OxK7gMx417KatBHOm5Q3tI,3431 -tornado/platform/posix.py,sha256=P5kgBqVDEi3gu9h1TjFTd5B5d_EjORqbtqYUlcMa3uo,1859 -tornado/platform/select.py,sha256=dv2nf5bFIXHzvKr1ivAcqSG9FH_3CtAaGGnAYm2vvxA,2634 -tornado/platform/twisted.py,sha256=Sa25jp1wIeIPvVp29e8kRrwARp0wnyLQxYNF9gENFsY,21586 -tornado/platform/windows.py,sha256=xonLtnGj7ZSo5q1_try4hkE6rWEmVRU-EbCEmWhgPog,681 
-tornado/test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -tornado/test/__main__.py,sha256=eMWaR-Q9gUjxuD5m0juHeUxSfxbVPQTuNeGtKjZTZ-g,417 -tornado/test/asyncio_test.py,sha256=0kMt_HuDri1Qlsw6hUwsJUmLxo-Dv2jcXTnHCrTJdII,4548 -tornado/test/auth_test.py,sha256=FdqdIMDpWZJ96Jo3VqcPVAMJdKkZtc7e1NGVSdD1dvI,22535 -tornado/test/concurrent_test.py,sha256=ZRoaiTcA-YhVMYJKe4-ioJqE7PAG8ACc7QQoi-Td0LE,13390 -tornado/test/curl_httpclient_test.py,sha256=n9KCJlutUjox2CrzGv6qCZvJGSr_8kJry4XjODf1SvA,4478 -tornado/test/escape_test.py,sha256=a6bSUbBgz7DUa-JIeWUOGjnVnuWE8cYYM21DPBk0MEg,11251 -tornado/test/gen_test.py,sha256=F3iXDQh4GrL04bTymf-BWyWcEp8Hu6BOhx96Z-LaY0A,44402 -tornado/test/httpclient_test.py,sha256=_KHAknmJ6IbrgpRaU6Ox6e-NNKapdOaF5hPeZ11iLh0,26340 -tornado/test/httpserver_test.py,sha256=jfnICextemEAH2tK-h6hqh5NH_mib0UbuKiDPhQ3nWU,41199 -tornado/test/httputil_test.py,sha256=DvQGKAZ5-Nqb1fBFNqAuPKMLHVlKh9aCAuGo5c1rfro,13359 -tornado/test/import_test.py,sha256=MpWn1gsVSo3vHFckjbyGFAu4-kPr2zP_a-WwBFL2DNI,1531 -tornado/test/ioloop_test.py,sha256=mREcnLZ38vQK4iaFGZril0FqGFBir6R2Y427PzIHpOY,24243 -tornado/test/iostream_test.py,sha256=7MXi2llrcQgHhvSTu2Sh0_bx0U9TCBEFt93WfUVS13k,42040 -tornado/test/locale_test.py,sha256=DurCiwc4yW-4g2Ko1lSa5ylSp9ZUvjeQmBx1e3OosPQ,5923 -tornado/test/locks_test.py,sha256=3ZNRHFczN8-JWDAE8dORjgqGsjOKVcPOSJNFohm0ibY,16076 -tornado/test/log_test.py,sha256=iAgMJtRvj5bFimHvURpuQHlKAQ3sXWr9n1gdDPA8Mlk,9696 -tornado/test/netutil_test.py,sha256=VuJKfvNLr7GjDY1K6z_MySvDy6KaKCmZOwMAxsEmlyQ,7630 -tornado/test/options_test.cfg,sha256=N7vPJGYwcMLCN6_sei1UQtsYwwPfHsGc5aNfBnX-5No,49 -tornado/test/options_test.py,sha256=wMAAmHeoCiW2ey72tdrJQfBcWhEr_tMzWJe8neD29eI,10058 -tornado/test/process_test.py,sha256=yiqxgI1BZ1mWUrRfs85Z_CR6HPGAjuHfCpG3H-cUkb4,10569 -tornado/test/queues_test.py,sha256=UtR32JWOof5g_kS14aN1YO1O9rHDrcQ31SdXLKsdSyA,13062 -tornado/test/resolve_test_helper.py,sha256=cfiEZjHH_oIJ73xsSZ3M17tODdAS0YP_j-1wuvqGovo,521 
-tornado/test/runtests.py,sha256=w3Mdsua9mLFa7vxFbZNBuW7WDuYbIxTxodzjBwjNl94,7125 -tornado/test/simple_httpclient_test.py,sha256=52ix08IFW4rB62p8x3NrwLRmerRlOp5BNoO1KAnGwbk,30120 -tornado/test/stack_context_test.py,sha256=KRSFLh506zofx6YRzvxnO6QeI8tDF_mMf8BuEFUEXlE,11102 -tornado/test/static_foo.txt,sha256=DdAKABzHb8kunGrAvUoTLnHlgdTN-6_PjH1irVmokm8,95 -tornado/test/tcpclient_test.py,sha256=ApjAfnOUSRvpfqvdzIf_UljyyXZQuKhghvTKwsgU-S8,10304 -tornado/test/tcpserver_test.py,sha256=h5a3JOV79nqUwKnGFzsiMnCEfpuDrRw-1uF8QqFp8xg,1361 -tornado/test/template_test.py,sha256=Lg51Gho7A5nDY5MiIZp8i6MDxhWMMOBqLNGdYdZYaxE,18558 -tornado/test/test.crt,sha256=ZE49nVCfsA7H4dQPEGfoYi1OcfDfez4MfzgUrysli9M,851 -tornado/test/test.key,sha256=KbArXO5iyzSJRST7AtVkj5TU5zXZlJnxvuG__BU4BNU,916 -tornado/test/testing_test.py,sha256=gDtF2e8_WARRnspZvbL2gKfqLK8t5p-5H6fFod1ZoeE,8732 -tornado/test/twisted_test.py,sha256=s365tyFMreeD-v4UB_yG_PY4UcaW2WWv2pt9nYlAW_s,27525 -tornado/test/util.py,sha256=WrPFnDlAeZtYhvIkM78dw2oWD3VVleUKrHCD3XiPLI8,3023 -tornado/test/util_test.py,sha256=0JEVH4SHOetbSxjcgY-lbtxPdeel91aiR0RO1Mx9TrY,6681 -tornado/test/web_test.py,sha256=L_G2qgN91wHPCXSZM0VeoGn_SQRRgGH_LelA8sD2-f4,109353 -tornado/test/websocket_test.py,sha256=2v9M7ScjAtrJeF5P0uHf4NeHZ5NtK_UbmuiXgJVLfS0,14781 -tornado/test/wsgi_test.py,sha256=wdTjmKiWmW-n5dhNhpxvV_4amkG855EJF1-GHFdr89Y,3714 -tornado/test/csv_translations/fr_FR.csv,sha256=0UsMzfh1cw3yQdhS7pCmRfQoAkbqWpgzzodpZqp7ttM,18 -tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo,sha256=fl0ZVZIlNwwU9lPx29pgZ4X-HfyEVYphJu7UWtll7jo,665 -tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po,sha256=Clw6HyQUcopGV25qw3pvw3gn1ZqZRYrovsi8PQTQAnM,1049 -tornado/test/static/robots.txt,sha256=Mx6pCQ2wyfb1l72YQP1bFxgw9uCzuhyyTfqR8Mla7cE,26 -tornado/test/static/sample.xml,sha256=7LeTf16BWDucipsUaZZK7oDxtKEMDH9sFtsNR1G6pME,666 -tornado/test/static/sample.xml.bz2,sha256=2Ql5ccWnaSpDdTyioino7Bw_dcGFkG_RQO5Lm5cfT6A,285 
-tornado/test/static/sample.xml.gz,sha256=_App0wKpn31lZVA9P_rslytPm4ei5GvNPVKh55r7l28,264 -tornado/test/static/dir/index.html,sha256=tBwBanUSjISUy0BVan_QNKkYdLau8qs__P0G9oAiP78,18 -tornado/test/templates/utf8.html,sha256=9d1eiaw5KCjUTCbRRIl_RLSy0LCJXaO-bzVF2L_32fM,7 -tornado-4.3.dist-info/DESCRIPTION.rst,sha256=hQhN6PO4S2x1tSRSInNpVMz0GSgYcj48KZvcwkriohE,1332 -tornado-4.3.dist-info/METADATA,sha256=OaB6afBxInBZxrZ_zuSDcIruLZVEIxpkb_V2qoT_ZCM,2256 -tornado-4.3.dist-info/RECORD,, -tornado-4.3.dist-info/WHEEL,sha256=HslHw5cSLCuyOLxj8duGAooHNvXnupcmoBU1NzRPr2w,104 -tornado-4.3.dist-info/metadata.json,sha256=SdpUqt7g_ebcnkms9OevniGL2L3BKJ1Pu3vf4UF8gOs,1073 -tornado-4.3.dist-info/top_level.txt,sha256=5QAK1MeNpWgYdqWoU8iYlDuGB8j6NDPgx-uSUHTe0A4,8 -tornado-4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -tornado/test/__pycache__/twisted_test.cpython-34.pyc,, -tornado/test/__pycache__/stack_context_test.cpython-34.pyc,, -tornado/__pycache__/httpclient.cpython-34.pyc,, -tornado/test/__pycache__/concurrent_test.cpython-34.pyc,, -tornado/test/__pycache__/httpclient_test.cpython-34.pyc,, -tornado/test/__pycache__/netutil_test.cpython-34.pyc,, -tornado/platform/__pycache__/posix.cpython-34.pyc,, -tornado/test/__pycache__/testing_test.cpython-34.pyc,, -tornado/__pycache__/log.cpython-34.pyc,, -tornado/test/__pycache__/template_test.cpython-34.pyc,, -tornado/platform/__pycache__/kqueue.cpython-34.pyc,, -tornado/__pycache__/escape.cpython-34.pyc,, -tornado/__pycache__/gen.cpython-34.pyc,, -tornado/test/__pycache__/__init__.cpython-34.pyc,, -tornado/platform/__pycache__/__init__.cpython-34.pyc,, -tornado/__pycache__/options.cpython-34.pyc,, -tornado/__pycache__/locale.cpython-34.pyc,, -tornado/__pycache__/simple_httpclient.cpython-34.pyc,, -tornado/__pycache__/util.cpython-34.pyc,, -tornado/test/__pycache__/wsgi_test.cpython-34.pyc,, -tornado/__pycache__/web.cpython-34.pyc,, -tornado/test/__pycache__/tcpserver_test.cpython-34.pyc,, 
-tornado/test/__pycache__/import_test.cpython-34.pyc,, -tornado/platform/__pycache__/epoll.cpython-34.pyc,, -tornado/__pycache__/_locale_data.cpython-34.pyc,, -tornado/test/__pycache__/httpserver_test.cpython-34.pyc,, -tornado/platform/__pycache__/twisted.cpython-34.pyc,, -tornado/__pycache__/__init__.cpython-34.pyc,, -tornado/__pycache__/tcpserver.cpython-34.pyc,, -tornado/test/__pycache__/gen_test.cpython-34.pyc,, -tornado/test/__pycache__/util_test.cpython-34.pyc,, -tornado/platform/__pycache__/asyncio.cpython-34.pyc,, -tornado/__pycache__/autoreload.cpython-34.pyc,, -tornado/__pycache__/iostream.cpython-34.pyc,, -tornado/__pycache__/queues.cpython-34.pyc,, -tornado/__pycache__/httpserver.cpython-34.pyc,, -tornado/__pycache__/auth.cpython-34.pyc,, -tornado/__pycache__/template.cpython-34.pyc,, -tornado/__pycache__/stack_context.cpython-34.pyc,, -tornado/__pycache__/process.cpython-34.pyc,, -tornado/test/__pycache__/iostream_test.cpython-34.pyc,, -tornado/test/__pycache__/asyncio_test.cpython-34.pyc,, -tornado/__pycache__/tcpclient.cpython-34.pyc,, -tornado/test/__pycache__/tcpclient_test.cpython-34.pyc,, -tornado/test/__pycache__/httputil_test.cpython-34.pyc,, -tornado/test/__pycache__/process_test.cpython-34.pyc,, -tornado/__pycache__/wsgi.cpython-34.pyc,, -tornado/test/__pycache__/web_test.cpython-34.pyc,, -tornado/test/__pycache__/util.cpython-34.pyc,, -tornado/test/__pycache__/runtests.cpython-34.pyc,, -tornado/platform/__pycache__/auto.cpython-34.pyc,, -tornado/test/__pycache__/__main__.cpython-34.pyc,, -tornado/test/__pycache__/simple_httpclient_test.cpython-34.pyc,, -tornado/test/__pycache__/auth_test.cpython-34.pyc,, -tornado/__pycache__/ioloop.cpython-34.pyc,, -tornado/__pycache__/locks.cpython-34.pyc,, -tornado/test/__pycache__/escape_test.cpython-34.pyc,, -tornado/__pycache__/testing.cpython-34.pyc,, -tornado/test/__pycache__/curl_httpclient_test.cpython-34.pyc,, -tornado/__pycache__/curl_httpclient.cpython-34.pyc,, 
-tornado/test/__pycache__/ioloop_test.cpython-34.pyc,, -tornado/platform/__pycache__/caresresolver.cpython-34.pyc,, -tornado/__pycache__/netutil.cpython-34.pyc,, -tornado/test/__pycache__/options_test.cpython-34.pyc,, -tornado/__pycache__/concurrent.cpython-34.pyc,, -tornado/test/__pycache__/resolve_test_helper.cpython-34.pyc,, -tornado/platform/__pycache__/windows.cpython-34.pyc,, -tornado/test/__pycache__/locks_test.cpython-34.pyc,, -tornado/test/__pycache__/locale_test.cpython-34.pyc,, -tornado/test/__pycache__/websocket_test.cpython-34.pyc,, -tornado/__pycache__/httputil.cpython-34.pyc,, -tornado/test/__pycache__/log_test.cpython-34.pyc,, -tornado/__pycache__/http1connection.cpython-34.pyc,, -tornado/platform/__pycache__/select.cpython-34.pyc,, -tornado/test/__pycache__/queues_test.cpython-34.pyc,, -tornado/platform/__pycache__/interface.cpython-34.pyc,, -tornado/__pycache__/websocket.cpython-34.pyc,, -tornado/platform/__pycache__/common.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/WHEEL deleted file mode 100644 index db40973..0000000 --- a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: false -Tag: cp34-cp34m-linux_x86_64 - diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/metadata.json deleted file mode 100644 index 6d587e2..0000000 --- a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", 
"Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "extensions": {"python.details": {"contacts": [{"email": "python-tornado@googlegroups.com", "name": "Facebook", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://www.tornadoweb.org/"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "license": "http://www.apache.org/licenses/LICENSE-2.0", "metadata_version": "2.0", "name": "tornado", "run_requires": [{"requires": ["backports-abc (>=0.4)"]}], "summary": "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed.", "version": "4.3"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/PKG-INFO new file mode 100644 index 0000000..dff6e64 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/PKG-INFO @@ -0,0 +1,64 @@ +Metadata-Version: 1.1 +Name: tornado +Version: 4.3 +Summary: Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed. +Home-page: http://www.tornadoweb.org/ +Author: Facebook +Author-email: python-tornado@googlegroups.com +License: http://www.apache.org/licenses/LICENSE-2.0 +Description: Tornado Web Server + ================== + + `Tornado `_ is a Python web framework and + asynchronous networking library, originally developed at `FriendFeed + `_. By using non-blocking network I/O, Tornado + can scale to tens of thousands of open connections, making it ideal for + `long polling `_, + `WebSockets `_, and other + applications that require a long-lived connection to each user. + + Hello, world + ------------ + + Here is a simple "Hello, world" example web app for Tornado: + + .. 
code-block:: python + + import tornado.ioloop + import tornado.web + + class MainHandler(tornado.web.RequestHandler): + def get(self): + self.write("Hello, world") + + def make_app(): + return tornado.web.Application([ + (r"/", MainHandler), + ]) + + if __name__ == "__main__": + app = make_app() + app.listen(8888) + tornado.ioloop.IOLoop.current().start() + + This example does not use any of Tornado's asynchronous features; for + that see this `simple chat room + `_. + + Documentation + ------------- + + Documentation and links to additional resources are available at + http://www.tornadoweb.org + +Platform: UNKNOWN +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/SOURCES.txt new file mode 100644 index 0000000..654616d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/SOURCES.txt @@ -0,0 +1,233 @@ +MANIFEST.in +README.rst +runtests.sh +setup.cfg +setup.py +demos/appengine/README +demos/appengine/app.yaml +demos/appengine/blog.py +demos/appengine/static/blog.css +demos/appengine/templates/archive.html +demos/appengine/templates/base.html +demos/appengine/templates/compose.html +demos/appengine/templates/entry.html +demos/appengine/templates/feed.xml +demos/appengine/templates/home.html +demos/appengine/templates/modules/entry.html +demos/benchmark/benchmark.py +demos/benchmark/chunk_benchmark.py 
+demos/benchmark/gen_benchmark.py +demos/benchmark/stack_context_benchmark.py +demos/benchmark/template_benchmark.py +demos/blog/README +demos/blog/blog.py +demos/blog/schema.sql +demos/blog/static/blog.css +demos/blog/templates/archive.html +demos/blog/templates/base.html +demos/blog/templates/compose.html +demos/blog/templates/create_author.html +demos/blog/templates/entry.html +demos/blog/templates/feed.xml +demos/blog/templates/home.html +demos/blog/templates/login.html +demos/blog/templates/modules/entry.html +demos/chat/chatdemo.py +demos/chat/static/chat.css +demos/chat/static/chat.js +demos/chat/templates/index.html +demos/chat/templates/message.html +demos/facebook/README +demos/facebook/facebook.py +demos/facebook/static/facebook.css +demos/facebook/static/facebook.js +demos/facebook/templates/stream.html +demos/facebook/templates/modules/post.html +demos/helloworld/helloworld.py +demos/s3server/s3server.py +demos/twitter/home.html +demos/twitter/twitterdemo.py +demos/websocket/chatdemo.py +demos/websocket/static/chat.css +demos/websocket/static/chat.js +demos/websocket/templates/index.html +demos/websocket/templates/message.html +demos/webspider/webspider.py +docs/Makefile +docs/asyncio.rst +docs/auth.rst +docs/autoreload.rst +docs/caresresolver.rst +docs/concurrent.rst +docs/conf.py +docs/coroutine.rst +docs/escape.rst +docs/faq.rst +docs/favicon.ico +docs/gen.rst +docs/guide.rst +docs/http.rst +docs/http1connection.rst +docs/httpclient.rst +docs/httpserver.rst +docs/httputil.rst +docs/index.rst +docs/integration.rst +docs/ioloop.rst +docs/iostream.rst +docs/locale.rst +docs/locks.rst +docs/log.rst +docs/netutil.rst +docs/networking.rst +docs/options.rst +docs/process.rst +docs/queues.rst +docs/releases.rst +docs/requirements.txt +docs/stack_context.rst +docs/tcpclient.rst +docs/tcpserver.rst +docs/template.rst +docs/testing.rst +docs/tornado.png +docs/twisted.rst +docs/util.rst +docs/utilities.rst +docs/web.rst +docs/webframework.rst 
+docs/websocket.rst +docs/wsgi.rst +docs/guide/async.rst +docs/guide/coroutines.rst +docs/guide/intro.rst +docs/guide/queues.rst +docs/guide/running.rst +docs/guide/security.rst +docs/guide/structure.rst +docs/guide/templates.rst +docs/releases/v1.0.0.rst +docs/releases/v1.0.1.rst +docs/releases/v1.1.0.rst +docs/releases/v1.1.1.rst +docs/releases/v1.2.0.rst +docs/releases/v1.2.1.rst +docs/releases/v2.0.0.rst +docs/releases/v2.1.0.rst +docs/releases/v2.1.1.rst +docs/releases/v2.2.0.rst +docs/releases/v2.2.1.rst +docs/releases/v2.3.0.rst +docs/releases/v2.4.0.rst +docs/releases/v2.4.1.rst +docs/releases/v3.0.0.rst +docs/releases/v3.0.1.rst +docs/releases/v3.0.2.rst +docs/releases/v3.1.0.rst +docs/releases/v3.1.1.rst +docs/releases/v3.2.0.rst +docs/releases/v3.2.1.rst +docs/releases/v3.2.2.rst +docs/releases/v4.0.0.rst +docs/releases/v4.0.1.rst +docs/releases/v4.0.2.rst +docs/releases/v4.1.0.rst +docs/releases/v4.2.0.rst +docs/releases/v4.2.1.rst +docs/releases/v4.3.0.rst +tornado/__init__.py +tornado/_locale_data.py +tornado/auth.py +tornado/autoreload.py +tornado/concurrent.py +tornado/curl_httpclient.py +tornado/escape.py +tornado/gen.py +tornado/http1connection.py +tornado/httpclient.py +tornado/httpserver.py +tornado/httputil.py +tornado/ioloop.py +tornado/iostream.py +tornado/locale.py +tornado/locks.py +tornado/log.py +tornado/netutil.py +tornado/options.py +tornado/process.py +tornado/queues.py +tornado/simple_httpclient.py +tornado/speedups.c +tornado/stack_context.py +tornado/tcpclient.py +tornado/tcpserver.py +tornado/template.py +tornado/testing.py +tornado/util.py +tornado/web.py +tornado/websocket.py +tornado/wsgi.py +tornado.egg-info/PKG-INFO +tornado.egg-info/SOURCES.txt +tornado.egg-info/dependency_links.txt +tornado.egg-info/top_level.txt +tornado/platform/__init__.py +tornado/platform/asyncio.py +tornado/platform/auto.py +tornado/platform/caresresolver.py +tornado/platform/common.py +tornado/platform/epoll.py +tornado/platform/interface.py 
+tornado/platform/kqueue.py +tornado/platform/posix.py +tornado/platform/select.py +tornado/platform/twisted.py +tornado/platform/windows.py +tornado/test/__init__.py +tornado/test/__main__.py +tornado/test/asyncio_test.py +tornado/test/auth_test.py +tornado/test/concurrent_test.py +tornado/test/curl_httpclient_test.py +tornado/test/escape_test.py +tornado/test/gen_test.py +tornado/test/httpclient_test.py +tornado/test/httpserver_test.py +tornado/test/httputil_test.py +tornado/test/import_test.py +tornado/test/ioloop_test.py +tornado/test/iostream_test.py +tornado/test/locale_test.py +tornado/test/locks_test.py +tornado/test/log_test.py +tornado/test/netutil_test.py +tornado/test/options_test.cfg +tornado/test/options_test.py +tornado/test/process_test.py +tornado/test/queues_test.py +tornado/test/resolve_test_helper.py +tornado/test/runtests.py +tornado/test/simple_httpclient_test.py +tornado/test/stack_context_test.py +tornado/test/static_foo.txt +tornado/test/tcpclient_test.py +tornado/test/tcpserver_test.py +tornado/test/template_test.py +tornado/test/test.crt +tornado/test/test.key +tornado/test/testing_test.py +tornado/test/twisted_test.py +tornado/test/util.py +tornado/test/util_test.py +tornado/test/web_test.py +tornado/test/websocket_test.py +tornado/test/wsgi_test.py +tornado/test/csv_translations/fr_FR.csv +tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo +tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po +tornado/test/static/robots.txt +tornado/test/static/sample.xml +tornado/test/static/sample.xml.bz2 +tornado/test/static/sample.xml.gz +tornado/test/static/dir/index.html +tornado/test/templates/utf8.html \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ 
b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/installed-files.txt new file mode 100644 index 0000000..7761e1a --- /dev/null +++ b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/installed-files.txt @@ -0,0 +1,174 @@ +../tornado/__init__.py +../tornado/__pycache__/__init__.cpython-37.pyc +../tornado/__pycache__/_locale_data.cpython-37.pyc +../tornado/__pycache__/auth.cpython-37.pyc +../tornado/__pycache__/autoreload.cpython-37.pyc +../tornado/__pycache__/concurrent.cpython-37.pyc +../tornado/__pycache__/curl_httpclient.cpython-37.pyc +../tornado/__pycache__/escape.cpython-37.pyc +../tornado/__pycache__/gen.cpython-37.pyc +../tornado/__pycache__/http1connection.cpython-37.pyc +../tornado/__pycache__/httpclient.cpython-37.pyc +../tornado/__pycache__/httpserver.cpython-37.pyc +../tornado/__pycache__/httputil.cpython-37.pyc +../tornado/__pycache__/ioloop.cpython-37.pyc +../tornado/__pycache__/iostream.cpython-37.pyc +../tornado/__pycache__/locale.cpython-37.pyc +../tornado/__pycache__/locks.cpython-37.pyc +../tornado/__pycache__/log.cpython-37.pyc +../tornado/__pycache__/netutil.cpython-37.pyc +../tornado/__pycache__/options.cpython-37.pyc +../tornado/__pycache__/process.cpython-37.pyc +../tornado/__pycache__/queues.cpython-37.pyc +../tornado/__pycache__/simple_httpclient.cpython-37.pyc +../tornado/__pycache__/stack_context.cpython-37.pyc +../tornado/__pycache__/tcpclient.cpython-37.pyc +../tornado/__pycache__/tcpserver.cpython-37.pyc +../tornado/__pycache__/template.cpython-37.pyc +../tornado/__pycache__/testing.cpython-37.pyc +../tornado/__pycache__/util.cpython-37.pyc +../tornado/__pycache__/web.cpython-37.pyc +../tornado/__pycache__/websocket.cpython-37.pyc +../tornado/__pycache__/wsgi.cpython-37.pyc +../tornado/_locale_data.py +../tornado/auth.py 
+../tornado/autoreload.py +../tornado/concurrent.py +../tornado/curl_httpclient.py +../tornado/escape.py +../tornado/gen.py +../tornado/http1connection.py +../tornado/httpclient.py +../tornado/httpserver.py +../tornado/httputil.py +../tornado/ioloop.py +../tornado/iostream.py +../tornado/locale.py +../tornado/locks.py +../tornado/log.py +../tornado/netutil.py +../tornado/options.py +../tornado/platform/__init__.py +../tornado/platform/__pycache__/__init__.cpython-37.pyc +../tornado/platform/__pycache__/asyncio.cpython-37.pyc +../tornado/platform/__pycache__/auto.cpython-37.pyc +../tornado/platform/__pycache__/caresresolver.cpython-37.pyc +../tornado/platform/__pycache__/common.cpython-37.pyc +../tornado/platform/__pycache__/epoll.cpython-37.pyc +../tornado/platform/__pycache__/interface.cpython-37.pyc +../tornado/platform/__pycache__/kqueue.cpython-37.pyc +../tornado/platform/__pycache__/posix.cpython-37.pyc +../tornado/platform/__pycache__/select.cpython-37.pyc +../tornado/platform/__pycache__/twisted.cpython-37.pyc +../tornado/platform/__pycache__/windows.cpython-37.pyc +../tornado/platform/asyncio.py +../tornado/platform/auto.py +../tornado/platform/caresresolver.py +../tornado/platform/common.py +../tornado/platform/epoll.py +../tornado/platform/interface.py +../tornado/platform/kqueue.py +../tornado/platform/posix.py +../tornado/platform/select.py +../tornado/platform/twisted.py +../tornado/platform/windows.py +../tornado/process.py +../tornado/queues.py +../tornado/simple_httpclient.py +../tornado/speedups.cpython-37m-x86_64-linux-gnu.so +../tornado/stack_context.py +../tornado/tcpclient.py +../tornado/tcpserver.py +../tornado/template.py +../tornado/test/__init__.py +../tornado/test/__main__.py +../tornado/test/__pycache__/__init__.cpython-37.pyc +../tornado/test/__pycache__/__main__.cpython-37.pyc +../tornado/test/__pycache__/asyncio_test.cpython-37.pyc +../tornado/test/__pycache__/auth_test.cpython-37.pyc 
+../tornado/test/__pycache__/concurrent_test.cpython-37.pyc +../tornado/test/__pycache__/curl_httpclient_test.cpython-37.pyc +../tornado/test/__pycache__/escape_test.cpython-37.pyc +../tornado/test/__pycache__/gen_test.cpython-37.pyc +../tornado/test/__pycache__/httpclient_test.cpython-37.pyc +../tornado/test/__pycache__/httpserver_test.cpython-37.pyc +../tornado/test/__pycache__/httputil_test.cpython-37.pyc +../tornado/test/__pycache__/import_test.cpython-37.pyc +../tornado/test/__pycache__/ioloop_test.cpython-37.pyc +../tornado/test/__pycache__/iostream_test.cpython-37.pyc +../tornado/test/__pycache__/locale_test.cpython-37.pyc +../tornado/test/__pycache__/locks_test.cpython-37.pyc +../tornado/test/__pycache__/log_test.cpython-37.pyc +../tornado/test/__pycache__/netutil_test.cpython-37.pyc +../tornado/test/__pycache__/options_test.cpython-37.pyc +../tornado/test/__pycache__/process_test.cpython-37.pyc +../tornado/test/__pycache__/queues_test.cpython-37.pyc +../tornado/test/__pycache__/resolve_test_helper.cpython-37.pyc +../tornado/test/__pycache__/runtests.cpython-37.pyc +../tornado/test/__pycache__/simple_httpclient_test.cpython-37.pyc +../tornado/test/__pycache__/stack_context_test.cpython-37.pyc +../tornado/test/__pycache__/tcpclient_test.cpython-37.pyc +../tornado/test/__pycache__/tcpserver_test.cpython-37.pyc +../tornado/test/__pycache__/template_test.cpython-37.pyc +../tornado/test/__pycache__/testing_test.cpython-37.pyc +../tornado/test/__pycache__/twisted_test.cpython-37.pyc +../tornado/test/__pycache__/util.cpython-37.pyc +../tornado/test/__pycache__/util_test.cpython-37.pyc +../tornado/test/__pycache__/web_test.cpython-37.pyc +../tornado/test/__pycache__/websocket_test.cpython-37.pyc +../tornado/test/__pycache__/wsgi_test.cpython-37.pyc +../tornado/test/asyncio_test.py +../tornado/test/auth_test.py +../tornado/test/concurrent_test.py +../tornado/test/csv_translations/fr_FR.csv +../tornado/test/curl_httpclient_test.py +../tornado/test/escape_test.py 
+../tornado/test/gen_test.py +../tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo +../tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po +../tornado/test/httpclient_test.py +../tornado/test/httpserver_test.py +../tornado/test/httputil_test.py +../tornado/test/import_test.py +../tornado/test/ioloop_test.py +../tornado/test/iostream_test.py +../tornado/test/locale_test.py +../tornado/test/locks_test.py +../tornado/test/log_test.py +../tornado/test/netutil_test.py +../tornado/test/options_test.cfg +../tornado/test/options_test.py +../tornado/test/process_test.py +../tornado/test/queues_test.py +../tornado/test/resolve_test_helper.py +../tornado/test/runtests.py +../tornado/test/simple_httpclient_test.py +../tornado/test/stack_context_test.py +../tornado/test/static/dir/index.html +../tornado/test/static/robots.txt +../tornado/test/static/sample.xml +../tornado/test/static/sample.xml.bz2 +../tornado/test/static/sample.xml.gz +../tornado/test/static_foo.txt +../tornado/test/tcpclient_test.py +../tornado/test/tcpserver_test.py +../tornado/test/template_test.py +../tornado/test/templates/utf8.html +../tornado/test/test.crt +../tornado/test/test.key +../tornado/test/testing_test.py +../tornado/test/twisted_test.py +../tornado/test/util.py +../tornado/test/util_test.py +../tornado/test/web_test.py +../tornado/test/websocket_test.py +../tornado/test/wsgi_test.py +../tornado/testing.py +../tornado/util.py +../tornado/web.py +../tornado/websocket.py +../tornado/wsgi.py +PKG-INFO +SOURCES.txt +dependency_links.txt +top_level.txt diff --git a/Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/tornado-4.3.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/tornado-4.3.egg-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/tornado/speedups.cpython-34m.so 
b/Shared/lib/python3.4/site-packages/tornado/speedups.cpython-34m.so deleted file mode 100755 index 395a943..0000000 Binary files a/Shared/lib/python3.4/site-packages/tornado/speedups.cpython-34m.so and /dev/null differ diff --git a/Shared/lib/python3.4/site-packages/tornado/speedups.cpython-37m-x86_64-linux-gnu.so b/Shared/lib/python3.4/site-packages/tornado/speedups.cpython-37m-x86_64-linux-gnu.so new file mode 100755 index 0000000..6c859ec Binary files /dev/null and b/Shared/lib/python3.4/site-packages/tornado/speedups.cpython-37m-x86_64-linux-gnu.so differ diff --git a/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..665e877 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/DESCRIPTION.rst @@ -0,0 +1,27 @@ +python-webencodings +=================== + +This is a Python implementation of the `WHATWG Encoding standard +`_. + +* Latest documentation: http://packages.python.org/webencodings/ +* Source code and issue tracker: + https://github.com/gsnedders/python-webencodings +* PyPI releases: http://pypi.python.org/pypi/webencodings +* License: BSD +* Python 2.6+ and 3.3+ + +In order to be compatible with legacy web content +when interpreting something like ``Content-Type: text/html; charset=latin1``, +tools need to use a particular set of aliases for encoding labels +as well as some overriding rules. +For example, ``US-ASCII`` and ``iso-8859-1`` on the web are actually +aliases for ``windows-1252``, and an UTF-8 or UTF-16 BOM takes precedence +over any other encoding declaration. +The Encoding standard defines all such details so that implementations do +not have to reverse-engineer each other. + +This module has encoding labels and BOM detection, +but the actual implementation for encoders and decoders is Python’s. 
+ + diff --git a/Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/INSTALLER similarity index 100% rename from Shared/lib/python3.4/site-packages/enum_compat-0.0.2.dist-info/INSTALLER rename to Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/INSTALLER diff --git a/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/METADATA b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/METADATA new file mode 100644 index 0000000..d6e1cb3 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/METADATA @@ -0,0 +1,52 @@ +Metadata-Version: 2.0 +Name: webencodings +Version: 0.5.1 +Summary: Character encoding aliases for legacy web content +Home-page: https://github.com/SimonSapin/python-webencodings +Author: Geoffrey Sneddon +Author-email: me@gsnedders.com +License: BSD +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: WWW/HTTP + +python-webencodings +=================== + +This is a Python implementation of the `WHATWG Encoding standard +`_. 
+ +* Latest documentation: http://packages.python.org/webencodings/ +* Source code and issue tracker: + https://github.com/gsnedders/python-webencodings +* PyPI releases: http://pypi.python.org/pypi/webencodings +* License: BSD +* Python 2.6+ and 3.3+ + +In order to be compatible with legacy web content +when interpreting something like ``Content-Type: text/html; charset=latin1``, +tools need to use a particular set of aliases for encoding labels +as well as some overriding rules. +For example, ``US-ASCII`` and ``iso-8859-1`` on the web are actually +aliases for ``windows-1252``, and an UTF-8 or UTF-16 BOM takes precedence +over any other encoding declaration. +The Encoding standard defines all such details so that implementations do +not have to reverse-engineer each other. + +This module has encoding labels and BOM detection, +but the actual implementation for encoders and decoders is Python’s. + + diff --git a/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/RECORD b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/RECORD new file mode 100644 index 0000000..28208dc --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/RECORD @@ -0,0 +1,17 @@ +webencodings-0.5.1.dist-info/DESCRIPTION.rst,sha256=_NzU86VndzvBejXtyshDbUUz_lzzOrh54_OY9T0OuhQ,1039 +webencodings-0.5.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +webencodings-0.5.1.dist-info/METADATA,sha256=KQ7Fg8Lv4_nbUAbzAS29QmiXzG7HnWLIXpF2oN4P2u4,2063 +webencodings-0.5.1.dist-info/RECORD,, +webencodings-0.5.1.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +webencodings-0.5.1.dist-info/metadata.json,sha256=AYbksnhE5Frl5Ao68tMDgGeytnRxYvvWlvNgxKMiQFY,1092 +webencodings-0.5.1.dist-info/top_level.txt,sha256=bZs_aZHSf_PNlfIHD4-BETJmRi99BJdKLrOW7rQngeo,13 +webencodings/__init__.py,sha256=qOBJIuPy_4ByYH6W_bNgJF-qYQ2DoU-dKsDu5yRWCXg,10579 +webencodings/__pycache__/__init__.cpython-37.pyc,, 
+webencodings/__pycache__/labels.cpython-37.pyc,, +webencodings/__pycache__/mklabels.cpython-37.pyc,, +webencodings/__pycache__/tests.cpython-37.pyc,, +webencodings/__pycache__/x_user_defined.cpython-37.pyc,, +webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979 +webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305 +webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563 +webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307 diff --git a/Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/WHEEL similarity index 100% rename from Shared/lib/python3.4/site-packages/chardet-2.3.0.dist-info/WHEEL rename to Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/WHEEL diff --git a/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/metadata.json new file mode 100644 index 0000000..7192596 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Internet :: WWW/HTTP"], "extensions": {"python.details": {"contacts": [{"email": "me@gsnedders.com", "name": "Geoffrey Sneddon", "role": "author"}], "document_names": {"description": 
"DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/SimonSapin/python-webencodings"}}}, "generator": "bdist_wheel (0.29.0)", "license": "BSD", "metadata_version": "2.0", "name": "webencodings", "summary": "Character encoding aliases for legacy web content", "version": "0.5.1"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/top_level.txt new file mode 100644 index 0000000..be8fcb7 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings-0.5.1.dist-info/top_level.txt @@ -0,0 +1 @@ +webencodings diff --git a/Shared/lib/python3.4/site-packages/webencodings/__init__.py b/Shared/lib/python3.4/site-packages/webencodings/__init__.py new file mode 100644 index 0000000..d21d697 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings/__init__.py @@ -0,0 +1,342 @@ +# coding: utf-8 +""" + + webencodings + ~~~~~~~~~~~~ + + This is a Python implementation of the `WHATWG Encoding standard + `. See README for details. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +from __future__ import unicode_literals + +import codecs + +from .labels import LABELS + + +VERSION = '0.5.1' + + +# Some names in Encoding are not valid Python aliases. Remap these. +PYTHON_NAMES = { + 'iso-8859-8-i': 'iso-8859-8', + 'x-mac-cyrillic': 'mac-cyrillic', + 'macintosh': 'mac-roman', + 'windows-874': 'cp874'} + +CACHE = {} + + +def ascii_lower(string): + r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z. + + :param string: An Unicode string. + :returns: A new Unicode string. + + This is used for `ASCII case-insensitive + `_ + matching of encoding labels. + The same matching is also used, among other things, + for `CSS keywords `_. 
+ + This is different from the :meth:`~py:str.lower` method of Unicode strings + which also affect non-ASCII characters, + sometimes mapping them into the ASCII range: + + >>> keyword = u'Bac\N{KELVIN SIGN}ground' + >>> assert keyword.lower() == u'background' + >>> assert ascii_lower(keyword) != keyword.lower() + >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground' + + """ + # This turns out to be faster than unicode.translate() + return string.encode('utf8').lower().decode('utf8') + + +def lookup(label): + """ + Look for an encoding by its label. + This is the spec’s `get an encoding + `_ algorithm. + Supported labels are listed there. + + :param label: A string. + :returns: + An :class:`Encoding` object, or :obj:`None` for an unknown label. + + """ + # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020. + label = ascii_lower(label.strip('\t\n\f\r ')) + name = LABELS.get(label) + if name is None: + return None + encoding = CACHE.get(name) + if encoding is None: + if name == 'x-user-defined': + from .x_user_defined import codec_info + else: + python_name = PYTHON_NAMES.get(name, name) + # Any python_name value that gets to here should be valid. + codec_info = codecs.lookup(python_name) + encoding = Encoding(name, codec_info) + CACHE[name] = encoding + return encoding + + +def _get_encoding(encoding_or_label): + """ + Accept either an encoding object or label. + + :param encoding: An :class:`Encoding` object or a label string. + :returns: An :class:`Encoding` object. + :raises: :exc:`~exceptions.LookupError` for an unknown label. + + """ + if hasattr(encoding_or_label, 'codec_info'): + return encoding_or_label + + encoding = lookup(encoding_or_label) + if encoding is None: + raise LookupError('Unknown encoding label: %r' % encoding_or_label) + return encoding + + +class Encoding(object): + """Reresents a character encoding such as UTF-8, + that can be used for decoding or encoding. + + .. 
attribute:: name + + Canonical name of the encoding + + .. attribute:: codec_info + + The actual implementation of the encoding, + a stdlib :class:`~codecs.CodecInfo` object. + See :func:`codecs.register`. + + """ + def __init__(self, name, codec_info): + self.name = name + self.codec_info = codec_info + + def __repr__(self): + return '' % self.name + + +#: The UTF-8 encoding. Should be used for new content and formats. +UTF8 = lookup('utf-8') + +_UTF16LE = lookup('utf-16le') +_UTF16BE = lookup('utf-16be') + + +def decode(input, fallback_encoding, errors='replace'): + """ + Decode a single string. + + :param input: A byte string + :param fallback_encoding: + An :class:`Encoding` object or a label string. + The encoding to use if :obj:`input` does note have a BOM. + :param errors: Type of error handling. See :func:`codecs.register`. + :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + :return: + A ``(output, encoding)`` tuple of an Unicode string + and an :obj:`Encoding`. + + """ + # Fail early if `encoding` is an invalid label. + fallback_encoding = _get_encoding(fallback_encoding) + bom_encoding, input = _detect_bom(input) + encoding = bom_encoding or fallback_encoding + return encoding.codec_info.decode(input, errors)[0], encoding + + +def _detect_bom(input): + """Return (bom_encoding, input), with any BOM removed from the input.""" + if input.startswith(b'\xFF\xFE'): + return _UTF16LE, input[2:] + if input.startswith(b'\xFE\xFF'): + return _UTF16BE, input[2:] + if input.startswith(b'\xEF\xBB\xBF'): + return UTF8, input[3:] + return None, input + + +def encode(input, encoding=UTF8, errors='strict'): + """ + Encode a single string. + + :param input: An Unicode string. + :param encoding: An :class:`Encoding` object or a label string. + :param errors: Type of error handling. See :func:`codecs.register`. + :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + :return: A byte string. 
+ + """ + return _get_encoding(encoding).codec_info.encode(input, errors)[0] + + +def iter_decode(input, fallback_encoding, errors='replace'): + """ + "Pull"-based decoder. + + :param input: + An iterable of byte strings. + + The input is first consumed just enough to determine the encoding + based on the precense of a BOM, + then consumed on demand when the return value is. + :param fallback_encoding: + An :class:`Encoding` object or a label string. + The encoding to use if :obj:`input` does note have a BOM. + :param errors: Type of error handling. See :func:`codecs.register`. + :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + :returns: + An ``(output, encoding)`` tuple. + :obj:`output` is an iterable of Unicode strings, + :obj:`encoding` is the :obj:`Encoding` that is being used. + + """ + + decoder = IncrementalDecoder(fallback_encoding, errors) + generator = _iter_decode_generator(input, decoder) + encoding = next(generator) + return generator, encoding + + +def _iter_decode_generator(input, decoder): + """Return a generator that first yields the :obj:`Encoding`, + then yields output chukns as Unicode strings. + + """ + decode = decoder.decode + input = iter(input) + for chunck in input: + output = decode(chunck) + if output: + assert decoder.encoding is not None + yield decoder.encoding + yield output + break + else: + # Input exhausted without determining the encoding + output = decode(b'', final=True) + assert decoder.encoding is not None + yield decoder.encoding + if output: + yield output + return + + for chunck in input: + output = decode(chunck) + if output: + yield output + output = decode(b'', final=True) + if output: + yield output + + +def iter_encode(input, encoding=UTF8, errors='strict'): + """ + “Pull”-based encoder. + + :param input: An iterable of Unicode strings. + :param encoding: An :class:`Encoding` object or a label string. + :param errors: Type of error handling. See :func:`codecs.register`. 
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + :returns: An iterable of byte strings. + + """ + # Fail early if `encoding` is an invalid label. + encode = IncrementalEncoder(encoding, errors).encode + return _iter_encode_generator(input, encode) + + +def _iter_encode_generator(input, encode): + for chunck in input: + output = encode(chunck) + if output: + yield output + output = encode('', final=True) + if output: + yield output + + +class IncrementalDecoder(object): + """ + “Push”-based decoder. + + :param fallback_encoding: + An :class:`Encoding` object or a label string. + The encoding to use if :obj:`input` does note have a BOM. + :param errors: Type of error handling. See :func:`codecs.register`. + :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + + """ + def __init__(self, fallback_encoding, errors='replace'): + # Fail early if `encoding` is an invalid label. + self._fallback_encoding = _get_encoding(fallback_encoding) + self._errors = errors + self._buffer = b'' + self._decoder = None + #: The actual :class:`Encoding` that is being used, + #: or :obj:`None` if that is not determined yet. + #: (Ie. if there is not enough input yet to determine + #: if there is a BOM.) + self.encoding = None # Not known yet. + + def decode(self, input, final=False): + """Decode one chunk of the input. + + :param input: A byte string. + :param final: + Indicate that no more input is available. + Must be :obj:`True` if this is the last call. + :returns: An Unicode string. + + """ + decoder = self._decoder + if decoder is not None: + return decoder(input, final) + + input = self._buffer + input + encoding, input = _detect_bom(input) + if encoding is None: + if len(input) < 3 and not final: # Not enough data yet. 
+ self._buffer = input + return '' + else: # No BOM + encoding = self._fallback_encoding + decoder = encoding.codec_info.incrementaldecoder(self._errors).decode + self._decoder = decoder + self.encoding = encoding + return decoder(input, final) + + +class IncrementalEncoder(object): + """ + “Push”-based encoder. + + :param encoding: An :class:`Encoding` object or a label string. + :param errors: Type of error handling. See :func:`codecs.register`. + :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. + + .. method:: encode(input, final=False) + + :param input: An Unicode string. + :param final: + Indicate that no more input is available. + Must be :obj:`True` if this is the last call. + :returns: A byte string. + + """ + def __init__(self, encoding=UTF8, errors='strict'): + encoding = _get_encoding(encoding) + self.encode = encoding.codec_info.incrementalencoder(errors).encode diff --git a/Shared/lib/python3.4/site-packages/webencodings/labels.py b/Shared/lib/python3.4/site-packages/webencodings/labels.py new file mode 100644 index 0000000..29cbf91 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings/labels.py @@ -0,0 +1,231 @@ +""" + + webencodings.labels + ~~~~~~~~~~~~~~~~~~~ + + Map encoding labels to their name. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +# XXX Do not edit! 
+# This file is automatically generated by mklabels.py + +LABELS = { + 'unicode-1-1-utf-8': 'utf-8', + 'utf-8': 'utf-8', + 'utf8': 'utf-8', + '866': 'ibm866', + 'cp866': 'ibm866', + 'csibm866': 'ibm866', + 'ibm866': 'ibm866', + 'csisolatin2': 'iso-8859-2', + 'iso-8859-2': 'iso-8859-2', + 'iso-ir-101': 'iso-8859-2', + 'iso8859-2': 'iso-8859-2', + 'iso88592': 'iso-8859-2', + 'iso_8859-2': 'iso-8859-2', + 'iso_8859-2:1987': 'iso-8859-2', + 'l2': 'iso-8859-2', + 'latin2': 'iso-8859-2', + 'csisolatin3': 'iso-8859-3', + 'iso-8859-3': 'iso-8859-3', + 'iso-ir-109': 'iso-8859-3', + 'iso8859-3': 'iso-8859-3', + 'iso88593': 'iso-8859-3', + 'iso_8859-3': 'iso-8859-3', + 'iso_8859-3:1988': 'iso-8859-3', + 'l3': 'iso-8859-3', + 'latin3': 'iso-8859-3', + 'csisolatin4': 'iso-8859-4', + 'iso-8859-4': 'iso-8859-4', + 'iso-ir-110': 'iso-8859-4', + 'iso8859-4': 'iso-8859-4', + 'iso88594': 'iso-8859-4', + 'iso_8859-4': 'iso-8859-4', + 'iso_8859-4:1988': 'iso-8859-4', + 'l4': 'iso-8859-4', + 'latin4': 'iso-8859-4', + 'csisolatincyrillic': 'iso-8859-5', + 'cyrillic': 'iso-8859-5', + 'iso-8859-5': 'iso-8859-5', + 'iso-ir-144': 'iso-8859-5', + 'iso8859-5': 'iso-8859-5', + 'iso88595': 'iso-8859-5', + 'iso_8859-5': 'iso-8859-5', + 'iso_8859-5:1988': 'iso-8859-5', + 'arabic': 'iso-8859-6', + 'asmo-708': 'iso-8859-6', + 'csiso88596e': 'iso-8859-6', + 'csiso88596i': 'iso-8859-6', + 'csisolatinarabic': 'iso-8859-6', + 'ecma-114': 'iso-8859-6', + 'iso-8859-6': 'iso-8859-6', + 'iso-8859-6-e': 'iso-8859-6', + 'iso-8859-6-i': 'iso-8859-6', + 'iso-ir-127': 'iso-8859-6', + 'iso8859-6': 'iso-8859-6', + 'iso88596': 'iso-8859-6', + 'iso_8859-6': 'iso-8859-6', + 'iso_8859-6:1987': 'iso-8859-6', + 'csisolatingreek': 'iso-8859-7', + 'ecma-118': 'iso-8859-7', + 'elot_928': 'iso-8859-7', + 'greek': 'iso-8859-7', + 'greek8': 'iso-8859-7', + 'iso-8859-7': 'iso-8859-7', + 'iso-ir-126': 'iso-8859-7', + 'iso8859-7': 'iso-8859-7', + 'iso88597': 'iso-8859-7', + 'iso_8859-7': 'iso-8859-7', + 'iso_8859-7:1987': 
'iso-8859-7', + 'sun_eu_greek': 'iso-8859-7', + 'csiso88598e': 'iso-8859-8', + 'csisolatinhebrew': 'iso-8859-8', + 'hebrew': 'iso-8859-8', + 'iso-8859-8': 'iso-8859-8', + 'iso-8859-8-e': 'iso-8859-8', + 'iso-ir-138': 'iso-8859-8', + 'iso8859-8': 'iso-8859-8', + 'iso88598': 'iso-8859-8', + 'iso_8859-8': 'iso-8859-8', + 'iso_8859-8:1988': 'iso-8859-8', + 'visual': 'iso-8859-8', + 'csiso88598i': 'iso-8859-8-i', + 'iso-8859-8-i': 'iso-8859-8-i', + 'logical': 'iso-8859-8-i', + 'csisolatin6': 'iso-8859-10', + 'iso-8859-10': 'iso-8859-10', + 'iso-ir-157': 'iso-8859-10', + 'iso8859-10': 'iso-8859-10', + 'iso885910': 'iso-8859-10', + 'l6': 'iso-8859-10', + 'latin6': 'iso-8859-10', + 'iso-8859-13': 'iso-8859-13', + 'iso8859-13': 'iso-8859-13', + 'iso885913': 'iso-8859-13', + 'iso-8859-14': 'iso-8859-14', + 'iso8859-14': 'iso-8859-14', + 'iso885914': 'iso-8859-14', + 'csisolatin9': 'iso-8859-15', + 'iso-8859-15': 'iso-8859-15', + 'iso8859-15': 'iso-8859-15', + 'iso885915': 'iso-8859-15', + 'iso_8859-15': 'iso-8859-15', + 'l9': 'iso-8859-15', + 'iso-8859-16': 'iso-8859-16', + 'cskoi8r': 'koi8-r', + 'koi': 'koi8-r', + 'koi8': 'koi8-r', + 'koi8-r': 'koi8-r', + 'koi8_r': 'koi8-r', + 'koi8-u': 'koi8-u', + 'csmacintosh': 'macintosh', + 'mac': 'macintosh', + 'macintosh': 'macintosh', + 'x-mac-roman': 'macintosh', + 'dos-874': 'windows-874', + 'iso-8859-11': 'windows-874', + 'iso8859-11': 'windows-874', + 'iso885911': 'windows-874', + 'tis-620': 'windows-874', + 'windows-874': 'windows-874', + 'cp1250': 'windows-1250', + 'windows-1250': 'windows-1250', + 'x-cp1250': 'windows-1250', + 'cp1251': 'windows-1251', + 'windows-1251': 'windows-1251', + 'x-cp1251': 'windows-1251', + 'ansi_x3.4-1968': 'windows-1252', + 'ascii': 'windows-1252', + 'cp1252': 'windows-1252', + 'cp819': 'windows-1252', + 'csisolatin1': 'windows-1252', + 'ibm819': 'windows-1252', + 'iso-8859-1': 'windows-1252', + 'iso-ir-100': 'windows-1252', + 'iso8859-1': 'windows-1252', + 'iso88591': 'windows-1252', + 
'iso_8859-1': 'windows-1252', + 'iso_8859-1:1987': 'windows-1252', + 'l1': 'windows-1252', + 'latin1': 'windows-1252', + 'us-ascii': 'windows-1252', + 'windows-1252': 'windows-1252', + 'x-cp1252': 'windows-1252', + 'cp1253': 'windows-1253', + 'windows-1253': 'windows-1253', + 'x-cp1253': 'windows-1253', + 'cp1254': 'windows-1254', + 'csisolatin5': 'windows-1254', + 'iso-8859-9': 'windows-1254', + 'iso-ir-148': 'windows-1254', + 'iso8859-9': 'windows-1254', + 'iso88599': 'windows-1254', + 'iso_8859-9': 'windows-1254', + 'iso_8859-9:1989': 'windows-1254', + 'l5': 'windows-1254', + 'latin5': 'windows-1254', + 'windows-1254': 'windows-1254', + 'x-cp1254': 'windows-1254', + 'cp1255': 'windows-1255', + 'windows-1255': 'windows-1255', + 'x-cp1255': 'windows-1255', + 'cp1256': 'windows-1256', + 'windows-1256': 'windows-1256', + 'x-cp1256': 'windows-1256', + 'cp1257': 'windows-1257', + 'windows-1257': 'windows-1257', + 'x-cp1257': 'windows-1257', + 'cp1258': 'windows-1258', + 'windows-1258': 'windows-1258', + 'x-cp1258': 'windows-1258', + 'x-mac-cyrillic': 'x-mac-cyrillic', + 'x-mac-ukrainian': 'x-mac-cyrillic', + 'chinese': 'gbk', + 'csgb2312': 'gbk', + 'csiso58gb231280': 'gbk', + 'gb2312': 'gbk', + 'gb_2312': 'gbk', + 'gb_2312-80': 'gbk', + 'gbk': 'gbk', + 'iso-ir-58': 'gbk', + 'x-gbk': 'gbk', + 'gb18030': 'gb18030', + 'hz-gb-2312': 'hz-gb-2312', + 'big5': 'big5', + 'big5-hkscs': 'big5', + 'cn-big5': 'big5', + 'csbig5': 'big5', + 'x-x-big5': 'big5', + 'cseucpkdfmtjapanese': 'euc-jp', + 'euc-jp': 'euc-jp', + 'x-euc-jp': 'euc-jp', + 'csiso2022jp': 'iso-2022-jp', + 'iso-2022-jp': 'iso-2022-jp', + 'csshiftjis': 'shift_jis', + 'ms_kanji': 'shift_jis', + 'shift-jis': 'shift_jis', + 'shift_jis': 'shift_jis', + 'sjis': 'shift_jis', + 'windows-31j': 'shift_jis', + 'x-sjis': 'shift_jis', + 'cseuckr': 'euc-kr', + 'csksc56011987': 'euc-kr', + 'euc-kr': 'euc-kr', + 'iso-ir-149': 'euc-kr', + 'korean': 'euc-kr', + 'ks_c_5601-1987': 'euc-kr', + 'ks_c_5601-1989': 'euc-kr', + 'ksc5601': 
'euc-kr', + 'ksc_5601': 'euc-kr', + 'windows-949': 'euc-kr', + 'csiso2022kr': 'iso-2022-kr', + 'iso-2022-kr': 'iso-2022-kr', + 'utf-16be': 'utf-16be', + 'utf-16': 'utf-16le', + 'utf-16le': 'utf-16le', + 'x-user-defined': 'x-user-defined', +} diff --git a/Shared/lib/python3.4/site-packages/webencodings/mklabels.py b/Shared/lib/python3.4/site-packages/webencodings/mklabels.py new file mode 100644 index 0000000..295dc92 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings/mklabels.py @@ -0,0 +1,59 @@ +""" + + webencodings.mklabels + ~~~~~~~~~~~~~~~~~~~~~ + + Regenarate the webencodings.labels module. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +import json +try: + from urllib import urlopen +except ImportError: + from urllib.request import urlopen + + +def assert_lower(string): + assert string == string.lower() + return string + + +def generate(url): + parts = ['''\ +""" + + webencodings.labels + ~~~~~~~~~~~~~~~~~~~ + + Map encoding labels to their name. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +# XXX Do not edit! 
+# This file is automatically generated by mklabels.py + +LABELS = { +'''] + labels = [ + (repr(assert_lower(label)).lstrip('u'), + repr(encoding['name']).lstrip('u')) + for category in json.loads(urlopen(url).read().decode('ascii')) + for encoding in category['encodings'] + for label in encoding['labels']] + max_len = max(len(label) for label, name in labels) + parts.extend( + ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name) + for label, name in labels) + parts.append('}') + return ''.join(parts) + + +if __name__ == '__main__': + print(generate('http://encoding.spec.whatwg.org/encodings.json')) diff --git a/Shared/lib/python3.4/site-packages/webencodings/tests.py b/Shared/lib/python3.4/site-packages/webencodings/tests.py new file mode 100644 index 0000000..e12c10d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings/tests.py @@ -0,0 +1,153 @@ +# coding: utf-8 +""" + + webencodings.tests + ~~~~~~~~~~~~~~~~~~ + + A basic test suite for Encoding. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. + +""" + +from __future__ import unicode_literals + +from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode, + IncrementalDecoder, IncrementalEncoder, UTF8) + + +def assert_raises(exception, function, *args, **kwargs): + try: + function(*args, **kwargs) + except exception: + return + else: # pragma: no cover + raise AssertionError('Did not raise %s.' % exception) + + +def test_labels(): + assert lookup('utf-8').name == 'utf-8' + assert lookup('Utf-8').name == 'utf-8' + assert lookup('UTF-8').name == 'utf-8' + assert lookup('utf8').name == 'utf-8' + assert lookup('utf8').name == 'utf-8' + assert lookup('utf8 ').name == 'utf-8' + assert lookup(' \r\nutf8\t').name == 'utf-8' + assert lookup('u8') is None # Python label. + assert lookup('utf-8 ') is None # Non-ASCII white space. 
+ + assert lookup('US-ASCII').name == 'windows-1252' + assert lookup('iso-8859-1').name == 'windows-1252' + assert lookup('latin1').name == 'windows-1252' + assert lookup('LATIN1').name == 'windows-1252' + assert lookup('latin-1') is None + assert lookup('LATİN1') is None # ASCII-only case insensitivity. + + +def test_all_labels(): + for label in LABELS: + assert decode(b'', label) == ('', lookup(label)) + assert encode('', label) == b'' + for repeat in [0, 1, 12]: + output, _ = iter_decode([b''] * repeat, label) + assert list(output) == [] + assert list(iter_encode([''] * repeat, label)) == [] + decoder = IncrementalDecoder(label) + assert decoder.decode(b'') == '' + assert decoder.decode(b'', final=True) == '' + encoder = IncrementalEncoder(label) + assert encoder.encode('') == b'' + assert encoder.encode('', final=True) == b'' + # All encoding names are valid labels too: + for name in set(LABELS.values()): + assert lookup(name).name == name + + +def test_invalid_label(): + assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid') + assert_raises(LookupError, encode, 'é', 'invalid') + assert_raises(LookupError, iter_decode, [], 'invalid') + assert_raises(LookupError, iter_encode, [], 'invalid') + assert_raises(LookupError, IncrementalDecoder, 'invalid') + assert_raises(LookupError, IncrementalEncoder, 'invalid') + + +def test_decode(): + assert decode(b'\x80', 'latin1') == ('€', lookup('latin1')) + assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1')) + assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8')) + assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8')) + assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii')) + assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM + + assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM + assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM + assert 
decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be')) + assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le')) + + assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be')) + assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le')) + assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le')) + + assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be')) + assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le')) + assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le')) + + +def test_encode(): + assert encode('é', 'latin1') == b'\xe9' + assert encode('é', 'utf8') == b'\xc3\xa9' + assert encode('é', 'utf8') == b'\xc3\xa9' + assert encode('é', 'utf-16') == b'\xe9\x00' + assert encode('é', 'utf-16le') == b'\xe9\x00' + assert encode('é', 'utf-16be') == b'\x00\xe9' + + +def test_iter_decode(): + def iter_decode_to_string(input, fallback_encoding): + output, _encoding = iter_decode(input, fallback_encoding) + return ''.join(output) + assert iter_decode_to_string([], 'latin1') == '' + assert iter_decode_to_string([b''], 'latin1') == '' + assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é' + assert iter_decode_to_string([b'hello'], 'latin1') == 'hello' + assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello' + assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello' + assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é' + assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é' + assert iter_decode_to_string([ + b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é' + assert iter_decode_to_string([ + b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD' + assert iter_decode_to_string([ + b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é' + assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == '' + assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»' + assert 
iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é' + assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é' + assert iter_decode_to_string([ + b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é' + assert iter_decode_to_string([ + b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo' + + +def test_iter_encode(): + assert b''.join(iter_encode([], 'latin1')) == b'' + assert b''.join(iter_encode([''], 'latin1')) == b'' + assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9' + assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9' + assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00' + assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00' + assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9' + assert b''.join(iter_encode([ + '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo' + + +def test_x_user_defined(): + encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca' + decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca' + encoded = b'aa' + decoded = 'aa' + assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined')) + assert encode(decoded, 'x-user-defined') == encoded diff --git a/Shared/lib/python3.4/site-packages/webencodings/x_user_defined.py b/Shared/lib/python3.4/site-packages/webencodings/x_user_defined.py new file mode 100644 index 0000000..d16e326 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/webencodings/x_user_defined.py @@ -0,0 +1,325 @@ +# coding: utf-8 +""" + + webencodings.x_user_defined + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + An implementation of the x-user-defined encoding. + + :copyright: Copyright 2012 by Simon Sapin + :license: BSD, see LICENSE for details. 
+ +""" + +from __future__ import unicode_literals + +import codecs + + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self, input, errors='strict'): + return codecs.charmap_encode(input, errors, encoding_table) + + def decode(self, input, errors='strict'): + return codecs.charmap_decode(input, errors, decoding_table) + + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input, self.errors, encoding_table)[0] + + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input, self.errors, decoding_table)[0] + + +class StreamWriter(Codec, codecs.StreamWriter): + pass + + +class StreamReader(Codec, codecs.StreamReader): + pass + + +### encodings module API + +codec_info = codecs.CodecInfo( + name='x-user-defined', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, +) + + +### Decoding Table + +# Python 3: +# for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700)) +decoding_table = ( + '\x00' + '\x01' + '\x02' + '\x03' + '\x04' + '\x05' + '\x06' + '\x07' + '\x08' + '\t' + '\n' + '\x0b' + '\x0c' + '\r' + '\x0e' + '\x0f' + '\x10' + '\x11' + '\x12' + '\x13' + '\x14' + '\x15' + '\x16' + '\x17' + '\x18' + '\x19' + '\x1a' + '\x1b' + '\x1c' + '\x1d' + '\x1e' + '\x1f' + ' ' + '!' + '"' + '#' + '$' + '%' + '&' + "'" + '(' + ')' + '*' + '+' + ',' + '-' + '.' + '/' + '0' + '1' + '2' + '3' + '4' + '5' + '6' + '7' + '8' + '9' + ':' + ';' + '<' + '=' + '>' + '?' 
+ '@' + 'A' + 'B' + 'C' + 'D' + 'E' + 'F' + 'G' + 'H' + 'I' + 'J' + 'K' + 'L' + 'M' + 'N' + 'O' + 'P' + 'Q' + 'R' + 'S' + 'T' + 'U' + 'V' + 'W' + 'X' + 'Y' + 'Z' + '[' + '\\' + ']' + '^' + '_' + '`' + 'a' + 'b' + 'c' + 'd' + 'e' + 'f' + 'g' + 'h' + 'i' + 'j' + 'k' + 'l' + 'm' + 'n' + 'o' + 'p' + 'q' + 'r' + 's' + 't' + 'u' + 'v' + 'w' + 'x' + 'y' + 'z' + '{' + '|' + '}' + '~' + '\x7f' + '\uf780' + '\uf781' + '\uf782' + '\uf783' + '\uf784' + '\uf785' + '\uf786' + '\uf787' + '\uf788' + '\uf789' + '\uf78a' + '\uf78b' + '\uf78c' + '\uf78d' + '\uf78e' + '\uf78f' + '\uf790' + '\uf791' + '\uf792' + '\uf793' + '\uf794' + '\uf795' + '\uf796' + '\uf797' + '\uf798' + '\uf799' + '\uf79a' + '\uf79b' + '\uf79c' + '\uf79d' + '\uf79e' + '\uf79f' + '\uf7a0' + '\uf7a1' + '\uf7a2' + '\uf7a3' + '\uf7a4' + '\uf7a5' + '\uf7a6' + '\uf7a7' + '\uf7a8' + '\uf7a9' + '\uf7aa' + '\uf7ab' + '\uf7ac' + '\uf7ad' + '\uf7ae' + '\uf7af' + '\uf7b0' + '\uf7b1' + '\uf7b2' + '\uf7b3' + '\uf7b4' + '\uf7b5' + '\uf7b6' + '\uf7b7' + '\uf7b8' + '\uf7b9' + '\uf7ba' + '\uf7bb' + '\uf7bc' + '\uf7bd' + '\uf7be' + '\uf7bf' + '\uf7c0' + '\uf7c1' + '\uf7c2' + '\uf7c3' + '\uf7c4' + '\uf7c5' + '\uf7c6' + '\uf7c7' + '\uf7c8' + '\uf7c9' + '\uf7ca' + '\uf7cb' + '\uf7cc' + '\uf7cd' + '\uf7ce' + '\uf7cf' + '\uf7d0' + '\uf7d1' + '\uf7d2' + '\uf7d3' + '\uf7d4' + '\uf7d5' + '\uf7d6' + '\uf7d7' + '\uf7d8' + '\uf7d9' + '\uf7da' + '\uf7db' + '\uf7dc' + '\uf7dd' + '\uf7de' + '\uf7df' + '\uf7e0' + '\uf7e1' + '\uf7e2' + '\uf7e3' + '\uf7e4' + '\uf7e5' + '\uf7e6' + '\uf7e7' + '\uf7e8' + '\uf7e9' + '\uf7ea' + '\uf7eb' + '\uf7ec' + '\uf7ed' + '\uf7ee' + '\uf7ef' + '\uf7f0' + '\uf7f1' + '\uf7f2' + '\uf7f3' + '\uf7f4' + '\uf7f5' + '\uf7f6' + '\uf7f7' + '\uf7f8' + '\uf7f9' + '\uf7fa' + '\uf7fb' + '\uf7fc' + '\uf7fd' + '\uf7fe' + '\uf7ff' +) + +### Encoding table +encoding_table = codecs.charmap_build(decoding_table) diff --git a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/DESCRIPTION.rst 
b/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/DESCRIPTION.rst deleted file mode 100644 index 07a9baf..0000000 --- a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,325 +0,0 @@ -Wheel -===== - -A built-package format for Python. - -A wheel is a ZIP-format archive with a specially formatted filename -and the .whl extension. It is designed to contain all the files for a -PEP 376 compatible install in a way that is very close to the on-disk -format. Many packages will be properly installed with only the "Unpack" -step (simply extracting the file onto sys.path), and the unpacked archive -preserves enough information to "Spread" (copy data and scripts to their -final locations) at any later time. - -The wheel project provides a `bdist_wheel` command for setuptools -(requires setuptools >= 0.8.0). Wheel files can be installed with a -newer `pip` from https://github.com/pypa/pip or with wheel's own command -line utility. - -The wheel documentation is at http://wheel.rtfd.org/. The file format -is documented in PEP 427 (http://www.python.org/dev/peps/pep-0427/). - -The reference implementation is at https://bitbucket.org/pypa/wheel - -Why not egg? ------------- - -Python's egg format predates the packaging related standards we have -today, the most important being PEP 376 "Database of Installed Python -Distributions" which specifies the .dist-info directory (instead of -.egg-info) and PEP 426 "Metadata for Python Software Packages 2.0" -which specifies how to express dependencies (instead of requires.txt -in .egg-info). - -Wheel implements these things. It also provides a richer file naming -convention that communicates the Python implementation and ABI as well -as simply the language version used in a particular package. - -Unlike .egg, wheel will be a fully-documented standard at the binary -level that is truly easy to install even if you do not want to use the -reference implementation. 
- - -Code of Conduct ---------------- - -Everyone interacting in the wheel project's codebases, issue trackers, chat -rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. - -.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - - - -0.29.0 -====== -- Fix compression type of files in archive (Issue #155, Pull Request #62, - thanks Xavier Fernandez) - -0.28.0 -====== -- Fix file modes in archive (Issue #154) - -0.27.0 -====== -- Support forcing a platform tag using `--plat-name` on pure-Python wheels, as - well as nonstandard platform tags on non-pure wheels (Pull Request #60, Issue - #144, thanks Andrés Díaz) -- Add SOABI tags to platform-specific wheels built for Python 2.X (Pull Request - #55, Issue #63, Issue #101) -- Support reproducible wheel files, wheels that can be rebuilt and will hash to - the same values as previous builds (Pull Request #52, Issue #143, thanks - Barry Warsaw) -- Support for changes in keyring >= 8.0 (Pull Request #61, thanks Jason R. - Coombs) -- Use the file context manager when checking if dependency_links.txt is empty, - fixes problems building wheels under PyPy on Windows (Issue #150, thanks - Cosimo Lupo) -- Don't attempt to (recursively) create a build directory ending with `..` - (invalid on all platforms, but code was only executed on Windows) (Issue #91) -- Added the PyPA Code of Conduct (Pull Request #56) - -0.26.0 -====== -- Fix multiple entrypoint comparison failure on Python 3 (Issue #148) - -0.25.0 -====== -- Add Python 3.5 to tox configuration -- Deterministic (sorted) metadata -- Fix tagging for Python 3.5 compatibility -- Support py2-none-'arch' and py3-none-'arch' tags -- Treat data-only wheels as pure -- Write to temporary file and rename when using wheel install --force - -0.24.0 -====== -- The python tag used for pure-python packages is now .pyN (major version - only). 
This change actually occurred in 0.23.0 when the --python-tag - option was added, but was not explicitly mentioned in the changelog then. -- wininst2wheel and egg2wheel removed. Use "wheel convert [archive]" - instead. -- Wheel now supports setuptools style conditional requirements via the - extras_require={} syntax. Separate 'extra' names from conditions using - the : character. Wheel's own setup.py does this. (The empty-string - extra is the same as install_requires.) These conditional requirements - should work the same whether the package is installed by wheel or - by setup.py. - -0.23.0 -====== -- Compatibility tag flags added to the bdist_wheel command -- sdist should include files necessary for tests -- 'wheel convert' can now also convert unpacked eggs to wheel -- Rename pydist.json to metadata.json to avoid stepping on the PEP -- The --skip-scripts option has been removed, and not generating scripts is now - the default. The option was a temporary approach until installers could - generate scripts themselves. That is now the case with pip 1.5 and later. - Note that using pip 1.4 to install a wheel without scripts will leave the - installation without entry-point wrappers. The "wheel install-scripts" - command can be used to generate the scripts in such cases. -- Thank you contributors - -0.22.0 -====== -- Include entry_points.txt, scripts a.k.a. commands, in experimental - pydist.json -- Improved test_requires parsing -- Python 2.6 fixes, "wheel version" command courtesy pombredanne - -0.21.0 -====== -- Pregenerated scripts are the default again. -- "setup.py bdist_wheel --skip-scripts" turns them off. -- setuptools is no longer a listed requirement for the 'wheel' - package. It is of course still required in order for bdist_wheel - to work. -- "python -m wheel" avoids importing pkg_resources until it's necessary. - -0.20.0 -====== -- No longer include console_scripts in wheels. 
Ordinary scripts (shell files, - standalone Python files) are included as usual. -- Include new command "python -m wheel install-scripts [distribution - [distribution ...]]" to install the console_scripts (setuptools-style - scripts using pkg_resources) for a distribution. - -0.19.0 -====== -- pymeta.json becomes pydist.json - -0.18.0 -====== -- Python 3 Unicode improvements - -0.17.0 -====== -- Support latest PEP-426 "pymeta.json" (json-format metadata) - -0.16.0 -====== -- Python 2.6 compatibility bugfix (thanks John McFarlane) -- Non-prerelease version number - -1.0.0a2 -======= -- Bugfix for C-extension tags for CPython 3.3 (using SOABI) - -1.0.0a1 -======= -- Bugfix for bdist_wininst converter "wheel convert" -- Bugfix for dists where "is pure" is None instead of True or False - -1.0.0a0 -======= -- Update for version 1.0 of Wheel (PEP accepted). -- Python 3 fix for moving Unicode Description to metadata body -- Include rudimentary API documentation in Sphinx (thanks Kevin Horn) - -0.15.0 -====== -- Various improvements - -0.14.0 -====== -- Changed the signature format to better comply with the current JWS spec. - Breaks all existing signatures. -- Include ``wheel unsign`` command to remove RECORD.jws from an archive. -- Put the description in the newly allowed payload section of PKG-INFO - (METADATA) files. - -0.13.0 -====== -- Use distutils instead of sysconfig to get installation paths; can install - headers. -- Improve WheelFile() sort. -- Allow bootstrap installs without any pkg_resources. - -0.12.0 -====== -- Unit test for wheel.tool.install - -0.11.0 -====== -- API cleanup - -0.10.3 -====== -- Scripts fixer fix - -0.10.2 -====== -- Fix keygen - -0.10.1 -====== -- Preserve attributes on install. - -0.10.0 -====== -- Include a copy of pkg_resources. 
Wheel can now install into a virtualenv - that does not have distribute (though most packages still require - pkg_resources to actually work; wheel install distribute) -- Define a new setup.cfg section [wheel]. universal=1 will - apply the py2.py3-none-any tag for pure python wheels. - -0.9.7 -===== -- Only import dirspec when needed. dirspec is only needed to find the - configuration for keygen/signing operations. - -0.9.6 -===== -- requires-dist from setup.cfg overwrites any requirements from setup.py - Care must be taken that the requirements are the same in both cases, - or just always install from wheel. -- drop dirspec requirement on win32 -- improved command line utility, adds 'wheel convert [egg or wininst]' to - convert legacy binary formats to wheel - -0.9.5 -===== -- Wheel's own wheel file can be executed by Python, and can install itself: - ``python wheel-0.9.5-py27-none-any/wheel install ...`` -- Use argparse; basic ``wheel install`` command should run with only stdlib - dependencies. -- Allow requires_dist in setup.cfg's [metadata] section. In addition to - dependencies in setup.py, but will only be interpreted when installing - from wheel, not from sdist. Can be qualified with environment markers. - -0.9.4 -===== -- Fix wheel.signatures in sdist - -0.9.3 -===== -- Integrated digital signatures support without C extensions. -- Integrated "wheel install" command (single package, no dependency - resolution) including compatibility check. 
-- Support Python 3.3 -- Use Metadata 1.3 (PEP 426) - -0.9.2 -===== -- Automatic signing if WHEEL_TOOL points to the wheel binary -- Even more Python 3 fixes - -0.9.1 -===== -- 'wheel sign' uses the keys generated by 'wheel keygen' (instead of generating - a new key at random each time) -- Python 2/3 encoding/decoding fixes -- Run tests on Python 2.6 (without signature verification) - -0.9 -=== -- Updated digital signatures scheme -- Python 3 support for digital signatures -- Always verify RECORD hashes on extract -- "wheel" command line tool to sign, verify, unpack wheel files - -0.8 -=== -- none/any draft pep tags update -- improved wininst2wheel script -- doc changes and other improvements - -0.7 -=== -- sort .dist-info at end of wheel archive -- Windows & Python 3 fixes from Paul Moore -- pep8 -- scripts to convert wininst & egg to wheel - -0.6 -=== -- require distribute >= 0.6.28 -- stop using verlib - -0.5 -=== -- working pretty well - -0.4.2 -===== -- hyphenated name fix - -0.4 -=== -- improve test coverage -- improve Windows compatibility -- include tox.ini courtesy of Marc Abramowitz -- draft hmac sha-256 signing function - -0.3 -=== -- prototype egg2wheel conversion script - -0.2 -=== -- Python 3 compatibility - -0.1 -=== -- Initial version - - diff --git a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/METADATA b/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/METADATA deleted file mode 100644 index 8962475..0000000 --- a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/METADATA +++ /dev/null @@ -1,357 +0,0 @@ -Metadata-Version: 2.0 -Name: wheel -Version: 0.29.0 -Summary: A built-package format for Python. 
-Home-page: https://bitbucket.org/pypa/wheel/ -Author: Daniel Holth -Author-email: dholth@fastmail.fm -License: MIT -Keywords: wheel,packaging -Platform: UNKNOWN -Classifier: Development Status :: 4 - Beta -Classifier: Intended Audience :: Developers -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Requires-Dist: argparse; python_version=="2.6" -Provides-Extra: faster-signatures -Requires-Dist: ed25519ll; extra == 'faster-signatures' -Provides-Extra: signatures -Requires-Dist: keyring; extra == 'signatures' -Requires-Dist: keyrings.alt; extra == 'signatures' -Provides-Extra: signatures -Requires-Dist: importlib; python_version=="2.6" and extra == 'signatures' -Provides-Extra: signatures -Requires-Dist: pyxdg; sys_platform!="win32" and extra == 'signatures' -Provides-Extra: tool - -Wheel -===== - -A built-package format for Python. - -A wheel is a ZIP-format archive with a specially formatted filename -and the .whl extension. It is designed to contain all the files for a -PEP 376 compatible install in a way that is very close to the on-disk -format. Many packages will be properly installed with only the "Unpack" -step (simply extracting the file onto sys.path), and the unpacked archive -preserves enough information to "Spread" (copy data and scripts to their -final locations) at any later time. - -The wheel project provides a `bdist_wheel` command for setuptools -(requires setuptools >= 0.8.0). Wheel files can be installed with a -newer `pip` from https://github.com/pypa/pip or with wheel's own command -line utility. - -The wheel documentation is at http://wheel.rtfd.org/. 
The file format -is documented in PEP 427 (http://www.python.org/dev/peps/pep-0427/). - -The reference implementation is at https://bitbucket.org/pypa/wheel - -Why not egg? ------------- - -Python's egg format predates the packaging related standards we have -today, the most important being PEP 376 "Database of Installed Python -Distributions" which specifies the .dist-info directory (instead of -.egg-info) and PEP 426 "Metadata for Python Software Packages 2.0" -which specifies how to express dependencies (instead of requires.txt -in .egg-info). - -Wheel implements these things. It also provides a richer file naming -convention that communicates the Python implementation and ABI as well -as simply the language version used in a particular package. - -Unlike .egg, wheel will be a fully-documented standard at the binary -level that is truly easy to install even if you do not want to use the -reference implementation. - - -Code of Conduct ---------------- - -Everyone interacting in the wheel project's codebases, issue trackers, chat -rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_. - -.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/ - - - -0.29.0 -====== -- Fix compression type of files in archive (Issue #155, Pull Request #62, - thanks Xavier Fernandez) - -0.28.0 -====== -- Fix file modes in archive (Issue #154) - -0.27.0 -====== -- Support forcing a platform tag using `--plat-name` on pure-Python wheels, as - well as nonstandard platform tags on non-pure wheels (Pull Request #60, Issue - #144, thanks Andrés Díaz) -- Add SOABI tags to platform-specific wheels built for Python 2.X (Pull Request - #55, Issue #63, Issue #101) -- Support reproducible wheel files, wheels that can be rebuilt and will hash to - the same values as previous builds (Pull Request #52, Issue #143, thanks - Barry Warsaw) -- Support for changes in keyring >= 8.0 (Pull Request #61, thanks Jason R. 
- Coombs) -- Use the file context manager when checking if dependency_links.txt is empty, - fixes problems building wheels under PyPy on Windows (Issue #150, thanks - Cosimo Lupo) -- Don't attempt to (recursively) create a build directory ending with `..` - (invalid on all platforms, but code was only executed on Windows) (Issue #91) -- Added the PyPA Code of Conduct (Pull Request #56) - -0.26.0 -====== -- Fix multiple entrypoint comparison failure on Python 3 (Issue #148) - -0.25.0 -====== -- Add Python 3.5 to tox configuration -- Deterministic (sorted) metadata -- Fix tagging for Python 3.5 compatibility -- Support py2-none-'arch' and py3-none-'arch' tags -- Treat data-only wheels as pure -- Write to temporary file and rename when using wheel install --force - -0.24.0 -====== -- The python tag used for pure-python packages is now .pyN (major version - only). This change actually occurred in 0.23.0 when the --python-tag - option was added, but was not explicitly mentioned in the changelog then. -- wininst2wheel and egg2wheel removed. Use "wheel convert [archive]" - instead. -- Wheel now supports setuptools style conditional requirements via the - extras_require={} syntax. Separate 'extra' names from conditions using - the : character. Wheel's own setup.py does this. (The empty-string - extra is the same as install_requires.) These conditional requirements - should work the same whether the package is installed by wheel or - by setup.py. - -0.23.0 -====== -- Compatibility tag flags added to the bdist_wheel command -- sdist should include files necessary for tests -- 'wheel convert' can now also convert unpacked eggs to wheel -- Rename pydist.json to metadata.json to avoid stepping on the PEP -- The --skip-scripts option has been removed, and not generating scripts is now - the default. The option was a temporary approach until installers could - generate scripts themselves. That is now the case with pip 1.5 and later. 
- Note that using pip 1.4 to install a wheel without scripts will leave the - installation without entry-point wrappers. The "wheel install-scripts" - command can be used to generate the scripts in such cases. -- Thank you contributors - -0.22.0 -====== -- Include entry_points.txt, scripts a.k.a. commands, in experimental - pydist.json -- Improved test_requires parsing -- Python 2.6 fixes, "wheel version" command courtesy pombredanne - -0.21.0 -====== -- Pregenerated scripts are the default again. -- "setup.py bdist_wheel --skip-scripts" turns them off. -- setuptools is no longer a listed requirement for the 'wheel' - package. It is of course still required in order for bdist_wheel - to work. -- "python -m wheel" avoids importing pkg_resources until it's necessary. - -0.20.0 -====== -- No longer include console_scripts in wheels. Ordinary scripts (shell files, - standalone Python files) are included as usual. -- Include new command "python -m wheel install-scripts [distribution - [distribution ...]]" to install the console_scripts (setuptools-style - scripts using pkg_resources) for a distribution. - -0.19.0 -====== -- pymeta.json becomes pydist.json - -0.18.0 -====== -- Python 3 Unicode improvements - -0.17.0 -====== -- Support latest PEP-426 "pymeta.json" (json-format metadata) - -0.16.0 -====== -- Python 2.6 compatibility bugfix (thanks John McFarlane) -- Non-prerelease version number - -1.0.0a2 -======= -- Bugfix for C-extension tags for CPython 3.3 (using SOABI) - -1.0.0a1 -======= -- Bugfix for bdist_wininst converter "wheel convert" -- Bugfix for dists where "is pure" is None instead of True or False - -1.0.0a0 -======= -- Update for version 1.0 of Wheel (PEP accepted). -- Python 3 fix for moving Unicode Description to metadata body -- Include rudimentary API documentation in Sphinx (thanks Kevin Horn) - -0.15.0 -====== -- Various improvements - -0.14.0 -====== -- Changed the signature format to better comply with the current JWS spec. 
- Breaks all existing signatures. -- Include ``wheel unsign`` command to remove RECORD.jws from an archive. -- Put the description in the newly allowed payload section of PKG-INFO - (METADATA) files. - -0.13.0 -====== -- Use distutils instead of sysconfig to get installation paths; can install - headers. -- Improve WheelFile() sort. -- Allow bootstrap installs without any pkg_resources. - -0.12.0 -====== -- Unit test for wheel.tool.install - -0.11.0 -====== -- API cleanup - -0.10.3 -====== -- Scripts fixer fix - -0.10.2 -====== -- Fix keygen - -0.10.1 -====== -- Preserve attributes on install. - -0.10.0 -====== -- Include a copy of pkg_resources. Wheel can now install into a virtualenv - that does not have distribute (though most packages still require - pkg_resources to actually work; wheel install distribute) -- Define a new setup.cfg section [wheel]. universal=1 will - apply the py2.py3-none-any tag for pure python wheels. - -0.9.7 -===== -- Only import dirspec when needed. dirspec is only needed to find the - configuration for keygen/signing operations. - -0.9.6 -===== -- requires-dist from setup.cfg overwrites any requirements from setup.py - Care must be taken that the requirements are the same in both cases, - or just always install from wheel. -- drop dirspec requirement on win32 -- improved command line utility, adds 'wheel convert [egg or wininst]' to - convert legacy binary formats to wheel - -0.9.5 -===== -- Wheel's own wheel file can be executed by Python, and can install itself: - ``python wheel-0.9.5-py27-none-any/wheel install ...`` -- Use argparse; basic ``wheel install`` command should run with only stdlib - dependencies. -- Allow requires_dist in setup.cfg's [metadata] section. In addition to - dependencies in setup.py, but will only be interpreted when installing - from wheel, not from sdist. Can be qualified with environment markers. 
- -0.9.4 -===== -- Fix wheel.signatures in sdist - -0.9.3 -===== -- Integrated digital signatures support without C extensions. -- Integrated "wheel install" command (single package, no dependency - resolution) including compatibility check. -- Support Python 3.3 -- Use Metadata 1.3 (PEP 426) - -0.9.2 -===== -- Automatic signing if WHEEL_TOOL points to the wheel binary -- Even more Python 3 fixes - -0.9.1 -===== -- 'wheel sign' uses the keys generated by 'wheel keygen' (instead of generating - a new key at random each time) -- Python 2/3 encoding/decoding fixes -- Run tests on Python 2.6 (without signature verification) - -0.9 -=== -- Updated digital signatures scheme -- Python 3 support for digital signatures -- Always verify RECORD hashes on extract -- "wheel" command line tool to sign, verify, unpack wheel files - -0.8 -=== -- none/any draft pep tags update -- improved wininst2wheel script -- doc changes and other improvements - -0.7 -=== -- sort .dist-info at end of wheel archive -- Windows & Python 3 fixes from Paul Moore -- pep8 -- scripts to convert wininst & egg to wheel - -0.6 -=== -- require distribute >= 0.6.28 -- stop using verlib - -0.5 -=== -- working pretty well - -0.4.2 -===== -- hyphenated name fix - -0.4 -=== -- improve test coverage -- improve Windows compatibility -- include tox.ini courtesy of Marc Abramowitz -- draft hmac sha-256 signing function - -0.3 -=== -- prototype egg2wheel conversion script - -0.2 -=== -- Python 3 compatibility - -0.1 -=== -- Initial version - - diff --git a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/RECORD deleted file mode 100644 index cbdf946..0000000 --- a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/RECORD +++ /dev/null @@ -1,81 +0,0 @@ -wheel/__init__.py,sha256=YumT_ajakW9VAgnV3umrUYypy6VzpbLKE-OPbVnWm8M,96 -wheel/__main__.py,sha256=LbDDGId08qTxuhLYqX5mXO1MUs9D-Hhxb5NVc53ej0k,416 
-wheel/archive.py,sha256=eGyE04hV52JjU3KulISGPqQB340uDwIVwBcJkghkxx4,2286 -wheel/bdist_wheel.py,sha256=4QOgk1c66zu045XjET9Enz4ODW89LlHzJXukbIft_yY,17441 -wheel/decorator.py,sha256=U2K77ZZ8x3x5vSIGCcEeh8GAxB6rABB7AlDwRukaoCk,541 -wheel/egg2wheel.py,sha256=_JNueL6ZcWOxiPdL1r71fB9Mwuzmln4cZOIf_gA0Nc4,2633 -wheel/eggnames.txt,sha256=X6LYsOjMd8llrzLo3SB3FwJ-uN9IskJqYKJDq2zIcbs,2490 -wheel/install.py,sha256=yzPyjCAUmIgPTk_be-tbXFOJ3m08kYt3T3Tf1R8Lmh0,18070 -wheel/metadata.py,sha256=ttwI-jwjN5YnmDFbfLR4mFKod4HSnd1tje8lK4rQNqc,11050 -wheel/paths.py,sha256=6AmG-MKx-NeJOC9zUJoSSZjYhZYGmX1UHG_N0IbkplI,1130 -wheel/pep425tags.py,sha256=vbazM-mj7u-8s-YauwSykBhXM_YreCb8mG-eZyz2vl4,5341 -wheel/pkginfo.py,sha256=-gLOTuQrkRf4geOD04qm0IUkdYAbjg81j-5zNtvWA9A,1225 -wheel/util.py,sha256=Pe2JZ9grNjmAcRRJay20FlDVJUbePWaR5ltySo3c6zQ,4890 -wheel/wininst2wheel.py,sha256=_cTbf8bcAt481G6tYRQsOUT9ZGeg3artyrS0tBU9Tzs,6961 -wheel/signatures/__init__.py,sha256=kZpKNsmxBDZhuXf85_uyy5qBH40AxVT_0utbAiTp_yg,3779 -wheel/signatures/djbec.py,sha256=53HTnlNlfqOHFDf8OY_6KL8sxR4swiLsWhUjtw3W3nI,6755 -wheel/signatures/ed25519py.py,sha256=SeTxiMZ7kmoMdIurBSaKb8Ku-amGnf6ZTRGElLzV8iI,1695 -wheel/signatures/keys.py,sha256=x3g4sAcs7KbIHM-5V8KWdMc24_VK7VeD-pjCyktNnYo,3320 -wheel/test/__init__.py,sha256=M0NZuQ7-112l8K2h1eayVvSmvQrufrOcD5AYKgIf_Is,1 -wheel/test/pydist-schema.json,sha256=ynEvNvThC1zRa7FioMsW3k-9nl98ytEoo1_3xbOP2eo,11483 -wheel/test/test-1.0-py2.py3-none-win32.whl,sha256=tCbefJJ7RpQJReRQaSRiwnTDM-YDlBpbcX9Rjcv9bf4,5224 -wheel/test/test_basic.py,sha256=2DIvjApcshiLpXVsEhXvN3l62ZrwS0jJcWK8SyASoNU,6405 -wheel/test/test_install.py,sha256=c0EECXPkVIGhCD9V5ad2qsBPRPYb1ehhaS0k6Gv5JQc,1866 -wheel/test/test_keys.py,sha256=5mBc9tf2TwC3TCpx1ySTYsCe5yvd6kMK64AlUUCcKEY,2575 -wheel/test/test_paths.py,sha256=-QOVUDFJIpF9OPNRzCJ-Xf4nBibKlUe_g4aaZ6sm3wE,172 -wheel/test/test_ranking.py,sha256=FSAQX4oHZ476jLddqVMlGmQFtbEel9a8SzOngJ03TJw,1496 
-wheel/test/test_signatures.py,sha256=Z4REXj62p28gbSDB2D_OxopA-TTXICTZ5e2yZ3ejEVc,1120 -wheel/test/test_tagopt.py,sha256=t7A-iRbe3bH2Iz6NKdSEOpFFIFSF9I4ATmlrRBXoCcQ,5927 -wheel/test/test_tool.py,sha256=yt5dAr8mp51WoDzt0MmlwPk0xf7FvXAedy-YlNZXv1I,656 -wheel/test/test_wheelfile.py,sha256=x4exzQYuQB48YHqhU2NZyN76k-BSK6784-7rye0q6Ss,4585 -wheel/test/complex-dist/setup.py,sha256=4i1_AJoJxo4i6ik-mvydo23AF8BHzjmAEQYa5J4YPK4,855 -wheel/test/complex-dist/complexdist/__init__.py,sha256=PGDJWQTxjLXqnNrbqmTKK_yk6DVQBNeRp-YpP7w1rVk,23 -wheel/test/headers.dist/header.h,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wheel/test/headers.dist/headersdist.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wheel/test/headers.dist/setup.py,sha256=p3d9TGy7NLX6TnkBHnNHzedqYoOkdGQvwheyDQjf-JQ,324 -wheel/test/simple.dist/setup.py,sha256=8zWen71Um-iN_A5thot6VFogrkWs_RGVO-jr_MxkFog,383 -wheel/test/simple.dist/simpledist/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -wheel/tool/__init__.py,sha256=anweXjmIg4EIHPkH0kOgcTx-gyOmzE4ieRe2yk-aHDA,13229 -wheel-0.29.0.dist-info/DESCRIPTION.rst,sha256=JH6mogUIatQVQewIh4GB1ywCxuWbm7G4TjI_63dURp8,9813 -wheel-0.29.0.dist-info/METADATA,sha256=SA310hLnZJJFgp1TRwFLCIiXurVwKpIq2w3KWhMdgdo,11019 -wheel-0.29.0.dist-info/RECORD,, -wheel-0.29.0.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 -wheel-0.29.0.dist-info/entry_points.txt,sha256=2LFQDKAUKNMG-2zNtbLscfirPr9BEqBuwc-JALCv-D0,107 -wheel-0.29.0.dist-info/metadata.json,sha256=dxlCIm4231kQk4VlVdiN5ABz3l0nWY3gQ9HPoDammlU,1510 -wheel-0.29.0.dist-info/top_level.txt,sha256=HxSBIbgEstMPe4eFawhA66Mq-QYHMopXVoAncfjb_1c,6 -../../../bin/wheel,sha256=7tQwd7-yYmSKP07JBIwhFWPH68YseSftvBL19P0Ct5Y,277 -wheel-0.29.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -wheel/test/__pycache__/test_tool.cpython-34.pyc,, -wheel/test/headers.dist/__pycache__/headersdist.cpython-34.pyc,, -wheel/test/__pycache__/test_signatures.cpython-34.pyc,, 
-wheel/test/__pycache__/__init__.cpython-34.pyc,, -wheel/test/complex-dist/__pycache__/setup.cpython-34.pyc,, -wheel/__pycache__/wininst2wheel.cpython-34.pyc,, -wheel/test/__pycache__/test_tagopt.cpython-34.pyc,, -wheel/test/__pycache__/test_install.cpython-34.pyc,, -wheel/__pycache__/metadata.cpython-34.pyc,, -wheel/__pycache__/archive.cpython-34.pyc,, -wheel/test/simple.dist/simpledist/__pycache__/__init__.cpython-34.pyc,, -wheel/test/__pycache__/test_paths.cpython-34.pyc,, -wheel/test/__pycache__/test_ranking.cpython-34.pyc,, -wheel/__pycache__/__main__.cpython-34.pyc,, -wheel/test/__pycache__/test_keys.cpython-34.pyc,, -wheel/__pycache__/pkginfo.cpython-34.pyc,, -wheel/signatures/__pycache__/djbec.cpython-34.pyc,, -wheel/signatures/__pycache__/__init__.cpython-34.pyc,, -wheel/test/complex-dist/complexdist/__pycache__/__init__.cpython-34.pyc,, -wheel/signatures/__pycache__/ed25519py.cpython-34.pyc,, -wheel/__pycache__/paths.cpython-34.pyc,, -wheel/__pycache__/__init__.cpython-34.pyc,, -wheel/test/headers.dist/__pycache__/setup.cpython-34.pyc,, -wheel/__pycache__/egg2wheel.cpython-34.pyc,, -wheel/test/__pycache__/test_wheelfile.cpython-34.pyc,, -wheel/tool/__pycache__/__init__.cpython-34.pyc,, -wheel/__pycache__/decorator.cpython-34.pyc,, -wheel/test/__pycache__/test_basic.cpython-34.pyc,, -wheel/__pycache__/util.cpython-34.pyc,, -wheel/__pycache__/bdist_wheel.cpython-34.pyc,, -wheel/signatures/__pycache__/keys.cpython-34.pyc,, -wheel/__pycache__/install.cpython-34.pyc,, -wheel/__pycache__/pep425tags.cpython-34.pyc,, -wheel/test/simple.dist/__pycache__/setup.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/WHEEL deleted file mode 100644 index 8b6dd1b..0000000 --- a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: 
py3-none-any - diff --git a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/entry_points.txt b/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/entry_points.txt deleted file mode 100644 index f57b8c0..0000000 --- a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/entry_points.txt +++ /dev/null @@ -1,5 +0,0 @@ -[console_scripts] -wheel = wheel.tool:main - -[distutils.commands] -bdist_wheel = wheel.bdist_wheel:bdist_wheel \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/metadata.json deleted file mode 100644 index 1a42ef9..0000000 --- a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4"], "extensions": {"python.commands": {"wrap_console": {"wheel": "wheel.tool:main"}}, "python.details": {"contacts": [{"email": "dholth@fastmail.fm", "name": "Daniel Holth", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://bitbucket.org/pypa/wheel/"}}, "python.exports": {"console_scripts": {"wheel": "wheel.tool:main"}, "distutils.commands": {"bdist_wheel": "wheel.bdist_wheel:bdist_wheel"}}}, "extras": ["faster-signatures", "signatures", "tool"], "generator": "bdist_wheel (0.29.0)", "keywords": ["wheel", "packaging"], "license": "MIT", "metadata_version": "2.0", "name": "wheel", "run_requires": [{"extra": "faster-signatures", "requires": ["ed25519ll"]}, {"extra": "signatures", "requires": ["keyring", "keyrings.alt"]}, {"environment": 
"python_version==\"2.6\"", "requires": ["argparse"]}, {"environment": "python_version==\"2.6\"", "extra": "signatures", "requires": ["importlib"]}, {"environment": "sys_platform!=\"win32\"", "extra": "signatures", "requires": ["pyxdg"]}], "summary": "A built-package format for Python.", "version": "0.29.0"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/top_level.txt deleted file mode 100644 index 2309722..0000000 --- a/Shared/lib/python3.4/site-packages/wheel-0.29.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -wheel diff --git a/Shared/lib/python3.4/site-packages/wheel/__init__.py b/Shared/lib/python3.4/site-packages/wheel/__init__.py deleted file mode 100644 index be2453a..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# __variables__ with double-quoted values will be available in setup.py: -__version__ = "0.29.0" diff --git a/Shared/lib/python3.4/site-packages/wheel/__main__.py b/Shared/lib/python3.4/site-packages/wheel/__main__.py deleted file mode 100644 index 889359c..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/__main__.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -Wheel command line tool (enable python -m wheel syntax) -""" - -import sys - -def main(): # needed for console script - if __package__ == '': - # To be able to run 'python wheel-0.9.whl/wheel': - import os.path - path = os.path.dirname(os.path.dirname(__file__)) - sys.path[0:0] = [path] - import wheel.tool - sys.exit(wheel.tool.main()) - -if __name__ == "__main__": - sys.exit(main()) diff --git a/Shared/lib/python3.4/site-packages/wheel/archive.py b/Shared/lib/python3.4/site-packages/wheel/archive.py deleted file mode 100644 index f928e6a..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/archive.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Archive tools for wheel. 
-""" - -import os -import time -import logging -import os.path -import zipfile - -log = logging.getLogger("wheel") - - -def archive_wheelfile(base_name, base_dir): - '''Archive all files under `base_dir` in a whl file and name it like - `base_name`. - ''' - olddir = os.path.abspath(os.curdir) - base_name = os.path.abspath(base_name) - try: - os.chdir(base_dir) - return make_wheelfile_inner(base_name) - finally: - os.chdir(olddir) - - -def make_wheelfile_inner(base_name, base_dir='.'): - """Create a whl file from all the files under 'base_dir'. - - Places .dist-info at the end of the archive.""" - - zip_filename = base_name + ".whl" - - log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) - - # Some applications need reproducible .whl files, but they can't do this - # without forcing the timestamp of the individual ZipInfo objects. See - # issue #143. - timestamp = os.environ.get('SOURCE_DATE_EPOCH') - if timestamp is None: - date_time = None - else: - date_time = time.gmtime(int(timestamp))[0:6] - - # XXX support bz2, xz when available - zip = zipfile.ZipFile(open(zip_filename, "wb+"), "w", - compression=zipfile.ZIP_DEFLATED) - - score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3} - deferred = [] - - def writefile(path, date_time): - st = os.stat(path) - if date_time is None: - mtime = time.gmtime(st.st_mtime) - date_time = mtime[0:6] - zinfo = zipfile.ZipInfo(path, date_time) - zinfo.external_attr = st.st_mode << 16 - zinfo.compress_type = zipfile.ZIP_DEFLATED - with open(path, 'rb') as fp: - zip.writestr(zinfo, fp.read()) - log.info("adding '%s'" % path) - - for dirpath, dirnames, filenames in os.walk(base_dir): - for name in filenames: - path = os.path.normpath(os.path.join(dirpath, name)) - - if os.path.isfile(path): - if dirpath.endswith('.dist-info'): - deferred.append((score.get(name, 0), path)) - else: - writefile(path, date_time) - - deferred.sort() - for score, path in deferred: - writefile(path, date_time) - - zip.close() - - return 
zip_filename diff --git a/Shared/lib/python3.4/site-packages/wheel/bdist_wheel.py b/Shared/lib/python3.4/site-packages/wheel/bdist_wheel.py deleted file mode 100644 index 90db748..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/bdist_wheel.py +++ /dev/null @@ -1,453 +0,0 @@ -""" -Create a wheel (.whl) distribution. - -A wheel is a built archive format. -""" - -import csv -import hashlib -import os -import subprocess -import warnings -import shutil -import json -import wheel - -try: - import sysconfig -except ImportError: # pragma nocover - # Python < 2.7 - import distutils.sysconfig as sysconfig - -import pkg_resources - -safe_name = pkg_resources.safe_name -safe_version = pkg_resources.safe_version - -from shutil import rmtree -from email.generator import Generator - -from distutils.util import get_platform -from distutils.core import Command -from distutils.sysconfig import get_python_version - -from distutils import log as logger - -from .pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag -from .util import native, open_for_csv -from .archive import archive_wheelfile -from .pkginfo import read_pkg_info, write_pkg_info -from .metadata import pkginfo_to_dict -from . 
import pep425tags, metadata - -def safer_name(name): - return safe_name(name).replace('-', '_') - -def safer_version(version): - return safe_version(version).replace('-', '_') - -class bdist_wheel(Command): - - description = 'create a wheel distribution' - - user_options = [('bdist-dir=', 'b', - "temporary directory for creating the distribution"), - ('plat-name=', 'p', - "platform name to embed in generated filenames " - "(default: %s)" % get_platform()), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('dist-dir=', 'd', - "directory to put final built distributions in"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ('relative', None, - "build the archive using relative paths" - "(default: false)"), - ('owner=', 'u', - "Owner name used when creating a tar file" - " [default: current user]"), - ('group=', 'g', - "Group name used when creating a tar file" - " [default: current group]"), - ('universal', None, - "make a universal wheel" - " (default: false)"), - ('python-tag=', None, - "Python implementation compatibility tag" - " (default: py%s)" % get_impl_ver()[0]), - ] - - boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal'] - - def initialize_options(self): - self.bdist_dir = None - self.data_dir = None - self.plat_name = None - self.plat_tag = None - self.format = 'zip' - self.keep_temp = False - self.dist_dir = None - self.distinfo_dir = None - self.egginfo_dir = None - self.root_is_pure = None - self.skip_build = None - self.relative = False - self.owner = None - self.group = None - self.universal = False - self.python_tag = 'py' + get_impl_ver()[0] - self.plat_name_supplied = False - - def finalize_options(self): - if self.bdist_dir is None: - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'wheel') - - self.data_dir = self.wheel_dist_name + '.data' - self.plat_name_supplied = 
self.plat_name is not None - - need_options = ('dist_dir', 'plat_name', 'skip_build') - - self.set_undefined_options('bdist', - *zip(need_options, need_options)) - - self.root_is_pure = not (self.distribution.has_ext_modules() - or self.distribution.has_c_libraries()) - - # Support legacy [wheel] section for setting universal - wheel = self.distribution.get_option_dict('wheel') - if 'universal' in wheel: - # please don't define this in your global configs - val = wheel['universal'][1].strip() - if val.lower() in ('1', 'true', 'yes'): - self.universal = True - - @property - def wheel_dist_name(self): - """Return distribution full name with - replaced with _""" - return '-'.join((safer_name(self.distribution.get_name()), - safer_version(self.distribution.get_version()))) - - def get_tag(self): - # bdist sets self.plat_name if unset, we should only use it for purepy - # wheels if the user supplied it. - if self.plat_name_supplied: - plat_name = self.plat_name - elif self.root_is_pure: - plat_name = 'any' - else: - plat_name = self.plat_name or get_platform() - plat_name = plat_name.replace('-', '_').replace('.', '_') - - if self.root_is_pure: - if self.universal: - impl = 'py2.py3' - else: - impl = self.python_tag - tag = (impl, 'none', plat_name) - else: - impl_name = get_abbr_impl() - impl_ver = get_impl_ver() - # PEP 3149 - abi_tag = str(get_abi_tag()).lower() - tag = (impl_name + impl_ver, abi_tag, plat_name) - supported_tags = pep425tags.get_supported( - supplied_platform=plat_name if self.plat_name_supplied else None) - # XXX switch to this alternate implementation for non-pure: - assert tag == supported_tags[0] - return tag - - def get_archive_basename(self): - """Return archive name without extension""" - - impl_tag, abi_tag, plat_tag = self.get_tag() - - archive_basename = "%s-%s-%s-%s" % ( - self.wheel_dist_name, - impl_tag, - abi_tag, - plat_tag) - return archive_basename - - def run(self): - build_scripts = self.reinitialize_command('build_scripts') - 
build_scripts.executable = 'python' - - if not self.skip_build: - self.run_command('build') - - install = self.reinitialize_command('install', - reinit_subcommands=True) - install.root = self.bdist_dir - install.compile = False - install.skip_build = self.skip_build - install.warn_dir = False - - # A wheel without setuptools scripts is more cross-platform. - # Use the (undocumented) `no_ep` option to setuptools' - # install_scripts command to avoid creating entry point scripts. - install_scripts = self.reinitialize_command('install_scripts') - install_scripts.no_ep = True - - # Use a custom scheme for the archive, because we have to decide - # at installation time which scheme to use. - for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'): - setattr(install, - 'install_' + key, - os.path.join(self.data_dir, key)) - - basedir_observed = '' - - if os.name == 'nt': - # win32 barfs if any of these are ''; could be '.'? - # (distutils.command.install:change_roots bug) - basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..')) - self.install_libbase = self.install_lib = basedir_observed - - setattr(install, - 'install_purelib' if self.root_is_pure else 'install_platlib', - basedir_observed) - - logger.info("installing to %s", self.bdist_dir) - - self.run_command('install') - - archive_basename = self.get_archive_basename() - - pseudoinstall_root = os.path.join(self.dist_dir, archive_basename) - if not self.relative: - archive_root = self.bdist_dir - else: - archive_root = os.path.join( - self.bdist_dir, - self._ensure_relative(install.install_base)) - - self.set_undefined_options( - 'install_egg_info', ('target', 'egginfo_dir')) - self.distinfo_dir = os.path.join(self.bdist_dir, - '%s.dist-info' % self.wheel_dist_name) - self.egg2dist(self.egginfo_dir, - self.distinfo_dir) - - self.write_wheelfile(self.distinfo_dir) - - self.write_record(self.bdist_dir, self.distinfo_dir) - - # Make the archive - if not os.path.exists(self.dist_dir): - 
os.makedirs(self.dist_dir) - wheel_name = archive_wheelfile(pseudoinstall_root, archive_root) - - # Sign the archive - if 'WHEEL_TOOL' in os.environ: - subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name]) - - # Add to 'Distribution.dist_files' so that the "upload" command works - getattr(self.distribution, 'dist_files', []).append( - ('bdist_wheel', get_python_version(), wheel_name)) - - if not self.keep_temp: - if self.dry_run: - logger.info('removing %s', self.bdist_dir) - else: - rmtree(self.bdist_dir) - - def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel.__version__ + ')'): - from email.message import Message - msg = Message() - msg['Wheel-Version'] = '1.0' # of the spec - msg['Generator'] = generator - msg['Root-Is-Purelib'] = str(self.root_is_pure).lower() - - # Doesn't work for bdist_wininst - impl_tag, abi_tag, plat_tag = self.get_tag() - for impl in impl_tag.split('.'): - for abi in abi_tag.split('.'): - for plat in plat_tag.split('.'): - msg['Tag'] = '-'.join((impl, abi, plat)) - - wheelfile_path = os.path.join(wheelfile_base, 'WHEEL') - logger.info('creating %s', wheelfile_path) - with open(wheelfile_path, 'w') as f: - Generator(f, maxheaderlen=0).flatten(msg) - - def _ensure_relative(self, path): - # copied from dir_util, deleted - drive, path = os.path.splitdrive(path) - if path[0:1] == os.sep: - path = drive + path[1:] - return path - - def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path): - return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path) - - def license_file(self): - """Return license filename from a license-file key in setup.cfg, or None.""" - metadata = self.distribution.get_option_dict('metadata') - if not 'license_file' in metadata: - return None - return metadata['license_file'][1] - - def setupcfg_requirements(self): - """Generate requirements from setup.cfg as - ('Requires-Dist', 'requirement; qualifier') tuples. 
From a metadata - section in setup.cfg: - - [metadata] - provides-extra = extra1 - extra2 - requires-dist = requirement; qualifier - another; qualifier2 - unqualified - - Yields - - ('Provides-Extra', 'extra1'), - ('Provides-Extra', 'extra2'), - ('Requires-Dist', 'requirement; qualifier'), - ('Requires-Dist', 'another; qualifier2'), - ('Requires-Dist', 'unqualified') - """ - metadata = self.distribution.get_option_dict('metadata') - - # our .ini parser folds - to _ in key names: - for key, title in (('provides_extra', 'Provides-Extra'), - ('requires_dist', 'Requires-Dist')): - if not key in metadata: - continue - field = metadata[key] - for line in field[1].splitlines(): - line = line.strip() - if not line: - continue - yield (title, line) - - def add_requirements(self, metadata_path): - """Add additional requirements from setup.cfg to file metadata_path""" - additional = list(self.setupcfg_requirements()) - if not additional: return - pkg_info = read_pkg_info(metadata_path) - if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info: - warnings.warn('setup.cfg requirements overwrite values from setup.py') - del pkg_info['Provides-Extra'] - del pkg_info['Requires-Dist'] - for k, v in additional: - pkg_info[k] = v - write_pkg_info(metadata_path, pkg_info) - - def egg2dist(self, egginfo_path, distinfo_path): - """Convert an .egg-info directory into a .dist-info directory""" - def adios(p): - """Appropriately delete directory, file or link.""" - if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p): - shutil.rmtree(p) - elif os.path.exists(p): - os.unlink(p) - - adios(distinfo_path) - - if not os.path.exists(egginfo_path): - # There is no egg-info. This is probably because the egg-info - # file/directory is not named matching the distribution name used - # to name the archive file. Check for this case and report - # accordingly. 
- import glob - pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info') - possible = glob.glob(pat) - err = "Egg metadata expected at %s but not found" % (egginfo_path,) - if possible: - alt = os.path.basename(possible[0]) - err += " (%s found - possible misnamed archive file?)" % (alt,) - - raise ValueError(err) - - if os.path.isfile(egginfo_path): - # .egg-info is a single file - pkginfo_path = egginfo_path - pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path) - os.mkdir(distinfo_path) - else: - # .egg-info is a directory - pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO') - pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path) - - # ignore common egg metadata that is useless to wheel - shutil.copytree(egginfo_path, distinfo_path, - ignore=lambda x, y: set(('PKG-INFO', - 'requires.txt', - 'SOURCES.txt', - 'not-zip-safe',))) - - # delete dependency_links if it is only whitespace - dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt') - with open(dependency_links_path, 'r') as dependency_links_file: - dependency_links = dependency_links_file.read().strip() - if not dependency_links: - adios(dependency_links_path) - - write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info) - - # XXX deprecated. Still useful for current distribute/setuptools. - metadata_path = os.path.join(distinfo_path, 'METADATA') - self.add_requirements(metadata_path) - - # XXX intentionally a different path than the PEP. 
- metadata_json_path = os.path.join(distinfo_path, 'metadata.json') - pymeta = pkginfo_to_dict(metadata_path, - distribution=self.distribution) - - if 'description' in pymeta: - description_filename = 'DESCRIPTION.rst' - description_text = pymeta.pop('description') - description_path = os.path.join(distinfo_path, - description_filename) - with open(description_path, "wb") as description_file: - description_file.write(description_text.encode('utf-8')) - pymeta['extensions']['python.details']['document_names']['description'] = description_filename - - # XXX heuristically copy any LICENSE/LICENSE.txt? - license = self.license_file() - if license: - license_filename = 'LICENSE.txt' - shutil.copy(license, os.path.join(self.distinfo_dir, license_filename)) - pymeta['extensions']['python.details']['document_names']['license'] = license_filename - - with open(metadata_json_path, "w") as metadata_json: - json.dump(pymeta, metadata_json, sort_keys=True) - - adios(egginfo_path) - - def write_record(self, bdist_dir, distinfo_dir): - from wheel.util import urlsafe_b64encode - - record_path = os.path.join(distinfo_dir, 'RECORD') - record_relpath = os.path.relpath(record_path, bdist_dir) - - def walk(): - for dir, dirs, files in os.walk(bdist_dir): - dirs.sort() - for f in sorted(files): - yield os.path.join(dir, f) - - def skip(path): - """Wheel hashes every possible file.""" - return (path == record_relpath) - - with open_for_csv(record_path, 'w+') as record_file: - writer = csv.writer(record_file) - for path in walk(): - relpath = os.path.relpath(path, bdist_dir) - if skip(relpath): - hash = '' - size = '' - else: - with open(path, 'rb') as f: - data = f.read() - digest = hashlib.sha256(data).digest() - hash = 'sha256=' + native(urlsafe_b64encode(digest)) - size = len(data) - record_path = os.path.relpath( - path, bdist_dir).replace(os.path.sep, '/') - writer.writerow((record_path, hash, size)) diff --git a/Shared/lib/python3.4/site-packages/wheel/decorator.py 
b/Shared/lib/python3.4/site-packages/wheel/decorator.py deleted file mode 100644 index e4b56d1..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/decorator.py +++ /dev/null @@ -1,19 +0,0 @@ -# from Pyramid - - -class reify(object): - """Put the result of a method which uses this (non-data) - descriptor decorator in the instance dict after the first call, - effectively replacing the decorator with an instance variable. - """ - - def __init__(self, wrapped): - self.wrapped = wrapped - self.__doc__ = wrapped.__doc__ - - def __get__(self, inst, objtype=None): - if inst is None: - return self - val = self.wrapped(inst) - setattr(inst, self.wrapped.__name__, val) - return val diff --git a/Shared/lib/python3.4/site-packages/wheel/egg2wheel.py b/Shared/lib/python3.4/site-packages/wheel/egg2wheel.py deleted file mode 100644 index bf919c4..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/egg2wheel.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python -import os.path -import re -import sys -import tempfile -import zipfile -import wheel.bdist_wheel -import shutil -import distutils.dist -from distutils.archive_util import make_archive -from argparse import ArgumentParser -from glob import iglob - -egg_info_re = re.compile(r'''(?P.+?)-(?P.+?) 
- (-(?P.+?))?(-(?P.+?))?.egg''', re.VERBOSE) - -def egg2wheel(egg_path, dest_dir): - egg_info = egg_info_re.match(os.path.basename(egg_path)).groupdict() - dir = tempfile.mkdtemp(suffix="_e2w") - if os.path.isfile(egg_path): - # assume we have a bdist_egg otherwise - egg = zipfile.ZipFile(egg_path) - egg.extractall(dir) - else: - # support buildout-style installed eggs directories - for pth in os.listdir(egg_path): - src = os.path.join(egg_path, pth) - if os.path.isfile(src): - shutil.copy2(src, dir) - else: - shutil.copytree(src, os.path.join(dir, pth)) - - dist_info = "%s-%s" % (egg_info['name'], egg_info['ver']) - abi = 'none' - pyver = egg_info['pyver'].replace('.', '') - arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_') - if arch != 'any': - # assume all binary eggs are for CPython - pyver = 'cp' + pyver[2:] - wheel_name = '-'.join(( - dist_info, - pyver, - abi, - arch - )) - bw = wheel.bdist_wheel.bdist_wheel(distutils.dist.Distribution()) - bw.root_is_purelib = egg_info['arch'] is None - dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info) - bw.egg2dist(os.path.join(dir, 'EGG-INFO'), - dist_info_dir) - bw.write_wheelfile(dist_info_dir, generator='egg2wheel') - bw.write_record(dir, dist_info_dir) - filename = make_archive(os.path.join(dest_dir, wheel_name), 'zip', root_dir=dir) - os.rename(filename, filename[:-3] + 'whl') - shutil.rmtree(dir) - -def main(): - parser = ArgumentParser() - parser.add_argument('eggs', nargs='*', help="Eggs to convert") - parser.add_argument('--dest-dir', '-d', default=os.path.curdir, - help="Directory to store wheels (default %(default)s)") - parser.add_argument('--verbose', '-v', action='store_true') - args = parser.parse_args() - for pat in args.eggs: - for egg in iglob(pat): - if args.verbose: - sys.stdout.write("{0}... 
".format(egg)) - egg2wheel(egg, args.dest_dir) - if args.verbose: - sys.stdout.write("OK\n") - -if __name__ == "__main__": - main() diff --git a/Shared/lib/python3.4/site-packages/wheel/eggnames.txt b/Shared/lib/python3.4/site-packages/wheel/eggnames.txt deleted file mode 100644 index d422120..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/eggnames.txt +++ /dev/null @@ -1,87 +0,0 @@ -vcard-0.7.8-py2.7.egg -qtalchemy-0.7.1-py2.7.egg -AMQPDeliver-0.1-py2.7.egg -infi.registry-0.1.1-py2.7.egg -infi.instruct-0.5.5-py2.7.egg -infi.devicemanager-0.1.2-py2.7.egg -TracTixSummary-1.0-py2.7.egg -ToscaWidgets-0.9.12-py2.7.egg -archipel_agent_iphone_notification-0.5.0beta-py2.7.egg -archipel_agent_action_scheduler-0.5.0beta-py2.7.egg -ao.social-1.0.2-py2.7.egg -apgl-0.7-py2.7.egg -satchmo_payment_payworld-0.1.1-py2.7.egg -snmpsim-0.1.3-py2.7.egg -sshim-0.2-py2.7.egg -shove-0.3.4-py2.7.egg -simpleavro-0.3.0-py2.7.egg -wkhtmltopdf-0.2-py2.7.egg -wokkel-0.7.0-py2.7.egg -jmbo_social-0.0.6-py2.7.egg -jmbo_post-0.0.6-py2.7.egg -jcrack-0.0.2-py2.7.egg -riak-1.4.0-py2.7.egg -restclient-0.10.2-py2.7.egg -Sutekh-0.8.1-py2.7.egg -trayify-0.0.1-py2.7.egg -tweepy-1.9-py2.7.egg -topzootools-0.2.1-py2.7.egg -haystack-0.16-py2.7.egg -zope.interface-4.0.1-py2.7-win32.egg -neuroshare-0.8.5-py2.7-macosx-10.7-intel.egg -ndg_httpsclient-0.2.0-py2.7.egg -libtele-0.3-py2.7.egg -litex.cxpool-1.0.2-py2.7.egg -obspy.iris-0.5.1-py2.7.egg -obspy.mseed-0.6.1-py2.7-win32.egg -obspy.core-0.6.2-py2.7.egg -CorePost-0.0.3-py2.7.egg -fnordstalk-0.0.3-py2.7.egg -Persistence-2.13.2-py2.7-win32.egg -Pydap-3.1.RC1-py2.7.egg -PyExecJS-1.0.4-py2.7.egg -Wally-0.7.2-py2.7.egg -ExtensionClass-4.0a1-py2.7-win32.egg -Feedjack-0.9.16-py2.7.egg -Mars24-0.3.9-py2.7.egg -HalWeb-0.6.0-py2.7.egg -DARE-0.7.140-py2.7.egg -macholib-1.3-py2.7.egg -marrow.wsgi.egress.compression-1.1-py2.7.egg -mcs-0.3.7-py2.7.egg -Kook-0.6.0-py2.7.egg -er-0.1-py2.7.egg -evasion_director-1.1.4-py2.7.egg -djquery-0.1a-py2.7.egg 
-django_factory-0.7-py2.7.egg -django_gizmo-0.0.3-py2.7.egg -django_category-0.1-py2.7.egg -dbwrap-0.3.2-py2.7.egg -django_supergeneric-1.0-py2.7.egg -django_dynamo-0.25-py2.7.egg -django_acollabauth-0.1-py2.7.egg -django_qrlink-0.1.0-py2.7.egg -django_addons-0.6.6-py2.7.egg -cover_grabber-1.1.2-py2.7.egg -chem-1.1-py2.7.egg -crud-0.1-py2.7.egg -bongo-0.1-py2.7.egg -bytecodehacks-April2000-py2.7.egg -greenlet-0.3.4-py2.7-win32.egg -ginvoke-0.3.1-py2.7.egg -pyobjc_framework_ScriptingBridge-2.3-py2.7.egg -pecan-0.2.0a-py2.7.egg -pyress-0.2.0-py2.7.egg -pyobjc_framework_PubSub-2.3-py2.7.egg -pyobjc_framework_ExceptionHandling-2.3-py2.7.egg -pywps-trunk-py2.7.egg -pyobjc_framework_CFNetwork-2.3-py2.7-macosx-10.6-fat.egg -py.saunter-0.40-py2.7.egg -pyfnordmetric-0.0.1-py2.7.egg -pyws-1.1.1-py2.7.egg -prestapyt-0.4.0-py2.7.egg -passlib-1.5.3-py2.7.egg -pyga-2.1-py2.7.egg -pygithub3-0.3-py2.7.egg -pyobjc_framework_OpenDirectory-2.3-py2.7.egg -yaposib-0.2.75-py2.7-linux-x86_64.egg diff --git a/Shared/lib/python3.4/site-packages/wheel/install.py b/Shared/lib/python3.4/site-packages/wheel/install.py deleted file mode 100644 index 3af6d0c..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/install.py +++ /dev/null @@ -1,480 +0,0 @@ -""" -Operations on existing wheel files, including basic installation. 
-""" -# XXX see patched pip to install - -import sys -import warnings -import os.path -import re -import zipfile -import hashlib -import csv - -import shutil - -try: - _big_number = sys.maxsize -except NameError: - _big_number = sys.maxint - -from wheel.decorator import reify -from wheel.util import (urlsafe_b64encode, from_json, urlsafe_b64decode, - native, binary, HashingFile) -from wheel import signatures -from wheel.pkginfo import read_pkg_info_bytes -from wheel.util import open_for_csv - -from .pep425tags import get_supported -from .paths import get_install_paths - -# The next major version after this version of the 'wheel' tool: -VERSION_TOO_HIGH = (1, 0) - -# Non-greedy matching of an optional build number may be too clever (more -# invalid wheel filenames will match). Separate regex for .dist-info? -WHEEL_INFO_RE = re.compile( - r"""^(?P(?P.+?)(-(?P\d.+?))?) - ((-(?P\d.*?))?-(?P.+?)-(?P.+?)-(?P.+?) - \.whl|\.dist-info)$""", - re.VERBOSE).match - -def parse_version(version): - """Use parse_version from pkg_resources or distutils as available.""" - global parse_version - try: - from pkg_resources import parse_version - except ImportError: - from distutils.version import LooseVersion as parse_version - return parse_version(version) - -class BadWheelFile(ValueError): - pass - - -class WheelFile(object): - """Parse wheel-specific attributes from a wheel (.whl) file and offer - basic installation and verification support. - - WheelFile can be used to simply parse a wheel filename by avoiding the - methods that require the actual file contents.""" - - WHEEL_INFO = "WHEEL" - RECORD = "RECORD" - - def __init__(self, - filename, - fp=None, - append=False, - context=get_supported): - """ - :param fp: A seekable file-like object or None to open(filename). - :param append: Open archive in append mode. - :param context: Function returning list of supported tags. Wheels - must have the same context to be sortable. 
- """ - self.filename = filename - self.fp = fp - self.append = append - self.context = context - basename = os.path.basename(filename) - self.parsed_filename = WHEEL_INFO_RE(basename) - if not basename.endswith('.whl') or self.parsed_filename is None: - raise BadWheelFile("Bad filename '%s'" % filename) - - def __repr__(self): - return self.filename - - @property - def distinfo_name(self): - return "%s.dist-info" % self.parsed_filename.group('namever') - - @property - def datadir_name(self): - return "%s.data" % self.parsed_filename.group('namever') - - @property - def record_name(self): - return "%s/%s" % (self.distinfo_name, self.RECORD) - - @property - def wheelinfo_name(self): - return "%s/%s" % (self.distinfo_name, self.WHEEL_INFO) - - @property - def tags(self): - """A wheel file is compatible with the Cartesian product of the - period-delimited tags in its filename. - To choose a wheel file among several candidates having the same - distribution version 'ver', an installer ranks each triple of - (pyver, abi, plat) that its Python installation can run, sorting - the wheels by the best-ranked tag it supports and then by their - arity which is just len(list(compatibility_tags)). - """ - tags = self.parsed_filename.groupdict() - for pyver in tags['pyver'].split('.'): - for abi in tags['abi'].split('.'): - for plat in tags['plat'].split('.'): - yield (pyver, abi, plat) - - compatibility_tags = tags - - @property - def arity(self): - """The number of compatibility tags the wheel declares.""" - return len(list(self.compatibility_tags)) - - @property - def rank(self): - """ - Lowest index of any of this wheel's tags in self.context(), and the - arity e.g. (0, 1) - """ - return self.compatibility_rank(self.context()) - - @property - def compatible(self): - return self.rank[0] != _big_number # bad API! - - # deprecated: - def compatibility_rank(self, supported): - """Rank the wheel against the supported tags. Smaller ranks are more - compatible! 
- - :param supported: A list of compatibility tags that the current - Python implemenation can run. - """ - preferences = [] - for tag in self.compatibility_tags: - try: - preferences.append(supported.index(tag)) - # Tag not present - except ValueError: - pass - if len(preferences): - return (min(preferences), self.arity) - return (_big_number, 0) - - # deprecated - def supports_current_python(self, x): - assert self.context == x, 'context mismatch' - return self.compatible - - # Comparability. - # Wheels are equal if they refer to the same file. - # If two wheels are not equal, compare based on (in this order): - # 1. Name - # 2. Version - # 3. Compatibility rank - # 4. Filename (as a tiebreaker) - @property - def _sort_key(self): - return (self.parsed_filename.group('name'), - parse_version(self.parsed_filename.group('ver')), - tuple(-x for x in self.rank), - self.filename) - - def __eq__(self, other): - return self.filename == other.filename - - def __ne__(self, other): - return self.filename != other.filename - - def __lt__(self, other): - if self.context != other.context: - raise TypeError("{0}.context != {1}.context".format(self, other)) - - return self._sort_key < other._sort_key - - # XXX prune - - sn = self.parsed_filename.group('name') - on = other.parsed_filename.group('name') - if sn != on: - return sn < on - sv = parse_version(self.parsed_filename.group('ver')) - ov = parse_version(other.parsed_filename.group('ver')) - if sv != ov: - return sv < ov - # Compatibility - if self.context != other.context: - raise TypeError("{0}.context != {1}.context".format(self, other)) - sc = self.rank - oc = other.rank - if sc != None and oc != None and sc != oc: - # Smaller compatibility ranks are "better" than larger ones, - # so we have to reverse the sense of the comparison here! 
- return sc > oc - elif sc == None and oc != None: - return False - return self.filename < other.filename - - def __gt__(self, other): - return other < self - - def __le__(self, other): - return self == other or self < other - - def __ge__(self, other): - return self == other or other < self - - # - # Methods using the file's contents: - # - - @reify - def zipfile(self): - mode = "r" - if self.append: - mode = "a" - vzf = VerifyingZipFile(self.fp if self.fp else self.filename, mode) - if not self.append: - self.verify(vzf) - return vzf - - @reify - def parsed_wheel_info(self): - """Parse wheel metadata (the .data/WHEEL file)""" - return read_pkg_info_bytes(self.zipfile.read(self.wheelinfo_name)) - - def check_version(self): - version = self.parsed_wheel_info['Wheel-Version'] - if tuple(map(int, version.split('.'))) >= VERSION_TOO_HIGH: - raise ValueError("Wheel version is too high") - - @reify - def install_paths(self): - """ - Consult distutils to get the install paths for our dist. A dict with - ('purelib', 'platlib', 'headers', 'scripts', 'data'). - - We use the name from our filename as the dist name, which means headers - could be installed in the wrong place if the filesystem-escaped name - is different than the Name. Who cares? - """ - name = self.parsed_filename.group('name') - return get_install_paths(name) - - def install(self, force=False, overrides={}): - """ - Install the wheel into site-packages. - """ - - # Utility to get the target directory for a particular key - def get_path(key): - return overrides.get(key) or self.install_paths[key] - - # The base target location is either purelib or platlib - if self.parsed_wheel_info['Root-Is-Purelib'] == 'true': - root = get_path('purelib') - else: - root = get_path('platlib') - - # Parse all the names in the archive - name_trans = {} - for info in self.zipfile.infolist(): - name = info.filename - # Zip files can contain entries representing directories. - # These end in a '/'. 
- # We ignore these, as we create directories on demand. - if name.endswith('/'): - continue - - # Pathnames in a zipfile namelist are always /-separated. - # In theory, paths could start with ./ or have other oddities - # but this won't happen in practical cases of well-formed wheels. - # We'll cover the simple case of an initial './' as it's both easy - # to do and more common than most other oddities. - if name.startswith('./'): - name = name[2:] - - # Split off the base directory to identify files that are to be - # installed in non-root locations - basedir, sep, filename = name.partition('/') - if sep and basedir == self.datadir_name: - # Data file. Target destination is elsewhere - key, sep, filename = filename.partition('/') - if not sep: - raise ValueError("Invalid filename in wheel: {0}".format(name)) - target = get_path(key) - else: - # Normal file. Target destination is root - key = '' - target = root - filename = name - - # Map the actual filename from the zipfile to its intended target - # directory and the pathname relative to that directory. - dest = os.path.normpath(os.path.join(target, filename)) - name_trans[info] = (key, target, filename, dest) - - # We're now ready to start processing the actual install. The process - # is as follows: - # 1. Prechecks - is the wheel valid, is its declared architecture - # OK, etc. [[Responsibility of the caller]] - # 2. Overwrite check - do any of the files to be installed already - # exist? - # 3. Actual install - put the files in their target locations. - # 4. Update RECORD - write a suitably modified RECORD file to - # reflect the actual installed paths. - - if not force: - for info, v in name_trans.items(): - k = info.filename - key, target, filename, dest = v - if os.path.exists(dest): - raise ValueError("Wheel file {0} would overwrite {1}. Use force if this is intended".format(k, dest)) - - # Get the name of our executable, for use when replacing script - # wrapper hashbang lines. 
- # We encode it using getfilesystemencoding, as that is "the name of - # the encoding used to convert Unicode filenames into system file - # names". - exename = sys.executable.encode(sys.getfilesystemencoding()) - record_data = [] - record_name = self.distinfo_name + '/RECORD' - for info, (key, target, filename, dest) in name_trans.items(): - name = info.filename - source = self.zipfile.open(info) - # Skip the RECORD file - if name == record_name: - continue - ddir = os.path.dirname(dest) - if not os.path.isdir(ddir): - os.makedirs(ddir) - destination = HashingFile(open(dest, 'wb')) - if key == 'scripts': - hashbang = source.readline() - if hashbang.startswith(b'#!python'): - hashbang = b'#!' + exename + binary(os.linesep) - destination.write(hashbang) - shutil.copyfileobj(source, destination) - reldest = os.path.relpath(dest, root) - reldest.replace(os.sep, '/') - record_data.append((reldest, destination.digest(), destination.length)) - destination.close() - source.close() - # preserve attributes (especially +x bit for scripts) - attrs = info.external_attr >> 16 - if attrs: # tends to be 0 if Windows. - os.chmod(dest, info.external_attr >> 16) - - record_name = os.path.join(root, self.record_name) - writer = csv.writer(open_for_csv(record_name, 'w+')) - for reldest, digest, length in sorted(record_data): - writer.writerow((reldest, digest, length)) - writer.writerow((self.record_name, '', '')) - - def verify(self, zipfile=None): - """Configure the VerifyingZipFile `zipfile` by verifying its signature - and setting expected hashes for every hash in RECORD. - Caller must complete the verification process by completely reading - every file in the archive (e.g. 
with extractall).""" - sig = None - if zipfile is None: - zipfile = self.zipfile - zipfile.strict = True - - record_name = '/'.join((self.distinfo_name, 'RECORD')) - sig_name = '/'.join((self.distinfo_name, 'RECORD.jws')) - # tolerate s/mime signatures: - smime_sig_name = '/'.join((self.distinfo_name, 'RECORD.p7s')) - zipfile.set_expected_hash(record_name, None) - zipfile.set_expected_hash(sig_name, None) - zipfile.set_expected_hash(smime_sig_name, None) - record = zipfile.read(record_name) - - record_digest = urlsafe_b64encode(hashlib.sha256(record).digest()) - try: - sig = from_json(native(zipfile.read(sig_name))) - except KeyError: # no signature - pass - if sig: - headers, payload = signatures.verify(sig) - if payload['hash'] != "sha256=" + native(record_digest): - msg = "RECORD.sig claimed RECORD hash {0} != computed hash {1}." - raise BadWheelFile(msg.format(payload['hash'], - native(record_digest))) - - reader = csv.reader((native(r) for r in record.splitlines())) - - for row in reader: - filename = row[0] - hash = row[1] - if not hash: - if filename not in (record_name, sig_name): - sys.stderr.write("%s has no hash!\n" % filename) - continue - algo, data = row[1].split('=', 1) - assert algo == "sha256", "Unsupported hash algorithm" - zipfile.set_expected_hash(filename, urlsafe_b64decode(binary(data))) - - -class VerifyingZipFile(zipfile.ZipFile): - """ZipFile that can assert that each of its extracted contents matches - an expected sha256 hash. 
Note that each file must be completly read in - order for its hash to be checked.""" - - def __init__(self, file, mode="r", - compression=zipfile.ZIP_STORED, - allowZip64=False): - zipfile.ZipFile.__init__(self, file, mode, compression, allowZip64) - - self.strict = False - self._expected_hashes = {} - self._hash_algorithm = hashlib.sha256 - - def set_expected_hash(self, name, hash): - """ - :param name: name of zip entry - :param hash: bytes of hash (or None for "don't care") - """ - self._expected_hashes[name] = hash - - def open(self, name_or_info, mode="r", pwd=None): - """Return file-like object for 'name'.""" - # A non-monkey-patched version would contain most of zipfile.py - ef = zipfile.ZipFile.open(self, name_or_info, mode, pwd) - if isinstance(name_or_info, zipfile.ZipInfo): - name = name_or_info.filename - else: - name = name_or_info - if (name in self._expected_hashes - and self._expected_hashes[name] != None): - expected_hash = self._expected_hashes[name] - try: - _update_crc_orig = ef._update_crc - except AttributeError: - warnings.warn('Need ZipExtFile._update_crc to implement ' - 'file hash verification (in Python >= 2.7)') - return ef - running_hash = self._hash_algorithm() - if hasattr(ef, '_eof'): # py33 - def _update_crc(data): - _update_crc_orig(data) - running_hash.update(data) - if ef._eof and running_hash.digest() != expected_hash: - raise BadWheelFile("Bad hash for file %r" % ef.name) - else: - def _update_crc(data, eof=None): - _update_crc_orig(data, eof=eof) - running_hash.update(data) - if eof and running_hash.digest() != expected_hash: - raise BadWheelFile("Bad hash for file %r" % ef.name) - ef._update_crc = _update_crc - elif self.strict and name not in self._expected_hashes: - raise BadWheelFile("No expected hash for file %r" % ef.name) - return ef - - def pop(self): - """Truncate the last file off this zipfile. 
- Assumes infolist() is in the same order as the files (true for - ordinary zip files created by Python)""" - if not self.fp: - raise RuntimeError( - "Attempt to pop from ZIP archive that was already closed") - last = self.infolist().pop() - del self.NameToInfo[last.filename] - self.fp.seek(last.header_offset, os.SEEK_SET) - self.fp.truncate() - self._didModify = True diff --git a/Shared/lib/python3.4/site-packages/wheel/metadata.py b/Shared/lib/python3.4/site-packages/wheel/metadata.py deleted file mode 100644 index b3cc65c..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/metadata.py +++ /dev/null @@ -1,317 +0,0 @@ -""" -Tools for converting old- to new-style metadata. -""" - -from collections import namedtuple -from .pkginfo import read_pkg_info -from .util import OrderedDefaultDict -try: - from collections import OrderedDict -except ImportError: - OrderedDict = dict - -import re -import os.path -import textwrap -import pkg_resources -import email.parser -import wheel - -METADATA_VERSION = "2.0" - -PLURAL_FIELDS = { "classifier" : "classifiers", - "provides_dist" : "provides", - "provides_extra" : "extras" } - -SKIP_FIELDS = set() - -CONTACT_FIELDS = (({"email":"author_email", "name": "author"}, - "author"), - ({"email":"maintainer_email", "name": "maintainer"}, - "maintainer")) - -# commonly filled out as "UNKNOWN" by distutils: -UNKNOWN_FIELDS = set(("author", "author_email", "platform", "home_page", - "license")) - -# Wheel itself is probably the only program that uses non-extras markers -# in METADATA/PKG-INFO. Support its syntax with the extra at the end only. -EXTRA_RE = re.compile("""^(?P.*?)(;\s*(?P.*?)(extra == '(?P.*?)')?)$""") -KEYWORDS_RE = re.compile("[\0-,]+") - -MayRequiresKey = namedtuple('MayRequiresKey', ('condition', 'extra')) - -def unique(iterable): - """ - Yield unique values in iterable, preserving order. 
- """ - seen = set() - for value in iterable: - if not value in seen: - seen.add(value) - yield value - - -def handle_requires(metadata, pkg_info, key): - """ - Place the runtime requirements from pkg_info into metadata. - """ - may_requires = OrderedDefaultDict(list) - for value in sorted(pkg_info.get_all(key)): - extra_match = EXTRA_RE.search(value) - if extra_match: - groupdict = extra_match.groupdict() - condition = groupdict['condition'] - extra = groupdict['extra'] - package = groupdict['package'] - if condition.endswith(' and '): - condition = condition[:-5] - else: - condition, extra = None, None - package = value - key = MayRequiresKey(condition, extra) - may_requires[key].append(package) - - if may_requires: - metadata['run_requires'] = [] - def sort_key(item): - # Both condition and extra could be None, which can't be compared - # against strings in Python 3. - key, value = item - if key.condition is None: - return '' - return key.condition - for key, value in sorted(may_requires.items(), key=sort_key): - may_requirement = OrderedDict((('requires', value),)) - if key.extra: - may_requirement['extra'] = key.extra - if key.condition: - may_requirement['environment'] = key.condition - metadata['run_requires'].append(may_requirement) - - if not 'extras' in metadata: - metadata['extras'] = [] - metadata['extras'].extend([key.extra for key in may_requires.keys() if key.extra]) - - -def pkginfo_to_dict(path, distribution=None): - """ - Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict. - - The description is included under the key ['description'] rather than - being written to a separate file. 
- - path: path to PKG-INFO file - distribution: optional distutils Distribution() - """ - - metadata = OrderedDefaultDict(lambda: OrderedDefaultDict(lambda: OrderedDefaultDict(OrderedDict))) - metadata["generator"] = "bdist_wheel (" + wheel.__version__ + ")" - try: - unicode - pkg_info = read_pkg_info(path) - except NameError: - pkg_info = email.parser.Parser().parsestr(open(path, 'rb').read().decode('utf-8')) - description = None - - if pkg_info['Summary']: - metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary') - del pkg_info['Summary'] - - if pkg_info['Description']: - description = dedent_description(pkg_info) - del pkg_info['Description'] - else: - payload = pkg_info.get_payload() - if isinstance(payload, bytes): - # Avoid a Python 2 Unicode error. - # We still suffer ? glyphs on Python 3. - payload = payload.decode('utf-8') - if payload: - description = payload - - if description: - pkg_info['description'] = description - - for key in sorted(unique(k.lower() for k in pkg_info.keys())): - low_key = key.replace('-', '_') - - if low_key in SKIP_FIELDS: - continue - - if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN': - continue - - if low_key in sorted(PLURAL_FIELDS): - metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key) - - elif low_key == "requires_dist": - handle_requires(metadata, pkg_info, key) - - elif low_key == 'provides_extra': - if not 'extras' in metadata: - metadata['extras'] = [] - metadata['extras'].extend(pkg_info.get_all(key)) - - elif low_key == 'home_page': - metadata['extensions']['python.details']['project_urls'] = {'Home':pkg_info[key]} - - elif low_key == 'keywords': - metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key]) - - else: - metadata[low_key] = pkg_info[key] - - metadata['metadata_version'] = METADATA_VERSION - - if 'extras' in metadata: - metadata['extras'] = sorted(set(metadata['extras'])) - - # include more information if distribution is available - if distribution: - for requires, attr in 
(('test_requires', 'tests_require'),): - try: - requirements = getattr(distribution, attr) - if isinstance(requirements, list): - new_requirements = sorted(convert_requirements(requirements)) - metadata[requires] = [{'requires':new_requirements}] - except AttributeError: - pass - - # handle contacts - contacts = [] - for contact_type, role in CONTACT_FIELDS: - contact = OrderedDict() - for key in sorted(contact_type): - if contact_type[key] in metadata: - contact[key] = metadata.pop(contact_type[key]) - if contact: - contact['role'] = role - contacts.append(contact) - if contacts: - metadata['extensions']['python.details']['contacts'] = contacts - - # convert entry points to exports - try: - with open(os.path.join(os.path.dirname(path), "entry_points.txt"), "r") as ep_file: - ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read()) - exports = OrderedDict() - for group, items in sorted(ep_map.items()): - exports[group] = OrderedDict() - for item in sorted(map(str, items.values())): - name, export = item.split(' = ', 1) - exports[group][name] = export - if exports: - metadata['extensions']['python.exports'] = exports - except IOError: - pass - - # copy console_scripts entry points to commands - if 'python.exports' in metadata['extensions']: - for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'), - ('gui_scripts', 'wrap_gui')): - if ep_script in metadata['extensions']['python.exports']: - metadata['extensions']['python.commands'][wrap_script] = \ - metadata['extensions']['python.exports'][ep_script] - - return metadata - -def requires_to_requires_dist(requirement): - """Compose the version predicates for requirement in PEP 345 fashion.""" - requires_dist = [] - for op, ver in requirement.specs: - requires_dist.append(op + ver) - if not requires_dist: - return '' - return " (%s)" % ','.join(requires_dist) - -def convert_requirements(requirements): - """Yield Requires-Dist: strings for parsed requirements strings.""" - for req in requirements: - 
parsed_requirement = pkg_resources.Requirement.parse(req) - spec = requires_to_requires_dist(parsed_requirement) - extras = ",".join(parsed_requirement.extras) - if extras: - extras = "[%s]" % extras - yield (parsed_requirement.project_name + extras + spec) - -def pkginfo_to_metadata(egg_info_path, pkginfo_path): - """ - Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka - old-draft Metadata 2.0 format. - """ - pkg_info = read_pkg_info(pkginfo_path) - pkg_info.replace_header('Metadata-Version', '2.0') - requires_path = os.path.join(egg_info_path, 'requires.txt') - if os.path.exists(requires_path): - requires = open(requires_path).read() - for extra, reqs in sorted(pkg_resources.split_sections(requires), - key=lambda x: x[0] or ''): - condition = '' - if extra and ':' in extra: # setuptools extra:condition syntax - extra, condition = extra.split(':', 1) - if extra: - pkg_info['Provides-Extra'] = extra - if condition: - condition += " and " - condition += 'extra == %s' % repr(extra) - if condition: - condition = '; ' + condition - for new_req in sorted(convert_requirements(reqs)): - pkg_info['Requires-Dist'] = new_req + condition - - description = pkg_info['Description'] - if description: - pkg_info.set_payload(dedent_description(pkg_info)) - del pkg_info['Description'] - - return pkg_info - - -def pkginfo_unicode(pkg_info, field): - """Hack to coax Unicode out of an email Message() - Python 3.3+""" - text = pkg_info[field] - field = field.lower() - if not isinstance(text, str): - if not hasattr(pkg_info, 'raw_items'): # Python 3.2 - return str(text) - for item in pkg_info.raw_items(): - if item[0].lower() == field: - text = item[1].encode('ascii', 'surrogateescape')\ - .decode('utf-8') - break - - return text - - -def dedent_description(pkg_info): - """ - Dedent and convert pkg_info['Description'] to Unicode. - """ - description = pkg_info['Description'] - - # Python 3 Unicode handling, sorta. 
- surrogates = False - if not isinstance(description, str): - surrogates = True - description = pkginfo_unicode(pkg_info, 'Description') - - description_lines = description.splitlines() - description_dedent = '\n'.join( - # if the first line of long_description is blank, - # the first line here will be indented. - (description_lines[0].lstrip(), - textwrap.dedent('\n'.join(description_lines[1:])), - '\n')) - - if surrogates: - description_dedent = description_dedent\ - .encode("utf8")\ - .decode("ascii", "surrogateescape") - - return description_dedent - - -if __name__ == "__main__": - import sys, pprint - pprint.pprint(pkginfo_to_dict(sys.argv[1])) diff --git a/Shared/lib/python3.4/site-packages/wheel/paths.py b/Shared/lib/python3.4/site-packages/wheel/paths.py deleted file mode 100644 index fe3dfd6..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/paths.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Installation paths. - -Map the .data/ subdirectory names to install paths. -""" - -import os.path -import sys -import distutils.dist as dist -import distutils.command.install as install - -def get_install_command(name): - # late binding due to potential monkeypatching - d = dist.Distribution({'name':name}) - i = install.install(d) - i.finalize_options() - return i - -def get_install_paths(name): - """ - Return the (distutils) install paths for the named dist. - - A dict with ('purelib', 'platlib', 'headers', 'scripts', 'data') keys. 
- """ - paths = {} - - i = get_install_command(name) - - for key in install.SCHEME_KEYS: - paths[key] = getattr(i, 'install_' + key) - - # pip uses a similar path as an alternative to the system's (read-only) - # include directory: - if hasattr(sys, 'real_prefix'): # virtualenv - paths['headers'] = os.path.join(sys.prefix, - 'include', - 'site', - 'python' + sys.version[:3], - name) - - return paths diff --git a/Shared/lib/python3.4/site-packages/wheel/pep425tags.py b/Shared/lib/python3.4/site-packages/wheel/pep425tags.py deleted file mode 100644 index 106c879..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/pep425tags.py +++ /dev/null @@ -1,169 +0,0 @@ -"""Generate and work with PEP 425 Compatibility Tags.""" - -import sys -import warnings - -try: - import sysconfig -except ImportError: # pragma nocover - # Python < 2.7 - import distutils.sysconfig as sysconfig -import distutils.util - - -def get_config_var(var): - try: - return sysconfig.get_config_var(var) - except IOError as e: # pip Issue #1074 - warnings.warn("{0}".format(e), RuntimeWarning) - return None - - -def get_abbr_impl(): - """Return abbreviated implementation name.""" - if hasattr(sys, 'pypy_version_info'): - pyimpl = 'pp' - elif sys.platform.startswith('java'): - pyimpl = 'jy' - elif sys.platform == 'cli': - pyimpl = 'ip' - else: - pyimpl = 'cp' - return pyimpl - - -def get_impl_ver(): - """Return implementation version.""" - impl_ver = get_config_var("py_version_nodot") - if not impl_ver or get_abbr_impl() == 'pp': - impl_ver = ''.join(map(str, get_impl_version_info())) - return impl_ver - - -def get_impl_version_info(): - """Return sys.version_info-like tuple for use in decrementing the minor - version.""" - if get_abbr_impl() == 'pp': - # as per https://github.com/pypa/pip/issues/2882 - return (sys.version_info[0], sys.pypy_version_info.major, - sys.pypy_version_info.minor) - else: - return sys.version_info[0], sys.version_info[1] - - -def get_flag(var, fallback, expected=True, 
warn=True): - """Use a fallback method for determining SOABI flags if the needed config - var is unset or unavailable.""" - val = get_config_var(var) - if val is None: - if warn: - warnings.warn("Config variable '{0}' is unset, Python ABI tag may " - "be incorrect".format(var), RuntimeWarning, 2) - return fallback() - return val == expected - - -def get_abi_tag(): - """Return the ABI tag based on SOABI (if available) or emulate SOABI - (CPython 2, PyPy).""" - soabi = get_config_var('SOABI') - impl = get_abbr_impl() - if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'): - d = '' - m = '' - u = '' - if get_flag('Py_DEBUG', - lambda: hasattr(sys, 'gettotalrefcount'), - warn=(impl == 'cp')): - d = 'd' - if get_flag('WITH_PYMALLOC', - lambda: impl == 'cp', - warn=(impl == 'cp')): - m = 'm' - if get_flag('Py_UNICODE_SIZE', - lambda: sys.maxunicode == 0x10ffff, - expected=4, - warn=(impl == 'cp' and - sys.version_info < (3, 3))) \ - and sys.version_info < (3, 3): - u = 'u' - abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) - elif soabi and soabi.startswith('cpython-'): - abi = 'cp' + soabi.split('-')[1] - elif soabi: - abi = soabi.replace('.', '_').replace('-', '_') - else: - abi = None - return abi - - -def get_platform(): - """Return our platform name 'win32', 'linux_x86_64'""" - # XXX remove distutils dependency - return distutils.util.get_platform().replace('.', '_').replace('-', '_') - - -def get_supported(versions=None, supplied_platform=None): - """Return a list of supported tags for each version specified in - `versions`. - - :param versions: a list of string versions, of the form ["33", "32"], - or None. The first version will be assumed to support our ABI. - """ - supported = [] - - # Versions must be given with respect to the preference - if versions is None: - versions = [] - version_info = get_impl_version_info() - major = version_info[:-1] - # Support all previous minor Python versions. 
- for minor in range(version_info[-1], -1, -1): - versions.append(''.join(map(str, major + (minor,)))) - - impl = get_abbr_impl() - - abis = [] - - abi = get_abi_tag() - if abi: - abis[0:0] = [abi] - - abi3s = set() - import imp - for suffix in imp.get_suffixes(): - if suffix[0].startswith('.abi'): - abi3s.add(suffix[0].split('.', 2)[1]) - - abis.extend(sorted(list(abi3s))) - - abis.append('none') - - platforms = [] - if supplied_platform: - platforms.append(supplied_platform) - platforms.append(get_platform()) - - # Current version, current API (built specifically for our Python): - for abi in abis: - for arch in platforms: - supported.append(('%s%s' % (impl, versions[0]), abi, arch)) - - # No abi / arch, but requires our implementation: - for i, version in enumerate(versions): - supported.append(('%s%s' % (impl, version), 'none', 'any')) - if i == 0: - # Tagged specifically as being cross-version compatible - # (with just the major version specified) - supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) - - # Major Python version + platform; e.g. 
binaries not using the Python API - supported.append(('py%s' % (versions[0][0]), 'none', arch)) - - # No abi / arch, generic Python - for i, version in enumerate(versions): - supported.append(('py%s' % (version,), 'none', 'any')) - if i == 0: - supported.append(('py%s' % (version[0]), 'none', 'any')) - - return supported diff --git a/Shared/lib/python3.4/site-packages/wheel/pkginfo.py b/Shared/lib/python3.4/site-packages/wheel/pkginfo.py deleted file mode 100644 index 8a4aca3..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/pkginfo.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Tools for reading and writing PKG-INFO / METADATA without caring -about the encoding.""" - -from email.parser import Parser - -try: - unicode - _PY3 = False -except NameError: - _PY3 = True - -if not _PY3: - from email.generator import Generator - - def read_pkg_info_bytes(bytestr): - return Parser().parsestr(bytestr) - - def read_pkg_info(path): - with open(path, "r") as headers: - message = Parser().parse(headers) - return message - - def write_pkg_info(path, message): - with open(path, 'w') as metadata: - Generator(metadata, maxheaderlen=0).flatten(message) - -else: - from email.generator import BytesGenerator - def read_pkg_info_bytes(bytestr): - headers = bytestr.decode(encoding="ascii", errors="surrogateescape") - message = Parser().parsestr(headers) - return message - - def read_pkg_info(path): - with open(path, "r", - encoding="ascii", - errors="surrogateescape") as headers: - message = Parser().parse(headers) - return message - - def write_pkg_info(path, message): - with open(path, "wb") as out: - BytesGenerator(out, maxheaderlen=0).flatten(message) - diff --git a/Shared/lib/python3.4/site-packages/wheel/signatures/__init__.py b/Shared/lib/python3.4/site-packages/wheel/signatures/__init__.py deleted file mode 100644 index 3f21b50..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/signatures/__init__.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Create and verify jws-js format Ed25519 
signatures. -""" - -__all__ = [ 'sign', 'verify' ] - -import json -from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary - -ed25519ll = None - -ALG = "Ed25519" - -def get_ed25519ll(): - """Lazy import-and-test of ed25519 module""" - global ed25519ll - - if not ed25519ll: - try: - import ed25519ll # fast (thousands / s) - except (ImportError, OSError): # pragma nocover - from . import ed25519py as ed25519ll # pure Python (hundreds / s) - test() - - return ed25519ll - -def sign(payload, keypair): - """Return a JWS-JS format signature given a JSON-serializable payload and - an Ed25519 keypair.""" - get_ed25519ll() - # - header = { - "alg": ALG, - "jwk": { - "kty": ALG, # alg -> kty in jwk-08. - "vk": native(urlsafe_b64encode(keypair.vk)) - } - } - - encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True))) - encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True))) - secured_input = b".".join((encoded_header, encoded_payload)) - sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk) - signature = sig_msg[:ed25519ll.SIGNATUREBYTES] - encoded_signature = urlsafe_b64encode(signature) - - return {"recipients": - [{"header":native(encoded_header), - "signature":native(encoded_signature)}], - "payload": native(encoded_payload)} - -def assertTrue(condition, message=""): - if not condition: - raise ValueError(message) - -def verify(jwsjs): - """Return (decoded headers, payload) if all signatures in jwsjs are - consistent, else raise ValueError. 
- - Caller must decide whether the keys are actually trusted.""" - get_ed25519ll() - # XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+) - recipients = jwsjs["recipients"] - encoded_payload = binary(jwsjs["payload"]) - headers = [] - for recipient in recipients: - assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient)) - h = binary(recipient["header"]) - s = binary(recipient["signature"]) - header = json.loads(native(urlsafe_b64decode(h))) - assertTrue(header["alg"] == ALG, - "Unexpected algorithm {0}".format(header["alg"])) - if "alg" in header["jwk"] and not "kty" in header["jwk"]: - header["jwk"]["kty"] = header["jwk"]["alg"] # b/w for JWK < -08 - assertTrue(header["jwk"]["kty"] == ALG, # true for Ed25519 - "Unexpected key type {0}".format(header["jwk"]["kty"])) - vk = urlsafe_b64decode(binary(header["jwk"]["vk"])) - secured_input = b".".join((h, encoded_payload)) - sig = urlsafe_b64decode(s) - sig_msg = sig+secured_input - verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk)) - verified_header, verified_payload = verified_input.split('.') - verified_header = binary(verified_header) - decoded_header = native(urlsafe_b64decode(verified_header)) - headers.append(json.loads(decoded_header)) - - verified_payload = binary(verified_payload) - - # only return header, payload that have passed through the crypto library. 
- payload = json.loads(native(urlsafe_b64decode(verified_payload))) - - return headers, payload - -def test(): - kp = ed25519ll.crypto_sign_keypair() - payload = {'test': 'onstartup'} - jwsjs = json.loads(json.dumps(sign(payload, kp))) - verify(jwsjs) - jwsjs['payload'] += 'x' - try: - verify(jwsjs) - except ValueError: - pass - else: # pragma no cover - raise RuntimeError("No error from bad wheel.signatures payload.") - diff --git a/Shared/lib/python3.4/site-packages/wheel/signatures/djbec.py b/Shared/lib/python3.4/site-packages/wheel/signatures/djbec.py deleted file mode 100644 index 56efe44..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/signatures/djbec.py +++ /dev/null @@ -1,270 +0,0 @@ -# Ed25519 digital signatures -# Based on http://ed25519.cr.yp.to/python/ed25519.py -# See also http://ed25519.cr.yp.to/software.html -# Adapted by Ron Garret -# Sped up considerably using coordinate transforms found on: -# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html -# Specifically add-2008-hwcd-4 and dbl-2008-hwcd - -try: # pragma nocover - unicode - PY3 = False - def asbytes(b): - """Convert array of integers to byte string""" - return ''.join(chr(x) for x in b) - def joinbytes(b): - """Convert array of bytes to byte string""" - return ''.join(b) - def bit(h, i): - """Return i'th bit of bytestring h""" - return (ord(h[i//8]) >> (i%8)) & 1 - -except NameError: # pragma nocover - PY3 = True - asbytes = bytes - joinbytes = bytes - def bit(h, i): - return (h[i//8] >> (i%8)) & 1 - -import hashlib - -b = 256 -q = 2**255 - 19 -l = 2**252 + 27742317777372353535851937790883648493 - -def H(m): - return hashlib.sha512(m).digest() - -def expmod(b, e, m): - if e == 0: return 1 - t = expmod(b, e // 2, m) ** 2 % m - if e & 1: t = (t * b) % m - return t - -# Can probably get some extra speedup here by replacing this with -# an extended-euclidean, but performance seems OK without that -def inv(x): - return expmod(x, q-2, q) - -d = -121665 * inv(121666) -I = 
expmod(2,(q-1)//4,q) - -def xrecover(y): - xx = (y*y-1) * inv(d*y*y+1) - x = expmod(xx,(q+3)//8,q) - if (x*x - xx) % q != 0: x = (x*I) % q - if x % 2 != 0: x = q-x - return x - -By = 4 * inv(5) -Bx = xrecover(By) -B = [Bx % q,By % q] - -#def edwards(P,Q): -# x1 = P[0] -# y1 = P[1] -# x2 = Q[0] -# y2 = Q[1] -# x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2) -# y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2) -# return (x3 % q,y3 % q) - -#def scalarmult(P,e): -# if e == 0: return [0,1] -# Q = scalarmult(P,e/2) -# Q = edwards(Q,Q) -# if e & 1: Q = edwards(Q,P) -# return Q - -# Faster (!) version based on: -# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html - -def xpt_add(pt1, pt2): - (X1, Y1, Z1, T1) = pt1 - (X2, Y2, Z2, T2) = pt2 - A = ((Y1-X1)*(Y2+X2)) % q - B = ((Y1+X1)*(Y2-X2)) % q - C = (Z1*2*T2) % q - D = (T1*2*Z2) % q - E = (D+C) % q - F = (B-A) % q - G = (B+A) % q - H = (D-C) % q - X3 = (E*F) % q - Y3 = (G*H) % q - Z3 = (F*G) % q - T3 = (E*H) % q - return (X3, Y3, Z3, T3) - -def xpt_double (pt): - (X1, Y1, Z1, _) = pt - A = (X1*X1) - B = (Y1*Y1) - C = (2*Z1*Z1) - D = (-A) % q - J = (X1+Y1) % q - E = (J*J-A-B) % q - G = (D+B) % q - F = (G-C) % q - H = (D-B) % q - X3 = (E*F) % q - Y3 = (G*H) % q - Z3 = (F*G) % q - T3 = (E*H) % q - return (X3, Y3, Z3, T3) - -def pt_xform (pt): - (x, y) = pt - return (x, y, 1, (x*y)%q) - -def pt_unxform (pt): - (x, y, z, _) = pt - return ((x*inv(z))%q, (y*inv(z))%q) - -def xpt_mult (pt, n): - if n==0: return pt_xform((0,1)) - _ = xpt_double(xpt_mult(pt, n>>1)) - return xpt_add(_, pt) if n&1 else _ - -def scalarmult(pt, e): - return pt_unxform(xpt_mult(pt_xform(pt), e)) - -def encodeint(y): - bits = [(y >> i) & 1 for i in range(b)] - e = [(sum([bits[i * 8 + j] << j for j in range(8)])) - for i in range(b//8)] - return asbytes(e) - -def encodepoint(P): - x = P[0] - y = P[1] - bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1] - e = [(sum([bits[i * 8 + j] << j for j in range(8)])) - for i in range(b//8)] - return asbytes(e) - 
-def publickey(sk): - h = H(sk) - a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2)) - A = scalarmult(B,a) - return encodepoint(A) - -def Hint(m): - h = H(m) - return sum(2**i * bit(h,i) for i in range(2*b)) - -def signature(m,sk,pk): - h = H(sk) - a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2)) - inter = joinbytes([h[i] for i in range(b//8,b//4)]) - r = Hint(inter + m) - R = scalarmult(B,r) - S = (r + Hint(encodepoint(R) + pk + m) * a) % l - return encodepoint(R) + encodeint(S) - -def isoncurve(P): - x = P[0] - y = P[1] - return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0 - -def decodeint(s): - return sum(2**i * bit(s,i) for i in range(0,b)) - -def decodepoint(s): - y = sum(2**i * bit(s,i) for i in range(0,b-1)) - x = xrecover(y) - if x & 1 != bit(s,b-1): x = q-x - P = [x,y] - if not isoncurve(P): raise Exception("decoding point that is not on curve") - return P - -def checkvalid(s, m, pk): - if len(s) != b//4: raise Exception("signature length is wrong") - if len(pk) != b//8: raise Exception("public-key length is wrong") - R = decodepoint(s[0:b//8]) - A = decodepoint(pk) - S = decodeint(s[b//8:b//4]) - h = Hint(encodepoint(R) + pk + m) - v1 = scalarmult(B,S) -# v2 = edwards(R,scalarmult(A,h)) - v2 = pt_unxform(xpt_add(pt_xform(R), pt_xform(scalarmult(A, h)))) - return v1==v2 - -########################################################## -# -# Curve25519 reference implementation by Matthew Dempsky, from: -# http://cr.yp.to/highspeed/naclcrypto-20090310.pdf - -# P = 2 ** 255 - 19 -P = q -A = 486662 - -#def expmod(b, e, m): -# if e == 0: return 1 -# t = expmod(b, e / 2, m) ** 2 % m -# if e & 1: t = (t * b) % m -# return t - -# def inv(x): return expmod(x, P - 2, P) - -def add(n, m, d): - (xn, zn) = n - (xm, zm) = m - (xd, zd) = d - x = 4 * (xm * xn - zm * zn) ** 2 * zd - z = 4 * (xm * zn - zm * xn) ** 2 * xd - return (x % P, z % P) - -def double(n): - (xn, zn) = n - x = (xn ** 2 - zn ** 2) ** 2 - z = 4 * xn * zn * (xn ** 2 + A * xn * zn + zn ** 2) - return 
(x % P, z % P) - -def curve25519(n, base=9): - one = (base,1) - two = double(one) - # f(m) evaluates to a tuple - # containing the mth multiple and the - # (m+1)th multiple of base. - def f(m): - if m == 1: return (one, two) - (pm, pm1) = f(m // 2) - if (m & 1): - return (add(pm, pm1, one), double(pm1)) - return (double(pm), add(pm, pm1, one)) - ((x,z), _) = f(n) - return (x * inv(z)) % P - -import random - -def genkey(n=0): - n = n or random.randint(0,P) - n &= ~7 - n &= ~(128 << 8 * 31) - n |= 64 << 8 * 31 - return n - -#def str2int(s): -# return int(hexlify(s), 16) -# # return sum(ord(s[i]) << (8 * i) for i in range(32)) -# -#def int2str(n): -# return unhexlify("%x" % n) -# # return ''.join([chr((n >> (8 * i)) & 255) for i in range(32)]) - -################################################# - -def dsa_test(): - import os - msg = str(random.randint(q,q+q)).encode('utf-8') - sk = os.urandom(32) - pk = publickey(sk) - sig = signature(msg, sk, pk) - return checkvalid(sig, msg, pk) - -def dh_test(): - sk1 = genkey() - sk2 = genkey() - return curve25519(sk1, curve25519(sk2)) == curve25519(sk2, curve25519(sk1)) - diff --git a/Shared/lib/python3.4/site-packages/wheel/signatures/ed25519py.py b/Shared/lib/python3.4/site-packages/wheel/signatures/ed25519py.py deleted file mode 100644 index 55eba2e..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/signatures/ed25519py.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- - -import warnings -import os - -from collections import namedtuple -from . 
import djbec - -__all__ = ['crypto_sign', 'crypto_sign_open', 'crypto_sign_keypair', 'Keypair', - 'PUBLICKEYBYTES', 'SECRETKEYBYTES', 'SIGNATUREBYTES'] - -PUBLICKEYBYTES=32 -SECRETKEYBYTES=64 -SIGNATUREBYTES=64 - -Keypair = namedtuple('Keypair', ('vk', 'sk')) # verifying key, secret key - -def crypto_sign_keypair(seed=None): - """Return (verifying, secret) key from a given seed, or os.urandom(32)""" - if seed is None: - seed = os.urandom(PUBLICKEYBYTES) - else: - warnings.warn("ed25519ll should choose random seed.", - RuntimeWarning) - if len(seed) != 32: - raise ValueError("seed must be 32 random bytes or None.") - skbytes = seed - vkbytes = djbec.publickey(skbytes) - return Keypair(vkbytes, skbytes+vkbytes) - - -def crypto_sign(msg, sk): - """Return signature+message given message and secret key. - The signature is the first SIGNATUREBYTES bytes of the return value. - A copy of msg is in the remainder.""" - if len(sk) != SECRETKEYBYTES: - raise ValueError("Bad signing key length %d" % len(sk)) - vkbytes = sk[PUBLICKEYBYTES:] - skbytes = sk[:PUBLICKEYBYTES] - sig = djbec.signature(msg, skbytes, vkbytes) - return sig + msg - - -def crypto_sign_open(signed, vk): - """Return message given signature+message and the verifying key.""" - if len(vk) != PUBLICKEYBYTES: - raise ValueError("Bad verifying key length %d" % len(vk)) - rc = djbec.checkvalid(signed[:SIGNATUREBYTES], signed[SIGNATUREBYTES:], vk) - if not rc: - raise ValueError("rc != True", rc) - return signed[SIGNATUREBYTES:] - diff --git a/Shared/lib/python3.4/site-packages/wheel/signatures/keys.py b/Shared/lib/python3.4/site-packages/wheel/signatures/keys.py deleted file mode 100644 index 1dde4bf..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/signatures/keys.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Store and retrieve wheel signing / verifying keys. - -Given a scope (a package name, + meaning "all packages", or - meaning -"no packages"), return a list of verifying keys that are trusted for that -scope. 
- -Given a package name, return a list of (scope, key) suggested keys to sign -that package (only the verifying keys; the private signing key is stored -elsewhere). - -Keys here are represented as urlsafe_b64encoded strings with no padding. - -Tentative command line interface: - -# list trusts -wheel trust -# trust a particular key for all -wheel trust + key -# trust key for beaglevote -wheel trust beaglevote key -# stop trusting a key for all -wheel untrust + key - -# generate a key pair -wheel keygen - -# import a signing key from a file -wheel import keyfile - -# export a signing key -wheel export key -""" - -import json -import os.path -from wheel.util import native, load_config_paths, save_config_path - -class WheelKeys(object): - SCHEMA = 1 - CONFIG_NAME = 'wheel.json' - - def __init__(self): - self.data = {'signers':[], 'verifiers':[]} - - def load(self): - # XXX JSON is not a great database - for path in load_config_paths('wheel'): - conf = os.path.join(native(path), self.CONFIG_NAME) - if os.path.exists(conf): - with open(conf, 'r') as infile: - self.data = json.load(infile) - for x in ('signers', 'verifiers'): - if not x in self.data: - self.data[x] = [] - if 'schema' not in self.data: - self.data['schema'] = self.SCHEMA - elif self.data['schema'] != self.SCHEMA: - raise ValueError( - "Bad wheel.json version {0}, expected {1}".format( - self.data['schema'], self.SCHEMA)) - break - return self - - def save(self): - # Try not to call this a very long time after load() - path = save_config_path('wheel') - conf = os.path.join(native(path), self.CONFIG_NAME) - with open(conf, 'w+') as out: - json.dump(self.data, out, indent=2) - return self - - def trust(self, scope, vk): - """Start trusting a particular key for given scope.""" - self.data['verifiers'].append({'scope':scope, 'vk':vk}) - return self - - def untrust(self, scope, vk): - """Stop trusting a particular key for given scope.""" - self.data['verifiers'].remove({'scope':scope, 'vk':vk}) - return self - 
- def trusted(self, scope=None): - """Return list of [(scope, trusted key), ...] for given scope.""" - trust = [(x['scope'], x['vk']) for x in self.data['verifiers'] if x['scope'] in (scope, '+')] - trust.sort(key=lambda x: x[0]) - trust.reverse() - return trust - - def signers(self, scope): - """Return list of signing key(s).""" - sign = [(x['scope'], x['vk']) for x in self.data['signers'] if x['scope'] in (scope, '+')] - sign.sort(key=lambda x: x[0]) - sign.reverse() - return sign - - def add_signer(self, scope, vk): - """Remember verifying key vk as being valid for signing in scope.""" - self.data['signers'].append({'scope':scope, 'vk':vk}) - diff --git a/Shared/lib/python3.4/site-packages/wheel/test/__init__.py b/Shared/lib/python3.4/site-packages/wheel/test/__init__.py deleted file mode 100644 index 4287ca8..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/wheel/test/complex-dist/complexdist/__init__.py b/Shared/lib/python3.4/site-packages/wheel/test/complex-dist/complexdist/__init__.py deleted file mode 100644 index 559fbb7..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/complex-dist/complexdist/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -def main(): - return diff --git a/Shared/lib/python3.4/site-packages/wheel/test/complex-dist/setup.py b/Shared/lib/python3.4/site-packages/wheel/test/complex-dist/setup.py deleted file mode 100644 index 615d5dc..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/complex-dist/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -from setuptools import setup - -try: - unicode - def u8(s): - return s.decode('unicode-escape') -except NameError: - def u8(s): - return s - -setup(name='complex-dist', - version='0.1', - description=u8('Another testing distribution \N{SNOWMAN}'), - long_description=u8('Another testing distribution \N{SNOWMAN}'), - author="Illustrious Author", - 
author_email="illustrious@example.org", - url="http://example.org/exemplary", - packages=['complexdist'], - setup_requires=["wheel", "setuptools"], - install_requires=["quux", "splort"], - extras_require={'simple':['simple.dist']}, - tests_require=["foo", "bar>=10.0.0"], - entry_points={ - 'console_scripts': [ - 'complex-dist=complexdist:main', - 'complex-dist2=complexdist:main', - ], - }, - ) - diff --git a/Shared/lib/python3.4/site-packages/wheel/test/headers.dist/headersdist.py b/Shared/lib/python3.4/site-packages/wheel/test/headers.dist/headersdist.py deleted file mode 100644 index e69de29..0000000 diff --git a/Shared/lib/python3.4/site-packages/wheel/test/headers.dist/setup.py b/Shared/lib/python3.4/site-packages/wheel/test/headers.dist/setup.py deleted file mode 100644 index 2704f01..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/headers.dist/setup.py +++ /dev/null @@ -1,16 +0,0 @@ -from setuptools import setup - -try: - unicode - def u8(s): - return s.decode('unicode-escape').encode('utf-8') -except NameError: - def u8(s): - return s.encode('utf-8') - -setup(name='headers.dist', - version='0.1', - description=u8('A distribution with headers'), - headers=['header.h'] - ) - diff --git a/Shared/lib/python3.4/site-packages/wheel/test/pydist-schema.json b/Shared/lib/python3.4/site-packages/wheel/test/pydist-schema.json deleted file mode 100644 index 566f3a4..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/pydist-schema.json +++ /dev/null @@ -1,362 +0,0 @@ -{ - "id": "http://www.python.org/dev/peps/pep-0426/", - "$schema": "http://json-schema.org/draft-04/schema#", - "title": "Metadata for Python Software Packages 2.0", - "type": "object", - "properties": { - "metadata_version": { - "description": "Version of the file format", - "type": "string", - "pattern": "^(\\d+(\\.\\d+)*)$" - }, - "generator": { - "description": "Name and version of the program that produced this file.", - "type": "string", - "pattern": 
"^[0-9A-Za-z]([0-9A-Za-z_.-]*[0-9A-Za-z])( \\(.*\\))?$" - }, - "name": { - "description": "The name of the distribution.", - "type": "string", - "$ref": "#/definitions/distribution_name" - }, - "version": { - "description": "The distribution's public version identifier", - "type": "string", - "pattern": "^(\\d+(\\.\\d+)*)((a|b|c|rc)(\\d+))?(\\.(post)(\\d+))?(\\.(dev)(\\d+))?$" - }, - "source_label": { - "description": "A constrained identifying text string", - "type": "string", - "pattern": "^[0-9a-z_.-+]+$" - }, - "source_url": { - "description": "A string containing a full URL where the source for this specific version of the distribution can be downloaded.", - "type": "string", - "format": "uri" - }, - "summary": { - "description": "A one-line summary of what the distribution does.", - "type": "string" - }, - "extras": { - "description": "A list of optional sets of dependencies that may be used to define conditional dependencies in \"may_require\" and similar fields.", - "type": "array", - "items": { - "type": "string", - "$ref": "#/definitions/extra_name" - } - }, - "meta_requires": { - "description": "A list of subdistributions made available through this metadistribution.", - "type": "array", - "$ref": "#/definitions/dependencies" - }, - "run_requires": { - "description": "A list of other distributions needed to run this distribution.", - "type": "array", - "$ref": "#/definitions/dependencies" - }, - "test_requires": { - "description": "A list of other distributions needed when this distribution is tested.", - "type": "array", - "$ref": "#/definitions/dependencies" - }, - "build_requires": { - "description": "A list of other distributions needed when this distribution is built.", - "type": "array", - "$ref": "#/definitions/dependencies" - }, - "dev_requires": { - "description": "A list of other distributions needed when this distribution is developed.", - "type": "array", - "$ref": "#/definitions/dependencies" - }, - "provides": { - "description": "A list of 
strings naming additional dependency requirements that are satisfied by installing this distribution. These strings must be of the form Name or Name (Version)", - "type": "array", - "items": { - "type": "string", - "$ref": "#/definitions/provides_declaration" - } - }, - "modules": { - "description": "A list of modules and/or packages available for import after installing this distribution.", - "type": "array", - "items": { - "type": "string", - "$ref": "#/definitions/qualified_name" - } - }, - "namespaces": { - "description": "A list of namespace packages this distribution contributes to", - "type": "array", - "items": { - "type": "string", - "$ref": "#/definitions/qualified_name" - } - }, - "obsoleted_by": { - "description": "A string that indicates that this project is no longer being developed. The named project provides a substitute or replacement.", - "type": "string", - "$ref": "#/definitions/requirement" - }, - "supports_environments": { - "description": "A list of strings specifying the environments that the distribution explicitly supports.", - "type": "array", - "items": { - "type": "string", - "$ref": "#/definitions/environment_marker" - } - }, - "install_hooks": { - "description": "The install_hooks field is used to define various operations that may be invoked on a distribution in a platform independent manner.", - "type": "object", - "properties": { - "postinstall": { - "type": "string", - "$ref": "#/definitions/export_specifier" - }, - "preuninstall": { - "type": "string", - "$ref": "#/definitions/export_specifier" - } - } - }, - "extensions": { - "description": "Extensions to the metadata may be present in a mapping under the 'extensions' key.", - "type": "object", - "$ref": "#/definitions/extensions" - } - }, - - "required": ["metadata_version", "name", "version", "summary"], - "additionalProperties": false, - - "definitions": { - "contact": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "email": { - "type": "string" 
- }, - "url": { - "type": "string" - }, - "role": { - "type": "string" - } - }, - "required": ["name"], - "additionalProperties": false - }, - "dependencies": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/dependency" - } - }, - "dependency": { - "type": "object", - "properties": { - "extra": { - "type": "string", - "$ref": "#/definitions/extra_name" - }, - "environment": { - "type": "string", - "$ref": "#/definitions/environment_marker" - }, - "requires": { - "type": "array", - "items": { - "type": "string", - "$ref": "#/definitions/requirement" - } - } - }, - "required": ["requires"], - "additionalProperties": false - }, - "extensions": { - "type": "object", - "patternProperties": { - "^[A-Za-z][0-9A-Za-z_]*([.][0-9A-Za-z_]*)*$": {} - }, - "properties": { - "python.details" : { - "description": "More information regarding the distribution.", - "type": "object", - "properties": { - "document_names": { - "description": "Names of supporting metadata documents", - "type": "object", - "properties": { - "description": { - "type": "string", - "$ref": "#/definitions/document_name" - }, - "changelog": { - "type": "string", - "$ref": "#/definitions/document_name" - }, - "license": { - "type": "string", - "$ref": "#/definitions/document_name" - } - }, - "additionalProperties": false - }, - "keywords": { - "description": "A list of additional keywords to be used to assist searching for the distribution in a larger catalog.", - "type": "array", - "items": { - "type": "string" - } - }, - "license": { - "description": "A string indicating the license covering the distribution.", - "type": "string" - }, - "classifiers": { - "description": "A list of strings, with each giving a single classification value for the distribution.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "python.project" : { - "description": "More information regarding the creation and maintenance of the distribution.", - "$ref": 
"#/definitions/project_or_integrator" - }, - "python.integrator" : { - "description": "More information regarding the downstream redistributor of the distribution.", - "$ref": "#/definitions/project_or_integrator" - }, - "python.commands" : { - "description": "Command line interfaces provided by this distribution", - "type": "object", - "$ref": "#/definitions/commands" - }, - "python.exports" : { - "description": "Other exported interfaces provided by this distribution", - "type": "object", - "$ref": "#/definitions/exports" - } - }, - "additionalProperties": false - }, - "commands": { - "type": "object", - "properties": { - "wrap_console": { - "type": "object", - "$ref": "#/definitions/command_map" - }, - "wrap_gui": { - "type": "object", - "$ref": "#/definitions/command_map" - }, - "prebuilt": { - "type": "array", - "items": { - "type": "string", - "$ref": "#/definitions/relative_path" - } - } - }, - "additionalProperties": false - }, - "exports": { - "type": "object", - "patternProperties": { - "^[A-Za-z][0-9A-Za-z_]*([.][0-9A-Za-z_]*)*$": { - "type": "object", - "patternProperties": { - ".": { - "type": "string", - "$ref": "#/definitions/export_specifier" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - "command_map": { - "type": "object", - "patternProperties": { - "^[0-9A-Za-z]([0-9A-Za-z_.-]*[0-9A-Za-z])?$": { - "type": "string", - "$ref": "#/definitions/export_specifier" - } - }, - "additionalProperties": false - }, - "project_or_integrator" : { - "type": "object", - "properties" : { - "contacts": { - "description": "A list of contributor entries giving the recommended contact points for getting more information about the project.", - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/contact" - } - }, - "contributors": { - "description": "A list of contributor entries for other contributors not already listed as current project points of contact.", - "type": "array", - "items": { - "type": 
"object", - "$ref": "#/definitions/contact" - } - }, - "project_urls": { - "description": "A mapping of arbitrary text labels to additional URLs relevant to the project.", - "type": "object" - } - } - }, - "distribution_name": { - "type": "string", - "pattern": "^[0-9A-Za-z]([0-9A-Za-z_.-]*[0-9A-Za-z])?$" - }, - "requirement": { - "type": "string" - }, - "provides_declaration": { - "type": "string" - }, - "environment_marker": { - "type": "string" - }, - "document_name": { - "type": "string" - }, - "extra_name" : { - "type": "string", - "pattern": "^[0-9A-Za-z]([0-9A-Za-z_.-]*[0-9A-Za-z])?$" - }, - "relative_path" : { - "type": "string" - }, - "export_specifier": { - "type": "string", - "pattern": "^([A-Za-z_][A-Za-z_0-9]*([.][A-Za-z_][A-Za-z_0-9]*)*)(:[A-Za-z_][A-Za-z_0-9]*([.][A-Za-z_][A-Za-z_0-9]*)*)?(\\[[0-9A-Za-z]([0-9A-Za-z_.-]*[0-9A-Za-z])?\\])?$" - }, - "qualified_name" : { - "type": "string", - "pattern": "^[A-Za-z_][A-Za-z_0-9]*([.][A-Za-z_][A-Za-z_0-9]*)*$" - }, - "prefixed_name" : { - "type": "string", - "pattern": "^[A-Za-z_][A-Za-z_0-9]*([.][A-Za-z_0-9]*)*$" - } - } -} diff --git a/Shared/lib/python3.4/site-packages/wheel/test/simple.dist/setup.py b/Shared/lib/python3.4/site-packages/wheel/test/simple.dist/setup.py deleted file mode 100644 index 50c909f..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/simple.dist/setup.py +++ /dev/null @@ -1,17 +0,0 @@ -from setuptools import setup - -try: - unicode - def u8(s): - return s.decode('unicode-escape').encode('utf-8') -except NameError: - def u8(s): - return s.encode('utf-8') - -setup(name='simple.dist', - version='0.1', - description=u8('A testing distribution \N{SNOWMAN}'), - packages=['simpledist'], - extras_require={'voting': ['beaglevote']}, - ) - diff --git a/Shared/lib/python3.4/site-packages/wheel/test/test-1.0-py2.py3-none-win32.whl b/Shared/lib/python3.4/site-packages/wheel/test/test-1.0-py2.py3-none-win32.whl deleted file mode 100644 index 095583e..0000000 Binary files 
a/Shared/lib/python3.4/site-packages/wheel/test/test-1.0-py2.py3-none-win32.whl and /dev/null differ diff --git a/Shared/lib/python3.4/site-packages/wheel/test/test_basic.py b/Shared/lib/python3.4/site-packages/wheel/test/test_basic.py deleted file mode 100644 index e69fef9..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/test_basic.py +++ /dev/null @@ -1,176 +0,0 @@ -""" -Basic wheel tests. -""" - -import os -import pkg_resources -import json -import sys - -from pkg_resources import resource_filename - -import wheel.util -import wheel.tool - -from wheel import egg2wheel -from wheel.install import WheelFile -from zipfile import ZipFile -from shutil import rmtree - -test_distributions = ("complex-dist", "simple.dist", "headers.dist") - -def teardown_module(): - """Delete eggs/wheels created by tests.""" - base = pkg_resources.resource_filename('wheel.test', '') - for dist in test_distributions: - for subdir in ('build', 'dist'): - try: - rmtree(os.path.join(base, dist, subdir)) - except OSError: - pass - -def setup_module(): - build_wheel() - build_egg() - -def build_wheel(): - """Build wheels from test distributions.""" - for dist in test_distributions: - pwd = os.path.abspath(os.curdir) - distdir = pkg_resources.resource_filename('wheel.test', dist) - os.chdir(distdir) - try: - sys.argv = ['', 'bdist_wheel'] - exec(compile(open('setup.py').read(), 'setup.py', 'exec')) - finally: - os.chdir(pwd) - -def build_egg(): - """Build eggs from test distributions.""" - for dist in test_distributions: - pwd = os.path.abspath(os.curdir) - distdir = pkg_resources.resource_filename('wheel.test', dist) - os.chdir(distdir) - try: - sys.argv = ['', 'bdist_egg'] - exec(compile(open('setup.py').read(), 'setup.py', 'exec')) - finally: - os.chdir(pwd) - -def test_findable(): - """Make sure pkg_resources can find us.""" - assert pkg_resources.working_set.by_key['wheel'].version - -def test_egg_re(): - """Make sure egg_info_re matches.""" - egg_names = 
open(pkg_resources.resource_filename('wheel', 'eggnames.txt')) - for line in egg_names: - line = line.strip() - if not line: - continue - assert egg2wheel.egg_info_re.match(line), line - -def test_compatibility_tags(): - """Test compatibilty tags are working.""" - wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl") - assert (list(wf.compatibility_tags) == - [('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')]) - assert (wf.arity == 2) - - wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl") - wf2_info = wf2.parsed_filename.groupdict() - assert wf2_info['build'] == '1st', wf2_info - -def test_convert_egg(): - base = pkg_resources.resource_filename('wheel.test', '') - for dist in test_distributions: - distdir = os.path.join(base, dist, 'dist') - eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')] - wheel.tool.convert(eggs, distdir, verbose=False) - -def test_unpack(): - """ - Make sure 'wheel unpack' works. - This also verifies the integrity of our testing wheel files. 
- """ - for dist in test_distributions: - distdir = pkg_resources.resource_filename('wheel.test', - os.path.join(dist, 'dist')) - for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')): - wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir) - -def test_no_scripts(): - """Make sure entry point scripts are not generated.""" - dist = "complex-dist" - basedir = pkg_resources.resource_filename('wheel.test', dist) - for (dirname, subdirs, filenames) in os.walk(basedir): - for filename in filenames: - if filename.endswith('.whl'): - whl = ZipFile(os.path.join(dirname, filename)) - for entry in whl.infolist(): - assert not '.data/scripts/' in entry.filename - -def test_pydist(): - """Make sure pydist.json exists and validates against our schema.""" - # XXX this test may need manual cleanup of older wheels - - import jsonschema - - def open_json(filename): - return json.loads(open(filename, 'rb').read().decode('utf-8')) - - pymeta_schema = open_json(resource_filename('wheel.test', - 'pydist-schema.json')) - valid = 0 - for dist in ("simple.dist", "complex-dist"): - basedir = pkg_resources.resource_filename('wheel.test', dist) - for (dirname, subdirs, filenames) in os.walk(basedir): - for filename in filenames: - if filename.endswith('.whl'): - whl = ZipFile(os.path.join(dirname, filename)) - for entry in whl.infolist(): - if entry.filename.endswith('/metadata.json'): - pymeta = json.loads(whl.read(entry).decode('utf-8')) - jsonschema.validate(pymeta, pymeta_schema) - valid += 1 - assert valid > 0, "No metadata.json found" - -def test_util(): - """Test functions in util.py.""" - for i in range(10): - before = b'*' * i - encoded = wheel.util.urlsafe_b64encode(before) - assert not encoded.endswith(b'=') - after = wheel.util.urlsafe_b64decode(encoded) - assert before == after - - -def test_pick_best(): - """Test the wheel ranking algorithm.""" - def get_tags(res): - info = res[-1].parsed_filename.groupdict() - return info['pyver'], info['abi'], 
info['plat'] - - cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'), - ('cp27', 'noabi', 'linux_i686'), - ('cp26', 'noabi', 'linux_i686'), - ('cp27', 'noabi', 'linux_x86_64'), - ('cp26', 'noabi', 'linux_x86_64')] - cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t) - for t in cand_tags] - - supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')] - supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'), - ('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')] - supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'), - ('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')] - - for supp in (supported, supported2, supported3): - context = lambda: list(supp) - for wheel in cand_wheels: - wheel.context = context - best = max(cand_wheels) - assert list(best.tags)[0] == supp[0] - - # assert_equal( - # list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp) diff --git a/Shared/lib/python3.4/site-packages/wheel/test/test_install.py b/Shared/lib/python3.4/site-packages/wheel/test/test_install.py deleted file mode 100644 index ddcddf5..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/test_install.py +++ /dev/null @@ -1,55 +0,0 @@ -# Test wheel. -# The file has the following contents: -# hello.pyd -# hello/hello.py -# hello/__init__.py -# test-1.0.data/data/hello.dat -# test-1.0.data/headers/hello.dat -# test-1.0.data/scripts/hello.sh -# test-1.0.dist-info/WHEEL -# test-1.0.dist-info/METADATA -# test-1.0.dist-info/RECORD -# The root is PLATLIB -# So, some in PLATLIB, and one in each of DATA, HEADERS and SCRIPTS. 
- -import wheel.tool -import wheel.pep425tags -from wheel.install import WheelFile -from tempfile import mkdtemp -import shutil -import os - -THISDIR = os.path.dirname(__file__) -TESTWHEEL = os.path.join(THISDIR, 'test-1.0-py2.py3-none-win32.whl') - -def check(*path): - return os.path.exists(os.path.join(*path)) - -def test_install(): - tempdir = mkdtemp() - def get_supported(): - return list(wheel.pep425tags.get_supported()) + [('py3', 'none', 'win32')] - whl = WheelFile(TESTWHEEL, context=get_supported) - assert whl.supports_current_python(get_supported) - try: - locs = {} - for key in ('purelib', 'platlib', 'scripts', 'headers', 'data'): - locs[key] = os.path.join(tempdir, key) - os.mkdir(locs[key]) - whl.install(overrides=locs) - assert len(os.listdir(locs['purelib'])) == 0 - assert check(locs['platlib'], 'hello.pyd') - assert check(locs['platlib'], 'hello', 'hello.py') - assert check(locs['platlib'], 'hello', '__init__.py') - assert check(locs['data'], 'hello.dat') - assert check(locs['headers'], 'hello.dat') - assert check(locs['scripts'], 'hello.sh') - assert check(locs['platlib'], 'test-1.0.dist-info', 'RECORD') - finally: - shutil.rmtree(tempdir) - -def test_install_tool(): - """Slightly improve coverage of wheel.install""" - wheel.tool.install([TESTWHEEL], force=True, dry_run=True) - - \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/wheel/test/test_keys.py b/Shared/lib/python3.4/site-packages/wheel/test/test_keys.py deleted file mode 100644 index f96166b..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/test_keys.py +++ /dev/null @@ -1,98 +0,0 @@ -import tempfile -import os.path -import unittest -import json - -from wheel.signatures import keys - -wheel_json = """ -{ - "verifiers": [ - { - "scope": "+", - "vk": "bp-bjK2fFgtA-8DhKKAAPm9-eAZcX_u03oBv2RlKOBc" - }, - { - "scope": "+", - "vk": "KAHZBfyqFW3OcFDbLSG4nPCjXxUPy72phP9I4Rn9MAo" - }, - { - "scope": "+", - "vk": "tmAYCrSfj8gtJ10v3VkvW7jOndKmQIYE12hgnFu3cvk" - } - 
], - "signers": [ - { - "scope": "+", - "vk": "tmAYCrSfj8gtJ10v3VkvW7jOndKmQIYE12hgnFu3cvk" - }, - { - "scope": "+", - "vk": "KAHZBfyqFW3OcFDbLSG4nPCjXxUPy72phP9I4Rn9MAo" - } - ], - "schema": 1 -} -""" - -class TestWheelKeys(unittest.TestCase): - def setUp(self): - self.config = tempfile.NamedTemporaryFile(suffix='.json') - self.config.close() - - self.config_path, self.config_filename = os.path.split(self.config.name) - def load(*args): - return [self.config_path] - def save(*args): - return self.config_path - keys.load_config_paths = load - keys.save_config_path = save - self.wk = keys.WheelKeys() - self.wk.CONFIG_NAME = self.config_filename - - def tearDown(self): - os.unlink(self.config.name) - - def test_load_save(self): - self.wk.data = json.loads(wheel_json) - - self.wk.add_signer('+', '67890') - self.wk.add_signer('scope', 'abcdefg') - - self.wk.trust('epocs', 'gfedcba') - self.wk.trust('+', '12345') - - self.wk.save() - - del self.wk.data - self.wk.load() - - signers = self.wk.signers('scope') - self.assertTrue(signers[0] == ('scope', 'abcdefg'), self.wk.data['signers']) - self.assertTrue(signers[1][0] == '+', self.wk.data['signers']) - - trusted = self.wk.trusted('epocs') - self.assertTrue(trusted[0] == ('epocs', 'gfedcba')) - self.assertTrue(trusted[1][0] == '+') - - self.wk.untrust('epocs', 'gfedcba') - trusted = self.wk.trusted('epocs') - self.assertTrue(('epocs', 'gfedcba') not in trusted) - - def test_load_save_incomplete(self): - self.wk.data = json.loads(wheel_json) - del self.wk.data['signers'] - self.wk.data['schema'] = self.wk.SCHEMA+1 - self.wk.save() - try: - self.wk.load() - except ValueError: - pass - else: - raise Exception("Expected ValueError") - - del self.wk.data['schema'] - self.wk.save() - self.wk.load() - - diff --git a/Shared/lib/python3.4/site-packages/wheel/test/test_paths.py b/Shared/lib/python3.4/site-packages/wheel/test/test_paths.py deleted file mode 100644 index a23d506..0000000 --- 
a/Shared/lib/python3.4/site-packages/wheel/test/test_paths.py +++ /dev/null @@ -1,6 +0,0 @@ -import wheel.paths -from distutils.command.install import SCHEME_KEYS - -def test_path(): - d = wheel.paths.get_install_paths('wheel') - assert len(d) == len(SCHEME_KEYS) diff --git a/Shared/lib/python3.4/site-packages/wheel/test/test_ranking.py b/Shared/lib/python3.4/site-packages/wheel/test/test_ranking.py deleted file mode 100644 index 1632a13..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/test_ranking.py +++ /dev/null @@ -1,43 +0,0 @@ -import unittest - -from wheel.pep425tags import get_supported -from wheel.install import WheelFile - -WHEELPAT = "%(name)s-%(ver)s-%(pyver)s-%(abi)s-%(arch)s.whl" -def make_wheel(name, ver, pyver, abi, arch): - name = WHEELPAT % dict(name=name, ver=ver, pyver=pyver, abi=abi, - arch=arch) - return WheelFile(name) - -# This relies on the fact that generate_supported will always return the -# exact pyver, abi, and architecture for its first (best) match. -sup = get_supported() -pyver, abi, arch = sup[0] -genver = 'py' + pyver[2:] -majver = genver[:3] - -COMBINATIONS = ( - ('bar', '0.9', 'py2.py3', 'none', 'any'), - ('bar', '0.9', majver, 'none', 'any'), - ('bar', '0.9', genver, 'none', 'any'), - ('bar', '0.9', pyver, abi, arch), - ('bar', '1.3.2', majver, 'none', 'any'), - ('bar', '3.1', genver, 'none', 'any'), - ('bar', '3.1', pyver, abi, arch), - ('foo', '1.0', majver, 'none', 'any'), - ('foo', '1.1', pyver, abi, arch), - ('foo', '2.1', majver + '0', 'none', 'any'), - # This will not be compatible for Python x.0. Beware when we hit Python - # 4.0, and don't test with 3.0!!! 
- ('foo', '2.1', majver + '1', 'none', 'any'), - ('foo', '2.1', pyver , 'none', 'any'), - ('foo', '2.1', pyver , abi, arch), -) - -WHEELS = [ make_wheel(*args) for args in COMBINATIONS ] - -class TestRanking(unittest.TestCase): - def test_comparison(self): - for i in range(len(WHEELS)-1): - for j in range(i): - self.assertTrue(WHEELS[j]') - setup_py = SETUP_PY.format(ext_modules=EXT_MODULES) - else: - setup_py = SETUP_PY.format(ext_modules='') - temppath.join('setup.py').write(setup_py) - return temppath - -@pytest.fixture -def temp_ext_pkg(request): - return temp_pkg(request, ext=True) - -def test_default_tag(temp_pkg): - subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename == 'Test-1.0-py%s-none-any.whl' % (sys.version[0],) - assert wheels[0].ext == '.whl' - -def test_explicit_tag(temp_pkg): - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel', '--python-tag=py32'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.startswith('Test-1.0-py32-') - assert wheels[0].ext == '.whl' - -def test_universal_tag(temp_pkg): - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel', '--universal'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.startswith('Test-1.0-py2.py3-') - assert wheels[0].ext == '.whl' - -def test_universal_beats_explicit_tag(temp_pkg): - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel', '--universal', '--python-tag=py32'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert 
wheels[0].basename.startswith('Test-1.0-py2.py3-') - assert wheels[0].ext == '.whl' - -def test_universal_in_setup_cfg(temp_pkg): - temp_pkg.join('setup.cfg').write('[bdist_wheel]\nuniversal=1') - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.startswith('Test-1.0-py2.py3-') - assert wheels[0].ext == '.whl' - -def test_pythontag_in_setup_cfg(temp_pkg): - temp_pkg.join('setup.cfg').write('[bdist_wheel]\npython_tag=py32') - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.startswith('Test-1.0-py32-') - assert wheels[0].ext == '.whl' - -def test_legacy_wheel_section_in_setup_cfg(temp_pkg): - temp_pkg.join('setup.cfg').write('[wheel]\nuniversal=1') - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.startswith('Test-1.0-py2.py3-') - assert wheels[0].ext == '.whl' - -def test_plat_name_purepy(temp_pkg): - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=testplat.pure'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.endswith('-testplat_pure.whl') - assert wheels[0].ext == '.whl' - -def test_plat_name_ext(temp_ext_pkg): - try: - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=testplat.arch'], - cwd=str(temp_ext_pkg)) - except subprocess.CalledProcessError: - pytest.skip("Cannot compile C Extensions") - dist_dir = 
temp_ext_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.endswith('-testplat_arch.whl') - assert wheels[0].ext == '.whl' - -def test_plat_name_purepy_in_setupcfg(temp_pkg): - temp_pkg.join('setup.cfg').write('[bdist_wheel]\nplat_name=testplat.pure') - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel'], - cwd=str(temp_pkg)) - dist_dir = temp_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.endswith('-testplat_pure.whl') - assert wheels[0].ext == '.whl' - -def test_plat_name_ext_in_setupcfg(temp_ext_pkg): - temp_ext_pkg.join('setup.cfg').write('[bdist_wheel]\nplat_name=testplat.arch') - try: - subprocess.check_call( - [sys.executable, 'setup.py', 'bdist_wheel'], - cwd=str(temp_ext_pkg)) - except subprocess.CalledProcessError: - pytest.skip("Cannot compile C Extensions") - dist_dir = temp_ext_pkg.join('dist') - assert dist_dir.check(dir=1) - wheels = dist_dir.listdir() - assert len(wheels) == 1 - assert wheels[0].basename.endswith('-testplat_arch.whl') - assert wheels[0].ext == '.whl' diff --git a/Shared/lib/python3.4/site-packages/wheel/test/test_tool.py b/Shared/lib/python3.4/site-packages/wheel/test/test_tool.py deleted file mode 100644 index 078f1ed..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/test_tool.py +++ /dev/null @@ -1,25 +0,0 @@ -from .. 
import tool - -def test_keygen(): - def get_keyring(): - WheelKeys, keyring = tool.get_keyring() - - class WheelKeysTest(WheelKeys): - def save(self): - pass - - class keyringTest: - @classmethod - def get_keyring(cls): - class keyringTest2: - pw = None - def set_password(self, a, b, c): - self.pw = c - def get_password(self, a, b): - return self.pw - - return keyringTest2() - - return WheelKeysTest, keyringTest - - tool.keygen(get_keyring=get_keyring) diff --git a/Shared/lib/python3.4/site-packages/wheel/test/test_wheelfile.py b/Shared/lib/python3.4/site-packages/wheel/test/test_wheelfile.py deleted file mode 100644 index 181668f..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/test/test_wheelfile.py +++ /dev/null @@ -1,142 +0,0 @@ -import os -import wheel.install -import wheel.archive -import hashlib -try: - from StringIO import StringIO -except ImportError: - from io import BytesIO as StringIO -import codecs -import zipfile -import pytest -import shutil -import tempfile -from contextlib import contextmanager - -@contextmanager -def environ(key, value): - old_value = os.environ.get(key) - try: - os.environ[key] = value - yield - finally: - if old_value is None: - del os.environ[key] - else: - os.environ[key] = old_value - -@contextmanager -def temporary_directory(): - # tempfile.TemporaryDirectory doesn't exist in Python 2. - tempdir = tempfile.mkdtemp() - try: - yield tempdir - finally: - shutil.rmtree(tempdir) - -@contextmanager -def readable_zipfile(path): - # zipfile.ZipFile() isn't a context manager under Python 2. - zf = zipfile.ZipFile(path, 'r') - try: - yield zf - finally: - zf.close() - - -def test_verifying_zipfile(): - if not hasattr(zipfile.ZipExtFile, '_update_crc'): - pytest.skip('No ZIP verification. 
Missing ZipExtFile._update_crc.') - - sio = StringIO() - zf = zipfile.ZipFile(sio, 'w') - zf.writestr("one", b"first file") - zf.writestr("two", b"second file") - zf.writestr("three", b"third file") - zf.close() - - # In default mode, VerifyingZipFile checks the hash of any read file - # mentioned with set_expected_hash(). Files not mentioned with - # set_expected_hash() are not checked. - vzf = wheel.install.VerifyingZipFile(sio, 'r') - vzf.set_expected_hash("one", hashlib.sha256(b"first file").digest()) - vzf.set_expected_hash("three", "blurble") - vzf.open("one").read() - vzf.open("two").read() - try: - vzf.open("three").read() - except wheel.install.BadWheelFile: - pass - else: - raise Exception("expected exception 'BadWheelFile()'") - - # In strict mode, VerifyingZipFile requires every read file to be - # mentioned with set_expected_hash(). - vzf.strict = True - try: - vzf.open("two").read() - except wheel.install.BadWheelFile: - pass - else: - raise Exception("expected exception 'BadWheelFile()'") - - vzf.set_expected_hash("two", None) - vzf.open("two").read() - -def test_pop_zipfile(): - sio = StringIO() - zf = wheel.install.VerifyingZipFile(sio, 'w') - zf.writestr("one", b"first file") - zf.writestr("two", b"second file") - zf.close() - - try: - zf.pop() - except RuntimeError: - pass # already closed - else: - raise Exception("expected RuntimeError") - - zf = wheel.install.VerifyingZipFile(sio, 'a') - zf.pop() - zf.close() - - zf = wheel.install.VerifyingZipFile(sio, 'r') - assert len(zf.infolist()) == 1 - -def test_zipfile_timestamp(): - # An environment variable can be used to influence the timestamp on - # TarInfo objects inside the zip. See issue #143. TemporaryDirectory is - # not a context manager under Python 3. 
- with temporary_directory() as tempdir: - for filename in ('one', 'two', 'three'): - path = os.path.join(tempdir, filename) - with codecs.open(path, 'w', encoding='utf-8') as fp: - fp.write(filename + '\n') - zip_base_name = os.path.join(tempdir, 'dummy') - # The earliest date representable in TarInfos, 1980-01-01 - with environ('SOURCE_DATE_EPOCH', '315576060'): - zip_filename = wheel.archive.make_wheelfile_inner( - zip_base_name, tempdir) - with readable_zipfile(zip_filename) as zf: - for info in zf.infolist(): - assert info.date_time[:3] == (1980, 1, 1) - -def test_zipfile_attributes(): - # With the change from ZipFile.write() to .writestr(), we need to manually - # set member attributes. - with temporary_directory() as tempdir: - files = (('foo', 0o644), ('bar', 0o755)) - for filename, mode in files: - path = os.path.join(tempdir, filename) - with codecs.open(path, 'w', encoding='utf-8') as fp: - fp.write(filename + '\n') - os.chmod(path, mode) - zip_base_name = os.path.join(tempdir, 'dummy') - zip_filename = wheel.archive.make_wheelfile_inner( - zip_base_name, tempdir) - with readable_zipfile(zip_filename) as zf: - for filename, mode in files: - info = zf.getinfo(os.path.join(tempdir, filename)) - assert info.external_attr == (mode | 0o100000) << 16 - assert info.compress_type == zipfile.ZIP_DEFLATED diff --git a/Shared/lib/python3.4/site-packages/wheel/tool/__init__.py b/Shared/lib/python3.4/site-packages/wheel/tool/__init__.py deleted file mode 100644 index 95f0a9b..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/tool/__init__.py +++ /dev/null @@ -1,360 +0,0 @@ -""" -Wheel command-line utility. -""" - -import os -import hashlib -import sys -import json -import wheel.paths - -from glob import iglob -from .. 
import signatures -from ..util import (urlsafe_b64decode, urlsafe_b64encode, native, binary, - matches_requirement) -from ..install import WheelFile - -def require_pkgresources(name): - try: - import pkg_resources - except ImportError: - raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name)) - -import argparse - -class WheelError(Exception): pass - -# For testability -def get_keyring(): - try: - from ..signatures import keys - import keyring - assert keyring.get_keyring().priority - except (ImportError, AssertionError): - raise WheelError("Install wheel[signatures] (requires keyring, keyrings.alt, pyxdg) for signatures.") - return keys.WheelKeys, keyring - -def keygen(get_keyring=get_keyring): - """Generate a public/private key pair.""" - WheelKeys, keyring = get_keyring() - - ed25519ll = signatures.get_ed25519ll() - - wk = WheelKeys().load() - - keypair = ed25519ll.crypto_sign_keypair() - vk = native(urlsafe_b64encode(keypair.vk)) - sk = native(urlsafe_b64encode(keypair.sk)) - kr = keyring.get_keyring() - kr.set_password("wheel", vk, sk) - sys.stdout.write("Created Ed25519 keypair with vk={0}\n".format(vk)) - sys.stdout.write("in {0!r}\n".format(kr)) - - sk2 = kr.get_password('wheel', vk) - if sk2 != sk: - raise WheelError("Keyring is broken. 
Could not retrieve secret key.") - - sys.stdout.write("Trusting {0} to sign and verify all packages.\n".format(vk)) - wk.add_signer('+', vk) - wk.trust('+', vk) - wk.save() - -def sign(wheelfile, replace=False, get_keyring=get_keyring): - """Sign a wheel""" - WheelKeys, keyring = get_keyring() - - ed25519ll = signatures.get_ed25519ll() - - wf = WheelFile(wheelfile, append=True) - wk = WheelKeys().load() - - name = wf.parsed_filename.group('name') - sign_with = wk.signers(name)[0] - sys.stdout.write("Signing {0} with {1}\n".format(name, sign_with[1])) - - vk = sign_with[1] - kr = keyring.get_keyring() - sk = kr.get_password('wheel', vk) - keypair = ed25519ll.Keypair(urlsafe_b64decode(binary(vk)), - urlsafe_b64decode(binary(sk))) - - - record_name = wf.distinfo_name + '/RECORD' - sig_name = wf.distinfo_name + '/RECORD.jws' - if sig_name in wf.zipfile.namelist(): - raise WheelError("Wheel is already signed.") - record_data = wf.zipfile.read(record_name) - payload = {"hash":"sha256=" + native(urlsafe_b64encode(hashlib.sha256(record_data).digest()))} - sig = signatures.sign(payload, keypair) - wf.zipfile.writestr(sig_name, json.dumps(sig, sort_keys=True)) - wf.zipfile.close() - -def unsign(wheelfile): - """ - Remove RECORD.jws from a wheel by truncating the zip file. - - RECORD.jws must be at the end of the archive. The zip file must be an - ordinary archive, with the compressed files and the directory in the same - order, and without any non-zip content after the truncation point. - """ - import wheel.install - vzf = wheel.install.VerifyingZipFile(wheelfile, "a") - info = vzf.infolist() - if not (len(info) and info[-1].filename.endswith('/RECORD.jws')): - raise WheelError("RECORD.jws not found at end of archive.") - vzf.pop() - vzf.close() - -def verify(wheelfile): - """Verify a wheel. - - The signature will be verified for internal consistency ONLY and printed. - Wheel's own unpack/install commands verify the manifest against the - signature and file contents. 
- """ - wf = WheelFile(wheelfile) - sig_name = wf.distinfo_name + '/RECORD.jws' - sig = json.loads(native(wf.zipfile.open(sig_name).read())) - verified = signatures.verify(sig) - sys.stderr.write("Signatures are internally consistent.\n") - sys.stdout.write(json.dumps(verified, indent=2)) - sys.stdout.write('\n') - -def unpack(wheelfile, dest='.'): - """Unpack a wheel. - - Wheel content will be unpacked to {dest}/{name}-{ver}, where {name} - is the package name and {ver} its version. - - :param wheelfile: The path to the wheel. - :param dest: Destination directory (default to current directory). - """ - wf = WheelFile(wheelfile) - namever = wf.parsed_filename.group('namever') - destination = os.path.join(dest, namever) - sys.stderr.write("Unpacking to: %s\n" % (destination)) - wf.zipfile.extractall(destination) - wf.zipfile.close() - -def install(requirements, requirements_file=None, - wheel_dirs=None, force=False, list_files=False, - dry_run=False): - """Install wheels. - - :param requirements: A list of requirements or wheel files to install. - :param requirements_file: A file containing requirements to install. - :param wheel_dirs: A list of directories to search for wheels. - :param force: Install a wheel file even if it is not compatible. - :param list_files: Only list the files to install, don't install them. - :param dry_run: Do everything but the actual install. - """ - - # If no wheel directories specified, use the WHEELPATH environment - # variable, or the current directory if that is not set. 
- if not wheel_dirs: - wheelpath = os.getenv("WHEELPATH") - if wheelpath: - wheel_dirs = wheelpath.split(os.pathsep) - else: - wheel_dirs = [ os.path.curdir ] - - # Get a list of all valid wheels in wheel_dirs - all_wheels = [] - for d in wheel_dirs: - for w in os.listdir(d): - if w.endswith('.whl'): - wf = WheelFile(os.path.join(d, w)) - if wf.compatible: - all_wheels.append(wf) - - # If there is a requirements file, add it to the list of requirements - if requirements_file: - # If the file doesn't exist, search for it in wheel_dirs - # This allows standard requirements files to be stored with the - # wheels. - if not os.path.exists(requirements_file): - for d in wheel_dirs: - name = os.path.join(d, requirements_file) - if os.path.exists(name): - requirements_file = name - break - - with open(requirements_file) as fd: - requirements.extend(fd) - - to_install = [] - for req in requirements: - if req.endswith('.whl'): - # Explicitly specified wheel filename - if os.path.exists(req): - wf = WheelFile(req) - if wf.compatible or force: - to_install.append(wf) - else: - msg = ("{0} is not compatible with this Python. " - "--force to install anyway.".format(req)) - raise WheelError(msg) - else: - # We could search on wheel_dirs, but it's probably OK to - # assume the user has made an error. 
- raise WheelError("No such wheel file: {}".format(req)) - continue - - # We have a requirement spec - # If we don't have pkg_resources, this will raise an exception - matches = matches_requirement(req, all_wheels) - if not matches: - raise WheelError("No match for requirement {}".format(req)) - to_install.append(max(matches)) - - # We now have a list of wheels to install - if list_files: - sys.stdout.write("Installing:\n") - - if dry_run: - return - - for wf in to_install: - if list_files: - sys.stdout.write(" {0}\n".format(wf.filename)) - continue - wf.install(force=force) - wf.zipfile.close() - -def install_scripts(distributions): - """ - Regenerate the entry_points console_scripts for the named distribution. - """ - try: - from setuptools.command import easy_install - import pkg_resources - except ImportError: - raise RuntimeError("'wheel install_scripts' needs setuptools.") - - for dist in distributions: - pkg_resources_dist = pkg_resources.get_distribution(dist) - install = wheel.paths.get_install_command(dist) - command = easy_install.easy_install(install.distribution) - command.args = ['wheel'] # dummy argument - command.finalize_options() - command.install_egg_scripts(pkg_resources_dist) - -def convert(installers, dest_dir, verbose): - require_pkgresources('wheel convert') - - # Only support wheel convert if pkg_resources is present - from ..wininst2wheel import bdist_wininst2wheel - from ..egg2wheel import egg2wheel - - for pat in installers: - for installer in iglob(pat): - if os.path.splitext(installer)[1] == '.egg': - conv = egg2wheel - else: - conv = bdist_wininst2wheel - if verbose: - sys.stdout.write("{0}... 
".format(installer)) - sys.stdout.flush() - conv(installer, dest_dir) - if verbose: - sys.stdout.write("OK\n") - -def parser(): - p = argparse.ArgumentParser() - s = p.add_subparsers(help="commands") - - def keygen_f(args): - keygen() - keygen_parser = s.add_parser('keygen', help='Generate signing key') - keygen_parser.set_defaults(func=keygen_f) - - def sign_f(args): - sign(args.wheelfile) - sign_parser = s.add_parser('sign', help='Sign wheel') - sign_parser.add_argument('wheelfile', help='Wheel file') - sign_parser.set_defaults(func=sign_f) - - def unsign_f(args): - unsign(args.wheelfile) - unsign_parser = s.add_parser('unsign', help=unsign.__doc__) - unsign_parser.add_argument('wheelfile', help='Wheel file') - unsign_parser.set_defaults(func=unsign_f) - - def verify_f(args): - verify(args.wheelfile) - verify_parser = s.add_parser('verify', help=verify.__doc__) - verify_parser.add_argument('wheelfile', help='Wheel file') - verify_parser.set_defaults(func=verify_f) - - def unpack_f(args): - unpack(args.wheelfile, args.dest) - unpack_parser = s.add_parser('unpack', help='Unpack wheel') - unpack_parser.add_argument('--dest', '-d', help='Destination directory', - default='.') - unpack_parser.add_argument('wheelfile', help='Wheel file') - unpack_parser.set_defaults(func=unpack_f) - - def install_f(args): - install(args.requirements, args.requirements_file, - args.wheel_dirs, args.force, args.list_files) - install_parser = s.add_parser('install', help='Install wheels') - install_parser.add_argument('requirements', nargs='*', - help='Requirements to install.') - install_parser.add_argument('--force', default=False, - action='store_true', - help='Install incompatible wheel files.') - install_parser.add_argument('--wheel-dir', '-d', action='append', - dest='wheel_dirs', - help='Directories containing wheels.') - install_parser.add_argument('--requirements-file', '-r', - help="A file containing requirements to " - "install.") - install_parser.add_argument('--list', '-l', 
default=False, - dest='list_files', - action='store_true', - help="List wheels which would be installed, " - "but don't actually install anything.") - install_parser.set_defaults(func=install_f) - - def install_scripts_f(args): - install_scripts(args.distributions) - install_scripts_parser = s.add_parser('install-scripts', help='Install console_scripts') - install_scripts_parser.add_argument('distributions', nargs='*', - help='Regenerate console_scripts for these distributions') - install_scripts_parser.set_defaults(func=install_scripts_f) - - def convert_f(args): - convert(args.installers, args.dest_dir, args.verbose) - convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel') - convert_parser.add_argument('installers', nargs='*', help='Installers to convert') - convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir, - help="Directory to store wheels (default %(default)s)") - convert_parser.add_argument('--verbose', '-v', action='store_true') - convert_parser.set_defaults(func=convert_f) - - def version_f(args): - from .. import __version__ - sys.stdout.write("wheel %s\n" % __version__) - version_parser = s.add_parser('version', help='Print version and exit') - version_parser.set_defaults(func=version_f) - - def help_f(args): - p.print_help() - help_parser = s.add_parser('help', help='Show this help') - help_parser.set_defaults(func=help_f) - - return p - -def main(): - p = parser() - args = p.parse_args() - if not hasattr(args, 'func'): - p.print_help() - else: - # XXX on Python 3.3 we get 'args has no func' rather than short help. 
- try: - args.func(args) - return 0 - except WheelError as e: - sys.stderr.write(e.message + "\n") - return 1 diff --git a/Shared/lib/python3.4/site-packages/wheel/util.py b/Shared/lib/python3.4/site-packages/wheel/util.py deleted file mode 100644 index 5268813..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/util.py +++ /dev/null @@ -1,167 +0,0 @@ -"""Utility functions.""" - -import sys -import os -import base64 -import json -import hashlib -try: - from collections import OrderedDict -except ImportError: - OrderedDict = dict - -__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8', - 'to_json', 'from_json', 'matches_requirement'] - -def urlsafe_b64encode(data): - """urlsafe_b64encode without padding""" - return base64.urlsafe_b64encode(data).rstrip(binary('=')) - - -def urlsafe_b64decode(data): - """urlsafe_b64decode without padding""" - pad = b'=' * (4 - (len(data) & 3)) - return base64.urlsafe_b64decode(data + pad) - - -def to_json(o): - '''Convert given data to JSON.''' - return json.dumps(o, sort_keys=True) - - -def from_json(j): - '''Decode a JSON payload.''' - return json.loads(j) - -def open_for_csv(name, mode): - if sys.version_info[0] < 3: - nl = {} - bin = 'b' - else: - nl = { 'newline': '' } - bin = '' - return open(name, mode + bin, **nl) - -try: - unicode - - def utf8(data): - '''Utf-8 encode data.''' - if isinstance(data, unicode): - return data.encode('utf-8') - return data -except NameError: - def utf8(data): - '''Utf-8 encode data.''' - if isinstance(data, str): - return data.encode('utf-8') - return data - - -try: - # For encoding ascii back and forth between bytestrings, as is repeatedly - # necessary in JSON-based crypto under Python 3 - unicode - def native(s): - return s - def binary(s): - if isinstance(s, unicode): - return s.encode('ascii') - return s -except NameError: - def native(s): - if isinstance(s, bytes): - return s.decode('ascii') - return s - def binary(s): - if isinstance(s, str): - return s.encode('ascii') - -class 
HashingFile(object): - def __init__(self, fd, hashtype='sha256'): - self.fd = fd - self.hashtype = hashtype - self.hash = hashlib.new(hashtype) - self.length = 0 - def write(self, data): - self.hash.update(data) - self.length += len(data) - self.fd.write(data) - def close(self): - self.fd.close() - def digest(self): - if self.hashtype == 'md5': - return self.hash.hexdigest() - digest = self.hash.digest() - return self.hashtype + '=' + native(urlsafe_b64encode(digest)) - -class OrderedDefaultDict(OrderedDict): - def __init__(self, *args, **kwargs): - if not args: - self.default_factory = None - else: - if not (args[0] is None or callable(args[0])): - raise TypeError('first argument must be callable or None') - self.default_factory = args[0] - args = args[1:] - super(OrderedDefaultDict, self).__init__(*args, **kwargs) - - def __missing__ (self, key): - if self.default_factory is None: - raise KeyError(key) - self[key] = default = self.default_factory() - return default - -if sys.platform == 'win32': - import ctypes.wintypes - # CSIDL_APPDATA for reference - not used here for compatibility with - # dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order - csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28, - CSIDL_COMMON_APPDATA=35) - def get_path(name): - SHGFP_TYPE_CURRENT = 0 - buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH) - ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf) - return buf.value - - def save_config_path(*resource): - appdata = get_path("CSIDL_LOCAL_APPDATA") - path = os.path.join(appdata, *resource) - if not os.path.isdir(path): - os.makedirs(path) - return path - def load_config_paths(*resource): - ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"] - for id in ids: - base = get_path(id) - path = os.path.join(base, *resource) - if os.path.exists(path): - yield path -else: - def save_config_path(*resource): - import xdg.BaseDirectory - return xdg.BaseDirectory.save_config_path(*resource) 
- def load_config_paths(*resource): - import xdg.BaseDirectory - return xdg.BaseDirectory.load_config_paths(*resource) - -def matches_requirement(req, wheels): - """List of wheels matching a requirement. - - :param req: The requirement to satisfy - :param wheels: List of wheels to search. - """ - try: - from pkg_resources import Distribution, Requirement - except ImportError: - raise RuntimeError("Cannot use requirements without pkg_resources") - - req = Requirement.parse(req) - - selected = [] - for wf in wheels: - f = wf.parsed_filename - dist = Distribution(project_name=f.group("name"), version=f.group("ver")) - if dist in req: - selected.append(wf) - return selected diff --git a/Shared/lib/python3.4/site-packages/wheel/wininst2wheel.py b/Shared/lib/python3.4/site-packages/wheel/wininst2wheel.py deleted file mode 100644 index 297f8d1..0000000 --- a/Shared/lib/python3.4/site-packages/wheel/wininst2wheel.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python -import os.path -import re -import sys -import tempfile -import zipfile -import wheel.bdist_wheel -import distutils.dist -from distutils.archive_util import make_archive -from shutil import rmtree -from wheel.archive import archive_wheelfile -from argparse import ArgumentParser -from glob import iglob - -egg_info_re = re.compile(r'''(^|/)(?P[^/]+?)-(?P.+?) - (-(?P.+?))?(-(?P.+?))?.egg-info(/|$)''', re.VERBOSE) - -def parse_info(wininfo_name, egginfo_name): - """Extract metadata from filenames. - - Extracts the 4 metadataitems needed (name, version, pyversion, arch) from - the installer filename and the name of the egg-info directory embedded in - the zipfile (if any). - - The egginfo filename has the format:: - - name-ver(-pyver)(-arch).egg-info - - The installer filename has the format:: - - name-ver.arch(-pyver).exe - - Some things to note: - - 1. The installer filename is not definitive. An installer can be renamed - and work perfectly well as an installer. 
So more reliable data should - be used whenever possible. - 2. The egg-info data should be preferred for the name and version, because - these come straight from the distutils metadata, and are mandatory. - 3. The pyver from the egg-info data should be ignored, as it is - constructed from the version of Python used to build the installer, - which is irrelevant - the installer filename is correct here (even to - the point that when it's not there, any version is implied). - 4. The architecture must be taken from the installer filename, as it is - not included in the egg-info data. - 5. Architecture-neutral installers still have an architecture because the - installer format itself (being executable) is architecture-specific. We - should therefore ignore the architecture if the content is pure-python. - """ - - egginfo = None - if egginfo_name: - egginfo = egg_info_re.search(egginfo_name) - if not egginfo: - raise ValueError("Egg info filename %s is not valid" % - (egginfo_name,)) - - # Parse the wininst filename - # 1. Distribution name (up to the first '-') - w_name, sep, rest = wininfo_name.partition('-') - if not sep: - raise ValueError("Installer filename %s is not valid" % - (wininfo_name,)) - # Strip '.exe' - rest = rest[:-4] - # 2. Python version (from the last '-', must start with 'py') - rest2, sep, w_pyver = rest.rpartition('-') - if sep and w_pyver.startswith('py'): - rest = rest2 - w_pyver = w_pyver.replace('.', '') - else: - # Not version specific - use py2.py3. While it is possible that - # pure-Python code is not compatible with both Python 2 and 3, there - # is no way of knowing from the wininst format, so we assume the best - # here (the user can always manually rename the wheel to be more - # restrictive if needed). - w_pyver = 'py2.py3' - # 3. 
Version and architecture - w_ver, sep, w_arch = rest.rpartition('.') - if not sep: - raise ValueError("Installer filename %s is not valid" % - (wininfo_name,)) - - if egginfo: - w_name = egginfo.group('name') - w_ver = egginfo.group('ver') - - return dict(name=w_name, ver=w_ver, arch=w_arch, pyver=w_pyver) - -def bdist_wininst2wheel(path, dest_dir=os.path.curdir): - bdw = zipfile.ZipFile(path) - - # Search for egg-info in the archive - egginfo_name = None - for filename in bdw.namelist(): - if '.egg-info' in filename: - egginfo_name = filename - break - - info = parse_info(os.path.basename(path), egginfo_name) - - root_is_purelib = True - for zipinfo in bdw.infolist(): - if zipinfo.filename.startswith('PLATLIB'): - root_is_purelib = False - break - if root_is_purelib: - paths = {'purelib': ''} - else: - paths = {'platlib': ''} - - dist_info = "%(name)s-%(ver)s" % info - datadir = "%s.data/" % dist_info - - # rewrite paths to trick ZipFile into extracting an egg - # XXX grab wininst .ini - between .exe, padding, and first zip file. 
- members = [] - egginfo_name = '' - for zipinfo in bdw.infolist(): - key, basename = zipinfo.filename.split('/', 1) - key = key.lower() - basepath = paths.get(key, None) - if basepath is None: - basepath = datadir + key.lower() + '/' - oldname = zipinfo.filename - newname = basepath + basename - zipinfo.filename = newname - del bdw.NameToInfo[oldname] - bdw.NameToInfo[newname] = zipinfo - # Collect member names, but omit '' (from an entry like "PLATLIB/" - if newname: - members.append(newname) - # Remember egg-info name for the egg2dist call below - if not egginfo_name: - if newname.endswith('.egg-info'): - egginfo_name = newname - elif '.egg-info/' in newname: - egginfo_name, sep, _ = newname.rpartition('/') - dir = tempfile.mkdtemp(suffix="_b2w") - bdw.extractall(dir, members) - - # egg2wheel - abi = 'none' - pyver = info['pyver'] - arch = (info['arch'] or 'any').replace('.', '_').replace('-', '_') - # Wininst installers always have arch even if they are not - # architecture-specific (because the format itself is). - # So, assume the content is architecture-neutral if root is purelib. - if root_is_purelib: - arch = 'any' - # If the installer is architecture-specific, it's almost certainly also - # CPython-specific. 
- if arch != 'any': - pyver = pyver.replace('py', 'cp') - wheel_name = '-'.join(( - dist_info, - pyver, - abi, - arch - )) - bw = wheel.bdist_wheel.bdist_wheel(distutils.dist.Distribution()) - bw.root_is_purelib = root_is_purelib - dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info) - bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir) - bw.write_wheelfile(dist_info_dir, generator='wininst2wheel') - bw.write_record(dir, dist_info_dir) - - archive_wheelfile(os.path.join(dest_dir, wheel_name), dir) - rmtree(dir) - -def main(): - parser = ArgumentParser() - parser.add_argument('installers', nargs='*', help="Installers to convert") - parser.add_argument('--dest-dir', '-d', default=os.path.curdir, - help="Directory to store wheels (default %(default)s)") - parser.add_argument('--verbose', '-v', action='store_true') - args = parser.parse_args() - for pat in args.installers: - for installer in iglob(pat): - if args.verbose: - sys.stdout.write("{0}... ".format(installer)) - bdist_wininst2wheel(installer, args.dest_dir) - if args.verbose: - sys.stdout.write("OK\n") - -if __name__ == "__main__": - main() diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/DESCRIPTION.rst deleted file mode 100644 index e200d8e..0000000 --- a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,277 +0,0 @@ -python-zeroconf -=============== - -.. image:: https://travis-ci.org/jstasiak/python-zeroconf.svg?branch=master - :target: https://travis-ci.org/jstasiak/python-zeroconf - -.. image:: https://img.shields.io/pypi/v/zeroconf.svg - :target: https://pypi.python.org/pypi/zeroconf - -.. 
image:: https://img.shields.io/coveralls/jstasiak/python-zeroconf.svg - :target: https://coveralls.io/r/jstasiak/python-zeroconf - - -This is fork of pyzeroconf, Multicast DNS Service Discovery for Python, -originally by Paul Scott-Murphy (https://github.com/paulsm/pyzeroconf), -modified by William McBrine (https://github.com/wmcbrine/pyzeroconf). - -The original William McBrine's fork note:: - - This fork is used in all of my TiVo-related projects: HME for Python - (and therefore HME/VLC), Network Remote, Remote Proxy, and pyTivo. - Before this, I was tracking the changes for zeroconf.py in three - separate repos. I figured I should have an authoritative source. - - Although I make changes based on my experience with TiVos, I expect that - they're generally applicable. This version also includes patches found - on the now-defunct (?) Launchpad repo of pyzeroconf, and elsewhere - around the net -- not always well-documented, sorry. - -Compatible with: - -* Bonjour -* Avahi - -Compared to some other Zeroconf/Bonjour/Avahi Python packages, python-zeroconf: - -* isn't tied to Bonjour or Avahi -* doesn't use D-Bus -* doesn't force you to use particular event loop or Twisted -* is pip-installable -* has PyPI distribution - -Python compatibility --------------------- - -* CPython 2.6, 2.7, 3.3+ -* PyPy 2.2+ (possibly 1.9-2.1 as well) -* PyPy3 2.4+ - -Versioning ----------- - -This project's versions follow the following pattern: MAJOR.MINOR.PATCH. - -* MAJOR version has been 0 so far -* MINOR version is incremented on backward incompatible changes -* PATCH version is incremented on backward compatible changes - -Status ------- - -There are some people using this package. I don't actively use it and as such -any help I can offer with regard to any issues is very limited. - - -How to get python-zeroconf? 
-=========================== - -* PyPI page https://pypi.python.org/pypi/zeroconf -* GitHub project https://github.com/jstasiak/python-zeroconf - -The easiest way to install python-zeroconf is using pip:: - - pip install zeroconf - - - -How do I use it? -================ - -Here's an example: - -.. code-block:: python - - from six.moves import input - from zeroconf import ServiceBrowser, Zeroconf - - - class MyListener(object): - - def remove_service(self, zeroconf, type, name): - print("Service %s removed" % (name,)) - - def add_service(self, zeroconf, type, name): - info = zeroconf.get_service_info(type, name) - print("Service %s added, service info: %s" % (name, info)) - - - zeroconf = Zeroconf() - listener = MyListener() - browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener) - try: - input("Press enter to exit...\n\n") - finally: - zeroconf.close() - -.. note:: - - Discovery and service registration use *all* available network interfaces by default. - If you want to customize that you need to specify ``interfaces`` argument when - constructing ``Zeroconf`` object (see the code for details). - -See examples directory for more. 
- -Changelog -========= - -0.17.4 ------- - -* Fixed support for Linux kernel versions < 3.9 (thanks to Giovanni Harting - and Luckydonald, GitHub pull request #26) - -0.17.3 ------- - -* Fixed DNSText repr on Python 3 (it'd crash when the text was longer than - 10 bytes), thanks to Paulus Schoutsen for the patch, GitHub pull request #24 - -0.17.2 ------- - -* Fixed installation on Python 3.4.3+ (was failing because of enum34 dependency - which fails to install on 3.4.3+, changed to depend on enum-compat instead; - thanks to Michael Brennan for the original patch, GitHub pull request #22) - -0.17.1 ------- - -* Fixed EADDRNOTAVAIL when attempting to use dummy network interfaces on Windows, - thanks to daid - -0.17.0 ------- - -* Added some Python dependencies so it's not zero-dependencies anymore -* Improved exception handling (it'll be quieter now) -* Messages are listened to and sent using all available network interfaces - by default (configurable); thanks to Marcus Müller -* Started using logging more freely -* Fixed a bug with binary strings as property values being converted to False - (https://github.com/jstasiak/python-zeroconf/pull/10); thanks to Dr. 
Seuss -* Added new ``ServiceBrowser`` event handler interface (see the examples) -* PyPy3 now officially supported -* Fixed ServiceInfo repr on Python 3, thanks to Yordan Miladinov - -0.16.0 ------- - -* Set up Python logging and started using it -* Cleaned up code style (includes migrating from camel case to snake case) - -0.15.1 ------- - -* Fixed handling closed socket (GitHub #4) - -0.15 ----- - -* Forked by Jakub Stasiak -* Made Python 3 compatible -* Added setup script, made installable by pip and uploaded to PyPI -* Set up Travis build -* Reformatted the code and moved files around -* Stopped catching BaseException in several places, that could hide errors -* Marked threads as daemonic, they won't keep application alive now - -0.14 ----- - -* Fix for SOL_IP undefined on some systems - thanks Mike Erdely. -* Cleaned up examples. -* Lowercased module name. - -0.13 ----- - -* Various minor changes; see git for details. -* No longer compatible with Python 2.2. Only tested with 2.5-2.7. -* Fork by William McBrine. - -0.12 ----- - -* allow selection of binding interface -* typo fix - Thanks A. M. Kuchlingi -* removed all use of word 'Rendezvous' - this is an API change - -0.11 ----- - -* correction to comments for addListener method -* support for new record types seen from OS X - - IPv6 address - - hostinfo - -* ignore unknown DNS record types -* fixes to name decoding -* works alongside other processes using port 5353 (e.g. 
on Mac OS X) -* tested against Mac OS X 10.3.2's mDNSResponder -* corrections to removal of list entries for service browser - -0.10 ----- - -* Jonathon Paisley contributed these corrections: - - always multicast replies, even when query is unicast - - correct a pointer encoding problem - - can now write records in any order - - traceback shown on failure - - better TXT record parsing - - server is now separate from name - - can cancel a service browser -* modified some unit tests to accommodate these changes - -0.09 ----- - -* remove all records on service unregistration -* fix DOS security problem with readName - -0.08 ----- - -* changed licensing to LGPL - -0.07 ----- - -* faster shutdown on engine -* pointer encoding of outgoing names -* ServiceBrowser now works -* new unit tests - -0.06 ----- -* small improvements with unit tests -* added defined exception types -* new style objects -* fixed hostname/interface problem -* fixed socket timeout problem -* fixed add_service_listener() typo bug -* using select() for socket reads -* tested on Debian unstable with Python 2.2.2 - -0.05 ----- - -* ensure case insensitivty on domain names -* support for unicast DNS queries - -0.04 ----- - -* added some unit tests -* added __ne__ adjuncts where required -* ensure names end in '.local.' -* timeout on receiving socket for clean shutdown - - -License -======= - -LGPL, see COPYING file for details. 
- - diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/RECORD b/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/RECORD deleted file mode 100644 index 96a0421..0000000 --- a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/RECORD +++ /dev/null @@ -1,10 +0,0 @@ -zeroconf.py,sha256=5LFspm7fZaaG6trVMkZY2t-S-ezev9tdoTTmBqabNeE,57139 -zeroconf-0.17.4.dist-info/DESCRIPTION.rst,sha256=8rlcTqe6M6ytbm5pTf1acLqbRdwaxQa7Fg_Sy6kVrSA,7187 -zeroconf-0.17.4.dist-info/METADATA,sha256=qcgqtuJxaOwEyUS38z9eEYxSj30OG2-8dQ94NWipo7k,8561 -zeroconf-0.17.4.dist-info/metadata.json,sha256=fGng8FC3NLfrxojVUFfEtquL8iURakIJZoKP3DQij8Y,1437 -zeroconf-0.17.4.dist-info/pbr.json,sha256=6YIrYDsheNOX2fcy6S_Jw7xMl65fgtWkooyLbEQLGjE,46 -zeroconf-0.17.4.dist-info/RECORD,, -zeroconf-0.17.4.dist-info/top_level.txt,sha256=G_yoNgGm6QMZZpH139yjBdEQpn-jCn0EN5Zvy0kJuII,9 -zeroconf-0.17.4.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 -zeroconf-0.17.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -__pycache__/zeroconf.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/WHEEL deleted file mode 100644 index 9dff69d..0000000 --- a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.24.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/metadata.json 
deleted file mode 100644 index e09e55e..0000000 --- a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"license": "LGPL", "name": "zeroconf", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "Pure Python Multicast DNS Service Discovery Library (Bonjour/Avahi compatible)", "platform": "unix", "run_requires": [{"requires": ["enum-compat", "netifaces", "six"]}], "version": "0.17.4", "extensions": {"python.details": {"project_urls": {"Home": "https://github.com/jstasiak/python-zeroconf"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "name": "Paul Scott-Murphy, William McBrine, Jakub Stasiak"}]}}, "keywords": ["Bonjour", "Avahi", "Zeroconf", "Multicast", "DNS", "Service", "Discovery", "mDNS"], "classifiers": ["Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)", "Operating System :: POSIX", "Operating System :: POSIX :: Linux", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Libraries", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy"], "extras": []} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/pbr.json b/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/pbr.json deleted file mode 100644 index f2cc4e2..0000000 --- a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/pbr.json +++ /dev/null @@ -1 +0,0 @@ -{"is_release": true, "git_version": "0b9093d"} \ No newline at end 
of file diff --git a/Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/INSTALLER b/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/INSTALLER similarity index 100% rename from Shared/lib/python3.4/site-packages/feedparser-5.2.1.dist-info/INSTALLER rename to Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/INSTALLER diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/METADATA b/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/METADATA similarity index 70% rename from Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/METADATA rename to Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/METADATA index e099b14..f1f2e09 100644 --- a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/METADATA +++ b/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/METADATA @@ -1,6 +1,6 @@ -Metadata-Version: 2.0 +Metadata-Version: 2.1 Name: zeroconf -Version: 0.17.4 +Version: 0.21.3 Summary: Pure Python Multicast DNS Service Discovery Library (Bonjour/Avahi compatible) Home-page: https://github.com/jstasiak/python-zeroconf Author: Paul Scott-Murphy, William McBrine, Jakub Stasiak @@ -18,18 +18,14 @@ Classifier: Operating System :: POSIX Classifier: Operating System :: POSIX :: Linux Classifier: Operating System :: MacOS :: MacOS X Classifier: Topic :: Software Development :: Libraries -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: 
Python :: Implementation :: PyPy -Requires-Dist: enum-compat -Requires-Dist: netifaces -Requires-Dist: six +Requires-Dist: ifaddr +Requires-Dist: typing; python_version < "3.5" python-zeroconf =============== @@ -76,9 +72,8 @@ Compared to some other Zeroconf/Bonjour/Avahi Python packages, python-zeroconf: Python compatibility -------------------- -* CPython 2.6, 2.7, 3.3+ -* PyPy 2.2+ (possibly 1.9-2.1 as well) -* PyPy3 2.4+ +* CPython 3.4+ +* PyPy3 5.8+ Versioning ---------- @@ -111,15 +106,14 @@ The easiest way to install python-zeroconf is using pip:: How do I use it? ================ -Here's an example: +Here's an example of browsing for a service: .. code-block:: python - from six.moves import input from zeroconf import ServiceBrowser, Zeroconf - class MyListener(object): + class MyListener: def remove_service(self, zeroconf, type, name): print("Service %s removed" % (name,)) @@ -143,11 +137,112 @@ Here's an example: If you want to customize that you need to specify ``interfaces`` argument when constructing ``Zeroconf`` object (see the code for details). +If you don't know the name of the service you need to browse for, try: + +.. code-block:: python + + from zeroconf import ZeroconfServiceTypes + print('\n'.join(ZeroconfServiceTypes.find())) + See examples directory for more. 
Changelog ========= +0.21.3 +------ + +* This time really allowed incoming service names to contain underscores (patch released + as part of 0.20.0 was defective) + +0.21.2 +------ + +* Fixed import-time typing-related TypeError when older typing version is used + +0.21.1 +------ + +* Fixed installation on Python 3.4 (we use typing now but there was no explicit dependency on it) + +0.21.0 +------ + +* Added an error message when importing the package using unsupported Python version +* Fixed TTL handling for published service +* Implemented unicast support +* Fixed WSL (Windows Subsystem for Linux) compatibility +* Fixed occassional UnboundLocalError issue +* Fixed UTF-8 multibyte name compression +* Switched from netifaces to ifaddr (pure Python) +* Allowed incoming service names to contain underscores + +0.20.0 +------ + +* Dropped support for Python 2 (this includes PyPy) and 3.3 +* Fixed some class' equality operators +* ServiceBrowser entries are being refreshed when 'stale' now +* Cache returns new records first now instead of last + +0.19.1 +------ + +* Allowed installation with netifaces >= 0.10.6 (a bug that was concerning us + got fixed) + +0.19.0 +------ + +* Technically backwards incompatible - restricted netifaces dependency version to + work around a bug, see https://github.com/jstasiak/python-zeroconf/issues/84 for + details + +0.18.0 +------ + +* Dropped Python 2.6 support +* Improved error handling inside code executed when Zeroconf object is being closed + +0.17.7 +------ + +* Better Handling of DNS Incoming Packets parsing exceptions +* Many exceptions will now log a warning the first time they are seen +* Catch and log sendto() errors +* Fix/Implement duplicate name change +* Fix overly strict name validation introduced in 0.17.6 +* Greatly improve handling of oversized packets including: + + - Implement name compression per RFC1035 + - Limit size of generated packets to 9000 bytes as per RFC6762 + - Better handle over sized incoming packets + 
+* Increased test coverage to 95% + +0.17.6 +------ + +* Many improvements to address race conditions and exceptions during ZC() + startup and shutdown, thanks to: morpav, veawor, justingiorgi, herczy, + stephenrauch +* Added more test coverage: strahlex, stephenrauch +* Stephen Rauch contributed: + + - Speed up browser startup + - Add ZeroconfServiceTypes() query class to discover all advertised service types + - Add full validation for service names, types and subtypes + - Fix for subtype browsing + - Fix DNSHInfo support + +0.17.5 +------ + +* Fixed OpenBSD compatibility, thanks to Alessio Sergi +* Fixed race condition on ServiceBrowser startup, thanks to gbiddison +* Fixed installation on some Python 3 systems, thanks to Per Sandström +* Fixed "size change during iteration" bug on Python 3, thanks to gbiddison + 0.17.4 ------ @@ -248,6 +343,7 @@ Changelog ---- * Jonathon Paisley contributed these corrections: + - always multicast replies, even when query is unicast - correct a pointer encoding problem - can now write records in any order @@ -255,6 +351,7 @@ Changelog - better TXT record parsing - server is now separate from name - can cancel a service browser + * modified some unit tests to accommodate these changes 0.09 diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/RECORD b/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/RECORD new file mode 100644 index 0000000..c4ac1aa --- /dev/null +++ b/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/RECORD @@ -0,0 +1,7 @@ +__pycache__/zeroconf.cpython-37.pyc,, +zeroconf-0.21.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +zeroconf-0.21.3.dist-info/METADATA,sha256=lxy4qAG2nLI2utQyZH5A5szoINtKhm5tvBqHQ1PHXXI,11278 +zeroconf-0.21.3.dist-info/RECORD,, +zeroconf-0.21.3.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 +zeroconf-0.21.3.dist-info/top_level.txt,sha256=G_yoNgGm6QMZZpH139yjBdEQpn-jCn0EN5Zvy0kJuII,9 
+zeroconf.py,sha256=5t7HwXoNu0JRsfKKFwVDUWrU14sw9nJ7Qz7aghuOfqQ,73545 diff --git a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/WHEEL similarity index 70% rename from Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/WHEEL rename to Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/WHEEL index 0de529b..1316c41 100644 --- a/Shared/lib/python3.4/site-packages/six-1.10.0.dist-info/WHEEL +++ b/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.26.0) +Generator: bdist_wheel (0.31.1) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any diff --git a/Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/top_level.txt b/Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/zeroconf-0.17.4.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/zeroconf-0.21.3.dist-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/zeroconf.py b/Shared/lib/python3.4/site-packages/zeroconf.py index 6d7078c..e027e2c 100644 --- a/Shared/lib/python3.4/site-packages/zeroconf.py +++ b/Shared/lib/python3.4/site-packages/zeroconf.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import, division, print_function, unicode_literals - """ Multicast DNS Service Discovery for Python, v0.14-wmcbrine Copyright 2003 Paul Scott-Murphy, 2014 William McBrine @@ -25,49 +23,43 @@ from __future__ import absolute_import, division, print_function, unicode_litera import enum import errno import logging +import re import select import socket import struct +import sys import threading import time from functools import reduce +from typing import Callable # noqa # used in type hints +from typing import Dict, List, Optional, Union -import netifaces -from six import binary_type, indexbytes, int2byte, iteritems, text_type 
-from six.moves import xrange +import ifaddr __author__ = 'Paul Scott-Murphy, William McBrine' __maintainer__ = 'Jakub Stasiak ' -__version__ = '0.17.4' +__version__ = '0.21.3' __license__ = 'LGPL' -try: - NullHandler = logging.NullHandler -except AttributeError: - # Python 2.6 fallback - class NullHandler(logging.Handler): - - def emit(self, record): - pass - __all__ = [ "__version__", "Zeroconf", "ServiceInfo", "ServiceBrowser", "Error", "InterfaceChoice", "ServiceStateChange", ] +if sys.version_info <= (3, 3): + raise ImportError(''' +Python version > 3.3 required for python-zeroconf. +If you need support for Python 2 or Python 3.3 please use version 19.1 + ''') log = logging.getLogger(__name__) -log.addHandler(NullHandler()) +log.addHandler(logging.NullHandler()) if log.level == logging.NOTSET: log.setLevel(logging.WARN) -# hook for threads - -_GLOBAL_DONE = False - # Some timing constants _UNREGISTER_TIME = 125 @@ -81,16 +73,16 @@ _BROWSER_TIME = 500 _MDNS_ADDR = '224.0.0.251' _MDNS_PORT = 5353 _DNS_PORT = 53 -_DNS_TTL = 60 * 60 # one hour default TTL +_DNS_TTL = 120 # two minutes default TTL as recommended by RFC6762 _MAX_MSG_TYPICAL = 1460 # unused -_MAX_MSG_ABSOLUTE = 8972 +_MAX_MSG_ABSOLUTE = 8966 _FLAGS_QR_MASK = 0x8000 # query response mask _FLAGS_QR_QUERY = 0x0000 # query _FLAGS_QR_RESPONSE = 0x8000 # response -_FLAGS_AA = 0x0400 # Authorative answer +_FLAGS_AA = 0x0400 # Authoritative answer _FLAGS_TC = 0x0200 # Truncated _FLAGS_RD = 0x0100 # Recursion desired _FLAGS_RA = 0x8000 # Recursion available @@ -157,13 +149,144 @@ _TYPES = {_TYPE_A: "a", _TYPE_SRV: "srv", _TYPE_ANY: "any"} +_HAS_A_TO_Z = re.compile(r'[A-Za-z]') +_HAS_ONLY_A_TO_Z_NUM_HYPHEN = re.compile(r'^[A-Za-z0-9\-]+$') +_HAS_ONLY_A_TO_Z_NUM_HYPHEN_UNDERSCORE = re.compile(r'^[A-Za-z0-9\-\_]+$') +_HAS_ASCII_CONTROL_CHARS = re.compile(r'[\x00-\x1f\x7f]') + +int2byte = struct.Struct(">B").pack + + +@enum.unique +class InterfaceChoice(enum.Enum): + Default = 1 + All = 2 + + +@enum.unique +class 
ServiceStateChange(enum.Enum): + Added = 1 + Removed = 2 + + # utility functions -def current_time_millis(): +def current_time_millis() -> float: """Current system time in milliseconds""" return time.time() * 1000 + +def service_type_name(type_, *, allow_underscores: bool = False): + """ + Validate a fully qualified service name, instance or subtype. [rfc6763] + + Returns fully qualified service name. + + Domain names used by mDNS-SD take the following forms: + + . <_tcp|_udp> . local. + . . <_tcp|_udp> . local. + ._sub . . <_tcp|_udp> . local. + + 1) must end with 'local.' + + This is true because we are implementing mDNS and since the 'm' means + multi-cast, the 'local.' domain is mandatory. + + 2) local is preceded with either '_udp.' or '_tcp.' + + 3) service name precedes <_tcp|_udp> + + The rules for Service Names [RFC6335] state that they may be no more + than fifteen characters long (not counting the mandatory underscore), + consisting of only letters, digits, and hyphens, must begin and end + with a letter or digit, must not contain consecutive hyphens, and + must contain at least one letter. + + The instance name and sub type may be up to 63 bytes. + + The portion of the Service Instance Name is a user- + friendly name consisting of arbitrary Net-Unicode text [RFC5198]. It + MUST NOT contain ASCII control characters (byte values 0x00-0x1F and + 0x7F) [RFC20] but otherwise is allowed to contain any characters, + without restriction, including spaces, uppercase, lowercase, + punctuation -- including dots -- accented characters, non-Roman text, + and anything else that may be represented using Net-Unicode. + + :param type_: Type, SubType or service name to validate + :return: fully qualified service name (eg: _http._tcp.local.) + """ + if not (type_.endswith('._tcp.local.') or type_.endswith('._udp.local.')): + raise BadTypeInNameException( + "Type '%s' must end with '._tcp.local.' 
or '._udp.local.'" % + type_) + + remaining = type_[:-len('._tcp.local.')].split('.') + name = remaining.pop() + if not name: + raise BadTypeInNameException("No Service name found") + + if len(remaining) == 1 and len(remaining[0]) == 0: + raise BadTypeInNameException( + "Type '%s' must not start with '.'" % type_) + + if name[0] != '_': + raise BadTypeInNameException( + "Service name (%s) must start with '_'" % name) + + # remove leading underscore + name = name[1:] + + if len(name) > 15: + raise BadTypeInNameException( + "Service name (%s) must be <= 15 bytes" % name) + + if '--' in name: + raise BadTypeInNameException( + "Service name (%s) must not contain '--'" % name) + + if '-' in (name[0], name[-1]): + raise BadTypeInNameException( + "Service name (%s) may not start or end with '-'" % name) + + if not _HAS_A_TO_Z.search(name): + raise BadTypeInNameException( + "Service name (%s) must contain at least one letter (eg: 'A-Z')" % + name) + + allowed_characters_re = ( + _HAS_ONLY_A_TO_Z_NUM_HYPHEN_UNDERSCORE if allow_underscores + else _HAS_ONLY_A_TO_Z_NUM_HYPHEN + ) + + if not allowed_characters_re.search(name): + raise BadTypeInNameException( + "Service name (%s) must contain only these characters: " + "A-Z, a-z, 0-9, hyphen ('-')%s" % (name, ", underscore ('_')" if allow_underscores else "")) + + if remaining and remaining[-1] == '_sub': + remaining.pop() + if len(remaining) == 0 or len(remaining[0]) == 0: + raise BadTypeInNameException( + "_sub requires a subtype name") + + if len(remaining) > 1: + remaining = ['.'.join(remaining)] + + if remaining: + length = len(remaining[0].encode('utf-8')) + if length > 63: + raise BadTypeInNameException("Too long: '%s'" % remaining[0]) + + if _HAS_ASCII_CONTROL_CHARS.search(remaining[0]): + raise BadTypeInNameException( + "Ascii control character 0x00-0x1F and 0x7F illegal in '%s'" % + remaining[0]) + + return '_' + name + type_[-len('._tcp.local.'):] + + # Exceptions @@ -171,36 +294,66 @@ class Error(Exception): pass 
-class NonLocalNameException(Exception): +class IncomingDecodeError(Error): pass -class NonUniqueNameException(Exception): +class NonUniqueNameException(Error): pass -class NamePartTooLongException(Exception): +class NamePartTooLongException(Error): pass -class AbstractMethodException(Exception): +class AbstractMethodException(Error): pass -class BadTypeInNameException(Exception): +class BadTypeInNameException(Error): pass + # implementation classes -class DNSEntry(object): +class QuietLogger: + _seen_logs = {} # type: Dict[str, tuple] + + @classmethod + def log_exception_warning(cls, logger_data=None): + exc_info = sys.exc_info() + exc_str = str(exc_info[1]) + if exc_str not in cls._seen_logs: + # log at warning level the first time this is seen + cls._seen_logs[exc_str] = exc_info + logger = log.warning + else: + logger = log.debug + if logger_data is not None: + logger(*logger_data) + logger('Exception occurred:', exc_info=exc_info) + + @classmethod + def log_warning_once(cls, *args): + msg_str = args[0] + if msg_str not in cls._seen_logs: + cls._seen_logs[msg_str] = 0 + logger = log.warning + else: + logger = log.debug + cls._seen_logs[msg_str] += 1 + logger(*args) + + +class DNSEntry: """A DNS entry""" - def __init__(self, name, type, class_): + def __init__(self, name, type_, class_): self.key = name.lower() self.name = name - self.type = type + self.type = type_ self.class_ = class_ & _CLASS_MASK self.unique = (class_ & _CLASS_UNIQUE) != 0 @@ -215,11 +368,13 @@ class DNSEntry(object): """Non-equality test""" return not self.__eq__(other) - def get_class_(self, class_): + @staticmethod + def get_class_(class_): """Class accessor""" return _CLASSES.get(class_, "?(%s)" % class_) - def get_type(self, t): + @staticmethod + def get_type(t): """Type accessor""" return _TYPES.get(t, "?(%s)" % t) @@ -233,7 +388,7 @@ class DNSEntry(object): result += "," result += self.name if other is not None: - result += ",%s]" % (other) + result += ",%s]" % other else: result += 
"]" return result @@ -243,18 +398,16 @@ class DNSQuestion(DNSEntry): """A DNS question entry""" - def __init__(self, name, type, class_): - # if not name.endswith(".local."): - # raise NonLocalNameException - DNSEntry.__init__(self, name, type, class_) + def __init__(self, name: str, type_: int, class_: int) -> None: + DNSEntry.__init__(self, name, type_, class_) - def answered_by(self, rec): + def answered_by(self, rec: 'DNSRecord') -> bool: """Returns true if the question is answered by the record""" return (self.class_ == rec.class_ and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name) - def __repr__(self): + def __repr__(self) -> str: """String representation""" return DNSEntry.to_string(self, "question", None) @@ -263,14 +416,18 @@ class DNSRecord(DNSEntry): """A DNS record - like a DNS entry, but has a TTL""" - def __init__(self, name, type, class_, ttl): - DNSEntry.__init__(self, name, type, class_) + def __init__(self, name, type_, class_, ttl): + DNSEntry.__init__(self, name, type_, class_) self.ttl = ttl self.created = current_time_millis() def __eq__(self, other): - """Tests equality as per DNSRecord""" - return isinstance(other, DNSRecord) and DNSEntry.__eq__(self, other) + """Abstract method""" + raise AbstractMethodException + + def __ne__(self, other): + """Non-equality test""" + return not self.__eq__(other) def suppressed_by(self, msg): """Returns true if any answer in a message can suffice for the @@ -292,9 +449,9 @@ class DNSRecord(DNSEntry): def get_remaining_ttl(self, now): """Returns the remaining TTL in seconds.""" - return max(0, (self.get_expiration_time(100) - now) / 1000) + return max(0, (self.get_expiration_time(100) - now) / 1000.0) - def is_expired(self, now): + def is_expired(self, now) -> bool: """Returns true if this record has expired.""" return self.get_expiration_time(100) <= now @@ -313,9 +470,9 @@ class DNSRecord(DNSEntry): raise AbstractMethodException def to_string(self, other): - """String 
representation with addtional information""" - arg = "%s/%s,%s" % (self.ttl, - self.get_remaining_ttl(current_time_millis()), other) + """String representation with additional information""" + arg = "%s/%s,%s" % ( + self.ttl, self.get_remaining_ttl(current_time_millis()), other) return DNSEntry.to_string(self, "record", arg) @@ -323,8 +480,8 @@ class DNSAddress(DNSRecord): """A DNS address record""" - def __init__(self, name, type, class_, ttl, address): - DNSRecord.__init__(self, name, type, class_, ttl) + def __init__(self, name, type_, class_, ttl, address): + DNSRecord.__init__(self, name, type_, class_, ttl) self.address = address def write(self, out): @@ -333,36 +490,50 @@ class DNSAddress(DNSRecord): def __eq__(self, other): """Tests equality on address""" - return isinstance(other, DNSAddress) and self.address == other.address + return (isinstance(other, DNSAddress) and DNSEntry.__eq__(self, other) and + self.address == other.address) + + def __ne__(self, other): + """Non-equality test""" + return not self.__eq__(other) def __repr__(self): """String representation""" try: - return socket.inet_ntoa(self.address) - except Exception as e: # TODO stop catching all Exceptions - log.exception('Unknown error, possibly benign: %r', e) - return self.address + return str(socket.inet_ntoa(self.address)) + except Exception: # TODO stop catching all Exceptions + return str(self.address) class DNSHinfo(DNSRecord): """A DNS host information record""" - def __init__(self, name, type, class_, ttl, cpu, os): - DNSRecord.__init__(self, name, type, class_, ttl) - self.cpu = cpu - self.os = os + def __init__(self, name, type_, class_, ttl, cpu, os): + DNSRecord.__init__(self, name, type_, class_, ttl) + try: + self.cpu = cpu.decode('utf-8') + except AttributeError: + self.cpu = cpu + try: + self.os = os.decode('utf-8') + except AttributeError: + self.os = os def write(self, out): """Used in constructing an outgoing packet""" - out.write_string(self.cpu) - 
out.write_string(self.oso) + out.write_character_string(self.cpu.encode('utf-8')) + out.write_character_string(self.os.encode('utf-8')) def __eq__(self, other): """Tests equality on cpu and os""" - return (isinstance(other, DNSHinfo) and + return (isinstance(other, DNSHinfo) and DNSEntry.__eq__(self, other) and self.cpu == other.cpu and self.os == other.os) + def __ne__(self, other): + """Non-equality test""" + return not self.__eq__(other) + def __repr__(self): """String representation""" return self.cpu + " " + self.os @@ -372,8 +543,8 @@ class DNSPointer(DNSRecord): """A DNS pointer record""" - def __init__(self, name, type, class_, ttl, alias): - DNSRecord.__init__(self, name, type, class_, ttl) + def __init__(self, name, type_, class_, ttl, alias): + DNSRecord.__init__(self, name, type_, class_, ttl) self.alias = alias def write(self, out): @@ -382,7 +553,12 @@ class DNSPointer(DNSRecord): def __eq__(self, other): """Tests equality on alias""" - return isinstance(other, DNSPointer) and self.alias == other.alias + return (isinstance(other, DNSPointer) and DNSEntry.__eq__(self, other) and + self.alias == other.alias) + + def __ne__(self, other): + """Non-equality test""" + return not self.__eq__(other) def __repr__(self): """String representation""" @@ -404,7 +580,12 @@ class DNSText(DNSRecord): def __eq__(self, other): """Tests equality on text""" - return isinstance(other, DNSText) and self.text == other.text + return (isinstance(other, DNSText) and DNSEntry.__eq__(self, other) and + self.text == other.text) + + def __ne__(self, other): + """Non-equality test""" + return not self.__eq__(other) def __repr__(self): """String representation""" @@ -418,8 +599,9 @@ class DNSService(DNSRecord): """A DNS service record""" - def __init__(self, name, type, class_, ttl, priority, weight, port, server): - DNSRecord.__init__(self, name, type, class_, ttl) + def __init__(self, name, type_, class_, ttl, + priority, weight, port, server): + DNSRecord.__init__(self, name, 
type_, class_, ttl) self.priority = priority self.weight = weight self.port = port @@ -435,17 +617,22 @@ class DNSService(DNSRecord): def __eq__(self, other): """Tests equality on priority, weight, port and server""" return (isinstance(other, DNSService) and + DNSEntry.__eq__(self, other) and self.priority == other.priority and self.weight == other.weight and self.port == other.port and self.server == other.server) + def __ne__(self, other): + """Non-equality test""" + return not self.__eq__(other) + def __repr__(self): """String representation""" return self.to_string("%s:%s" % (self.server, self.port)) -class DNSIncoming(object): +class DNSIncoming(QuietLogger): """Object representation of an incoming DNS packet""" @@ -455,42 +642,52 @@ class DNSIncoming(object): self.data = data self.questions = [] self.answers = [] + self.id = 0 + self.flags = 0 self.num_questions = 0 self.num_answers = 0 self.num_authorities = 0 self.num_additionals = 0 + self.valid = False - self.read_header() - self.read_questions() - self.read_others() + try: + self.read_header() + self.read_questions() + self.read_others() + self.valid = True - def unpack(self, format): - length = struct.calcsize(format) - info = struct.unpack(format, self.data[self.offset:self.offset + length]) + except (IndexError, struct.error, IncomingDecodeError): + self.log_exception_warning(( + 'Choked at offset %d while unpacking %r', self.offset, data)) + + def unpack(self, format_): + length = struct.calcsize(format_) + info = struct.unpack( + format_, self.data[self.offset:self.offset + length]) self.offset += length return info def read_header(self): """Reads header portion of packet""" (self.id, self.flags, self.num_questions, self.num_answers, - self.num_quthorities, self.num_additionals) = self.unpack(b'!6H') + self.num_authorities, self.num_additionals) = self.unpack(b'!6H') def read_questions(self): """Reads questions section of packet""" - for i in xrange(self.num_questions): + for i in 
range(self.num_questions): name = self.read_name() - type, class_ = self.unpack(b'!HH') + type_, class_ = self.unpack(b'!HH') - question = DNSQuestion(name, type, class_) + question = DNSQuestion(name, type_, class_) self.questions.append(question) - def read_int(self): - """Reads an integer from the packet""" - return self.unpack(b'!I')[0] + # def read_int(self): + # """Reads an integer from the packet""" + # return self.unpack(b'!I')[0] def read_character_string(self): """Reads a character string from the packet""" - length = indexbytes(self.data, self.offset) + length = self.data[self.offset] self.offset += 1 return self.read_string(length) @@ -508,26 +705,32 @@ class DNSIncoming(object): """Reads the answers, authorities and additionals section of the packet""" n = self.num_answers + self.num_authorities + self.num_additionals - for i in xrange(n): + for i in range(n): domain = self.read_name() - type, class_, ttl, length = self.unpack(b'!HHiH') + type_, class_, ttl, length = self.unpack(b'!HHiH') rec = None - if type == _TYPE_A: - rec = DNSAddress(domain, type, class_, ttl, self.read_string(4)) - elif type == _TYPE_CNAME or type == _TYPE_PTR: - rec = DNSPointer(domain, type, class_, ttl, self.read_name()) - elif type == _TYPE_TXT: - rec = DNSText(domain, type, class_, ttl, self.read_string(length)) - elif type == _TYPE_SRV: - rec = DNSService(domain, type, class_, ttl, - self.read_unsigned_short(), self.read_unsigned_short(), - self.read_unsigned_short(), self.read_name()) - elif type == _TYPE_HINFO: - rec = DNSHinfo(domain, type, class_, ttl, - self.read_character_string(), self.read_character_string()) - elif type == _TYPE_AAAA: - rec = DNSAddress(domain, type, class_, ttl, self.read_string(16)) + if type_ == _TYPE_A: + rec = DNSAddress( + domain, type_, class_, ttl, self.read_string(4)) + elif type_ == _TYPE_CNAME or type_ == _TYPE_PTR: + rec = DNSPointer( + domain, type_, class_, ttl, self.read_name()) + elif type_ == _TYPE_TXT: + rec = DNSText( + domain, 
type_, class_, ttl, self.read_string(length)) + elif type_ == _TYPE_SRV: + rec = DNSService( + domain, type_, class_, ttl, + self.read_unsigned_short(), self.read_unsigned_short(), + self.read_unsigned_short(), self.read_name()) + elif type_ == _TYPE_HINFO: + rec = DNSHinfo( + domain, type_, class_, ttl, + self.read_character_string(), self.read_character_string()) + elif type_ == _TYPE_AAAA: + rec = DNSAddress( + domain, type_, class_, ttl, self.read_string(16)) else: # Try to ignore types we don't know about # Skip the payload for the resource record so the next @@ -537,7 +740,7 @@ class DNSIncoming(object): if rec is not None: self.answers.append(rec) - def is_query(self): + def is_query(self) -> bool: """Returns true if this is a query""" return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY @@ -547,17 +750,17 @@ class DNSIncoming(object): def read_utf(self, offset, length): """Reads a UTF-8 string of a given length from the packet""" - return text_type(self.data[offset:offset + length], 'utf-8', 'replace') + return str(self.data[offset:offset + length], 'utf-8', 'replace') def read_name(self): """Reads a domain name from the packet""" result = '' off = self.offset - next = -1 + next_ = -1 first = off while True: - length = indexbytes(self.data, off) + length = self.data[off] off += 1 if length == 0: break @@ -566,26 +769,25 @@ class DNSIncoming(object): result = ''.join((result, self.read_utf(off, length) + '.')) off += length elif t == 0xC0: - if next < 0: - next = off + 1 - off = ((length & 0x3F) << 8) | indexbytes(self.data, off) + if next_ < 0: + next_ = off + 1 + off = ((length & 0x3F) << 8) | self.data[off] if off >= first: - # TODO raise more specific exception - raise Exception("Bad domain name (circular) at %s" % (off,)) + raise IncomingDecodeError( + "Bad domain name (circular) at %s" % (off,)) first = off else: - # TODO raise more specific exception - raise Exception("Bad domain name at %s" % (off,)) + raise IncomingDecodeError("Bad domain name at 
%s" % (off,)) - if next >= 0: - self.offset = next + if next_ >= 0: + self.offset = next_ else: self.offset = off return result -class DNSOutgoing(object): +class DNSOutgoing: """Object representation of an outgoing packet""" @@ -597,12 +799,27 @@ class DNSOutgoing(object): self.names = {} self.data = [] self.size = 12 + self.state = self.State.init self.questions = [] self.answers = [] self.authorities = [] self.additionals = [] + def __repr__(self): + return '' % ', '.join([ + 'multicast=%s' % self.multicast, + 'flags=%s' % self.flags, + 'questions=%s' % self.questions, + 'answers=%s' % self.answers, + 'authorities=%s' % self.authorities, + 'additionals=%s' % self.additionals, + ]) + + class State(enum.Enum): + init = 0 + finished = 1 + def add_question(self, record): """Adds a question""" self.questions.append(record) @@ -613,7 +830,7 @@ class DNSOutgoing(object): self.add_answer_at_time(record, 0) def add_answer_at_time(self, record, now): - """Adds an answer if if does not expire by a certain time""" + """Adds an answer if it does not expire by a certain time""" if record is not None: if now == 0 or not record.is_expired(now): self.answers.append((record, now)) @@ -623,12 +840,46 @@ class DNSOutgoing(object): self.authorities.append(record) def add_additional_answer(self, record): - """Adds an additional answer""" + """ Adds an additional answer + + From: RFC 6763, DNS-Based Service Discovery, February 2013 + + 12. DNS Additional Record Generation + + DNS has an efficiency feature whereby a DNS server may place + additional records in the additional section of the DNS message. + These additional records are records that the client did not + explicitly request, but the server has reasonable grounds to expect + that the client might request them shortly, so including them can + save the client from having to issue additional queries. 
+ + This section recommends which additional records SHOULD be generated + to improve network efficiency, for both Unicast and Multicast DNS-SD + responses. + + 12.1. PTR Records + + When including a DNS-SD Service Instance Enumeration or Selective + Instance Enumeration (subtype) PTR record in a response packet, the + server/responder SHOULD include the following additional records: + + o The SRV record(s) named in the PTR rdata. + o The TXT record(s) named in the PTR rdata. + o All address records (type "A" and "AAAA") named in the SRV rdata. + + 12.2. SRV Records + + When including an SRV record in a response packet, the + server/responder SHOULD include the following additional records: + + o All address records (type "A" and "AAAA") named in the SRV rdata. + + """ self.additionals.append(record) - def pack(self, format, value): - self.data.append(struct.pack(format, value)) - self.size += struct.calcsize(format) + def pack(self, format_, value): + self.data.append(struct.pack(format_, value)) + self.size += struct.calcsize(format_) def write_byte(self, value): """Writes a single byte to the packet""" @@ -662,29 +913,59 @@ class DNSOutgoing(object): self.write_byte(length) self.write_string(utfstr) + def write_character_string(self, value): + assert isinstance(value, bytes) + length = len(value) + if length > 256: + raise NamePartTooLongException + self.write_byte(length) + self.write_string(value) + def write_name(self, name): - """Writes a domain name to the packet""" + """ + Write names to packet - if name in self.names: - # Find existing instance of this name in packet - # - index = self.names[name] + 18.14. 
Name Compression - # An index was found, so write a pointer to it - # + When generating Multicast DNS messages, implementations SHOULD use + name compression wherever possible to compress the names of resource + records, by replacing some or all of the resource record name with a + compact two-byte reference to an appearance of that data somewhere + earlier in the message [RFC1035]. + """ + + # split name into each label + parts = name.split('.') + if not parts[-1]: + parts.pop() + + # construct each suffix + name_suffices = ['.'.join(parts[i:]) for i in range(len(parts))] + + # look for an existing name or suffix + for count, sub_name in enumerate(name_suffices): + if sub_name in self.names: + break + else: + count = len(name_suffices) + + # note the new names we are saving into the packet + name_length = len(name.encode('utf-8')) + for suffix in name_suffices[:count]: + self.names[suffix] = self.size + name_length - len(suffix.encode('utf-8')) - 1 + + # write the new names out. + for part in parts[:count]: + self.write_utf(part) + + # if we wrote part of the name, create a pointer to the rest + if count != len(name_suffices): + # Found substring in packet, create pointer + index = self.names[name_suffices[count]] self.write_byte((index >> 8) | 0xC0) self.write_byte(index & 0xFF) else: - # No record of this name already, so write it - # out as normal, recording the location of the name - # for future pointers to it. 
- # - self.names[name] = self.size - parts = name.split('.') - if parts[-1] == '': - parts = parts[:-1] - for part in parts: - self.write_utf(part) + # this is the end of a name self.write_byte(0) def write_question(self, question): @@ -696,6 +977,10 @@ class DNSOutgoing(object): def write_record(self, record, now): """Writes a record (answer, authoritative answer, additional) to the packet""" + if self.state == self.State.finished: + return 1 + + start_data_length, start_size = len(self.data), self.size self.write_name(record.name) self.write_short(record.type) if record.unique and self.multicast: @@ -707,34 +992,47 @@ class DNSOutgoing(object): else: self.write_int(record.get_remaining_ttl(now)) index = len(self.data) + # Adjust size for the short we will write before this record - # self.size += 2 record.write(self) self.size -= 2 - length = len(b''.join(self.data[index:])) - self.insert_short(index, length) # Here is the short we adjusted for + length = sum((len(d) for d in self.data[index:])) + # Here is the short we adjusted for + self.insert_short(index, length) - def packet(self): + # if we go over, then rollback and quit + if self.size > _MAX_MSG_ABSOLUTE: + while len(self.data) > start_data_length: + self.data.pop() + self.size = start_size + self.state = self.State.finished + return 1 + return 0 + + def packet(self) -> bytes: """Returns a string containing the packet's bytes No further parts should be added to the packet once this is done.""" - if not self.finished: - self.finished = True + + overrun_answers, overrun_authorities, overrun_additionals = 0, 0, 0 + + if self.state != self.State.finished: for question in self.questions: self.write_question(question) for answer, time_ in self.answers: - self.write_record(answer, time_) + overrun_answers += self.write_record(answer, time_) for authority in self.authorities: - self.write_record(authority, 0) + overrun_authorities += self.write_record(authority, 0) for additional in self.additionals: - 
self.write_record(additional, 0) + overrun_additionals += self.write_record(additional, 0) + self.state = self.State.finished - self.insert_short(0, len(self.additionals)) - self.insert_short(0, len(self.authorities)) - self.insert_short(0, len(self.answers)) + self.insert_short(0, len(self.additionals) - overrun_additionals) + self.insert_short(0, len(self.authorities) - overrun_authorities) + self.insert_short(0, len(self.answers) - overrun_answers) self.insert_short(0, len(self.questions)) self.insert_short(0, self.flags) if self.multicast: @@ -744,7 +1042,7 @@ class DNSOutgoing(object): return b''.join(self.data) -class DNSCache(object): +class DNSCache: """A cache of DNS entries""" @@ -753,7 +1051,8 @@ class DNSCache(object): def add(self, entry): """Adds an entry""" - self.cache.setdefault(entry.key, []).append(entry) + # Insert first in list so get returns newest entry + self.cache.setdefault(entry.key, []).insert(0, entry) def remove(self, entry): """Removes an entry""" @@ -768,29 +1067,41 @@ class DNSCache(object): matching entry.""" try: list_ = self.cache[entry.key] - return list_[list_.index(entry)] + for cached_entry in list_: + if entry.__eq__(cached_entry): + return cached_entry except (KeyError, ValueError): return None - def get_by_details(self, name, type, class_): + def get_by_details(self, name, type_, class_): """Gets an entry by details. 
Will return None if there is no matching entry.""" - entry = DNSEntry(name, type, class_) + entry = DNSEntry(name, type_, class_) return self.get(entry) def entries_with_name(self, name): """Returns a list of entries whose key matches the name.""" try: - return self.cache[name] + return self.cache[name.lower()] except KeyError: return [] + def current_entry_with_name_and_alias(self, name, alias): + now = current_time_millis() + for record in self.entries_with_name(name): + if (record.type == _TYPE_PTR and + not record.is_expired(now) and + record.alias == alias): + return record + def entries(self): """Returns a list of all entries""" if not self.cache: return [] else: - return reduce(lambda a, b: a + b, self.cache.values()) + # avoid size change during iteration by copying the cache + values = list(self.cache.values()) + return reduce(lambda a, b: a + b, values) class Engine(threading.Thread): @@ -807,7 +1118,7 @@ class Engine(threading.Thread): """ def __init__(self, zc): - threading.Thread.__init__(self) + threading.Thread.__init__(self, name='zeroconf-Engine') self.daemon = True self.zc = zc self.readers = {} # maps socket to reader @@ -816,85 +1127,78 @@ class Engine(threading.Thread): self.start() def run(self): - while not _GLOBAL_DONE: - rs = self.get_readers() - if len(rs) == 0: - # No sockets to manage, but we wait for the timeout - # or addition of a socket - # - with self.condition: + while not self.zc.done: + with self.condition: + rs = self.readers.keys() + if len(rs) == 0: + # No sockets to manage, but we wait for the timeout + # or addition of a socket self.condition.wait(self.timeout) - else: + + if len(rs) != 0: try: rr, wr, er = select.select(rs, [], [], self.timeout) - for socket_ in rr: - try: - self.readers[socket_].handle_read(socket_) - except Exception as e: # TODO stop catching all Exceptions - log.exception('Unknown error, possibly benign: %r', e) - except Exception as e: # TODO stop catching all Exceptions - log.exception('Unknown error, 
possibly benign: %r', e) + if not self.zc.done: + for socket_ in rr: + reader = self.readers.get(socket_) + if reader: + reader.handle_read(socket_) - def get_readers(self): - result = [] - with self.condition: - result = self.readers.keys() - return result + except (select.error, socket.error) as e: + # If the socket was closed by another thread, during + # shutdown, ignore it and exit + if e.args[0] != socket.EBADF or not self.zc.done: + raise - def add_reader(self, reader, socket): + def add_reader(self, reader, socket_): with self.condition: - self.readers[socket] = reader + self.readers[socket_] = reader self.condition.notify() - def del_reader(self, socket): - with self.condition: - del self.readers[socket] - self.condition.notify() - - def notify(self): + def del_reader(self, socket_): with self.condition: + del self.readers[socket_] self.condition.notify() -class Listener(object): +class Listener(QuietLogger): """A Listener is used by this module to listen on the multicast group to which DNS messages are sent, allowing the implementation to cache information as it arrives. It requires registration with an Engine object in order to have - the read() method called when a socket is availble for reading.""" + the read() method called when a socket is available for reading.""" def __init__(self, zc): self.zc = zc + self.data = None def handle_read(self, socket_): try: data, (addr, port) = socket_.recvfrom(_MAX_MSG_ABSOLUTE) - except socket.error as e: - # If the socket was closed by another thread -- which happens - # regularly on shutdown -- an EBADF exception is thrown here. - # Ignore it. 
- if e.errno == socket.EBADF: - return - else: - raise e - else: - log.debug('Received %r from %r:%r', data, addr, port) + except Exception: + self.log_exception_warning() + return + + log.debug('Received from %r:%r: %r ', addr, port, data) self.data = data msg = DNSIncoming(data) - if msg.is_query(): + if not msg.valid: + pass + + elif msg.is_query(): # Always multicast responses - # if port == _MDNS_PORT: self.zc.handle_query(msg, _MDNS_ADDR, _MDNS_PORT) + # If it's not a multicast query, reply via unicast # and multicast - # elif port == _DNS_PORT: self.zc.handle_query(msg, addr, port) self.zc.handle_query(msg, _MDNS_ADDR, _MDNS_PORT) + else: self.zc.handle_response(msg) @@ -905,7 +1209,7 @@ class Reaper(threading.Thread): have expired.""" def __init__(self, zc): - threading.Thread.__init__(self) + threading.Thread.__init__(self, name='zeroconf-Reaper') self.daemon = True self.zc = zc self.start() @@ -913,7 +1217,7 @@ class Reaper(threading.Thread): def run(self): while True: self.zc.wait(10 * 1000) - if _GLOBAL_DONE: + if self.zc.done: return now = current_time_millis() for record in self.zc.cache.entries(): @@ -922,7 +1226,7 @@ class Reaper(threading.Thread): self.zc.cache.remove(record) -class Signal(object): +class Signal: def __init__(self): self._handlers = [] @@ -935,7 +1239,7 @@ class Signal(object): return SignalRegistrationInterface(self._handlers) -class SignalRegistrationInterface(object): +class SignalRegistrationInterface: def __init__(self, handlers): self._handlers = handlers @@ -949,7 +1253,12 @@ class SignalRegistrationInterface(object): return self -class ServiceBrowser(threading.Thread): +class RecordUpdateListener: + def update_record(self, zc: 'Zeroconf', now: float, record: DNSRecord) -> None: + raise NotImplementedError() + + +class ServiceBrowser(RecordUpdateListener, threading.Thread): """Used to browse for a service of a specific type. 
@@ -957,25 +1266,28 @@ class ServiceBrowser(threading.Thread): remove_service() methods called when this browser discovers changes in the services availability.""" - def __init__(self, zc, type_, handlers=None, listener=None): + def __init__(self, zc: 'Zeroconf', type_: str, handlers=None, listener=None, + addr: str = _MDNS_ADDR, port: int = _MDNS_PORT, delay: int = _BROWSER_TIME) -> None: """Creates a browser for a specific type""" assert handlers or listener, 'You need to specify at least one handler' - threading.Thread.__init__(self) + if not type_.endswith(service_type_name(type_, allow_underscores=True)): + raise BadTypeInNameException + threading.Thread.__init__( + self, name='zeroconf-ServiceBrowser_' + type_) self.daemon = True self.zc = zc self.type = type_ - self.services = {} + self.addr = addr + self.port = port + self.multicast = (self.addr == _MDNS_ADDR) + self.services = {} # type: Dict[str, DNSRecord] self.next_time = current_time_millis() - self.delay = _BROWSER_TIME - self._handlers_to_call = [] - - self.done = False + self.delay = delay + self._handlers_to_call = [] # type: List[Callable[[Zeroconf], None]] self._service_state_changed = Signal() - self.zc.add_listener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN)) - self.start() - + self.done = False if hasattr(handlers, 'add_service'): listener = handlers @@ -997,16 +1309,18 @@ class ServiceBrowser(threading.Thread): for h in handlers: self.service_state_changed.register_handler(h) + self.start() + @property - def service_state_changed(self): + def service_state_changed(self) -> SignalRegistrationInterface: return self._service_state_changed.registration_interface - def update_record(self, zc, now, record): + def update_record(self, zc: 'Zeroconf', now: float, record: DNSRecord) -> None: """Callback invoked by Zeroconf when new information arrives. 
Updates information required by browser in the Zeroconf cache.""" - def enqueue_callback(state_change, name): + def enqueue_callback(state_change: ServiceStateChange, name: str) -> None: self._handlers_to_call.append( lambda zeroconf: self._service_state_changed.fire( zeroconf=zeroconf, @@ -1016,6 +1330,7 @@ class ServiceBrowser(threading.Thread): )) if record.type == _TYPE_PTR and record.name == self.type: + assert isinstance(record, DNSPointer) expired = record.is_expired(now) service_key = record.alias.lower() try: @@ -1038,41 +1353,47 @@ class ServiceBrowser(threading.Thread): def cancel(self): self.done = True - self.zc.notify_all() + self.zc.remove_listener(self) + self.join() def run(self): + self.zc.add_listener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN)) + while True: now = current_time_millis() if len(self._handlers_to_call) == 0 and self.next_time > now: self.zc.wait(self.next_time - now) - if _GLOBAL_DONE or self.done: + if self.zc.done or self.done: return now = current_time_millis() - if self.next_time <= now: - out = DNSOutgoing(_FLAGS_QR_QUERY) + out = DNSOutgoing(_FLAGS_QR_QUERY, multicast=self.multicast) out.add_question(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN)) for record in self.services.values(): - if not record.is_expired(now): + if not record.is_stale(now): out.add_answer_at_time(record, now) - self.zc.send(out) + + self.zc.send(out, addr=self.addr, port=self.port) self.next_time = now + self.delay self.delay = min(20 * 1000, self.delay * 2) - if len(self._handlers_to_call) > 0: + if len(self._handlers_to_call) > 0 and not self.zc.done: handler = self._handlers_to_call.pop(0) handler(self.zc) -class ServiceInfo(object): +ServicePropertiesType = Dict[bytes, Union[bool, str]] + + +class ServiceInfo(RecordUpdateListener): """Service information""" - def __init__(self, type, name, address=None, port=None, weight=0, - priority=0, properties=None, server=None): + def __init__(self, type_: str, name: str, address: bytes = None, port: int 
= None, weight: int = 0, + priority: int = 0, properties=None, server: str = None) -> None: """Create a service description. - type: fully qualified service type name + type_: fully qualified service type name name: fully qualified service name address: IP address as unsigned short, network byte order port: port that the service runs on @@ -1082,9 +1403,9 @@ class ServiceInfo(object): bytes for the text field) server: fully qualified name for service host (defaults to name)""" - if not name.endswith(type): + if not type_.endswith(service_type_name(name, allow_underscores=True)): raise BadTypeInNameException - self.type = type + self.type = type_ self.name = name self.address = address self.port = port @@ -1094,27 +1415,31 @@ class ServiceInfo(object): self.server = server else: self.server = name + self._properties = {} # type: ServicePropertiesType self._set_properties(properties) + # FIXME: this is here only so that mypy doesn't complain when we set and then use the attribute when + # registering services. See if setting this to None by default is the right way to go. 
+ self.ttl = None # type: Optional[int] @property - def properties(self): + def properties(self) -> ServicePropertiesType: return self._properties - def _set_properties(self, properties): + def _set_properties(self, properties: Union[bytes, ServicePropertiesType]): """Sets properties and text of this info from a dictionary""" if isinstance(properties, dict): self._properties = properties - list = [] + list_ = [] result = b'' - for key, value in iteritems(properties): - if isinstance(key, text_type): + for key, value in properties.items(): + if isinstance(key, str): key = key.encode('utf-8') if value is None: suffix = b'' - elif isinstance(value, text_type): + elif isinstance(value, str): suffix = value.encode('utf-8') - elif isinstance(value, binary_type): + elif isinstance(value, bytes): suffix = value elif isinstance(value, int): if value: @@ -1123,8 +1448,8 @@ class ServiceInfo(object): suffix = b'false' else: suffix = b'' - list.append(b'='.join((key, suffix))) - for item in list: + list_.append(b'='.join((key, suffix))) + for item in list_: result = b''.join((result, int2byte(len(item)), item)) self.text = result else: @@ -1138,7 +1463,7 @@ class ServiceInfo(object): index = 0 strs = [] while index < end: - length = indexbytes(text, index) + length = text[index] index += 1 strs.append(text[index:index + length]) index += length @@ -1169,79 +1494,98 @@ class ServiceInfo(object): return self.name[:len(self.name) - len(self.type) - 1] return self.name - def update_record(self, zc, now, record): + def update_record(self, zc: 'Zeroconf', now: float, record: DNSRecord) -> None: """Updates service information from a DNS record""" if record is not None and not record.is_expired(now): if record.type == _TYPE_A: + assert isinstance(record, DNSAddress) # if record.name == self.name: if record.name == self.server: self.address = record.address elif record.type == _TYPE_SRV: + assert isinstance(record, DNSService) if record.name == self.name: self.server = record.server 
self.port = record.port self.weight = record.weight self.priority = record.priority # self.address = None - self.update_record(zc, now, - zc.cache.get_by_details(self.server, _TYPE_A, _CLASS_IN)) + self.update_record( + zc, now, zc.cache.get_by_details( + self.server, _TYPE_A, _CLASS_IN)) elif record.type == _TYPE_TXT: + assert isinstance(record, DNSText) if record.name == self.name: self._set_text(record.text) - def request(self, zc, timeout): + def request(self, zc: 'Zeroconf', timeout: float) -> bool: """Returns true if the service could be discovered on the network, and updates this object with details discovered. """ now = current_time_millis() delay = _LISTENER_TIME - next = now + delay + next_ = now + delay last = now + timeout - result = False + + record_types_for_check_cache = [ + (_TYPE_SRV, _CLASS_IN), + (_TYPE_TXT, _CLASS_IN), + ] + if self.server is not None: + record_types_for_check_cache.append((_TYPE_A, _CLASS_IN)) + for record_type in record_types_for_check_cache: + cached = zc.cache.get_by_details(self.name, *record_type) + if cached: + self.update_record(zc, now, cached) + + if None not in (self.server, self.address, self.text): + return True + try: zc.add_listener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN)) - while (self.server is None or self.address is None or - self.text is None): + while None in (self.server, self.address, self.text): if last <= now: return False - if next <= now: + if next_ <= now: out = DNSOutgoing(_FLAGS_QR_QUERY) - out.add_question(DNSQuestion(self.name, _TYPE_SRV, - _CLASS_IN)) - out.add_answer_at_time(zc.cache.get_by_details(self.name, - _TYPE_SRV, _CLASS_IN), now) - out.add_question(DNSQuestion(self.name, _TYPE_TXT, - _CLASS_IN)) - out.add_answer_at_time(zc.cache.get_by_details(self.name, - _TYPE_TXT, _CLASS_IN), now) - if self.server is not None: - out.add_question(DNSQuestion(self.server, - _TYPE_A, _CLASS_IN)) - out.add_answer_at_time(zc.cache.get_by_details(self.server, - _TYPE_A, _CLASS_IN), now) - 
zc.send(out) - next = now + delay - delay = delay * 2 + out.add_question( + DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN)) + out.add_answer_at_time( + zc.cache.get_by_details( + self.name, _TYPE_SRV, _CLASS_IN), now) - zc.wait(min(next, last) - now) + out.add_question( + DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN)) + out.add_answer_at_time( + zc.cache.get_by_details( + self.name, _TYPE_TXT, _CLASS_IN), now) + + if self.server is not None: + out.add_question( + DNSQuestion(self.server, _TYPE_A, _CLASS_IN)) + out.add_answer_at_time( + zc.cache.get_by_details( + self.server, _TYPE_A, _CLASS_IN), now) + zc.send(out) + next_ = now + delay + delay *= 2 + + zc.wait(min(next_, last) - now) now = current_time_millis() - result = True finally: zc.remove_listener(self) - return result + return True - def __eq__(self, other): + def __eq__(self, other: object) -> bool: """Tests equality of service name""" - if isinstance(other, ServiceInfo): - return other.name == self.name - return False + return isinstance(other, ServiceInfo) and other.name == self.name - def __ne__(self, other): + def __ne__(self, other: object) -> bool: """Non-equality test""" return not self.__eq__(other) - def __repr__(self): + def __repr__(self) -> str: """String representation""" return '%s(%s)' % ( type(self).__name__, @@ -1255,39 +1599,66 @@ class ServiceInfo(object): ) -@enum.unique -class InterfaceChoice(enum.Enum): - Default = 1 - All = 2 +class ZeroconfServiceTypes: + """ + Return all of the advertised services on any local networks + """ + def __init__(self): + self.found_services = set() + + def add_service(self, zc, type_, name): + self.found_services.add(name) + + def remove_service(self, zc, type_, name): + pass + + @classmethod + def find(cls, zc=None, timeout=5, interfaces=InterfaceChoice.All): + """ + Return all of the advertised services on any local networks. + + :param zc: Zeroconf() instance. 
Pass in if already have an + instance running or if non-default interfaces are needed + :param timeout: seconds to wait for any responses + :return: tuple of service type strings + """ + local_zc = zc or Zeroconf(interfaces=interfaces) + listener = cls() + browser = ServiceBrowser( + local_zc, '_services._dns-sd._udp.local.', listener=listener) + + # wait for responses + time.sleep(timeout) + + # close down anything we opened + if zc is None: + local_zc.close() + else: + browser.cancel() + + return tuple(sorted(listener.found_services)) -@enum.unique -class ServiceStateChange(enum.Enum): - Added = 1 - Removed = 2 - - -HOST_ONLY_NETWORK_MASK = '255.255.255.255' - - -def get_all_addresses(address_family): +def get_all_addresses() -> List[str]: return list(set( - addr['addr'] - for iface in netifaces.interfaces() - for addr in netifaces.ifaddresses(iface).get(address_family, []) - if addr.get('netmask') != HOST_ONLY_NETWORK_MASK + addr.ip + for iface in ifaddr.get_adapters() + for addr in iface.ips + if addr.is_IPv4 and addr.network_prefix != 32 # Host only netmask 255.255.255.255 )) -def normalize_interface_choice(choice, address_family): +def normalize_interface_choice(choice: Union[List[str], InterfaceChoice]) -> List[str]: if choice is InterfaceChoice.Default: - choice = ['0.0.0.0'] + return ['0.0.0.0'] elif choice is InterfaceChoice.All: - choice = get_all_addresses(address_family) - return choice + return get_all_addresses() + else: + assert isinstance(choice, list) + return choice -def new_socket(): +def new_socket(port: int = _MDNS_PORT) -> socket.socket: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) @@ -1305,23 +1676,29 @@ def new_socket(): else: try: s.setsockopt(socket.SOL_SOCKET, reuseport, 1) - except (OSError, socket.error) as err: # OSError on python 3, socket.error on python 2 + except (OSError, socket.error) as err: + # OSError on python 3, socket.error on python 2 if not err.errno == 
errno.ENOPROTOOPT: raise - s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255) - s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1) + if port is _MDNS_PORT: + # OpenBSD needs the ttl and loop values for the IP_MULTICAST_TTL and + # IP_MULTICAST_LOOP socket options as an unsigned char. + ttl = struct.pack(b'B', 255) + s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl) + loop = struct.pack(b'B', 1) + s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, loop) - s.bind(('', _MDNS_PORT)) + s.bind(('', port)) return s -def get_errno(e): +def get_errno(e: Exception) -> int: assert isinstance(e, socket.error) return e.args[0] -class Zeroconf(object): +class Zeroconf(QuietLogger): """Implementation of Zeroconf Multicast DNS Service Discovery @@ -1330,52 +1707,66 @@ class Zeroconf(object): def __init__( self, - interfaces=InterfaceChoice.All, - ): + interfaces: Union[List[str], InterfaceChoice] = InterfaceChoice.All, + unicast: bool = False + ) -> None: """Creates an instance of the Zeroconf class, establishing multicast communications, listening and reaping threads. 
:type interfaces: :class:`InterfaceChoice` or sequence of ip addresses """ - global _GLOBAL_DONE - _GLOBAL_DONE = False + # hook for threads + self._GLOBAL_DONE = False + self.unicast = unicast - self._listen_socket = new_socket() - interfaces = normalize_interface_choice(interfaces, socket.AF_INET) + if not unicast: + self._listen_socket = new_socket() + interfaces = normalize_interface_choice(interfaces) - self._respond_sockets = [] + self._respond_sockets = [] # type: List[socket.socket] for i in interfaces: - log.debug('Adding %r to multicast group', i) - try: - self._listen_socket.setsockopt( - socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, - socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(i)) - except socket.error as e: - if get_errno(e) == errno.EADDRINUSE: - log.info( - 'Address in use when adding %s to multicast group, ' - 'it is expected to happen on some systems', i, - ) - elif get_errno(e) == errno.EADDRNOTAVAIL: - log.info( - 'Address not available when adding %s to multicast group, ' - 'it is expected to happen on some systems', i, - ) - continue - else: - raise + if not unicast: + log.debug('Adding %r to multicast group', i) + try: + _value = socket.inet_aton(_MDNS_ADDR) + socket.inet_aton(i) + self._listen_socket.setsockopt( + socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, _value) + except socket.error as e: + _errno = get_errno(e) + if _errno == errno.EADDRINUSE: + log.info( + 'Address in use when adding %s to multicast group, ' + 'it is expected to happen on some systems', i, + ) + elif _errno == errno.EADDRNOTAVAIL: + log.info( + 'Address not available when adding %s to multicast ' + 'group, it is expected to happen on some systems', i, + ) + continue + elif _errno == errno.EINVAL: + log.info( + 'Interface of %s does not support multicast, ' + 'it is expected in WSL', i + ) + continue - respond_socket = new_socket() - respond_socket.setsockopt( - socket.IPPROTO_IP, socket.IP_MULTICAST_IF, socket.inet_aton(i)) + else: + raise + + respond_socket = 
new_socket() + respond_socket.setsockopt( + socket.IPPROTO_IP, socket.IP_MULTICAST_IF, socket.inet_aton(i)) + else: + respond_socket = new_socket(port=0) self._respond_sockets.append(respond_socket) - self.listeners = [] - self.browsers = [] - self.services = {} - self.servicetypes = {} + self.listeners = [] # type: List[RecordUpdateListener] + self.browsers = {} # type: Dict[RecordUpdateListener, ServiceBrowser] + self.services = {} # type: Dict[str, ServiceInfo] + self.servicetypes = {} # type: Dict[str, int] self.cache = DNSCache() @@ -1383,49 +1774,66 @@ class Zeroconf(object): self.engine = Engine(self) self.listener = Listener(self) - self.engine.add_reader(self.listener, self._listen_socket) + if not unicast: + self.engine.add_reader(self.listener, self._listen_socket) + else: + for s in self._respond_sockets: + self.engine.add_reader(self.listener, s) self.reaper = Reaper(self) - def wait(self, timeout): + self.debug = None # type: Optional[DNSOutgoing] + + @property + def done(self) -> bool: + return self._GLOBAL_DONE + + def wait(self, timeout: float) -> None: """Calling thread waits for a given number of milliseconds or until notified.""" with self.condition: - self.condition.wait(timeout / 1000) + self.condition.wait(timeout / 1000.0) - def notify_all(self): + def notify_all(self) -> None: """Notifies all waiting threads""" with self.condition: self.condition.notify_all() - def get_service_info(self, type, name, timeout=3000): + def get_service_info(self, type_: str, name: str, timeout: int = 3000) -> Optional[ServiceInfo]: """Returns network's service information for a particular name and type, or None if no service matches by the timeout, which defaults to 3 seconds.""" - info = ServiceInfo(type, name) + info = ServiceInfo(type_, name) if info.request(self, timeout): return info return None - def add_service_listener(self, type, listener): + def add_service_listener(self, type_: str, listener: RecordUpdateListener) -> None: """Adds a listener for a 
particular service type. This object will then have its update_record method called when information arrives for that type.""" self.remove_service_listener(listener) - self.browsers.append(ServiceBrowser(self, type, listener)) + self.browsers[listener] = ServiceBrowser(self, type_, listener) - def remove_service_listener(self, listener): + def remove_service_listener(self, listener: RecordUpdateListener) -> None: """Removes a listener from the set that is currently listening.""" - for browser in self.browsers: - if browser.listener == listener: - browser.cancel() - del browser + if listener in self.browsers: + self.browsers[listener].cancel() + del self.browsers[listener] - def register_service(self, info, ttl=_DNS_TTL): + def remove_all_service_listeners(self) -> None: + """Removes a listener from the set that is currently listening.""" + for listener in [k for k in self.browsers]: + self.remove_service_listener(listener) + + def register_service( + self, info: ServiceInfo, ttl: int = _DNS_TTL, allow_name_change: bool = False, + ) -> None: """Registers service information to the network with a default TTL of 60 seconds. Zeroconf will then respond to requests for information for that service. 
The name of the service may be changed if needed to make it unique on the network.""" - self.check_service(info) + info.ttl = ttl + self.check_service(info, allow_name_change) self.services[info.name.lower()] = info if info.type in self.servicetypes: self.servicetypes[info.type] += 1 @@ -1440,21 +1848,24 @@ class Zeroconf(object): now = current_time_millis() continue out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - out.add_answer_at_time(DNSPointer(info.type, _TYPE_PTR, - _CLASS_IN, ttl, info.name), 0) - out.add_answer_at_time(DNSService(info.name, _TYPE_SRV, - _CLASS_IN, ttl, info.priority, info.weight, info.port, - info.server), 0) - out.add_answer_at_time(DNSText(info.name, _TYPE_TXT, _CLASS_IN, - ttl, info.text), 0) + out.add_answer_at_time( + DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0) + out.add_answer_at_time( + DNSService(info.name, _TYPE_SRV, _CLASS_IN, + ttl, info.priority, info.weight, info.port, + info.server), 0) + + out.add_answer_at_time( + DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0) if info.address: - out.add_answer_at_time(DNSAddress(info.server, _TYPE_A, - _CLASS_IN, ttl, info.address), 0) + out.add_answer_at_time( + DNSAddress(info.server, _TYPE_A, _CLASS_IN, + ttl, info.address), 0) self.send(out) i += 1 next_time += _REGISTER_TIME - def unregister_service(self, info): + def unregister_service(self, info: ServiceInfo) -> None: """Unregister a service.""" try: del self.services[info.name.lower()] @@ -1473,21 +1884,23 @@ class Zeroconf(object): now = current_time_millis() continue out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - out.add_answer_at_time(DNSPointer(info.type, _TYPE_PTR, - _CLASS_IN, 0, info.name), 0) - out.add_answer_at_time(DNSService(info.name, _TYPE_SRV, - _CLASS_IN, 0, info.priority, info.weight, info.port, - info.name), 0) - out.add_answer_at_time(DNSText(info.name, _TYPE_TXT, _CLASS_IN, - 0, info.text), 0) + out.add_answer_at_time( + DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, 
info.name), 0) + out.add_answer_at_time( + DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, + info.priority, info.weight, info.port, info.name), 0) + out.add_answer_at_time( + DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0) + if info.address: - out.add_answer_at_time(DNSAddress(info.server, _TYPE_A, - _CLASS_IN, 0, info.address), 0) + out.add_answer_at_time( + DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, + info.address), 0) self.send(out) i += 1 next_time += _UNREGISTER_TIME - def unregister_all_services(self): + def unregister_all_services(self) -> None: """Unregister all registered services.""" if len(self.services) > 0: now = current_time_millis() @@ -1500,52 +1913,67 @@ class Zeroconf(object): continue out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) for info in self.services.values(): - out.add_answer_at_time(DNSPointer(info.type, _TYPE_PTR, - _CLASS_IN, 0, info.name), 0) - out.add_answer_at_time(DNSService(info.name, _TYPE_SRV, - _CLASS_IN, 0, info.priority, info.weight, - info.port, info.server), 0) - out.add_answer_at_time(DNSText(info.name, _TYPE_TXT, - _CLASS_IN, 0, info.text), 0) + out.add_answer_at_time(DNSPointer( + info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0) + out.add_answer_at_time(DNSService( + info.name, _TYPE_SRV, _CLASS_IN, 0, + info.priority, info.weight, info.port, info.server), 0) + out.add_answer_at_time(DNSText( + info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0) if info.address: - out.add_answer_at_time(DNSAddress(info.server, - _TYPE_A, _CLASS_IN, 0, info.address), 0) + out.add_answer_at_time(DNSAddress( + info.server, _TYPE_A, _CLASS_IN, 0, + info.address), 0) self.send(out) i += 1 next_time += _UNREGISTER_TIME - def check_service(self, info): + def check_service(self, info: ServiceInfo, allow_name_change: bool) -> None: """Checks the network for a unique service name, modifying the ServiceInfo passed in if it is not unique.""" + + # This is kind of funky because of the subtype based tests + # need to make subtypes a 
first class citizen + service_name = service_type_name(info.name) + if not info.type.endswith(service_name): + raise BadTypeInNameException + + instance_name = info.name[:-len(service_name) - 1] + next_instance_number = 2 + now = current_time_millis() next_time = now i = 0 while i < 3: - for record in self.cache.entries_with_name(info.type): - if (record.type == _TYPE_PTR and - not record.is_expired(now) and - record.alias == info.name): - if info.name.find('.') < 0: - info.name = '%s.[%s:%s].%s' % (info.name, - info.address, info.port, info.type) - - self.check_service(info) - return + # check for a name conflict + while self.cache.current_entry_with_name_and_alias( + info.type, info.name): + if not allow_name_change: raise NonUniqueNameException + + # change the name and look for a conflict + info.name = '%s-%s.%s' % ( + instance_name, next_instance_number, info.type) + next_instance_number += 1 + service_type_name(info.name) + next_time = now + i = 0 + if now < next_time: self.wait(next_time - now) now = current_time_millis() continue + out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA) self.debug = out out.add_question(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN)) - out.add_authorative_answer(DNSPointer(info.type, _TYPE_PTR, - _CLASS_IN, _DNS_TTL, info.name)) + out.add_authorative_answer(DNSPointer( + info.type, _TYPE_PTR, _CLASS_IN, info.ttl, info.name)) self.send(out) i += 1 next_time += _CHECK_TIME - def add_listener(self, listener, question): + def add_listener(self, listener: RecordUpdateListener, question: Optional[DNSQuestion]) -> None: """Adds a listener for a given question. 
The listener will have its update_record method called when information is available to answer the question.""" @@ -1557,7 +1985,7 @@ class Zeroconf(object): listener.update_record(self, now, record) self.notify_all() - def remove_listener(self, listener): + def remove_listener(self, listener: RecordUpdateListener) -> None: """Removes a listener.""" try: self.listeners.remove(listener) @@ -1565,14 +1993,14 @@ class Zeroconf(object): except Exception as e: # TODO stop catching all Exceptions log.exception('Unknown error, possibly benign: %r', e) - def update_record(self, now, rec): + def update_record(self, now: float, rec: DNSRecord) -> None: """Used to notify listeners of new information that has updated a record.""" for listener in self.listeners: listener.update_record(self, now, rec) self.notify_all() - def handle_response(self, msg): + def handle_response(self, msg: DNSIncoming) -> None: """Deal with incoming response packets. All answers are held in the cache, and listeners are notified.""" now = current_time_millis() @@ -1585,13 +2013,13 @@ class Zeroconf(object): entry = self.cache.get(record) if entry is not None: entry.reset_ttl(record) - record = entry else: self.cache.add(record) + for record in msg.answers: self.update_record(now, record) - def handle_query(self, msg, addr, port): + def handle_query(self, msg: DNSIncoming, addr: str, port: int) -> None: """Deal with incoming query packets. 
Provides a response if possible.""" out = None @@ -1599,7 +2027,7 @@ class Zeroconf(object): # Support unicast client responses # if port != _MDNS_PORT: - out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, False) + out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, multicast=False) for question in msg.questions: out.add_question(question) @@ -1609,16 +2037,16 @@ class Zeroconf(object): for stype in self.servicetypes.keys(): if out is None: out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - out.add_answer(msg, - DNSPointer("_services._dns-sd._udp.local.", - _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype)) + out.add_answer(msg, DNSPointer( + "_services._dns-sd._udp.local.", _TYPE_PTR, + _CLASS_IN, _DNS_TTL, stype)) for service in self.services.values(): if question.name == service.type: if out is None: out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - out.add_answer(msg, - DNSPointer(service.type, _TYPE_PTR, - _CLASS_IN, _DNS_TTL, service.name)) + out.add_answer(msg, DNSPointer( + service.type, _TYPE_PTR, + _CLASS_IN, service.ttl, service.name)) else: try: if out is None: @@ -1628,53 +2056,78 @@ class Zeroconf(object): if question.type in (_TYPE_A, _TYPE_ANY): for service in self.services.values(): if service.server == question.name.lower(): - out.add_answer(msg, DNSAddress(question.name, - _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, - _DNS_TTL, service.address)) + out.add_answer(msg, DNSAddress( + question.name, _TYPE_A, + _CLASS_IN | _CLASS_UNIQUE, + service.ttl, service.address)) - service = self.services.get(question.name.lower(), None) - if not service: + name_to_find = question.name.lower() + if name_to_find not in self.services: continue + service = self.services[name_to_find] if question.type in (_TYPE_SRV, _TYPE_ANY): - out.add_answer(msg, DNSService(question.name, - _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, - _DNS_TTL, service.priority, service.weight, - service.port, service.server)) + out.add_answer(msg, DNSService( + question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, + 
service.ttl, service.priority, service.weight, + service.port, service.server)) if question.type in (_TYPE_TXT, _TYPE_ANY): - out.add_answer(msg, DNSText(question.name, - _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, - _DNS_TTL, service.text)) + out.add_answer(msg, DNSText( + question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, + service.ttl, service.text)) if question.type == _TYPE_SRV: - out.add_additional_answer(DNSAddress(service.server, - _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, - _DNS_TTL, service.address)) - except Exception as e: # TODO stop catching all Exceptions - log.exception('Unknown error, possibly benign: %r', e) + out.add_additional_answer(DNSAddress( + service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, + service.ttl, service.address)) + except Exception: # TODO stop catching all Exceptions + self.log_exception_warning() if out is not None and out.answers: out.id = msg.id self.send(out, addr, port) - def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT): + def send(self, out: DNSOutgoing, addr: str = _MDNS_ADDR, port: int = _MDNS_PORT) -> None: """Sends an outgoing packet.""" packet = out.packet() - log.debug('Sending %r as %r...', out, packet) + if len(packet) > _MAX_MSG_ABSOLUTE: + self.log_warning_once("Dropping %r over-sized packet (%d bytes) %r", + out, len(packet), packet) + return + log.debug('Sending %r (%d bytes) as %r...', out, len(packet), packet) for s in self._respond_sockets: - bytes_sent = s.sendto(packet, 0, (addr, port)) - if bytes_sent != len(packet): - raise Error( - 'Should not happen, sent %d out of %d bytes' % ( - bytes_sent, len(packet))) + if self._GLOBAL_DONE: + return + try: + bytes_sent = s.sendto(packet, 0, (addr, port)) + except Exception: # TODO stop catching all Exceptions + # on send errors, log the exception and keep going + self.log_exception_warning() + else: + if bytes_sent != len(packet): + self.log_warning_once( + '!!! 
sent %d out of %d bytes to %r' % ( + bytes_sent, len(packet), s)) - def close(self): + def close(self) -> None: """Ends the background threads, and prevent this instance from servicing further queries.""" - global _GLOBAL_DONE - if not _GLOBAL_DONE: - _GLOBAL_DONE = True - self.notify_all() - self.engine.notify() + if not self._GLOBAL_DONE: + self._GLOBAL_DONE = True + # remove service listeners + self.remove_all_service_listeners() self.unregister_all_services() - for s in [self._listen_socket] + self._respond_sockets: + + # shutdown recv socket and thread + if not self.unicast: + self.engine.del_reader(self._listen_socket) + self._listen_socket.close() + else: + for s in self._respond_sockets: + self.engine.del_reader(s) + self.engine.join() + + # shutdown the rest + self.notify_all() + self.reaper.join() + for s in self._respond_sockets: s.close()