add tor deps

parent 0ee7628a4f
commit 1f23120cc3

91 changed files with 25537 additions and 535 deletions
@@ -8,8 +8,8 @@
../ed25519/__pycache__/test_ed25519.cpython-34.pyc
../ed25519/_ed25519.cpython-34m.so
./
top_level.txt
PKG-INFO
dependency_links.txt
top_level.txt
SOURCES.txt
../../../../bin/edsig
Binary file not shown.
Binary file not shown.
@@ -71,54 +71,54 @@ setuptools-18.5.dist-info/top_level.txt,sha256=7780fzudMJkykiTcIrAQ8m8Lll6kot3EE
setuptools-18.5.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
/srv/openmedialibrary/platform/Linux_x86_64/home/.local/bin/easy_install,sha256=7h7lc5DCAhnE08UwEm49wGymGDGdOcrkRdncTKYmXIQ,233
/srv/openmedialibrary/platform/Linux_x86_64/home/.local/bin/easy_install-3.4,sha256=7h7lc5DCAhnE08UwEm49wGymGDGdOcrkRdncTKYmXIQ,233
setuptools/__pycache__/package_index.cpython-34.pyc,,
setuptools/__pycache__/msvc9_support.cpython-34.pyc,,
setuptools/command/__pycache__/install.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,,
setuptools/__pycache__/depends.cpython-34.pyc,,
setuptools/__pycache__/py26compat.cpython-34.pyc,,
setuptools/__pycache__/ssl_support.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,,
setuptools/__pycache__/archive_util.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,,
__pycache__/easy_install.cpython-34.pyc,,
setuptools/__pycache__/compat.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,,
setuptools/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/egg_info.cpython-34.pyc,,
pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,,
setuptools/__pycache__/windows_support.cpython-34.pyc,,
setuptools/command/__pycache__/build_ext.cpython-34.pyc,,
setuptools/command/__pycache__/install_scripts.cpython-34.pyc,,
setuptools/__pycache__/unicode_utils.cpython-34.pyc,,
setuptools/command/__pycache__/saveopts.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,,
setuptools/command/__pycache__/rotate.cpython-34.pyc,,
setuptools/__pycache__/dist.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,,
setuptools/command/__pycache__/build_ext.cpython-34.pyc,,
setuptools/__pycache__/sandbox.cpython-34.pyc,,
__pycache__/easy_install.cpython-34.pyc,,
setuptools/command/__pycache__/upload_docs.cpython-34.pyc,,
setuptools/command/__pycache__/install_lib.cpython-34.pyc,,
pkg_resources/__pycache__/__init__.cpython-34.pyc,,
setuptools/__pycache__/msvc9_support.cpython-34.pyc,,
setuptools/command/__pycache__/easy_install.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,,
setuptools/command/__pycache__/register.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,,
setuptools/__pycache__/package_index.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,,
setuptools/command/__pycache__/build_py.cpython-34.pyc,,
setuptools/__pycache__/version.cpython-34.pyc,,
setuptools/command/__pycache__/install.cpython-34.pyc,,
setuptools/command/__pycache__/egg_info.cpython-34.pyc,,
setuptools/__pycache__/compat.cpython-34.pyc,,
setuptools/command/__pycache__/test.cpython-34.pyc,,
setuptools/command/__pycache__/setopt.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,,
setuptools/command/__pycache__/__init__.cpython-34.pyc,,
setuptools/__pycache__/site-patch.cpython-34.pyc,,
setuptools/command/__pycache__/install_egg_info.cpython-34.pyc,,
setuptools/__pycache__/extension.cpython-34.pyc,,
setuptools/__pycache__/depends.cpython-34.pyc,,
setuptools/__pycache__/py27compat.cpython-34.pyc,,
setuptools/command/__pycache__/build_py.cpython-34.pyc,,
setuptools/command/__pycache__/setopt.cpython-34.pyc,,
setuptools/__pycache__/utils.cpython-34.pyc,,
setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,,
setuptools/__pycache__/windows_support.cpython-34.pyc,,
setuptools/command/__pycache__/develop.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,,
setuptools/command/__pycache__/saveopts.cpython-34.pyc,,
setuptools/__pycache__/py27compat.cpython-34.pyc,,
pkg_resources/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/rotate.cpython-34.pyc,,
setuptools/__pycache__/extension.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,,
setuptools/command/__pycache__/test.cpython-34.pyc,,
setuptools/command/__pycache__/upload_docs.cpython-34.pyc,,
_markerlib/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/sdist.cpython-34.pyc,,
setuptools/__pycache__/py31compat.cpython-34.pyc,,
setuptools/__pycache__/ssl_support.cpython-34.pyc,,
pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,,
setuptools/__pycache__/py26compat.cpython-34.pyc,,
setuptools/__pycache__/__init__.cpython-34.pyc,,
setuptools/__pycache__/archive_util.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,,
setuptools/command/__pycache__/alias.cpython-34.pyc,,
_markerlib/__pycache__/markers.cpython-34.pyc,,
setuptools/__pycache__/site-patch.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,,
setuptools/__pycache__/py31compat.cpython-34.pyc,,
setuptools/__pycache__/sandbox.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,,
setuptools/__pycache__/dist.cpython-34.pyc,,
setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,,
setuptools/__pycache__/version.cpython-34.pyc,,
setuptools/command/__pycache__/install_lib.cpython-34.pyc,,
setuptools/command/__pycache__/develop.cpython-34.pyc,,
setuptools/command/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/easy_install.cpython-34.pyc,,
setuptools/command/__pycache__/alias.cpython-34.pyc,,
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -4,6 +4,7 @@ export PIP_DOWNLOAD_CACHE=$HOME/../../../pip_cache
cat ../../openmedialibrary/requirements.txt \
    | grep -v lxml \
    | grep -v pyopenssl \
    | grep -v pyCrypto \
    | grep -v simplejson \
    > requirements.txt
echo setuptools >> requirements.txt
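The hunk above rebuilds requirements.txt while dropping the packages that ship compiled extensions and are built separately per platform, then appends setuptools. A rough Python equivalent of the grep pipeline, as a sketch only (the paths come from the script itself; matching case-insensitively is a simplification, since grep as used above is case-sensitive):

    # Sketch: filter out packages that are provided as prebuilt platform
    # binaries, then pin setuptools into the generated requirements file.
    EXCLUDED = ('lxml', 'pyopenssl', 'pycrypto', 'simplejson')

    with open('../../openmedialibrary/requirements.txt') as src:
        kept = [line for line in src
                if not any(name in line.lower() for name in EXCLUDED)]

    with open('requirements.txt', 'w') as dst:
        dst.writelines(kept)
        dst.write('setuptools\n')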
@@ -18,6 +18,6 @@
../PyPDF2/__pycache__/__init__.cpython-34.pyc
./
top_level.txt
dependency_links.txt
PKG-INFO
dependency_links.txt
SOURCES.txt
@@ -0,0 +1,11 @@
Metadata-Version: 1.0
Name: PySocks
Version: 1.5.6
Summary: A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information.
Home-page: https://github.com/Anorov/PySocks
Author: Anorov
Author-email: anorov.vorona@gmail.com
License: BSD
Description: UNKNOWN
Keywords: socks,proxy
Platform: UNKNOWN
@@ -0,0 +1,6 @@
socks.py
sockshandler.py
PySocks.egg-info/PKG-INFO
PySocks.egg-info/SOURCES.txt
PySocks.egg-info/dependency_links.txt
PySocks.egg-info/top_level.txt
@@ -0,0 +1,9 @@
../socks.py
../sockshandler.py
../__pycache__/socks.cpython-34.pyc
../__pycache__/sockshandler.cpython-34.pyc
./
top_level.txt
dependency_links.txt
PKG-INFO
SOURCES.txt
@@ -0,0 +1,2 @@
socks
sockshandler
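PySocks is the module that actually talks to Tor's SOCKS port, which is presumably why it appears in this "add tor deps" commit. A minimal sketch of how the two installed modules (socks.py and sockshandler.py) are typically wired up; the SOCKS5 address 127.0.0.1:9050 is Tor's conventional default and an assumption here, not something this commit configures:

    import socket

    import socks  # socks.py, installed above

    # Route every newly created socket through Tor's local SOCKS5 proxy.
    socks.set_default_proxy(socks.SOCKS5, "127.0.0.1", 9050)
    socket.socket = socks.socksocket

    # sockshandler.py instead scopes the proxy to a single urllib opener:
    from urllib.request import build_opener
    from sockshandler import SocksiPyHandler

    opener = build_opener(SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050))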
@@ -0,0 +1,48 @@
Certifi: Python SSL Certificates
================================

`Certifi`_ is a carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.

Installation
------------

``certifi`` is available on PyPI. Simply install it with ``pip``::

    $ pip install certifi

Usage
-----

To reference the installed CA Bundle, you can use the built-in function::

    >>> import certifi

    >>> certifi.where()
    '/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'

Enjoy!

1024-bit Root Certificates
~~~~~~~~~~~~~~~~~~~~~~~~~~

Browsers and certificate authorities have concluded that 1024-bit keys are
unacceptably weak for certificates, particularly root certificates. For this
reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
certificate from the same CA. Because Mozilla removed these certificates from
its bundle, ``certifi`` removed them as well.

Unfortunately, old versions of OpenSSL (less than 1.0.2) sometimes fail to
validate certificate chains that use the strong roots. For this reason, if you
fail to validate a certificate using the ``certifi.where()`` mechanism, you can
intentionally re-add the 1024-bit roots back into your bundle by calling
``certifi.old_where()`` instead. This is not recommended in production: if at
all possible you should upgrade to a newer OpenSSL. However, if you have no
other option, this may work for you.

.. _`Certifi`: http://certifi.io/en/latest/
.. _`Requests`: http://docs.python-requests.org/en/latest/
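The old_where() escape hatch the README describes can be wrapped in a small fallback helper. A sketch under stated assumptions (Python 3.4's ssl and urllib.request; urlopen_verified is a made-up name, not part of certifi or this repository):

    import ssl
    import urllib.request

    import certifi

    def urlopen_verified(url):
        # Try the strong bundle first; on old OpenSSL (< 1.0.2) fall back
        # to the bundle that still contains the 1024-bit roots.
        for bundle in (certifi.where(), certifi.old_where()):
            ctx = ssl.create_default_context(cafile=bundle)
            try:
                return urllib.request.urlopen(url, context=ctx)
            except ssl.SSLError:
                continue
        raise ssl.SSLError('certificate verification failed with both bundles')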
@@ -1,6 +1,6 @@
 Metadata-Version: 2.0
 Name: certifi
-Version: 2015.9.6.2
+Version: 2015.11.20
 Summary: Python package for providing Mozilla's CA Bundle.
 Home-page: http://certifi.io/
 Author: Kenneth Reitz
@@ -30,7 +30,7 @@ of TLS hosts. It has been extracted from the `Requests`_ project.
 Installation
 ------------
 
-`certifi` is available on PyPI. Simply install it with `pip`::
+``certifi`` is available on PyPI. Simply install it with ``pip``::
 
     $ pip install certifi
 
@@ -46,6 +46,24 @@ To reference the installed CA Bundle, you can use the built-in function::
 
 Enjoy!
 
+1024-bit Root Certificates
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Browsers and certificate authorities have concluded that 1024-bit keys are
+unacceptably weak for certificates, particularly root certificates. For this
+reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
+bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
+certificate from the same CA. Because Mozilla removed these certificates from
+its bundle, ``certifi`` removed them as well.
+
+Unfortunately, old versions of OpenSSL (less than 1.0.2) sometimes fail to
+validate certificate chains that use the strong roots. For this reason, if you
+fail to validate a certificate using the ``certifi.where()`` mechanism, you can
+intentionally re-add the 1024-bit roots back into your bundle by calling
+``certifi.old_where()`` instead. This is not recommended in production: if at
+all possible you should upgrade to a newer OpenSSL. However, if you have no
+other option, this may work for you.
+
 .. _`Certifi`: http://certifi.io/en/latest/
 .. _`Requests`: http://docs.python-requests.org/en/latest/
 
@@ -0,0 +1,15 @@
certifi/__init__.py,sha256=hG3J5tdsVc9gHdErxXJQnes5-EQI_102Y5UXk0G-qkk,63
certifi/__main__.py,sha256=FiOYt1Fltst7wk9DRa6GCoBr8qBUxlNQu_MKJf04E6s,41
certifi/cacert.pem,sha256=j_IWL25eEgetcl_WsBPVc3qYwpOleezC6wo_zWb98V0,315580
certifi/core.py,sha256=DqvIINYNNXsp3Srlk_NRaiizaww8po3l8t8ksz-Xt6Q,716
certifi/old_root.pem,sha256=Sm1SGy9Y3FjEDEy9ie0EX39fcJCv_r6gAPtj9yBrXEY,24014
certifi/weak.pem,sha256=spA74ndnORVAEKwL68MswT1BBXwtOHd9ht2vIKRF0oE,339594
certifi-2015.11.20.dist-info/DESCRIPTION.rst,sha256=u4KmW8nf84KSFVrJue_kb-ArB1h3uUQT4H6CV_oOeUI,1706
certifi-2015.11.20.dist-info/METADATA,sha256=z-iWa7SyyBzqckDPWM7zx6oVo6C3EYMn3xKBp7Mgzgg,2522
certifi-2015.11.20.dist-info/RECORD,,
certifi-2015.11.20.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110
certifi-2015.11.20.dist-info/metadata.json,sha256=wO51GWDU74nDlRgLGR8kNuvtzKreMz0K4Fp3E4fhUys,911
certifi-2015.11.20.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi/__pycache__/core.cpython-34.pyc,,
certifi/__pycache__/__init__.cpython-34.pyc,,
certifi/__pycache__/__main__.cpython-34.pyc,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.24.0)
+Generator: bdist_wheel (0.26.0)
 Root-Is-Purelib: true
 Tag: py2-none-any
 Tag: py3-none-any
@@ -0,0 +1 @@
{"generator": "bdist_wheel (0.26.0)", "summary": "Python package for providing Mozilla's CA Bundle.", "classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4"], "extensions": {"python.details": {"project_urls": {"Home": "http://certifi.io/"}, "contacts": [{"email": "me@kennethreitz.com", "name": "Kenneth Reitz", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "ISC", "metadata_version": "2.0", "name": "certifi", "version": "2015.11.20"}
@@ -1,30 +0,0 @@
Certifi: Python SSL Certificates
================================

`Certifi`_ is a carefully curated collection of Root Certificates for
validating the trustworthiness of SSL certificates while verifying the identity
of TLS hosts. It has been extracted from the `Requests`_ project.

Installation
------------

`certifi` is available on PyPI. Simply install it with `pip`::

    $ pip install certifi

Usage
-----

To reference the installed CA Bundle, you can use the built-in function::

    >>> import certifi

    >>> certifi.where()
    '/usr/local/lib/python2.7/site-packages/certifi/cacert.pem'

Enjoy!

.. _`Certifi`: http://certifi.io/en/latest/
.. _`Requests`: http://docs.python-requests.org/en/latest/
@@ -1,15 +0,0 @@
certifi/__init__.py,sha256=T8LOdkem2W_EqteuCirstbPu3iS11BmKnS_nKqQI_kQ,65
certifi/__main__.py,sha256=FiOYt1Fltst7wk9DRa6GCoBr8qBUxlNQu_MKJf04E6s,41
certifi/cacert.pem,sha256=wY10ezo0r5ZPcgfctoi3Q9KRZ79_tpb_MPDGsgWiOwE,320698
certifi/core.py,sha256=DqvIINYNNXsp3Srlk_NRaiizaww8po3l8t8ksz-Xt6Q,716
certifi/old_root.pem,sha256=Sm1SGy9Y3FjEDEy9ie0EX39fcJCv_r6gAPtj9yBrXEY,24014
certifi/weak.pem,sha256=5xzWFRrSP0ZsXiW6emg8UQ_w497lT4qWCv32OO8R1ME,344712
certifi-2015.9.6.2.dist-info/DESCRIPTION.rst,sha256=1HthO7cC8rfi_tZB3iPCnK7Npcd48svSApnFrl8J89Q,716
certifi-2015.9.6.2.dist-info/METADATA,sha256=-IMJn5G46t_YY0VsjSgXQalm6mC4sChB8lsDanFlTME,1532
certifi-2015.9.6.2.dist-info/metadata.json,sha256=LNvgTP4aFSgWMQ-8ySDRnRE7506kiisjTkPqBHna1YE,911
certifi-2015.9.6.2.dist-info/RECORD,,
certifi-2015.9.6.2.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
certifi-2015.9.6.2.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
certifi/__pycache__/__init__.cpython-34.pyc,,
certifi/__pycache__/__main__.cpython-34.pyc,,
certifi/__pycache__/core.cpython-34.pyc,,
@@ -1 +0,0 @@
{"license": "ISC", "name": "certifi", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "Python package for providing Mozilla's CA Bundle.", "version": "2015.9.6.2", "extensions": {"python.details": {"project_urls": {"Home": "http://certifi.io/"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "me@kennethreitz.com", "name": "Kenneth Reitz"}]}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4"]}
@@ -1,3 +1,3 @@
 from .core import where, old_where
 
-__version__ = "2015.09.06.2"
+__version__ = "2015.11.20"
@@ -1,31 +1,4 @@

# Issuer: O=Equifax OU=Equifax Secure Certificate Authority
# Subject: O=Equifax OU=Equifax Secure Certificate Authority
# Label: "Equifax Secure CA"
# Serial: 903804111
# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4
# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a
# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78
-----BEGIN CERTIFICATE-----
MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV
UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy
dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1
MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx
dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B
AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f
BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A
cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC
AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ
MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm
aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw
ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj
IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF
MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA
A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y
7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh
1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4
-----END CERTIFICATE-----

# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
# Label: "GlobalSign Root CA"

@@ -117,38 +90,6 @@ F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
-----END CERTIFICATE-----

# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
# Label: "Verisign Class 4 Public Primary Certification Authority - G3"
# Serial: 314531972711909413743075096039378935511
# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df
# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d
# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06
-----BEGIN CERTIFICATE-----
MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1
GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ
+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd
U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm
NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY
ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/
ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1
CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq
g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm
fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c
2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/
bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg==
-----END CERTIFICATE-----

# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Label: "Entrust.net Premium 2048 Secure Server CA"

@@ -910,40 +851,6 @@ u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3ynGQI0DvDKcWy
iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw==
-----END CERTIFICATE-----

# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com
# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com
# Label: "UTN DATACorp SGC Root CA"
# Serial: 91374294542884689855167577680241077609
# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06
# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4
# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48
-----BEGIN CERTIFICATE-----
MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB
kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD
VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu
dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6
E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ
D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK
4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq
lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW
bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB
o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT
MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js
LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr
BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB
AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft
Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj
j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH
KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv
2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3
mfnGV/TJVTl4uix5yaaIK/QI
-----END CERTIFICATE-----

# Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com
# Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com
# Label: "UTN USERFirst Hardware Root CA"

@@ -1507,39 +1414,6 @@ rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2
9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis=
-----END CERTIFICATE-----

# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Kasım 2005
# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Kasım 2005
# Label: "TURKTRUST Certificate Services Provider Root 2"
# Serial: 1
# MD5 Fingerprint: 37:a5:6e:d4:b1:25:84:97:b7:fd:56:15:7a:f9:a2:00
# SHA1 Fingerprint: b4:35:d4:e1:11:9d:1c:66:90:a7:49:eb:b3:94:bd:63:7b:a7:82:b7
# SHA256 Fingerprint: c4:70:cf:54:7e:23:02:b9:77:fb:29:dd:71:a8:9a:7b:6c:1f:60:77:7b:03:29:f5:60:17:f3:28:bf:4f:6b:e6
-----BEGIN CERTIFICATE-----
MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOc
UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx
c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xS
S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg
SGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcNMDUxMTA3MTAwNzU3
WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVrdHJv
bmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJU
UjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSw
bGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWe
LiAoYykgS2FzxLFtIDIwMDUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqeLCDe2JAOCtFp0if7qnef
J1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKIx+XlZEdh
R3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJ
Qv2gQrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGX
JHpsmxcPbe9TmJEr5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1p
zpwACPI2/z7woQ8arBT9pmAPAgMBAAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58S
Fq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8GA1UdEwEB/wQFMAMBAf8wDQYJ
KoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/nttRbj2hWyfIvwq
ECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4
Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFz
gw2lGh1uEpJ+hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotH
uFEJjOp9zYhys2AzsfAKRO8P9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LS
y3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5UrbnBEI=
-----END CERTIFICATE-----

# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
# Label: "SwissSign Gold CA - G2"

@@ -3362,37 +3236,6 @@ ducTZnV+ZfsBn5OHiJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmC
IoaZM3Fa6hlXPZHNqcCjbgcTpsnt+GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM=
-----END CERTIFICATE-----

# Issuer: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH OU=A-Trust-nQual-03
# Subject: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH OU=A-Trust-nQual-03
# Label: "A-Trust-nQual-03"
# Serial: 93214
# MD5 Fingerprint: 49:63:ae:27:f4:d5:95:3d:d8:db:24:86:b8:9c:07:53
# SHA1 Fingerprint: d3:c0:63:f2:19:ed:07:3e:34:ad:5d:75:0b:32:76:29:ff:d5:9a:f2
# SHA256 Fingerprint: 79:3c:bf:45:59:b9:fd:e3:8a:b2:2d:f1:68:69:f6:98:81:ae:14:c4:b0:13:9a:c7:88:a7:8a:1a:fc:ca:02:fb
-----BEGIN CERTIFICATE-----
MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJB
VDFIMEYGA1UECgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBp
bSBlbGVrdHIuIERhdGVudmVya2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5R
dWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5RdWFsLTAzMB4XDTA1MDgxNzIyMDAw
MFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgwRgYDVQQKDD9BLVRy
dXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0ZW52
ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMM
EEEtVHJ1c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQCtPWFuA/OQO8BBC4SAzewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUj
lUC5B3ilJfYKvUWG6Nm9wASOhURh73+nyfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZ
znF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPESU7l0+m0iKsMrmKS1GWH
2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4iHQF63n1
k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs
2e3Vcuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYD
VR0OBAoECERqlWdVeRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
AQEAVdRU0VlIXLOThaq/Yy/kgM40ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fG
KOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmrsQd7TZjTXLDR8KdCoLXEjq/+
8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZdJXDRZslo+S4R
FGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS
mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmE
DNuxUCAKGkq6ahq97BvIxYSazQ==
-----END CERTIFICATE-----

# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
# Label: "TWCA Root Certification Authority"

@@ -5227,3 +5070,83 @@ Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ
8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW
gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE=
-----END CERTIFICATE-----

# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Label: "OISTE WISeKey Global Root GB CA"
# Serial: 157768595616588414422159278966750757568
# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
-----BEGIN CERTIFICATE-----
MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
-----END CERTIFICATE-----

# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited
# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited
# Label: "Certification Authority of WoSign G2"
# Serial: 142423943073812161787490648904721057092
# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60
# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1
# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16
-----BEGIN CERTIFICATE-----
MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY
MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV
BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx
MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK
ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo
b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX
JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO
gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg
5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n
fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5
2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ
KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8
fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G
3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy
SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng
LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7
XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg=
-----END CERTIFICATE-----

# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited
# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited
# Label: "CA WoSign ECC Root"
# Serial: 138625735294506723296996289575837012112
# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20
# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b
# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02
-----BEGIN CERTIFICATE-----
MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw
CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT
EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4
NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb
MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID
YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8
KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES
1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB
1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3
aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K
-----END CERTIFICATE-----
@@ -5227,6 +5070,86 @@ Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ
8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW
gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE=
-----END CERTIFICATE-----

# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
# Label: "OISTE WISeKey Global Root GB CA"
# Serial: 157768595616588414422159278966750757568
# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
-----BEGIN CERTIFICATE-----
MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
-----END CERTIFICATE-----

# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited
# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited
# Label: "Certification Authority of WoSign G2"
# Serial: 142423943073812161787490648904721057092
# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60
# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1
# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16
-----BEGIN CERTIFICATE-----
MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY
MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV
BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx
MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK
ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo
b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX
JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO
gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg
5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n
fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5
2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ
KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8
fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G
3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy
SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng
LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7
XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg=
-----END CERTIFICATE-----

# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited
# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited
# Label: "CA WoSign ECC Root"
# Serial: 138625735294506723296996289575837012112
# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20
# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b
# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02
-----BEGIN CERTIFICATE-----
MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw
CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT
EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4
NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb
MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID
YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8
KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES
1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB
1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3
aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K
-----END CERTIFICATE-----

# Issuer: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Subject: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
# Label: "Entrust.net Secure Server CA"
@@ -71,8 +71,8 @@
../html5lib/trie/__pycache__/datrie.cpython-34.pyc
../html5lib/trie/__pycache__/py.cpython-34.pyc
./
dependency_links.txt
requires.txt
top_level.txt
PKG-INFO
requires.txt
dependency_links.txt
SOURCES.txt
@@ -1,12 +1,12 @@
 Metadata-Version: 1.1
 Name: ox
-Version: 2.3.x
+Version: 2.3.b-769-
 Summary: python-ox - the web in a dict
-Home-page: http://code.0x2620.org/python-ox
+Home-page: https://wiki.0x2620.org/wiki/python-ox
 Author: 0x2620
 Author-email: 0x2620@0x2620.org
 License: GPLv3
-Download-URL: http://code.0x2620.org/python-ox/download
+Download-URL: https://code.0x2620.org/python-ox/download
 Description: UNKNOWN
 Platform: UNKNOWN
 Classifier: Operating System :: OS Independent
@@ -1,5 +1,6 @@
 README
 ox/__init__.py
+ox/__version.py
 ox/api.py
 ox/cache.py
 ox/file.py
@@ -0,0 +1 @@

@@ -19,6 +19,7 @@
 ../ox/srt.py
 ../ox/text.py
 ../ox/utils.py
+../ox/__version.py
 ../ox/django/__init__.py
 ../ox/django/decorators.py
 ../ox/django/fields.py
@ -98,6 +99,7 @@
../ox/__pycache__/srt.cpython-34.pyc
../ox/__pycache__/text.cpython-34.pyc
../ox/__pycache__/utils.cpython-34.pyc
../ox/__pycache__/__version.cpython-34.pyc
../ox/django/__pycache__/__init__.cpython-34.pyc
../ox/django/__pycache__/decorators.cpython-34.pyc
../ox/django/__pycache__/fields.cpython-34.pyc
@ -157,8 +159,8 @@
../ox/web/__pycache__/wikipedia.cpython-34.pyc
../ox/web/__pycache__/youtube.cpython-34.pyc
./
top_level.txt
PKG-INFO
requires.txt
dependency_links.txt
top_level.txt
SOURCES.txt
1
Shared/lib/python3.4/site-packages/ox/__version.py
Normal file

@ -0,0 +1 @@
VERSION="2.3.b'769'"
@ -3,6 +3,8 @@
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import with_statement, print_function

import re

from .js import minify
from .utils import json

@ -14,8 +16,18 @@ def loads(source):
    try:
        minified = minify(source)
        return json.loads(minified)
    except json.JSONDecodeError as e:
        s = minified.split('\n')
        context = s[e.lineno-1][max(0, e.colno-1):e.colno+30]
        msg = e.msg + ' at ' + context
        raise json.JSONDecodeError(msg, minified, e.pos)
    except ValueError as e:
        msg = e.message if hasattr(e, 'message') else str(e)
        lineno = None
        colno = None
        try:
            m = re.search(r'line (\d+) column (\d+)', msg)
            if m:
                (lineno, colno) = map(int, m.groups())
        except:
            pass
        if lineno and colno:
            s = minified.split('\n')
            context = s[lineno-1][max(0, colno-30):colno+30]
            msg += ' at:\n\n %s\n %s\033[1m^\033[0m' %(context, ' ' * (colno - max(0, colno-30) - 2))
        raise ValueError(msg)
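The hunk above teaches ox's jsonc loader to report an excerpt of the offending input on decode errors. As a standalone illustration of the same technique, here is a minimal sketch using only the standard-library json module (Python 3.5+, where json.JSONDecodeError exists; the function name loads_with_context is hypothetical):

import json

def loads_with_context(source):
    # Parse JSON; on failure, re-raise with an excerpt of the offending line.
    try:
        return json.loads(source)
    except json.JSONDecodeError as e:
        line = source.split('\n')[e.lineno - 1]
        context = line[max(0, e.colno - 1):e.colno + 30]
        raise json.JSONDecodeError(e.msg + ' at ' + context, source, e.pos)

try:
    loads_with_context('{"a": 1,}')
except ValueError as e:
    print(e)  # message now includes the excerpt around the error position
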
@ -83,81 +83,81 @@ requests-2.3.0.dist-info/pydist.json,sha256=7nySdPrVYYyJK2C3cPlHJr1oSZ_-lFiBlp9D
requests-2.3.0.dist-info/RECORD,,
requests-2.3.0.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9
requests-2.3.0.dist-info/WHEEL,sha256=SXYYsi-y-rEGIva8sB8iKF6bAFD6YDhmqHX5hI3fc0o,110
requests/packages/chardet/__pycache__/sbcsgroupprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/compat.cpython-34.pyc,,
requests/packages/chardet/__pycache__/big5prober.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/connection.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/ssl_.cpython-34.pyc,,
requests/packages/chardet/__pycache__/chardistribution.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/connection.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/timeout.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/url.cpython-34.pyc,,
requests/packages/chardet/__pycache__/utf8prober.cpython-34.pyc,,
requests/__pycache__/sessions.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/request.cpython-34.pyc,,
requests/packages/chardet/__pycache__/latin1prober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/eucjpprober.cpython-34.pyc,,
requests/__pycache__/certs.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/response.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/__init__.cpython-34.pyc,,
requests/packages/chardet/__pycache__/universaldetector.cpython-34.pyc,,
requests/packages/chardet/__pycache__/gb2312freq.cpython-34.pyc,,
requests/packages/chardet/__pycache__/cp949prober.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/ssl_.cpython-34.pyc,,
requests/__pycache__/api.cpython-34.pyc,,
requests/__pycache__/adapters.cpython-34.pyc,,
requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-34.pyc,,
requests/packages/chardet/__pycache__/hebrewprober.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/connectionpool.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/_collections.cpython-34.pyc,,
requests/packages/chardet/__pycache__/escprober.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/fields.cpython-34.pyc,,
requests/packages/chardet/__pycache__/euctwfreq.cpython-34.pyc,,
requests/packages/chardet/__pycache__/gb2312prober.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/response.cpython-34.pyc,,
requests/__pycache__/__init__.cpython-34.pyc,,
requests/packages/chardet/__pycache__/euckrfreq.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/connection.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langhebrewmodel.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/poolmanager.cpython-34.pyc,,
requests/packages/urllib3/contrib/__pycache__/pyopenssl.cpython-34.pyc,,
requests/__pycache__/status_codes.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/__init__.cpython-34.pyc,,
requests/__pycache__/compat.cpython-34.pyc,,
requests/packages/chardet/__pycache__/mbcharsetprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/sbcsgroupprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/jpcntx.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langthaimodel.cpython-34.pyc,,
requests/packages/chardet/__pycache__/constants.cpython-34.pyc,,
requests/__pycache__/exceptions.cpython-34.pyc,,
requests/packages/chardet/__pycache__/escsm.cpython-34.pyc,,
requests/packages/chardet/__pycache__/__init__.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/__init__.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langcyrillicmodel.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/filepost.cpython-34.pyc,,
requests/packages/chardet/__pycache__/jisfreq.cpython-34.pyc,,
requests/packages/urllib3/packages/__pycache__/six.cpython-34.pyc,,
requests/__pycache__/hooks.cpython-34.pyc,,
requests/packages/chardet/__pycache__/mbcssm.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/__init__.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/poolmanager.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/url.cpython-34.pyc,,
requests/packages/chardet/__pycache__/constants.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langhebrewmodel.cpython-34.pyc,,
requests/packages/chardet/__pycache__/latin1prober.cpython-34.pyc,,
requests/__pycache__/sessions.cpython-34.pyc,,
requests/packages/chardet/__pycache__/euctwfreq.cpython-34.pyc,,
requests/__pycache__/certs.cpython-34.pyc,,
requests/packages/chardet/__pycache__/euckrprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/universaldetector.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langbulgarianmodel.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langhungarianmodel.cpython-34.pyc,,
requests/packages/chardet/__pycache__/codingstatemachine.cpython-34.pyc,,
requests/packages/chardet/__pycache__/jpcntx.cpython-34.pyc,,
requests/packages/chardet/__pycache__/chardetect.cpython-34.pyc,,
requests/__pycache__/exceptions.cpython-34.pyc,,
requests/__pycache__/compat.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/fields.cpython-34.pyc,,
requests/packages/chardet/__pycache__/sbcharsetprober.cpython-34.pyc,,
requests/__pycache__/api.cpython-34.pyc,,
requests/packages/chardet/__pycache__/hebrewprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/big5freq.cpython-34.pyc,,
requests/packages/urllib3/packages/__pycache__/ordered_dict.cpython-34.pyc,,
requests/packages/chardet/__pycache__/euckrfreq.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langthaimodel.cpython-34.pyc,,
requests/packages/chardet/__pycache__/euctwprober.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/exceptions.cpython-34.pyc,,
requests/packages/urllib3/packages/__pycache__/__init__.cpython-34.pyc,,
requests/packages/chardet/__pycache__/eucjpprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/gb2312freq.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/response.cpython-34.pyc,,
requests/packages/urllib3/contrib/__pycache__/ntlmpool.cpython-34.pyc,,
requests/__pycache__/status_codes.cpython-34.pyc,,
requests/__pycache__/models.cpython-34.pyc,,
requests/__pycache__/__init__.cpython-34.pyc,,
requests/__pycache__/adapters.cpython-34.pyc,,
requests/packages/chardet/__pycache__/escprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/sjisprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/mbcsgroupprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/charsetgroupprober.cpython-34.pyc,,
requests/packages/urllib3/contrib/__pycache__/__init__.cpython-34.pyc,,
requests/__pycache__/structures.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/response.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/exceptions.cpython-34.pyc,,
requests/packages/chardet/__pycache__/sjisprober.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/request.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/filepost.cpython-34.pyc,,
requests/packages/chardet/__pycache__/charsetprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langcyrillicmodel.cpython-34.pyc,,
requests/__pycache__/cookies.cpython-34.pyc,,
requests/packages/chardet/__pycache__/charsetgroupprober.cpython-34.pyc,,
requests/packages/__pycache__/__init__.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/connectionpool.cpython-34.pyc,,
requests/packages/urllib3/contrib/__pycache__/pyopenssl.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langgreekmodel.cpython-34.pyc,,
requests/packages/chardet/__pycache__/mbcsgroupprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/mbcssm.cpython-34.pyc,,
requests/packages/chardet/__pycache__/chardetect.cpython-34.pyc,,
requests/packages/chardet/__pycache__/sbcharsetprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langbulgarianmodel.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/connection.cpython-34.pyc,,
requests/__pycache__/auth.cpython-34.pyc,,
requests/packages/chardet/__pycache__/cp949prober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/gb2312prober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/mbcharsetprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langhungarianmodel.cpython-34.pyc,,
requests/__pycache__/cookies.cpython-34.pyc,,
requests/packages/chardet/__pycache__/compat.cpython-34.pyc,,
requests/packages/chardet/__pycache__/langgreekmodel.cpython-34.pyc,,
requests/packages/chardet/__pycache__/charsetprober.cpython-34.pyc,,
requests/packages/urllib3/__pycache__/request.cpython-34.pyc,,
requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-34.pyc,,
requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-34.pyc,,
requests/packages/urllib3/util/__pycache__/timeout.cpython-34.pyc,,
requests/packages/urllib3/packages/__pycache__/six.cpython-34.pyc,,
requests/packages/chardet/__pycache__/euckrprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/chardistribution.cpython-34.pyc,,
requests/packages/chardet/__pycache__/big5freq.cpython-34.pyc,,
requests/packages/__pycache__/__init__.cpython-34.pyc,,
requests/__pycache__/models.cpython-34.pyc,,
requests/packages/chardet/__pycache__/big5prober.cpython-34.pyc,,
requests/packages/urllib3/contrib/__pycache__/ntlmpool.cpython-34.pyc,,
requests/packages/urllib3/packages/__pycache__/ordered_dict.cpython-34.pyc,,
requests/__pycache__/utils.cpython-34.pyc,,
requests/packages/chardet/__pycache__/euctwprober.cpython-34.pyc,,
requests/packages/chardet/__pycache__/codingstatemachine.cpython-34.pyc,,
requests/__pycache__/hooks.cpython-34.pyc,,
requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-34.pyc,,
@ -71,54 +71,54 @@ setuptools-18.5.dist-info/top_level.txt,sha256=7780fzudMJkykiTcIrAQ8m8Lll6kot3EE
setuptools-18.5.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
/srv/openmedialibrary/platform/Shared/home/.local/bin/easy_install,sha256=7h7lc5DCAhnE08UwEm49wGymGDGdOcrkRdncTKYmXIQ,233
/srv/openmedialibrary/platform/Shared/home/.local/bin/easy_install-3.4,sha256=7h7lc5DCAhnE08UwEm49wGymGDGdOcrkRdncTKYmXIQ,233
setuptools/command/__pycache__/develop.cpython-34.pyc,,
setuptools/__pycache__/__init__.cpython-34.pyc,,
pkg_resources/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,,
setuptools/command/__pycache__/egg_info.cpython-34.pyc,,
pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/install_scripts.cpython-34.pyc,,
setuptools/command/__pycache__/build_py.cpython-34.pyc,,
setuptools/command/__pycache__/register.cpython-34.pyc,,
setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,,
_markerlib/__pycache__/markers.cpython-34.pyc,,
setuptools/command/__pycache__/test.cpython-34.pyc,,
setuptools/command/__pycache__/install_egg_info.cpython-34.pyc,,
setuptools/__pycache__/msvc9_support.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,,
setuptools/__pycache__/dist.cpython-34.pyc,,
setuptools/command/__pycache__/easy_install.cpython-34.pyc,,
setuptools/__pycache__/ssl_support.cpython-34.pyc,,
setuptools/__pycache__/py27compat.cpython-34.pyc,,
setuptools/__pycache__/py26compat.cpython-34.pyc,,
setuptools/command/__pycache__/upload_docs.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,,
setuptools/__pycache__/archive_util.cpython-34.pyc,,
_markerlib/__pycache__/__init__.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,,
setuptools/__pycache__/package_index.cpython-34.pyc,,
setuptools/command/__pycache__/rotate.cpython-34.pyc,,
setuptools/__pycache__/version.cpython-34.pyc,,
setuptools/command/__pycache__/build_ext.cpython-34.pyc,,
setuptools/__pycache__/py31compat.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,,
setuptools/__pycache__/site-patch.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,,
setuptools/__pycache__/unicode_utils.cpython-34.pyc,,
setuptools/__pycache__/depends.cpython-34.pyc,,
setuptools/command/__pycache__/sdist.cpython-34.pyc,,
setuptools/command/__pycache__/install_lib.cpython-34.pyc,,
setuptools/__pycache__/utils.cpython-34.pyc,,
setuptools/__pycache__/sandbox.cpython-34.pyc,,
setuptools/command/__pycache__/saveopts.cpython-34.pyc,,
setuptools/command/__pycache__/install.cpython-34.pyc,,
setuptools/__pycache__/compat.cpython-34.pyc,,
setuptools/__pycache__/windows_support.cpython-34.pyc,,
setuptools/command/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/setopt.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,,
__pycache__/easy_install.cpython-34.pyc,,
setuptools/__pycache__/extension.cpython-34.pyc,,
setuptools/command/__pycache__/alias.cpython-34.pyc,,
setuptools/__pycache__/sandbox.cpython-34.pyc,,
setuptools/__pycache__/compat.cpython-34.pyc,,
pkg_resources/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/install.cpython-34.pyc,,
setuptools/command/__pycache__/egg_info.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,,
setuptools/command/__pycache__/test.cpython-34.pyc,,
setuptools/command/__pycache__/register.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,,
setuptools/__pycache__/utils.cpython-34.pyc,,
setuptools/command/__pycache__/easy_install.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,,
setuptools/__pycache__/py31compat.cpython-34.pyc,,
setuptools/__pycache__/depends.cpython-34.pyc,,
setuptools/__pycache__/extension.cpython-34.pyc,,
setuptools/__pycache__/__init__.cpython-34.pyc,,
__pycache__/easy_install.cpython-34.pyc,,
setuptools/__pycache__/py27compat.cpython-34.pyc,,
setuptools/__pycache__/windows_support.cpython-34.pyc,,
setuptools/__pycache__/dist.cpython-34.pyc,,
setuptools/command/__pycache__/saveopts.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,,
_markerlib/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/install_scripts.cpython-34.pyc,,
setuptools/__pycache__/msvc9_support.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,,
setuptools/__pycache__/ssl_support.cpython-34.pyc,,
setuptools/__pycache__/version.cpython-34.pyc,,
setuptools/__pycache__/site-patch.cpython-34.pyc,,
setuptools/command/__pycache__/rotate.cpython-34.pyc,,
pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,,
setuptools/command/__pycache__/build_py.cpython-34.pyc,,
setuptools/__pycache__/archive_util.cpython-34.pyc,,
setuptools/command/__pycache__/install_lib.cpython-34.pyc,,
setuptools/command/__pycache__/__init__.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,,
setuptools/command/__pycache__/build_ext.cpython-34.pyc,,
setuptools/__pycache__/py26compat.cpython-34.pyc,,
setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,,
setuptools/__pycache__/unicode_utils.cpython-34.pyc,,
setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,,
setuptools/command/__pycache__/develop.cpython-34.pyc,,
setuptools/command/__pycache__/upload_docs.cpython-34.pyc,,
setuptools/command/__pycache__/sdist.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,,
pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,,
setuptools/command/__pycache__/install_egg_info.cpython-34.pyc,,
setuptools/__pycache__/package_index.cpython-34.pyc,,
712
Shared/lib/python3.4/site-packages/socks.py
Normal file

@ -0,0 +1,712 @@
"""
|
||||
SocksiPy - Python SOCKS module.
|
||||
Version 1.5.6
|
||||
|
||||
Copyright 2006 Dan-Haim. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
3. Neither the name of Dan Haim nor the names of his contributors may be used
|
||||
to endorse or promote products derived from this software without specific
|
||||
prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
|
||||
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
|
||||
|
||||
|
||||
This module provides a standard socket-like interface for Python
|
||||
for tunneling connections through SOCKS proxies.
|
||||
|
||||
===============================================================================
|
||||
|
||||
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
|
||||
for use in PyLoris (http://pyloris.sourceforge.net/)
|
||||
|
||||
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
|
||||
mainly to merge bug fixes found in Sourceforge
|
||||
|
||||
Modifications made by Anorov (https://github.com/Anorov)
|
||||
-Forked and renamed to PySocks
|
||||
-Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method)
|
||||
-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
|
||||
courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py
|
||||
-Re-styled code to make it readable
|
||||
-Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
|
||||
-Improved exception handling and output
|
||||
-Removed irritating use of sequence indexes, replaced with tuple unpacked variables
|
||||
-Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
|
||||
-Other general fixes
|
||||
-Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies
|
||||
-Various small bug fixes
|
||||
"""
|
||||
|
||||
__version__ = "1.5.6"

import socket
import struct
from errno import EOPNOTSUPP, EINVAL, EAGAIN
from io import BytesIO
from os import SEEK_CUR
from collections import Callable

PROXY_TYPE_SOCKS4 = SOCKS4 = 1
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
PROXY_TYPE_HTTP = HTTP = 3

PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP}
PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys()))

_orgsocket = _orig_socket = socket.socket

class ProxyError(IOError):
    """
    socket_err contains original socket.error exception.
    """
    def __init__(self, msg, socket_err=None):
        self.msg = msg
        self.socket_err = socket_err

        if socket_err:
            self.msg += ": {0}".format(socket_err)

    def __str__(self):
        return self.msg

class GeneralProxyError(ProxyError): pass
class ProxyConnectionError(ProxyError): pass
class SOCKS5AuthError(ProxyError): pass
class SOCKS5Error(ProxyError): pass
class SOCKS4Error(ProxyError): pass
class HTTPError(ProxyError): pass

SOCKS4_ERRORS = { 0x5B: "Request rejected or failed",
                  0x5C: "Request rejected because SOCKS server cannot connect to identd on the client",
                  0x5D: "Request rejected because the client program and identd report different user-ids"
                }

SOCKS5_ERRORS = { 0x01: "General SOCKS server failure",
                  0x02: "Connection not allowed by ruleset",
                  0x03: "Network unreachable",
                  0x04: "Host unreachable",
                  0x05: "Connection refused",
                  0x06: "TTL expired",
                  0x07: "Command not supported, or protocol error",
                  0x08: "Address type not supported"
                }

DEFAULT_PORTS = { SOCKS4: 1080,
                  SOCKS5: 1080,
                  HTTP: 8080
                }

def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
    """
    set_default_proxy(proxy_type, addr[, port[, rdns[, username, password]]])

    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed. All parameters are as for socket.set_proxy().
    """
    socksocket.default_proxy = (proxy_type, addr, port, rdns,
                                username.encode() if username else None,
                                password.encode() if password else None)

setdefaultproxy = set_default_proxy

def get_default_proxy():
    """
    Returns the default proxy, set by set_default_proxy.
    """
    return socksocket.default_proxy

getdefaultproxy = get_default_proxy

def wrap_module(module):
    """
    Attempts to replace a module's socket library with a SOCKS socket. Must set
    a default proxy using set_default_proxy(...) first.
    This will only work on modules that import socket directly into the namespace;
    most of the Python Standard Library falls into this category.
    """
    if socksocket.default_proxy:
        module.socket.socket = socksocket
    else:
        raise GeneralProxyError("No default proxy specified")

wrapmodule = wrap_module

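For orientation, a usage sketch for the module-level helpers above (an editorial illustration, not part of the vendored file; the local Tor SOCKS port 9050 is an assumption). set_default_proxy() makes every subsequently created socksocket use the proxy, while wrap_module() additionally swaps socket.socket inside a target module's namespace, with the caveats its docstring notes:

import socks

socks.set_default_proxy(socks.SOCKS5, "127.0.0.1", 9050)  # assumed local Tor client
s = socks.socksocket()  # inherits the default proxy set above
s.connect(("check.torproject.org", 80))
s.sendall(b"GET / HTTP/1.0\r\nHost: check.torproject.org\r\n\r\n")
print(s.recv(4096))
s.close()
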
def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
                      proxy_port=None, proxy_rdns=True,
                      proxy_username=None, proxy_password=None,
                      timeout=None, source_address=None,
                      socket_options=None):
    """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object

    Like socket.create_connection(), but connects to proxy
    before returning the socket object.

    dest_pair - 2-tuple of (IP/hostname, port).
    **proxy_args - Same args passed to socksocket.set_proxy() if present.
    timeout - Optional socket timeout value, in seconds.
    source_address - tuple (host, port) for the socket to bind to as its source
    address before connecting (only for compatibility)
    """
    sock = socksocket()
    if socket_options is not None:
        for opt in socket_options:
            sock.setsockopt(*opt)
    if isinstance(timeout, (int, float)):
        sock.settimeout(timeout)
    if proxy_type is not None:
        sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
                       proxy_username, proxy_password)
    if source_address is not None:
        sock.bind(source_address)

    sock.connect(dest_pair)
    return sock

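A usage sketch for create_connection() as documented above (editorial illustration; the SOCKS5 proxy at 127.0.0.1:1080 is an assumption):

import socks

s = socks.create_connection(("example.com", 80),
                            proxy_type=socks.SOCKS5,
                            proxy_addr="127.0.0.1", proxy_port=1080,
                            timeout=10)
s.sendall(b"HEAD / HTTP/1.1\r\nHost: example.com\r\n\r\n")
print(s.recv(1024))
s.close()
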
class _BaseSocket(socket.socket):
    """Allows Python 2's "delegated" methods such as send() to be overridden
    """
    def __init__(self, *pos, **kw):
        _orig_socket.__init__(self, *pos, **kw)

        self._savedmethods = dict()
        for name in self._savenames:
            self._savedmethods[name] = getattr(self, name)
            delattr(self, name)  # Allows normal overriding mechanism to work

    _savenames = list()

def _makemethod(name):
    return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
for name in ("sendto", "send", "recvfrom", "recv"):
    method = getattr(_BaseSocket, name, None)

    # Determine if the method is not defined the usual way
    # as a function in the class.
    # Python 2 uses __slots__, so there are descriptors for each method,
    # but they are not functions.
    if not isinstance(method, Callable):
        _BaseSocket._savenames.append(name)
        setattr(_BaseSocket, name, _makemethod(name))

class socksocket(_BaseSocket):
    """socksocket([family[, type[, proto]]]) -> socket object

    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET and proto=0.
    The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
    """

    default_proxy = None

    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs):
        if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
            msg = "Socket type must be stream or datagram, not {!r}"
            raise ValueError(msg.format(type))

        _BaseSocket.__init__(self, family, type, proto, *args, **kwargs)
        self._proxyconn = None  # TCP connection to keep UDP relay alive

        if self.default_proxy:
            self.proxy = self.default_proxy
        else:
            self.proxy = (None, None, None, None, None, None)
        self.proxy_sockname = None
        self.proxy_peername = None

    def _readall(self, file, count):
        """
        Receive EXACTLY the number of bytes requested from the file object.
        Blocks until the required number of bytes have been received.
        """
        data = b""
        while len(data) < count:
            d = file.read(count - len(data))
            if not d:
                raise GeneralProxyError("Connection closed unexpectedly")
            data += d
        return data

    def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
        """set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.

        proxy_type - The type of the proxy to be used. Three types
                     are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                     PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
               servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
               (rather than the local side). The default is True.
               Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
                   The default is no authentication.
        password - Password to authenticate with to the server.
                   Only relevant when username is also provided.
        """
        self.proxy = (proxy_type, addr, port, rdns,
                      username.encode() if username else None,
                      password.encode() if password else None)

    setproxy = set_proxy

    def bind(self, *pos, **kw):
        """
        Implements proxy connection for UDP sockets,
        which happens during the bind() phase.
        """
        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
        if not proxy_type or self.type != socket.SOCK_DGRAM:
            return _orig_socket.bind(self, *pos, **kw)

        if self._proxyconn:
            raise socket.error(EINVAL, "Socket already bound to an address")
        if proxy_type != SOCKS5:
            msg = "UDP only supported by SOCKS5 proxy type"
            raise socket.error(EOPNOTSUPP, msg)
        _BaseSocket.bind(self, *pos, **kw)

        # Need to specify actual local port because
        # some relays drop packets if a port of zero is specified.
        # Avoid specifying host address in case of NAT though.
        _, port = self.getsockname()
        dst = ("0", port)

        self._proxyconn = _orig_socket()
        proxy = self._proxy_addr()
        self._proxyconn.connect(proxy)

        UDP_ASSOCIATE = b"\x03"
        _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)

        # The relay is most likely on the same host as the SOCKS proxy,
        # but some proxies return a private IP address (10.x.y.z)
        host, _ = proxy
        _, port = relay
        _BaseSocket.connect(self, (host, port))
        self.proxy_sockname = ("0.0.0.0", 0)  # Unknown

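The bind() override above is what makes datagram sockets work: the first sendto() or recvfrom() on an unbound UDP socksocket calls bind(), which opens the TCP keep-alive connection and issues the SOCKS5 UDP ASSOCIATE command. A sketch (editorial illustration; it assumes a SOCKS5 proxy with UDP support at 127.0.0.1:1080 and a peer that replies):

import socket
import socks

u = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM)
u.set_proxy(socks.SOCKS5, "127.0.0.1", 1080)  # UDP association is SOCKS5-only
u.sendto(b"ping", ("example.com", 9999))      # implicitly binds and associates
data, peer = u.recvfrom(1024)                 # blocks until the peer replies
u.close()
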
    def sendto(self, bytes, *args, **kwargs):
        if self.type != socket.SOCK_DGRAM:
            return _BaseSocket.sendto(self, bytes, *args, **kwargs)
        if not self._proxyconn:
            self.bind(("", 0))

        address = args[-1]
        flags = args[:-1]

        header = BytesIO()
        RSV = b"\x00\x00"
        header.write(RSV)
        STANDALONE = b"\x00"
        header.write(STANDALONE)
        self._write_SOCKS5_address(address, header)

        sent = _BaseSocket.send(self, header.getvalue() + bytes, *flags, **kwargs)
        return sent - header.tell()

    def send(self, bytes, flags=0, **kwargs):
        if self.type == socket.SOCK_DGRAM:
            return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
        else:
            return _BaseSocket.send(self, bytes, flags, **kwargs)

    def recvfrom(self, bufsize, flags=0):
        if self.type != socket.SOCK_DGRAM:
            return _BaseSocket.recvfrom(self, bufsize, flags)
        if not self._proxyconn:
            self.bind(("", 0))

        buf = BytesIO(_BaseSocket.recv(self, bufsize, flags))
        buf.seek(+2, SEEK_CUR)
        frag = buf.read(1)
        if ord(frag):
            raise NotImplementedError("Received UDP packet fragment")
        fromhost, fromport = self._read_SOCKS5_address(buf)

        if self.proxy_peername:
            peerhost, peerport = self.proxy_peername
            if fromhost != peerhost or peerport not in (0, fromport):
                raise socket.error(EAGAIN, "Packet filtered")

        return (buf.read(), (fromhost, fromport))

    def recv(self, *pos, **kw):
        bytes, _ = self.recvfrom(*pos, **kw)
        return bytes

    def close(self):
        if self._proxyconn:
            self._proxyconn.close()
        return _BaseSocket.close(self)

    def get_proxy_sockname(self):
        """
        Returns the bound IP address and port number at the proxy.
        """
        return self.proxy_sockname

    getproxysockname = get_proxy_sockname

    def get_proxy_peername(self):
        """
        Returns the IP and port number of the proxy.
        """
        return _BaseSocket.getpeername(self)

    getproxypeername = get_proxy_peername

    def get_peername(self):
        """
        Returns the IP address and port number of the destination
        machine (note: get_proxy_peername returns the proxy)
        """
        return self.proxy_peername

    getpeername = get_peername

    def _negotiate_SOCKS5(self, *dest_addr):
        """
        Negotiates a stream connection through a SOCKS5 server.
        """
        CONNECT = b"\x01"
        self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self,
            CONNECT, dest_addr)

    def _SOCKS5_request(self, conn, cmd, dst):
        """
        Send SOCKS5 request with given command (CMD field) and
        address (DST field). Returns resolved DST address that was used.
        """
        proxy_type, addr, port, rdns, username, password = self.proxy

        writer = conn.makefile("wb")
        reader = conn.makefile("rb", 0)  # buffering=0 renamed in Python 3
        try:
            # First we'll send the authentication packages we support.
            if username and password:
                # The username/password details were supplied to the
                # set_proxy method so we support the USERNAME/PASSWORD
                # authentication (in addition to the standard none).
                writer.write(b"\x05\x02\x00\x02")
            else:
                # No username/password were entered, therefore we
                # only support connections with no authentication.
                writer.write(b"\x05\x01\x00")

            # We'll receive the server's response to determine which
            # method was selected
            writer.flush()
            chosen_auth = self._readall(reader, 2)

            if chosen_auth[0:1] != b"\x05":
                # Note: string[i:i+1] is used because indexing of a bytestring
                # via bytestring[i] yields an integer in Python 3
                raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            # Check the chosen authentication method

            if chosen_auth[1:2] == b"\x02":
                # Okay, we need to perform a basic username/password
                # authentication.
                writer.write(b"\x01" + chr(len(username)).encode()
                             + username
                             + chr(len(password)).encode()
                             + password)
                writer.flush()
                auth_status = self._readall(reader, 2)
                if auth_status[0:1] != b"\x01":
                    # Bad response
                    raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
                if auth_status[1:2] != b"\x00":
                    # Authentication failed
                    raise SOCKS5AuthError("SOCKS5 authentication failed")

                # Otherwise, authentication succeeded

            # No authentication is required if 0x00
            elif chosen_auth[1:2] != b"\x00":
                # Reaching here is always bad
                if chosen_auth[1:2] == b"\xFF":
                    raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected")
                else:
                    raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            # Now we can request the actual connection
            writer.write(b"\x05" + cmd + b"\x00")
            resolved = self._write_SOCKS5_address(dst, writer)
            writer.flush()

            # Get the response
            resp = self._readall(reader, 3)
            if resp[0:1] != b"\x05":
                raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

            status = ord(resp[1:2])
            if status != 0x00:
                # Connection failed: server returned an error
                error = SOCKS5_ERRORS.get(status, "Unknown error")
                raise SOCKS5Error("{0:#04x}: {1}".format(status, error))

            # Get the bound address/port
            bnd = self._read_SOCKS5_address(reader)
            return (resolved, bnd)
        finally:
            reader.close()
            writer.close()

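To make the byte constants in _SOCKS5_request easier to follow, this standalone sketch spells out the RFC 1928 frames the method writes for an unauthenticated CONNECT to a domain name (editorial illustration; the helper name is hypothetical):

import struct

def socks5_connect_frames(host, port):
    greeting = b"\x05\x01\x00"  # version 5, one auth method offered: 0x00 (no auth)
    host_bytes = host.encode("idna")
    request = (b"\x05\x01\x00\x03"  # version, CMD=CONNECT, reserved, ATYP=domain
               + bytes([len(host_bytes)]) + host_bytes
               + struct.pack(">H", port))  # port in network byte order
    return greeting, request

print(socks5_connect_frames("example.com", 443))
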
    def _write_SOCKS5_address(self, addr, file):
        """
        Return the host and port packed for the SOCKS5 protocol,
        and the resolved address as a tuple object.
        """
        host, port = addr
        proxy_type, _, _, rdns, username, password = self.proxy

        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            addr_bytes = socket.inet_aton(host)
            file.write(b"\x01" + addr_bytes)
            host = socket.inet_ntoa(addr_bytes)
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if rdns:
                # Resolve remotely
                host_bytes = host.encode('idna')
                file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
            else:
                # Resolve locally
                addr_bytes = socket.inet_aton(socket.gethostbyname(host))
                file.write(b"\x01" + addr_bytes)
                host = socket.inet_ntoa(addr_bytes)

        file.write(struct.pack(">H", port))
        return host, port

    def _read_SOCKS5_address(self, file):
        atyp = self._readall(file, 1)
        if atyp == b"\x01":
            addr = socket.inet_ntoa(self._readall(file, 4))
        elif atyp == b"\x03":
            length = self._readall(file, 1)
            addr = self._readall(file, ord(length))
        else:
            raise GeneralProxyError("SOCKS5 proxy server sent invalid data")

        port = struct.unpack(">H", self._readall(file, 2))[0]
        return addr, port

    def _negotiate_SOCKS4(self, dest_addr, dest_port):
        """
        Negotiates a connection through a SOCKS4 server.
        """
        proxy_type, addr, port, rdns, username, password = self.proxy

        writer = self.makefile("wb")
        reader = self.makefile("rb", 0)  # buffering=0 renamed in Python 3
        try:
            # Check if the destination address provided is an IP address
            remote_resolve = False
            try:
                addr_bytes = socket.inet_aton(dest_addr)
            except socket.error:
                # It's a DNS name. Check where it should be resolved.
                if rdns:
                    addr_bytes = b"\x00\x00\x00\x01"
                    remote_resolve = True
                else:
                    addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))

            # Construct the request packet
            writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
            writer.write(addr_bytes)

            # The username parameter is considered userid for SOCKS4
            if username:
                writer.write(username)
            writer.write(b"\x00")

            # DNS name if remote resolving is required
            # NOTE: This is actually an extension to the SOCKS4 protocol
            # called SOCKS4A and may not be supported in all cases.
            if remote_resolve:
                writer.write(dest_addr.encode('idna') + b"\x00")
            writer.flush()

            # Get the response from the server
            resp = self._readall(reader, 8)
            if resp[0:1] != b"\x00":
                # Bad data
                raise GeneralProxyError("SOCKS4 proxy server sent invalid data")

            status = ord(resp[1:2])
            if status != 0x5A:
                # Connection failed: server returned an error
                error = SOCKS4_ERRORS.get(status, "Unknown error")
                raise SOCKS4Error("{0:#04x}: {1}".format(status, error))

            # Get the bound address/port
            self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
            if remote_resolve:
                self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
            else:
                self.proxy_peername = dest_addr, dest_port
        finally:
            reader.close()
            writer.close()

    def _negotiate_HTTP(self, dest_addr, dest_port):
        """
        Negotiates a connection through an HTTP server.
        NOTE: This currently only supports HTTP CONNECT-style proxies.
        """
        proxy_type, addr, port, rdns, username, password = self.proxy

        # If we need to resolve locally, we do this now
        addr = dest_addr if rdns else socket.gethostbyname(dest_addr)

        self.sendall(b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() +
                     b" HTTP/1.1\r\n" + b"Host: " + dest_addr.encode('idna') + b"\r\n\r\n")

        # We just need the first line to check if the connection was successful
        fobj = self.makefile()
        status_line = fobj.readline()
        fobj.close()

        if not status_line:
            raise GeneralProxyError("Connection closed unexpectedly")

        try:
            proto, status_code, status_msg = status_line.split(" ", 2)
        except ValueError:
            raise GeneralProxyError("HTTP proxy server sent invalid response")

        if not proto.startswith("HTTP/"):
            raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy")

        try:
            status_code = int(status_code)
        except ValueError:
            raise HTTPError("HTTP proxy server did not return a valid HTTP status")

        if status_code != 200:
            error = "{0}: {1}".format(status_code, status_msg)
            if status_code in (400, 403, 405):
                # It's likely that the HTTP proxy server does not support the CONNECT tunneling method
                error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks"
                          " (must be a CONNECT tunnel proxy)")
            raise HTTPError(error)

        self.proxy_sockname = (b"0.0.0.0", 0)
        self.proxy_peername = addr, dest_port

    _proxy_negotiators = {
        SOCKS4: _negotiate_SOCKS4,
        SOCKS5: _negotiate_SOCKS5,
        HTTP: _negotiate_HTTP
    }

    def connect(self, dest_pair):
        """
        Connects to the specified destination through a proxy.
        Uses the same API as socket's connect().
        To select the proxy server, use set_proxy().

        dest_pair - 2-tuple of (IP/hostname, port).
        """
        if len(dest_pair) != 2 or dest_pair[0].startswith("["):
            # Probably IPv6, not supported -- raise an error, and hope
            # Happy Eyeballs (RFC6555) makes sure at least the IPv4
            # connection works...
            raise socket.error("PySocks doesn't support IPv6")

        dest_addr, dest_port = dest_pair

        if self.type == socket.SOCK_DGRAM:
            if not self._proxyconn:
                self.bind(("", 0))
            dest_addr = socket.gethostbyname(dest_addr)

            # If the host address is INADDR_ANY or similar, reset the peer
            # address so that packets are received from any peer
            if dest_addr == "0.0.0.0" and not dest_port:
                self.proxy_peername = None
            else:
                self.proxy_peername = (dest_addr, dest_port)
            return

        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy

        # Do a minimal input check first
        if (not isinstance(dest_pair, (list, tuple))
                or len(dest_pair) != 2
                or not dest_addr
                or not isinstance(dest_port, int)):
            raise GeneralProxyError("Invalid destination-connection (host, port) pair")


        if proxy_type is None:
            # Treat like regular socket object
            self.proxy_peername = dest_pair
            _BaseSocket.connect(self, (dest_addr, dest_port))
            return

        proxy_addr = self._proxy_addr()

        try:
            # Initial connection to proxy server
            _BaseSocket.connect(self, proxy_addr)

        except socket.error as error:
            # Error while connecting to proxy
            self.close()
            proxy_addr, proxy_port = proxy_addr
            proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
            printable_type = PRINTABLE_PROXY_TYPES[proxy_type]

            msg = "Error connecting to {0} proxy {1}".format(printable_type,
                                                             proxy_server)
            raise ProxyConnectionError(msg, error)

        else:
            # Connected to proxy server, now negotiate
            try:
                # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
                negotiate = self._proxy_negotiators[proxy_type]
                negotiate(self, dest_addr, dest_port)
            except socket.error as error:
                # Wrap socket errors
                self.close()
                raise GeneralProxyError("Socket error", error)
            except ProxyError:
                # Protocol error while negotiating with proxy
                self.close()
                raise

    def _proxy_addr(self):
        """
        Return proxy address to connect to as tuple object
        """
        proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy
        proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type)
        if not proxy_port:
            raise GeneralProxyError("Invalid proxy type")
        return proxy_addr, proxy_port
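Putting connect() together with set_proxy(), the per-socket flow through the class above looks like this (editorial sketch; the proxy address 127.0.0.1:9050 is an assumption):

import socks

s = socks.socksocket()
s.set_proxy(socks.SOCKS5, "127.0.0.1", 9050, rdns=True)  # rdns: resolve names via the proxy
s.connect(("example.com", 80))  # negotiates with the proxy, then tunnels
print(s.get_proxy_peername(), s.getpeername())
s.close()
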
79
Shared/lib/python3.4/site-packages/sockshandler.py
Normal file

@ -0,0 +1,79 @@
#!/usr/bin/env python
"""
SocksiPy + urllib2 handler

version: 0.3
author: e<e@tr0ll.in>

This module provides a Handler which you can use with urllib2 to allow it to tunnel your connection through a socks.socksocket socket, without monkey patching the original socket...
"""
import ssl

try:
    import urllib2
    import httplib
except ImportError:  # Python 3
    import urllib.request as urllib2
    import http.client as httplib

import socks  # $ pip install PySocks

def merge_dict(a, b):
    d = a.copy()
    d.update(b)
    return d

class SocksiPyConnection(httplib.HTTPConnection):
    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
        httplib.HTTPConnection.__init__(self, *args, **kwargs)

    def connect(self):
        self.sock = socks.socksocket()
        self.sock.setproxy(*self.proxyargs)
        if type(self.timeout) in (int, float):
            self.sock.settimeout(self.timeout)
        self.sock.connect((self.host, self.port))

class SocksiPyConnectionS(httplib.HTTPSConnection):
    def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs):
        self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password)
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def connect(self):
        sock = socks.socksocket()
        sock.setproxy(*self.proxyargs)
        if type(self.timeout) in (int, float):
            sock.settimeout(self.timeout)
        sock.connect((self.host, self.port))
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)

class SocksiPyHandler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kw = kwargs
        urllib2.HTTPHandler.__init__(self)

    def http_open(self, req):
        def build(host, port=None, timeout=0, **kwargs):
            kw = merge_dict(self.kw, kwargs)
            conn = SocksiPyConnection(*self.args, host=host, port=port, timeout=timeout, **kw)
            return conn
        return self.do_open(build, req)

    def https_open(self, req):
        def build(host, port=None, timeout=0, **kwargs):
            kw = merge_dict(self.kw, kwargs)
            conn = SocksiPyConnectionS(*self.args, host=host, port=port, timeout=timeout, **kw)
            return conn
        return self.do_open(build, req)

if __name__ == "__main__":
    import sys
    try:
        port = int(sys.argv[1])
    except (ValueError, IndexError):
        port = 9050
    opener = urllib2.build_opener(SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, "localhost", port))
    print("HTTP: " + opener.open("http://httpbin.org/ip").read().decode())
    print("HTTPS: " + opener.open("https://httpbin.org/ip").read().decode())
@ -0,0 +1,13 @@
Metadata-Version: 1.1
Name: stem
Version: 1.4.0
Summary: Stem is a Python controller library that allows applications to interact with
Tor <https://www.torproject.org/>.
Home-page: https://stem.torproject.org/
Author: Damian Johnson
Author-email: atagar@torproject.org
License: LGPLv3
Description: UNKNOWN
Keywords: tor onion controller
Platform: UNKNOWN
Provides: stem
@ -0,0 +1,52 @@
tor-prompt
stem/__init__.py
stem/connection.py
stem/control.py
stem/exit_policy.py
stem/prereq.py
stem/process.py
stem/socket.py
stem/version.py
stem.egg-info/PKG-INFO
stem.egg-info/SOURCES.txt
stem.egg-info/dependency_links.txt
stem.egg-info/top_level.txt
stem/descriptor/__init__.py
stem/descriptor/export.py
stem/descriptor/extrainfo_descriptor.py
stem/descriptor/hidden_service_descriptor.py
stem/descriptor/microdescriptor.py
stem/descriptor/networkstatus.py
stem/descriptor/reader.py
stem/descriptor/remote.py
stem/descriptor/router_status_entry.py
stem/descriptor/server_descriptor.py
stem/descriptor/tordnsel.py
stem/interpreter/__init__.py
stem/interpreter/arguments.py
stem/interpreter/autocomplete.py
stem/interpreter/commands.py
stem/interpreter/help.py
stem/interpreter/settings.cfg
stem/response/__init__.py
stem/response/add_onion.py
stem/response/authchallenge.py
stem/response/events.py
stem/response/getconf.py
stem/response/getinfo.py
stem/response/mapaddress.py
stem/response/protocolinfo.py
stem/util/__init__.py
stem/util/conf.py
stem/util/connection.py
stem/util/enum.py
stem/util/log.py
stem/util/lru_cache.py
stem/util/ordereddict.py
stem/util/ports.cfg
stem/util/proc.py
stem/util/str_tools.py
stem/util/system.py
stem/util/term.py
stem/util/test_tools.py
stem/util/tor_tools.py
@ -0,0 +1 @@

@ -0,0 +1,98 @@
../stem/__init__.py
../stem/connection.py
../stem/control.py
../stem/exit_policy.py
../stem/prereq.py
../stem/process.py
../stem/socket.py
../stem/version.py
../stem/descriptor/__init__.py
../stem/descriptor/export.py
../stem/descriptor/extrainfo_descriptor.py
../stem/descriptor/hidden_service_descriptor.py
../stem/descriptor/microdescriptor.py
../stem/descriptor/networkstatus.py
../stem/descriptor/reader.py
../stem/descriptor/remote.py
../stem/descriptor/router_status_entry.py
../stem/descriptor/server_descriptor.py
../stem/descriptor/tordnsel.py
../stem/interpreter/__init__.py
../stem/interpreter/arguments.py
../stem/interpreter/autocomplete.py
../stem/interpreter/commands.py
../stem/interpreter/help.py
../stem/response/__init__.py
../stem/response/add_onion.py
../stem/response/authchallenge.py
../stem/response/events.py
../stem/response/getconf.py
../stem/response/getinfo.py
../stem/response/mapaddress.py
../stem/response/protocolinfo.py
../stem/util/__init__.py
../stem/util/conf.py
../stem/util/connection.py
../stem/util/enum.py
../stem/util/log.py
../stem/util/lru_cache.py
../stem/util/ordereddict.py
../stem/util/proc.py
../stem/util/str_tools.py
../stem/util/system.py
../stem/util/term.py
../stem/util/test_tools.py
../stem/util/tor_tools.py
../stem/interpreter/settings.cfg
../stem/util/ports.cfg
../stem/__pycache__/__init__.cpython-34.pyc
../stem/__pycache__/connection.cpython-34.pyc
../stem/__pycache__/control.cpython-34.pyc
../stem/__pycache__/exit_policy.cpython-34.pyc
../stem/__pycache__/prereq.cpython-34.pyc
../stem/__pycache__/process.cpython-34.pyc
../stem/__pycache__/socket.cpython-34.pyc
../stem/__pycache__/version.cpython-34.pyc
../stem/descriptor/__pycache__/__init__.cpython-34.pyc
../stem/descriptor/__pycache__/export.cpython-34.pyc
../stem/descriptor/__pycache__/extrainfo_descriptor.cpython-34.pyc
../stem/descriptor/__pycache__/hidden_service_descriptor.cpython-34.pyc
../stem/descriptor/__pycache__/microdescriptor.cpython-34.pyc
../stem/descriptor/__pycache__/networkstatus.cpython-34.pyc
../stem/descriptor/__pycache__/reader.cpython-34.pyc
../stem/descriptor/__pycache__/remote.cpython-34.pyc
../stem/descriptor/__pycache__/router_status_entry.cpython-34.pyc
../stem/descriptor/__pycache__/server_descriptor.cpython-34.pyc
../stem/descriptor/__pycache__/tordnsel.cpython-34.pyc
../stem/interpreter/__pycache__/__init__.cpython-34.pyc
../stem/interpreter/__pycache__/arguments.cpython-34.pyc
../stem/interpreter/__pycache__/autocomplete.cpython-34.pyc
../stem/interpreter/__pycache__/commands.cpython-34.pyc
../stem/interpreter/__pycache__/help.cpython-34.pyc
../stem/response/__pycache__/__init__.cpython-34.pyc
../stem/response/__pycache__/add_onion.cpython-34.pyc
../stem/response/__pycache__/authchallenge.cpython-34.pyc
../stem/response/__pycache__/events.cpython-34.pyc
../stem/response/__pycache__/getconf.cpython-34.pyc
../stem/response/__pycache__/getinfo.cpython-34.pyc
../stem/response/__pycache__/mapaddress.cpython-34.pyc
../stem/response/__pycache__/protocolinfo.cpython-34.pyc
../stem/util/__pycache__/__init__.cpython-34.pyc
../stem/util/__pycache__/conf.cpython-34.pyc
../stem/util/__pycache__/connection.cpython-34.pyc
../stem/util/__pycache__/enum.cpython-34.pyc
../stem/util/__pycache__/log.cpython-34.pyc
../stem/util/__pycache__/lru_cache.cpython-34.pyc
../stem/util/__pycache__/ordereddict.cpython-34.pyc
../stem/util/__pycache__/proc.cpython-34.pyc
../stem/util/__pycache__/str_tools.cpython-34.pyc
../stem/util/__pycache__/system.cpython-34.pyc
../stem/util/__pycache__/term.cpython-34.pyc
../stem/util/__pycache__/test_tools.cpython-34.pyc
../stem/util/__pycache__/tor_tools.cpython-34.pyc
./
dependency_links.txt
top_level.txt
PKG-INFO
SOURCES.txt
../../../../bin/tor-prompt
@ -0,0 +1 @@
stem
833
Shared/lib/python3.4/site-packages/stem/__init__.py
Normal file

@ -0,0 +1,833 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Library for working with the tor process.

**Module Overview:**

::

  ControllerError - Base exception raised when using the controller.
    |- ProtocolError - Malformed socket data.
    |- OperationFailed - Tor was unable to successfully complete the operation.
    |  |- UnsatisfiableRequest - Tor was unable to satisfy a valid request.
    |  |  +- CircuitExtensionFailed - Attempt to make or extend a circuit failed.
    |  |- DescriptorUnavailable - The given relay descriptor is unavailable.
    |  +- InvalidRequest - Invalid request.
    |     +- InvalidArguments - Invalid request parameters.
    +- SocketError - Communication with the socket failed.
       +- SocketClosed - Socket has been shut down.

.. data:: Runlevel (enum)

  Rating of importance used for event logging.

  =========== ===========
  Runlevel    Description
  =========== ===========
  **ERR**     critical issues that impair tor's ability to function
  **WARN**    non-critical issues the user should be aware of
  **NOTICE**  information that may be helpful to the user
  **INFO**    high level runtime information
  **DEBUG**   low level runtime information
  =========== ===========

.. data:: Signal (enum)

  Signals that the tor process will accept.

  ========================= ===========
  Signal                    Description
  ========================= ===========
  **RELOAD** or **HUP**     reloads our torrc
  **SHUTDOWN** or **INT**   shut down, waiting ShutdownWaitLength first if we're a relay
  **DUMP** or **USR1**      dumps information about open connections and circuits to our log
  **DEBUG** or **USR2**     switch our logging to the DEBUG runlevel
  **HALT** or **TERM**      exit tor immediately
  **NEWNYM**                switch to new circuits, so new application requests don't share any circuits with old ones (this also clears our DNS cache)
  **CLEARDNSCACHE**         clears cached DNS results
  **HEARTBEAT**             trigger a heartbeat log message
  ========================= ===========
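
  For example, sending tor the NEWNYM signal through a
  :class:`~stem.control.Controller` requests a fresh set of circuits. A
  minimal sketch (it assumes tor is running with 'ControlPort 9051')...

  ::

    from stem import Signal
    from stem.control import Controller

    # connect to tor's control port, authenticate, and switch circuits
    with Controller.from_port(port = 9051) as controller:
      controller.authenticate()
      controller.signal(Signal.NEWNYM)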

.. data:: Flag (enum)

  Flag assigned to tor relays by the authorities to indicate various
  characteristics.

  **Note:** The BADDIRECTORY flag was `removed from tor <https://gitweb.torproject.org/torspec.git/commit/dir-spec.txt?id=2f012f1>`_.

  ================= ===========
  Flag              Description
  ================= ===========
  **AUTHORITY**     relay is a directory authority
  **BADEXIT**       relay shouldn't be used as an exit due to being either problematic or malicious (`wiki <https://trac.torproject.org/projects/tor/wiki/doc/badRelays>`_)
  **BADDIRECTORY**  relay shouldn't be used for directory information
  **EXIT**          relay's exit policy makes it more useful as an exit rather than middle hop
  **FAST**          relay's suitable for high-bandwidth circuits
  **GUARD**         relay's suitable for being an entry guard (first hop)
  **HSDIR**         relay is being used as a v2 hidden service directory
  **NAMED**         relay can be referred to by its nickname
  **RUNNING**       relay is currently usable
  **STABLE**        relay's suitable for long-lived circuits
  **UNNAMED**       relay isn't currently bound to a nickname
  **V2DIR**         relay supports the v2 directory protocol
  **VALID**         relay has been validated
  ================= ===========
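
  Flags come with router status entries, so checking for one is a simple
  membership test. A sketch, assuming **entry** is a
  :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` you have
  already parsed...

  ::

    from stem import Flag

    if Flag.EXIT in entry.flags:
      print('%s is an exit relay' % entry.nickname)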

.. data:: CircStatus (enum)

  Statuses that a circuit can be in. Tor may provide statuses not in this enum.

  ============ ===========
  CircStatus   Description
  ============ ===========
  **LAUNCHED** new circuit was created
  **BUILT**    circuit finished being created and can accept traffic
  **EXTENDED** circuit has been extended by a hop
  **FAILED**   circuit construction failed
  **CLOSED**   circuit has been closed
  ============ ===========

.. data:: CircBuildFlag (enum)

  Attributes about how a circuit is built. These were introduced in tor version
  0.2.3.11. Tor may provide flags not in this enum.

  ================= ===========
  CircBuildFlag     Description
  ================= ===========
  **ONEHOP_TUNNEL** single hop circuit to fetch directory information
  **IS_INTERNAL**   circuit that won't be used for client traffic
  **NEED_CAPACITY** circuit only includes high capacity relays
  **NEED_UPTIME**   circuit only includes relays with a high uptime
  ================= ===========

.. data:: CircPurpose (enum)

  Description of what a circuit is intended for. These were introduced in tor
  version 0.2.1.6. Tor may provide purposes not in this enum.

  ==================== ===========
  CircPurpose          Description
  ==================== ===========
  **GENERAL**          client traffic or fetching directory information
  **HS_CLIENT_INTRO**  client side introduction point for a hidden service circuit
  **HS_CLIENT_REND**   client side hidden service rendezvous circuit
  **HS_SERVICE_INTRO** server side introduction point for a hidden service circuit
  **HS_SERVICE_REND**  server side hidden service rendezvous circuit
  **TESTING**          testing to see if we're reachable, so we can be used as a relay
  **CONTROLLER**       circuit that was built by a controller
  **MEASURE_TIMEOUT**  circuit being kept around to see how long it takes
  ==================== ===========

.. data:: CircClosureReason (enum)

  Reason that a circuit is being closed or failed to be established. Tor may
  provide reasons not in this enum.

  ========================= ===========
  CircClosureReason         Description
  ========================= ===========
  **NONE**                  no reason given
  **TORPROTOCOL**           violation in the tor protocol
  **INTERNAL**              internal error
  **REQUESTED**             requested by the client via a TRUNCATE command
  **HIBERNATING**           relay is currently hibernating
  **RESOURCELIMIT**         relay is out of memory, sockets, or circuit IDs
  **CONNECTFAILED**         unable to contact the relay
  **OR_IDENTITY**           relay had the wrong OR identification
  **OR_CONN_CLOSED**        connection failed after being established
  **FINISHED**              circuit has expired (see tor's MaxCircuitDirtiness config option)
  **TIMEOUT**               circuit construction timed out
  **DESTROYED**             circuit unexpectedly closed
  **NOPATH**                not enough relays to make a circuit
  **NOSUCHSERVICE**         requested hidden service does not exist
  **MEASUREMENT_EXPIRED**   same as **TIMEOUT** except that it was left open for measurement purposes
  ========================= ===========

.. data:: CircEvent (enum)

  Type of change reflected in a circuit by a CIRC_MINOR event. Tor may provide
  event types not in this enum.

  ===================== ===========
  CircEvent             Description
  ===================== ===========
  **PURPOSE_CHANGED**   circuit purpose or hidden service state has changed
  **CANNIBALIZED**      circuit connections are being reused for a different circuit
  ===================== ===========

.. data:: HiddenServiceState (enum)

  State that a hidden service circuit can have. These were introduced in tor
  version 0.2.3.11. Tor may provide states not in this enum.

  Enumerations fall into four groups based on their prefix...

  ======= ===========
  Prefix  Description
  ======= ===========
  HSCI_*  client-side introduction-point
  HSCR_*  client-side rendezvous-point
  HSSI_*  service-side introduction-point
  HSSR_*  service-side rendezvous-point
  ======= ===========

  ============================= ===========
  HiddenServiceState            Description
  ============================= ===========
  **HSCI_CONNECTING**           connecting to the introductory point
  **HSCI_INTRO_SENT**           sent INTRODUCE1 and awaiting a reply
  **HSCI_DONE**                 received a reply, circuit is closing
  **HSCR_CONNECTING**           connecting to the introductory point
  **HSCR_ESTABLISHED_IDLE**     rendezvous-point established, awaiting an introduction
  **HSCR_ESTABLISHED_WAITING**  introduction received, awaiting a rend
  **HSCR_JOINED**               connected to the hidden service
  **HSSI_CONNECTING**           connecting to the introductory point
  **HSSI_ESTABLISHED**          established introductory point
  **HSSR_CONNECTING**           connecting to the introductory point
  **HSSR_JOINED**               connected to the rendezvous-point
  ============================= ===========

.. data:: RelayEndReason (enum)

  Reasons why the stream is to be closed.

  =================== ===========
  RelayEndReason      Description
  =================== ===========
  **MISC**            none of the following reasons
  **RESOLVEFAILED**   unable to resolve the hostname
  **CONNECTREFUSED**  remote host refused the connection
  **EXITPOLICY**      OR refuses to connect to the destination
  **DESTROY**         circuit is being shut down
  **DONE**            connection has been closed
  **TIMEOUT**         connection timed out
  **NOROUTE**         routing error while contacting the destination
  **HIBERNATING**     relay is temporarily hibernating
  **INTERNAL**        internal error at the relay
  **RESOURCELIMIT**   relay has insufficient resources to service the request
  **CONNRESET**       connection was unexpectedly reset
  **TORPROTOCOL**     violation in the tor protocol
  **NOTDIRECTORY**    directory information requested from a relay that isn't mirroring it
  =================== ===========

.. data:: StreamStatus (enum)

  State that a stream going through tor can have. Tor may provide states not in
  this enum.

  ================= ===========
  StreamStatus      Description
  ================= ===========
  **NEW**           request for a new connection
  **NEWRESOLVE**    request to resolve an address
  **REMAP**         address is being re-mapped to another
  **SENTCONNECT**   sent a connect cell along a circuit
  **SENTRESOLVE**   sent a resolve cell along a circuit
  **SUCCEEDED**     stream has been established
  **FAILED**        stream is detached, and won't be re-established
  **DETACHED**      stream is detached, but might be re-established
  **CLOSED**        stream has closed
  ================= ===========

.. data:: StreamClosureReason (enum)

  Reason that a stream is being closed or failed to be established. This
  includes all values in the :data:`~stem.RelayEndReason` enumeration as
  well as the following. Tor may provide reasons not in this enum.

  ===================== ===========
  StreamClosureReason   Description
  ===================== ===========
  **END**               endpoint has sent a RELAY_END cell
  **PRIVATE_ADDR**      endpoint was a private address (127.0.0.1, 10.0.0.1, etc)
  ===================== ===========

.. data:: StreamSource (enum)

  Cause of a stream being remapped to another address. Tor may provide sources
  not in this enum.

  ============= ===========
  StreamSource  Description
  ============= ===========
  **CACHE**     tor is remapping because of a cached answer
  **EXIT**      exit relay requested the remap
  ============= ===========

.. data:: StreamPurpose (enum)

  Purpose of the stream. This is only provided with new streams and tor may
  provide purposes not in this enum.

  ================= ===========
  StreamPurpose     Description
  ================= ===========
  **DIR_FETCH**     fetching directory information (descriptors, consensus, etc)
  **DIR_UPLOAD**    uploading our descriptor to an authority
  **DNS_REQUEST**   user initiated DNS request
  **DIRPORT_TEST**  checking that our directory port is reachable externally
  **USER**          either relaying user traffic or not one of the above categories
  ================= ===========

.. data:: ORStatus (enum)

  State that an OR connection can have. Tor may provide states not in this
  enum.

  =============== ===========
  ORStatus        Description
  =============== ===========
  **NEW**         received OR connection, starting server-side handshake
  **LAUNCHED**    launched outbound OR connection, starting client-side handshake
  **CONNECTED**   OR connection has been established
  **FAILED**      attempt to establish OR connection failed
  **CLOSED**      OR connection has been closed
  =============== ===========

.. data:: ORClosureReason (enum)

  Reason that an OR connection is being closed or failed to be established. Tor
  may provide reasons not in this enum.

  =================== ===========
  ORClosureReason     Description
  =================== ===========
  **DONE**            OR connection shut down cleanly
  **CONNECTREFUSED**  got a ECONNREFUSED when connecting to the relay
  **IDENTITY**        identity of the relay wasn't what we expected
  **CONNECTRESET**    got a ECONNRESET or similar error from relay
  **TIMEOUT**         got a ETIMEOUT or similar error from relay
  **NOROUTE**         got a ENOTCONN, ENETUNREACH, ENETDOWN, EHOSTUNREACH, or similar error from relay
  **IOERROR**         got a different kind of error from relay
  **RESOURCELIMIT**   relay has insufficient resources to service the request
  **MISC**            connection refused for another reason
  **PT_MISSING**      no pluggable transport was available
  =================== ===========

.. data:: AuthDescriptorAction (enum)

  Actions that directory authorities might take with relay descriptors. Tor may
  provide reasons not in this enum.

  ===================== ===========
  AuthDescriptorAction  Description
  ===================== ===========
  **ACCEPTED**          accepting the descriptor as the newest version
  **DROPPED**           descriptor rejected without notifying the relay
  **REJECTED**          relay notified that its descriptor has been rejected
  ===================== ===========

.. data:: StatusType (enum)

  Sources for tor status events. Tor may provide types not in this enum.

  ============= ===========
  StatusType    Description
  ============= ===========
  **GENERAL**   general tor activity, not specifically as a client or relay
  **CLIENT**    related to our activity as a tor client
  **SERVER**    related to our activity as a tor relay
  ============= ===========

.. data:: GuardType (enum)

  Use a guard relay can be for. Tor may provide types not in this enum.

  =========== ===========
  GuardType   Description
  =========== ===========
  **ENTRY**   used to connect to the tor network
  =========== ===========

.. data:: GuardStatus (enum)

  Status a guard relay can have. Tor may provide types not in this enum.

  ============= ===========
  GuardStatus   Description
  ============= ===========
  **NEW**       new guard that we weren't previously using
  **DROPPED**   removed from use as one of our guards
  **UP**        guard is now reachable
  **DOWN**      guard is now unreachable
  **BAD**       consensus or relay considers this relay to be unusable as a guard
  **GOOD**      consensus or relay considers this relay to be usable as a guard
  ============= ===========

.. data:: TimeoutSetType (enum)

  Way in which the timeout value of a circuit is changing. Tor may provide
  types not in this enum.

  =============== ===========
  TimeoutSetType  Description
  =============== ===========
  **COMPUTED**    tor has computed a new timeout based on prior circuits
  **RESET**       timeout reverted to its default
  **SUSPENDED**   timeout reverted to its default until network connectivity has recovered
  **DISCARD**     throwing out timeout value from when the network was down
  **RESUME**      resumed calculations to determine the proper timeout
  =============== ===========

.. data:: ConnectionType (enum)

  Purpose for a tor connection. Tor may provide types not in this enum.

  The meaning behind these values is a bit unclear, pending :trac:`10086`.

  =============== ===========
  ConnectionType  Description
  =============== ===========
  **OR**          carrying traffic within the tor network
  **DIR**         fetching or sending tor descriptor data
  **EXIT**        carrying traffic between the tor network and an external destination
  =============== ===========

.. data:: TokenBucket (enum)

  Bucket categories of TB_EMPTY events.

  =============== ===========
  TokenBucket     Description
  =============== ===========
  **GLOBAL**      global token bucket
  **RELAY**       relay token bucket
  **ORCONN**      bucket used for OR connections
  =============== ===========

.. data:: HSDescAction (enum)

  Action being taken in a HS_DESC event.

  =============== ===========
  HSDescAction    Description
  =============== ===========
  **REQUESTED**   uncached hidden service descriptor is being requested
  **UPLOAD**      descriptor is being uploaded with HSPOST
  **RECEIVED**    hidden service descriptor has been retrieved
  **UPLOADED**    descriptor was uploaded with HSPOST
  **IGNORE**      fetched descriptor was ignored because we already have its v0 descriptor
  **FAILED**      we were unable to retrieve the descriptor
  =============== ===========

.. data:: HSDescReason (enum)

  Reason for the hidden service descriptor to fail to be fetched.

  =================== ===========
  HSDescReason        Description
  =================== ===========
  **BAD_DESC**        descriptor was unparseable
  **QUERY_REJECTED**  hidden service directory refused to provide the descriptor
  **UPLOAD_REJECTED** descriptor was rejected by the hidden service directory
  **NOT_FOUND**       descriptor with the given identifier wasn't found
  **UNEXPECTED**      failure type is unknown
  =================== ===========

.. data:: HSAuth (enum)

  Type of authentication being used for a HS_DESC event.

  ================= ===========
  HSAuth            Description
  ================= ===========
  **NO_AUTH**       no authentication
  **BASIC_AUTH**    general hidden service authentication
  **STEALTH_AUTH**  authentication method that hides service activity from unauthorized clients
  **UNKNOWN**       unrecognized method of authentication
  ================= ===========
"""

__version__ = '1.4.0'
__author__ = 'Damian Johnson'
__contact__ = 'atagar@torproject.org'
__url__ = 'https://stem.torproject.org/'
__license__ = 'LGPLv3'

__all__ = [
  'descriptor',
  'response',
  'util',
  'connection',
  'control',
  'exit_policy',
  'prereq',
  'process',
  'socket',
  'version',
  'ControllerError',
  'ProtocolError',
  'OperationFailed',
  'UnsatisfiableRequest',
  'CircuitExtensionFailed',
  'DescriptorUnavailable',
  'InvalidRequest',
  'InvalidArguments',
  'SocketError',
  'SocketClosed',
  'Runlevel',
  'Signal',
  'Flag',
  'CircStatus',
  'CircBuildFlag',
  'CircPurpose',
  'CircClosureReason',
  'CircEvent',
  'HiddenServiceState',
  'HSAuth',
  'HSDescAction',
  'HSDescReason',
  'RelayEndReason',
  'StreamStatus',
  'StreamClosureReason',
  'StreamSource',
  'StreamPurpose',
  'ORStatus',
  'ORClosureReason',
  'AuthDescriptorAction',
  'StatusType',
  'GuardType',
  'GuardStatus',
  'TimeoutSetType',
]

import stem.prereq

if stem.prereq.is_python_3():
  str_type = str
  int_type = int
else:
  str_type = unicode
  int_type = long

import stem.util.enum

# Constant to indicate an undefined argument default. Usually we'd use None for
# this, but users will commonly provide None as the argument so need something
# else fairly unique...

UNDEFINED = '<Undefined_ >'
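

# Editor's sketch (not part of stem): the sentinel lets a caller pass None as
# a legitimate value while the function can still tell that an argument was
# omitted entirely. The helper name below is hypothetical.

def _example_lookup(config, key, default = UNDEFINED):
  if key in config:
    return config[key]  # may legitimately be None
  elif default != UNDEFINED:
    return default
  else:
    raise KeyError(key)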


class ControllerError(Exception):
  'Base error for controller communication issues.'


class ProtocolError(ControllerError):
  'Malformed content from the control socket.'


class OperationFailed(ControllerError):
  """
  Base exception class for failed operations that return an error code

  :var str code: error code returned by Tor
  :var str message: error message returned by Tor or a human readable error
    message
  """

  def __init__(self, code = None, message = None):
    super(ControllerError, self).__init__(message)
    self.code = code
    self.message = message


class UnsatisfiableRequest(OperationFailed):
  """
  Exception raised if Tor was unable to process our request.
  """


class CircuitExtensionFailed(UnsatisfiableRequest):
  """
  An attempt to create or extend a circuit failed.

  :var stem.response.CircuitEvent circ: response notifying us of the failure
  """

  def __init__(self, message, circ = None):
    super(CircuitExtensionFailed, self).__init__(message = message)
    self.circ = circ


class DescriptorUnavailable(OperationFailed):
  """
  Tor was unable to provide a descriptor for the given relay.
  """

  def __init__(self, message):
    super(DescriptorUnavailable, self).__init__(message = message)


class InvalidRequest(OperationFailed):
  """
  Exception raised when the request was invalid or malformed.
  """


class InvalidArguments(InvalidRequest):
  """
  Exception class for requests which had invalid arguments.

  :var str code: error code returned by Tor
  :var str message: error message returned by Tor or a human readable error
    message
  :var list arguments: a list of arguments which were invalid
  """

  def __init__(self, code = None, message = None, arguments = None):
    super(InvalidArguments, self).__init__(code, message)
    self.arguments = arguments


class SocketError(ControllerError):
  'Error arose while communicating with the control socket.'


class SocketClosed(SocketError):
  'Control socket was closed before completing the message.'
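

# Editor's usage sketch (not part of stem): catching these exceptions when
# attaching to tor's control port. The port number is an assumption.

def _example_connect():
  import stem.control

  try:
    return stem.control.Controller.from_port(port = 9051)
  except SocketError as exc:
    print('Unable to connect to tor on port 9051: %s' % exc)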


Runlevel = stem.util.enum.UppercaseEnum(
  'DEBUG',
  'INFO',
  'NOTICE',
  'WARN',
  'ERR',
)

Flag = stem.util.enum.Enum(
  ('AUTHORITY', 'Authority'),
  ('BADEXIT', 'BadExit'),
  ('BADDIRECTORY', 'BadDirectory'),
  ('EXIT', 'Exit'),
  ('FAST', 'Fast'),
  ('GUARD', 'Guard'),
  ('HSDIR', 'HSDir'),
  ('NAMED', 'Named'),
  ('RUNNING', 'Running'),
  ('STABLE', 'Stable'),
  ('UNNAMED', 'Unnamed'),
  ('V2DIR', 'V2Dir'),
  ('V3DIR', 'V3Dir'),
  ('VALID', 'Valid'),
)

Signal = stem.util.enum.UppercaseEnum(
  'RELOAD',
  'HUP',
  'SHUTDOWN',
  'INT',
  'DUMP',
  'USR1',
  'DEBUG',
  'USR2',
  'HALT',
  'TERM',
  'NEWNYM',
  'CLEARDNSCACHE',
  'HEARTBEAT',
)

CircStatus = stem.util.enum.UppercaseEnum(
  'LAUNCHED',
  'BUILT',
  'EXTENDED',
  'FAILED',
  'CLOSED',
)

CircBuildFlag = stem.util.enum.UppercaseEnum(
  'ONEHOP_TUNNEL',
  'IS_INTERNAL',
  'NEED_CAPACITY',
  'NEED_UPTIME',
)

CircPurpose = stem.util.enum.UppercaseEnum(
  'GENERAL',
  'HS_CLIENT_INTRO',
  'HS_CLIENT_REND',
  'HS_SERVICE_INTRO',
  'HS_SERVICE_REND',
  'TESTING',
  'CONTROLLER',
  'MEASURE_TIMEOUT',
)

CircClosureReason = stem.util.enum.UppercaseEnum(
  'NONE',
  'TORPROTOCOL',
  'INTERNAL',
  'REQUESTED',
  'HIBERNATING',
  'RESOURCELIMIT',
  'CONNECTFAILED',
  'OR_IDENTITY',
  'OR_CONN_CLOSED',
  'FINISHED',
  'TIMEOUT',
  'DESTROYED',
  'NOPATH',
  'NOSUCHSERVICE',
  'MEASUREMENT_EXPIRED',
)

CircEvent = stem.util.enum.UppercaseEnum(
  'PURPOSE_CHANGED',
  'CANNIBALIZED',
)

HiddenServiceState = stem.util.enum.UppercaseEnum(
  'HSCI_CONNECTING',
  'HSCI_INTRO_SENT',
  'HSCI_DONE',
  'HSCR_CONNECTING',
  'HSCR_ESTABLISHED_IDLE',
  'HSCR_ESTABLISHED_WAITING',
  'HSCR_JOINED',
  'HSSI_CONNECTING',
  'HSSI_ESTABLISHED',
  'HSSR_CONNECTING',
  'HSSR_JOINED',
)

RelayEndReason = stem.util.enum.UppercaseEnum(
  'MISC',
  'RESOLVEFAILED',
  'CONNECTREFUSED',
  'EXITPOLICY',
  'DESTROY',
  'DONE',
  'TIMEOUT',
  'NOROUTE',
  'HIBERNATING',
  'INTERNAL',
  'RESOURCELIMIT',
  'CONNRESET',
  'TORPROTOCOL',
  'NOTDIRECTORY',
)

StreamStatus = stem.util.enum.UppercaseEnum(
  'NEW',
  'NEWRESOLVE',
  'REMAP',
  'SENTCONNECT',
  'SENTRESOLVE',
  'SUCCEEDED',
  'FAILED',
  'DETACHED',
  'CLOSED',
)

# StreamClosureReason is a superset of RelayEndReason
StreamClosureReason = stem.util.enum.UppercaseEnum(*(RelayEndReason.keys() + [
  'END',
  'PRIVATE_ADDR',
]))

StreamSource = stem.util.enum.UppercaseEnum(
  'CACHE',
  'EXIT',
)

StreamPurpose = stem.util.enum.UppercaseEnum(
  'DIR_FETCH',
  'DIR_UPLOAD',
  'DNS_REQUEST',
  'DIRPORT_TEST',
  'USER',
)

ORStatus = stem.util.enum.UppercaseEnum(
  'NEW',
  'LAUNCHED',
  'CONNECTED',
  'FAILED',
  'CLOSED',
)

ORClosureReason = stem.util.enum.UppercaseEnum(
  'DONE',
  'CONNECTREFUSED',
  'IDENTITY',
  'CONNECTRESET',
  'TIMEOUT',
  'NOROUTE',
  'IOERROR',
  'RESOURCELIMIT',
  'MISC',
  'PT_MISSING',
)

AuthDescriptorAction = stem.util.enum.UppercaseEnum(
  'ACCEPTED',
  'DROPPED',
  'REJECTED',
)

StatusType = stem.util.enum.UppercaseEnum(
  'GENERAL',
  'CLIENT',
  'SERVER',
)

GuardType = stem.util.enum.UppercaseEnum(
  'ENTRY',
)

GuardStatus = stem.util.enum.UppercaseEnum(
  'NEW',
  'UP',
  'DOWN',
  'BAD',
  'GOOD',
  'DROPPED',
)

TimeoutSetType = stem.util.enum.UppercaseEnum(
  'COMPUTED',
  'RESET',
  'SUSPENDED',
  'DISCARD',
  'RESUME',
)

ConnectionType = stem.util.enum.UppercaseEnum(
  'OR',
  'DIR',
  'EXIT',
)

TokenBucket = stem.util.enum.UppercaseEnum(
  'GLOBAL',
  'RELAY',
  'ORCONN',
)

HSDescAction = stem.util.enum.UppercaseEnum(
  'REQUESTED',
  'UPLOAD',
  'RECEIVED',
  'UPLOADED',
  'IGNORE',
  'FAILED',
)

HSDescReason = stem.util.enum.UppercaseEnum(
  'BAD_DESC',
  'QUERY_REJECTED',
  'UPLOAD_REJECTED',
  'NOT_FOUND',
  'UNEXPECTED',
)

HSAuth = stem.util.enum.UppercaseEnum(
  'NO_AUTH',
  'BASIC_AUTH',
  'STEALTH_AUTH',
  'UNKNOWN',
)
1284
Shared/lib/python3.4/site-packages/stem/connection.py
Normal file
File diff suppressed because it is too large
3631
Shared/lib/python3.4/site-packages/stem/control.py
Normal file
File diff suppressed because it is too large
841
Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py
Normal file
@@ -0,0 +1,841 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Package for parsing and processing descriptor data.

**Module Overview:**

::

  parse_file - Parses the descriptors in a file.

  Descriptor - Common parent for all descriptor file types.
    |- get_path - location of the descriptor on disk if it came from a file
    |- get_archive_path - location of the descriptor within the archive it came from
    |- get_bytes - similar to str(), but provides our original bytes content
    |- get_unrecognized_lines - unparsed descriptor content
    +- __str__ - string that the descriptor was made from

.. data:: DocumentHandler (enum)

  Ways in which we can parse a
  :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`.

  Both **ENTRIES** and **BARE_DOCUMENT** have a 'thin' document, which doesn't
  have a populated **routers** attribute. This allows for lower memory usage
  and upfront runtime. However, if read time and memory aren't a concern then
  **DOCUMENT** can provide you with a fully populated document.

  =================== ===========
  DocumentHandler     Description
  =================== ===========
  **ENTRIES**         Iterates over the contained :class:`~stem.descriptor.router_status_entry.RouterStatusEntry`. Each has a reference to the bare document it came from (through its **document** attribute).
  **DOCUMENT**        :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` with the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` it contains (through its **routers** attribute).
  **BARE_DOCUMENT**   :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` **without** a reference to its contents (the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` are unread).
  =================== ===========
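
  For instance, to read a cached consensus as one fully populated document
  rather than iterating over its entries (a sketch; the file path is an
  assumption)...

  ::

    from stem.descriptor import DocumentHandler, parse_file

    consensus = next(parse_file(
      '/home/atagar/.tor/cached-consensus',
      document_handler = DocumentHandler.DOCUMENT,
    ))

    for fingerprint, relay in consensus.routers.items():
      print('%s: %s' % (fingerprint, relay.nickname))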
"""

__all__ = [
  'export',
  'reader',
  'remote',
  'extrainfo_descriptor',
  'server_descriptor',
  'microdescriptor',
  'networkstatus',
  'router_status_entry',
  'tordnsel',
  'parse_file',
  'Descriptor',
]

import base64
import codecs
import copy
import hashlib
import os
import re
import tarfile

import stem.prereq
import stem.util.enum
import stem.util.str_tools
import stem.util.system

from stem import str_type

try:
  # added in python 2.7
  from collections import OrderedDict
except ImportError:
  from stem.util.ordereddict import OrderedDict

KEYWORD_CHAR = 'a-zA-Z0-9-'
WHITESPACE = ' \t'
KEYWORD_LINE = re.compile('^([%s]+)(?:[%s]+(.*))?$' % (KEYWORD_CHAR, WHITESPACE))
SPECIFIC_KEYWORD_LINE = '^(%%s)(?:[%s]+(.*))?$' % WHITESPACE
PGP_BLOCK_START = re.compile('^-----BEGIN ([%s%s]+)-----$' % (KEYWORD_CHAR, WHITESPACE))
PGP_BLOCK_END = '-----END %s-----'

DocumentHandler = stem.util.enum.UppercaseEnum(
  'ENTRIES',
  'DOCUMENT',
  'BARE_DOCUMENT',
)


def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, **kwargs):
  """
  Simple function to read the descriptor contents from a file, providing an
  iterator for its :class:`~stem.descriptor.__init__.Descriptor` contents.

  If you don't provide a **descriptor_type** argument then this automatically
  tries to determine the descriptor type based on the following...

  * The @type annotation on the first line. These are generally only found in
    the `CollecTor archives <https://collector.torproject.org/formats.html#relay-descriptors>`_.

  * The filename if it matches something from tor's data directory. For
    instance, tor's 'cached-descriptors' contains server descriptors.

  This is a handy function for simple usage, but if you're reading multiple
  descriptor files you might want to consider the
  :class:`~stem.descriptor.reader.DescriptorReader`.

  Descriptor types include the following, including further minor versions (i.e.
  if we support 1.1 then we also support everything from 1.0 and most things
  from 1.2, but not 2.0)...

  ========================================= =====
  Descriptor Type                           Class
  ========================================= =====
  server-descriptor 1.0                     :class:`~stem.descriptor.server_descriptor.RelayDescriptor`
  extra-info 1.0                            :class:`~stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor`
  microdescriptor 1.0                       :class:`~stem.descriptor.microdescriptor.Microdescriptor`
  directory 1.0                             **unsupported**
  network-status-2 1.0                      :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV2`)
  dir-key-certificate-3 1.0                 :class:`~stem.descriptor.networkstatus.KeyCertificate`
  network-status-consensus-3 1.0            :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
  network-status-vote-3 1.0                 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
  network-status-microdesc-consensus-3 1.0  :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
  bridge-network-status 1.0                 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.BridgeNetworkStatusDocument`)
  bridge-server-descriptor 1.0              :class:`~stem.descriptor.server_descriptor.BridgeDescriptor`
  bridge-extra-info 1.1 or 1.2              :class:`~stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor`
  torperf 1.0                               **unsupported**
  bridge-pool-assignment 1.0                **unsupported**
  tordnsel 1.0                              :class:`~stem.descriptor.tordnsel.TorDNSEL`
  hidden-service-descriptor 1.0             :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor`
  ========================================= =====

  If you're using **python 3** then beware that the open() function defaults to
  using text mode. **Binary mode** is strongly suggested because it's both
  faster (in my testing, by about 33x) and doesn't do universal newline
  translation, which can make us misparse the document.

  ::

    my_descriptor_file = open(descriptor_path, 'rb')

  :param str,file,tarfile descriptor_file: path or opened file with the descriptor contents
  :param str descriptor_type: `descriptor type <https://collector.torproject.org/formats.html>`_, this is guessed if not provided
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param stem.descriptor.__init__.DocumentHandler document_handler: method in
    which to parse the :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
  :param dict kwargs: additional arguments for the descriptor constructor

  :returns: iterator for :class:`~stem.descriptor.__init__.Descriptor` instances in the file

  :raises:
    * **ValueError** if the contents is malformed and validate is True
    * **TypeError** if we can't match the contents of the file to a descriptor type
    * **IOError** if unable to read from the descriptor_file
  """

  # Delegate to a helper if this is a path or tarfile.

  handler = None

  if isinstance(descriptor_file, (bytes, str_type)):
    if stem.util.system.is_tarfile(descriptor_file):
      handler = _parse_file_for_tar_path
    else:
      handler = _parse_file_for_path
  elif isinstance(descriptor_file, tarfile.TarFile):
    handler = _parse_file_for_tarfile

  if handler:
    for desc in handler(descriptor_file, descriptor_type, validate, document_handler, **kwargs):
      yield desc

    return

  # The tor descriptor specifications do not provide a reliable method for
  # identifying a descriptor file's type and version so we need to guess
  # based on its filename. Metrics descriptors, however, can be identified
  # by an annotation on their first line...
  # https://trac.torproject.org/5651

  initial_position = descriptor_file.tell()
  first_line = stem.util.str_tools._to_unicode(descriptor_file.readline().strip())
  metrics_header_match = re.match('^@type (\S+) (\d+)\.(\d+)$', first_line)

  if not metrics_header_match:
    descriptor_file.seek(initial_position)

  descriptor_path = getattr(descriptor_file, 'name', None)
  filename = '<undefined>' if descriptor_path is None else os.path.basename(descriptor_file.name)
  file_parser = None

  if descriptor_type is not None:
    descriptor_type_match = re.match('^(\S+) (\d+)\.(\d+)$', descriptor_type)

    if descriptor_type_match:
      desc_type, major_version, minor_version = descriptor_type_match.groups()
      file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs)
    else:
      raise ValueError("The descriptor_type must be of the form '<type> <major_version>.<minor_version>'")
  elif metrics_header_match:
    # Metrics descriptor handling

    desc_type, major_version, minor_version = metrics_header_match.groups()
    file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs)
  else:
    # Cached descriptor handling. These contain multiple descriptors per file.

    if filename == 'cached-descriptors' or filename == 'cached-descriptors.new':
      file_parser = lambda f: stem.descriptor.server_descriptor._parse_file(f, validate = validate, **kwargs)
    elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new':
      file_parser = lambda f: stem.descriptor.extrainfo_descriptor._parse_file(f, validate = validate, **kwargs)
    elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new':
      file_parser = lambda f: stem.descriptor.microdescriptor._parse_file(f, validate = validate, **kwargs)
    elif filename == 'cached-consensus':
      file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, validate = validate, document_handler = document_handler, **kwargs)
    elif filename == 'cached-microdesc-consensus':
      file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs)

  if file_parser:
    for desc in file_parser(descriptor_file):
      if descriptor_path is not None:
        desc._set_path(os.path.abspath(descriptor_path))

      yield desc

    return

  # Not recognized as a descriptor file.

  raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line))


def _parse_file_for_path(descriptor_file, *args, **kwargs):
  with open(descriptor_file, 'rb') as desc_file:
    for desc in parse_file(desc_file, *args, **kwargs):
      yield desc


def _parse_file_for_tar_path(descriptor_file, *args, **kwargs):
  # TODO: use 'with' for tarfile after dropping python 2.6 support
  tar_file = tarfile.open(descriptor_file)

  try:
    for desc in parse_file(tar_file, *args, **kwargs):
      desc._set_path(os.path.abspath(descriptor_file))
      yield desc
  finally:
    if tar_file:
      tar_file.close()


def _parse_file_for_tarfile(descriptor_file, *args, **kwargs):
  for tar_entry in descriptor_file:
    if tar_entry.isfile():
      entry = descriptor_file.extractfile(tar_entry)

      try:
        for desc in parse_file(entry, *args, **kwargs):
          desc._set_archive_path(entry.name)
          yield desc
      finally:
        entry.close()


def _parse_metrics_file(descriptor_type, major_version, minor_version, descriptor_file, validate, document_handler, **kwargs):
  # Parses descriptor files from metrics, yielding individual descriptors. This
  # throws a TypeError if the descriptor_type or version isn't recognized.

  if descriptor_type == 'server-descriptor' and major_version == 1:
    for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs):
      yield desc
  elif descriptor_type == 'bridge-server-descriptor' and major_version == 1:
    for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs):
      yield desc
  elif descriptor_type == 'extra-info' and major_version == 1:
    for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs):
      yield desc
  elif descriptor_type == 'microdescriptor' and major_version == 1:
    for desc in stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs):
      yield desc
  elif descriptor_type == 'bridge-extra-info' and major_version == 1:
    # version 1.1 introduced a 'transport' field...
    # https://trac.torproject.org/6257

    for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs):
      yield desc
  elif descriptor_type == 'network-status-2' and major_version == 1:
    document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV2

    for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
      yield desc
  elif descriptor_type == 'dir-key-certificate-3' and major_version == 1:
    for desc in stem.descriptor.networkstatus._parse_file_key_certs(descriptor_file, validate = validate, **kwargs):
      yield desc
  elif descriptor_type in ('network-status-consensus-3', 'network-status-vote-3') and major_version == 1:
    document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3

    for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
      yield desc
  elif descriptor_type == 'network-status-microdesc-consensus-3' and major_version == 1:
    document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3

    for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs):
      yield desc
  elif descriptor_type == 'bridge-network-status' and major_version == 1:
    document_type = stem.descriptor.networkstatus.BridgeNetworkStatusDocument

    for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
      yield desc
  elif descriptor_type == 'tordnsel' and major_version == 1:
    document_type = stem.descriptor.tordnsel.TorDNSEL

    for desc in stem.descriptor.tordnsel._parse_file(descriptor_file, validate = validate, **kwargs):
      yield desc
  elif descriptor_type == 'hidden-service-descriptor' and major_version == 1:
    document_type = stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor

    for desc in stem.descriptor.hidden_service_descriptor._parse_file(descriptor_file, validate = validate, **kwargs):
      yield desc
  else:
    raise TypeError("Unrecognized metrics descriptor format. type: '%s', version: '%i.%i'" % (descriptor_type, major_version, minor_version))


def _value(line, entries):
  return entries[line][0][0]


def _values(line, entries):
  return [entry[0] for entry in entries[line]]


def _parse_simple_line(keyword, attribute):
  def _parse(descriptor, entries):
    setattr(descriptor, attribute, _value(keyword, entries))

  return _parse


def _parse_bytes_line(keyword, attribute):
  def _parse(descriptor, entries):
    line_match = re.search(stem.util.str_tools._to_bytes('^(opt )?%s(?:[%s]+(.*))?$' % (keyword, WHITESPACE)), descriptor.get_bytes(), re.MULTILINE)
    result = None

    if line_match:
      value = line_match.groups()[1]
      result = b'' if value is None else value

    setattr(descriptor, attribute, result)

  return _parse


def _parse_timestamp_line(keyword, attribute):
  # "<keyword>" YYYY-MM-DD HH:MM:SS

  def _parse(descriptor, entries):
    value = _value(keyword, entries)

    try:
      setattr(descriptor, attribute, stem.util.str_tools._parse_timestamp(value))
    except ValueError:
      raise ValueError("Timestamp on %s line wasn't parsable: %s %s" % (keyword, keyword, value))

  return _parse


def _parse_forty_character_hex(keyword, attribute):
  # format of fingerprints, sha1 digests, etc

  def _parse(descriptor, entries):
    value = _value(keyword, entries)

    if not stem.util.tor_tools.is_hex_digits(value, 40):
      raise ValueError('%s line had an invalid value (should be 40 hex characters): %s %s' % (keyword, keyword, value))

    setattr(descriptor, attribute, value)

  return _parse


def _parse_key_block(keyword, attribute, expected_block_type, value_attribute = None):
  def _parse(descriptor, entries):
    value, block_type, block_contents = entries[keyword][0]

    if not block_contents or block_type != expected_block_type:
      raise ValueError("'%s' should be followed by a %s block, but was a %s" % (keyword, expected_block_type, block_type))

    setattr(descriptor, attribute, block_contents)

    if value_attribute:
      setattr(descriptor, value_attribute, value)

  return _parse


class Descriptor(object):
  """
  Common parent for all types of descriptors.
  """

  ATTRIBUTES = {}  # mapping of 'attribute' => (default_value, parsing_function)
  PARSER_FOR_LINE = {}  # line keyword to its associated parsing function

  def __init__(self, contents, lazy_load = False):
    self._path = None
    self._archive_path = None
    self._raw_contents = contents
    self._lazy_loading = lazy_load
    self._entries = {}
    self._unrecognized_lines = []

  def get_path(self):
    """
    Provides the absolute path that we loaded this descriptor from.

    :returns: **str** with the absolute path of the descriptor source
    """

    return self._path

  def get_archive_path(self):
    """
    If this descriptor came from an archive then provides its path within the
    archive. This is only set if the descriptor came from a
    :class:`~stem.descriptor.reader.DescriptorReader`, and is **None** if this
    descriptor didn't come from an archive.

    :returns: **str** with the descriptor's path within the archive
    """

    return self._archive_path

  def get_bytes(self):
    """
    Provides the ASCII **bytes** of the descriptor. This only differs from
    **str()** if you're running python 3.x, in which case **str()** provides a
    **unicode** string.

    :returns: **bytes** for the descriptor's contents
    """

    return self._raw_contents

  def get_unrecognized_lines(self):
    """
    Provides a list of lines that were either ignored or had data that we did
    not know how to process. This is most common due to new descriptor fields
    that this library does not yet know how to process. Patches welcome!

    :returns: **list** of lines of unrecognized content
    """

    if self._lazy_loading:
      # we need to go ahead and parse the whole document to figure this out
      self._parse(self._entries, False)
      self._lazy_loading = False

    return list(self._unrecognized_lines)

  def _parse(self, entries, validate, parser_for_line = None):
    """
    Parses a series of 'keyword => (value, pgp block)' mappings and applies
    them as attributes.

    :param dict entries: descriptor contents to be applied
    :param bool validate: checks the validity of descriptor content if True
    :param dict parsers: mapping of lines to the function for parsing it

    :raises: **ValueError** if an error occurs in validation
    """

    if parser_for_line is None:
      parser_for_line = self.PARSER_FOR_LINE

    # set defaults

    for attr in self.ATTRIBUTES:
      if not hasattr(self, attr):
        setattr(self, attr, copy.copy(self.ATTRIBUTES[attr][0]))

    for keyword, values in list(entries.items()):
      try:
        if keyword in parser_for_line:
          parser_for_line[keyword](self, entries)
        else:
          for value, block_type, block_contents in values:
            line = '%s %s' % (keyword, value)

            if block_contents:
              line += '\n%s' % block_contents

            self._unrecognized_lines.append(line)
      except ValueError as exc:
        if validate:
          raise exc

  def _set_path(self, path):
    self._path = path

  def _set_archive_path(self, path):
    self._archive_path = path

  def _name(self, is_plural = False):
    return str(type(self))

  def _digest_for_signature(self, signing_key, signature):
    """
    Provides the signed digest we should have given this key and signature.

    :param str signing_key: key block used to make this signature
    :param str signature: signed digest for this descriptor content

    :returns: the digest string encoded in uppercase hex

    :raises: ValueError if unable to provide a validly signed digest
    """

    if not stem.prereq.is_crypto_available():
      raise ValueError('Generating the signed digest requires pycrypto')

    from Crypto.Util import asn1
    from Crypto.Util.number import bytes_to_long, long_to_bytes

    # get the ASN.1 sequence

    seq = asn1.DerSequence()
    seq.decode(_bytes_for_block(signing_key))
    modulus, public_exponent = seq[0], seq[1]

    sig_as_bytes = _bytes_for_block(signature)
    sig_as_long = bytes_to_long(sig_as_bytes)  # convert signature to an int
    blocksize = 128  # block size will always be 128 for a 1024 bit key

    # use the public exponent[e] & the modulus[n] to decrypt the int

    decrypted_int = pow(sig_as_long, public_exponent, modulus)

    # convert the int to a byte array

    decrypted_bytes = long_to_bytes(decrypted_int, blocksize)

    ############################################################################
    # The decrypted bytes should have a structure exactly along these lines.
    # 1 byte  - [null '\x00']
    # 1 byte  - [block type identifier '\x01'] - Should always be 1
    # N bytes - [padding '\xFF' ]
    # 1 byte  - [separator '\x00' ]
    # M bytes - [message]
    # Total   - 128 bytes
    # More info here http://www.ietf.org/rfc/rfc2313.txt
    # esp the Notes in section 8.1
    ############################################################################

    try:
      if decrypted_bytes.index(b'\x00\x01') != 0:
        raise ValueError('Verification failed, identifier missing')
    except ValueError:
      raise ValueError('Verification failed, malformed data')

    try:
      identifier_offset = 2

      # find the separator
      separator_index = decrypted_bytes.index(b'\x00', identifier_offset)
    except ValueError:
      raise ValueError('Verification failed, separator not found')

    digest_hex = codecs.encode(decrypted_bytes[separator_index + 1:], 'hex_codec')
    return stem.util.str_tools._to_unicode(digest_hex.upper())

  def _digest_for_content(self, start, end):
    """
    Provides the digest of our descriptor's content in a given range.

    :param bytes start: start of the range to generate a digest for
    :param bytes end: end of the range to generate a digest for

    :returns: the digest string encoded in uppercase hex

    :raises: ValueError if the digest cannot be calculated
    """

    raw_descriptor = self.get_bytes()

    start_index = raw_descriptor.find(start)
    end_index = raw_descriptor.find(end, start_index)

    if start_index == -1:
      raise ValueError("Digest is for the range starting with '%s' but that isn't in our descriptor" % start)
    elif end_index == -1:
      raise ValueError("Digest is for the range ending with '%s' but that isn't in our descriptor" % end)

    digest_content = raw_descriptor[start_index:end_index + len(end)]
    digest_hash = hashlib.sha1(stem.util.str_tools._to_bytes(digest_content))
    return stem.util.str_tools._to_unicode(digest_hash.hexdigest().upper())

  def __getattr__(self, name):
    # If attribute isn't already present we might be lazy loading it...

    if self._lazy_loading and name in self.ATTRIBUTES:
      default, parsing_function = self.ATTRIBUTES[name]

      try:
        parsing_function(self, self._entries)
      except (ValueError, KeyError):
        try:
          # despite having a validation failure check to see if we set something
          return super(Descriptor, self).__getattribute__(name)
        except AttributeError:
          setattr(self, name, copy.copy(default))

    return super(Descriptor, self).__getattribute__(name)

  def __str__(self):
    if stem.prereq.is_python_3():
      return stem.util.str_tools._to_unicode(self._raw_contents)
    else:
      return self._raw_contents
|
||||
|
||||
|
||||
def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_first = False, skip = False, end_position = None, include_ending_keyword = False):
  """
  Reads from the descriptor file until we get to one of the given keywords or
  reach the end of the file.

  :param str,list keywords: keyword(s) we want to read until
  :param file descriptor_file: file with the descriptor content
  :param bool inclusive: includes the line with the keyword if True
  :param bool ignore_first: doesn't check if the first line read has one of the
    given keywords
  :param bool skip: skips buffering content, returning None
  :param int end_position: end if we reach this point in the file
  :param bool include_ending_keyword: provides the keyword we broke on if **True**

  :returns: **list** with the lines until we find one of the keywords, this is
    a two value tuple with the ending keyword if include_ending_keyword is
    **True**
  """

  if skip:
    content = None
    content_append = lambda x: None
  else:
    content = []
    content_append = content.append

  ending_keyword = None

  if isinstance(keywords, (bytes, str_type)):
    keywords = (keywords,)

  if ignore_first:
    first_line = descriptor_file.readline()

    if first_line:
      content_append(first_line)

  keyword_match = re.compile(SPECIFIC_KEYWORD_LINE % '|'.join(keywords))

  while True:
    last_position = descriptor_file.tell()

    if end_position and last_position >= end_position:
      break

    line = descriptor_file.readline()

    if not line:
      break  # EOF

    line_match = keyword_match.match(stem.util.str_tools._to_unicode(line))

    if line_match:
      ending_keyword = line_match.groups()[0]

      if not inclusive:
        descriptor_file.seek(last_position)
      else:
        content_append(line)

      break
    else:
      content_append(line)

  if include_ending_keyword:
    return (content, ending_keyword)
  else:
    return content


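# Editor's sketch, not part of stem: typical _read_until_keywords usage. It
# reads up to (or, with inclusive = True, through) a keyword line and leaves
# the file position there, so successive calls carve a stream into chunks.
# The descriptor bytes below are a made-up stand-in.

def _example_read_until_keywords():
  import io

  handle = io.BytesIO(b'router caerSidi 71.35.133.197 9001 0 0\nrouter-signature\n')

  body = _read_until_keywords('router-signature', handle)  # [b'router caerSidi ...\n']
  signature_line = _read_until_keywords('router-signature', handle, inclusive = True)

  return body + signature_line

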
def _bytes_for_block(content):
  """
  Provides the base64 decoded content of a pgp-style block.

  :param str content: block to be decoded

  :returns: decoded block content

  :raises: **TypeError** if this isn't base64 encoded content
  """

  # strip the '-----BEGIN RSA PUBLIC KEY-----' header and footer

  content = ''.join(content.split('\n')[1:-1])

  return base64.b64decode(stem.util.str_tools._to_bytes(content))


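# Editor's sketch, not part of stem: _bytes_for_block just strips the armor
# lines and base64-decodes what's between them. The block below is a made-up
# stand-in rather than real key material.

def _example_bytes_for_block():
  block = '\n'.join((
    '-----BEGIN MESSAGE-----',
    'aGVsbG8gd29ybGQ=',
    '-----END MESSAGE-----',
  ))

  return _bytes_for_block(block)  # b'hello world'

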
def _get_pseudo_pgp_block(remaining_contents):
  """
  Checks if the given contents begin with a pseudo-Open-PGP-style block and,
  if so, pops it off and provides it back to the caller.

  :param list remaining_contents: lines to be checked for a public key block

  :returns: **tuple** of the (block_type, content) or None if it doesn't exist

  :raises: **ValueError** if the contents start with a key block but it's
    malformed (for instance, if it lacks an ending line)
  """

  if not remaining_contents:
    return None  # nothing left

  block_match = PGP_BLOCK_START.match(remaining_contents[0])

  if block_match:
    block_type = block_match.groups()[0]
    block_lines = []
    end_line = PGP_BLOCK_END % block_type

    while True:
      if not remaining_contents:
        raise ValueError("Unterminated pgp style block (looking for '%s'):\n%s" % (end_line, '\n'.join(block_lines)))

      line = remaining_contents.pop(0)
      block_lines.append(line)

      if line == end_line:
        return (block_type, '\n'.join(block_lines))
  else:
    return None


def _get_descriptor_components(raw_contents, validate, extra_keywords = ()):
  """
  Initial breakup of the server descriptor contents to make parsing easier.

  A descriptor contains a series of 'keyword lines' which are simply a keyword
  followed by an optional value. Lines can also be followed by a signature
  block.

  To get a sub-listing with just certain keywords use extra_keywords. This can
  be useful if we care about their relative ordering with respect to each
  other. For instance, we care about the ordering of 'accept' and 'reject'
  entries because this influences the resulting exit policy, but for everything
  else in server descriptors the order does not matter.

  :param str raw_contents: descriptor content provided by the relay
  :param bool validate: checks the validity of the descriptor's content if
    True, skips these checks otherwise
  :param list extra_keywords: entity keywords to put into a separate listing
    with ordering intact

  :returns:
    **collections.OrderedDict** with the 'keyword => (value, pgp key) entries'
    mappings. If extra_keywords was provided then this instead provides a two
    value tuple, the second being a list of those entries.
  """

  if isinstance(raw_contents, bytes):
    raw_contents = stem.util.str_tools._to_unicode(raw_contents)

  entries = OrderedDict()
  extra_entries = []  # entries with a keyword in extra_keywords
  remaining_lines = raw_contents.split('\n')

  while remaining_lines:
    line = remaining_lines.pop(0)

    # V2 network status documents explicitly can contain blank lines...
    #
    #   "Implementations MAY insert blank lines for clarity between sections;
    #   these blank lines are ignored."
    #
    # ... and server descriptors end with an extra newline. But other documents
    # don't say how blank lines should be handled, so we globally ignore them.

    if not line:
      continue

    # Some lines have an 'opt ' for backward compatibility. They should be
    # ignored. This prefix is being removed in...
    # https://trac.torproject.org/projects/tor/ticket/5124

    if line.startswith('opt '):
      line = line[4:]

    line_match = KEYWORD_LINE.match(line)

    if not line_match:
      if not validate:
        continue

      raise ValueError('Line contains invalid characters: %s' % line)

    keyword, value = line_match.groups()

    if value is None:
      value = ''

    try:
      block_attr = _get_pseudo_pgp_block(remaining_lines)

      if block_attr:
        block_type, block_contents = block_attr
      else:
        block_type, block_contents = None, None
    except ValueError as exc:
      if not validate:
        continue

      raise exc

    if keyword in extra_keywords:
      extra_entries.append('%s %s' % (keyword, value))
    else:
      entries.setdefault(keyword, []).append((value, block_type, block_contents))

  if extra_keywords:
    return entries, extra_entries
  else:
    return entries


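# Editor's sketch, not part of stem: what _get_descriptor_components provides.
# Each keyword maps to a list of (value, block_type, block_contents) tuples,
# and extra_keywords pulls those lines into a separate, order-preserving list.

def _example_get_descriptor_components():
  raw = 'uptime 3600\naccept *:80\nreject *:*\n'
  entries, policy = _get_descriptor_components(raw, False, extra_keywords = ('accept', 'reject'))

  # entries == OrderedDict([('uptime', [('3600', None, None)])])
  # policy == ['accept *:80', 'reject *:*']
  return entries, policy

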
# importing at the end to avoid circular dependencies on our Descriptor class

import stem.descriptor.server_descriptor
import stem.descriptor.extrainfo_descriptor
import stem.descriptor.networkstatus
import stem.descriptor.microdescriptor
import stem.descriptor.tordnsel
import stem.descriptor.hidden_service_descriptor
110
Shared/lib/python3.4/site-packages/stem/descriptor/export.py
Normal file
@@ -0,0 +1,110 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Toolkit for exporting descriptors to other formats.

**Module Overview:**

::

  export_csv - Exports descriptors to a CSV
  export_csv_file - Writes exported CSV output to a file
"""

import csv

try:
  from cStringIO import StringIO
except ImportError:
  from io import StringIO

import stem.descriptor
import stem.prereq


class _ExportDialect(csv.excel):
  lineterminator = '\n'


def export_csv(descriptors, included_fields = (), excluded_fields = (), header = True):
  """
  Provides a newline separated CSV for one or more descriptors. If simply
  provided with descriptors then the CSV contains all of their attributes,
  labeled with a header row. Either 'included_fields' or 'excluded_fields' can
  be used for more granular control over its attributes and the order.

  :param Descriptor,list descriptors: either a
    :class:`~stem.descriptor.Descriptor` or list of descriptors to be exported
  :param list included_fields: attributes to include in the csv
  :param list excluded_fields: attributes to exclude from the csv
  :param bool header: if **True** then the first line will be a comma separated
    list of the attribute names (**only supported in python 2.7 and higher**)

  :returns: **str** of the CSV for the descriptors, one per line
  :raises: **ValueError** if descriptors contain more than one descriptor type
  """

  output_buffer = StringIO()
  export_csv_file(output_buffer, descriptors, included_fields, excluded_fields, header)
  return output_buffer.getvalue()


def export_csv_file(output_file, descriptors, included_fields = (), excluded_fields = (), header = True):
  """
  Similar to :func:`stem.descriptor.export.export_csv`, except that the CSV is
  written directly to a file.

  :param file output_file: file to be written to
  :param Descriptor,list descriptors: either a
    :class:`~stem.descriptor.Descriptor` or list of descriptors to be exported
  :param list included_fields: attributes to include in the csv
  :param list excluded_fields: attributes to exclude from the csv
  :param bool header: if **True** then the first line will be a comma separated
    list of the attribute names (**only supported in python 2.7 and higher**)

  :returns: **str** of the CSV for the descriptors, one per line
  :raises: **ValueError** if descriptors contain more than one descriptor type
  """

  if isinstance(descriptors, stem.descriptor.Descriptor):
    descriptors = (descriptors,)

  if not descriptors:
    return

  descriptor_type = type(descriptors[0])
  descriptor_type_label = descriptor_type.__name__
  included_fields = list(included_fields)

  # If the user didn't specify the fields to include then export everything,
  # ordered alphabetically. If they did specify fields then make sure that
  # they exist.

  desc_attr = sorted(vars(descriptors[0]).keys())

  if included_fields:
    for field in included_fields:
      if field not in desc_attr:
        raise ValueError("%s does not have a '%s' attribute, valid fields are: %s" % (descriptor_type_label, field, ', '.join(desc_attr)))
  else:
    included_fields = [attr for attr in desc_attr if not attr.startswith('_')]

  for field in excluded_fields:
    try:
      included_fields.remove(field)
    except ValueError:
      pass

  writer = csv.DictWriter(output_file, included_fields, dialect = _ExportDialect(), extrasaction='ignore')

  if header and stem.prereq.is_python_27():
    writer.writeheader()

  for desc in descriptors:
    if not isinstance(desc, stem.descriptor.Descriptor):
      raise ValueError('Unable to export a descriptor CSV since %s is not a descriptor.' % type(desc).__name__)
    elif descriptor_type != type(desc):
      raise ValueError('To export a descriptor CSV all of the descriptors must be of the same type. First descriptor was a %s but we later got a %s.' % (descriptor_type_label, type(desc)))

    writer.writerow(vars(desc))
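

# Editor's sketch, not part of stem: exporting a few attributes of parsed
# descriptors to CSV. The cached-descriptors path is hypothetical.

def _example_export():
  descriptors = list(stem.descriptor.parse_file('/tmp/cached-descriptors'))
  return export_csv(descriptors, included_fields = ('nickname', 'fingerprint', 'published'))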
@@ -0,0 +1,939 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Parsing for Tor extra-info descriptors. These are published by relays whenever
their server descriptor is published and have a similar format. However, unlike
server descriptors these don't contain information that Tor clients require to
function and as such aren't fetched by default.

Defined in section 2.2 of the `dir-spec
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_,
extra-info descriptors contain interesting but non-vital information such as
usage statistics. Tor clients cannot request these documents for bridges.

Extra-info descriptors are available from a few sources...

* If you have 'DownloadExtraInfo 1' in your torrc...

  * control port via 'GETINFO extra-info/digest/\*' queries
  * the 'cached-extrainfo' file in tor's data directory

* Archived descriptors provided by CollecTor
  (https://collector.torproject.org/).

* Directory authorities and mirrors via their DirPort.

**Module Overview:**

::

  ExtraInfoDescriptor - Tor extra-info descriptor.
    |- RelayExtraInfoDescriptor - Extra-info descriptor for a relay.
    |- BridgeExtraInfoDescriptor - Extra-info descriptor for a bridge.
    |
    +- digest - calculates the upper-case hex digest value for our content

.. data:: DirResponse (enum)

  Enumeration for known statuses for ExtraInfoDescriptor's dir_*_responses.

  =================== ===========
  DirResponse         Description
  =================== ===========
  **OK**              network status requests that were answered
  **NOT_ENOUGH_SIGS** network status wasn't signed by enough authorities
  **UNAVAILABLE**     requested network status was unavailable
  **NOT_FOUND**       requested network status was not found
  **NOT_MODIFIED**    network status unmodified since If-Modified-Since time
  **BUSY**            directory was busy
  =================== ===========

.. data:: DirStat (enum)

  Enumeration for known stats for ExtraInfoDescriptor's dir_*_direct_dl and
  dir_*_tunneled_dl.

  ===================== ===========
  DirStat               Description
  ===================== ===========
  **COMPLETE**          requests that completed successfully
  **TIMEOUT**           requests that didn't complete within a ten minute timeout
  **RUNNING**           requests still in process when measurement's taken
  **MIN**               smallest rate at which a descriptor was downloaded in B/s
  **MAX**               largest rate at which a descriptor was downloaded in B/s
  **D1-4** and **D6-9** rate of the slowest x/10 download rates in B/s
  **Q1** and **Q3**     rate of the slowest and fastest quarter download rates in B/s
  **MD**                median download rate in B/s
  ===================== ===========
"""

import functools
import hashlib
import re

import stem.util.connection
import stem.util.enum
import stem.util.str_tools
import stem.util.tor_tools

from stem.descriptor import (
  PGP_BLOCK_END,
  Descriptor,
  _read_until_keywords,
  _get_descriptor_components,
  _value,
  _values,
  _parse_timestamp_line,
  _parse_forty_character_hex,
  _parse_key_block,
)

try:
  # added in python 3.2
  from functools import lru_cache
except ImportError:
  from stem.util.lru_cache import lru_cache

# known statuses for dirreq-v2-resp and dirreq-v3-resp...
DirResponse = stem.util.enum.Enum(
  ('OK', 'ok'),
  ('NOT_ENOUGH_SIGS', 'not-enough-sigs'),
  ('UNAVAILABLE', 'unavailable'),
  ('NOT_FOUND', 'not-found'),
  ('NOT_MODIFIED', 'not-modified'),
  ('BUSY', 'busy'),
)

# known stats for dirreq-v2/3-direct-dl and dirreq-v2/3-tunneled-dl...
dir_stats = ['complete', 'timeout', 'running', 'min', 'max', 'q1', 'q3', 'md']
dir_stats += ['d%i' % i for i in range(1, 5)]
dir_stats += ['d%i' % i for i in range(6, 10)]
DirStat = stem.util.enum.Enum(*[(stat.upper(), stat) for stat in dir_stats])

# relay descriptors must have exactly one of the following
REQUIRED_FIELDS = (
  'extra-info',
  'published',
  'router-signature',
)

# optional entries that can appear at most once
SINGLE_FIELDS = (
  'read-history',
  'write-history',
  'geoip-db-digest',
  'geoip6-db-digest',
  'bridge-stats-end',
  'bridge-ips',
  'dirreq-stats-end',
  'dirreq-v2-ips',
  'dirreq-v3-ips',
  'dirreq-v2-reqs',
  'dirreq-v3-reqs',
  'dirreq-v2-share',
  'dirreq-v3-share',
  'dirreq-v2-resp',
  'dirreq-v3-resp',
  'dirreq-v2-direct-dl',
  'dirreq-v3-direct-dl',
  'dirreq-v2-tunneled-dl',
  'dirreq-v3-tunneled-dl',
  'dirreq-read-history',
  'dirreq-write-history',
  'entry-stats-end',
  'entry-ips',
  'cell-stats-end',
  'cell-processed-cells',
  'cell-queued-cells',
  'cell-time-in-queue',
  'cell-circuits-per-decile',
  'conn-bi-direct',
  'exit-stats-end',
  'exit-kibibytes-written',
  'exit-kibibytes-read',
  'exit-streams-opened',
)


_timestamp_re = re.compile('^(.*) \(([0-9]+) s\)( .*)?$')
_locale_re = re.compile('^[a-zA-Z0-9\?]{2}$')


def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs):
  """
  Iterates over the extra-info descriptors in a file.

  :param file descriptor_file: file with descriptor content
  :param bool is_bridge: parses the file as being a bridge descriptor
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param dict kwargs: additional arguments for the descriptor constructor

  :returns: iterator for :class:`~stem.descriptor.extrainfo_descriptor.ExtraInfoDescriptor`
    instances in the file

  :raises:
    * **ValueError** if the contents is malformed and validate is **True**
    * **IOError** if the file can't be read
  """

  while True:
    if not is_bridge:
      extrainfo_content = _read_until_keywords('router-signature', descriptor_file)

      # we've reached the 'router-signature', now include the pgp style block

      block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
      extrainfo_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
    else:
      extrainfo_content = _read_until_keywords('router-digest', descriptor_file, True)

    if extrainfo_content:
      if extrainfo_content[0].startswith(b'@type'):
        extrainfo_content = extrainfo_content[1:]

      if is_bridge:
        yield BridgeExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs)
      else:
        yield RelayExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs)
    else:
      break  # done parsing file


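# Editor's sketch, not part of stem: iterating over the extra-info descriptors
# in tor's cached-extrainfo file. The data directory path is hypothetical.

def _example_parse_file():
  with open('/var/lib/tor/cached-extrainfo', 'rb') as extrainfo_file:
    for desc in _parse_file(extrainfo_file):
      print('%s (%s)' % (desc.nickname, desc.fingerprint))

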
def _parse_timestamp_and_interval(keyword, content):
  """
  Parses a 'YYYY-MM-DD HH:MM:SS (NSEC s) *' entry.

  :param str keyword: line's keyword
  :param str content: line content to be parsed

  :returns: **tuple** of the form (timestamp (**datetime**), interval
    (**int**), remaining content (**str**))

  :raises: **ValueError** if the content is malformed
  """

  line = '%s %s' % (keyword, content)
  content_match = _timestamp_re.match(content)

  if not content_match:
    raise ValueError('Malformed %s line: %s' % (keyword, line))

  timestamp_str, interval, remainder = content_match.groups()

  if remainder:
    remainder = remainder[1:]  # remove leading space

  if not interval.isdigit():
    raise ValueError("%s line's interval wasn't a number: %s" % (keyword, line))

  try:
    timestamp = stem.util.str_tools._parse_timestamp(timestamp_str)
    return timestamp, int(interval), remainder
  except ValueError:
    raise ValueError("%s line's timestamp wasn't parsable: %s" % (keyword, line))


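# Editor's sketch, not part of stem: the three values this returns for a
# typical stats line.

def _example_timestamp_and_interval():
  timestamp, interval, remainder = _parse_timestamp_and_interval('cell-stats-end', '2012-05-03 12:07:50 (86400 s)')

  # timestamp => datetime.datetime(2012, 5, 3, 12, 7, 50)
  # interval => 86400
  # remainder => None (nothing followed the interval)
  return timestamp, interval, remainder

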
def _parse_extra_info_line(descriptor, entries):
  # "extra-info" Nickname Fingerprint

  value = _value('extra-info', entries)
  extra_info_comp = value.split()

  if len(extra_info_comp) < 2:
    raise ValueError('Extra-info line must have two values: extra-info %s' % value)
  elif not stem.util.tor_tools.is_valid_nickname(extra_info_comp[0]):
    raise ValueError("Extra-info line entry isn't a valid nickname: %s" % extra_info_comp[0])
  elif not stem.util.tor_tools.is_valid_fingerprint(extra_info_comp[1]):
    raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % extra_info_comp[1])

  descriptor.nickname = extra_info_comp[0]
  descriptor.fingerprint = extra_info_comp[1]


def _parse_transport_line(descriptor, entries):
  # "transport" transportname address:port [arglist]
  # Everything after the transportname is scrubbed in published bridge
  # descriptors, so we'll never see it in practice.
  #
  # These entries really only make sense for bridges, but have been seen
  # on non-bridges in the wild when the relay operator configured it this
  # way.

  transports = {}

  for value in _values('transport', entries):
    name, address, port, args = None, None, None, None

    if ' ' not in value:
      # scrubbed
      name = value
    else:
      # not scrubbed
      value_comp = value.split()

      if len(value_comp) < 1:
        raise ValueError('Transport line is missing its transport name: transport %s' % value)
      elif len(value_comp) < 2:
        raise ValueError('Transport line is missing its address:port value: transport %s' % value)
      elif ':' not in value_comp[1]:
        raise ValueError("Transport line's address:port entry is missing a colon: transport %s" % value)

      name = value_comp[0]
      address, port_str = value_comp[1].split(':', 1)

      # the address can be either IPv4 or IPv6, raise if it's neither
      if not (stem.util.connection.is_valid_ipv4_address(address) or
              stem.util.connection.is_valid_ipv6_address(address)):
        raise ValueError('Transport line has a malformed address: transport %s' % value)
      elif not stem.util.connection.is_valid_port(port_str):
        raise ValueError('Transport line has a malformed port: transport %s' % value)

      port = int(port_str)
      args = value_comp[2:] if len(value_comp) >= 3 else []

    transports[name] = (address, port, args)

  descriptor.transport = transports


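# Editor's illustration, not part of stem: a scrubbed transport line is just
# 'transport obfs3', parsing to {'obfs3': (None, None, None)}, while an
# unscrubbed 'transport obfs3 198.51.100.5:80' (hypothetical address) parses
# to {'obfs3': ('198.51.100.5', 80, [])}.

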
def _parse_cell_circuits_per_decile_line(descriptor, entries):
  # "cell-circuits-per-decile" num

  value = _value('cell-circuits-per-decile', entries)

  if not value.isdigit():
    raise ValueError('Non-numeric cell-circuits-per-decile value: %s' % value)
  elif int(value) < 0:
    raise ValueError('Negative cell-circuits-per-decile value: %s' % value)

  descriptor.cell_circuits_per_decile = int(value)


def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr, descriptor, entries):
  value = _value(keyword, entries)

  recognized_counts = {}
  unrecognized_counts = {}

  is_response_stats = keyword in ('dirreq-v2-resp', 'dirreq-v3-resp')
  key_set = DirResponse if is_response_stats else DirStat

  key_type = 'STATUS' if is_response_stats else 'STAT'
  error_msg = '%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value)

  if value:
    for entry in value.split(','):
      if '=' not in entry:
        raise ValueError(error_msg)

      status, count = entry.split('=', 1)

      if count.isdigit():
        if status in key_set:
          recognized_counts[status] = int(count)
        else:
          unrecognized_counts[status] = int(count)
      else:
        raise ValueError(error_msg)

  setattr(descriptor, recognized_counts_attr, recognized_counts)
  setattr(descriptor, unrecognized_counts_attr, unrecognized_counts)


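# Editor's illustration, not part of stem: a response line such as
#
#   dirreq-v3-resp ok=308,not-enough-sigs=4,unavailable=0,not-found=0,not-modified=0,busy=8
#
# fills dir_v3_responses with DirResponse keys ({DirResponse.OK: 308, ...}),
# while statuses outside the enum land in dir_v3_responses_unknown.

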
def _parse_dirreq_share_line(keyword, attribute, descriptor, entries):
  value = _value(keyword, entries)

  if not value.endswith('%'):
    raise ValueError('%s lines should be a percentage: %s %s' % (keyword, keyword, value))
  elif float(value[:-1]) < 0:
    raise ValueError('Negative percentage value: %s %s' % (keyword, value))

  # bug means it might be above 100%: https://lists.torproject.org/pipermail/tor-dev/2012-June/003679.html

  setattr(descriptor, attribute, float(value[:-1]) / 100)


def _parse_cell_line(keyword, attribute, descriptor, entries):
  # "<keyword>" num,...,num

  value = _value(keyword, entries)
  entries, exc = [], None

  if value:
    for entry in value.split(','):
      try:
        # Values should be positive but as discussed in ticket #5849
        # there was a bug around this. It was fixed in tor 0.2.2.1.

        entries.append(float(entry))
      except ValueError:
        exc = ValueError('Non-numeric entry in %s listing: %s %s' % (keyword, keyword, value))

  setattr(descriptor, attribute, entries)

  if exc:
    raise exc


def _parse_timestamp_and_interval_line(keyword, end_attribute, interval_attribute, descriptor, entries):
  # "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s)

  timestamp, interval, _ = _parse_timestamp_and_interval(keyword, _value(keyword, entries))
  setattr(descriptor, end_attribute, timestamp)
  setattr(descriptor, interval_attribute, interval)


def _parse_conn_bi_direct_line(descriptor, entries):
  # "conn-bi-direct" YYYY-MM-DD HH:MM:SS (NSEC s) BELOW,READ,WRITE,BOTH

  value = _value('conn-bi-direct', entries)
  timestamp, interval, remainder = _parse_timestamp_and_interval('conn-bi-direct', value)
  stats = remainder.split(',')

  if len(stats) != 4 or not (stats[0].isdigit() and stats[1].isdigit() and stats[2].isdigit() and stats[3].isdigit()):
    raise ValueError('conn-bi-direct line should end with four numeric values: conn-bi-direct %s' % value)

  descriptor.conn_bi_direct_end = timestamp
  descriptor.conn_bi_direct_interval = interval
  descriptor.conn_bi_direct_below = int(stats[0])
  descriptor.conn_bi_direct_read = int(stats[1])
  descriptor.conn_bi_direct_write = int(stats[2])
  descriptor.conn_bi_direct_both = int(stats[3])


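# Editor's illustration, not part of stem: a line such as
#
#   conn-bi-direct 2012-05-03 12:07:50 (86400 s) 735,1829,718,55
#
# sets conn_bi_direct_below = 735, conn_bi_direct_read = 1829,
# conn_bi_direct_write = 718, and conn_bi_direct_both = 55 (counts here are
# hypothetical).

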
def _parse_history_line(keyword, end_attribute, interval_attribute, values_attribute, descriptor, entries):
  # "<keyword>" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM...

  value = _value(keyword, entries)
  timestamp, interval, remainder = _parse_timestamp_and_interval(keyword, value)
  history_values = []

  if remainder:
    try:
      history_values = [int(entry) for entry in remainder.split(',')]
    except ValueError:
      raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value))

  setattr(descriptor, end_attribute, timestamp)
  setattr(descriptor, interval_attribute, interval)
  setattr(descriptor, values_attribute, history_values)


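# Editor's illustration, not part of stem: a history line such as
#
#   read-history 2012-05-03 12:07:50 (900 s) 4268032,139264,7797760
#
# sets read_history_end to the timestamp, read_history_interval to 900, and
# read_history_values to [4268032, 139264, 7797760] (values are hypothetical).

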
def _parse_port_count_line(keyword, attribute, descriptor, entries):
  # "<keyword>" port=N,port=N,...

  value, port_mappings = _value(keyword, entries), {}
  error_msg = 'Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value)

  if value:
    for entry in value.split(','):
      if '=' not in entry:
        raise ValueError(error_msg)

      port, stat = entry.split('=', 1)

      if (port == 'other' or stem.util.connection.is_valid_port(port)) and stat.isdigit():
        if port != 'other':
          port = int(port)

        port_mappings[port] = int(stat)
      else:
        raise ValueError(error_msg)

  setattr(descriptor, attribute, port_mappings)


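# Editor's illustration, not part of stem: a port count line such as
#
#   exit-kibibytes-written 80=5,443=907,other=50
#
# yields {80: 5, 443: 907, 'other': 50}, with the counts here being
# hypothetical.

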
def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries):
  # "<keyword>" CC=N,CC=N,...
  #
  # The maxmind geoip (https://www.maxmind.com/app/iso3166) has numeric
  # locale codes for some special values, for instance...
  #   A1,"Anonymous Proxy"
  #   A2,"Satellite Provider"
  #   ??,"Unknown"

  value, locale_usage = _value(keyword, entries), {}
  error_msg = 'Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value)

  if value:
    for entry in value.split(','):
      if '=' not in entry:
        raise ValueError(error_msg)

      locale, count = entry.split('=', 1)

      if _locale_re.match(locale) and count.isdigit():
        locale_usage[locale] = int(count)
      else:
        raise ValueError(error_msg)

  setattr(descriptor, attribute, locale_usage)


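# Editor's illustration, not part of stem: a locale mapping line such as
#
#   dirreq-v3-ips us=16,de=8,??=8
#
# yields {'us': 16, 'de': 8, '??': 8}; '??' is maxmind's placeholder for an
# unknown country.

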
def _parse_bridge_ip_versions_line(descriptor, entries):
  value, ip_versions = _value('bridge-ip-versions', entries), {}

  if value:
    for entry in value.split(','):
      if '=' not in entry:
        raise stem.ProtocolError("The bridge-ip-versions should be a comma separated listing of '<protocol>=<count>' mappings: bridge-ip-versions %s" % value)

      protocol, count = entry.split('=', 1)

      if not count.isdigit():
        raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value))

      ip_versions[protocol] = int(count)

  descriptor.ip_versions = ip_versions


def _parse_bridge_ip_transports_line(descriptor, entries):
  value, ip_transports = _value('bridge-ip-transports', entries), {}

  if value:
    for entry in value.split(','):
      if '=' not in entry:
        raise stem.ProtocolError("The bridge-ip-transports should be a comma separated listing of '<protocol>=<count>' mappings: bridge-ip-transports %s" % value)

      protocol, count = entry.split('=', 1)

      if not count.isdigit():
        raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value))

      ip_transports[protocol] = int(count)

  descriptor.ip_transports = ip_transports


def _parse_hs_stats(keyword, stat_attribute, extra_attribute, descriptor, entries):
  # "<keyword>" num key=val key=val...

  value, stat, extra = _value(keyword, entries), None, {}

  if value is not None:
    value_comp = value.split()

    if not value_comp:
      raise ValueError("'%s' line was blank" % keyword)

    try:
      stat = int(value_comp[0])
    except ValueError:
      raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, value_comp[0], keyword, value))

    for entry in value_comp[1:]:
      if '=' not in entry:
        raise ValueError('Entries after the stat in %s lines should only be key=val entries: %s %s' % (keyword, keyword, value))

      key, val = entry.split('=', 1)
      extra[key] = val

  setattr(descriptor, stat_attribute, stat)
  setattr(descriptor, extra_attribute, extra)


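# Editor's illustration, not part of stem: an obfuscated hidden service stats
# line such as (values hypothetical)
#
#   hidserv-rend-relayed-cells 36474281 delta_f=2048 epsilon=0.30 bin_size=1024
#
# sets the stat attribute to 36474281 and the extra attribute to
# {'delta_f': '2048', 'epsilon': '0.30', 'bin_size': '1024'}.

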
_parse_geoip_db_digest_line = _parse_forty_character_hex('geoip-db-digest', 'geoip_db_digest')
_parse_geoip6_db_digest_line = _parse_forty_character_hex('geoip6-db-digest', 'geoip6_db_digest')
_parse_dirreq_v2_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-resp', 'dir_v2_responses', 'dir_v2_responses_unknown')
_parse_dirreq_v3_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-resp', 'dir_v3_responses', 'dir_v3_responses_unknown')
_parse_dirreq_v2_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-direct-dl', 'dir_v2_direct_dl', 'dir_v2_direct_dl_unknown')
_parse_dirreq_v3_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-direct-dl', 'dir_v3_direct_dl', 'dir_v3_direct_dl_unknown')
_parse_dirreq_v2_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-tunneled-dl', 'dir_v2_tunneled_dl', 'dir_v2_tunneled_dl_unknown')
_parse_dirreq_v3_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-tunneled-dl', 'dir_v3_tunneled_dl', 'dir_v3_tunneled_dl_unknown')
_parse_dirreq_v2_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v2-share', 'dir_v2_share')
_parse_dirreq_v3_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v3-share', 'dir_v3_share')
_parse_cell_processed_cells_line = functools.partial(_parse_cell_line, 'cell-processed-cells', 'cell_processed_cells')
_parse_cell_queued_cells_line = functools.partial(_parse_cell_line, 'cell-queued-cells', 'cell_queued_cells')
_parse_cell_time_in_queue_line = functools.partial(_parse_cell_line, 'cell-time-in-queue', 'cell_time_in_queue')
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_geoip_start_time_line = _parse_timestamp_line('geoip-start-time', 'geoip_start_time')
_parse_cell_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'cell-stats-end', 'cell_stats_end', 'cell_stats_interval')
_parse_entry_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'entry-stats-end', 'entry_stats_end', 'entry_stats_interval')
_parse_exit_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'exit-stats-end', 'exit_stats_end', 'exit_stats_interval')
_parse_bridge_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'bridge-stats-end', 'bridge_stats_end', 'bridge_stats_interval')
_parse_dirreq_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'dirreq-stats-end', 'dir_stats_end', 'dir_stats_interval')
_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values')
_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values')
_parse_dirreq_read_history_line = functools.partial(_parse_history_line, 'dirreq-read-history', 'dir_read_history_end', 'dir_read_history_interval', 'dir_read_history_values')
_parse_dirreq_write_history_line = functools.partial(_parse_history_line, 'dirreq-write-history', 'dir_write_history_end', 'dir_write_history_interval', 'dir_write_history_values')
_parse_exit_kibibytes_written_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-written', 'exit_kibibytes_written')
_parse_exit_kibibytes_read_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-read', 'exit_kibibytes_read')
_parse_exit_streams_opened_line = functools.partial(_parse_port_count_line, 'exit-streams-opened', 'exit_streams_opened')
_parse_hidden_service_stats_end_line = _parse_timestamp_line('hidserv-stats-end', 'hs_stats_end')
_parse_hidden_service_rend_relayed_cells_line = functools.partial(_parse_hs_stats, 'hidserv-rend-relayed-cells', 'hs_rend_cells', 'hs_rend_cells_attr')
_parse_hidden_service_dir_onions_seen_line = functools.partial(_parse_hs_stats, 'hidserv-dir-onions-seen', 'hs_dir_onions_seen', 'hs_dir_onions_seen_attr')
_parse_dirreq_v2_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-ips', 'dir_v2_ips')
_parse_dirreq_v3_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-ips', 'dir_v3_ips')
_parse_dirreq_v2_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-reqs', 'dir_v2_requests')
_parse_dirreq_v3_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-reqs', 'dir_v3_requests')
_parse_geoip_client_origins_line = functools.partial(_parse_geoip_to_count_line, 'geoip-client-origins', 'geoip_client_origins')
_parse_entry_ips_line = functools.partial(_parse_geoip_to_count_line, 'entry-ips', 'entry_ips')
_parse_bridge_ips_line = functools.partial(_parse_geoip_to_count_line, 'bridge-ips', 'bridge_ips')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')


class ExtraInfoDescriptor(Descriptor):
  """
  Extra-info descriptor document.

  :var str nickname: **\*** relay's nickname
  :var str fingerprint: **\*** identity key fingerprint
  :var datetime published: **\*** time in UTC when this descriptor was made
  :var str geoip_db_digest: sha1 of the geoIP database file for IPv4 addresses
  :var str geoip6_db_digest: sha1 of the geoIP database file for IPv6 addresses
  :var dict transport: **\*** mapping of transport methods to their (address,
    port, args) tuple, these usually appear on bridges in which case all of
    those are **None**

  **Bi-directional connection usage:**

  :var datetime conn_bi_direct_end: end of the sampling interval
  :var int conn_bi_direct_interval: seconds per interval
  :var int conn_bi_direct_below: connections that read/wrote less than 20 KiB
  :var int conn_bi_direct_read: connections that read at least 10x more than wrote
  :var int conn_bi_direct_write: connections that wrote at least 10x more than read
  :var int conn_bi_direct_both: remaining connections

  **Bytes read/written for relayed traffic:**

  :var datetime read_history_end: end of the sampling interval
  :var int read_history_interval: seconds per interval
  :var list read_history_values: bytes read during each interval

  :var datetime write_history_end: end of the sampling interval
  :var int write_history_interval: seconds per interval
  :var list write_history_values: bytes written during each interval

  **Cell relaying statistics:**

  :var datetime cell_stats_end: end of the period when stats were gathered
  :var int cell_stats_interval: length in seconds of the interval
  :var list cell_processed_cells: measurement of processed cells per circuit
  :var list cell_queued_cells: measurement of queued cells per circuit
  :var list cell_time_in_queue: mean enqueued time in milliseconds for cells
  :var int cell_circuits_per_decile: mean number of circuits in a decile

  **Directory Mirror Attributes:**

  :var datetime dir_stats_end: end of the period when stats were gathered
  :var int dir_stats_interval: length in seconds of the interval
  :var dict dir_v2_ips: mapping of locales to rounded count of requester ips
  :var dict dir_v3_ips: mapping of locales to rounded count of requester ips
  :var float dir_v2_share: percent of total directory traffic it expects to serve
  :var float dir_v3_share: percent of total directory traffic it expects to serve
  :var dict dir_v2_requests: mapping of locales to rounded count of requests
  :var dict dir_v3_requests: mapping of locales to rounded count of requests

  :var dict dir_v2_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
  :var dict dir_v3_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count
  :var dict dir_v2_responses_unknown: mapping of unrecognized statuses to their count
  :var dict dir_v3_responses_unknown: mapping of unrecognized statuses to their count

  :var dict dir_v2_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
  :var dict dir_v3_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort
  :var dict dir_v2_direct_dl_unknown: mapping of unrecognized stats to their measurement
  :var dict dir_v3_direct_dl_unknown: mapping of unrecognized stats to their measurement

  :var dict dir_v2_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
  :var dict dir_v3_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort
  :var dict dir_v2_tunneled_dl_unknown: mapping of unrecognized stats to their measurement
  :var dict dir_v3_tunneled_dl_unknown: mapping of unrecognized stats to their measurement

  **Bytes read/written for directory mirroring:**

  :var datetime dir_read_history_end: end of the sampling interval
  :var int dir_read_history_interval: seconds per interval
  :var list dir_read_history_values: bytes read during each interval

  :var datetime dir_write_history_end: end of the sampling interval
  :var int dir_write_history_interval: seconds per interval
  :var list dir_write_history_values: bytes written during each interval

  **Guard Attributes:**

  :var datetime entry_stats_end: end of the period when stats were gathered
  :var int entry_stats_interval: length in seconds of the interval
  :var dict entry_ips: mapping of locales to rounded count of unique user ips

  **Exit Attributes:**

  :var datetime exit_stats_end: end of the period when stats were gathered
  :var int exit_stats_interval: length in seconds of the interval
  :var dict exit_kibibytes_written: traffic per port (keys are ints or 'other')
  :var dict exit_kibibytes_read: traffic per port (keys are ints or 'other')
  :var dict exit_streams_opened: streams per port (keys are ints or 'other')

  **Hidden Service Attributes:**

  :var datetime hs_stats_end: end of the sampling interval
  :var int hs_rend_cells: rounded count of the RENDEZVOUS1 cells seen
  :var dict hs_rend_cells_attr: **\*** attributes provided for the hs_rend_cells
  :var int hs_dir_onions_seen: rounded count of the identities seen
  :var dict hs_dir_onions_seen_attr: **\*** attributes provided for the hs_dir_onions_seen

  **Bridge Attributes:**

  :var datetime bridge_stats_end: end of the period when stats were gathered
  :var int bridge_stats_interval: length in seconds of the interval
  :var dict bridge_ips: mapping of locales to rounded count of unique user ips
  :var datetime geoip_start_time: replaced by bridge_stats_end (deprecated)
  :var dict geoip_client_origins: replaced by bridge_ips (deprecated)
  :var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
  :var dict ip_transports: mapping of ip transports to a count for the number of users

  **\*** attribute is either required when we're parsed with validation or has
  a default value, others are left as **None** if undefined

  .. versionchanged:: 1.4.0
    Added the hs_stats_end, hs_rend_cells, hs_rend_cells_attr,
    hs_dir_onions_seen, and hs_dir_onions_seen_attr attributes.
  """

  ATTRIBUTES = {
    'nickname': (None, _parse_extra_info_line),
    'fingerprint': (None, _parse_extra_info_line),
    'published': (None, _parse_published_line),
    'geoip_db_digest': (None, _parse_geoip_db_digest_line),
    'geoip6_db_digest': (None, _parse_geoip6_db_digest_line),
    'transport': ({}, _parse_transport_line),

    'conn_bi_direct_end': (None, _parse_conn_bi_direct_line),
    'conn_bi_direct_interval': (None, _parse_conn_bi_direct_line),
    'conn_bi_direct_below': (None, _parse_conn_bi_direct_line),
    'conn_bi_direct_read': (None, _parse_conn_bi_direct_line),
    'conn_bi_direct_write': (None, _parse_conn_bi_direct_line),
    'conn_bi_direct_both': (None, _parse_conn_bi_direct_line),

    'read_history_end': (None, _parse_read_history_line),
    'read_history_interval': (None, _parse_read_history_line),
    'read_history_values': (None, _parse_read_history_line),

    'write_history_end': (None, _parse_write_history_line),
    'write_history_interval': (None, _parse_write_history_line),
    'write_history_values': (None, _parse_write_history_line),

    'cell_stats_end': (None, _parse_cell_stats_end_line),
    'cell_stats_interval': (None, _parse_cell_stats_end_line),
    'cell_processed_cells': (None, _parse_cell_processed_cells_line),
    'cell_queued_cells': (None, _parse_cell_queued_cells_line),
    'cell_time_in_queue': (None, _parse_cell_time_in_queue_line),
    'cell_circuits_per_decile': (None, _parse_cell_circuits_per_decile_line),

    'dir_stats_end': (None, _parse_dirreq_stats_end_line),
    'dir_stats_interval': (None, _parse_dirreq_stats_end_line),
    'dir_v2_ips': (None, _parse_dirreq_v2_ips_line),
    'dir_v3_ips': (None, _parse_dirreq_v3_ips_line),
    'dir_v2_share': (None, _parse_dirreq_v2_share_line),
    'dir_v3_share': (None, _parse_dirreq_v3_share_line),
    'dir_v2_requests': (None, _parse_dirreq_v2_reqs_line),
    'dir_v3_requests': (None, _parse_dirreq_v3_reqs_line),
    'dir_v2_responses': (None, _parse_dirreq_v2_resp_line),
    'dir_v3_responses': (None, _parse_dirreq_v3_resp_line),
    'dir_v2_responses_unknown': (None, _parse_dirreq_v2_resp_line),
    'dir_v3_responses_unknown': (None, _parse_dirreq_v3_resp_line),
    'dir_v2_direct_dl': (None, _parse_dirreq_v2_direct_dl_line),
    'dir_v3_direct_dl': (None, _parse_dirreq_v3_direct_dl_line),
    'dir_v2_direct_dl_unknown': (None, _parse_dirreq_v2_direct_dl_line),
    'dir_v3_direct_dl_unknown': (None, _parse_dirreq_v3_direct_dl_line),
    'dir_v2_tunneled_dl': (None, _parse_dirreq_v2_tunneled_dl_line),
    'dir_v3_tunneled_dl': (None, _parse_dirreq_v3_tunneled_dl_line),
    'dir_v2_tunneled_dl_unknown': (None, _parse_dirreq_v2_tunneled_dl_line),
    'dir_v3_tunneled_dl_unknown': (None, _parse_dirreq_v3_tunneled_dl_line),

    'dir_read_history_end': (None, _parse_dirreq_read_history_line),
    'dir_read_history_interval': (None, _parse_dirreq_read_history_line),
    'dir_read_history_values': (None, _parse_dirreq_read_history_line),

    'dir_write_history_end': (None, _parse_dirreq_write_history_line),
    'dir_write_history_interval': (None, _parse_dirreq_write_history_line),
    'dir_write_history_values': (None, _parse_dirreq_write_history_line),

    'entry_stats_end': (None, _parse_entry_stats_end_line),
    'entry_stats_interval': (None, _parse_entry_stats_end_line),
    'entry_ips': (None, _parse_entry_ips_line),

    'exit_stats_end': (None, _parse_exit_stats_end_line),
    'exit_stats_interval': (None, _parse_exit_stats_end_line),
    'exit_kibibytes_written': (None, _parse_exit_kibibytes_written_line),
    'exit_kibibytes_read': (None, _parse_exit_kibibytes_read_line),
    'exit_streams_opened': (None, _parse_exit_streams_opened_line),

    'hs_stats_end': (None, _parse_hidden_service_stats_end_line),
    'hs_rend_cells': (None, _parse_hidden_service_rend_relayed_cells_line),
    'hs_rend_cells_attr': ({}, _parse_hidden_service_rend_relayed_cells_line),
    'hs_dir_onions_seen': (None, _parse_hidden_service_dir_onions_seen_line),
    'hs_dir_onions_seen_attr': ({}, _parse_hidden_service_dir_onions_seen_line),

    'bridge_stats_end': (None, _parse_bridge_stats_end_line),
    'bridge_stats_interval': (None, _parse_bridge_stats_end_line),
    'bridge_ips': (None, _parse_bridge_ips_line),
    'geoip_start_time': (None, _parse_geoip_start_time_line),
    'geoip_client_origins': (None, _parse_geoip_client_origins_line),

    'ip_versions': (None, _parse_bridge_ip_versions_line),
    'ip_transports': (None, _parse_bridge_ip_transports_line),
  }

  PARSER_FOR_LINE = {
    'extra-info': _parse_extra_info_line,
    'geoip-db-digest': _parse_geoip_db_digest_line,
    'geoip6-db-digest': _parse_geoip6_db_digest_line,
    'transport': _parse_transport_line,
    'cell-circuits-per-decile': _parse_cell_circuits_per_decile_line,
    'dirreq-v2-resp': _parse_dirreq_v2_resp_line,
    'dirreq-v3-resp': _parse_dirreq_v3_resp_line,
    'dirreq-v2-direct-dl': _parse_dirreq_v2_direct_dl_line,
    'dirreq-v3-direct-dl': _parse_dirreq_v3_direct_dl_line,
    'dirreq-v2-tunneled-dl': _parse_dirreq_v2_tunneled_dl_line,
    'dirreq-v3-tunneled-dl': _parse_dirreq_v3_tunneled_dl_line,
    'dirreq-v2-share': _parse_dirreq_v2_share_line,
    'dirreq-v3-share': _parse_dirreq_v3_share_line,
    'cell-processed-cells': _parse_cell_processed_cells_line,
    'cell-queued-cells': _parse_cell_queued_cells_line,
    'cell-time-in-queue': _parse_cell_time_in_queue_line,
    'published': _parse_published_line,
    'geoip-start-time': _parse_geoip_start_time_line,
    'cell-stats-end': _parse_cell_stats_end_line,
    'entry-stats-end': _parse_entry_stats_end_line,
    'exit-stats-end': _parse_exit_stats_end_line,
    'bridge-stats-end': _parse_bridge_stats_end_line,
    'dirreq-stats-end': _parse_dirreq_stats_end_line,
    'conn-bi-direct': _parse_conn_bi_direct_line,
    'read-history': _parse_read_history_line,
    'write-history': _parse_write_history_line,
    'dirreq-read-history': _parse_dirreq_read_history_line,
    'dirreq-write-history': _parse_dirreq_write_history_line,
    'exit-kibibytes-written': _parse_exit_kibibytes_written_line,
    'exit-kibibytes-read': _parse_exit_kibibytes_read_line,
    'exit-streams-opened': _parse_exit_streams_opened_line,
    'hidserv-stats-end': _parse_hidden_service_stats_end_line,
    'hidserv-rend-relayed-cells': _parse_hidden_service_rend_relayed_cells_line,
    'hidserv-dir-onions-seen': _parse_hidden_service_dir_onions_seen_line,
    'dirreq-v2-ips': _parse_dirreq_v2_ips_line,
    'dirreq-v3-ips': _parse_dirreq_v3_ips_line,
    'dirreq-v2-reqs': _parse_dirreq_v2_reqs_line,
    'dirreq-v3-reqs': _parse_dirreq_v3_reqs_line,
    'geoip-client-origins': _parse_geoip_client_origins_line,
    'entry-ips': _parse_entry_ips_line,
    'bridge-ips': _parse_bridge_ips_line,
    'bridge-ip-versions': _parse_bridge_ip_versions_line,
    'bridge-ip-transports': _parse_bridge_ip_transports_line,
  }

  def __init__(self, raw_contents, validate = False):
    """
    Extra-info descriptor constructor. By default this validates the
    descriptor's content as it's parsed. This validation can be disabled to
    either improve performance or be accepting of malformed data.

    :param str raw_contents: extra-info content provided by the relay
    :param bool validate: checks the validity of the extra-info descriptor if
      **True**, skips these checks otherwise

    :raises: **ValueError** if the contents is malformed and validate is True
    """

    super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load = not validate)
    entries = _get_descriptor_components(raw_contents, validate)

    if validate:
      for keyword in self._required_fields():
        if keyword not in entries:
          raise ValueError("Extra-info descriptor must have a '%s' entry" % keyword)

      for keyword in self._required_fields() + SINGLE_FIELDS:
        if keyword in entries and len(entries[keyword]) > 1:
          raise ValueError("The '%s' entry can only appear once in an extra-info descriptor" % keyword)

      expected_first_keyword = self._first_keyword()
      if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]:
        raise ValueError("Extra-info descriptor must start with a '%s' entry" % expected_first_keyword)

      expected_last_keyword = self._last_keyword()
      if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]:
        raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)

      self._parse(entries, validate)
    else:
      self._entries = entries

  def digest(self):
    """
    Provides the upper-case hex encoded sha1 of our content. This value is part
    of the server descriptor entry for this relay.

    :returns: **str** with the upper-case hex digest value for this server
      descriptor
    """

    raise NotImplementedError('Unsupported Operation: this should be implemented by the ExtraInfoDescriptor subclass')

  def _required_fields(self):
    return REQUIRED_FIELDS

  def _first_keyword(self):
    return 'extra-info'

  def _last_keyword(self):
    return 'router-signature'


class RelayExtraInfoDescriptor(ExtraInfoDescriptor):
  """
  Relay extra-info descriptor, constructed from data such as that provided by
  'GETINFO extra-info/digest/\*', cached descriptors, and metrics
  (`specification <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_).

  :var str signature: **\*** signature for this extrainfo descriptor

  **\*** attribute is required when we're parsed with validation
  """

  ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
    'signature': (None, _parse_router_signature_line),
  })

  PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
    'router-signature': _parse_router_signature_line,
  })

  @lru_cache()
  def digest(self):
    # our digest is calculated from everything except our signature
    raw_content, ending = str(self), '\nrouter-signature\n'
    raw_content = raw_content[:raw_content.find(ending) + len(ending)]
    return hashlib.sha1(stem.util.str_tools._to_bytes(raw_content)).hexdigest().upper()


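# Editor's note, not part of stem: this digest is what ties the two documents
# together. A relay's server descriptor advertises it in its extra-info-digest
# line, so for matching documents...
#
#   server_desc.extra_info_digest == relay_extrainfo_desc.digest()

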
class BridgeExtraInfoDescriptor(ExtraInfoDescriptor):
  """
  Bridge extra-info descriptor (`bridge descriptor specification
  <https://collector.torproject.org/formats.html#bridge-descriptors>`_)
  """

  ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
    '_digest': (None, _parse_router_digest_line),
  })

  PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
    'router-digest': _parse_router_digest_line,
  })

  def digest(self):
    return self._digest

  def _required_fields(self):
    excluded_fields = [
      'router-signature',
    ]

    included_fields = [
      'router-digest',
    ]

    return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields])

  def _last_keyword(self):
    return None
@@ -0,0 +1,422 @@
# Copyright 2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Parsing for Tor hidden service descriptors as described in Tor's `rend-spec
<https://gitweb.torproject.org/torspec.git/tree/rend-spec.txt>`_.

Unlike other descriptor types these describe a hidden service rather than a
relay. They're created by the service, and can only be fetched via relays with
the HSDir flag.

**Module Overview:**

::

  HiddenServiceDescriptor - Tor hidden service descriptor.

.. versionadded:: 1.4.0
"""

# TODO: Add a description for how to retrieve them when tor supports that
# (#14847) and then update #15009.

import base64
import binascii
import collections
import hashlib
import io

import stem.util.connection
import stem.util.str_tools

from stem.descriptor import (
  PGP_BLOCK_END,
  Descriptor,
  _get_descriptor_components,
  _read_until_keywords,
  _bytes_for_block,
  _value,
  _parse_simple_line,
  _parse_timestamp_line,
  _parse_key_block,
)

try:
  # added in python 3.2
  from functools import lru_cache
except ImportError:
  from stem.util.lru_cache import lru_cache

REQUIRED_FIELDS = (
  'rendezvous-service-descriptor',
  'version',
  'permanent-key',
  'secret-id-part',
  'publication-time',
  'protocol-versions',
  'signature',
)

INTRODUCTION_POINTS_ATTR = {
  'identifier': None,
  'address': None,
  'port': None,
  'onion_key': None,
  'service_key': None,
  'intro_authentication': [],
}

# introduction-point fields that can only appear once

SINGLE_INTRODUCTION_POINT_FIELDS = [
  'introduction-point',
  'ip-address',
  'onion-port',
  'onion-key',
  'service-key',
]

BASIC_AUTH = 1
STEALTH_AUTH = 2

IntroductionPoint = collections.namedtuple('IntroductionPoints', INTRODUCTION_POINTS_ATTR.keys())


class DecryptionFailure(Exception):
  """
  Failure to decrypt the hidden service descriptor's introduction-points.
  """


def _parse_file(descriptor_file, validate = False, **kwargs):
|
||||
"""
|
||||
Iterates over the hidden service descriptors in a file.
|
||||
|
||||
:param file descriptor_file: file with descriptor content
|
||||
:param bool validate: checks the validity of the descriptor's content if
|
||||
**True**, skips these checks otherwise
|
||||
:param dict kwargs: additional arguments for the descriptor constructor
|
||||
|
||||
:returns: iterator for :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor`
|
||||
instances in the file
|
||||
|
||||
:raises:
|
||||
* **ValueError** if the contents is malformed and validate is **True**
|
||||
* **IOError** if the file can't be read
|
||||
"""
|
||||
|
||||
while True:
|
||||
descriptor_content = _read_until_keywords('signature', descriptor_file)
|
||||
|
||||
# we've reached the 'signature', now include the pgp style block
|
||||
block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
|
||||
descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
|
||||
|
||||
if descriptor_content:
|
||||
if descriptor_content[0].startswith(b'@type'):
|
||||
descriptor_content = descriptor_content[1:]
|
||||
|
||||
yield HiddenServiceDescriptor(bytes.join(b'', descriptor_content), validate, **kwargs)
|
||||
else:
|
||||
break # done parsing file
|
||||
|
||||
|
||||
def _parse_version_line(descriptor, entries):
|
||||
value = _value('version', entries)
|
||||
|
||||
if value.isdigit():
|
||||
descriptor.version = int(value)
|
||||
else:
|
||||
raise ValueError('version line must have a positive integer value: %s' % value)
|
||||
|
||||
|
||||
def _parse_protocol_versions_line(descriptor, entries):
|
||||
value = _value('protocol-versions', entries)
|
||||
|
||||
try:
|
||||
versions = [int(entry) for entry in value.split(',')]
|
||||
except ValueError:
|
||||
raise ValueError('protocol-versions line has non-numeric versions: protocol-versions %s' % value)
|
||||
|
||||
for v in versions:
|
||||
if v <= 0:
|
||||
raise ValueError('protocol-versions must be positive integers: %s' % value)
|
||||
|
||||
descriptor.protocol_versions = versions
|
||||
|
||||
|
||||
def _parse_introduction_points_line(descriptor, entries):
|
||||
_, block_type, block_contents = entries['introduction-points'][0]
|
||||
|
||||
if not block_contents or block_type != 'MESSAGE':
|
||||
raise ValueError("'introduction-points' should be followed by a MESSAGE block, but was a %s" % block_type)
|
||||
|
||||
descriptor.introduction_points_encoded = block_contents
|
||||
|
||||
try:
|
||||
decoded_field = _bytes_for_block(block_contents)
|
||||
except TypeError:
|
||||
raise ValueError("'introduction-points' isn't base64 encoded content:\n%s" % block_contents)
|
||||
|
||||
auth_types = []
|
||||
|
||||
while decoded_field.startswith(b'service-authentication ') and b'\n' in decoded_field:
|
||||
auth_line, decoded_field = decoded_field.split(b'\n', 1)
|
||||
auth_line_comp = auth_line.split(b' ')
|
||||
|
||||
if len(auth_line_comp) < 3:
|
||||
raise ValueError("Within introduction-points we expected 'service-authentication [auth_type] [auth_data]', but had '%s'" % auth_line)
|
||||
|
||||
auth_types.append((auth_line_comp[1], auth_line_comp[2]))
|
||||
|
||||
descriptor.introduction_points_auth = auth_types
|
||||
descriptor.introduction_points_content = decoded_field
|
||||
|
||||
_parse_rendezvous_service_descriptor_line = _parse_simple_line('rendezvous-service-descriptor', 'descriptor_id')
|
||||
_parse_permanent_key_line = _parse_key_block('permanent-key', 'permanent_key', 'RSA PUBLIC KEY')
|
||||
_parse_secret_id_part_line = _parse_simple_line('secret-id-part', 'secret_id_part')
|
||||
_parse_publication_time_line = _parse_timestamp_line('publication-time', 'published')
|
||||
_parse_signature_line = _parse_key_block('signature', 'signature', 'SIGNATURE')
|
||||
|
||||
|
||||
class HiddenServiceDescriptor(Descriptor):
|
||||
"""
|
||||
Hidden service descriptor.
|
||||
|
||||
:var str descriptor_id: **\*** identifier for this descriptor, this is a base32 hash of several fields
|
||||
:var int version: **\*** hidden service descriptor version
|
||||
:var str permanent_key: **\*** long term key of the hidden service
|
||||
:var str secret_id_part: **\*** hash of the time period, cookie, and replica
|
||||
values so our descriptor_id can be validated
|
||||
:var datetime published: **\*** time in UTC when this descriptor was made
|
||||
:var list protocol_versions: **\*** list of **int** versions that are supported when establishing a connection
|
||||
:var str introduction_points_encoded: raw introduction points blob
|
||||
:var list introduction_points_auth: **\*** tuples of the form
|
||||
(auth_method, auth_data) for our introduction_points_content
|
||||
:var bytes introduction_points_content: decoded introduction-points content
|
||||
without authentication data, if using cookie authentication this is
|
||||
encrypted
|
||||
:var str signature: signature of the descriptor content
|
||||
|
||||
**\*** attribute is either required when we're parsed with validation or has
|
||||
a default value, others are left as **None** if undefined
|
||||
"""
|
||||
|
||||
ATTRIBUTES = {
|
||||
'descriptor_id': (None, _parse_rendezvous_service_descriptor_line),
|
||||
'version': (None, _parse_version_line),
|
||||
'permanent_key': (None, _parse_permanent_key_line),
|
||||
'secret_id_part': (None, _parse_secret_id_part_line),
|
||||
'published': (None, _parse_publication_time_line),
|
||||
'protocol_versions': ([], _parse_protocol_versions_line),
|
||||
'introduction_points_encoded': (None, _parse_introduction_points_line),
|
||||
'introduction_points_auth': ([], _parse_introduction_points_line),
|
||||
'introduction_points_content': (None, _parse_introduction_points_line),
|
||||
'signature': (None, _parse_signature_line),
|
||||
}
|
||||
|
||||
PARSER_FOR_LINE = {
|
||||
'rendezvous-service-descriptor': _parse_rendezvous_service_descriptor_line,
|
||||
'version': _parse_version_line,
|
||||
'permanent-key': _parse_permanent_key_line,
|
||||
'secret-id-part': _parse_secret_id_part_line,
|
||||
'publication-time': _parse_publication_time_line,
|
||||
'protocol-versions': _parse_protocol_versions_line,
|
||||
'introduction-points': _parse_introduction_points_line,
|
||||
'signature': _parse_signature_line,
|
||||
}
|
||||
|
||||
def __init__(self, raw_contents, validate = False):
|
||||
super(HiddenServiceDescriptor, self).__init__(raw_contents, lazy_load = not validate)
|
||||
entries = _get_descriptor_components(raw_contents, validate)
|
||||
|
||||
if validate:
|
||||
for keyword in REQUIRED_FIELDS:
|
||||
if keyword not in entries:
|
||||
raise ValueError("Hidden service descriptor must have a '%s' entry" % keyword)
|
||||
elif keyword in entries and len(entries[keyword]) > 1:
|
||||
raise ValueError("The '%s' entry can only appear once in a hidden service descriptor" % keyword)
|
||||
|
||||
if 'rendezvous-service-descriptor' != list(entries.keys())[0]:
|
||||
raise ValueError("Hidden service descriptor must start with a 'rendezvous-service-descriptor' entry")
|
||||
elif 'signature' != list(entries.keys())[-1]:
|
||||
raise ValueError("Hidden service descriptor must end with a 'signature' entry")
|
||||
|
||||
self._parse(entries, validate)
|
||||
|
||||
if stem.prereq.is_crypto_available():
|
||||
signed_digest = self._digest_for_signature(self.permanent_key, self.signature)
|
||||
content_digest = self._digest_for_content(b'rendezvous-service-descriptor ', b'\nsignature\n')
|
||||
|
||||
if signed_digest != content_digest:
|
||||
raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, content_digest))
|
||||
else:
|
||||
self._entries = entries
|
||||
|
||||
@lru_cache()
|
||||
def introduction_points(self, authentication_cookie = None):
|
||||
"""
|
||||
Provides this service's introduction points. This provides a list of
|
||||
IntroductionPoint instances, which have the following attributes...
|
||||
|
||||
* **identifier** (str): hash of this introduction point's identity key
|
||||
* **address** (str): address of this introduction point
|
||||
* **port** (int): port where this introduction point is listening
|
||||
* **onion_key** (str): public key for communicating with this introduction point
|
||||
* **service_key** (str): public key for communicating with this hidden service
|
||||
* **intro_authentication** (list): tuples of the form (auth_type, auth_data)
|
||||
for establishing a connection
|
||||
|
||||
:param str authentication_cookie: cookie to decrypt the introduction-points
|
||||
if it's encrypted
|
||||
|
||||
:returns: **list** of IntroductionPoint instances
|
||||
|
||||
:raises:
|
||||
* **ValueError** if our introduction-points are malformed
|
||||
* **DecryptionFailure** if unable to decrypt this field
|
||||
"""
|
||||
|
||||
content = self.introduction_points_content
|
||||
|
||||
if not content:
|
||||
return []
|
||||
elif authentication_cookie:
|
||||
if not stem.prereq.is_crypto_available():
|
||||
raise DecryptionFailure('Decrypting introduction-points requires pycrypto')
|
||||
|
||||
try:
|
||||
missing_padding = len(authentication_cookie) % 4
|
||||
authentication_cookie = base64.b64decode(stem.util.str_tools._to_bytes(authentication_cookie) + b'=' * missing_padding)
|
||||
except TypeError as exc:
|
||||
raise DecryptionFailure('authentication_cookie must be a base64 encoded string (%s)' % exc)
|
||||
|
||||
authentication_type = int(binascii.hexlify(content[0:1]), 16)
|
||||
|
||||
if authentication_type == BASIC_AUTH:
|
||||
content = HiddenServiceDescriptor._decrypt_basic_auth(content, authentication_cookie)
|
||||
elif authentication_type == STEALTH_AUTH:
|
||||
content = HiddenServiceDescriptor._decrypt_stealth_auth(content, authentication_cookie)
|
||||
else:
|
||||
raise DecryptionFailure("Unrecognized authentication type '%s', currently we only support basic auth (%s) and stealth auth (%s)" % (authentication_type, BASIC_AUTH, STEALTH_AUTH))
|
||||
|
||||
if not content.startswith(b'introduction-point '):
|
||||
raise DecryptionFailure('Unable to decrypt the introduction-points, maybe this is the wrong key?')
|
||||
elif not content.startswith(b'introduction-point '):
|
||||
raise DecryptionFailure('introduction-points content is encrypted, you need to provide its authentication_cookie')
|
||||
|
||||
return HiddenServiceDescriptor._parse_introduction_points(content)
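# Hedged usage sketch ('desc' is assumed to be a parsed
# HiddenServiceDescriptor; the cookie argument is only needed when the
# service uses basic or stealth authentication):
#
#   for intro in desc.introduction_points():
#     print('%s:%i (identifier: %s)' % (intro.address, intro.port, intro.identifier))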
|
||||
|
||||
@staticmethod
|
||||
def _decrypt_basic_auth(content, authentication_cookie):
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util import Counter
|
||||
from Crypto.Util.number import bytes_to_long
|
||||
|
||||
try:
|
||||
client_blocks = int(binascii.hexlify(content[1:2]), 16)
|
||||
except ValueError:
|
||||
raise DecryptionFailure("When using basic auth the content should start with a number of blocks but wasn't a hex digit: %s" % binascii.hexlify(content[1:2]))
|
||||
|
||||
# parse the client id and encrypted session keys
|
||||
|
||||
client_entries_length = client_blocks * 16 * 20
|
||||
client_entries = content[2:2 + client_entries_length]
|
||||
client_keys = [(client_entries[i:i + 4], client_entries[i + 4:i + 20]) for i in range(0, client_entries_length, 4 + 16)]
|
||||
|
||||
iv = content[2 + client_entries_length:2 + client_entries_length + 16]
|
||||
encrypted = content[2 + client_entries_length + 16:]
|
||||
|
||||
client_id = hashlib.sha1(authentication_cookie + iv).digest()[:4]
|
||||
|
||||
for entry_id, encrypted_session_key in client_keys:
|
||||
if entry_id != client_id:
|
||||
continue # not the session key for this client
|
||||
|
||||
# try decrypting the session key
|
||||
|
||||
counter = Counter.new(128, initial_value = 0)
|
||||
cipher = AES.new(authentication_cookie, AES.MODE_CTR, counter = counter)
|
||||
session_key = cipher.decrypt(encrypted_session_key)
|
||||
|
||||
# attempt to decrypt the intro points with the session key
|
||||
|
||||
counter = Counter.new(128, initial_value = bytes_to_long(iv))
|
||||
cipher = AES.new(session_key, AES.MODE_CTR, counter = counter)
|
||||
decrypted = cipher.decrypt(encrypted)
|
||||
|
||||
# check if the decryption looks correct
|
||||
|
||||
if decrypted.startswith(b'introduction-point '):
|
||||
return decrypted
|
||||
|
||||
return content # nope, unable to decrypt the content
|
||||
|
||||
@staticmethod
|
||||
def _decrypt_stealth_auth(content, authentication_cookie):
|
||||
from Crypto.Cipher import AES
|
||||
from Crypto.Util import Counter
|
||||
from Crypto.Util.number import bytes_to_long
|
||||
|
||||
# byte 1 = authentication type, 2-17 = initialization vector, 18 on = encrypted content
|
||||
|
||||
iv, encrypted = content[1:17], content[17:]
|
||||
counter = Counter.new(128, initial_value = bytes_to_long(iv))
|
||||
cipher = AES.new(authentication_cookie, AES.MODE_CTR, counter = counter)
|
||||
|
||||
return cipher.decrypt(encrypted)
|
||||
|
||||
@staticmethod
|
||||
def _parse_introduction_points(content):
|
||||
"""
|
||||
Provides the parsed list of IntroductionPoint for the unencrypted content.
|
||||
"""
|
||||
|
||||
introduction_points = []
|
||||
content_io = io.BytesIO(content)
|
||||
|
||||
while True:
|
||||
content = b''.join(_read_until_keywords('introduction-point', content_io, ignore_first = True))
|
||||
|
||||
if not content:
|
||||
break # reached the end
|
||||
|
||||
attr = dict(INTRODUCTION_POINTS_ATTR)
|
||||
entries = _get_descriptor_components(content, False)
|
||||
|
||||
for keyword, values in list(entries.items()):
|
||||
value, block_type, block_contents = values[0]
|
||||
|
||||
if keyword in SINGLE_INTRODUCTION_POINT_FIELDS and len(values) > 1:
|
||||
raise ValueError("'%s' can only appear once in an introduction-point block, but appeared %i times" % (keyword, len(values)))
|
||||
|
||||
if keyword == 'introduction-point':
|
||||
attr['identifier'] = value
|
||||
elif keyword == 'ip-address':
|
||||
if not stem.util.connection.is_valid_ipv4_address(value):
|
||||
raise ValueError("'%s' is an invalid IPv4 address" % value)
|
||||
|
||||
attr['address'] = value
|
||||
elif keyword == 'onion-port':
|
||||
if not stem.util.connection.is_valid_port(value):
|
||||
raise ValueError("'%s' is an invalid port" % value)
|
||||
|
||||
attr['port'] = int(value)
|
||||
elif keyword == 'onion-key':
|
||||
attr['onion_key'] = block_contents
|
||||
elif keyword == 'service-key':
|
||||
attr['service_key'] = block_contents
|
||||
elif keyword == 'intro-authentication':
|
||||
auth_entries = []
|
||||
|
||||
for auth_value, _, _ in values:
|
||||
if ' ' not in auth_value:
|
||||
raise ValueError("We expected 'intro-authentication [auth_type] [auth_data]', but had '%s'" % auth_value)
|
||||
|
||||
auth_type, auth_data = auth_value.split(' ')[:2]
|
||||
auth_entries.append((auth_type, auth_data))
|
||||
|
||||
introduction_points.append(IntroductionPoint(**attr))
|
||||
|
||||
return introduction_points
|
|
@@ -0,0 +1,314 @@
|
|||
# Copyright 2013-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Parsing for Tor microdescriptors, which contain a distilled version of a
|
||||
relay's server descriptor. As of Tor version 0.2.3.3-alpha Tor no longer
|
||||
downloads server descriptors by default, opting for microdescriptors instead.
|
||||
|
||||
Unlike most descriptor documents these aren't available on the metrics site
|
||||
(since they don't contain any information that the server descriptors don't).
|
||||
|
||||
The limited information in microdescriptors makes them rather clunky to use
|
||||
compared with server descriptors. For instance microdescriptors lack the
|
||||
relay's fingerprint, making it difficult to use them to look up the relay's
|
||||
other descriptors.
|
||||
|
||||
To do so you need to match the microdescriptor's digest against its
|
||||
corresponding router status entry. For added fun as of this writing the
|
||||
controller doesn't even surface those router status entries
|
||||
(:trac:`7953`).
|
||||
|
||||
For instance, here's an example that prints the nickname and fingerprints of
|
||||
the exit relays.
|
||||
|
||||
::
|
||||
|
||||
import os
|
||||
|
||||
from stem.control import Controller
|
||||
from stem.descriptor import parse_file
|
||||
|
||||
with Controller.from_port(port = 9051) as controller:
|
||||
controller.authenticate()
|
||||
|
||||
exit_digests = set()
|
||||
data_dir = controller.get_conf('DataDirectory')
|
||||
|
||||
for desc in controller.get_microdescriptors():
|
||||
if desc.exit_policy.is_exiting_allowed():
|
||||
exit_digests.add(desc.digest)
|
||||
|
||||
print 'Exit Relays:'
|
||||
|
||||
for desc in parse_file(os.path.join(data_dir, 'cached-microdesc-consensus')):
|
||||
if desc.digest in exit_digests:
|
||||
print ' %s (%s)' % (desc.nickname, desc.fingerprint)
|
||||
|
||||
Doing the same is trivial with server descriptors...
|
||||
|
||||
::
|
||||
|
||||
from stem.descriptor import parse_file
|
||||
|
||||
print 'Exit Relays:'
|
||||
|
||||
for desc in parse_file('/home/atagar/.tor/cached-descriptors'):
|
||||
if desc.exit_policy.is_exiting_allowed():
|
||||
print ' %s (%s)' % (desc.nickname, desc.fingerprint)
|
||||
|
||||
**Module Overview:**
|
||||
|
||||
::
|
||||
|
||||
Microdescriptor - Tor microdescriptor.
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
|
||||
import stem.exit_policy
|
||||
|
||||
from stem.descriptor import (
|
||||
Descriptor,
|
||||
_get_descriptor_components,
|
||||
_read_until_keywords,
|
||||
_value,
|
||||
_parse_simple_line,
|
||||
_parse_key_block,
|
||||
)
|
||||
|
||||
from stem.descriptor.router_status_entry import (
|
||||
_parse_a_line,
|
||||
_parse_p_line,
|
||||
)
|
||||
|
||||
try:
|
||||
# added in python 3.2
|
||||
from functools import lru_cache
|
||||
except ImportError:
|
||||
from stem.util.lru_cache import lru_cache
|
||||
|
||||
REQUIRED_FIELDS = (
|
||||
'onion-key',
|
||||
)
|
||||
|
||||
SINGLE_FIELDS = (
|
||||
'onion-key',
|
||||
'ntor-onion-key',
|
||||
'family',
|
||||
'p',
|
||||
'p6',
|
||||
)
|
||||
|
||||
|
||||
def _parse_file(descriptor_file, validate = False, **kwargs):
|
||||
"""
|
||||
Iterates over the microdescriptors in a file.
|
||||
|
||||
:param file descriptor_file: file with descriptor content
|
||||
:param bool validate: checks the validity of the descriptor's content if
|
||||
**True**, skips these checks otherwise
|
||||
:param dict kwargs: additional arguments for the descriptor constructor
|
||||
|
||||
:returns: iterator for Microdescriptor instances in the file
|
||||
|
||||
:raises:
|
||||
* **ValueError** if the contents is malformed and validate is True
|
||||
* **IOError** if the file can't be read
|
||||
"""
|
||||
|
||||
while True:
|
||||
annotations = _read_until_keywords('onion-key', descriptor_file)
|
||||
|
||||
# read until we reach an annotation or onion-key line
|
||||
descriptor_lines = []
|
||||
|
||||
# read the onion-key line, done if we're at the end of the document
|
||||
|
||||
onion_key_line = descriptor_file.readline()
|
||||
|
||||
if onion_key_line:
|
||||
descriptor_lines.append(onion_key_line)
|
||||
else:
|
||||
break
|
||||
|
||||
while True:
|
||||
last_position = descriptor_file.tell()
|
||||
line = descriptor_file.readline()
|
||||
|
||||
if not line:
|
||||
break # EOF
|
||||
elif line.startswith(b'@') or line.startswith(b'onion-key'):
|
||||
descriptor_file.seek(last_position)
|
||||
break
|
||||
else:
|
||||
descriptor_lines.append(line)
|
||||
|
||||
if descriptor_lines:
|
||||
if descriptor_lines[0].startswith(b'@type'):
|
||||
descriptor_lines = descriptor_lines[1:]
|
||||
|
||||
# strip newlines from annotations
|
||||
annotations = list(map(bytes.strip, annotations))
|
||||
|
||||
descriptor_text = bytes.join(b'', descriptor_lines)
|
||||
|
||||
yield Microdescriptor(descriptor_text, validate, annotations, **kwargs)
|
||||
else:
|
||||
break # done parsing descriptors
|
||||
|
||||
|
||||
def _parse_id_line(descriptor, entries):
|
||||
value = _value('id', entries)
|
||||
value_comp = value.split()
|
||||
|
||||
if len(value_comp) >= 2:
|
||||
descriptor.identifier_type = value_comp[0]
|
||||
descriptor.identifier = value_comp[1]
|
||||
else:
|
||||
raise ValueError("'id' lines should contain both the key type and digest: id %s" % value)
|
||||
|
||||
|
||||
_parse_digest = lambda descriptor, entries: setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper())
|
||||
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
|
||||
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
|
||||
_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', _value('family', entries).split(' '))
|
||||
_parse_p6_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('p6', entries)))
|
||||
|
||||
|
||||
class Microdescriptor(Descriptor):
|
||||
"""
|
||||
Microdescriptor (`descriptor specification
|
||||
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_)
|
||||
|
||||
:var str digest: **\*** hex digest for this microdescriptor, this can be used
|
||||
to match against the corresponding digest attribute of a
|
||||
:class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3`
|
||||
:var str onion_key: **\*** key used to encrypt EXTEND cells
|
||||
:var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
|
||||
:var list or_addresses: **\*** alternative for our address/or_port attributes, each
|
||||
entry is a tuple of the form (address (**str**), port (**int**), is_ipv6
|
||||
(**bool**))
|
||||
:var list family: **\*** nicknames or fingerprints of declared family
|
||||
:var stem.exit_policy.MicroExitPolicy exit_policy: **\*** relay's exit policy
|
||||
:var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6
|
||||
:var str identifier_type: identity digest key type
|
||||
:var str identifier: base64 encoded identity digest, this is only used for collision prevention (:trac:`11743`)
|
||||
|
||||
**\*** attribute is required when we're parsed with validation
|
||||
|
||||
.. versionchanged:: 1.1.0
|
||||
Added the identifier and identifier_type attributes.
|
||||
"""
|
||||
|
||||
ATTRIBUTES = {
|
||||
'onion_key': (None, _parse_onion_key_line),
|
||||
'ntor_onion_key': (None, _parse_ntor_onion_key_line),
|
||||
'or_addresses': ([], _parse_a_line),
|
||||
'family': ([], _parse_family_line),
|
||||
'exit_policy': (stem.exit_policy.MicroExitPolicy('reject 1-65535'), _parse_p_line),
|
||||
'exit_policy_v6': (None, _parse_p6_line),
|
||||
'identifier_type': (None, _parse_id_line),
|
||||
'identifier': (None, _parse_id_line),
|
||||
'digest': (None, _parse_digest),
|
||||
}
|
||||
|
||||
PARSER_FOR_LINE = {
|
||||
'onion-key': _parse_onion_key_line,
|
||||
'ntor-onion-key': _parse_ntor_onion_key_line,
|
||||
'a': _parse_a_line,
|
||||
'family': _parse_family_line,
|
||||
'p': _parse_p_line,
|
||||
'p6': _parse_p6_line,
|
||||
'id': _parse_id_line,
|
||||
}
|
||||
|
||||
def __init__(self, raw_contents, validate = False, annotations = None):
|
||||
super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate)
|
||||
self._annotation_lines = annotations if annotations else []
|
||||
entries = _get_descriptor_components(raw_contents, validate)
|
||||
|
||||
if validate:
|
||||
self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper()
|
||||
self._parse(entries, validate)
|
||||
self._check_constraints(entries)
|
||||
else:
|
||||
self._entries = entries
|
||||
|
||||
@lru_cache()
|
||||
def get_annotations(self):
|
||||
"""
|
||||
Provides content that appeared prior to the descriptor. If this comes from
|
||||
the cached-microdescs then this commonly contains content like...
|
||||
|
||||
::
|
||||
|
||||
@last-listed 2013-02-24 00:18:30
|
||||
|
||||
:returns: **dict** with the key/value pairs in our annotations
|
||||
"""
|
||||
|
||||
annotation_dict = {}
|
||||
|
||||
for line in self._annotation_lines:
|
||||
if b' ' in line:
|
||||
key, value = line.split(b' ', 1)
|
||||
annotation_dict[key] = value
|
||||
else:
|
||||
annotation_dict[line] = None
|
||||
|
||||
return annotation_dict
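# Sketch of what this returns for tor's cached microdescriptors (the path
# is an assumption):
#
#   from stem.descriptor import parse_file
#
#   for desc in parse_file('/var/lib/tor/cached-microdescs', 'microdescriptor 1.0'):
#     print(desc.get_annotations())  # e.g. {b'@last-listed': b'2013-02-24 00:18:30'}
#     break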
|
||||
|
||||
def get_annotation_lines(self):
|
||||
"""
|
||||
Provides the lines of content that appeared prior to the descriptor. This
|
||||
is the same as the
|
||||
:func:`~stem.descriptor.microdescriptor.Microdescriptor.get_annotations`
|
||||
results, but with the unparsed lines and ordering retained.
|
||||
|
||||
:returns: **list** with the lines of annotation that came before this descriptor
|
||||
"""
|
||||
|
||||
return self._annotation_lines
|
||||
|
||||
def _check_constraints(self, entries):
|
||||
"""
|
||||
Does a basic check that the entries conform to this descriptor type's
|
||||
constraints.
|
||||
|
||||
:param dict entries: keyword => (value, pgp key) entries
|
||||
|
||||
:raises: **ValueError** if an issue arises in validation
|
||||
"""
|
||||
|
||||
for keyword in REQUIRED_FIELDS:
|
||||
if keyword not in entries:
|
||||
raise ValueError("Microdescriptor must have a '%s' entry" % keyword)
|
||||
|
||||
for keyword in SINGLE_FIELDS:
|
||||
if keyword in entries and len(entries[keyword]) > 1:
|
||||
raise ValueError("The '%s' entry can only appear once in a microdescriptor" % keyword)
|
||||
|
||||
if 'onion-key' != list(entries.keys())[0]:
|
||||
raise ValueError("Microdescriptor must start with a 'onion-key' entry")
|
||||
|
||||
def _name(self, is_plural = False):
|
||||
return 'microdescriptors' if is_plural else 'microdescriptor'
|
||||
|
||||
def _compare(self, other, method):
|
||||
if not isinstance(other, Microdescriptor):
|
||||
return False
|
||||
|
||||
return method(str(self).strip(), str(other).strip())
|
||||
|
||||
def __hash__(self):
|
||||
return hash(str(self).strip())
|
||||
|
||||
def __eq__(self, other):
|
||||
return self._compare(other, lambda s, o: s == o)
|
||||
|
||||
def __lt__(self, other):
|
||||
return self._compare(other, lambda s, o: s < o)
|
||||
|
||||
def __le__(self, other):
|
||||
return self._compare(other, lambda s, o: s <= o)
|
1444 Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py (new file; diff suppressed because it is too large)
574 Shared/lib/python3.4/site-packages/stem/descriptor/reader.py (new file)
|
@@ -0,0 +1,574 @@
|
|||
# Copyright 2012-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Utilities for reading descriptors from local directories and archives. This is
|
||||
mostly done through the :class:`~stem.descriptor.reader.DescriptorReader`
|
||||
class, which is an iterator for the descriptor data in a series of
|
||||
destinations. For example...
|
||||
|
||||
::
|
||||
|
||||
my_descriptors = [
|
||||
'/tmp/server-descriptors-2012-03.tar.bz2',
|
||||
'/tmp/archived_descriptors/',
|
||||
]
|
||||
|
||||
# prints the contents of all the descriptor files
|
||||
with DescriptorReader(my_descriptors) as reader:
|
||||
for descriptor in reader:
|
||||
print descriptor
|
||||
|
||||
This ignores files that cannot be processed due to read errors or unparsable
|
||||
content. To be notified of skipped files you can register a listener with
|
||||
:func:`~stem.descriptor.reader.DescriptorReader.register_skip_listener`.
|
||||
|
||||
The :class:`~stem.descriptor.reader.DescriptorReader` keeps track of the last
|
||||
modified timestamps for descriptor files that it has read so it can skip
|
||||
unchanged files if run again. This listing of processed files can also be
|
||||
persisted and applied to other
|
||||
:class:`~stem.descriptor.reader.DescriptorReader` instances. For example, the
|
||||
following prints descriptors as they're changed over the course of a minute,
|
||||
and picks up where it left off if run again...
|
||||
|
||||
::
|
||||
|
||||
reader = DescriptorReader(['/tmp/descriptor_data'])
|
||||
|
||||
try:
|
||||
processed_files = load_processed_files('/tmp/used_descriptors')
|
||||
reader.set_processed_files(processed_files)
|
||||
except: pass # could not load, maybe this is the first run
|
||||
|
||||
start_time = time.time()
|
||||
|
||||
while (time.time() - start_time) < 60:
|
||||
# prints any descriptors that have changed since last checked
|
||||
with reader:
|
||||
for descriptor in reader:
|
||||
print descriptor
|
||||
|
||||
time.sleep(1)
|
||||
|
||||
save_processed_files('/tmp/used_descriptors', reader.get_processed_files())
|
||||
|
||||
**Module Overview:**
|
||||
|
||||
::
|
||||
|
||||
load_processed_files - Loads a listing of processed files
|
||||
save_processed_files - Saves a listing of processed files
|
||||
|
||||
DescriptorReader - Iterator for descriptor data on the local file system
|
||||
|- get_processed_files - provides the listing of files that we've processed
|
||||
|- set_processed_files - sets our tracking of the files we have processed
|
||||
|- register_read_listener - adds a listener for when files are read
|
||||
|- register_skip_listener - adds a listener that's notified of skipped files
|
||||
|- start - begins reading descriptor data
|
||||
|- stop - stops reading descriptor data
|
||||
|- __enter__ / __exit__ - manages the descriptor reader thread in the context
|
||||
+- __iter__ - iterates over descriptor data in unread files
|
||||
|
||||
FileSkipped - Base exception for a file that was skipped
|
||||
|- AlreadyRead - We've already read a file with this last modified timestamp
|
||||
|- ParsingFailure - Contents can't be parsed as descriptor data
|
||||
|- UnrecognizedType - File extension indicates non-descriptor data
|
||||
+- ReadFailed - Wraps an error that was raised while reading the file
|
||||
+- FileMissing - File does not exist
|
||||
"""
|
||||
|
||||
import mimetypes
|
||||
import os
|
||||
import tarfile
|
||||
import threading
|
||||
|
||||
try:
|
||||
import queue
|
||||
except ImportError:
|
||||
import Queue as queue
|
||||
|
||||
import stem.descriptor
|
||||
import stem.prereq
|
||||
import stem.util.system
|
||||
|
||||
from stem import str_type
|
||||
|
||||
# flag to indicate when the reader thread is out of descriptor files to read
|
||||
FINISHED = 'DONE'
|
||||
|
||||
|
||||
class FileSkipped(Exception):
|
||||
"Base error when we can't provide descriptor data from a file."
|
||||
|
||||
|
||||
class AlreadyRead(FileSkipped):
|
||||
"""
|
||||
Already read a file with this 'last modified' timestamp or later.
|
||||
|
||||
:param int last_modified: unix timestamp for when the file was last modified
|
||||
:param int last_modified_when_read: unix timestamp for the modification time
|
||||
when we last read this file
|
||||
"""
|
||||
|
||||
def __init__(self, last_modified, last_modified_when_read):
|
||||
super(AlreadyRead, self).__init__('File has already been read since it was last modified. modification time: %s, last read: %s' % (last_modified, last_modified_when_read))
|
||||
self.last_modified = last_modified
|
||||
self.last_modified_when_read = last_modified_when_read
|
||||
|
||||
|
||||
class ParsingFailure(FileSkipped):
|
||||
"""
|
||||
File contents could not be parsed as descriptor data.
|
||||
|
||||
:param ValueError exception: issue that arose when parsing
|
||||
"""
|
||||
|
||||
def __init__(self, parsing_exception):
|
||||
super(ParsingFailure, self).__init__(parsing_exception)
|
||||
self.exception = parsing_exception
|
||||
|
||||
|
||||
class UnrecognizedType(FileSkipped):
|
||||
"""
|
||||
File doesn't contain descriptor data. This could either be due to its file
|
||||
type or because it doesn't conform to a recognizable descriptor type.
|
||||
|
||||
:param tuple mime_type: the (type, encoding) tuple provided by mimetypes.guess_type()
|
||||
"""
|
||||
|
||||
def __init__(self, mime_type):
|
||||
super(UnrecognizedType, self).__init__('Unrecognized mime type: %s (%s)' % mime_type)
|
||||
self.mime_type = mime_type
|
||||
|
||||
|
||||
class ReadFailed(FileSkipped):
|
||||
"""
|
||||
An IOError occurred while trying to read the file.
|
||||
|
||||
:param IOError exception: issue that arose when reading the file, **None** if
|
||||
this arose due to the file not being present
|
||||
"""
|
||||
|
||||
def __init__(self, read_exception):
|
||||
super(ReadFailed, self).__init__(read_exception)
|
||||
self.exception = read_exception
|
||||
|
||||
|
||||
class FileMissing(ReadFailed):
|
||||
'File does not exist.'
|
||||
|
||||
def __init__(self):
|
||||
super(FileMissing, self).__init__('File does not exist')
|
||||
|
||||
|
||||
def load_processed_files(path):
|
||||
"""
|
||||
Loads a dictionary of 'path => last modified timestamp' mappings, as
|
||||
persisted by :func:`~stem.descriptor.reader.save_processed_files`, from a
|
||||
file.
|
||||
|
||||
:param str path: location to load the processed files dictionary from
|
||||
|
||||
:returns: **dict** of 'path (**str**) => last modified unix timestamp
|
||||
(**int**)' mappings
|
||||
|
||||
:raises:
|
||||
* **IOError** if unable to read the file
|
||||
* **TypeError** if unable to parse the file's contents
|
||||
"""
|
||||
|
||||
processed_files = {}
|
||||
|
||||
with open(path) as input_file:
|
||||
for line in input_file.readlines():
|
||||
line = line.strip()
|
||||
|
||||
if not line:
|
||||
continue # skip blank lines
|
||||
|
||||
if ' ' not in line:
|
||||
raise TypeError('Malformed line: %s' % line)
|
||||
|
||||
path, timestamp = line.rsplit(' ', 1)
|
||||
|
||||
if not os.path.isabs(path):
|
||||
raise TypeError("'%s' is not an absolute path" % path)
|
||||
elif not timestamp.isdigit():
|
||||
raise TypeError("'%s' is not an integer timestamp" % timestamp)
|
||||
|
||||
processed_files[path] = int(timestamp)
|
||||
|
||||
return processed_files
|
||||
|
||||
|
||||
def save_processed_files(path, processed_files):
|
||||
"""
|
||||
Persists a dictionary of 'path => last modified timestamp' mappings (as
|
||||
provided by the DescriptorReader's
|
||||
:func:`~stem.descriptor.reader.DescriptorReader.get_processed_files` method)
|
||||
so that they can be loaded later and applied to another
|
||||
:class:`~stem.descriptor.reader.DescriptorReader`.
|
||||
|
||||
:param str path: location to save the processed files dictionary to
|
||||
:param dict processed_files: 'path => last modified' mappings
|
||||
|
||||
:raises:
|
||||
* **IOError** if unable to write to the file
|
||||
* **TypeError** if processed_files is of the wrong type
|
||||
"""
|
||||
|
||||
# makes the parent directory if it doesn't already exist
|
||||
try:
|
||||
path_dir = os.path.dirname(path)
|
||||
|
||||
if not os.path.exists(path_dir):
|
||||
os.makedirs(path_dir)
|
||||
except OSError as exc:
|
||||
raise IOError(exc)
|
||||
|
||||
with open(path, 'w') as output_file:
|
||||
for path, timestamp in list(processed_files.items()):
|
||||
if not os.path.isabs(path):
|
||||
raise TypeError('Only absolute paths are acceptable: %s' % path)
|
||||
|
||||
output_file.write('%s %i\n' % (path, timestamp))
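# Round-trip sketch for these two helpers (path and timestamp are made up):
#
#   processed = {'/tmp/descriptor_data/example.desc': 1420070400}
#   save_processed_files('/tmp/used_descriptors', processed)
#   assert load_processed_files('/tmp/used_descriptors') == processed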
|
||||
|
||||
|
||||
class DescriptorReader(object):
|
||||
"""
|
||||
Iterator for the descriptor data on the local file system. This can process
|
||||
text files, tarball archives (gzip or bzip2), or recurse directories.
|
||||
|
||||
By default this limits the number of descriptors that we'll read ahead before
|
||||
waiting for our caller to fetch some of them. This is included to avoid
|
||||
unbounded memory usage.
|
||||
|
||||
Our persistence_path argument is a convenient method to persist the listing
|
||||
of files we have processed between runs, however it doesn't allow for error
|
||||
handling. If you want that then use the
|
||||
:func:`~stem.descriptor.reader.load_processed_files` and
|
||||
:func:`~stem.descriptor.reader.save_processed_files` functions instead.
|
||||
|
||||
:param str,list target: path or list of paths for files or directories to be read from
|
||||
:param bool validate: checks the validity of the descriptor's content if
|
||||
**True**, skips these checks otherwise
|
||||
:param bool follow_links: determines if we'll follow symlinks when traversing
|
||||
directories (requires python 2.6)
|
||||
:param int buffer_size: descriptors we'll buffer before waiting for some to
|
||||
be read, this is unbounded if zero
|
||||
:param str persistence_path: if set we will load and save processed file
|
||||
listings from this path, errors are ignored
|
||||
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
|
||||
which to parse :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
|
||||
:param dict kwargs: additional arguments for the descriptor constructor
|
||||
"""
|
||||
|
||||
def __init__(self, target, validate = False, follow_links = False, buffer_size = 100, persistence_path = None, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs):
|
||||
if isinstance(target, (bytes, str_type)):
|
||||
self._targets = [target]
|
||||
else:
|
||||
self._targets = target
|
||||
|
||||
# expand any relative paths we got
|
||||
|
||||
self._targets = list(map(os.path.abspath, self._targets))
|
||||
|
||||
self._validate = validate
|
||||
self._follow_links = follow_links
|
||||
self._persistence_path = persistence_path
|
||||
self._document_handler = document_handler
|
||||
self._kwargs = kwargs
|
||||
self._read_listeners = []
|
||||
self._skip_listeners = []
|
||||
self._processed_files = {}
|
||||
|
||||
self._reader_thread = None
|
||||
self._reader_thread_lock = threading.RLock()
|
||||
|
||||
self._iter_lock = threading.RLock()
|
||||
self._iter_notice = threading.Event()
|
||||
|
||||
self._is_stopped = threading.Event()
|
||||
self._is_stopped.set()
|
||||
|
||||
# Descriptors that we have read but not yet provided to the caller. A
|
||||
# FINISHED entry is used by the reading thread to indicate the end.
|
||||
|
||||
self._unreturned_descriptors = queue.Queue(buffer_size)
|
||||
|
||||
if self._persistence_path:
|
||||
try:
|
||||
processed_files = load_processed_files(self._persistence_path)
|
||||
self.set_processed_files(processed_files)
|
||||
except:
|
||||
pass
|
||||
|
||||
def get_processed_files(self):
|
||||
"""
|
||||
For each file that we have read descriptor data from this provides a
|
||||
mapping of the form...
|
||||
|
||||
::
|
||||
|
||||
absolute path (str) => last modified unix timestamp (int)
|
||||
|
||||
This includes entries set through the
|
||||
:func:`~stem.descriptor.reader.DescriptorReader.set_processed_files`
|
||||
method. Each run resets this to only the files that were present during
|
||||
that run.
|
||||
|
||||
:returns: **dict** with the absolute paths and unix timestamp for the last
|
||||
modified times of the files we have processed
|
||||
"""
|
||||
|
||||
# make sure that we only provide back absolute paths
|
||||
return dict((os.path.abspath(k), v) for (k, v) in list(self._processed_files.items()))
|
||||
|
||||
def set_processed_files(self, processed_files):
|
||||
"""
|
||||
Sets the listing of the files we have processed. Most often this is used
|
||||
with a newly created :class:`~stem.descriptor.reader.DescriptorReader` to
|
||||
pre-populate the listing of descriptor files that we have seen.
|
||||
|
||||
:param dict processed_files: mapping of absolute paths (**str**) to unix
|
||||
timestamps for the last modified time (**int**)
|
||||
"""
|
||||
|
||||
self._processed_files = dict(processed_files)
|
||||
|
||||
def register_read_listener(self, listener):
|
||||
"""
|
||||
Registers a listener for when files are read. This is executed prior to
|
||||
processing files. Listeners are expected to be of the form...
|
||||
|
||||
::
|
||||
|
||||
my_listener(path)
|
||||
|
||||
:param functor listener: functor to be notified when files are read
|
||||
"""
|
||||
|
||||
self._read_listeners.append(listener)
|
||||
|
||||
def register_skip_listener(self, listener):
|
||||
"""
|
||||
Registers a listener for files that are skipped. This listener is expected
|
||||
to be a functor of the form...
|
||||
|
||||
::
|
||||
|
||||
my_listener(path, exception)
|
||||
|
||||
:param functor listener: functor to be notified of files that are skipped
|
||||
due to read errors or because they couldn't be parsed as valid descriptor data
|
||||
"""
|
||||
|
||||
self._skip_listeners.append(listener)
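# Hedged sketch of a skip listener that just logs (the reader construction
# mirrors the module example above; the path is an assumption):
#
#   def print_skip(path, exception):
#     print('skipped %s: %s' % (path, exception))
#
#   reader = DescriptorReader(['/tmp/descriptor_data'])
#   reader.register_skip_listener(print_skip)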
|
||||
|
||||
def get_buffered_descriptor_count(self):
|
||||
"""
|
||||
Provides the number of descriptors that are waiting to be iterated over.
|
||||
This is limited to the buffer_size that we were constructed with.
|
||||
|
||||
:returns: **int** for the estimated number of currently enqueued
|
||||
descriptors, this is not entirely reliable
|
||||
"""
|
||||
|
||||
return self._unreturned_descriptors.qsize()
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Starts reading our descriptor files.
|
||||
|
||||
:raises: **ValueError** if we're already reading the descriptor files
|
||||
"""
|
||||
|
||||
with self._reader_thread_lock:
|
||||
if self._reader_thread:
|
||||
raise ValueError('Already running, you need to call stop() first')
|
||||
else:
|
||||
self._is_stopped.clear()
|
||||
self._reader_thread = threading.Thread(target = self._read_descriptor_files, name='Descriptor Reader')
|
||||
self._reader_thread.setDaemon(True)
|
||||
self._reader_thread.start()
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
Stops further reading of descriptor files.
|
||||
"""
|
||||
|
||||
with self._reader_thread_lock:
|
||||
self._is_stopped.set()
|
||||
self._iter_notice.set()
|
||||
|
||||
# clears our queue to unblock enqueue calls
|
||||
|
||||
try:
|
||||
while True:
|
||||
self._unreturned_descriptors.get_nowait()
|
||||
except queue.Empty:
|
||||
pass
|
||||
|
||||
self._reader_thread.join()
|
||||
self._reader_thread = None
|
||||
|
||||
if self._persistence_path:
|
||||
try:
|
||||
processed_files = self.get_processed_files()
|
||||
save_processed_files(self._persistence_path, processed_files)
|
||||
except:
|
||||
pass
|
||||
|
||||
def _read_descriptor_files(self):
|
||||
new_processed_files = {}
|
||||
remaining_files = list(self._targets)
|
||||
|
||||
while remaining_files and not self._is_stopped.is_set():
|
||||
target = remaining_files.pop(0)
|
||||
|
||||
if not os.path.exists(target):
|
||||
self._notify_skip_listeners(target, FileMissing())
|
||||
continue
|
||||
|
||||
if os.path.isdir(target):
|
||||
walker = os.walk(target, followlinks = self._follow_links)
|
||||
self._handle_walker(walker, new_processed_files)
|
||||
else:
|
||||
self._handle_file(target, new_processed_files)
|
||||
|
||||
self._processed_files = new_processed_files
|
||||
|
||||
if not self._is_stopped.is_set():
|
||||
self._unreturned_descriptors.put(FINISHED)
|
||||
|
||||
self._iter_notice.set()
|
||||
|
||||
def __iter__(self):
|
||||
with self._iter_lock:
|
||||
while not self._is_stopped.is_set():
|
||||
try:
|
||||
descriptor = self._unreturned_descriptors.get_nowait()
|
||||
|
||||
if descriptor == FINISHED:
|
||||
break
|
||||
else:
|
||||
yield descriptor
|
||||
except queue.Empty:
|
||||
self._iter_notice.wait()
|
||||
self._iter_notice.clear()
|
||||
|
||||
def _handle_walker(self, walker, new_processed_files):
|
||||
for root, _, files in walker:
|
||||
for filename in files:
|
||||
self._handle_file(os.path.join(root, filename), new_processed_files)
|
||||
|
||||
# this can take a while if, say, we're including the root directory
|
||||
if self._is_stopped.is_set():
|
||||
return
|
||||
|
||||
def _handle_file(self, target, new_processed_files):
|
||||
# This is a file. Register its last modified timestamp and check if
|
||||
# it's a file that we should skip.
|
||||
|
||||
try:
|
||||
last_modified = int(os.stat(target).st_mtime)
|
||||
last_used = self._processed_files.get(target)
|
||||
new_processed_files[target] = last_modified
|
||||
except OSError as exc:
|
||||
self._notify_skip_listeners(target, ReadFailed(exc))
|
||||
return
|
||||
|
||||
if last_used and last_used >= last_modified:
|
||||
self._notify_skip_listeners(target, AlreadyRead(last_modified, last_used))
|
||||
return
|
||||
|
||||
# Block devices and such are never descriptors, and can cause us to block
|
||||
# for quite a while, so we skip anything that isn't a regular file.
|
||||
|
||||
if not os.path.isfile(target):
|
||||
return
|
||||
|
||||
# The mimetypes module only checks the file extension. To actually
|
||||
# check the content (like the 'file' command) we'd need something like
|
||||
# pymagic (https://github.com/cloudburst/pymagic).
|
||||
|
||||
target_type = mimetypes.guess_type(target)
|
||||
|
||||
if target_type[0] in (None, 'text/plain'):
|
||||
# either '.txt' or an unknown type
|
||||
self._handle_descriptor_file(target, target_type)
|
||||
elif stem.util.system.is_tarfile(target):
|
||||
# handles gzip, bz2, and decompressed tarballs among others
|
||||
self._handle_archive(target)
|
||||
else:
|
||||
self._notify_skip_listeners(target, UnrecognizedType(target_type))
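# For reference, standard library behavior the dispatch above relies on:
#
#   >>> import mimetypes
#   >>> mimetypes.guess_type('descriptors.txt')
#   ('text/plain', None)
#   >>> mimetypes.guess_type('archive.tar.bz2')
#   ('application/x-tar', 'bzip2')
#   >>> mimetypes.guess_type('no_extension')
#   (None, None)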
|
||||
|
||||
def _handle_descriptor_file(self, target, mime_type):
|
||||
try:
|
||||
self._notify_read_listeners(target)
|
||||
|
||||
with open(target, 'rb') as target_file:
|
||||
for desc in stem.descriptor.parse_file(target_file, validate = self._validate, document_handler = self._document_handler, **self._kwargs):
|
||||
if self._is_stopped.is_set():
|
||||
return
|
||||
|
||||
self._unreturned_descriptors.put(desc)
|
||||
self._iter_notice.set()
|
||||
except TypeError as exc:
|
||||
self._notify_skip_listeners(target, UnrecognizedType(mime_type))
|
||||
except ValueError as exc:
|
||||
self._notify_skip_listeners(target, ParsingFailure(exc))
|
||||
except IOError as exc:
|
||||
self._notify_skip_listeners(target, ReadFailed(exc))
|
||||
|
||||
def _handle_archive(self, target):
|
||||
# TODO: When dropping python 2.6 support go back to using 'with' for
|
||||
# tarfiles...
|
||||
#
|
||||
# http://bugs.python.org/issue7232
|
||||
|
||||
tar_file = None
|
||||
|
||||
try:
|
||||
self._notify_read_listeners(target)
|
||||
tar_file = tarfile.open(target)
|
||||
|
||||
for tar_entry in tar_file:
|
||||
if tar_entry.isfile():
|
||||
entry = tar_file.extractfile(tar_entry)
|
||||
|
||||
try:
|
||||
for desc in stem.descriptor.parse_file(entry, validate = self._validate, document_handler = self._document_handler, **self._kwargs):
|
||||
if self._is_stopped.is_set():
|
||||
return
|
||||
|
||||
desc._set_path(os.path.abspath(target))
|
||||
desc._set_archive_path(tar_entry.name)
|
||||
self._unreturned_descriptors.put(desc)
|
||||
self._iter_notice.set()
|
||||
except TypeError as exc:
|
||||
self._notify_skip_listeners(target, ParsingFailure(exc))
|
||||
except ValueError as exc:
|
||||
self._notify_skip_listeners(target, ParsingFailure(exc))
|
||||
finally:
|
||||
entry.close()
|
||||
except IOError as exc:
|
||||
self._notify_skip_listeners(target, ReadFailed(exc))
|
||||
finally:
|
||||
if tar_file:
|
||||
tar_file.close()
|
||||
|
||||
def _notify_read_listeners(self, path):
|
||||
for listener in self._read_listeners:
|
||||
listener(path)
|
||||
|
||||
def _notify_skip_listeners(self, path, exception):
|
||||
for listener in self._skip_listeners:
|
||||
listener(path, exception)
|
||||
|
||||
def __enter__(self):
|
||||
self.start()
|
||||
return self
|
||||
|
||||
def __exit__(self, exit_type, value, traceback):
|
||||
self.stop()
|
777 Shared/lib/python3.4/site-packages/stem/descriptor/remote.py (new file)
|
@@ -0,0 +1,777 @@
|
|||
# Copyright 2013-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Module for remotely retrieving descriptors from directory authorities and
|
||||
mirrors. This is most easily done through the
|
||||
:class:`~stem.descriptor.remote.DescriptorDownloader` class, which issues
|
||||
:class:`~stem.descriptor.remote.Query` instances to get you the descriptor
|
||||
content. For example...
|
||||
|
||||
::
|
||||
|
||||
from stem.descriptor.remote import DescriptorDownloader
|
||||
|
||||
downloader = DescriptorDownloader(
|
||||
use_mirrors = True,
|
||||
timeout = 10,
|
||||
)
|
||||
|
||||
query = downloader.get_server_descriptors()
|
||||
|
||||
print 'Exit Relays:'
|
||||
|
||||
try:
|
||||
for desc in query.run():
|
||||
if desc.exit_policy.is_exiting_allowed():
|
||||
print ' %s (%s)' % (desc.nickname, desc.fingerprint)
|
||||
|
||||
print
|
||||
print 'Query took %0.2f seconds' % query.runtime
|
||||
except Exception as exc:
|
||||
print 'Unable to retrieve the server descriptors: %s' % exc
|
||||
|
||||
If you don't care about errors then you can also simply iterate over the query
|
||||
itself...
|
||||
|
||||
::
|
||||
|
||||
for desc in downloader.get_server_descriptors():
|
||||
if desc.exit_policy.is_exiting_allowed():
|
||||
print ' %s (%s)' % (desc.nickname, desc.fingerprint)
|
||||
|
||||
::
|
||||
|
||||
get_authorities - Provides tor directory information.
|
||||
|
||||
DirectoryAuthority - Information about a tor directory authority.
|
||||
|
||||
Query - Asynchronous request to download tor descriptors
|
||||
|- start - issues the query if it isn't already running
|
||||
+- run - blocks until the request is finished and provides the results
|
||||
|
||||
DescriptorDownloader - Configurable class for issuing queries
|
||||
|- use_directory_mirrors - use directory mirrors to download future descriptors
|
||||
|- get_server_descriptors - provides present server descriptors
|
||||
|- get_extrainfo_descriptors - provides present extrainfo descriptors
|
||||
|- get_microdescriptors - provides present microdescriptors
|
||||
|- get_consensus - provides the present consensus or router status entries
|
||||
|- get_key_certificates - provides present authority key certificates
|
||||
+- query - request an arbitrary descriptor resource
|
||||
|
||||
.. versionadded:: 1.1.0
|
||||
|
||||
.. data:: MAX_FINGERPRINTS
|
||||
|
||||
Maximum number of descriptors that can be requested at a time by their
|
||||
fingerprints.
|
||||
|
||||
.. data:: MAX_MICRODESCRIPTOR_HASHES
|
||||
|
||||
Maximum number of microdescriptors that can be requested at a time by their
|
||||
hashes.
|
||||
"""
|
||||
|
||||
import io
|
||||
import random
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import zlib
|
||||
|
||||
try:
|
||||
import urllib.request as urllib
|
||||
except ImportError:
|
||||
import urllib2 as urllib
|
||||
|
||||
import stem.descriptor
|
||||
|
||||
from stem import Flag
|
||||
from stem.util import log
|
||||
|
||||
# Tor has a limited number of descriptors we can fetch explicitly by their
|
||||
# fingerprint or hashes due to a limit on the url length by squid proxies.
|
||||
|
||||
MAX_FINGERPRINTS = 96
|
||||
MAX_MICRODESCRIPTOR_HASHES = 92
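# Hedged sketch of staying under this cap ('fingerprints' is a hypothetical
# list of relay fingerprints):
#
#   for i in range(0, len(fingerprints), MAX_FINGERPRINTS):
#     batch = fingerprints[i:i + MAX_FINGERPRINTS]
#     # issue one query per batch of at most MAX_FINGERPRINTS entries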
|
||||
|
||||
# We commonly only want authorities that vote in the consensus, and hence have
|
||||
# a v3ident.
|
||||
|
||||
HAS_V3IDENT = lambda auth: auth.v3ident is not None
|
||||
|
||||
|
||||
def _guess_descriptor_type(resource):
|
||||
# Attempts to determine the descriptor type based on the resource url. This
|
||||
# raises a ValueError if the resource isn't recognized.
|
||||
|
||||
if resource.startswith('/tor/server/'):
|
||||
return 'server-descriptor 1.0'
|
||||
elif resource.startswith('/tor/extra/'):
|
||||
return 'extra-info 1.0'
|
||||
elif resource.startswith('/tor/micro/'):
|
||||
return 'microdescriptor 1.0'
|
||||
elif resource.startswith('/tor/status-vote/'):
|
||||
return 'network-status-consensus-3 1.0'
|
||||
elif resource.startswith('/tor/keys/'):
|
||||
return 'dir-key-certificate-3 1.0'
|
||||
else:
|
||||
raise ValueError("Unable to determine the descriptor type for '%s'" % resource)
|
||||
|
||||
|
||||
class Query(object):
|
||||
"""
|
||||
Asynchronous request for descriptor content from a directory authority or
|
||||
mirror. These can either be made through the
|
||||
:class:`~stem.descriptor.remote.DescriptorDownloader` or directly for more
|
||||
advanced usage.
|
||||
|
||||
To block on the response and get results either call
|
||||
:func:`~stem.descriptor.remote.Query.run` or iterate over the Query. The
|
||||
:func:`~stem.descriptor.remote.Query.run` method passes along any errors that
|
||||
arise...
|
||||
|
||||
::
|
||||
|
||||
from stem.descriptor.remote import Query
|
||||
|
||||
query = Query(
|
||||
'/tor/server/all.z',
|
||||
block = True,
|
||||
timeout = 30,
|
||||
)
|
||||
|
||||
print 'Current relays:'
|
||||
|
||||
if not query.error:
|
||||
for desc in query:
|
||||
print desc.fingerprint
|
||||
else:
|
||||
print 'Unable to retrieve the server descriptors: %s' % query.error
|
||||
|
||||
... while iterating fails silently...
|
||||
|
||||
::
|
||||
|
||||
print 'Current relays:'
|
||||
|
||||
for desc in Query('/tor/server/all.z', 'server-descriptor 1.0'):
|
||||
print desc.fingerprint
|
||||
|
||||
In either case exceptions are available via our 'error' attribute.
|
||||
|
||||
Tor provides quite a few different descriptor resources via its directory
|
||||
protocol (see section 4.2 and later of the `dir-spec
|
||||
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_).
|
||||
Commonly useful ones include...
|
||||
|
||||
===================================== ===========
|
||||
Resource Description
|
||||
===================================== ===========
|
||||
/tor/server/all.z all present server descriptors
|
||||
/tor/server/fp/<fp1>+<fp2>+<fp3>.z server descriptors with the given fingerprints
|
||||
/tor/extra/all.z all present extrainfo descriptors
|
||||
/tor/extra/fp/<fp1>+<fp2>+<fp3>.z extrainfo descriptors with the given fingerprints
|
||||
/tor/micro/d/<hash1>-<hash2>.z microdescriptors with the given hashes
|
||||
/tor/status-vote/current/consensus.z present consensus
|
||||
/tor/keys/all.z key certificates for the authorities
|
||||
/tor/keys/fp/<v3ident1>+<v3ident2>.z key certificates for specific authorities
|
||||
===================================== ===========
|
||||
|
||||
The '.z' suffix can be excluded to get a plaintext rather than compressed
|
||||
response. Compression is handled transparently, so this shouldn't matter to
|
||||
the caller.
|
||||
|
||||
:var str resource: resource being fetched, such as '/tor/server/all.z'
|
||||
:var str descriptor_type: type of descriptors being fetched (for options see
|
||||
:func:`~stem.descriptor.__init__.parse_file`), this is guessed from the
|
||||
resource if **None**
|
||||
|
||||
:var list endpoints: (address, dirport) tuples of the authority or mirror
|
||||
we're querying, this uses authorities if undefined
|
||||
:var int retries: number of times to attempt the request if downloading it
|
||||
fails
|
||||
:var bool fall_back_to_authority: when retrying, issues the last
|
||||
request to a directory authority if **True**
|
||||
|
||||
:var str content: downloaded descriptor content
|
||||
:var Exception error: exception if a problem occurred
|
||||
:var bool is_done: flag that indicates if our request has finished
|
||||
:var str download_url: last url used to download the descriptor, this is
|
||||
unset until we've actually made a download attempt
|
||||
|
||||
:var float start_time: unix timestamp when we first started running
|
||||
:var float timeout: duration before we'll time out our request
|
||||
:var float runtime: time our query took, this is **None** if it's not yet
|
||||
finished
|
||||
|
||||
:var bool validate: checks the validity of the descriptor's content if
|
||||
**True**, skips these checks otherwise
|
||||
:var stem.descriptor.__init__.DocumentHandler document_handler: method in
|
||||
which to parse a :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
|
||||
:var dict kwargs: additional arguments for the descriptor constructor
|
||||
|
||||
:param bool start: start making the request when constructed (default is **True**)
|
||||
:param bool block: only return after the request has been completed, this is
|
||||
the same as running **query.run(True)** (default is **False**)
|
||||
"""
|
||||
|
||||
  def __init__(self, resource, descriptor_type = None, endpoints = None, retries = 2, fall_back_to_authority = False, timeout = None, start = True, block = False, validate = False, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs):
    if not resource.startswith('/'):
      raise ValueError("Resources should start with a '/': %s" % resource)

    self.resource = resource

    if descriptor_type:
      self.descriptor_type = descriptor_type
    else:
      self.descriptor_type = _guess_descriptor_type(resource)

    self.endpoints = endpoints if endpoints else []
    self.retries = retries
    self.fall_back_to_authority = fall_back_to_authority

    self.content = None
    self.error = None
    self.is_done = False
    self.download_url = None

    self.start_time = None
    self.timeout = timeout
    self.runtime = None

    self.validate = validate
    self.document_handler = document_handler
    self.kwargs = kwargs

    self._downloader_thread = None
    self._downloader_thread_lock = threading.RLock()

    if start:
      self.start()

    if block:
      self.run(True)

  def start(self):
    """
    Starts downloading the descriptors if we haven't started already.
    """

    with self._downloader_thread_lock:
      if self._downloader_thread is None:
        self._downloader_thread = threading.Thread(
          name = 'Descriptor Query',
          target = self._download_descriptors,
          args = (self.retries,)
        )

        self._downloader_thread.setDaemon(True)
        self._downloader_thread.start()

  def run(self, suppress = False):
    """
    Blocks until our request is complete then provides the descriptors. If we
    haven't yet started our request then this does so.

    :param bool suppress: avoids raising exceptions if **True**

    :returns: list of the requested :class:`~stem.descriptor.__init__.Descriptor` instances

    :raises:
      Using the iterator can fail with the following if **suppress** is
      **False**...

        * **ValueError** if the descriptor contents is malformed
        * **socket.timeout** if our request timed out
        * **urllib2.URLError** for most request failures

      Note that the urllib2 module may fail with other exception types, in
      which case we'll pass it along.
    """

    return list(self._run(suppress))

  def _run(self, suppress):
    with self._downloader_thread_lock:
      self.start()
      self._downloader_thread.join()

      if self.error:
        if suppress:
          return

        raise self.error
      else:
        if self.content is None:
          if suppress:
            return

          raise ValueError('BUG: _download_descriptors() finished without either results or an error')

        try:
          results = stem.descriptor.parse_file(
            io.BytesIO(self.content),
            self.descriptor_type,
            validate = self.validate,
            document_handler = self.document_handler,
            **self.kwargs
          )

          for desc in results:
            yield desc
        except ValueError as exc:
          self.error = exc  # encountered a parsing error

          if suppress:
            return

          raise self.error

  def __iter__(self):
    for desc in self._run(True):
      yield desc

  def _pick_url(self, use_authority = False):
    """
    Provides a url that can be queried. If we have multiple endpoints then one
    will be picked randomly.

    :param bool use_authority: ignores our endpoints and uses a directory
      authority instead

    :returns: **str** for the url being queried by this request
    """

    if use_authority or not self.endpoints:
      # wrapped in list() so random.choice() gets a sequence under python 3
      authority = random.choice(list(filter(HAS_V3IDENT, get_authorities().values())))
      address, dirport = authority.address, authority.dir_port
    else:
      address, dirport = random.choice(self.endpoints)

    return 'http://%s:%i/%s' % (address, dirport, self.resource.lstrip('/'))

  def _download_descriptors(self, retries):
    try:
      use_authority = retries == 0 and self.fall_back_to_authority
      self.download_url = self._pick_url(use_authority)

      self.start_time = time.time()
      response = urllib2.urlopen(self.download_url, timeout = self.timeout).read()

      if self.download_url.endswith('.z'):
        response = zlib.decompress(response)

      self.content = response.strip()

      self.runtime = time.time() - self.start_time
      log.trace("Descriptors retrieved from '%s' in %0.2fs" % (self.download_url, self.runtime))
    except:
      exc = sys.exc_info()[1]

      if retries > 0:
        log.debug("Unable to download descriptors from '%s' (%i retries remaining): %s" % (self.download_url, retries, exc))
        return self._download_descriptors(retries - 1)
      else:
        log.debug("Unable to download descriptors from '%s': %s" % (self.download_url, exc))
        self.error = exc
    finally:
      self.is_done = True


class DescriptorDownloader(object):
  """
  Configurable class that issues :class:`~stem.descriptor.remote.Query`
  instances on your behalf.

  :param bool use_mirrors: downloads the present consensus and uses the
    directory mirrors to fetch future requests, this fails silently if the
    consensus cannot be downloaded
  :param default_args: default arguments for the
    :class:`~stem.descriptor.remote.Query` constructor
  """

  def __init__(self, use_mirrors = False, **default_args):
    self._default_args = default_args

    authorities = filter(HAS_V3IDENT, get_authorities().values())
    self._endpoints = [(auth.address, auth.dir_port) for auth in authorities]

    if use_mirrors:
      try:
        start_time = time.time()
        self.use_directory_mirrors()
        log.debug('Retrieved directory mirrors (took %0.2fs)' % (time.time() - start_time))
      except Exception as exc:
        log.debug('Unable to retrieve directory mirrors: %s' % exc)

  def use_directory_mirrors(self):
    """
    Downloads the present consensus and configures ourselves to use directory
    mirrors, in addition to authorities.

    :returns: :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`
      from which we got the directory mirrors

    :raises: **Exception** if unable to determine the directory mirrors
    """

    authorities = filter(HAS_V3IDENT, get_authorities().values())
    new_endpoints = set([(auth.address, auth.dir_port) for auth in authorities])

    consensus = list(self.get_consensus(document_handler = stem.descriptor.DocumentHandler.DOCUMENT).run())[0]

    for desc in consensus.routers.values():
      if Flag.V2DIR in desc.flags:
        new_endpoints.add((desc.address, desc.dir_port))

    # we need our endpoints to be a list rather than set for random.choice()

    self._endpoints = list(new_endpoints)

    return consensus

  def get_server_descriptors(self, fingerprints = None, **query_args):
    """
    Provides the server descriptors with the given fingerprints. If no
    fingerprints are provided then this returns all descriptors in the present
    consensus.

    :param str,list fingerprints: fingerprint or list of fingerprints to be
      retrieved, gets all descriptors if **None**
    :param query_args: additional arguments for the
      :class:`~stem.descriptor.remote.Query` constructor

    :returns: :class:`~stem.descriptor.remote.Query` for the server descriptors

    :raises: **ValueError** if we request more than 96 descriptors by their
      fingerprints (this is due to a limit on the url length by squid proxies).
    """

    resource = '/tor/server/all.z'

    if isinstance(fingerprints, str):
      fingerprints = [fingerprints]

    if fingerprints:
      if len(fingerprints) > MAX_FINGERPRINTS:
        raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS)

      resource = '/tor/server/fp/%s.z' % '+'.join(fingerprints)

    return self.query(resource, **query_args)

  def get_extrainfo_descriptors(self, fingerprints = None, **query_args):
    """
    Provides the extrainfo descriptors with the given fingerprints. If no
    fingerprints are provided then this returns all descriptors in the present
    consensus.

    :param str,list fingerprints: fingerprint or list of fingerprints to be
      retrieved, gets all descriptors if **None**
    :param query_args: additional arguments for the
      :class:`~stem.descriptor.remote.Query` constructor

    :returns: :class:`~stem.descriptor.remote.Query` for the extrainfo descriptors

    :raises: **ValueError** if we request more than 96 descriptors by their
      fingerprints (this is due to a limit on the url length by squid proxies).
    """

    resource = '/tor/extra/all.z'

    if isinstance(fingerprints, str):
      fingerprints = [fingerprints]

    if fingerprints:
      if len(fingerprints) > MAX_FINGERPRINTS:
        raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS)

      resource = '/tor/extra/fp/%s.z' % '+'.join(fingerprints)

    return self.query(resource, **query_args)

  def get_microdescriptors(self, hashes, **query_args):
    """
    Provides the microdescriptors with the given hashes. To get these see the
    'microdescriptor_hashes' attribute of
    :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`. Note
    that these are only provided via a microdescriptor consensus (such as
    'cached-microdesc-consensus' in your data directory).

    :param str,list hashes: microdescriptor hash or list of hashes to be
      retrieved
    :param query_args: additional arguments for the
      :class:`~stem.descriptor.remote.Query` constructor

    :returns: :class:`~stem.descriptor.remote.Query` for the microdescriptors

    :raises: **ValueError** if we request more than 92 microdescriptors by their
      hashes (this is due to a limit on the url length by squid proxies).
    """

    if isinstance(hashes, str):
      hashes = [hashes]

    if len(hashes) > MAX_MICRODESCRIPTOR_HASHES:
      raise ValueError('Unable to request more than %i microdescriptors at a time by their hashes' % MAX_MICRODESCRIPTOR_HASHES)

    return self.query('/tor/micro/d/%s.z' % '-'.join(hashes), **query_args)

  def get_consensus(self, authority_v3ident = None, **query_args):
    """
    Provides the present router status entries.

    :param str authority_v3ident: fingerprint of the authority key for which
      to get the consensus, see `'v3ident' in tor's config.c
      <https://gitweb.torproject.org/tor.git/tree/src/or/config.c#n819>`_
      for the values.
    :param query_args: additional arguments for the
      :class:`~stem.descriptor.remote.Query` constructor

    :returns: :class:`~stem.descriptor.remote.Query` for the router status
      entries
    """

    resource = '/tor/status-vote/current/consensus'

    if authority_v3ident:
      resource += '/%s' % authority_v3ident

    return self.query(resource + '.z', **query_args)

  def get_vote(self, authority, **query_args):
    """
    Provides the present vote for a given directory authority.

    :param stem.descriptor.remote.DirectoryAuthority authority: authority for
      which to retrieve a vote
    :param query_args: additional arguments for the
      :class:`~stem.descriptor.remote.Query` constructor

    :returns: :class:`~stem.descriptor.remote.Query` for the router status
      entries
    """

    resource = '/tor/status-vote/current/authority'

    if 'endpoints' not in query_args:
      query_args['endpoints'] = [(authority.address, authority.dir_port)]

    return self.query(resource + '.z', **query_args)

  def get_key_certificates(self, authority_v3idents = None, **query_args):
    """
    Provides the key certificates for authorities with the given fingerprints.
    If no fingerprints are provided then this returns all present key
    certificates.

    :param str authority_v3idents: fingerprint or list of fingerprints of the
      authority keys, see `'v3ident' in tor's config.c
      <https://gitweb.torproject.org/tor.git/tree/src/or/config.c#n819>`_
      for the values.
    :param query_args: additional arguments for the
      :class:`~stem.descriptor.remote.Query` constructor

    :returns: :class:`~stem.descriptor.remote.Query` for the key certificates

    :raises: **ValueError** if we request more than 96 key certificates by
      their identity fingerprints (this is due to a limit on the url length by
      squid proxies).
    """

    resource = '/tor/keys/all.z'

    if isinstance(authority_v3idents, str):
      authority_v3idents = [authority_v3idents]

    if authority_v3idents:
      if len(authority_v3idents) > MAX_FINGERPRINTS:
        raise ValueError('Unable to request more than %i key certificates at a time by their identity fingerprints' % MAX_FINGERPRINTS)

      resource = '/tor/keys/fp/%s.z' % '+'.join(authority_v3idents)

    return self.query(resource, **query_args)

  def query(self, resource, **query_args):
    """
    Issues a request for the given resource.

    :param str resource: resource being fetched, such as '/tor/server/all.z'
    :param query_args: additional arguments for the
      :class:`~stem.descriptor.remote.Query` constructor

    :returns: :class:`~stem.descriptor.remote.Query` for the descriptors

    :raises: **ValueError** if resource is clearly invalid or the descriptor
      type can't be determined when 'descriptor_type' is **None**
    """

    args = dict(self._default_args)
    args.update(query_args)

    if 'endpoints' not in args:
      args['endpoints'] = self._endpoints

    if 'fall_back_to_authority' not in args:
      args['fall_back_to_authority'] = True

    return Query(
      resource,
      **args
    )


class DirectoryAuthority(object):
  """
  Tor directory authority, a special type of relay `hardcoded into tor
  <https://gitweb.torproject.org/tor.git/tree/src/or/config.c#n819>`_
  that enumerates the other relays within the network.

  At a very high level tor works as follows...

  1. A volunteer starts up a new tor relay, during which it sends a `server
     descriptor <server_descriptor.html>`_ to each of the directory
     authorities.

  2. Each hour the directory authorities make a `vote <networkstatus.html>`_
     that says who they think the active relays are in the network and some
     attributes about them.

  3. The directory authorities send each other their votes, and compile them
     into the `consensus <networkstatus.html>`_. This document is very similar
     to the votes, the only difference being that the majority of the
     authorities agree upon and sign this document. The individual relay
     entries in the vote or consensus are called `router status entries
     <router_status_entry.html>`_.

  4. Tor clients (people using the service) download the consensus from one of
     the authorities or a mirror to determine the active relays within the
     network. They in turn use this to construct their circuits and use the
     network.

  .. versionchanged:: 1.3.0
     Added the is_bandwidth_authority attribute.

  :var str nickname: nickname of the authority
  :var str address: IP address of the authority, currently they're all IPv4 but
    this may not always be the case
  :var int or_port: port on which the relay services relay traffic
  :var int dir_port: port on which directory information is available
  :var bool is_bandwidth_authority: **True** if this is a bandwidth authority
  :var str fingerprint: relay fingerprint
  :var str v3ident: identity key fingerprint used to sign votes and consensus
  """

  def __init__(self, nickname = None, address = None, or_port = None, dir_port = None, is_bandwidth_authority = False, fingerprint = None, v3ident = None):
    self.nickname = nickname
    self.address = address
    self.or_port = or_port
    self.dir_port = dir_port
    self.is_bandwidth_authority = is_bandwidth_authority
    self.fingerprint = fingerprint
    self.v3ident = v3ident


DIRECTORY_AUTHORITIES = {
  'moria1': DirectoryAuthority(
    nickname = 'moria1',
    address = '128.31.0.39',
    or_port = 9101,
    dir_port = 9131,
    is_bandwidth_authority = True,
    fingerprint = '9695DFC35FFEB861329B9F1AB04C46397020CE31',
    v3ident = 'D586D18309DED4CD6D57C18FDB97EFA96D330566',
  ),
  'tor26': DirectoryAuthority(
    nickname = 'tor26',
    address = '86.59.21.38',
    or_port = 443,
    dir_port = 80,
    is_bandwidth_authority = True,
    fingerprint = '847B1F850344D7876491A54892F904934E4EB85D',
    v3ident = '14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4',
  ),
  'dizum': DirectoryAuthority(
    nickname = 'dizum',
    address = '194.109.206.212',
    or_port = 443,
    dir_port = 80,
    is_bandwidth_authority = False,
    fingerprint = '7EA6EAD6FD83083C538F44038BBFA077587DD755',
    v3ident = 'E8A9C45EDE6D711294FADF8E7951F4DE6CA56B58',
  ),
  'Tonga': DirectoryAuthority(
    nickname = 'Tonga',
    address = '82.94.251.203',
    or_port = 443,
    dir_port = 80,
    is_bandwidth_authority = False,
    fingerprint = '4A0CCD2DDC7995083D73F5D667100C8A5831F16D',
    v3ident = None,  # does not vote in the consensus
  ),
  'gabelmoo': DirectoryAuthority(
    nickname = 'gabelmoo',
    address = '131.188.40.189',
    or_port = 443,
    dir_port = 80,
    is_bandwidth_authority = True,
    fingerprint = 'F2044413DAC2E02E3D6BCF4735A19BCA1DE97281',
    v3ident = 'ED03BB616EB2F60BEC80151114BB25CEF515B226',
  ),
  'dannenberg': DirectoryAuthority(
    nickname = 'dannenberg',
    address = '193.23.244.244',
    or_port = 443,
    dir_port = 80,
    is_bandwidth_authority = False,
    fingerprint = '7BE683E65D48141321C5ED92F075C55364AC7123',
    v3ident = '585769C78764D58426B8B52B6651A5A71137189A',
  ),
  'urras': DirectoryAuthority(
    nickname = 'urras',
    address = '208.83.223.34',
    or_port = 80,
    dir_port = 443,
    is_bandwidth_authority = False,
    fingerprint = '0AD3FA884D18F89EEA2D89C019379E0E7FD94417',
    v3ident = '80550987E1D626E3EBA5E5E75A458DE0626D088C',
  ),
  'maatuska': DirectoryAuthority(
    nickname = 'maatuska',
    address = '171.25.193.9',
    or_port = 80,
    dir_port = 443,
    is_bandwidth_authority = True,
    fingerprint = 'BD6A829255CB08E66FBE7D3748363586E46B3810',
    v3ident = '49015F787433103580E3B66A1707A00E60F2D15B',
  ),
  'Faravahar': DirectoryAuthority(
    nickname = 'Faravahar',
    address = '154.35.175.225',
    or_port = 443,
    dir_port = 80,
    is_bandwidth_authority = False,
    fingerprint = 'CF6D0AAFB385BE71B8E111FC5CFF4B47923733BC',
    v3ident = 'EFCBE720AB3A82B99F9E953CD5BF50F7EEFC7B97',
  ),
  'longclaw': DirectoryAuthority(
    nickname = 'longclaw',
    address = '199.254.238.52',
    or_port = 443,
    dir_port = 80,
    is_bandwidth_authority = True,
    fingerprint = '74A910646BCEEFBCD2E874FC1DC997430F968145',
    v3ident = '23D15D965BC35114467363C165C4F724B64B4F66',
  ),
}


def get_authorities():
  """
  Provides the Tor directory authority information as of **Tor on 11/21/14**.
  The directory information is hardcoded into Tor and occasionally changes, so
  the information this provides might not necessarily match your version of
  tor.

  :returns: dict of str nicknames to :class:`~stem.descriptor.remote.DirectoryAuthority` instances
  """

  return dict(DIRECTORY_AUTHORITIES)
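
# Editor's note: a minimal usage sketch for the module above, added for
# illustration and not part of stem itself. It assumes network access to the
# directory authorities and that this module is importable as
# stem.descriptor.remote.
#
#   from stem.descriptor.remote import DescriptorDownloader
#
#   downloader = DescriptorDownloader(use_mirrors = True, timeout = 10)
#   query = downloader.get_server_descriptors()
#
#   print('Exit relays:')
#
#   for desc in query.run(suppress = True):
#     if desc.exit_policy.is_exiting_allowed():
#       print('  %s (%s)' % (desc.nickname, desc.fingerprint))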
@@ -0,0 +1,625 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Parsing for router status entries, the information for individual routers
within a network status document. This information is provided from a few
sources...

* control port via 'GETINFO ns/\*' and 'GETINFO md/\*' queries
* router entries in a network status document, like the cached-consensus

**Module Overview:**

::

  RouterStatusEntry - Common parent for router status entries
    |- RouterStatusEntryV2 - Entry for a network status v2 document
    |- RouterStatusEntryV3 - Entry for a network status v3 document
    +- RouterStatusEntryMicroV3 - Entry for a microdescriptor flavored v3 document
"""

import base64
import binascii

import stem.exit_policy
import stem.prereq
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
import stem.version

from stem.descriptor import (
  KEYWORD_LINE,
  Descriptor,
  _value,
  _values,
  _get_descriptor_components,
  _read_until_keywords,
)


def _parse_file(document_file, validate, entry_class, entry_keyword = 'r', start_position = None, end_position = None, section_end_keywords = (), extra_args = ()):
  """
  Reads a range of the document_file containing some number of entry_class
  instances. We delimit the entry_class entries by the keyword on their
  first line (entry_keyword). When finished the document is left at the
  end_position.

  Either an end_position or section_end_keywords must be provided.

  :param file document_file: file with network status document content
  :param bool validate: checks the validity of the document's contents if
    **True**, skips these checks otherwise
  :param class entry_class: class to construct instances for
  :param str entry_keyword: first keyword for the entry instances
  :param int start_position: start of the section, default is the current position
  :param int end_position: end of the section
  :param tuple section_end_keywords: keyword(s) that delimit the end of the
    section if no end_position was provided
  :param tuple extra_args: extra arguments for the entry_class (after the
    content and validate flag)

  :returns: iterator over entry_class instances

  :raises:
    * **ValueError** if the contents is malformed and validate is **True**
    * **IOError** if the file can't be read
  """

  if start_position:
    document_file.seek(start_position)
  else:
    start_position = document_file.tell()

  # check if we're starting at the end of the section (ie, there's no entries to read)
  if section_end_keywords:
    first_keyword = None
    line_match = KEYWORD_LINE.match(stem.util.str_tools._to_unicode(document_file.readline()))

    if line_match:
      first_keyword = line_match.groups()[0]

    document_file.seek(start_position)

    if first_keyword in section_end_keywords:
      return

  while end_position is None or document_file.tell() < end_position:
    desc_lines, ending_keyword = _read_until_keywords(
      (entry_keyword,) + section_end_keywords,
      document_file,
      ignore_first = True,
      end_position = end_position,
      include_ending_keyword = True
    )

    desc_content = bytes.join(b'', desc_lines)

    if desc_content:
      yield entry_class(desc_content, validate, *extra_args)

      # check if we stopped at the end of the section
      if ending_keyword in section_end_keywords:
        break
    else:
      break


def _parse_r_line(descriptor, entries):
  # Parses a RouterStatusEntry's 'r' line. They're very nearly identical for
  # all current entry types (v2, v3, and microdescriptor v3) with one little
  # wrinkle: only the microdescriptor flavor excludes a 'digest' field.
  #
  # For v2 and v3 router status entries:
  #   "r" nickname identity digest publication IP ORPort DirPort
  #   example: r mauer BD7xbfsCFku3+tgybEZsg8Yjhvw itcuKQ6PuPLJ7m/Oi928WjO2j8g 2012-06-22 13:19:32 80.101.105.103 9001 0
  #
  # For v3 microdescriptor router status entries:
  #   "r" nickname identity publication IP ORPort DirPort
  #   example: r Konata ARIJF2zbqirB9IwsW0mQznccWww 2012-09-24 13:40:40 69.64.48.168 9001 9030

  value = _value('r', entries)
  include_digest = not isinstance(descriptor, RouterStatusEntryMicroV3)

  r_comp = value.split(' ')

  # inject a None for the digest to normalize the field positioning
  if not include_digest:
    r_comp.insert(2, None)

  if len(r_comp) < 8:
    expected_field_count = 'eight' if include_digest else 'seven'
    raise ValueError("%s 'r' line must have %s values: r %s" % (descriptor._name(), expected_field_count, value))

  if not stem.util.tor_tools.is_valid_nickname(r_comp[0]):
    raise ValueError("%s nickname isn't valid: %s" % (descriptor._name(), r_comp[0]))
  elif not stem.util.connection.is_valid_ipv4_address(r_comp[5]):
    raise ValueError("%s address isn't a valid IPv4 address: %s" % (descriptor._name(), r_comp[5]))
  elif not stem.util.connection.is_valid_port(r_comp[6]):
    raise ValueError('%s ORPort is invalid: %s' % (descriptor._name(), r_comp[6]))
  elif not stem.util.connection.is_valid_port(r_comp[7], allow_zero = True):
    raise ValueError('%s DirPort is invalid: %s' % (descriptor._name(), r_comp[7]))

  descriptor.nickname = r_comp[0]
  descriptor.fingerprint = _base64_to_hex(r_comp[1])

  if include_digest:
    descriptor.digest = _base64_to_hex(r_comp[2])

  descriptor.address = r_comp[5]
  descriptor.or_port = int(r_comp[6])
  descriptor.dir_port = None if r_comp[7] == '0' else int(r_comp[7])

  try:
    published = '%s %s' % (r_comp[3], r_comp[4])
    descriptor.published = stem.util.str_tools._parse_timestamp(published)
  except ValueError:
    raise ValueError("Publication time wasn't parsable: r %s" % value)


def _parse_a_line(descriptor, entries):
  # "a" SP address ":" portlist
  # example: a [2001:888:2133:0:82:94:251:204]:9001

  or_addresses = []

  for value in _values('a', entries):
    if ':' not in value:
      raise ValueError("%s 'a' line must be of the form '[address]:[ports]': a %s" % (descriptor._name(), value))

    address, port = value.rsplit(':', 1)
    is_ipv6 = address.startswith('[') and address.endswith(']')

    if is_ipv6:
      address = address[1:-1]  # remove brackets

    if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or
            (is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))):
      raise ValueError("%s 'a' line has a malformed address: a %s" % (descriptor._name(), value))

    if stem.util.connection.is_valid_port(port):
      or_addresses.append((address, int(port), is_ipv6))
    else:
      raise ValueError("%s 'a' line had an invalid port (%s): a %s" % (descriptor._name(), port, value))

  descriptor.or_addresses = or_addresses


def _parse_s_line(descriptor, entries):
  # "s" Flags
  # example: s Named Running Stable Valid

  value = _value('s', entries)
  flags = [] if value == '' else value.split(' ')
  descriptor.flags = flags

  for flag in flags:
    if flags.count(flag) > 1:
      raise ValueError('%s had duplicate flags: s %s' % (descriptor._name(), value))
    elif flag == '':
      raise ValueError("%s had extra whitespace on its 's' line: s %s" % (descriptor._name(), value))


def _parse_v_line(descriptor, entries):
  # "v" version
  # example: v Tor 0.2.2.35
  #
  # The spec says that if this starts with "Tor " then what follows is a
  # tor version. If not then it has "upgraded to a more sophisticated
  # protocol versioning system".

  value = _value('v', entries)
  descriptor.version_line = value

  if value.startswith('Tor '):
    try:
      descriptor.version = stem.version._get_version(value[4:])
    except ValueError as exc:
      raise ValueError('%s has a malformed tor version (%s): v %s' % (descriptor._name(), exc, value))


def _parse_w_line(descriptor, entries):
  # "w" "Bandwidth=" INT ["Measured=" INT] ["Unmeasured=1"]
  # example: w Bandwidth=7980

  value = _value('w', entries)
  w_comp = value.split(' ')

  if len(w_comp) < 1:
    raise ValueError("%s 'w' line is blank: w %s" % (descriptor._name(), value))
  elif not w_comp[0].startswith('Bandwidth='):
    raise ValueError("%s 'w' line needs to start with a 'Bandwidth=' entry: w %s" % (descriptor._name(), value))

  for w_entry in w_comp:
    if '=' in w_entry:
      w_key, w_value = w_entry.split('=', 1)
    else:
      w_key, w_value = w_entry, None

    if w_key == 'Bandwidth':
      if not (w_value and w_value.isdigit()):
        raise ValueError("%s 'Bandwidth=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))

      descriptor.bandwidth = int(w_value)
    elif w_key == 'Measured':
      if not (w_value and w_value.isdigit()):
        raise ValueError("%s 'Measured=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))

      descriptor.measured = int(w_value)
    elif w_key == 'Unmeasured':
      if w_value != '1':
        raise ValueError("%s 'Unmeasured=' should only have the value of '1': w %s" % (descriptor._name(), value))

      descriptor.is_unmeasured = True
    else:
      descriptor.unrecognized_bandwidth_entries.append(w_entry)

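# Editor's note (illustrative, not part of stem): given the line
# 'w Bandwidth=7980 Measured=8160 Unmeasured=1', the parser above would set
# descriptor.bandwidth = 7980, descriptor.measured = 8160, and
# descriptor.is_unmeasured = True, while any unrecognized 'key=value' entries
# are collected in descriptor.unrecognized_bandwidth_entries.
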
def _parse_p_line(descriptor, entries):
  # "p" ("accept" / "reject") PortList
  # p reject 1-65535
  # example: p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001

  value = _value('p', entries)

  try:
    descriptor.exit_policy = stem.exit_policy.MicroExitPolicy(value)
  except ValueError as exc:
    raise ValueError('%s exit policy is malformed (%s): p %s' % (descriptor._name(), exc, value))


def _parse_m_line(descriptor, entries):
  # "m" methods 1*(algorithm "=" digest)
  # example: m 8,9,10,11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs

  all_hashes = []

  for value in _values('m', entries):
    m_comp = value.split(' ')

    if not (descriptor.document and descriptor.document.is_vote):
      vote_status = 'consensus' if descriptor.document else '<undefined document>'
      raise ValueError("%s 'm' line should only appear in votes (appeared in a %s): m %s" % (descriptor._name(), vote_status, value))
    elif len(m_comp) < 1:
      raise ValueError("%s 'm' line needs to start with a series of methods: m %s" % (descriptor._name(), value))

    try:
      methods = [int(entry) for entry in m_comp[0].split(',')]
    except ValueError:
      raise ValueError('%s microdescriptor methods should be a series of comma separated integers: m %s' % (descriptor._name(), value))

    hashes = {}

    for entry in m_comp[1:]:
      if '=' not in entry:
        raise ValueError("%s can only have a series of 'algorithm=digest' mappings after the methods: m %s" % (descriptor._name(), value))

      hash_name, digest = entry.split('=', 1)
      hashes[hash_name] = digest

    all_hashes.append((methods, hashes))

  descriptor.microdescriptor_hashes = all_hashes


def _parse_microdescriptor_m_line(descriptor, entries):
  # "m" digest
  # example: m aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70

  descriptor.digest = _base64_to_hex(_value('m', entries), check_if_fingerprint = False)


def _base64_to_hex(identity, check_if_fingerprint = True):
  """
  Decodes a base64 value to hex. For example...

  ::

    >>> _base64_to_hex('p1aag7VwarGxqctS7/fS0y5FU+s')
    'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB'

  :param str identity: encoded fingerprint from the consensus
  :param bool check_if_fingerprint: asserts that the result is a fingerprint if **True**

  :returns: **str** with the uppercase hex encoding of the relay's fingerprint

  :raises: **ValueError** if the result isn't a valid fingerprint
  """

  # Trailing equal signs were stripped from the identity, so restore the
  # padding needed to reach a multiple of four characters.
  missing_padding = len(identity) % 4

  if missing_padding:
    identity += '=' * (4 - missing_padding)

  try:
    identity_decoded = base64.b64decode(stem.util.str_tools._to_bytes(identity))
  except (TypeError, binascii.Error):
    raise ValueError("Unable to decode identity string '%s'" % identity)

  fingerprint = binascii.b2a_hex(identity_decoded).upper()

  if stem.prereq.is_python_3():
    fingerprint = stem.util.str_tools._to_unicode(fingerprint)

  if check_if_fingerprint:
    if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
      raise ValueError("Decoded '%s' to be '%s', which isn't a valid fingerprint" % (identity, fingerprint))

  return fingerprint


class RouterStatusEntry(Descriptor):
  """
  Information about an individual router stored within a network status
  document. This is the common parent for concrete status entry types.

  :var stem.descriptor.networkstatus.NetworkStatusDocument document: **\*** document that this descriptor came from

  :var str nickname: **\*** router's nickname
  :var str fingerprint: **\*** router's fingerprint
  :var datetime published: **\*** router's publication
  :var str address: **\*** router's IP address
  :var int or_port: **\*** router's ORPort
  :var int dir_port: **\*** router's DirPort

  :var list flags: **\*** list of :data:`~stem.Flag` associated with the relay

  :var stem.version.Version version: parsed version of tor, this is **None** if
    the relay's using a new versioning scheme
  :var str version_line: versioning information reported by the relay
  """

  ATTRIBUTES = {
    'nickname': (None, _parse_r_line),
    'fingerprint': (None, _parse_r_line),
    'published': (None, _parse_r_line),
    'address': (None, _parse_r_line),
    'or_port': (None, _parse_r_line),
    'dir_port': (None, _parse_r_line),

    'flags': (None, _parse_s_line),

    'version_line': (None, _parse_v_line),
    'version': (None, _parse_v_line),
  }

  PARSER_FOR_LINE = {
    'r': _parse_r_line,
    's': _parse_s_line,
    'v': _parse_v_line,
  }

  def __init__(self, content, validate = False, document = None):
    """
    Parse a router descriptor in a network status document.

    :param str content: router descriptor content to be parsed
    :param NetworkStatusDocument document: document this descriptor came from
    :param bool validate: checks the validity of the content if **True**, skips
      these checks otherwise

    :raises: **ValueError** if the descriptor data is invalid
    """

    super(RouterStatusEntry, self).__init__(content, lazy_load = not validate)
    self.document = document
    entries = _get_descriptor_components(content, validate)

    if validate:
      for keyword in self._required_fields():
        if keyword not in entries:
          raise ValueError("%s must have a '%s' line:\n%s" % (self._name(True), keyword, str(self)))

      for keyword in self._single_fields():
        if keyword in entries and len(entries[keyword]) > 1:
          raise ValueError("%s can only have a single '%s' line, got %i:\n%s" % (self._name(True), keyword, len(entries[keyword]), str(self)))

      if 'r' != list(entries.keys())[0]:
        raise ValueError("%s are expected to start with an 'r' line:\n%s" % (self._name(True), str(self)))

      self._parse(entries, validate)
    else:
      self._entries = entries

  def _name(self, is_plural = False):
    """
    Name for this descriptor type.
    """

    return 'Router status entries' if is_plural else 'Router status entry'

  def _required_fields(self):
    """
    Provides lines that must appear in the descriptor.
    """

    return ()

  def _single_fields(self):
    """
    Provides lines that can only appear in the descriptor once.
    """

    return ()

  def _compare(self, other, method):
    if not isinstance(other, RouterStatusEntry):
      return False

    return method(str(self).strip(), str(other).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)


class RouterStatusEntryV2(RouterStatusEntry):
  """
  Information about an individual router stored within a version 2 network
  status document.

  :var str digest: **\*** router's upper-case hex digest

  **\*** attribute is either required when we're parsed with validation or has
  a default value, others are left as **None** if undefined
  """

  ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
    'digest': (None, _parse_r_line),
  })

  def _name(self, is_plural = False):
    return 'Router status entries (v2)' if is_plural else 'Router status entry (v2)'

  def _required_fields(self):
    return ('r',)

  def _single_fields(self):
    return ('r', 's', 'v')

  def _compare(self, other, method):
    if not isinstance(other, RouterStatusEntryV2):
      return False

    return method(str(self).strip(), str(other).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)


class RouterStatusEntryV3(RouterStatusEntry):
  """
  Information about an individual router stored within a version 3 network
  status document.

  :var list or_addresses: **\*** relay's OR addresses, this is a tuple listing
    of the form (address (**str**), port (**int**), is_ipv6 (**bool**))
  :var str digest: **\*** router's upper-case hex digest

  :var int bandwidth: bandwidth claimed by the relay (in kb/s)
  :var int measured: bandwidth measured to be available by the relay, this is a
    unit-less heuristic generated by the bandwidth authorities to weight relay
    selection
  :var bool is_unmeasured: bandwidth measurement isn't based on three or more
    measurements
  :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
    information that isn't yet recognized

  :var stem.exit_policy.MicroExitPolicy exit_policy: router's exit policy

  :var list microdescriptor_hashes: **\*** tuples of two values, the list of
    consensus methods for generating a set of digests and the 'algorithm =>
    digest' mappings

  **\*** attribute is either required when we're parsed with validation or has
  a default value, others are left as **None** if undefined
  """

  ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
    'digest': (None, _parse_r_line),
    'or_addresses': ([], _parse_a_line),

    'bandwidth': (None, _parse_w_line),
    'measured': (None, _parse_w_line),
    'is_unmeasured': (False, _parse_w_line),
    'unrecognized_bandwidth_entries': ([], _parse_w_line),

    'exit_policy': (None, _parse_p_line),
    'microdescriptor_hashes': ([], _parse_m_line),
  })

  PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{
    'a': _parse_a_line,
    'w': _parse_w_line,
    'p': _parse_p_line,
    'm': _parse_m_line,
  })

  def _name(self, is_plural = False):
    return 'Router status entries (v3)' if is_plural else 'Router status entry (v3)'

  def _required_fields(self):
    return ('r', 's')

  def _single_fields(self):
    return ('r', 's', 'v', 'w', 'p')

  def _compare(self, other, method):
    if not isinstance(other, RouterStatusEntryV3):
      return False

    return method(str(self).strip(), str(other).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)


class RouterStatusEntryMicroV3(RouterStatusEntry):
  """
  Information about an individual router stored within a microdescriptor
  flavored network status document.

  :var int bandwidth: bandwidth claimed by the relay (in kb/s)
  :var int measured: bandwidth measured to be available by the relay
  :var bool is_unmeasured: bandwidth measurement isn't based on three or more
    measurements
  :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
    information that isn't yet recognized

  :var str digest: **\*** router's hex encoded digest of our corresponding microdescriptor

  **\*** attribute is either required when we're parsed with validation or has
  a default value, others are left as **None** if undefined
  """

  ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
    'bandwidth': (None, _parse_w_line),
    'measured': (None, _parse_w_line),
    'is_unmeasured': (False, _parse_w_line),
    'unrecognized_bandwidth_entries': ([], _parse_w_line),

    'digest': (None, _parse_microdescriptor_m_line),
  })

  PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{
    'w': _parse_w_line,
    'm': _parse_microdescriptor_m_line,
  })

  def _name(self, is_plural = False):
    return 'Router status entries (micro v3)' if is_plural else 'Router status entry (micro v3)'

  def _required_fields(self):
    return ('r', 's', 'm')

  def _single_fields(self):
    return ('r', 's', 'v', 'w', 'm')

  def _compare(self, other, method):
    if not isinstance(other, RouterStatusEntryMicroV3):
      return False

    return method(str(self).strip(), str(other).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)
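
# Editor's note: a hedged usage sketch, not part of stem. Assuming a tor data
# directory with a 'cached-consensus' file, the entry classes above are
# typically obtained through stem.descriptor.parse_file...
#
#   import stem.descriptor
#
#   with open('/home/user/.tor/cached-consensus', 'rb') as consensus_file:
#     for entry in stem.descriptor.parse_file(consensus_file, 'network-status-consensus-3 1.0'):
#       print('%s (%s) has flags: %s' % (entry.nickname, entry.fingerprint, ', '.join(entry.flags)))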
@@ -0,0 +1,822 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Parsing for Tor server descriptors, which contain the infrequently changing
information about a Tor relay (contact information, exit policy, public keys,
etc). This information is provided from a few sources...

* The control port via 'GETINFO desc/\*' queries.

* The 'cached-descriptors' file in Tor's data directory.

* Archived descriptors provided by CollecTor
  (https://collector.torproject.org/).

* Directory authorities and mirrors via their DirPort.

**Module Overview:**

::

  ServerDescriptor - Tor server descriptor.
    |- RelayDescriptor - Server descriptor for a relay.
    |
    |- BridgeDescriptor - Scrubbed server descriptor for a bridge.
    |  |- is_scrubbed - checks if our content has been properly scrubbed
    |  +- get_scrubbing_issues - description of issues with our scrubbing
    |
    |- digest - calculates the upper-case hex digest value for our content
    |- get_annotations - dictionary of content prior to the descriptor entry
    +- get_annotation_lines - lines that provided the annotations
"""

import functools
import hashlib
import re

import stem.descriptor.extrainfo_descriptor
import stem.exit_policy
import stem.prereq
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools
import stem.version

from stem import str_type

from stem.descriptor import (
  PGP_BLOCK_END,
  Descriptor,
  _get_descriptor_components,
  _read_until_keywords,
  _bytes_for_block,
  _value,
  _values,
  _parse_simple_line,
  _parse_bytes_line,
  _parse_timestamp_line,
  _parse_forty_character_hex,
  _parse_key_block,
)

try:
  # added in python 3.2
  from functools import lru_cache
except ImportError:
  from stem.util.lru_cache import lru_cache

# relay descriptors must have exactly one of the following
REQUIRED_FIELDS = (
  'router',
  'bandwidth',
  'published',
  'onion-key',
  'signing-key',
  'router-signature',
)

# optional entries that can appear at most once
SINGLE_FIELDS = (
  'platform',
  'fingerprint',
  'hibernating',
  'uptime',
  'contact',
  'read-history',
  'write-history',
  'eventdns',
  'family',
  'caches-extra-info',
  'extra-info-digest',
  'hidden-service-dir',
  'protocols',
  'allow-single-hop-exits',
  'ntor-onion-key',
)

DEFAULT_IPV6_EXIT_POLICY = stem.exit_policy.MicroExitPolicy('reject 1-65535')
REJECT_ALL_POLICY = stem.exit_policy.ExitPolicy('reject *:*')


def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs):
  """
  Iterates over the server descriptors in a file.

  :param file descriptor_file: file with descriptor content
  :param bool is_bridge: parses the file as being a bridge descriptor
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param dict kwargs: additional arguments for the descriptor constructor

  :returns: iterator for ServerDescriptor instances in the file

  :raises:
    * **ValueError** if the contents is malformed and validate is True
    * **IOError** if the file can't be read
  """

  # Handler for relay descriptors
  #
  # Cached descriptors consist of annotations followed by the descriptor
  # itself. For instance...
  #
  #   @downloaded-at 2012-03-14 16:31:05
  #   @source "145.53.65.130"
  #   router caerSidi 71.35.143.157 9001 0 0
  #   platform Tor 0.2.1.30 on Linux x86_64
  #   <rest of the descriptor content>
  #   router-signature
  #   -----BEGIN SIGNATURE-----
  #   <signature for the above descriptor>
  #   -----END SIGNATURE-----
  #
  # Metrics descriptor files are the same, but lack any annotations. Either
  # way parsing proceeds as follows...
  #
  # - parse as annotations until we get to 'router'
  # - parse as descriptor content until we get to 'router-signature' followed
  #   by the end of the signature block
  # - construct a descriptor and provide it back to the caller
  #
  # Any annotations after the last server descriptor are ignored (never
  # provided to the caller).

  while True:
    annotations = _read_until_keywords('router', descriptor_file)

    if not is_bridge:
      descriptor_content = _read_until_keywords('router-signature', descriptor_file)

      # we've reached the 'router-signature', now include the pgp style block

      block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]
      descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True)
    else:
      descriptor_content = _read_until_keywords('router-digest', descriptor_file, True)

    if descriptor_content:
      if descriptor_content[0].startswith(b'@type'):
        descriptor_content = descriptor_content[1:]

      # strip newlines from annotations
      annotations = list(map(bytes.strip, annotations))

      descriptor_text = bytes.join(b'', descriptor_content)

      if is_bridge:
        yield BridgeDescriptor(descriptor_text, validate, annotations, **kwargs)
      else:
        yield RelayDescriptor(descriptor_text, validate, annotations, **kwargs)
    else:
      if validate and annotations:
        orphaned_annotations = stem.util.str_tools._to_unicode(b'\n'.join(annotations))
        raise ValueError("Content doesn't conform to being a server descriptor:\n%s" % orphaned_annotations)

      break  # done parsing descriptors

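# Editor's note: a minimal sketch of the flow above, not part of stem. It
# assumes a 'cached-descriptors' file copied from a tor data directory...
#
#   with open('/home/user/.tor/cached-descriptors', 'rb') as descriptor_file:
#     for desc in _parse_file(descriptor_file):
#       print('%s published at %s' % (desc.nickname, desc.published))
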
def _parse_router_line(descriptor, entries):
  # "router" nickname address ORPort SocksPort DirPort

  value = _value('router', entries)
  router_comp = value.split()

  if len(router_comp) < 5:
    raise ValueError('Router line must have five values: router %s' % value)
  elif not stem.util.tor_tools.is_valid_nickname(router_comp[0]):
    raise ValueError("Router line entry isn't a valid nickname: %s" % router_comp[0])
  elif not stem.util.connection.is_valid_ipv4_address(router_comp[1]):
    raise ValueError("Router line entry isn't a valid IPv4 address: %s" % router_comp[1])
  elif not stem.util.connection.is_valid_port(router_comp[2], allow_zero = True):
    raise ValueError("Router line's ORPort is invalid: %s" % router_comp[2])
  elif not stem.util.connection.is_valid_port(router_comp[3], allow_zero = True):
    raise ValueError("Router line's SocksPort is invalid: %s" % router_comp[3])
  elif not stem.util.connection.is_valid_port(router_comp[4], allow_zero = True):
    raise ValueError("Router line's DirPort is invalid: %s" % router_comp[4])

  descriptor.nickname = router_comp[0]
  descriptor.address = router_comp[1]
  descriptor.or_port = int(router_comp[2])
  descriptor.socks_port = None if router_comp[3] == '0' else int(router_comp[3])
  descriptor.dir_port = None if router_comp[4] == '0' else int(router_comp[4])


def _parse_bandwidth_line(descriptor, entries):
  # "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed

  value = _value('bandwidth', entries)
  bandwidth_comp = value.split()

  if len(bandwidth_comp) < 3:
    raise ValueError('Bandwidth line must have three values: bandwidth %s' % value)
  elif not bandwidth_comp[0].isdigit():
    raise ValueError("Bandwidth line's average rate isn't numeric: %s" % bandwidth_comp[0])
  elif not bandwidth_comp[1].isdigit():
    raise ValueError("Bandwidth line's burst rate isn't numeric: %s" % bandwidth_comp[1])
  elif not bandwidth_comp[2].isdigit():
    raise ValueError("Bandwidth line's observed rate isn't numeric: %s" % bandwidth_comp[2])

  descriptor.average_bandwidth = int(bandwidth_comp[0])
  descriptor.burst_bandwidth = int(bandwidth_comp[1])
  descriptor.observed_bandwidth = int(bandwidth_comp[2])


def _parse_platform_line(descriptor, entries):
  # "platform" string

  _parse_bytes_line('platform', 'platform')(descriptor, entries)

  # The platform attribute was set earlier. This line can contain any
  # arbitrary data, but tor seems to report its version followed by the
  # os like the following...
  #
  #   platform Tor 0.2.2.35 (git-73ff13ab3cc9570d) on Linux x86_64
  #
  # There's no guarantee that we'll be able to pick out the version, but we
  # might as well try to save our caller the effort.

  value = _value('platform', entries)
  platform_match = re.match('^(?:node-)?Tor (\S*).* on (.*)$', value)

  if platform_match:
    version_str, descriptor.operating_system = platform_match.groups()

    try:
      descriptor.tor_version = stem.version._get_version(version_str)
    except ValueError:
      pass


def _parse_fingerprint_line(descriptor, entries):
  # This is forty hex digits split into space separated groups of four.
  # Checking that we match this pattern.

  value = _value('fingerprint', entries)
  fingerprint = value.replace(' ', '')

  for grouping in value.split(' '):
    if len(grouping) != 4:
      raise ValueError('Fingerprint line should have groupings of four hex digits: %s' % value)

  if not stem.util.tor_tools.is_valid_fingerprint(fingerprint):
    raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value)

  descriptor.fingerprint = fingerprint


def _parse_hibernating_line(descriptor, entries):
  # "hibernating" 0|1 (in practice only set if one)

  value = _value('hibernating', entries)

  if value not in ('0', '1'):
    raise ValueError('Hibernating line had an invalid value, must be zero or one: %s' % value)

  descriptor.hibernating = value == '1'


def _parse_hidden_service_dir_line(descriptor, entries):
  value = _value('hidden-service-dir', entries)

  if value:
    descriptor.hidden_service_dir = value.split(' ')
  else:
    descriptor.hidden_service_dir = ['2']


def _parse_uptime_line(descriptor, entries):
  # We need to be tolerant of negative uptimes to accommodate a past tor
  # bug...
  #
  # Changes in version 0.1.2.7-alpha - 2007-02-06
  #  - If our system clock jumps back in time, don't publish a negative
  #    uptime in the descriptor. Also, don't let the global rate limiting
  #    buckets go absurdly negative.
  #
  # After parsing all of the attributes we'll double check that negative
  # uptimes only occurred prior to this fix.

  value = _value('uptime', entries)

  try:
    descriptor.uptime = int(value)
  except ValueError:
    raise ValueError('Uptime line must have an integer value: %s' % value)


def _parse_protocols_line(descriptor, entries):
|
||||
value = _value('protocols', entries)
|
||||
protocols_match = re.match('^Link (.*) Circuit (.*)$', value)
|
||||
|
||||
if not protocols_match:
|
||||
raise ValueError('Protocols line did not match the expected pattern: protocols %s' % value)
|
||||
|
||||
link_versions, circuit_versions = protocols_match.groups()
|
||||
descriptor.link_protocols = link_versions.split(' ')
|
||||
descriptor.circuit_protocols = circuit_versions.split(' ')
|
||||
|
||||
|
||||
def _parse_or_address_line(descriptor, entries):
|
||||
all_values = _values('or-address', entries)
|
||||
or_addresses = []
|
||||
|
||||
for entry in all_values:
|
||||
line = 'or-address %s' % entry
|
||||
|
||||
if ':' not in entry:
|
||||
raise ValueError('or-address line missing a colon: %s' % line)
|
||||
|
||||
address, port = entry.rsplit(':', 1)
|
||||
is_ipv6 = address.startswith('[') and address.endswith(']')
|
||||
|
||||
if is_ipv6:
|
||||
address = address[1:-1] # remove brackets
|
||||
|
||||
if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or
|
||||
(is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))):
|
||||
raise ValueError('or-address line has a malformed address: %s' % line)
|
||||
|
||||
if not stem.util.connection.is_valid_port(port):
|
||||
raise ValueError('or-address line has a malformed port: %s' % line)
|
||||
|
||||
or_addresses.append((address, int(port), is_ipv6))
|
||||
|
||||
descriptor.or_addresses = or_addresses
|
||||
|
||||
|
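# Illustrative sketch (not part of this module): or-address lines cover
# both address families, and are normalized above into
# (address, port, is_ipv6) tuples...
#
#   or-address 12.34.56.78:443    => ('12.34.56.78', 443, False)
#   or-address [2001:db8::1]:9001 => ('2001:db8::1', 9001, True)

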
def _parse_history_line(keyword, history_end_attribute, history_interval_attribute, history_values_attribute, descriptor, entries):
  value = _value(keyword, entries)
  timestamp, interval, remainder = stem.descriptor.extrainfo_descriptor._parse_timestamp_and_interval(keyword, value)

  try:
    if remainder:
      history_values = [int(entry) for entry in remainder.split(',')]
    else:
      history_values = []
  except ValueError:
    raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value))

  setattr(descriptor, history_end_attribute, timestamp)
  setattr(descriptor, history_interval_attribute, interval)
  setattr(descriptor, history_values_attribute, history_values)

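# Illustrative sketch (not part of this module): a deprecated history line
# such as...
#
#   read-history 2015-05-05 17:02:45 (900 s) 81,8848,8927
#
# ...would set read_history_end to the parsed timestamp,
# read_history_interval to 900, and read_history_values to [81, 8848, 8927].

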
def _parse_exit_policy(descriptor, entries):
  if hasattr(descriptor, '_unparsed_exit_policy'):
    if descriptor._unparsed_exit_policy == [str_type('reject *:*')]:
      descriptor.exit_policy = REJECT_ALL_POLICY
    else:
      descriptor.exit_policy = stem.exit_policy.ExitPolicy(*descriptor._unparsed_exit_policy)

    del descriptor._unparsed_exit_policy


_parse_contact_line = _parse_bytes_line('contact', 'contact')
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_extrainfo_digest_line = _parse_forty_character_hex('extra-info-digest', 'extra_info_digest')
_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values')
_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values')
_parse_ipv6_policy_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('ipv6-policy', entries)))
_parse_allow_single_hop_exits_line = lambda descriptor, entries: setattr(descriptor, 'allow_single_hop_exits', 'allow-single-hop-exits' in entries)
_parse_caches_extra_info_line = lambda descriptor, entries: setattr(descriptor, 'extra_info_cache', 'caches-extra-info' in entries)
_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', set(_value('family', entries).split(' ')))
_parse_eventdns_line = lambda descriptor, entries: setattr(descriptor, 'eventdns', _value('eventdns', entries) == '1')
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
_parse_signing_key_line = _parse_key_block('signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')

class ServerDescriptor(Descriptor):
  """
  Common parent for server descriptors.

  :var str nickname: **\*** relay's nickname
  :var str fingerprint: identity key fingerprint
  :var datetime published: **\*** time in UTC when this descriptor was made

  :var str address: **\*** IPv4 address of the relay
  :var int or_port: **\*** port used for relaying
  :var int socks_port: **\*** port used as client (deprecated, always **None**)
  :var int dir_port: **\*** port used for descriptor mirroring

  :var bytes platform: line with operating system and tor version
  :var stem.version.Version tor_version: version of tor
  :var str operating_system: operating system
  :var int uptime: uptime when published in seconds
  :var bytes contact: contact information
  :var stem.exit_policy.ExitPolicy exit_policy: **\*** stated exit policy
  :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6
  :var set family: **\*** nicknames or fingerprints of declared family

  :var int average_bandwidth: **\*** average rate it's willing to relay in bytes/s
  :var int burst_bandwidth: **\*** burst rate it's willing to relay in bytes/s
  :var int observed_bandwidth: **\*** estimated capacity based on usage in bytes/s

  :var list link_protocols: link protocols supported by the relay
  :var list circuit_protocols: circuit protocols supported by the relay
  :var bool hibernating: **\*** hibernating when published
  :var bool allow_single_hop_exits: **\*** flag if single hop exiting is allowed
  :var bool extra_info_cache: **\*** flag if a mirror for extra-info documents
  :var str extra_info_digest: upper-case hex encoded digest of our extra-info document
  :var bool eventdns: flag for evdns backend (deprecated, always unset)
  :var list or_addresses: **\*** alternative for our address/or_port
    attributes, each entry is a tuple of the form (address (**str**), port
    (**int**), is_ipv6 (**bool**))

  Deprecated, moved to extra-info descriptor...

  :var datetime read_history_end: end of the sampling interval
  :var int read_history_interval: seconds per interval
  :var list read_history_values: bytes read during each interval

  :var datetime write_history_end: end of the sampling interval
  :var int write_history_interval: seconds per interval
  :var list write_history_values: bytes written during each interval

  **\*** attribute is either required when we're parsed with validation or has
  a default value, others are left as **None** if undefined
  """

  ATTRIBUTES = {
    'nickname': (None, _parse_router_line),
    'fingerprint': (None, _parse_fingerprint_line),
    'contact': (None, _parse_contact_line),
    'published': (None, _parse_published_line),
    'exit_policy': (None, _parse_exit_policy),

    'address': (None, _parse_router_line),
    'or_port': (None, _parse_router_line),
    'socks_port': (None, _parse_router_line),
    'dir_port': (None, _parse_router_line),

    'platform': (None, _parse_platform_line),
    'tor_version': (None, _parse_platform_line),
    'operating_system': (None, _parse_platform_line),
    'uptime': (None, _parse_uptime_line),
    'exit_policy_v6': (DEFAULT_IPV6_EXIT_POLICY, _parse_ipv6_policy_line),
    'family': (set(), _parse_family_line),

    'average_bandwidth': (None, _parse_bandwidth_line),
    'burst_bandwidth': (None, _parse_bandwidth_line),
    'observed_bandwidth': (None, _parse_bandwidth_line),

    'link_protocols': (None, _parse_protocols_line),
    'circuit_protocols': (None, _parse_protocols_line),
    'hibernating': (False, _parse_hibernating_line),
    'allow_single_hop_exits': (False, _parse_allow_single_hop_exits_line),
    'extra_info_cache': (False, _parse_caches_extra_info_line),
    'extra_info_digest': (None, _parse_extrainfo_digest_line),
    'hidden_service_dir': (None, _parse_hidden_service_dir_line),
    'eventdns': (None, _parse_eventdns_line),
    'or_addresses': ([], _parse_or_address_line),

    'read_history_end': (None, _parse_read_history_line),
    'read_history_interval': (None, _parse_read_history_line),
    'read_history_values': (None, _parse_read_history_line),

    'write_history_end': (None, _parse_write_history_line),
    'write_history_interval': (None, _parse_write_history_line),
    'write_history_values': (None, _parse_write_history_line),
  }

  PARSER_FOR_LINE = {
    'router': _parse_router_line,
    'bandwidth': _parse_bandwidth_line,
    'platform': _parse_platform_line,
    'published': _parse_published_line,
    'fingerprint': _parse_fingerprint_line,
    'contact': _parse_contact_line,
    'hibernating': _parse_hibernating_line,
    'extra-info-digest': _parse_extrainfo_digest_line,
    'hidden-service-dir': _parse_hidden_service_dir_line,
    'uptime': _parse_uptime_line,
    'protocols': _parse_protocols_line,
    'or-address': _parse_or_address_line,
    'read-history': _parse_read_history_line,
    'write-history': _parse_write_history_line,
    'ipv6-policy': _parse_ipv6_policy_line,
    'allow-single-hop-exits': _parse_allow_single_hop_exits_line,
    'caches-extra-info': _parse_caches_extra_info_line,
    'family': _parse_family_line,
    'eventdns': _parse_eventdns_line,
  }

  def __init__(self, raw_contents, validate = False, annotations = None):
    """
    Server descriptor constructor, created from an individual relay's
    descriptor content (as provided by 'GETINFO desc/*', cached descriptors,
    and metrics).

    Validating the descriptor's content as it's parsed is optional. This
    validation can be disabled to either improve performance or be accepting
    of malformed data.

    :param str raw_contents: descriptor content provided by the relay
    :param bool validate: checks the validity of the descriptor's content if
      **True**, skips these checks otherwise
    :param list annotations: lines that appeared prior to the descriptor

    :raises: **ValueError** if the content is malformed and validate is **True**
    """

    super(ServerDescriptor, self).__init__(raw_contents, lazy_load = not validate)
    self._annotation_lines = annotations if annotations else []

    # A descriptor contains a series of 'keyword lines' which are simply a
    # keyword followed by an optional value. Lines can also be followed by a
    # signature block.
    #
    # We care about the ordering of 'accept' and 'reject' entries because this
    # influences the resulting exit policy, but for everything else the order
    # does not matter, so we break it into key / value pairs.

    entries, self._unparsed_exit_policy = _get_descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, ('accept', 'reject'))

    if validate:
      self._parse(entries, validate)

      _parse_exit_policy(self, entries)

      # if we have a negative uptime and a tor version that shouldn't exhibit
      # this bug then fail validation

      if self.uptime and self.tor_version:
        if self.uptime < 0 and self.tor_version >= stem.version.Version('0.1.2.7'):
          raise ValueError("Descriptor for version '%s' had a negative uptime value: %i" % (self.tor_version, self.uptime))

      self._check_constraints(entries)
    else:
      self._entries = entries

  def digest(self):
    """
    Provides the hex encoded sha1 of our content. This value is part of the
    network status entry for this relay.

    :returns: **unicode** with the upper-case hex digest value for this server descriptor
    """

    raise NotImplementedError('Unsupported Operation: this should be implemented by the ServerDescriptor subclass')

  @lru_cache()
  def get_annotations(self):
    """
    Provides content that appeared prior to the descriptor. If this comes from
    the cached-descriptors file then this commonly contains content like...

    ::

      @downloaded-at 2012-03-18 21:18:29
      @source "173.254.216.66"

    :returns: **dict** with the key/value pairs in our annotations
    """

    annotation_dict = {}

    for line in self._annotation_lines:
      if b' ' in line:
        key, value = line.split(b' ', 1)
        annotation_dict[key] = value
      else:
        annotation_dict[line] = None

    return annotation_dict

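  # Illustrative usage sketch (not part of this module), assuming a
  # descriptor read from the cached-descriptors file...
  #
  #   >>> desc.get_annotations()
  #   {b'@downloaded-at': b'2012-03-18 21:18:29', b'@source': b'"173.254.216.66"'}
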
  def get_annotation_lines(self):
    """
    Provides the lines of content that appeared prior to the descriptor. This
    is the same as the
    :func:`~stem.descriptor.server_descriptor.ServerDescriptor.get_annotations`
    results, but with the unparsed lines and ordering retained.

    :returns: **list** with the lines of annotation that came before this descriptor
    """

    return self._annotation_lines

  def _check_constraints(self, entries):
    """
    Does a basic check that the entries conform to this descriptor type's
    constraints.

    :param dict entries: keyword => (value, pgp key) entries

    :raises: **ValueError** if an issue arises in validation
    """

    for keyword in self._required_fields():
      if keyword not in entries:
        raise ValueError("Descriptor must have a '%s' entry" % keyword)

    for keyword in self._single_fields():
      if keyword in entries and len(entries[keyword]) > 1:
        raise ValueError("The '%s' entry can only appear once in a descriptor" % keyword)

    expected_first_keyword = self._first_keyword()

    if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]:
      raise ValueError("Descriptor must start with a '%s' entry" % expected_first_keyword)

    expected_last_keyword = self._last_keyword()

    if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]:
      raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)

    if not self.exit_policy:
      raise ValueError("Descriptor must have at least one 'accept' or 'reject' entry")

  # Constraints that the descriptor must meet to be valid. These can be None if
  # not applicable.

  def _required_fields(self):
    return REQUIRED_FIELDS

  def _single_fields(self):
    return REQUIRED_FIELDS + SINGLE_FIELDS

  def _first_keyword(self):
    return 'router'

  def _last_keyword(self):
    return 'router-signature'

class RelayDescriptor(ServerDescriptor):
  """
  Server descriptor (`descriptor specification
  <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_)

  :var str onion_key: **\*** key used to encrypt EXTEND cells
  :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
  :var str signing_key: **\*** relay's long-term identity key
  :var str signature: **\*** signature for this descriptor

  **\*** attribute is required when we're parsed with validation
  """

  ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{
    'onion_key': (None, _parse_onion_key_line),
    'ntor_onion_key': (None, _parse_ntor_onion_key_line),
    'signing_key': (None, _parse_signing_key_line),
    'signature': (None, _parse_router_signature_line),
  })

  PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{
    'onion-key': _parse_onion_key_line,
    'ntor-onion-key': _parse_ntor_onion_key_line,
    'signing-key': _parse_signing_key_line,
    'router-signature': _parse_router_signature_line,
  })

  def __init__(self, raw_contents, validate = False, annotations = None):
    super(RelayDescriptor, self).__init__(raw_contents, validate, annotations)

    if validate:
      if self.fingerprint:
        key_hash = hashlib.sha1(_bytes_for_block(self.signing_key)).hexdigest()

        if key_hash != self.fingerprint.lower():
          raise ValueError('Fingerprint does not match the hash of our signing key (fingerprint: %s, signing key hash: %s)' % (self.fingerprint.lower(), key_hash))

      if stem.prereq.is_crypto_available():
        signed_digest = self._digest_for_signature(self.signing_key, self.signature)

        if signed_digest != self.digest():
          raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, self.digest()))

  @lru_cache()
  def digest(self):
    """
    Provides the digest of our descriptor's content.

    :returns: the digest string encoded in uppercase hex

    :raises: **ValueError** if the digest cannot be calculated
    """

    return self._digest_for_content(b'router ', b'\nrouter-signature\n')

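  # Illustrative sketch (not part of this module): the digest covers the
  # descriptor from the first 'router ' keyword through the newline after
  # 'router-signature'. Assuming the raw descriptor bytes, an equivalent
  # manual computation would look like...
  #
  #   import hashlib
  #   start = raw_contents.find(b'router ')
  #   end = raw_contents.find(b'\nrouter-signature\n') + len(b'\nrouter-signature\n')
  #   digest = hashlib.sha1(raw_contents[start:end]).hexdigest().upper()
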
  def _compare(self, other, method):
    if not isinstance(other, RelayDescriptor):
      return False

    return method(str(self).strip(), str(other).strip())

  def __hash__(self):
    return hash(str(self).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)


class BridgeDescriptor(ServerDescriptor):
  """
  Bridge descriptor (`bridge descriptor specification
  <https://collector.torproject.org/formats.html#bridge-descriptors>`_)
  """

  ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{
    '_digest': (None, _parse_router_digest_line),
  })

  PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{
    'router-digest': _parse_router_digest_line,
  })

  def digest(self):
    return self._digest

  def is_scrubbed(self):
    """
    Checks if we've been properly scrubbed in accordance with the `bridge
    descriptor specification
    <https://collector.torproject.org/formats.html#bridge-descriptors>`_.
    Validation is a moving target so this may not be fully up to date.

    :returns: **True** if we're scrubbed, **False** otherwise
    """

    return self.get_scrubbing_issues() == []

  @lru_cache()
  def get_scrubbing_issues(self):
    """
    Provides issues with our scrubbing.

    :returns: **list** of strings which describe issues we have with our
      scrubbing, this list is empty if we're properly scrubbed
    """

    issues = []

    if not self.address.startswith('10.'):
      issues.append("Router line's address should be scrubbed to be '10.x.x.x': %s" % self.address)

    if self.contact and self.contact != 'somebody':
      issues.append("Contact line should be scrubbed to be 'somebody', but instead had '%s'" % self.contact)

    for address, _, is_ipv6 in self.or_addresses:
      if not is_ipv6 and not address.startswith('10.'):
        issues.append("or-address line's address should be scrubbed to be '10.x.x.x': %s" % address)
      elif is_ipv6 and not address.startswith('fd9f:2e19:3bcf::'):
        # TODO: this check isn't quite right because we aren't checking that
        # the next grouping of hex digits contains 1-2 digits
        issues.append("or-address line's address should be scrubbed to be 'fd9f:2e19:3bcf::xx:xxxx': %s" % address)

    for line in self.get_unrecognized_lines():
      if line.startswith('onion-key '):
        issues.append('Bridge descriptors should have their onion-key scrubbed: %s' % line)
      elif line.startswith('signing-key '):
        issues.append('Bridge descriptors should have their signing-key scrubbed: %s' % line)
      elif line.startswith('router-signature '):
        issues.append('Bridge descriptors should have their signature scrubbed: %s' % line)

    return issues

  def _required_fields(self):
    # bridge required fields are the same as a relay descriptor, minus items
    # excluded according to the format page

    excluded_fields = [
      'onion-key',
      'signing-key',
      'router-signature',
    ]

    included_fields = [
      'router-digest',
    ]

    return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields])

  def _single_fields(self):
    return self._required_fields() + SINGLE_FIELDS

  def _last_keyword(self):
    return None

  def _compare(self, other, method):
    if not isinstance(other, BridgeDescriptor):
      return False

    return method(str(self).strip(), str(other).strip())

  def __hash__(self):
    return hash(str(self).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)
117
Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py
Normal file

@@ -0,0 +1,117 @@
# Copyright 2013-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Parsing for `TorDNSEL <https://www.torproject.org/projects/tordnsel.html.en>`_
exit list files.

::

  TorDNSEL - Exit list provided by TorDNSEL
"""

import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools

from stem.descriptor import (
  Descriptor,
  _read_until_keywords,
  _get_descriptor_components,
)


def _parse_file(tordnsel_file, validate = False, **kwargs):
  """
  Iterates over a tordnsel file.

  :returns: iterator for :class:`~stem.descriptor.tordnsel.TorDNSEL`
    instances in the file

  :raises:
    * **ValueError** if the content is malformed and validate is **True**
    * **IOError** if the file can't be read
  """

  # skip content prior to the first ExitNode
  _read_until_keywords('ExitNode', tordnsel_file, skip = True)

  while True:
    contents = _read_until_keywords('ExitAddress', tordnsel_file)
    contents += _read_until_keywords('ExitNode', tordnsel_file)

    if contents:
      yield TorDNSEL(bytes.join(b'', contents), validate, **kwargs)
    else:
      break  # done parsing file


class TorDNSEL(Descriptor):
  """
  TorDNSEL descriptor (`exitlist specification
  <https://www.torproject.org/tordnsel/exitlist-spec.txt>`_)

  :var str fingerprint: **\*** relay's fingerprint
  :var datetime published: **\*** time in UTC when this descriptor was made
  :var datetime last_status: **\*** time in UTC when the relay was seen in a v2 network status
  :var list exit_addresses: **\*** list of (str address, datetime date) tuples consisting of the found IPv4 exit address and the time

  **\*** attribute is either required when we're parsed with validation or has
  a default value, others are left as **None** if undefined
  """

  def __init__(self, raw_contents, validate):
    super(TorDNSEL, self).__init__(raw_contents)
    raw_contents = stem.util.str_tools._to_unicode(raw_contents)
    entries = _get_descriptor_components(raw_contents, validate)

    self.fingerprint = None
    self.published = None
    self.last_status = None
    self.exit_addresses = []

    self._parse(entries, validate)

  def _parse(self, entries, validate):
    for keyword, values in list(entries.items()):
      value, block_type, block_content = values[0]

      if validate and block_content:
        raise ValueError('Unexpected block content: %s' % block_content)

      if keyword == 'ExitNode':
        if validate and not stem.util.tor_tools.is_valid_fingerprint(value):
          raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value)

        self.fingerprint = value
      elif keyword == 'Published':
        try:
          self.published = stem.util.str_tools._parse_timestamp(value)
        except ValueError:
          if validate:
            raise ValueError("Published time wasn't parsable: %s" % value)
      elif keyword == 'LastStatus':
        try:
          self.last_status = stem.util.str_tools._parse_timestamp(value)
        except ValueError:
          if validate:
            raise ValueError("LastStatus time wasn't parsable: %s" % value)
      elif keyword == 'ExitAddress':
        for value, block_type, block_content in values:
          address, date = value.split(' ', 1)

          if validate:
            if not stem.util.connection.is_valid_ipv4_address(address):
              raise ValueError("ExitAddress isn't a valid IPv4 address: %s" % address)
            elif block_content:
              raise ValueError('Unexpected block content: %s' % block_content)

          try:
            date = stem.util.str_tools._parse_timestamp(date)
            self.exit_addresses.append((address, date))
          except ValueError:
            if validate:
              raise ValueError("ExitAddress found time wasn't parsable: %s" % value)
      elif validate:
        raise ValueError('Unrecognized keyword: %s' % keyword)
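# Illustrative usage sketch (not part of this module), assuming an exit
# list saved locally...
#
#   with open('exit-addresses', 'rb') as exit_list:
#     for desc in _parse_file(exit_list):
#       print('%s: %s' % (desc.fingerprint, desc.exit_addresses))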
1094
Shared/lib/python3.4/site-packages/stem/exit_policy.py
Normal file

File diff suppressed because it is too large
141
Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py
Normal file

@@ -0,0 +1,141 @@
# Copyright 2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Interactive interpreter for interacting with Tor directly. This adds usability
features such as tab completion, history, and IRC-style functions (like /help).
"""

__all__ = [
  'arguments',
  'autocomplete',
  'commands',
  'help',
]

import os
import sys

import stem
import stem.connection
import stem.prereq
import stem.process
import stem.util.conf
import stem.util.system
import stem.util.term

from stem.util.term import Attr, Color, format

PROMPT = format('>>> ', Color.GREEN, Attr.BOLD, Attr.READLINE_ESCAPE)

STANDARD_OUTPUT = (Color.BLUE, )
BOLD_OUTPUT = (Color.BLUE, Attr.BOLD)
HEADER_OUTPUT = (Color.GREEN, )
HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD)
ERROR_OUTPUT = (Attr.BOLD, Color.RED)

settings_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')
uses_settings = stem.util.conf.uses_settings('stem_interpreter', settings_path)


@uses_settings
def msg(message, config, **attr):
  return config.get(message).format(**attr)


def main():
  import readline

  import stem.interpreter.arguments
  import stem.interpreter.autocomplete
  import stem.interpreter.commands

  try:
    args = stem.interpreter.arguments.parse(sys.argv[1:])
  except ValueError as exc:
    print(exc)
    sys.exit(1)

  if args.print_help:
    print(stem.interpreter.arguments.get_help())
    sys.exit()

  if args.disable_color:
    global PROMPT
    stem.util.term.DISABLE_COLOR_SUPPORT = True
    PROMPT = '>>> '

  # If the user isn't connecting to something in particular then offer to start
  # tor if it isn't running.

  if not (args.user_provided_port or args.user_provided_socket):
    is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real')

    if not is_tor_running:
      if not stem.util.system.is_available('tor'):
        print(format(msg('msg.tor_unavailable'), *ERROR_OUTPUT))
        sys.exit(1)
      else:
        print(format(msg('msg.starting_tor'), *HEADER_OUTPUT))

        stem.process.launch_tor_with_config(
          config = {
            'SocksPort': '0',
            'ControlPort': str(args.control_port),
            'CookieAuthentication': '1',
            'ExitPolicy': 'reject *:*',
          },
          completion_percent = 5,
          take_ownership = True,
        )

  control_port = (args.control_address, args.control_port)
  control_socket = args.control_socket

  # If the user explicitly specified an endpoint then just try to connect to
  # that.

  if args.user_provided_socket and not args.user_provided_port:
    control_port = None
  elif args.user_provided_port and not args.user_provided_socket:
    control_socket = None

  controller = stem.connection.connect(
    control_port = control_port,
    control_socket = control_socket,
    password_prompt = True,
  )

  if controller is None:
    sys.exit(1)

  with controller:
    autocompleter = stem.interpreter.autocomplete.Autocompleter(controller)
    readline.parse_and_bind('tab: complete')
    readline.set_completer(autocompleter.complete)
    readline.set_completer_delims('\n')

    interpreter = stem.interpreter.commands.ControlInterpretor(controller)

    for line in msg('msg.startup_banner').splitlines():
      line_format = HEADER_BOLD_OUTPUT if line.startswith('  ') else HEADER_OUTPUT
      print(format(line, *line_format))

    print('')

    while True:
      try:
        prompt = '... ' if interpreter.is_multiline_context else PROMPT

        if stem.prereq.is_python_3():
          user_input = input(prompt)
        else:
          user_input = raw_input(prompt)

        response = interpreter.run_command(user_input)

        if response is not None:
          print(response)
      except (KeyboardInterrupt, EOFError, stem.SocketClosed):
        print('')  # move cursor to the following line
        break
@@ -0,0 +1,94 @@
# Copyright 2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Commandline argument parsing for our interpreter prompt.
"""

import collections
import getopt

import stem.interpreter
import stem.util.connection

DEFAULT_ARGS = {
  'control_address': '127.0.0.1',
  'control_port': 9051,
  'user_provided_port': False,
  'control_socket': '/var/run/tor/control',
  'user_provided_socket': False,
  'disable_color': False,
  'print_help': False,
}

OPT = 'i:s:h'
OPT_EXPANDED = ['interface=', 'socket=', 'no-color', 'help']


def parse(argv):
  """
  Parses our arguments, providing a named tuple with their values.

  :param list argv: input arguments to be parsed

  :returns: a **named tuple** with our parsed arguments

  :raises: **ValueError** if we got an invalid argument
  """

  args = dict(DEFAULT_ARGS)

  try:
    recognized_args, unrecognized_args = getopt.getopt(argv, OPT, OPT_EXPANDED)

    if unrecognized_args:
      error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument"
      raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg))
  except Exception as exc:
    raise ValueError('%s (for usage provide --help)' % exc)

  for opt, arg in recognized_args:
    if opt in ('-i', '--interface'):
      if ':' in arg:
        address, port = arg.split(':', 1)
      else:
        address, port = None, arg

      if address is not None:
        if not stem.util.connection.is_valid_ipv4_address(address):
          raise ValueError("'%s' isn't a valid IPv4 address" % address)

        args['control_address'] = address

      if not stem.util.connection.is_valid_port(port):
        raise ValueError("'%s' isn't a valid port number" % port)

      args['control_port'] = int(port)
      args['user_provided_port'] = True
    elif opt in ('-s', '--socket'):
      args['control_socket'] = arg
      args['user_provided_socket'] = True
    elif opt == '--no-color':
      args['disable_color'] = True
    elif opt in ('-h', '--help'):
      args['print_help'] = True

  # translates our args dict into a named tuple

  Args = collections.namedtuple('Args', args.keys())
  return Args(**args)

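# Illustrative usage sketch (not part of this module)...
#
#   >>> args = parse(['--interface', '127.0.0.1:9555'])
#   >>> args.control_address, args.control_port, args.user_provided_port
#   ('127.0.0.1', 9555, True)

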
def get_help():
  """
  Provides our --help usage information.

  :returns: **str** with our usage information
  """

  return stem.interpreter.msg(
    'msg.help',
    address = DEFAULT_ARGS['control_address'],
    port = DEFAULT_ARGS['control_port'],
    socket = DEFAULT_ARGS['control_socket'],
  )
@@ -0,0 +1,115 @@
# Copyright 2014-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Tab completion for our interpreter prompt.
"""

from stem.interpreter import uses_settings

try:
  # added in python 3.2
  from functools import lru_cache
except ImportError:
  from stem.util.lru_cache import lru_cache


@uses_settings
def _get_commands(controller, config):
  """
  Provides commands recognized by tor.
  """

  commands = config.get('autocomplete', [])

  if controller is None:
    return commands

  # GETINFO commands. Lines are of the form '[option] -- [description]'. This
  # strips '*' from options that accept values.

  results = controller.get_info('info/names', None)

  if results:
    for line in results.splitlines():
      option = line.split(' ', 1)[0].rstrip('*')
      commands.append('GETINFO %s' % option)
  else:
    commands.append('GETINFO ')

  # GETCONF, SETCONF, and RESETCONF commands. Lines are of the form
  # '[option] [type]'.

  results = controller.get_info('config/names', None)

  if results:
    for line in results.splitlines():
      option = line.split(' ', 1)[0]

      commands.append('GETCONF %s' % option)
      commands.append('SETCONF %s' % option)
      commands.append('RESETCONF %s' % option)
  else:
    commands += ['GETCONF ', 'SETCONF ', 'RESETCONF ']

  # SETEVENT, USEFEATURE, and SIGNAL commands. For each of these the GETINFO
  # results are simply a space separated list of the values they can have.

  options = (
    ('SETEVENTS ', 'events/names'),
    ('USEFEATURE ', 'features/names'),
    ('SIGNAL ', 'signal/names'),
  )

  for prefix, getinfo_cmd in options:
    results = controller.get_info(getinfo_cmd, None)

    if results:
      commands += [prefix + value for value in results.split()]
    else:
      commands.append(prefix)

  # Adds /help commands.

  usage_info = config.get('help.usage', {})

  for cmd in usage_info.keys():
    commands.append('/help ' + cmd)

  return commands


class Autocompleter(object):
  def __init__(self, controller):
    self._commands = _get_commands(controller)

  @lru_cache()
  def matches(self, text):
    """
    Provides autocompletion matches for the given text.

    :param str text: text to check for autocompletion matches with

    :returns: **list** with possible matches
    """

    lowercase_text = text.lower()
    return [cmd for cmd in self._commands if cmd.lower().startswith(lowercase_text)]

  def complete(self, text, state):
    """
    Provides case insensitive autocompletion options, acting as a functor for
    readline's set_completer function.

    :param str text: text to check for autocompletion matches with
    :param int state: index of result to be provided, readline fetches matches
      until this function provides None

    :returns: **str** with the autocompletion match, **None** if either none
      exists or state is higher than our number of matches
    """

    try:
      return self.matches(text)[state]
    except IndexError:
      return None
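# Illustrative usage sketch (not part of this module), assuming a
# stem.control.Controller connection...
#
#   >>> completer = Autocompleter(controller)
#   >>> completer.matches('getin')
#   ['GETINFO version', 'GETINFO config-file', ...]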
354
Shared/lib/python3.4/site-packages/stem/interpreter/commands.py
Normal file

@@ -0,0 +1,354 @@
# Copyright 2014-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Handles making requests and formatting the responses.
"""

import code
import socket

import stem
import stem.control
import stem.descriptor.remote
import stem.interpreter.help
import stem.util.connection
import stem.util.str_tools
import stem.util.tor_tools

from stem.interpreter import STANDARD_OUTPUT, BOLD_OUTPUT, ERROR_OUTPUT, uses_settings, msg
from stem.util.term import format


def _get_fingerprint(arg, controller):
  """
  Resolves user input into a relay fingerprint. This accepts...

    * Fingerprints
    * Nicknames
    * IPv4 addresses, either with or without an ORPort
    * Empty input, which is resolved to ourselves if we're a relay

  :param str arg: input to be resolved to a relay fingerprint
  :param stem.control.Controller controller: tor control connection

  :returns: **str** for the relay fingerprint

  :raises: **ValueError** if we're unable to resolve the input to a relay
  """

  if not arg:
    try:
      return controller.get_info('fingerprint')
    except:
      raise ValueError("We aren't a relay, no information to provide")
  elif stem.util.tor_tools.is_valid_fingerprint(arg):
    return arg
  elif stem.util.tor_tools.is_valid_nickname(arg):
    try:
      return controller.get_network_status(arg).fingerprint
    except:
      raise ValueError("Unable to find a relay with the nickname of '%s'" % arg)
  elif ':' in arg or stem.util.connection.is_valid_ipv4_address(arg):
    if ':' in arg:
      address, port = arg.split(':', 1)

      if not stem.util.connection.is_valid_ipv4_address(address):
        raise ValueError("'%s' isn't a valid IPv4 address" % address)
      elif port and not stem.util.connection.is_valid_port(port):
        raise ValueError("'%s' isn't a valid port" % port)

      port = int(port)
    else:
      address, port = arg, None

    matches = {}

    for desc in controller.get_network_statuses():
      if desc.address == address:
        if not port or desc.or_port == port:
          matches[desc.or_port] = desc.fingerprint

    if len(matches) == 0:
      raise ValueError('No relays found at %s' % arg)
    elif len(matches) == 1:
      return list(matches.values())[0]
    else:
      response = "There are multiple relays at %s, include a port to specify which.\n\n" % arg

      for i, or_port in enumerate(matches):
        response += '  %i. %s:%s, fingerprint: %s\n' % (i + 1, address, or_port, matches[or_port])

      raise ValueError(response)
  else:
    raise ValueError("'%s' isn't a fingerprint, nickname, or IP address" % arg)

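# Illustrative usage sketch (not part of this module)...
#
#   >>> _get_fingerprint('moria1', controller)
#   '9695DFC35FFEB861329B9F1AB04C46397020CE31'
#
# ...where 'moria1' is resolved through the nickname branch above.

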
class ControlInterpretor(code.InteractiveConsole):
  """
  Handles issuing requests and providing nicely formed responses, with support
  for special irc style subcommands.
  """

  def __init__(self, controller):
    self._received_events = []

    code.InteractiveConsole.__init__(self, {
      'stem': stem,
      'stem.control': stem.control,
      'controller': controller,
      'events': self.get_events,
    })

    self._controller = controller
    self._run_python_commands = True

    # Indicates if we're processing a multiline command, such as conditional
    # block or loop.

    self.is_multiline_context = False

    # Intercept events our controller hears about at a pretty low level since
    # the user will likely be requesting them by direct 'SETEVENTS' calls.

    handle_event_real = self._controller._handle_event

    def handle_event_wrapper(event_message):
      handle_event_real(event_message)
      self._received_events.append(event_message)

    self._controller._handle_event = handle_event_wrapper

  def get_events(self, *event_types):
    events = list(self._received_events)
    event_types = list(map(str.upper, event_types))  # make filtering case insensitive

    if event_types:
      events = [e for e in events if e.type in event_types]

    return events

  def do_help(self, arg):
    """
    Performs the '/help' operation, giving usage information for the given
    argument or a general summary if there wasn't one.
    """

    return stem.interpreter.help.response(self._controller, arg)

  def do_events(self, arg):
    """
    Performs the '/events' operation, dumping the events that we've received
    belonging to the given types. If no types are specified then this provides
    all buffered events.

    If the user runs '/events clear' then this clears the list of events we've
    received.
    """

    event_types = arg.upper().split()

    if 'CLEAR' in event_types:
      del self._received_events[:]
      return format('cleared event backlog', *STANDARD_OUTPUT)

    return '\n'.join([format(str(e), *STANDARD_OUTPUT) for e in self.get_events(*event_types)])

  def do_info(self, arg):
    """
    Performs the '/info' operation, looking up a relay by fingerprint, IP
    address, or nickname and printing its descriptor and consensus entries in a
    pretty fashion.
    """

    try:
      fingerprint = _get_fingerprint(arg, self._controller)
    except ValueError as exc:
      return format(str(exc), *ERROR_OUTPUT)

    ns_desc = self._controller.get_network_status(fingerprint, None)
    server_desc = self._controller.get_server_descriptor(fingerprint, None)
    extrainfo_desc = None
    micro_desc = self._controller.get_microdescriptor(fingerprint, None)

    # We'll mostly rely on the router status entry. Either the server
    # descriptor or microdescriptor will be missing, so we'll treat them as
    # being optional.

    if not ns_desc:
      return format('Unable to find consensus information for %s' % fingerprint, *ERROR_OUTPUT)

    # More likely than not we'll have the microdescriptor but not the server
    # and extrainfo descriptors. If so then we fetch them.

    downloader = stem.descriptor.remote.DescriptorDownloader(timeout = 5)
    server_desc_query = downloader.get_server_descriptors(fingerprint)
    extrainfo_desc_query = downloader.get_extrainfo_descriptors(fingerprint)

    for desc in server_desc_query:
      server_desc = desc

    for desc in extrainfo_desc_query:
      extrainfo_desc = desc

    address_extrainfo = []

    try:
      address_extrainfo.append(socket.gethostbyaddr(ns_desc.address)[0])
    except:
      pass

    try:
      address_extrainfo.append(self._controller.get_info('ip-to-country/%s' % ns_desc.address))
    except:
      pass

    address_extrainfo_label = ' (%s)' % ', '.join(address_extrainfo) if address_extrainfo else ''

    if server_desc:
      exit_policy_label = str(server_desc.exit_policy)
    elif micro_desc:
      exit_policy_label = str(micro_desc.exit_policy)
    else:
      exit_policy_label = 'Unknown'

    lines = [
      '%s (%s)' % (ns_desc.nickname, fingerprint),
      format('address: ', *BOLD_OUTPUT) + '%s:%s%s' % (ns_desc.address, ns_desc.or_port, address_extrainfo_label),
    ]

    if server_desc:
      lines.append(format('tor version: ', *BOLD_OUTPUT) + str(server_desc.tor_version))

    lines.append(format('flags: ', *BOLD_OUTPUT) + ', '.join(ns_desc.flags))
    lines.append(format('exit policy: ', *BOLD_OUTPUT) + exit_policy_label)

    if server_desc and server_desc.contact:
      contact = stem.util.str_tools._to_unicode(server_desc.contact)

      # clean up some common obfuscation of contact addresses

      for alias in (' at ', ' AT '):
        contact = contact.replace(alias, '@')

      for alias in (' dot ', ' DOT '):
        contact = contact.replace(alias, '.')

      lines.append(format('contact: ', *BOLD_OUTPUT) + contact)

    descriptor_section = [
      ('Server Descriptor:', server_desc),
      ('Extrainfo Descriptor:', extrainfo_desc),
      ('Microdescriptor:', micro_desc),
      ('Router Status Entry:', ns_desc),
    ]

    div = format('-' * 80, *STANDARD_OUTPUT)

    for label, desc in descriptor_section:
      if desc:
        lines += ['', div, format(label, *BOLD_OUTPUT), div, '']
        lines += [format(l, *STANDARD_OUTPUT) for l in str(desc).splitlines()]

    return '\n'.join(lines)

  def do_python(self, arg):
    """
    Performs the '/python' operation, toggling if we accept python commands or
    not.
    """

    if not arg:
      status = 'enabled' if self._run_python_commands else 'disabled'
      return format('Python support is currently %s.' % status, *STANDARD_OUTPUT)
    elif arg.lower() == 'enable':
      self._run_python_commands = True
    elif arg.lower() == 'disable':
      self._run_python_commands = False
    else:
      return format("'%s' is not recognized. Please run either '/python enable' or '/python disable'." % arg, *ERROR_OUTPUT)

    if self._run_python_commands:
      response = "Python support enabled, we'll now run non-interpreter commands as python."
    else:
      response = "Python support disabled, we'll now pass along all commands to tor."

    return format(response, *STANDARD_OUTPUT)

  @uses_settings
  def run_command(self, command, config):
    """
    Runs the given command. Requests starting with a '/' are special commands
    to the interpreter, and anything else is sent to the control port.

    :param str command: command to be processed

    :returns: **str** with the command's output, or **None** when a python
      command was pushed to a multi-line context

    :raises: **stem.SocketClosed** if the control connection has been severed
    """

    if not self._controller.is_alive():
      raise stem.SocketClosed()

    # Commands fall into three categories:
    #
    # * Interpreter commands. These start with a '/'.
    #
    # * Controller commands stem knows how to handle. We use our Controller's
    #   methods for these to take advantage of caching and present nicer
    #   output.
    #
    # * Other tor commands. We pass these directly on to the control port.

    cmd, arg = command.strip(), ''

    if ' ' in cmd:
      cmd, arg = cmd.split(' ', 1)

    output = ''

    if cmd.startswith('/'):
      cmd = cmd.lower()

      if cmd == '/quit':
        raise stem.SocketClosed()
      elif cmd == '/events':
        output = self.do_events(arg)
      elif cmd == '/info':
        output = self.do_info(arg)
      elif cmd == '/python':
        output = self.do_python(arg)
      elif cmd == '/help':
        output = self.do_help(arg)
      else:
        output = format("'%s' isn't a recognized command" % command, *ERROR_OUTPUT)
    else:
      cmd = cmd.upper()  # makes commands uppercase to match the spec

      if cmd.replace('+', '') in ('LOADCONF', 'POSTDESCRIPTOR'):
        # provides a notice that multi-line controller input isn't yet implemented
        output = format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT)
      elif cmd == 'QUIT':
        self._controller.msg(command)
        raise stem.SocketClosed()
      else:
        is_tor_command = cmd in config.get('help.usage', {}) and cmd.lower() != 'events'

        if self._run_python_commands and not is_tor_command:
          self.is_multiline_context = code.InteractiveConsole.push(self, command)
          return
        else:
          try:
            output = format(self._controller.msg(command).raw_content().strip(), *STANDARD_OUTPUT)
          except stem.ControllerError as exc:
            if isinstance(exc, stem.SocketClosed):
              raise exc
            else:
              output = format(str(exc), *ERROR_OUTPUT)

    output += '\n'  # give ourselves an extra line before the next prompt

    return output
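# Illustrative usage sketch (not part of this module), assuming a
# stem.control.Controller connection. The version string shown is
# hypothetical...
#
#   >>> interpreter = ControlInterpretor(controller)
#   >>> print(interpreter.run_command('GETINFO version'))
#   250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)
#   250 OK
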
145
Shared/lib/python3.4/site-packages/stem/interpreter/help.py
Normal file

@@ -0,0 +1,145 @@
# Copyright 2014-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Provides our /help responses.
"""

from stem.interpreter import (
  STANDARD_OUTPUT,
  BOLD_OUTPUT,
  ERROR_OUTPUT,
  msg,
  uses_settings,
)

from stem.util.term import format

try:
  # added in python 3.2
  from functools import lru_cache
except ImportError:
  from stem.util.lru_cache import lru_cache


def response(controller, arg):
  """
  Provides our /help response.

  :param stem.control.Controller controller: tor control connection
  :param str arg: controller or interpreter command to provide help output for

  :returns: **str** with our help response
  """

  # Normalizing inputs first so we can better cache responses.

  return _response(controller, _normalize(arg))


def _normalize(arg):
  arg = arg.upper()

  # If there are multiple arguments then just take the first. This is
  # particularly likely if they're trying to query a full command (for
  # instance "/help GETINFO version")

  arg = arg.split(' ')[0]

  # strip slash if someone enters an interpreter command (ex. "/help /help")

  if arg.startswith('/'):
    arg = arg[1:]

  return arg

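# Illustrative sketch (not part of this module)...
#
#   >>> _normalize('getinfo version')
#   'GETINFO'
#   >>> _normalize('/help')
#   'HELP'

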
@lru_cache()
@uses_settings
def _response(controller, arg, config):
  if not arg:
    return _general_help()

  usage_info = config.get('help.usage', {})

  if arg not in usage_info:
    return format("No help information available for '%s'..." % arg, *ERROR_OUTPUT)

  output = format(usage_info[arg] + '\n', *BOLD_OUTPUT)

  description = config.get('help.description.%s' % arg.lower(), '')

  for line in description.splitlines():
    output += format('  ' + line, *STANDARD_OUTPUT) + '\n'

  output += '\n'

  if arg == 'GETINFO':
    results = controller.get_info('info/names', None)

    if results:
      for line in results.splitlines():
        if ' -- ' in line:
          opt, summary = line.split(' -- ', 1)

          output += format('%-33s' % opt, *BOLD_OUTPUT)
          output += format(' - %s' % summary, *STANDARD_OUTPUT) + '\n'
  elif arg == 'GETCONF':
    results = controller.get_info('config/names', None)

    if results:
      options = [opt.split(' ', 1)[0] for opt in results.splitlines()]

      for i in range(0, len(options), 2):
        line = ''

        for entry in options[i:i + 2]:
          line += '%-42s' % entry

        output += format(line.rstrip(), *STANDARD_OUTPUT) + '\n'
  elif arg == 'SIGNAL':
    signal_options = config.get('help.signal.options', {})

    for signal, summary in signal_options.items():
      output += format('%-15s' % signal, *BOLD_OUTPUT)
      output += format(' - %s' % summary, *STANDARD_OUTPUT) + '\n'
  elif arg == 'SETEVENTS':
    results = controller.get_info('events/names', None)

    if results:
      entries = results.split()

      # displays four columns of 20 characters

      for i in range(0, len(entries), 4):
        line = ''

        for entry in entries[i:i + 4]:
          line += '%-20s' % entry

        output += format(line.rstrip(), *STANDARD_OUTPUT) + '\n'
  elif arg == 'USEFEATURE':
    results = controller.get_info('features/names', None)

    if results:
      output += format(results, *STANDARD_OUTPUT) + '\n'
  elif arg in ('LOADCONF', 'POSTDESCRIPTOR'):
    # gives a warning that this option isn't yet implemented
    output += format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT) + '\n'

  return output.rstrip()


def _general_help():
  lines = []

  for line in msg('help.general').splitlines():
    div = line.find(' - ')

    if div != -1:
      cmd, description = line[:div], line[div:]
      lines.append(format(cmd, *BOLD_OUTPUT) + format(description, *STANDARD_OUTPUT))
    else:
      lines.append(format(line, *BOLD_OUTPUT))

  return '\n'.join(lines)
326
Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg
Normal file

@@ -0,0 +1,326 @@
################################################################################
#
# Configuration data used by Stem's interpreter prompt.
#
################################################################################

####################
# GENERAL MESSAGES #
####################

msg.multiline_unimplemented_notice Multi-line control options like this are not yet implemented.

msg.help
|Interactive interpreter for Tor. This provides you with direct access
|to Tor's control interface via either python or direct requests.
|
|  -i, --interface [ADDRESS:]PORT  change control interface from {address}:{port}
|  -s, --socket SOCKET_PATH        attach using unix domain socket if present,
|                                  SOCKET_PATH defaults to: {socket}
|  --no-color                      disables colorized output
|  -h, --help                      presents this help
|

msg.startup_banner
|Welcome to Stem's interpreter prompt. This provides you with direct access to
|Tor's control interface.
|
|This acts like a standard python interpreter with a Tor connection available
|via your 'controller' variable...
|
|  >>> controller.get_info('version')
|  '0.2.5.1-alpha-dev (git-245ecfff36c0cecc)'
|
|You can also issue requests directly to Tor...
|
|  >>> GETINFO version
|  250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc)
|  250 OK
|
|For more information run '/help'.
|

msg.tor_unavailable Tor isn't running and the command currently isn't in your PATH.

msg.starting_tor
|Tor isn't running. Starting a temporary Tor instance for our interpreter to
|interact with. This will have a minimal non-relaying configuration, and be
|shut down when you're done.
|
|--------------------------------------------------------------------------------
|

###################
# OUTPUT OF /HELP #
###################

# Response for the '/help' command without any arguments.

help.general
|Interpreter commands include:
|  /help   - provides information for interpreter and tor commands
|  /events - prints events that we've received
|  /info   - general information for a relay
|  /python - enable or disable support for running python commands
|  /quit   - shuts down the interpreter
|
|Tor commands include:
|  GETINFO - queries information from tor
|  GETCONF, SETCONF, RESETCONF - show or edit a configuration option
|  SIGNAL - issues control signal to the process (for resetting, stopping, etc)
|  SETEVENTS - configures the events tor will notify us of
|
|  USEFEATURE - enables custom behavior for the controller
|  SAVECONF - writes tor's current configuration to our torrc
|  LOADCONF - loads the given input like it was part of our torrc
|  MAPADDRESS - replaces requests for one address with another
|  POSTDESCRIPTOR - adds a relay descriptor to our cache
|  EXTENDCIRCUIT - create or extend a tor circuit
|  SETCIRCUITPURPOSE - configures the purpose associated with a circuit
|  CLOSECIRCUIT - closes the given circuit
|  ATTACHSTREAM - associates an application's stream with a tor circuit
|  REDIRECTSTREAM - sets a stream's destination
|  CLOSESTREAM - closes the given stream
|  ADD_ONION - create a new hidden service
|  DEL_ONION - delete a hidden service that was created with ADD_ONION
|  HSFETCH - retrieve a hidden service descriptor, providing it in a HS_DESC_CONTENT event
|  HSPOST - uploads a hidden service descriptor
|  RESOLVE - issues an asynchronous dns or rdns request over tor
|  TAKEOWNERSHIP - instructs tor to quit when this control connection is closed
|  PROTOCOLINFO - queries version and controller authentication information
|  QUIT - disconnect the control connection
|
|For more information use '/help [OPTION]'.

# Usage of tor and interpreter commands.

help.usage HELP => /help [OPTION]
help.usage EVENTS => /events [types]
help.usage INFO => /info [relay fingerprint, nickname, or IP address]
help.usage PYTHON => /python [enable,disable]
help.usage QUIT => /quit
help.usage GETINFO => GETINFO OPTION
help.usage GETCONF => GETCONF OPTION
help.usage SETCONF => SETCONF PARAM[=VALUE]
help.usage RESETCONF => RESETCONF PARAM[=VALUE]
help.usage SIGNAL => SIGNAL SIG
help.usage SETEVENTS => SETEVENTS [EXTENDED] [EVENTS]
help.usage USEFEATURE => USEFEATURE OPTION
help.usage SAVECONF => SAVECONF
help.usage LOADCONF => LOADCONF...
help.usage MAPADDRESS => MAPADDRESS SOURCE_ADDR=DESTINATION_ADDR
help.usage POSTDESCRIPTOR => POSTDESCRIPTOR [purpose=general/controller/bridge] [cache=yes/no]...
help.usage EXTENDCIRCUIT => EXTENDCIRCUIT CircuitID [PATH] [purpose=general/controller]
help.usage SETCIRCUITPURPOSE => SETCIRCUITPURPOSE CircuitID purpose=general/controller
help.usage CLOSECIRCUIT => CLOSECIRCUIT CircuitID [IfUnused]
help.usage ATTACHSTREAM => ATTACHSTREAM StreamID CircuitID [HOP=HopNum]
help.usage REDIRECTSTREAM => REDIRECTSTREAM StreamID Address [Port]
help.usage CLOSESTREAM => CLOSESTREAM StreamID Reason [Flag]
help.usage ADD_ONION => KeyType:KeyBlob [Flags=Flag] (Port=Port [,Target])...
help.usage DEL_ONION => ServiceID
help.usage HSFETCH => HSFETCH (HSAddress/v2-DescId) [SERVER=Server]...
help.usage HSPOST => [SERVER=Server] DESCRIPTOR
help.usage RESOLVE => RESOLVE [mode=reverse] address
help.usage TAKEOWNERSHIP => TAKEOWNERSHIP
help.usage PROTOCOLINFO => PROTOCOLINFO [ProtocolVersion]

# Longer description of what tor and interpreter commands do.

help.description.help
|Provides usage information for the given interpreter, tor command, or tor
|configuration option.
|
|Example:
|  /help info     # provides a description of the '/info' option
|  /help GETINFO  # usage information for tor's GETINFO controller option

help.description.events
|Provides events that we've received belonging to the given event types. If
|no types are specified then this provides all the messages that we've
|received.
|
|You can also run '/events clear' to clear the backlog of events we've
|received.

help.description.info
|Provides information for a relay that's currently in the consensus. If no
|relay is specified then this provides information on ourselves.

help.description.python
|Enables or disables support for running python commands. This determines how
|we treat commands this interpreter doesn't recognize...
|
|* If enabled then unrecognized commands are executed as python.
|* If disabled then unrecognized commands are passed along to tor.

help.description.quit
|Terminates the interpreter.

help.description.getinfo
|Queries the tor process for information. Options are...
|

help.description.getconf
|Provides the current value for a given configuration value. Options include...
|

help.description.setconf
|Sets the given configuration parameters. Values can be quoted or non-quoted
|strings, and reverts the option to 0 or NULL if not provided.
|
|Examples:
|  * Sets a contact address and resets our family to NULL
|    SETCONF MyFamily ContactInfo=foo@bar.com
|
|  * Sets an exit policy that only includes port 80/443
|    SETCONF ExitPolicy=\"accept *:80, accept *:443, reject *:*\"

help.description.resetconf
|Reverts the given configuration options to their default values. If a value
|is provided then this behaves in the same way as SETCONF.
|
|Examples:
|  * Returns both of our accounting parameters to their defaults
|    RESETCONF AccountingMax AccountingStart
|
|  * Uses the default exit policy and sets our nickname to be 'Goomba'
|    RESETCONF ExitPolicy Nickname=Goomba

help.description.signal
|Issues a signal that tells the tor process to reload its torrc, dump its
|stats, halt, etc.

help.description.setevents
|Sets the events that we will receive. This turns off any events that aren't
|listed so sending 'SETEVENTS' without any values will turn off all event reporting.
|
|For Tor versions between 0.1.1.9 and 0.2.2.1 adding 'EXTENDED' causes some
|events to give us additional information. After version 0.2.2.1 this is
|always on.
|
|Events include...
|

help.description.usefeature
|Customizes the behavior of the control port. Options include...
|

help.description.saveconf
|Writes Tor's current configuration to its torrc.

help.description.loadconf
|Reads the given text like it belonged to our torrc.
|
|Example:
|  +LOADCONF
|  # sets our exit policy to just accept ports 80 and 443
|  ExitPolicy accept *:80
|  ExitPolicy accept *:443
|  ExitPolicy reject *:*
|  .

help.description.mapaddress
|Replaces future requests for one address with another.
|
|Example:
|  MAPADDRESS 0.0.0.0=torproject.org 1.2.3.4=tor.freehaven.net

help.description.postdescriptor
|Simulates getting a new relay descriptor.

help.description.extendcircuit
|Extends the given circuit or creates a new one if the CircuitID is zero. The
|PATH is a comma separated list of fingerprints. If it isn't set then this
|uses Tor's normal path selection.

help.description.setcircuitpurpose
|Sets the purpose attribute for a circuit.

help.description.closecircuit
|Closes the given circuit. If "IfUnused" is included then this only closes
|the circuit if it isn't currently being used.

help.description.attachstream
|Attaches a stream with the given built circuit (tor picks one on its own if
|CircuitID is zero). If HopNum is given then this hop is used to exit the
|circuit, otherwise the last relay is used.

help.description.redirectstream
|Sets the destination for a given stream. This can only be done after a
|stream is created but before it's attached to a circuit.

help.description.closestream
|Closes the given stream, the reason being an integer matching a reason as
|per section 6.3 of the tor-spec.

help.description.add_onion
|Creates a new hidden service. Unlike 'SETCONF HiddenServiceDir...' this
|doesn't persist the service to disk.

help.description.del_onion
|Delete a hidden service that was created with ADD_ONION.

help.description.hsfetch
|Retrieves the descriptor for a hidden service. This is an asynchronous
|request, with the descriptor provided by a HS_DESC_CONTENT event.

help.description.hspost
|Uploads a descriptor to a hidden service directory.

help.description.resolve
|Performs IPv4 DNS resolution over tor, doing a reverse lookup instead if
|"mode=reverse" is included. This request is processed in the background and
|results in an ADDRMAP event with the response.

help.description.takeownership
|Instructs Tor to gracefully shut down when this control connection is closed.

help.description.protocolinfo
|Provides bootstrapping information that a controller might need when first
|starting, like Tor's version and controller authentication. This can be done
|before authenticating to the control port.

help.signal.options RELOAD / HUP => reload our torrc
help.signal.options SHUTDOWN / INT => gracefully shut down, waiting 30 seconds if we're a relay
help.signal.options DUMP / USR1 => logs information about open connections and circuits
help.signal.options DEBUG / USR2 => makes us log at the DEBUG runlevel
help.signal.options HALT / TERM => immediately shut down
help.signal.options CLEARDNSCACHE => clears any cached DNS results
help.signal.options NEWNYM => clears the DNS cache and uses new circuits for future connections

##################
# TAB COMPLETION #
##################

# Commands we'll autocomplete when the user hits tab. This is just the start of
# our autocompletion list - more are determined dynamically by checking what
# tor supports.

autocomplete /help
autocomplete /events
autocomplete /info
autocomplete /quit
autocomplete SAVECONF
autocomplete MAPADDRESS
autocomplete EXTENDCIRCUIT
autocomplete SETCIRCUITPURPOSE
autocomplete SETROUTERPURPOSE
autocomplete ATTACHSTREAM
#autocomplete +POSTDESCRIPTOR  # TODO: needs multi-line support
autocomplete REDIRECTSTREAM
autocomplete CLOSESTREAM
autocomplete CLOSECIRCUIT
autocomplete QUIT
autocomplete RESOLVE
autocomplete PROTOCOLINFO
#autocomplete +LOADCONF  # TODO: needs multi-line support
autocomplete TAKEOWNERSHIP
autocomplete AUTHCHALLENGE
autocomplete DROPGUARDS
autocomplete ADD_ONION NEW:BEST
autocomplete ADD_ONION NEW:RSA1024
autocomplete ADD_ONION RSA1024:
autocomplete DEL_ONION
autocomplete HSFETCH
autocomplete HSPOST
132  Shared/lib/python3.4/site-packages/stem/prereq.py  Normal file
@@ -0,0 +1,132 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Checks for stem dependencies. We require python 2.6 or greater (including the
3.x series). Other requirements for complete functionality are...

* pycrypto module

  * validating descriptor signature integrity

::

  check_requirements - checks for minimum requirements for running stem

  is_python_27 - checks if python 2.7 or later is available
  is_python_3 - checks if python 3.0 or later is available

  is_crypto_available - checks if the pycrypto module is available
"""

import inspect
import sys

try:
  # added in python 3.2
  from functools import lru_cache
except ImportError:
  from stem.util.lru_cache import lru_cache

CRYPTO_UNAVAILABLE = "Unable to import the pycrypto module. Because of this we'll be unable to verify descriptor signature integrity. You can get pycrypto from: https://www.dlitz.net/software/pycrypto/"


def check_requirements():
  """
  Checks that we meet the minimum requirements to run stem. If we don't then
  this raises an ImportError with the issue.

  :raises: **ImportError** with the problem if we don't meet stem's
    requirements
  """

  major_version, minor_version = sys.version_info[0:2]

  if major_version < 2 or (major_version == 2 and minor_version < 6):
    raise ImportError('stem requires python version 2.6 or greater')


def is_python_27():
  """
  Checks if we're running python 2.7 or above (including the 3.x series).

  :returns: **True** if we meet this requirement and **False** otherwise
  """

  major_version, minor_version = sys.version_info[0:2]

  return major_version > 2 or (major_version == 2 and minor_version >= 7)


def is_python_3():
  """
  Checks if we're in the 3.0 - 3.x range.

  :returns: **True** if we meet this requirement and **False** otherwise
  """

  return sys.version_info[0] == 3


@lru_cache()
def is_crypto_available():
  """
  Checks if the pycrypto functions we use are available. This is used for
  verifying relay descriptor signatures.

  :returns: **True** if we can use pycrypto and **False** otherwise
  """

  from stem.util import log

  try:
    from Crypto.PublicKey import RSA
    from Crypto.Util import asn1
    from Crypto.Util.number import long_to_bytes

    return True
  except ImportError:
    log.log_once('stem.prereq.is_crypto_available', log.INFO, CRYPTO_UNAVAILABLE)
    return False


@lru_cache()
def is_mock_available():
  """
  Checks if the mock module is available. In python 3.3 and up it is a builtin
  unittest module, but before this it needed to be `installed separately
  <https://pypi.python.org/pypi/mock/>`_. Imports should be as follows...

  ::

    try:
      # added in python 3.3
      from unittest.mock import Mock
    except ImportError:
      from mock import Mock

  :returns: **True** if the mock module is available and **False** otherwise
  """

  try:
    # checks for python 3.3 version
    import unittest.mock

    return True
  except ImportError:
    pass

  try:
    import mock

    # check for mock's patch.dict() which was introduced in version 0.7.0

    if not hasattr(mock.patch, 'dict'):
      raise ImportError()

    # check for mock's new_callable argument for patch() which was introduced in version 0.8.0

    if 'new_callable' not in inspect.getargspec(mock.patch).args:
      raise ImportError()

    return True
  except ImportError:
    return False
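# A minimal usage sketch for the checks above, assuming only that stem is
# importable; the pycrypto branch simply degrades when the module is absent:

import stem.prereq

stem.prereq.check_requirements()  # raises ImportError on python < 2.6

if stem.prereq.is_crypto_available():
  pass  # pycrypto is present, so descriptor signatures can be verified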
272  Shared/lib/python3.4/site-packages/stem/process.py  Normal file
@@ -0,0 +1,272 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Helper functions for working with tor as a process.

:NO_TORRC:
  when provided as a torrc_path tor is run with a blank configuration

:DEFAULT_INIT_TIMEOUT:
  number of seconds before we time out our attempt to start a tor instance

**Module Overview:**

::

  launch_tor - starts up a tor process
  launch_tor_with_config - starts a tor process with a custom torrc
"""

import os
import re
import signal
import subprocess
import tempfile

import stem.prereq
import stem.util.str_tools
import stem.util.system
import stem.version

NO_TORRC = '<no torrc>'
DEFAULT_INIT_TIMEOUT = 90


def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, stdin = None):
  """
  Initializes a tor process. This blocks until initialization completes or we
  error out.

  If tor's data directory is missing or stale then bootstrapping will include
  making several requests to the directory authorities which can take a little
  while. Usually this is done in 50 seconds or so, but occasionally calls seem
  to get stuck, taking well over the default timeout.

  **To work tor must log at NOTICE runlevel to stdout.** It does this by
  default, but if you have a 'Log' entry in your torrc then you'll also need
  'Log NOTICE stdout'.

  Note: The timeout argument does not work on Windows, and relies on the global
  state of the signal module.

  :param str tor_cmd: command for starting tor
  :param list args: additional arguments for tor
  :param str torrc_path: location of the torrc for us to use
  :param int completion_percent: percent of bootstrap completion at which
    this'll return
  :param functor init_msg_handler: optional functor that will be provided with
    tor's initialization stdout as we get it
  :param int timeout: time after which the attempt to start tor is aborted, no
    timeouts are applied if **None**
  :param bool take_ownership: asserts ownership over the tor process so it
    aborts if this python process terminates or a :class:`~stem.control.Controller`
    we establish to it disconnects
  :param str stdin: content to provide on stdin

  :returns: **subprocess.Popen** instance for the tor subprocess

  :raises: **OSError** if we either fail to create the tor process or reached a
    timeout without success
  """

  if stem.util.system.is_windows():
    timeout = None

  # sanity check that we got a tor binary

  if os.path.sep in tor_cmd:
    # got a path (either relative or absolute), check what it leads to

    if os.path.isdir(tor_cmd):
      raise OSError("'%s' is a directory, not the tor executable" % tor_cmd)
    elif not os.path.isfile(tor_cmd):
      raise OSError("'%s' doesn't exist" % tor_cmd)
  elif not stem.util.system.is_available(tor_cmd):
    raise OSError("'%s' isn't available on your system. Maybe it's not in your PATH?" % tor_cmd)

  # double check that we have a torrc to work with
  if torrc_path not in (None, NO_TORRC) and not os.path.exists(torrc_path):
    raise OSError("torrc doesn't exist (%s)" % torrc_path)

  # starts a tor subprocess, raising an OSError if it fails
  runtime_args, temp_file = [tor_cmd], None

  if args:
    runtime_args += args

  if torrc_path:
    if torrc_path == NO_TORRC:
      temp_file = tempfile.mkstemp(prefix = 'empty-torrc-', text = True)[1]
      runtime_args += ['-f', temp_file]
    else:
      runtime_args += ['-f', torrc_path]

  if take_ownership:
    runtime_args += ['__OwningControllerProcess', str(os.getpid())]

  tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE)

  if stdin:
    tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin))
    tor_process.stdin.close()

  if timeout:
    def timeout_handler(signum, frame):
      # terminates the uninitialized tor process and raise on timeout

      tor_process.kill()
      raise OSError('reached a %i second timeout without success' % timeout)

    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(timeout)

  bootstrap_line = re.compile('Bootstrapped ([0-9]+)%: ')
  problem_line = re.compile(r'\[(warn|err)\] (.*)$')
  last_problem = 'Timed out'

  try:
    while True:
      # Tor's stdout will be read as ASCII bytes. This is fine for python 2, but
      # in python 3 that means it'll mismatch with other operations (for instance
      # the bootstrap_line.search() call later will fail).
      #
      # It seems like python 2.x is perfectly happy for this to be unicode, so
      # normalizing to that.

      init_line = tor_process.stdout.readline().decode('utf-8', 'replace').strip()

      # this will provide empty results if the process is terminated

      if not init_line:
        tor_process.kill()  # ... but best make sure
        raise OSError('Process terminated: %s' % last_problem)

      # provide the caller with the initialization message if they want it

      if init_msg_handler:
        init_msg_handler(init_line)

      # return the process if we're done with bootstrapping

      bootstrap_match = bootstrap_line.search(init_line)
      problem_match = problem_line.search(init_line)

      if bootstrap_match and int(bootstrap_match.group(1)) >= completion_percent:
        return tor_process
      elif problem_match:
        runlevel, msg = problem_match.groups()

        if 'see warnings above' not in msg:
          if ': ' in msg:
            msg = msg.split(': ')[-1].strip()

          last_problem = msg
  finally:
    if timeout:
      signal.alarm(0)  # stop alarm

    tor_process.stdout.close()
    tor_process.stderr.close()

    if temp_file:
      try:
        os.remove(temp_file)
      except:
        pass


def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False):
  """
  Initializes a tor process, like :func:`~stem.process.launch_tor`, but with a
  customized configuration. This writes a temporary torrc to disk, launches
  tor, then deletes the torrc.

  For example...

  ::

    tor_process = stem.process.launch_tor_with_config(
      config = {
        'ControlPort': '2778',
        'Log': [
          'NOTICE stdout',
          'ERR file /tmp/tor_error_log',
        ],
      },
    )

  :param dict config: configuration options, such as "{'ControlPort': '9051'}",
    values can either be a **str** or a **list of str** for multiple values
  :param str tor_cmd: command for starting tor
  :param int completion_percent: percent of bootstrap completion at which
    this'll return
  :param functor init_msg_handler: optional functor that will be provided with
    tor's initialization stdout as we get it
  :param int timeout: time after which the attempt to start tor is aborted, no
    timeouts are applied if **None**
  :param bool take_ownership: asserts ownership over the tor process so it
    aborts if this python process terminates or a :class:`~stem.control.Controller`
    we establish to it disconnects

  :returns: **subprocess.Popen** instance for the tor subprocess

  :raises: **OSError** if we either fail to create the tor process or reached a
    timeout without success
  """

  # TODO: Drop this version check when tor 0.2.6.3 or higher is the only game
  # in town.

  try:
    use_stdin = stem.version.get_system_tor_version(tor_cmd) >= stem.version.Requirement.TORRC_VIA_STDIN
  except IOError:
    use_stdin = False

  # we need to be sure that we're logging to stdout to figure out when we're
  # done bootstrapping

  if 'Log' in config:
    stdout_options = ['DEBUG stdout', 'INFO stdout', 'NOTICE stdout']

    if isinstance(config['Log'], str):
      config['Log'] = [config['Log']]

    has_stdout = False

    for log_config in config['Log']:
      if log_config in stdout_options:
        has_stdout = True
        break

    if not has_stdout:
      config['Log'].append('NOTICE stdout')

  config_str = ''

  for key, values in list(config.items()):
    if isinstance(values, str):
      config_str += '%s %s\n' % (key, values)
    else:
      for value in values:
        config_str += '%s %s\n' % (key, value)

  if use_stdin:
    return launch_tor(tor_cmd, ['-f', '-'], None, completion_percent, init_msg_handler, timeout, take_ownership, stdin = config_str)
  else:
    torrc_descriptor, torrc_path = tempfile.mkstemp(prefix = 'torrc-', text = True)

    try:
      with open(torrc_path, 'w') as torrc_file:
        torrc_file.write(config_str)

      # prevents tor from erroring out due to a missing torrc if it gets a sighup
      args = ['__ReloadTorrcOnSIGHUP', '0']

      return launch_tor(tor_cmd, args, torrc_path, completion_percent, init_msg_handler, timeout, take_ownership)
    finally:
      try:
        os.close(torrc_descriptor)
        os.remove(torrc_path)
      except:
        pass
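# A minimal usage sketch for launch_tor_with_config(), assuming a 'tor'
# binary is in PATH; the ports are arbitrary example values:

import stem.process

tor_process = stem.process.launch_tor_with_config(
  config = {'SocksPort': '9050', 'ControlPort': '9051'},
  init_msg_handler = print,  # watch bootstrap progress on stdout
)

try:
  pass  # interact with tor via its control port here
finally:
  tor_process.kill()  # clean up the temporary tor instance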
588  Shared/lib/python3.4/site-packages/stem/response/__init__.py  Normal file
@@ -0,0 +1,588 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Parses replies from the control socket.

**Module Overview:**

::

  convert - translates a ControlMessage into a particular response subclass

  ControlMessage - Message that's read from the control socket.
    |- SingleLineResponse - Simple tor response only including a single line of information.
    |
    |- from_str - provides a ControlMessage for the given string
    |- is_ok - response had a 250 status
    |- content - provides the parsed message content
    |- raw_content - unparsed socket data
    |- __str__ - content stripped of protocol formatting
    +- __iter__ - ControlLine entries for the content of the message

  ControlLine - String subclass with methods for parsing controller responses.
    |- remainder - provides the unparsed content
    |- is_empty - checks if the remaining content is empty
    |- is_next_quoted - checks if the next entry is a quoted value
    |- is_next_mapping - checks if the next entry is a KEY=VALUE mapping
    |- peek_key - provides the key of the next entry
    |- pop - removes and returns the next entry
    +- pop_mapping - removes and returns the next entry as a KEY=VALUE mapping
"""

__all__ = [
  'add_onion',
  'events',
  'getinfo',
  'getconf',
  'protocolinfo',
  'authchallenge',
  'convert',
  'ControlMessage',
  'ControlLine',
  'SingleLineResponse',
]

import re
import threading

try:
  from StringIO import StringIO
except ImportError:
  from io import StringIO

import stem.prereq
import stem.socket
import stem.util.str_tools

KEY_ARG = re.compile(r'^(\S+)=')

# Escape sequences from the 'esc_for_log' function of tor's 'common/util.c'.
# It's hard to tell what controller functions use this in practice, but direct
# users are...
#   - 'COOKIEFILE' field of PROTOCOLINFO responses
#   - logged messages about bugs
#   - the 'getinfo_helper_listeners' function of control.c

CONTROL_ESCAPES = {r'\\': '\\', r'\"': '\"', r'\'': '\'',
                   r'\r': '\r', r'\n': '\n', r'\t': '\t'}


def convert(response_type, message, **kwargs):
  """
  Converts a :class:`~stem.response.ControlMessage` into a particular kind of
  tor response. This does an in-place conversion of the message from being a
  :class:`~stem.response.ControlMessage` to a subclass for its response type.
  Recognized types include...

  =================== =====
  response_type       Class
  =================== =====
  **GETINFO**         :class:`stem.response.getinfo.GetInfoResponse`
  **GETCONF**         :class:`stem.response.getconf.GetConfResponse`
  **MAPADDRESS**      :class:`stem.response.mapaddress.MapAddressResponse`
  **EVENT**           :class:`stem.response.events.Event` subclass
  **PROTOCOLINFO**    :class:`stem.response.protocolinfo.ProtocolInfoResponse`
  **AUTHCHALLENGE**   :class:`stem.response.authchallenge.AuthChallengeResponse`
  **SINGLELINE**      :class:`stem.response.SingleLineResponse`
  =================== =====

  :param str response_type: type of tor response to convert to
  :param stem.response.ControlMessage message: message to be converted
  :param kwargs: optional keyword arguments to be passed to the parser method

  :raises:
    * :class:`stem.ProtocolError` if the message isn't a proper response of
      that type
    * :class:`stem.InvalidArguments` if the arguments given as input are
      invalid, this can only be raised if the response_type is: **GETINFO**,
      **GETCONF**
    * :class:`stem.InvalidRequest` if the arguments given as input are
      invalid, this can only be raised if the response_type is:
      **MAPADDRESS**
    * :class:`stem.OperationFailed` if the action the event represents failed,
      this can only be raised if the response_type is: **MAPADDRESS**
    * **TypeError** if argument isn't a :class:`~stem.response.ControlMessage`
      or response_type isn't supported
  """

  import stem.response.add_onion
  import stem.response.authchallenge
  import stem.response.events
  import stem.response.getinfo
  import stem.response.getconf
  import stem.response.mapaddress
  import stem.response.protocolinfo

  if not isinstance(message, ControlMessage):
    raise TypeError('Only able to convert stem.response.ControlMessage instances')

  response_types = {
    'ADD_ONION': stem.response.add_onion.AddOnionResponse,
    'AUTHCHALLENGE': stem.response.authchallenge.AuthChallengeResponse,
    'EVENT': stem.response.events.Event,
    'GETINFO': stem.response.getinfo.GetInfoResponse,
    'GETCONF': stem.response.getconf.GetConfResponse,
    'MAPADDRESS': stem.response.mapaddress.MapAddressResponse,
    'SINGLELINE': SingleLineResponse,
    'PROTOCOLINFO': stem.response.protocolinfo.ProtocolInfoResponse,
  }

  try:
    response_class = response_types[response_type]
  except KeyError:
    # a missing dict key raises KeyError, which we rebrand as a TypeError
    raise TypeError('Unsupported response type: %s' % response_type)

  message.__class__ = response_class
  message._parse_message(**kwargs)
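# A minimal sketch of convert() in action; the handcrafted reply below uses
# the CRLF framing that stem.socket.recv_message() expects:

import stem.response

msg = stem.response.ControlMessage.from_str('250 OK\r\n', 'SINGLELINE')
print(msg.is_ok())  # True; from_str() parses the reply then calls convert()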


class ControlMessage(object):
  """
  Message from the control socket. This is iterable and can be stringified for
  individual message components stripped of protocol formatting. Messages are
  never empty.
  """

  @staticmethod
  def from_str(content, msg_type = None, **kwargs):
    """
    Provides a ControlMessage for the given content.

    .. versionadded:: 1.1.0

    :param str content: message to construct the message from
    :param str msg_type: type of tor reply to parse the content as
    :param kwargs: optional keyword arguments to be passed to the parser method

    :returns: stem.response.ControlMessage instance
    """

    msg = stem.socket.recv_message(StringIO(content))

    if msg_type is not None:
      convert(msg_type, msg, **kwargs)

    return msg

  def __init__(self, parsed_content, raw_content):
    if not parsed_content:
      raise ValueError("ControlMessages can't be empty")

    self._parsed_content = parsed_content
    self._raw_content = raw_content

  def is_ok(self):
    """
    Checks if any of our lines have a 250 response.

    :returns: **True** if any lines have a 250 response code, **False** otherwise
    """

    for code, _, _ in self._parsed_content:
      if code == '250':
        return True

    return False

  def content(self, get_bytes = False):
    """
    Provides the parsed message content. These are entries of the form...

    ::

      (status_code, divider, content)

    **status_code**
      Three character code for the type of response (defined in section 4 of
      the control-spec).

    **divider**
      Single character to indicate if this is mid-reply, data, or an end to the
      message (defined in section 2.3 of the control-spec).

    **content**
      The following content is the actual payload of the line.

    For data entries the content is the full multi-line payload with newline
    linebreaks and leading periods unescaped.

    The **status_code** and **divider** are both strings (**bytes** in python
    2.x and **unicode** in python 3.x). The **content** however is **bytes** if
    **get_bytes** is **True**.

    .. versionchanged:: 1.1.0
       Added the get_bytes argument.

    :param bool get_bytes: provides **bytes** for the **content** rather than a **str**

    :returns: **list** of (str, str, str) tuples for the components of this message
    """

    if stem.prereq.is_python_3() and not get_bytes:
      return [(code, div, stem.util.str_tools._to_unicode(content)) for (code, div, content) in self._parsed_content]
    else:
      return list(self._parsed_content)

  def raw_content(self, get_bytes = False):
    """
    Provides the unparsed content read from the control socket.

    .. versionchanged:: 1.1.0
       Added the get_bytes argument.

    :param bool get_bytes: if **True** then this provides **bytes** rather than a **str**

    :returns: **str** of the socket data used to generate this message
    """

    if stem.prereq.is_python_3() and not get_bytes:
      return stem.util.str_tools._to_unicode(self._raw_content)
    else:
      return self._raw_content

  def __str__(self):
    """
    Content of the message, stripped of status code and divider protocol
    formatting.
    """

    return '\n'.join(list(self))

  def __iter__(self):
    """
    Provides :class:`~stem.response.ControlLine` instances for the content of
    the message. This is stripped of status codes and dividers, for instance...

    ::

      250+info/names=
      desc/id/* -- Router descriptors by ID.
      desc/name/* -- Router descriptors by nickname.
      .
      250 OK

    Would provide two entries...

    ::

      1st - "info/names=
      desc/id/* -- Router descriptors by ID.
      desc/name/* -- Router descriptors by nickname."
      2nd - "OK"
    """

    for _, _, content in self._parsed_content:
      if stem.prereq.is_python_3():
        content = stem.util.str_tools._to_unicode(content)

      yield ControlLine(content)

  def __len__(self):
    """
    :returns: number of ControlLines
    """

    return len(self._parsed_content)

  def __getitem__(self, index):
    """
    :returns: :class:`~stem.response.ControlLine` at the index
    """

    content = self._parsed_content[index][2]

    if stem.prereq.is_python_3():
      content = stem.util.str_tools._to_unicode(content)

    return ControlLine(content)


class ControlLine(str):
  """
  String subclass that represents a line of controller output. This behaves as
  a normal string with additional methods for parsing and popping entries from
  a space delimited series of elements like a stack.

  None of these additional methods affect ourselves as a string (which is still
  immutable). All methods are thread safe.
  """

  def __new__(self, value):
    return str.__new__(self, value)

  def __init__(self, value):
    self._remainder = value
    self._remainder_lock = threading.RLock()

  def remainder(self):
    """
    Provides our unparsed content. This is an empty string after we've popped
    all entries.

    :returns: **str** of the unparsed content
    """

    return self._remainder

  def is_empty(self):
    """
    Checks if we have further content to pop or not.

    :returns: **True** if we have additional content, **False** otherwise
    """

    return self._remainder == ''

  def is_next_quoted(self, escaped = False):
    """
    Checks if our next entry is a quoted value or not.

    :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences

    :returns: **True** if the next entry can be parsed as a quoted value, **False** otherwise
    """

    start_quote, end_quote = _get_quote_indices(self._remainder, escaped)
    return start_quote == 0 and end_quote != -1

  def is_next_mapping(self, key = None, quoted = False, escaped = False):
    """
    Checks if our next entry is a KEY=VALUE mapping or not.

    :param str key: checks that the key matches this value, skipping the check if **None**
    :param bool quoted: checks that the mapping is to a quoted value
    :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences

    :returns: **True** if the next entry can be parsed as a key=value mapping,
      **False** otherwise
    """

    remainder = self._remainder  # temp copy to avoid locking
    key_match = KEY_ARG.match(remainder)

    if key_match:
      if key and key != key_match.groups()[0]:
        return False

      if quoted:
        # checks that we have a quoted value and that it comes after the 'key='
        start_quote, end_quote = _get_quote_indices(remainder, escaped)
        return start_quote == key_match.end() and end_quote != -1
      else:
        return True  # we just needed to check for the key
    else:
      return False  # doesn't start with a key

  def peek_key(self):
    """
    Provides the key of the next entry, providing **None** if it isn't a
    key/value mapping.

    :returns: **str** with the next entry's key
    """

    remainder = self._remainder
    key_match = KEY_ARG.match(remainder)

    if key_match:
      return key_match.groups()[0]
    else:
      return None

  def pop(self, quoted = False, escaped = False):
    """
    Parses the next space separated entry, removing it and the space from our
    remaining content. Examples...

    ::

      >>> line = ControlLine("\\"We're all mad here.\\" says the grinning cat.")
      >>> print line.pop(True)
      "We're all mad here."
      >>> print line.pop()
      "says"
      >>> print line.remainder()
      "the grinning cat."

      >>> line = ControlLine("\\"this has a \\\\\\" and \\\\\\\\ in it\\" foo=bar more_data")
      >>> print line.pop(True, True)
      "this has a \\" and \\\\ in it"

    :param bool quoted: parses the next entry as a quoted value, removing the quotes
    :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences

    :returns: **str** of the next space separated entry

    :raises:
      * **ValueError** if quoted is True without the value being quoted
      * **IndexError** if we don't have any remaining content left to parse
    """

    with self._remainder_lock:
      next_entry, remainder = _parse_entry(self._remainder, quoted, escaped)
      self._remainder = remainder
      return next_entry

  def pop_mapping(self, quoted = False, escaped = False):
    """
    Parses the next space separated entry as a KEY=VALUE mapping, removing it
    and the space from our remaining content.

    :param bool quoted: parses the value as being quoted, removing the quotes
    :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences

    :returns: **tuple** of the form (key, value)

    :raises: **ValueError** if this isn't a KEY=VALUE mapping or if quoted is
      **True** without the value being quoted
    :raises: **IndexError** if there's nothing to parse from the line
    """

    with self._remainder_lock:
      if self.is_empty():
        raise IndexError('no remaining content to parse')

      key_match = KEY_ARG.match(self._remainder)

      if not key_match:
        raise ValueError("the next entry isn't a KEY=VALUE mapping: " + self._remainder)

      # parse off the key
      key = key_match.groups()[0]
      remainder = self._remainder[key_match.end():]

      next_entry, remainder = _parse_entry(remainder, quoted, escaped)
      self._remainder = remainder
      return (key, next_entry)
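# A minimal sketch of pop() and pop_mapping(), using an AUTHCHALLENGE-style
# line as illustrative input (hash values shortened for readability):

line = ControlLine('AUTHCHALLENGE SERVERHASH=680A73C9 SERVERNONCE=F8EA4B1F')
print(line.pop())          # 'AUTHCHALLENGE'
print(line.pop_mapping())  # ('SERVERHASH', '680A73C9')
print(line.remainder())    # 'SERVERNONCE=F8EA4B1F'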


def _parse_entry(line, quoted, escaped):
  """
  Parses the next entry from the given space separated content.

  :param str line: content to be parsed
  :param bool quoted: parses the next entry as a quoted value, removing the quotes
  :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences

  :returns: **tuple** of the form (entry, remainder)

  :raises:
    * **ValueError** if quoted is True without the next value being quoted
    * **IndexError** if there's nothing to parse from the line
  """

  if line == '':
    raise IndexError('no remaining content to parse')

  next_entry, remainder = '', line

  if quoted:
    # validate and parse the quoted value
    start_quote, end_quote = _get_quote_indices(remainder, escaped)

    if start_quote != 0 or end_quote == -1:
      raise ValueError("the next entry isn't a quoted value: " + line)

    next_entry, remainder = remainder[1:end_quote], remainder[end_quote + 1:]
  else:
    # non-quoted value, just need to check if there's more data afterward
    if ' ' in remainder:
      next_entry, remainder = remainder.split(' ', 1)
    else:
      next_entry, remainder = remainder, ''

  if escaped:
    next_entry = _unescape(next_entry)

  return (next_entry, remainder.lstrip())


def _get_quote_indices(line, escaped):
  """
  Provides the indices of the next two quotes in the given content.

  :param str line: content to be parsed
  :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences

  :returns: **tuple** of two ints, indices being -1 if a quote doesn't exist
  """

  indices, quote_index = [], -1

  for _ in range(2):
    quote_index = line.find('"', quote_index + 1)

    # if we have escapes then we need to skip any r'\"' entries
    if escaped:
      # skip check if index is -1 (no match) or 0 (first character)
      while quote_index >= 1 and line[quote_index - 1] == '\\':
        quote_index = line.find('"', quote_index + 1)

    indices.append(quote_index)

  return tuple(indices)


def _unescape(entry):
  # Unescapes the given string with the mappings in CONTROL_ESCAPES.
  #
  # This can't be a simple series of str.replace() calls because replacements
  # need to be excluded from consideration for further unescaping. For
  # instance, '\\t' should be converted to '\t' rather than a tab.

  def _pop_with_unescape(entry):
    # Pop either the first character or the escape sequence conversion the
    # entry starts with. This provides a tuple of...
    #
    #   (unescaped prefix, remaining entry)

    for esc_sequence, replacement in CONTROL_ESCAPES.items():
      if entry.startswith(esc_sequence):
        return (replacement, entry[len(esc_sequence):])

    return (entry[0], entry[1:])

  result = []

  while entry:
    prefix, entry = _pop_with_unescape(entry)
    result.append(prefix)

  return ''.join(result)
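# A small illustration of why _unescape() can't be a flat series of
# str.replace() calls, following the CONTROL_ESCAPES table above:

print(repr(_unescape(r'\t')))    # '\t' - a real tab character
print(repr(_unescape('\\\\t')))  # '\\t' - a literal backslash then 't', not a tab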


class SingleLineResponse(ControlMessage):
  """
  Reply to a request that performs an action rather than querying data. These
  replies only contain a single line, which is 'OK' if successful, and a
  description of the problem if not.

  :var str code: status code for our line
  :var str message: content of the line
  """

  def is_ok(self, strict = False):
    """
    Checks if the response code is "250". If strict is **True** then this
    checks if the response is "250 OK".

    :param bool strict: checks for a "250 OK" message if **True**

    :returns:
      * If strict is **False**: **True** if the response code is "250", **False** otherwise
      * If strict is **True**: **True** if the response is "250 OK", **False** otherwise
    """

    if strict:
      return self.content()[0] == ('250', ' ', 'OK')

    return self.content()[0][0] == '250'

  def _parse_message(self):
    content = self.content()

    if len(content) > 1:
      raise stem.ProtocolError('Received multi-line response')
    elif len(content) == 0:
      raise stem.ProtocolError('Received empty response')
    else:
      self.code, _, self.message = content[0]
43  Shared/lib/python3.4/site-packages/stem/response/add_onion.py  Normal file
@@ -0,0 +1,43 @@
# Copyright 2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

import stem.response


class AddOnionResponse(stem.response.ControlMessage):
  """
  ADD_ONION response.

  :var str service_id: hidden service address without the '.onion' suffix
  :var str private_key: base64 encoded hidden service private key
  :var str private_key_type: crypto used to generate the hidden service private
    key (such as RSA1024)
  """

  def _parse_message(self):
    # Example:
    #   250-ServiceID=gfzprpioee3hoppz
    #   250-PrivateKey=RSA1024:MIICXgIBAAKBgQDZvYVxv...
    #   250 OK

    self.service_id = None
    self.private_key = None
    self.private_key_type = None

    if not self.is_ok():
      raise stem.ProtocolError("ADD_ONION response didn't have an OK status: %s" % self)

    if not str(self).startswith('ServiceID='):
      raise stem.ProtocolError('ADD_ONION response should start with the service id: %s' % self)

    for line in list(self):
      if '=' in line:
        key, value = line.split('=', 1)

        if key == 'ServiceID':
          self.service_id = value
        elif key == 'PrivateKey':
          if ':' not in value:
            raise stem.ProtocolError("ADD_ONION PrivateKey lines should be of the form 'PrivateKey=[type]:[key]': %s" % self)

          self.private_key_type, self.private_key = value.split(':', 1)
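# A minimal parsing sketch using the reply format from the example comment
# above (the key material is truncated and purely illustrative):

import stem.response

raw = '250-ServiceID=gfzprpioee3hoppz\r\n250-PrivateKey=RSA1024:MIICXgIBAAKBgQDZvYVxv\r\n250 OK\r\n'
reply = stem.response.ControlMessage.from_str(raw, 'ADD_ONION')
print(reply.service_id)        # gfzprpioee3hoppz
print(reply.private_key_type)  # RSA1024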
56  Shared/lib/python3.4/site-packages/stem/response/authchallenge.py  Normal file
@@ -0,0 +1,56 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

import binascii

import stem.response
import stem.socket
import stem.util.str_tools
import stem.util.tor_tools


class AuthChallengeResponse(stem.response.ControlMessage):
  """
  AUTHCHALLENGE query response.

  :var str server_hash: server hash provided by tor
  :var str server_nonce: server nonce provided by tor
  """

  def _parse_message(self):
    # Example:
    #   250 AUTHCHALLENGE SERVERHASH=680A73C9836C4F557314EA1C4EDE54C285DB9DC89C83627401AEF9D7D27A95D5 SERVERNONCE=F8EA4B1F2C8B40EF1AF68860171605B910E3BBCABADF6FC3DB1FA064F4690E85

    self.server_hash = None
    self.server_nonce = None

    if not self.is_ok():
      raise stem.ProtocolError("AUTHCHALLENGE response didn't have an OK status:\n%s" % self)
    elif len(self) > 1:
      raise stem.ProtocolError('Received multiline AUTHCHALLENGE response:\n%s' % self)

    line = self[0]

    # sanity check that we're an AUTHCHALLENGE response
    if not line.pop() == 'AUTHCHALLENGE':
      raise stem.ProtocolError('Message is not an AUTHCHALLENGE response (%s)' % self)

    if line.is_next_mapping('SERVERHASH'):
      value = line.pop_mapping()[1]

      if not stem.util.tor_tools.is_hex_digits(value, 64):
        raise stem.ProtocolError('SERVERHASH has an invalid value: %s' % value)

      self.server_hash = binascii.a2b_hex(stem.util.str_tools._to_bytes(value))
    else:
      raise stem.ProtocolError('Missing SERVERHASH mapping: %s' % line)

    if line.is_next_mapping('SERVERNONCE'):
      value = line.pop_mapping()[1]

      if not stem.util.tor_tools.is_hex_digits(value, 64):
        raise stem.ProtocolError('SERVERNONCE has an invalid value: %s' % value)

      self.server_nonce = binascii.a2b_hex(stem.util.str_tools._to_bytes(value))
    else:
      raise stem.ProtocolError('Missing SERVERNONCE mapping: %s' % line)
1331  Shared/lib/python3.4/site-packages/stem/response/events.py  Normal file
(File diff suppressed because it is too large)
55  Shared/lib/python3.4/site-packages/stem/response/getconf.py  Normal file
@@ -0,0 +1,55 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

import stem.response
import stem.socket


class GetConfResponse(stem.response.ControlMessage):
  """
  Reply for a GETCONF query.

  Note that configuration parameters won't match what we queried for if it's one
  of the special mapping options (ex. 'HiddenServiceOptions').

  :var dict entries: mapping between the config parameter (**str**) and their
    values (**list** of **str**)
  """

  def _parse_message(self):
    # Example:
    #   250-CookieAuthentication=0
    #   250-ControlPort=9100
    #   250-DataDirectory=/home/neena/.tor
    #   250 DirPort

    self.entries = {}
    remaining_lines = list(self)

    if self.content() == [('250', ' ', 'OK')]:
      return

    if not self.is_ok():
      unrecognized_keywords = []

      for code, _, line in self.content():
        if code == '552' and line.startswith('Unrecognized configuration key "') and line.endswith('"'):
          unrecognized_keywords.append(line[32:-1])

      if unrecognized_keywords:
        raise stem.InvalidArguments('552', 'GETCONF request contained unrecognized keywords: %s' % ', '.join(unrecognized_keywords), unrecognized_keywords)
      else:
        raise stem.ProtocolError('GETCONF response contained a non-OK status code:\n%s' % self)

    while remaining_lines:
      line = remaining_lines.pop(0)

      if line.is_next_mapping():
        key, value = line.split('=', 1)
      else:
        key, value = (line.pop(), None)

      if key not in self.entries:
        self.entries[key] = []

      if value is not None:
        self.entries[key].append(value)
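# A minimal parsing sketch fed the reply format from the example above:

import stem.response

raw = '250-CookieAuthentication=0\r\n250-ControlPort=9100\r\n250 DirPort\r\n'
reply = stem.response.ControlMessage.from_str(raw, 'GETCONF')
print(reply.entries)  # {'CookieAuthentication': ['0'], 'ControlPort': ['9100'], 'DirPort': []}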
78  Shared/lib/python3.4/site-packages/stem/response/getinfo.py  Normal file
@@ -0,0 +1,78 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

import stem.prereq
import stem.response
import stem.socket
import stem.util.str_tools


class GetInfoResponse(stem.response.ControlMessage):
  """
  Reply for a GETINFO query.

  :var dict entries: mapping between the queried options and their bytes values
  """

  def _parse_message(self):
    # Example:
    # 250-version=0.2.3.11-alpha-dev (git-ef0bc7f8f26a917c)
    # 250+config-text=
    # ControlPort 9051
    # DataDirectory /home/atagar/.tor
    # ExitPolicy reject *:*
    # Log notice stdout
    # Nickname Unnamed
    # ORPort 9050
    # .
    # 250 OK

    self.entries = {}
    remaining_lines = [content for (code, div, content) in self.content(get_bytes = True)]

    if not self.is_ok() or not remaining_lines.pop() == b'OK':
      unrecognized_keywords = []

      for code, _, line in self.content():
        if code == '552' and line.startswith('Unrecognized key "') and line.endswith('"'):
          unrecognized_keywords.append(line[18:-1])

      if unrecognized_keywords:
        raise stem.InvalidArguments('552', 'GETINFO request contained unrecognized keywords: %s\n' % ', '.join(unrecognized_keywords), unrecognized_keywords)
      else:
        raise stem.ProtocolError("GETINFO response didn't have an OK status:\n%s" % self)

    while remaining_lines:
      try:
        key, value = remaining_lines.pop(0).split(b'=', 1)
      except ValueError:
        raise stem.ProtocolError('GETINFO replies should only contain parameter=value mappings:\n%s' % self)

      if stem.prereq.is_python_3():
        key = stem.util.str_tools._to_unicode(key)

      # if the value is a multiline value then it *must* be of the form
      # '<key>=\n<value>'

      if b'\n' in value:
        if not value.startswith(b'\n'):
          raise stem.ProtocolError("GETINFO response contained a multi-line value that didn't start with a newline:\n%s" % self)

        value = value[1:]

      self.entries[key] = value

  def _assert_matches(self, params):
    """
    Checks if we match a given set of parameters, raising a ProtocolError if not.

    :param set params: parameters to assert that we contain

    :raises:
      * :class:`stem.ProtocolError` if parameters don't match this response
    """

    reply_params = set(self.entries.keys())

    if params != reply_params:
      requested_label = ', '.join(params)
      reply_label = ', '.join(reply_params)

      raise stem.ProtocolError("GETINFO reply doesn't match the parameters that we requested. Queried '%s' but got '%s'." % (requested_label, reply_label))
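This parser is what backs a controller's GETINFO calls; a minimal raw sketch (same hypothetical socket setup as the GETCONF example above) looks like::

  control_socket.send('GETINFO version')
  reply = control_socket.recv()
  stem.response.convert('GETINFO', reply)

  # keys are unicode under python 3 while values stay bytes, as parsed above
  print(reply.entries)  # e.g. {'version': b'0.2.4.10-alpha-dev (git-8be6058d8f31e578)'}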
@ -0,0 +1,42 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

import stem.response
import stem.socket


class MapAddressResponse(stem.response.ControlMessage):
  """
  Reply for a MAPADDRESS query.
  Doesn't raise an exception unless no addresses were mapped successfully.

  :var dict entries: mapping between the original and replacement addresses

  :raises:
    * :class:`stem.OperationFailed` if Tor was unable to satisfy the request
    * :class:`stem.InvalidRequest` if the addresses provided were invalid
  """

  def _parse_message(self):
    # Example:
    # 250-127.192.10.10=torproject.org
    # 250 1.2.3.4=tor.freehaven.net

    if not self.is_ok():
      for code, _, message in self.content():
        if code == '512':
          raise stem.InvalidRequest(code, message)
        elif code == '451':
          raise stem.OperationFailed(code, message)
        else:
          raise stem.ProtocolError('MAPADDRESS returned unexpected response code: %s' % code)

    self.entries = {}

    for code, _, message in self.content():
      if code == '250':
        try:
          key, value = message.split('=', 1)
          self.entries[key] = value
        except ValueError:
          raise stem.ProtocolError("MAPADDRESS returned '%s', which isn't a mapping" % message)
122 Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py Normal file
@ -0,0 +1,122 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

import stem.response
import stem.socket
import stem.version

from stem.connection import AuthMethod
from stem.util import log


class ProtocolInfoResponse(stem.response.ControlMessage):
  """
  Version one PROTOCOLINFO query response.

  The protocol_version is the only mandatory data for a valid PROTOCOLINFO
  response, so all other values are None if undefined or empty if a collection.

  :var int protocol_version: protocol version of the response
  :var stem.version.Version tor_version: version of the tor process
  :var tuple auth_methods: :data:`stem.connection.AuthMethod` types that tor will accept
  :var tuple unknown_auth_methods: strings of unrecognized auth methods
  :var str cookie_path: path of tor's authentication cookie
  """

  def _parse_message(self):
    # Example:
    # 250-PROTOCOLINFO 1
    # 250-AUTH METHODS=COOKIE COOKIEFILE="/home/atagar/.tor/control_auth_cookie"
    # 250-VERSION Tor="0.2.1.30"
    # 250 OK

    self.protocol_version = None
    self.tor_version = None
    self.auth_methods = ()
    self.unknown_auth_methods = ()
    self.cookie_path = None

    auth_methods, unknown_auth_methods = [], []
    remaining_lines = list(self)

    if not self.is_ok() or not remaining_lines.pop() == 'OK':
      raise stem.ProtocolError("PROTOCOLINFO response didn't have an OK status:\n%s" % self)

    # sanity check that we're a PROTOCOLINFO response
    if not remaining_lines[0].startswith('PROTOCOLINFO'):
      raise stem.ProtocolError('Message is not a PROTOCOLINFO response:\n%s' % self)

    while remaining_lines:
      line = remaining_lines.pop(0)
      line_type = line.pop()

      if line_type == 'PROTOCOLINFO':
        # Line format:
        #   FirstLine = "PROTOCOLINFO" SP PIVERSION CRLF
        #   PIVERSION = 1*DIGIT

        if line.is_empty():
          raise stem.ProtocolError("PROTOCOLINFO response's initial line is missing the protocol version: %s" % line)

        try:
          self.protocol_version = int(line.pop())
        except ValueError:
          raise stem.ProtocolError('PROTOCOLINFO response version is non-numeric: %s' % line)

        # The piversion really should be '1' but, according to the spec, tor
        # does not necessarily need to provide the PROTOCOLINFO version that we
        # requested. Log if it's something we aren't expecting but still make
        # an effort to parse like a v1 response.

        if self.protocol_version != 1:
          log.info("We made a PROTOCOLINFO version 1 query but got a version %i response instead. We'll still try to use it, but this may cause problems." % self.protocol_version)
      elif line_type == 'AUTH':
        # Line format:
        #   AuthLine = "250-AUTH" SP "METHODS=" AuthMethod *("," AuthMethod)
        #              *(SP "COOKIEFILE=" AuthCookieFile) CRLF
        #   AuthMethod = "NULL" / "HASHEDPASSWORD" / "COOKIE"
        #   AuthCookieFile = QuotedString

        # parse AuthMethod mapping
        if not line.is_next_mapping('METHODS'):
          raise stem.ProtocolError("PROTOCOLINFO response's AUTH line is missing its mandatory 'METHODS' mapping: %s" % line)

        for method in line.pop_mapping()[1].split(','):
          if method == 'NULL':
            auth_methods.append(AuthMethod.NONE)
          elif method == 'HASHEDPASSWORD':
            auth_methods.append(AuthMethod.PASSWORD)
          elif method == 'COOKIE':
            auth_methods.append(AuthMethod.COOKIE)
          elif method == 'SAFECOOKIE':
            auth_methods.append(AuthMethod.SAFECOOKIE)
          else:
            unknown_auth_methods.append(method)
            message_id = 'stem.response.protocolinfo.unknown_auth_%s' % method
            log.log_once(message_id, log.INFO, "PROTOCOLINFO response included a type of authentication that we don't recognize: %s" % method)

            # our auth_methods should have a single AuthMethod.UNKNOWN entry if
            # any unknown authentication methods exist
            if AuthMethod.UNKNOWN not in auth_methods:
              auth_methods.append(AuthMethod.UNKNOWN)

        # parse optional COOKIEFILE mapping (quoted and can have escapes)
        if line.is_next_mapping('COOKIEFILE', True, True):
          self.cookie_path = line.pop_mapping(True, True)[1]
      elif line_type == 'VERSION':
        # Line format:
        #   VersionLine = "250-VERSION" SP "Tor=" TorVersion OptArguments CRLF
        #   TorVersion = QuotedString

        if not line.is_next_mapping('Tor', True):
          raise stem.ProtocolError("PROTOCOLINFO response's VERSION line is missing its mandatory tor version mapping: %s" % line)

        try:
          self.tor_version = stem.version.Version(line.pop_mapping(True)[1])
        except ValueError as exc:
          raise stem.ProtocolError(exc)
      else:
        log.debug("Unrecognized PROTOCOLINFO line type '%s', ignoring it: %s" % (line_type, line))

    self.auth_methods = tuple(auth_methods)
    self.unknown_auth_methods = tuple(unknown_auth_methods)
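A sketch of exercising this parser directly; PROTOCOLINFO may be issued before authenticating, so an unauthenticated socket suffices (the values shown are hypothetical)::

  import stem.connection
  import stem.response
  import stem.socket

  control_socket = stem.socket.ControlPort(port = 9051)
  control_socket.send('PROTOCOLINFO 1')
  reply = control_socket.recv()
  stem.response.convert('PROTOCOLINFO', reply)

  print(reply.protocol_version)  # 1
  print(reply.cookie_path)       # e.g. '/home/atagar/.tor/control_auth_cookie'
  print(stem.connection.AuthMethod.COOKIE in reply.auth_methods)  # True for a cookie-auth tor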
663 Shared/lib/python3.4/site-packages/stem/socket.py Normal file
@ -0,0 +1,663 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Supports communication with sockets speaking the Tor control protocol. This
allows us to send messages as basic strings, and receive responses as
:class:`~stem.response.ControlMessage` instances.

**This module only consists of low level components, and is not intended for
users.** See our `tutorials <../tutorials.html>`_ and `Control Module
<control.html>`_ if you're new to Stem and looking to get started.

With that aside, these can still be used for raw socket communication with
Tor...

::

  import sys

  import stem
  import stem.connection
  import stem.socket

  if __name__ == '__main__':
    try:
      control_socket = stem.socket.ControlPort(port = 9051)
      stem.connection.authenticate(control_socket)
    except stem.SocketError as exc:
      print('Unable to connect to tor on port 9051: %s' % exc)
      sys.exit(1)
    except stem.connection.AuthenticationFailure as exc:
      print('Unable to authenticate: %s' % exc)
      sys.exit(1)

    print("Issuing 'GETINFO version' query...\\n")
    control_socket.send('GETINFO version')
    print(control_socket.recv())

::

  % python example.py
  Issuing 'GETINFO version' query...

  version=0.2.4.10-alpha-dev (git-8be6058d8f31e578)
  OK

**Module Overview:**

::

  ControlSocket - Socket wrapper that speaks the tor control protocol.
    |- ControlPort - Control connection via a port.
    |  |- get_address - provides the ip address of our socket
    |  +- get_port - provides the port of our socket
    |
    |- ControlSocketFile - Control connection via a local file socket.
    |  +- get_socket_path - provides the path of the socket we connect to
    |
    |- send - sends a message to the socket
    |- recv - receives a ControlMessage from the socket
    |- is_alive - reports if the socket is known to be closed
    |- is_localhost - returns if the socket is for the local system or not
    |- connect - connects a new socket
    |- close - shuts down the socket
    +- __enter__ / __exit__ - manages socket connection

  send_message - Writes a message to a control socket.
  recv_message - Reads a ControlMessage from a control socket.
  send_formatting - Performs the formatting expected from sent messages.
"""

from __future__ import absolute_import

import re
import socket
import threading
import time

import stem.prereq
import stem.response
import stem.util.str_tools

from stem.util import log
class ControlSocket(object):
  """
  Wrapper for a socket connection that speaks the Tor control protocol. For
  the most part this transparently handles the formatting for sending and
  receiving complete messages. All methods are thread safe.

  Callers should not instantiate this class directly, but rather use subclasses
  which are expected to implement the **_make_socket()** method.
  """

  def __init__(self):
    self._socket, self._socket_file = None, None
    self._is_alive = False
    self._connection_time = 0.0  # time when we last connected or disconnected

    # Tracks sending and receiving separately. This should be safe, and doing
    # so prevents deadlock where we block writes because we're waiting to read
    # a message that isn't coming.

    self._send_lock = threading.RLock()
    self._recv_lock = threading.RLock()

  def send(self, message, raw = False):
    """
    Formats and sends a message to the control socket. For more information see
    the :func:`~stem.socket.send_message` function.

    :param str message: message to be formatted and sent to the socket
    :param bool raw: leaves the message formatting untouched, passing it to the socket as-is

    :raises:
      * :class:`stem.SocketError` if a problem arises in using the socket
      * :class:`stem.SocketClosed` if the socket is known to be shut down
    """

    with self._send_lock:
      try:
        if not self.is_alive():
          raise stem.SocketClosed()

        send_message(self._socket_file, message, raw)
      except stem.SocketClosed as exc:
        # if send_message raises a SocketClosed then we should properly shut
        # everything down

        if self.is_alive():
          self.close()

        raise exc

  def recv(self):
    """
    Receives a message from the control socket, blocking until we've received
    one. For more information see the :func:`~stem.socket.recv_message` function.

    :returns: :class:`~stem.response.ControlMessage` for the message received

    :raises:
      * :class:`stem.ProtocolError` the content from the socket is malformed
      * :class:`stem.SocketClosed` if the socket closes before we receive a complete message
    """

    with self._recv_lock:
      try:
        # makes a temporary reference to the _socket_file because connect()
        # and close() may set or unset it

        socket_file = self._socket_file

        if not socket_file:
          raise stem.SocketClosed()

        return recv_message(socket_file)
      except stem.SocketClosed as exc:
        # If recv_message raises a SocketClosed then we should properly shut
        # everything down. However, there are a couple of cases where this
        # will cause deadlock...
        #
        # * this SocketClosed was *caused by* a close() call, which is joining
        #   on our thread
        #
        # * a send() call that's currently in flight is about to call close(),
        #   also attempting to join on us
        #
        # To resolve this we make a non-blocking call to acquire the send lock.
        # If we get it then great, we can close safely. If not then one of the
        # above are in progress and we leave the close to them.

        if self.is_alive():
          if self._send_lock.acquire(False):
            self.close()
            self._send_lock.release()

        raise exc

  def is_alive(self):
    """
    Checks if the socket is known to be closed. We won't be aware if it is
    until we either use it or have explicitly shut it down.

    In practice a socket derived from a port knows about its disconnection
    after a failed :func:`~stem.socket.ControlSocket.recv` call. Socket file
    derived connections know after either a
    :func:`~stem.socket.ControlSocket.send` or
    :func:`~stem.socket.ControlSocket.recv`.

    This means that to have reliable detection for when we're disconnected
    you need to continually pull from the socket (which is part of what the
    :class:`~stem.control.BaseController` does).

    :returns: **bool** that's **True** if our socket is connected and **False** otherwise
    """

    return self._is_alive

  def is_localhost(self):
    """
    Returns if the connection is for the local system or not.

    :returns: **bool** that's **True** if the connection is for the local host and **False** otherwise
    """

    return False

  def connection_time(self):
    """
    Provides the unix timestamp for when our socket was either connected or
    disconnected. That is to say, the time we connected if we're currently
    connected and the time we disconnected if we're not connected.

    .. versionadded:: 1.3.0

    :returns: **float** for when we last connected or disconnected, zero if
      we've never connected
    """

    return self._connection_time

  def connect(self):
    """
    Connects to a new socket, closing our previous one if we're already
    attached.

    :raises: :class:`stem.SocketError` if unable to make a socket
    """

    with self._send_lock:
      # Closes the socket if we're currently attached to one. Once we're no
      # longer alive it'll be safe to acquire the recv lock because recv()
      # calls no longer block (raising SocketClosed instead).

      if self.is_alive():
        self.close()

      with self._recv_lock:
        self._socket = self._make_socket()
        self._socket_file = self._socket.makefile(mode = 'rwb')
        self._is_alive = True
        self._connection_time = time.time()

        # It's possible for this to have a transient failure...
        # SocketError: [Errno 4] Interrupted system call
        #
        # It's safe to retry, so give it another try if it fails.

        try:
          self._connect()
        except stem.SocketError:
          self._connect()  # single retry

  def close(self):
    """
    Shuts down the socket. If it's already closed then this is a no-op.
    """

    with self._send_lock:
      # Function is idempotent with one exception: we notify _close() if this
      # is causing our is_alive() state to change.

      is_change = self.is_alive()

      if self._socket:
        # if we haven't yet established a connection then this raises an error
        # socket.error: [Errno 107] Transport endpoint is not connected

        try:
          self._socket.shutdown(socket.SHUT_RDWR)
        except socket.error:
          pass

        # Suppressing unexpected exceptions from close. For instance, if the
        # socket's file has already been closed then with python 2.7 that
        # raises with...
        # error: [Errno 32] Broken pipe

        try:
          self._socket.close()
        except:
          pass

      if self._socket_file:
        try:
          self._socket_file.close()
        except:
          pass

      self._socket = None
      self._socket_file = None
      self._is_alive = False
      self._connection_time = time.time()

      if is_change:
        self._close()

  def _get_send_lock(self):
    """
    The send lock is useful to classes that interact with us at a deep level
    because it's used to lock :func:`stem.socket.ControlSocket.connect` /
    :func:`stem.socket.ControlSocket.close`, and by extension our
    :func:`stem.socket.ControlSocket.is_alive` state changes.

    :returns: **threading.RLock** that governs sending messages to our socket
      and state changes
    """

    return self._send_lock

  def __enter__(self):
    return self

  def __exit__(self, exit_type, value, traceback):
    self.close()

  def _connect(self):
    """
    Connection callback that can be overwritten by subclasses and wrappers.
    """

    pass

  def _close(self):
    """
    Disconnection callback that can be overwritten by subclasses and wrappers.
    """

    pass

  def _make_socket(self):
    """
    Constructs and connects a new socket. This is implemented by subclasses.

    :returns: **socket.socket** for our configuration

    :raises:
      * :class:`stem.SocketError` if unable to make a socket
      * **NotImplementedError** if not implemented by a subclass
    """

    raise NotImplementedError('Unsupported Operation: this should be implemented by the ControlSocket subclass')
class ControlPort(ControlSocket):
  """
  Control connection to tor. For more information see tor's ControlPort torrc
  option.
  """

  def __init__(self, address = '127.0.0.1', port = 9051, connect = True):
    """
    ControlPort constructor.

    :param str address: ip address of the controller
    :param int port: port number of the controller
    :param bool connect: connects to the socket if True, leaves it unconnected otherwise

    :raises: :class:`stem.SocketError` if connect is **True** and we're
      unable to establish a connection
    """

    super(ControlPort, self).__init__()
    self._control_addr = address
    self._control_port = port

    if connect:
      self.connect()

  def get_address(self):
    """
    Provides the ip address our socket connects to.

    :returns: str with the ip address of our socket
    """

    return self._control_addr

  def get_port(self):
    """
    Provides the port our socket connects to.

    :returns: int with the port of our socket
    """

    return self._control_port

  def is_localhost(self):
    return self._control_addr == '127.0.0.1'

  def _make_socket(self):
    try:
      control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      control_socket.connect((self._control_addr, self._control_port))
      return control_socket
    except socket.error as exc:
      raise stem.SocketError(exc)


class ControlSocketFile(ControlSocket):
  """
  Control connection to tor. For more information see tor's ControlSocket torrc
  option.
  """

  def __init__(self, path = '/var/run/tor/control', connect = True):
    """
    ControlSocketFile constructor.

    :param str path: path where the control socket is located
    :param bool connect: connects to the socket if True, leaves it unconnected otherwise

    :raises: :class:`stem.SocketError` if connect is **True** and we're
      unable to establish a connection
    """

    super(ControlSocketFile, self).__init__()
    self._socket_path = path

    if connect:
      self.connect()

  def get_socket_path(self):
    """
    Provides the path our socket connects to.

    :returns: str with the path for our control socket
    """

    return self._socket_path

  def is_localhost(self):
    return True

  def _make_socket(self):
    try:
      control_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      control_socket.connect(self._socket_path)
      return control_socket
    except socket.error as exc:
      raise stem.SocketError(exc)
def send_message(control_file, message, raw = False):
  """
  Sends a message to the control socket, adding the expected formatting for
  single versus multi-line messages. Neither message type should contain an
  ending newline (if so it'll be treated as a multi-line message with a blank
  line at the end). If the message doesn't contain a newline then it's sent
  as...

  ::

    <message>\\r\\n

  and if it does contain newlines then it's split on ``\\n`` and sent as...

  ::

    +<line 1>\\r\\n
    <line 2>\\r\\n
    <line 3>\\r\\n
    .\\r\\n

  :param file control_file: file derived from the control socket (see the
    socket's makefile() method for more information)
  :param str message: message to be sent on the control socket
  :param bool raw: leaves the message formatting untouched, passing it to the
    socket as-is

  :raises:
    * :class:`stem.SocketError` if a problem arises in using the socket
    * :class:`stem.SocketClosed` if the socket is known to be shut down
  """

  if not raw:
    message = send_formatting(message)

  try:
    control_file.write(stem.util.str_tools._to_bytes(message))
    control_file.flush()

    log_message = message.replace('\r\n', '\n').rstrip()
    log.trace('Sent to tor:\n' + log_message)
  except socket.error as exc:
    log.info('Failed to send message: %s' % exc)

    # When sending there doesn't seem to be a reliable method for
    # distinguishing between failures from a disconnect versus other things.
    # Just accounting for known disconnection responses.

    if str(exc) == '[Errno 32] Broken pipe':
      raise stem.SocketClosed(exc)
    else:
      raise stem.SocketError(exc)
  except AttributeError:
    # if the control_file has been closed then flush will receive:
    # AttributeError: 'NoneType' object has no attribute 'sendall'

    log.info('Failed to send message: file has been closed')
    raise stem.SocketClosed('file has been closed')
def recv_message(control_file):
  """
  Pulls from a control socket until we either have a complete message or
  encounter a problem.

  :param file control_file: file derived from the control socket (see the
    socket's makefile() method for more information)

  :returns: :class:`~stem.response.ControlMessage` read from the socket

  :raises:
    * :class:`stem.ProtocolError` the content from the socket is malformed
    * :class:`stem.SocketClosed` if the socket closes before we receive
      a complete message
  """

  parsed_content, raw_content = [], b''
  logging_prefix = 'Error while receiving a control message (%s): '

  while True:
    try:
      # From a real socket readline() would always provide bytes, but during
      # tests we might be given a StringIO in which case it's unicode under
      # python 3.x.

      line = stem.util.str_tools._to_bytes(control_file.readline())
    except AttributeError:
      # if the control_file has been closed then we will receive:
      # AttributeError: 'NoneType' object has no attribute 'recv'

      prefix = logging_prefix % 'SocketClosed'
      log.info(prefix + 'socket file has been closed')
      raise stem.SocketClosed('socket file has been closed')
    except (socket.error, ValueError) as exc:
      # When disconnected we get...
      #
      # Python 2:
      #   socket.error: [Errno 107] Transport endpoint is not connected
      #
      # Python 3:
      #   ValueError: I/O operation on closed file.

      prefix = logging_prefix % 'SocketClosed'
      log.info(prefix + 'received exception "%s"' % exc)
      raise stem.SocketClosed(exc)

    raw_content += line

    # Parses the tor control lines. These are of the form...
    # <status code><divider><content>\r\n

    if len(line) == 0:
      # if the socket is disconnected then the readline() method will provide
      # empty content

      prefix = logging_prefix % 'SocketClosed'
      log.info(prefix + 'empty socket content')
      raise stem.SocketClosed('Received empty socket content.')
    elif len(line) < 4:
      prefix = logging_prefix % 'ProtocolError'
      log.info(prefix + 'line too short, "%s"' % log.escape(line))
      raise stem.ProtocolError('Badly formatted reply line: too short')
    elif not re.match(b'^[a-zA-Z0-9]{3}[-+ ]', line):
      prefix = logging_prefix % 'ProtocolError'
      log.info(prefix + 'malformed status code/divider, "%s"' % log.escape(line))
      raise stem.ProtocolError('Badly formatted reply line: beginning is malformed')
    elif not line.endswith(b'\r\n'):
      prefix = logging_prefix % 'ProtocolError'
      log.info(prefix + 'no CRLF linebreak, "%s"' % log.escape(line))
      raise stem.ProtocolError('All lines should end with CRLF')

    line = line[:-2]  # strips off the CRLF
    status_code, divider, content = line[:3], line[3:4], line[4:]

    if stem.prereq.is_python_3():
      status_code = stem.util.str_tools._to_unicode(status_code)
      divider = stem.util.str_tools._to_unicode(divider)

    if divider == '-':
      # mid-reply line, keep pulling for more content
      parsed_content.append((status_code, divider, content))
    elif divider == ' ':
      # end of the message, return the message
      parsed_content.append((status_code, divider, content))

      log_message = raw_content.replace(b'\r\n', b'\n').rstrip()
      log.trace('Received from tor:\n' + stem.util.str_tools._to_unicode(log_message))

      return stem.response.ControlMessage(parsed_content, raw_content)
    elif divider == '+':
      # data entry, all of the following lines belong to the content until we
      # get a line with just a period

      while True:
        try:
          line = stem.util.str_tools._to_bytes(control_file.readline())
        except socket.error as exc:
          prefix = logging_prefix % 'SocketClosed'
          log.info(prefix + 'received an exception while mid-way through a data reply (exception: "%s", read content: "%s")' % (exc, log.escape(raw_content)))
          raise stem.SocketClosed(exc)

        raw_content += line

        if not line.endswith(b'\r\n'):
          prefix = logging_prefix % 'ProtocolError'
          log.info(prefix + 'CRLF linebreaks missing from a data reply, "%s"' % log.escape(raw_content))
          raise stem.ProtocolError('All lines should end with CRLF')
        elif line == b'.\r\n':
          break  # data block termination

        line = line[:-2]  # strips off the CRLF

        # lines starting with a period are escaped by a second period (as per
        # section 2.4 of the control-spec)

        if line.startswith(b'..'):
          line = line[1:]

        # appends to previous content, using a newline rather than CRLF
        # separator (more conventional for multi-line string content outside
        # the windows world)

        content += b'\n' + line

      parsed_content.append((status_code, divider, content))
    else:
      # this should never be reached due to the prefix regex, but might as well
      # be safe...
      prefix = logging_prefix % 'ProtocolError'
      log.warn(prefix + "\"%s\" isn't a recognized divider type" % divider)
      raise stem.ProtocolError("Unrecognized divider type '%s': %s" % (divider, stem.util.str_tools._to_unicode(line)))
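As a rough illustration, for the two-line GETINFO reply shown in the module docstring the function above would yield a ControlMessage whose content() follows the (status code, divider, content) tuples built during parsing::

  reply = recv_message(control_file)

  print(reply.content())
  # [('250', '-', 'version=0.2.4.10-alpha-dev (git-8be6058d8f31e578)'),
  #  ('250', ' ', 'OK')]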
def send_formatting(message):
  """
  Performs the formatting expected from sent control messages. For more
  information see the :func:`~stem.socket.send_message` function.

  :param str message: message to be formatted

  :returns: **str** of the message wrapped by the formatting expected from
    controllers
  """

  # From control-spec section 2.2...
  #   Command = Keyword OptArguments CRLF / "+" Keyword OptArguments CRLF CmdData
  #   Keyword = 1*ALPHA
  #   OptArguments = [ SP *(SP / VCHAR) ]
  #
  # A command is either a single line containing a Keyword and arguments, or a
  # multiline command whose initial keyword begins with +, and whose data
  # section ends with a single "." on a line of its own.

  # if we already have \r\n entries then standardize on \n to start with
  message = message.replace('\r\n', '\n')

  if '\n' in message:
    return '+%s\r\n.\r\n' % message.replace('\n', '\r\n')
  else:
    return message + '\r\n'
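A quick check of these rules against the function as written::

  >>> send_formatting('GETINFO version')
  'GETINFO version\r\n'
  >>> send_formatting('LOADCONF\nSocksPort 9090')
  '+LOADCONF\r\nSocksPort 9090\r\n.\r\n'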
20 Shared/lib/python3.4/site-packages/stem/util/__init__.py Normal file
@ -0,0 +1,20 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Utility functions used by the stem library.
"""

__all__ = [
  'conf',
  'connection',
  'enum',
  'log',
  'lru_cache',
  'ordereddict',
  'proc',
  'system',
  'term',
  'test_tools',
  'tor_tools',
]
745 Shared/lib/python3.4/site-packages/stem/util/conf.py Normal file
@ -0,0 +1,745 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Handlers for text configuration files. Configurations are simple string to
string mappings, with the configuration files using the following rules...

* the key/value is separated by a space
* anything after a '#' is ignored as a comment
* excess whitespace is trimmed
* empty lines are ignored
* multi-line values can be defined by following the key with lines starting
  with a '|'

For instance...

::

  # This is my sample config
  user.name Galen
  user.password yabba1234  # here's an inline comment
  user.notes takes a fancy to pepperjack cheese
  blankEntry.example

  msg.greeting
  |Multi-line message exclaiming of the
  |wonder and awe that is pepperjack!

... would be loaded as...

::

  config = {
    'user.name': 'Galen',
    'user.password': 'yabba1234',
    'user.notes': 'takes a fancy to pepperjack cheese',
    'blankEntry.example': '',
    'msg.greeting': 'Multi-line message exclaiming of the\\nwonder and awe that is pepperjack!',
  }

Configurations are managed via the :class:`~stem.util.conf.Config` class. The
:class:`~stem.util.conf.Config` can be used directly with its
:func:`~stem.util.conf.Config.get` and :func:`~stem.util.conf.Config.set`
methods, but usually modules will want a local dictionary with just the
configurations that it cares about.

To do this use the :func:`~stem.util.conf.config_dict` function. For example...

::

  import getpass
  from stem.util import conf, connection

  def config_validator(key, value):
    if key == 'timeout':
      # require at least a one second timeout
      return max(1, value)
    elif key == 'endpoint':
      if not connection.is_valid_ipv4_address(value):
        raise ValueError("'%s' isn't a valid IPv4 address" % value)
    elif key == 'port':
      if not connection.is_valid_port(value):
        raise ValueError("'%s' isn't a valid port" % value)
    elif key == 'retries':
      # negative retries really don't make sense
      return max(0, value)

  CONFIG = conf.config_dict('ssh_login', {
    'username': getpass.getuser(),
    'password': '',
    'timeout': 10,
    'endpoint': '263.12.8.0',
    'port': 22,
    'reconnect': False,
    'retries': 3,
  }, config_validator)

There are several things going on here, so let's take it step by step...

* The :func:`~stem.util.conf.config_dict` provides a dictionary that's bound
  to a given configuration. If the 'ssh_login' configuration changes then so
  will the contents of CONFIG.

* The dictionary we're passing to :func:`~stem.util.conf.config_dict` provides
  two important pieces of information: default values and their types. See the
  Config's :func:`~stem.util.conf.Config.get` method for how these type
  inferences work.

* The config_validator is a hook we're adding to make sure CONFIG only gets
  values we think are valid. In this case it ensures that our timeout value
  is at least one second, and rejects endpoints or ports that are invalid.

Now let's say our user has the following configuration file...

::

  username waddle_doo
  password jabberwocky
  timeout -15
  port 9000000
  retries lots
  reconnect true
  logging debug

... and we load it as follows...

::

  >>> from stem.util import conf
  >>> our_config = conf.get_config('ssh_login')
  >>> our_config.load('/home/atagar/user_config')
  >>> print(CONFIG)  # doctest: +SKIP
  {
    "username": "waddle_doo",
    "password": "jabberwocky",
    "timeout": 1,
    "endpoint": "263.12.8.0",
    "port": 22,
    "reconnect": True,
    "retries": 3,
  }

Here's an explanation of what happened...

* the username, password, and reconnect attributes took the values in the
  configuration file

* the 'config_validator' we added earlier allows for a minimum timeout of one
  and rejected the invalid port (with a log message)

* we weren't able to convert the retries value "lots" to an integer so it kept
  its default value and logged a warning

* the user didn't supply an endpoint so that remained unchanged

* our CONFIG didn't have a 'logging' attribute so it was ignored

**Module Overview:**

::

  config_dict - provides a dictionary that's kept in sync with our config
  get_config - singleton for getting configurations
  uses_settings - provides an annotation for functions that use configurations
  parse_enum_csv - helper function for parsing configuration entries for enums

  Config - Custom configuration
    |- load - reads a configuration file
    |- save - writes the current configuration to a file
    |- clear - empties our loaded configuration contents
    |- add_listener - notifies the given listener when an update occurs
    |- clear_listeners - removes any attached listeners
    |- keys - provides keys in the loaded configuration
    |- set - sets the given key/value pair
    |- unused_keys - provides keys that have never been requested
    |- get - provides the value for a given key, with type inference
    +- get_value - provides the value for a given key as a string
"""
import inspect
import os
import threading

from stem.util import log

try:
  # added in python 2.7
  from collections import OrderedDict
except ImportError:
  from stem.util.ordereddict import OrderedDict

CONFS = {}  # mapping of identifier to singleton instances of configs


class _SyncListener(object):
  def __init__(self, config_dict, interceptor):
    self.config_dict = config_dict
    self.interceptor = interceptor

  def update(self, config, key):
    if key in self.config_dict:
      new_value = config.get(key, self.config_dict[key])

      if new_value == self.config_dict[key]:
        return  # no change

      if self.interceptor:
        interceptor_value = self.interceptor(key, new_value)

        if interceptor_value:
          new_value = interceptor_value

      self.config_dict[key] = new_value


def config_dict(handle, conf_mappings, handler = None):
  """
  Makes a dictionary that stays synchronized with a configuration.

  This takes a dictionary of 'config_key => default_value' mappings and
  changes the values to reflect our current configuration. This will leave
  the previous values alone if...

  * we don't have a value for that config_key
  * we can't convert our value to be the same type as the default_value

  If a handler is provided then this is called just prior to assigning new
  values to the config_dict. The handler function is expected to accept the
  (key, value) for the new values and return what we should actually insert
  into the dictionary. If this returns None then the value is updated as
  normal.

  For more information about how we convert types see our
  :func:`~stem.util.conf.Config.get` method.

  **The dictionary you get from this is managed by the
  :class:`~stem.util.conf.Config` class and should be treated as being
  read-only.**

  :param str handle: unique identifier for a config instance
  :param dict conf_mappings: config key/value mappings used as our defaults
  :param functor handler: function called just prior to assigning values
  """

  selected_config = get_config(handle)
  selected_config.add_listener(_SyncListener(conf_mappings, handler).update)
  return conf_mappings


def get_config(handle):
  """
  Singleton constructor for configuration file instances. If a configuration
  already exists for the handle then it's returned. Otherwise a fresh instance
  is constructed.

  :param str handle: unique identifier used to access this config instance
  """

  if handle not in CONFS:
    CONFS[handle] = Config()

  return CONFS[handle]
def uses_settings(handle, path, lazy_load = True):
  """
  Provides a function that can be used as a decorator for other functions that
  require settings to be loaded. Functions with this decorator will be provided
  with the configuration as its 'config' keyword argument.

  .. versionchanged:: 1.3.0
     Omits the 'config' argument if the function we're decorating doesn't
     accept it.

  ::

    uses_settings = stem.util.conf.uses_settings('my_app', '/path/to/settings.cfg')

    @uses_settings
    def my_function(config):
      print('hello %s!' % config.get('username', ''))

  :param str handle: handle for the configuration
  :param str path: path where the configuration should be loaded from
  :param bool lazy_load: loads the configuration file when the decorator is
    used if true, otherwise it's loaded right away

  :returns: **function** that can be used as a decorator to provide the
    configuration

  :raises: **IOError** if we fail to read the configuration file, if
    **lazy_load** is true then this arises when we use the decorator
  """

  config = get_config(handle)

  if not lazy_load and not config.get('settings_loaded', False):
    config.load(path)
    config.set('settings_loaded', 'true')

  def decorator(func):
    def wrapped(*args, **kwargs):
      if lazy_load and not config.get('settings_loaded', False):
        config.load(path)
        config.set('settings_loaded', 'true')

      if 'config' in inspect.getargspec(func).args:
        return func(*args, config = config, **kwargs)
      else:
        return func(*args, **kwargs)

    return wrapped

  return decorator
def parse_enum(key, value, enumeration):
  """
  Provides the enumeration value for a given key. This is a case insensitive
  lookup and raises an exception if the enum key doesn't exist.

  :param str key: configuration key being looked up
  :param str value: value to be parsed
  :param stem.util.enum.Enum enumeration: enumeration the values should be in

  :returns: enumeration value

  :raises: **ValueError** if the **value** isn't among the enumeration keys
  """

  return parse_enum_csv(key, value, enumeration, 1)[0]


def parse_enum_csv(key, value, enumeration, count = None):
  """
  Parses a given value as being a comma separated listing of enumeration keys,
  returning the corresponding enumeration values. This is intended to be a
  helper for config handlers. The checks this does are case insensitive.

  The **count** attribute can be used to make assertions based on the number of
  values. This can be...

  * None to indicate that there's no restrictions.
  * An int to indicate that we should have this many values.
  * An (int, int) tuple to indicate the range that values can be in. This range
    is inclusive and either can be None to indicate the lack of a lower or
    upper bound.

  :param str key: configuration key being looked up
  :param str value: value to be parsed
  :param stem.util.enum.Enum enumeration: enumeration the values should be in
  :param int,tuple count: validates that we have this many items

  :returns: list with the enumeration values

  :raises: **ValueError** if the count assertion fails or the **value** entries
    don't match the enumeration keys
  """

  values = [val.upper().strip() for val in value.split(',')]

  if values == ['']:
    return []

  if count is None:
    pass  # no count validation checks to do
  elif isinstance(count, int):
    if len(values) != count:
      raise ValueError("Config entry '%s' is expected to be %i comma separated values, got '%s'" % (key, count, value))
  elif isinstance(count, tuple) and len(count) == 2:
    minimum, maximum = count

    if minimum is not None and len(values) < minimum:
      raise ValueError("Config entry '%s' must have at least %i comma separated values, got '%s'" % (key, minimum, value))

    if maximum is not None and len(values) > maximum:
      raise ValueError("Config entry '%s' can have at most %i comma separated values, got '%s'" % (key, maximum, value))
  else:
    raise ValueError("The count must be None, an int, or two value tuple. Got '%s' (%s)" % (count, type(count)))

  result = []
  enum_keys = [k.upper() for k in list(enumeration.keys())]
  enum_values = list(enumeration)

  for val in values:
    if val in enum_keys:
      result.append(enum_values[enum_keys.index(val)])
    else:
      raise ValueError("The '%s' entry of config entry '%s' wasn't in the enumeration (expected %s)" % (val, key, ', '.join(enum_keys)))

  return result
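For instance, validating a hypothetical 'startup.events' entry against stem's log runlevels (lookups are case insensitive and whitespace is trimmed)::

  >>> from stem.util import conf, log
  >>> conf.parse_enum_csv('startup.events', 'debug, info', log.Runlevel, (1, None))
  ['DEBUG', 'INFO']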
class Config(object):
  """
  Handler for easily working with custom configurations, providing persistence
  to and from files. All operations are thread safe.

  **Example usage:**

  User has a file at '/home/atagar/myConfig' with...

  ::

    destination.ip 1.2.3.4
    destination.port blarg

    startup.run export PATH=$PATH:~/bin
    startup.run alias l=ls

  And they have a script with...

  ::

    from stem.util import conf

    # Configuration values we'll use in this file. These are mappings of
    # configuration keys to the default values we'll use if the user doesn't
    # have something different in their config file (or it doesn't match this
    # type).

    ssh_config = conf.config_dict('ssh_login', {
      'login.user': 'atagar',
      'login.password': 'pepperjack_is_awesome!',
      'destination.ip': '127.0.0.1',
      'destination.port': 22,
      'startup.run': [],
    })

    # Makes an empty config instance with the handle of 'ssh_login'. This is
    # a singleton so other classes can fetch this same configuration from
    # this handle.

    user_config = conf.get_config('ssh_login')

    # Loads the user's configuration file, warning if this fails.

    try:
      user_config.load("/home/atagar/myConfig")
    except IOError as exc:
      print("Unable to load the user's config: %s" % exc)

    # This replaces the contents of ssh_config with the values from the user's
    # config file if...
    #
    # * the key is present in the config file
    # * we're able to convert the configuration file's value to the same type
    #   as what's in the mapping (see the Config.get() method for how these
    #   type inferences work)
    #
    # For instance in this case...
    #
    # * the login values are left alone because they aren't in the user's
    #   config file
    #
    # * the 'destination.port' is also left with the value of 22 because we
    #   can't turn "blarg" into an integer
    #
    # The other values are replaced, so ssh_config now becomes...
    #
    # {'login.user': 'atagar',
    #  'login.password': 'pepperjack_is_awesome!',
    #  'destination.ip': '1.2.3.4',
    #  'destination.port': 22,
    #  'startup.run': ['export PATH=$PATH:~/bin', 'alias l=ls']}
    #
    # Information about the values that fail to load, and why, is reported to
    # 'stem.util.log'.
  """

  def __init__(self):
    self._path = None     # location we last loaded from or saved to
    self._contents = {}   # configuration key/value pairs
    self._listeners = []  # functors to be notified of config changes

    # used for accessing _contents
    self._contents_lock = threading.RLock()

    # keys that have been requested (used to provide unused config contents)
    self._requested_keys = set()

  def load(self, path = None):
    """
    Reads in the contents of the given path, adding its configuration values
    to our current contents. If the path is a directory then this loads each
    of the files, recursively.

    .. versionchanged:: 1.3.0
       Added support for directories.

    :param str path: file or directory path to be loaded, this uses the last
      loaded path if not provided

    :raises:
      * **IOError** if we fail to read the file (it doesn't exist, insufficient
        permissions, etc)
      * **ValueError** if no path was provided and we've never been provided one
    """

    if path:
      self._path = path
    elif not self._path:
      raise ValueError('Unable to load configuration: no path provided')

    if os.path.isdir(self._path):
      for root, dirnames, filenames in os.walk(self._path):
        for filename in filenames:
          self.load(os.path.join(root, filename))

      return

    with open(self._path, 'r') as config_file:
      read_contents = config_file.readlines()

    with self._contents_lock:
      while read_contents:
        line = read_contents.pop(0)

        # strips any commenting or excess whitespace
        comment_start = line.find('#')

        if comment_start != -1:
          line = line[:comment_start]

        line = line.strip()

        # parse the key/value pair
        if line:
          try:
            key, value = line.split(' ', 1)
            value = value.strip()
          except ValueError:
            log.debug("Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''" % (line, line))
            key, value = line, ''

          if not value:
            # this might be a multi-line entry, try processing it as such
            multiline_buffer = []

            while read_contents and read_contents[0].lstrip().startswith('|'):
              content = read_contents.pop(0).lstrip()[1:]  # removes '\s+|' prefix
              content = content.rstrip('\n')  # trailing newline
              multiline_buffer.append(content)

            if multiline_buffer:
              self.set(key, '\n'.join(multiline_buffer), False)
              continue

          self.set(key, value, False)

  def save(self, path = None):
    """
    Saves configuration contents to disk. If a path is provided then it
    replaces the configuration location that we track.

    :param str path: location to be saved to

    :raises: **ValueError** if no path was provided and we've never been provided one
    """

    if path:
      self._path = path
    elif not self._path:
      raise ValueError('Unable to save configuration: no path provided')

    with self._contents_lock:
      with open(self._path, 'w') as output_file:
        for entry_key in sorted(self.keys()):
          for entry_value in self.get_value(entry_key, multiple = True):
            # check for multi line entries
            if '\n' in entry_value:
              entry_value = '\n|' + entry_value.replace('\n', '\n|')

            output_file.write('%s %s\n' % (entry_key, entry_value))
  def clear(self):
    """
    Drops the configuration contents and reverts back to a blank, unloaded
    state.
    """

    with self._contents_lock:
      self._contents.clear()
      self._requested_keys = set()

  def add_listener(self, listener, backfill = True):
    """
    Registers the function to be notified of configuration updates. Listeners
    are expected to be functors which accept (config, key).

    :param functor listener: function to be notified when our configuration is changed
    :param bool backfill: calls the function with our current values if **True**
    """

    with self._contents_lock:
      self._listeners.append(listener)

      if backfill:
        for key in self.keys():
          listener(self, key)

  def clear_listeners(self):
    """
    Removes all attached listeners.
    """

    self._listeners = []

  def keys(self):
    """
    Provides all keys in the currently loaded configuration.

    :returns: **list** of strings for the configuration keys we've loaded
    """

    return list(self._contents.keys())

  def unused_keys(self):
    """
    Provides the configuration keys that have never been provided to a caller
    via :func:`~stem.util.conf.config_dict` or the
    :func:`~stem.util.conf.Config.get` and
    :func:`~stem.util.conf.Config.get_value` methods.

    :returns: **set** of configuration keys we've loaded but have never been requested
    """

    return set(self.keys()).difference(self._requested_keys)

  def set(self, key, value, overwrite = True):
    """
    Appends the given key/value configuration mapping, behaving the same as if
    we'd loaded this from a configuration file.

    :param str key: key for the configuration mapping
    :param str,list value: value we're setting the mapping to
    :param bool overwrite: replaces the previous value if **True**, otherwise
      the values are appended
    """

    with self._contents_lock:
      if isinstance(value, str):
        if not overwrite and key in self._contents:
          self._contents[key].append(value)
        else:
          self._contents[key] = [value]

        for listener in self._listeners:
          listener(self, key)
      elif isinstance(value, (list, tuple)):
        if not overwrite and key in self._contents:
          self._contents[key] += value
        else:
          self._contents[key] = value

        for listener in self._listeners:
          listener(self, key)
      else:
        raise ValueError("Config.set() only accepts str, list, or tuple. Provided value was a '%s'" % type(value))
def get(self, key, default = None):
|
||||
"""
|
||||
Fetches the given configuration, using the key and default value to
|
||||
determine the type it should be. Recognized inferences are:
|
||||
|
||||
* **default is a boolean => boolean**
|
||||
|
||||
* values are case insensitive
|
||||
* provides the default if the value isn't "true" or "false"
|
||||
|
||||
* **default is an integer => int**
|
||||
|
||||
* provides the default if the value can't be converted to an int
|
||||
|
||||
* **default is a float => float**
|
||||
|
||||
* provides the default if the value can't be converted to a float
|
||||
|
||||
* **default is a list => list**
|
||||
|
||||
* string contents for all configuration values with this key
|
||||
|
||||
* **default is a tuple => tuple**
|
||||
|
||||
* string contents for all configuration values with this key
|
||||
|
||||
* **default is a dictionary => dict**
|
||||
|
||||
* values without "=>" in them are ignored
|
||||
* values are split into key/value pairs on "=>" with extra whitespace
|
||||
stripped
|
||||
|
||||
:param str key: config setting to be fetched
|
||||
:param default object: value provided if no such key exists or fails to be converted
|
||||
|
||||
:returns: given configuration value with its type inferred with the above rules
|
||||
"""
|
||||
|
||||
is_multivalue = isinstance(default, (list, tuple, dict))
|
||||
val = self.get_value(key, default, is_multivalue)
|
||||
|
||||
if val == default:
|
||||
return val # don't try to infer undefined values
|
||||
|
||||
if isinstance(default, bool):
|
||||
if val.lower() == 'true':
|
||||
val = True
|
||||
elif val.lower() == 'false':
|
||||
val = False
|
||||
else:
|
||||
log.debug("Config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default)))
|
||||
val = default
|
||||
elif isinstance(default, int):
|
||||
try:
|
||||
val = int(val)
|
||||
except ValueError:
|
||||
log.debug("Config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default))
|
||||
val = default
|
||||
elif isinstance(default, float):
|
||||
try:
|
||||
val = float(val)
|
||||
except ValueError:
|
||||
log.debug("Config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default))
|
||||
val = default
|
||||
elif isinstance(default, list):
|
||||
val = list(val) # make a shallow copy
|
||||
elif isinstance(default, tuple):
|
||||
val = tuple(val)
|
||||
elif isinstance(default, dict):
|
||||
val_map = OrderedDict()
|
||||
for entry in val:
|
||||
if '=>' in entry:
|
||||
entry_key, entry_val = entry.split('=>', 1)
|
||||
val_map[entry_key.strip()] = entry_val.strip()
|
||||
else:
|
||||
log.debug('Ignoring invalid %s config entry (expected a mapping, but "%s" was missing "=>")' % (key, entry))
|
||||
val = val_map
|
||||
|
||||
return val
|
||||
|
||||
def get_value(self, key, default = None, multiple = False):
|
||||
"""
|
||||
This provides the current value associated with a given key.
|
||||
|
||||
:param str key: config setting to be fetched
|
||||
:param object default: value provided if no such key exists
|
||||
:param bool multiple: provides back a list of all values if **True**,
|
||||
otherwise this returns the last loaded configuration value
|
||||
|
||||
:returns: **str** or **list** of string configuration values associated
|
||||
with the given key, providing the default if no such key exists
|
||||
"""
|
||||
|
||||
with self._contents_lock:
|
||||
if key in self._contents:
|
||||
self._requested_keys.add(key)
|
||||
|
||||
if multiple:
|
||||
return self._contents[key]
|
||||
else:
|
||||
return self._contents[key][-1]
|
||||
else:
|
||||
message_id = 'stem.util.conf.missing_config_key_%s' % key
|
||||
log.log_once(message_id, log.TRACE, "config entry '%s' not found, defaulting to '%s'" % (key, default))
|
||||
return default
|
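# A minimal usage sketch for the Config class above, using only the methods
# shown (set, get, unused_keys). The 'cache.size' and 'cache.exclude' keys are
# made-up examples, not real stem settings.

from stem.util import conf

config = conf.Config()
config.set('cache.size', '25')                       # values are stored as strings
config.set('cache.exclude', 'a')
config.set('cache.exclude', 'b', overwrite = False)  # appends rather than replaces

print(config.get('cache.size', 10))     # int default, so '25' is cast: 25
print(config.get('cache.exclude', []))  # list default: ['a', 'b']
print(config.unused_keys())             # empty set() here, since both keys were fetched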
Shared/lib/python3.4/site-packages/stem/util/connection.py (new file, 651 lines)
@@ -0,0 +1,651 @@
|
|||
# Copyright 2012-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Connection and networking based utility functions.
|
||||
|
||||
**Module Overview:**
|
||||
|
||||
::
|
||||
|
||||
get_connections - queries the connections belonging to a given process
|
||||
system_resolvers - provides connection resolution methods that are likely to be available
|
||||
port_usage - brief description of the common usage for a port
|
||||
|
||||
is_valid_ipv4_address - checks if a string is a valid IPv4 address
|
||||
is_valid_ipv6_address - checks if a string is a valid IPv6 address
|
||||
is_valid_port - checks if something is a valid representation for a port
|
||||
is_private_address - checks if an IPv4 address belongs to a private range or not
|
||||
|
||||
expand_ipv6_address - provides an IPv6 address with its collapsed portions expanded
|
||||
get_mask_ipv4 - provides the mask representation for a given number of bits
|
||||
get_mask_ipv6 - provides the IPv6 mask representation for a given number of bits
|
||||
|
||||
.. data:: Resolver (enum)
|
||||
|
||||
Method for resolving a process' connections.
|
||||
|
||||
.. versionadded:: 1.1.0
|
||||
.. versionchanged:: 1.4.0
|
||||
Added **NETSTAT_WINDOWS**.
|
||||
|
||||
==================== ===========
|
||||
Resolver Description
|
||||
==================== ===========
|
||||
**PROC** /proc contents
|
||||
**NETSTAT** netstat
|
||||
**NETSTAT_WINDOWS** netstat command under Windows
|
||||
**SS** ss command
|
||||
**LSOF** lsof command
|
||||
**SOCKSTAT** sockstat command under *nix
|
||||
**BSD_SOCKSTAT** sockstat command under FreeBSD
|
||||
**BSD_PROCSTAT** procstat command under FreeBSD
|
||||
==================== ===========
|
||||
"""
|
||||
|
||||
import collections
|
||||
import hashlib
|
||||
import hmac
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
|
||||
import stem.util.proc
|
||||
import stem.util.system
|
||||
|
||||
from stem import str_type
|
||||
from stem.util import conf, enum, log
|
||||
|
||||
# Connection resolution is risky to log about since it's highly likely to
|
||||
# contain sensitive information. That said, it's also difficult to get right in
|
||||
# a platform independent fashion. To opt into the logging required to
|
||||
# troubleshoot connection resolution set the following...
|
||||
|
||||
LOG_CONNECTION_RESOLUTION = False
|
||||
|
||||
Resolver = enum.Enum(
|
||||
('PROC', 'proc'),
|
||||
('NETSTAT', 'netstat'),
|
||||
('NETSTAT_WINDOWS', 'netstat (windows)'),
|
||||
('SS', 'ss'),
|
||||
('LSOF', 'lsof'),
|
||||
('SOCKSTAT', 'sockstat'),
|
||||
('BSD_SOCKSTAT', 'sockstat (bsd)'),
|
||||
('BSD_PROCSTAT', 'procstat (bsd)')
|
||||
)
|
||||
|
||||
Connection = collections.namedtuple('Connection', [
|
||||
'local_address',
|
||||
'local_port',
|
||||
'remote_address',
|
||||
'remote_port',
|
||||
'protocol',
|
||||
])
|
||||
|
||||
FULL_IPv4_MASK = '255.255.255.255'
|
||||
FULL_IPv6_MASK = 'FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF'
|
||||
|
||||
CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE = os.urandom(32)
|
||||
|
||||
PORT_USES = None # port number => description
|
||||
|
||||
RESOLVER_COMMAND = {
|
||||
Resolver.PROC: '',
|
||||
|
||||
# -n = prevents dns lookups, -p = include process
|
||||
Resolver.NETSTAT: 'netstat -np',
|
||||
|
||||
# -a = show all TCP/UDP connections, -n = numeric addresses and ports, -o = include pid
|
||||
Resolver.NETSTAT_WINDOWS: 'netstat -ano',
|
||||
|
||||
# -n = numeric ports, -p = include process, -t = tcp sockets, -u = udp sockets
|
||||
Resolver.SS: 'ss -nptu',
|
||||
|
||||
# -n = prevent dns lookups, -P = show port numbers (not names), -i = ip only, -w = no warnings
|
||||
# (lsof provides a '-p <pid>' but oddly in practice it seems to be ~11-28% slower)
|
||||
Resolver.LSOF: 'lsof -wnPi',
|
||||
|
||||
Resolver.SOCKSTAT: 'sockstat',
|
||||
|
||||
# -4 = IPv4, -c = connected sockets
|
||||
Resolver.BSD_SOCKSTAT: 'sockstat -4c',
|
||||
|
||||
# -f <pid> = process pid
|
||||
Resolver.BSD_PROCSTAT: 'procstat -f {pid}',
|
||||
}
|
||||
|
||||
RESOLVER_FILTER = {
|
||||
Resolver.PROC: '',
|
||||
|
||||
# tcp 0 586 192.168.0.1:44284 38.229.79.2:443 ESTABLISHED 15843/tor
|
||||
Resolver.NETSTAT: '^{protocol}\s+.*\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED\s+{pid}/{name}\s*$',
|
||||
|
||||
# tcp 586 192.168.0.1:44284 38.229.79.2:443 ESTABLISHED 15843
|
||||
Resolver.NETSTAT_WINDOWS: '^\s*{protocol}\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED\s+{pid}\s*$',
|
||||
|
||||
# tcp ESTAB 0 0 192.168.0.20:44415 38.229.79.2:443 users:(("tor",15843,9))
|
||||
Resolver.SS: '^{protocol}\s+ESTAB\s+.*\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+users:\(\("{name}",{pid},[0-9]+\)\)$',
|
||||
|
||||
# tor 3873 atagar 45u IPv4 40994 0t0 TCP 10.243.55.20:45724->194.154.227.109:9001 (ESTABLISHED)
|
||||
Resolver.LSOF: '^{name}\s+{pid}\s+.*\s+{protocol}\s+{local_address}:{local_port}->{remote_address}:{remote_port} \(ESTABLISHED\)$',
|
||||
|
||||
# atagar tor 15843 tcp4 192.168.0.20:44092 68.169.35.102:443 ESTABLISHED
|
||||
Resolver.SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+{protocol}4\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED$',
|
||||
|
||||
# _tor tor 4397 12 tcp4 172.27.72.202:54011 127.0.0.1:9001
|
||||
Resolver.BSD_SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+\S+\s+{protocol}4\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}$',
|
||||
|
||||
# 3561 tor 4 s - rw---n-- 2 0 TCP 10.0.0.2:9050 10.0.0.1:22370
|
||||
Resolver.BSD_PROCSTAT: '^\s*{pid}\s+{name}\s+.*\s+{protocol}\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}$',
|
||||
}
|
||||
|
||||
|
||||
def get_connections(resolver, process_pid = None, process_name = None):
|
||||
"""
|
||||
Retrieves a list of the current connections for a given process. This
|
||||
provides a list of Connection instances, which have five attributes...
|
||||
|
||||
* **local_address** (str)
|
||||
* **local_port** (int)
|
||||
* **remote_address** (str)
|
||||
* **remote_port** (int)
|
||||
* **protocol** (str, generally either 'tcp' or 'udp')
|
||||
|
||||
.. versionadded:: 1.1.0
|
||||
|
||||
:param Resolver resolver: method of connection resolution to use
|
||||
:param int process_pid: pid of the process to retrieve
|
||||
:param str process_name: name of the process to retrieve
|
||||
|
||||
:returns: **list** of Connection instances
|
||||
|
||||
:raises:
|
||||
* **ValueError** if using **Resolver.PROC** or **Resolver.BSD_PROCSTAT**
|
||||
and the process_pid wasn't provided
|
||||
|
||||
* **IOError** if no connections are available or resolution fails
|
||||
(generally they're indistinguishable). The common causes are the
|
||||
command being unavailable or permissions.
|
||||
"""
|
||||
|
||||
def _log(msg):
|
||||
if LOG_CONNECTION_RESOLUTION:
|
||||
log.debug(msg)
|
||||
|
||||
_log('=' * 80)
|
||||
_log('Querying connections for resolver: %s, pid: %s, name: %s' % (resolver, process_pid, process_name))
|
||||
|
||||
if isinstance(process_pid, str):
|
||||
try:
|
||||
process_pid = int(process_pid)
|
||||
except ValueError:
|
||||
raise ValueError('Process pid was non-numeric: %s' % process_pid)
|
||||
|
||||
if process_pid is None and process_name and resolver == Resolver.NETSTAT_WINDOWS:
|
||||
process_pid = stem.util.system.pid_by_name(process_name)
|
||||
|
||||
if process_pid is None and resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT):
|
||||
raise ValueError('%s resolution requires a pid' % resolver)
|
||||
|
||||
if resolver == Resolver.PROC:
|
||||
return [Connection(*conn) for conn in stem.util.proc.connections(process_pid)]
|
||||
|
||||
resolver_command = RESOLVER_COMMAND[resolver].format(pid = process_pid)
|
||||
|
||||
try:
|
||||
results = stem.util.system.call(resolver_command)
|
||||
except OSError as exc:
|
||||
raise IOError("Unable to query '%s': %s" % (resolver_command, exc))
|
||||
|
||||
resolver_regex_str = RESOLVER_FILTER[resolver].format(
|
||||
protocol = '(?P<protocol>\S+)',
|
||||
local_address = '(?P<local_address>[0-9.]+)',
|
||||
local_port = '(?P<local_port>[0-9]+)',
|
||||
remote_address = '(?P<remote_address>[0-9.]+)',
|
||||
remote_port = '(?P<remote_port>[0-9]+)',
|
||||
pid = process_pid if process_pid else '[0-9]*',
|
||||
name = process_name if process_name else '\S*',
|
||||
)
|
||||
|
||||
_log('Resolver regex: %s' % resolver_regex_str)
|
||||
_log('Resolver results:\n%s' % '\n'.join(results))
|
||||
|
||||
connections = []
|
||||
resolver_regex = re.compile(resolver_regex_str)
|
||||
|
||||
for line in results:
|
||||
match = resolver_regex.match(line)
|
||||
|
||||
if match:
|
||||
attr = match.groupdict()
|
||||
local_addr = attr['local_address']
|
||||
local_port = int(attr['local_port'])
|
||||
remote_addr = attr['remote_address']
|
||||
remote_port = int(attr['remote_port'])
|
||||
protocol = attr['protocol'].lower()
|
||||
|
||||
if remote_addr == '0.0.0.0':
|
||||
continue # procstat response for unestablished connections
|
||||
|
||||
if not (is_valid_ipv4_address(local_addr) and is_valid_ipv4_address(remote_addr)):
|
||||
_log('Invalid address (%s or %s): %s' % (local_addr, remote_addr, line))
|
||||
elif not (is_valid_port(local_port) and is_valid_port(remote_port)):
|
||||
_log('Invalid port (%s or %s): %s' % (local_port, remote_port, line))
|
||||
elif protocol not in ('tcp', 'udp'):
|
||||
_log('Unrecognized protocol (%s): %s' % (protocol, line))
|
||||
else:
|
||||
# only keep entries that passed the checks above
|
||||
conn = Connection(local_addr, local_port, remote_addr, remote_port, protocol)
|
||||
connections.append(conn)
|
||||
_log(str(conn))
|
||||
|
||||
_log('%i connections found' % len(connections))
|
||||
|
||||
if not connections:
|
||||
raise IOError('No results found using: %s' % resolver_command)
|
||||
|
||||
return connections
|
||||
|
||||
|
||||
def system_resolvers(system = None):
|
||||
"""
|
||||
Provides the types of connection resolvers likely to be available on this platform.
|
||||
|
||||
.. versionadded:: 1.1.0
|
||||
|
||||
.. versionchanged:: 1.3.0
|
||||
Renamed from get_system_resolvers() to system_resolvers(). The old name
|
||||
still works as an alias, but will be dropped in Stem version 2.0.0.
|
||||
|
||||
:param str system: system to get resolvers for; this is determined by
|
||||
platform.system() if not provided
|
||||
|
||||
:returns: **list** of :data:`~stem.util.connection.Resolver` instances available on this platform
|
||||
"""
|
||||
if system is None:
|
||||
if stem.util.system.is_gentoo():
|
||||
system = 'Gentoo'
|
||||
else:
|
||||
system = platform.system()
|
||||
|
||||
if system == 'Windows':
|
||||
resolvers = [Resolver.NETSTAT_WINDOWS]
|
||||
elif system in ('Darwin', 'OpenBSD'):
|
||||
resolvers = [Resolver.LSOF]
|
||||
elif system == 'FreeBSD':
|
||||
# Netstat is available, but lacks a '-p' equivalent so we can't associate
|
||||
# the results to processes. The platform also has a ss command, but it
|
||||
# belongs to a spreadsheet application.
|
||||
|
||||
resolvers = [Resolver.BSD_SOCKSTAT, Resolver.BSD_PROCSTAT, Resolver.LSOF]
|
||||
else:
|
||||
# Sockstat isn't available by default on ubuntu.
|
||||
|
||||
resolvers = [Resolver.NETSTAT, Resolver.SOCKSTAT, Resolver.LSOF, Resolver.SS]
|
||||
|
||||
# remove any that aren't in the user's PATH
|
||||
|
||||
resolvers = [r for r in resolvers if stem.util.system.is_available(RESOLVER_COMMAND[r])]
|
||||
|
||||
# proc resolution, by far, outperforms the others, so default to this if able
|
||||
|
||||
if stem.util.proc.is_available() and os.access('/proc/net/tcp', os.R_OK) and os.access('/proc/net/udp', os.R_OK):
|
||||
resolvers = [Resolver.PROC] + resolvers
|
||||
|
||||
return resolvers
|
||||
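# A sketch tying system_resolvers() and get_connections() together. The pid
# and process name below are placeholders; on a real system they'd come from
# something like stem.util.system.pid_by_name().

import stem.util.connection

resolvers = stem.util.connection.system_resolvers()

if resolvers:
  try:
    conns = stem.util.connection.get_connections(resolvers[0], process_pid = 1234, process_name = 'tor')

    for conn in conns:
      print('%s:%i => %s:%i (%s)' % (conn.local_address, conn.local_port, conn.remote_address, conn.remote_port, conn.protocol))
  except IOError as exc:
    print('Unable to resolve connections: %s' % exc)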
|
||||
|
||||
def port_usage(port):
|
||||
"""
|
||||
Provides the common use of a given port. For example, 'HTTP' for port 80 or
|
||||
'SSH' for 22.
|
||||
|
||||
.. versionadded:: 1.2.0
|
||||
|
||||
:param int port: port number to look up
|
||||
|
||||
:returns: **str** with a description for the port, **None** if none is known
|
||||
"""
|
||||
|
||||
global PORT_USES
|
||||
|
||||
if PORT_USES is None:
|
||||
config = conf.Config()
|
||||
config_path = os.path.join(os.path.dirname(__file__), 'ports.cfg')
|
||||
|
||||
try:
|
||||
config.load(config_path)
|
||||
port_uses = {}
|
||||
|
||||
for key, value in config.get('port', {}).items():
|
||||
if key.isdigit():
|
||||
port_uses[int(key)] = value
|
||||
elif '-' in key:
|
||||
min_port, max_port = key.split('-', 1)
|
||||
|
||||
for port_entry in range(int(min_port), int(max_port) + 1):
|
||||
port_uses[port_entry] = value
|
||||
else:
|
||||
raise ValueError("'%s' is an invalid key" % key)
|
||||
|
||||
PORT_USES = port_uses
|
||||
except Exception as exc:
|
||||
log.warn("BUG: stem failed to load its internal port descriptions from '%s': %s" % (config_path, exc))
|
||||
|
||||
if not PORT_USES:
|
||||
return None
|
||||
|
||||
if isinstance(port, str) and port.isdigit():
|
||||
port = int(port)
|
||||
|
||||
return PORT_USES.get(port)
|
||||
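# A quick illustration of port_usage(). Descriptions come from the bundled
# ports.cfg (shown later in this commit), which also supports ranged entries
# like 'port 902-904 => VMware'.

from stem.util.connection import port_usage

print(port_usage(80))     # 'HTTP'
print(port_usage('443'))  # 'HTTPS', numeric strings work too
print(port_usage(65123))  # None for unlisted ports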
|
||||
|
||||
def is_valid_ipv4_address(address):
|
||||
"""
|
||||
Checks if a string is a valid IPv4 address.
|
||||
|
||||
:param str address: string to be checked
|
||||
|
||||
:returns: **True** if input is a valid IPv4 address, **False** otherwise
|
||||
"""
|
||||
|
||||
if not isinstance(address, (bytes, str_type)):
|
||||
return False
|
||||
|
||||
# checks if there are four period separated values
|
||||
|
||||
if address.count('.') != 3:
|
||||
return False
|
||||
|
||||
# checks that each octet is a decimal value between 0-255
|
||||
for entry in address.split('.'):
|
||||
if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
|
||||
return False
|
||||
elif entry[0] == '0' and len(entry) > 1:
|
||||
return False # leading zeros, for instance in '1.2.3.001'
|
||||
|
||||
return True
|
||||
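# Expected behavior of the validator above, including its rejection of
# leading zeros.

from stem.util.connection import is_valid_ipv4_address

assert is_valid_ipv4_address('192.168.0.1')
assert not is_valid_ipv4_address('192.168.0')    # only three octets
assert not is_valid_ipv4_address('1.2.3.001')    # leading zeros are rejected
assert not is_valid_ipv4_address('256.1.1.1')    # octet out of range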
|
||||
|
||||
def is_valid_ipv6_address(address, allow_brackets = False):
|
||||
"""
|
||||
Checks if a string is a valid IPv6 address.
|
||||
|
||||
:param str address: string to be checked
|
||||
:param bool allow_brackets: ignore brackets which form '[address]'
|
||||
|
||||
:returns: **True** if input is a valid IPv6 address, **False** otherwise
|
||||
"""
|
||||
|
||||
if allow_brackets:
|
||||
if address.startswith('[') and address.endswith(']'):
|
||||
address = address[1:-1]
|
||||
|
||||
# addresses are made up of eight colon separated groups of four hex digits
|
||||
# with leading zeros being optional
|
||||
# https://en.wikipedia.org/wiki/IPv6#Address_format
|
||||
|
||||
colon_count = address.count(':')
|
||||
|
||||
if colon_count > 7:
|
||||
return False # too many groups
|
||||
elif colon_count != 7 and '::' not in address:
|
||||
return False # not enough groups and none are collapsed
|
||||
elif address.count('::') > 1 or ':::' in address:
|
||||
return False # multiple groupings of zeros can't be collapsed
|
||||
|
||||
for entry in address.split(':'):
|
||||
if not re.match('^[0-9a-fA-F]{0,4}$', entry):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def is_valid_port(entry, allow_zero = False):
|
||||
"""
|
||||
Checks if a string or int is a valid port number.
|
||||
|
||||
:param list,str,int entry: string, integer or list to be checked
|
||||
:param bool allow_zero: accept port number of zero (reserved by definition)
|
||||
|
||||
:returns: **True** if input is an integer and within the valid port range, **False** otherwise
|
||||
"""
|
||||
|
||||
try:
|
||||
value = int(entry)
|
||||
|
||||
if str(value) != str(entry):
|
||||
return False # invalid leading char, e.g. space or zero
|
||||
elif allow_zero and value == 0:
|
||||
return True
|
||||
else:
|
||||
return value > 0 and value < 65536
|
||||
except TypeError:
|
||||
if isinstance(entry, (tuple, list)):
|
||||
for port in entry:
|
||||
if not is_valid_port(port, allow_zero):
|
||||
return False
|
||||
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
except ValueError:
|
||||
return False
|
||||
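# The port validator accepts ints, numeric strings, and sequences of either;
# a few expected results.

from stem.util.connection import is_valid_port

assert is_valid_port(443)
assert is_valid_port('443')
assert is_valid_port([80, 443])             # sequences are checked item by item
assert not is_valid_port(0)                 # zero is reserved...
assert is_valid_port(0, allow_zero = True)  # ...unless explicitly allowed
assert not is_valid_port('0443')            # leading zero fails the round-trip check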
|
||||
|
||||
def is_private_address(address):
|
||||
"""
|
||||
Checks if the IPv4 address is in a range belonging to the local network or
|
||||
loopback. These include:
|
||||
|
||||
* Private ranges: 10.*, 172.16.* - 172.31.*, 192.168.*
|
||||
* Loopback: 127.*
|
||||
|
||||
.. versionadded:: 1.1.0
|
||||
|
||||
:param str address: string to be checked
|
||||
|
||||
:returns: **True** if input is in a private range, **False** otherwise
|
||||
|
||||
:raises: **ValueError** if the address isn't a valid IPv4 address
|
||||
"""
|
||||
|
||||
if not is_valid_ipv4_address(address):
|
||||
raise ValueError("'%s' isn't a valid IPv4 address" % address)
|
||||
|
||||
# checks for any of the simple wildcard ranges
|
||||
|
||||
if address.startswith('10.') or address.startswith('192.168.') or address.startswith('127.'):
|
||||
return True
|
||||
|
||||
# checks for the 172.16.* - 172.31.* range
|
||||
|
||||
if address.startswith('172.'):
|
||||
second_octet = int(address.split('.')[1])
|
||||
|
||||
if second_octet >= 16 and second_octet <= 31:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def expand_ipv6_address(address):
|
||||
"""
|
||||
Expands abbreviated IPv6 addresses to their full colon separated hex format.
|
||||
For instance...
|
||||
|
||||
::
|
||||
|
||||
>>> expand_ipv6_address('2001:db8::ff00:42:8329')
|
||||
'2001:0db8:0000:0000:0000:ff00:0042:8329'
|
||||
|
||||
>>> expand_ipv6_address('::')
|
||||
'0000:0000:0000:0000:0000:0000:0000:0000'
|
||||
|
||||
:param str address: IPv6 address to be expanded
|
||||
|
||||
:raises: **ValueError** if the address can't be expanded due to being malformed
|
||||
"""
|
||||
|
||||
if not is_valid_ipv6_address(address):
|
||||
raise ValueError("'%s' isn't a valid IPv6 address" % address)
|
||||
|
||||
# expands collapsed groupings, there can only be a single '::' in a valid
|
||||
# address
|
||||
if '::' in address:
|
||||
missing_groups = 7 - address.count(':')
|
||||
address = address.replace('::', '::' + ':' * missing_groups)
|
||||
|
||||
# inserts missing zeros
|
||||
for index in range(8):
|
||||
start = index * 5
|
||||
end = address.index(':', start) if index != 7 else len(address)
|
||||
missing_zeros = 4 - (end - start)
|
||||
|
||||
if missing_zeros > 0:
|
||||
address = address[:start] + '0' * missing_zeros + address[start:]
|
||||
|
||||
return address
|
||||
|
||||
|
||||
def get_mask_ipv4(bits):
|
||||
"""
|
||||
Provides the IPv4 mask for a given number of bits, in the dotted-quad format.
|
||||
|
||||
:param int bits: number of bits to be converted
|
||||
|
||||
:returns: **str** with the subnet mask representation for this many bits
|
||||
|
||||
:raises: **ValueError** if given a number of bits outside the range of 0-32
|
||||
"""
|
||||
|
||||
if bits > 32 or bits < 0:
|
||||
raise ValueError('A mask can only be 0-32 bits, got %i' % bits)
|
||||
elif bits == 32:
|
||||
return FULL_IPv4_MASK
|
||||
|
||||
# get the binary representation of the mask
|
||||
mask_bin = _get_binary(2 ** bits - 1, 32)[::-1]
|
||||
|
||||
# breaks it into eight character groupings
|
||||
octets = [mask_bin[8 * i:8 * (i + 1)] for i in range(4)]
|
||||
|
||||
# converts each octet into its integer value
|
||||
return '.'.join([str(int(octet, 2)) for octet in octets])
|
||||
|
||||
|
||||
def get_mask_ipv6(bits):
|
||||
"""
|
||||
Provides the IPv6 mask for a given number of bits, in the hex colon-delimited
|
||||
format.
|
||||
|
||||
:param int bits: number of bits to be converted
|
||||
|
||||
:returns: **str** with the subnet mask representation for this many bits
|
||||
|
||||
:raises: **ValueError** if given a number of bits outside the range of 0-128
|
||||
"""
|
||||
|
||||
if bits > 128 or bits < 0:
|
||||
raise ValueError('A mask can only be 0-128 bits, got %i' % bits)
|
||||
elif bits == 128:
|
||||
return FULL_IPv6_MASK
|
||||
|
||||
# get the binary representation of the mask
|
||||
mask_bin = _get_binary(2 ** bits - 1, 128)[::-1]
|
||||
|
||||
# breaks it into sixteen character groupings
|
||||
groupings = [mask_bin[16 * i:16 * (i + 1)] for i in range(8)]
|
||||
|
||||
# converts each group into its hex value
|
||||
return ':'.join(['%04x' % int(group, 2) for group in groupings]).upper()
|
||||
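# Expected output of the two mask helpers above.

from stem.util.connection import get_mask_ipv4, get_mask_ipv6

print(get_mask_ipv4(24))  # '255.255.255.0'
print(get_mask_ipv6(64))  # 'FFFF:FFFF:FFFF:FFFF:0000:0000:0000:0000'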
|
||||
|
||||
def _get_masked_bits(mask):
|
||||
"""
|
||||
Provides the number of bits that an IPv4 subnet mask represents. Note that
|
||||
not all masks can be represented by a bit count.
|
||||
|
||||
:param str mask: mask to be converted
|
||||
|
||||
:returns: **int** with the number of bits represented by the mask
|
||||
|
||||
:raises: **ValueError** if the mask is invalid or can't be converted
|
||||
"""
|
||||
|
||||
if not is_valid_ipv4_address(mask):
|
||||
raise ValueError("'%s' is an invalid subnet mask" % mask)
|
||||
|
||||
# converts octets to binary representation
|
||||
mask_bin = _get_address_binary(mask)
|
||||
mask_match = re.match('^(1*)(0*)$', mask_bin)
|
||||
|
||||
if mask_match:
|
||||
return 32 - len(mask_match.groups()[1])
|
||||
else:
|
||||
raise ValueError('Unable to convert mask to a bit count: %s' % mask)
|
||||
|
||||
|
||||
def _get_binary(value, bits):
|
||||
"""
|
||||
Provides the given value as a binary string, padded with zeros to the given
|
||||
number of bits.
|
||||
|
||||
:param int value: value to be converted
|
||||
:param int bits: number of bits to pad to
|
||||
"""
|
||||
|
||||
# http://www.daniweb.com/code/snippet216539.html
|
||||
return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)])
|
||||
|
||||
|
||||
def _get_address_binary(address):
|
||||
"""
|
||||
Provides the binary value for an IPv4 or IPv6 address.
|
||||
|
||||
:returns: **str** with the binary representation of this address
|
||||
|
||||
:raises: **ValueError** if address is neither an IPv4 nor IPv6 address
|
||||
"""
|
||||
|
||||
if is_valid_ipv4_address(address):
|
||||
return ''.join([_get_binary(int(octet), 8) for octet in address.split('.')])
|
||||
elif is_valid_ipv6_address(address):
|
||||
address = expand_ipv6_address(address)
|
||||
return ''.join([_get_binary(int(grouping, 16), 16) for grouping in address.split(':')])
|
||||
else:
|
||||
raise ValueError("'%s' is neither an IPv4 or IPv6 address" % address)
|
||||
|
||||
|
||||
def _hmac_sha256(key, msg):
|
||||
"""
|
||||
Generates a sha256 digest using the given key and message.
|
||||
|
||||
:param str key: starting key for the hash
|
||||
:param str msg: message to be hashed
|
||||
|
||||
:returns: sha256 digest of msg as bytes, hashed using the given key
|
||||
"""
|
||||
|
||||
return hmac.new(key, msg, hashlib.sha256).digest()
|
||||
|
||||
|
||||
def _cryptovariables_equal(x, y):
|
||||
"""
|
||||
Compares two strings for equality securely.
|
||||
|
||||
:param str x: string to be compared.
|
||||
:param str y: the other string to be compared.
|
||||
|
||||
:returns: **True** if both strings are equal, **False** otherwise.
|
||||
"""
|
||||
|
||||
return (
|
||||
_hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) ==
|
||||
_hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y))
|
||||
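# Unlike a plain '==', the HMAC construction above takes the same amount of
# time no matter where two values first differ, so an attacker can't
# binary-search a secret via response timing. A sketch, within this module:
#
#   expected = b'some-authentication-cookie'   # hypothetical secret
#   supplied = b'some-authentication-cookie'   # value received from a client
#
#   if _cryptovariables_equal(supplied, expected):
#     ...  # grant access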
|
||||
# TODO: drop with stem 2.x
|
||||
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
|
||||
# names for backward compatibility.
|
||||
|
||||
get_system_resolvers = system_resolvers
|
Shared/lib/python3.4/site-packages/stem/util/enum.py (new file, 172 lines)
@@ -0,0 +1,172 @@
|
|||
# Copyright 2011-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Basic enumeration, providing ordered types for collections. These can be
|
||||
constructed as simple type listings...
|
||||
|
||||
::
|
||||
|
||||
>>> from stem.util import enum
|
||||
>>> insects = enum.Enum('ANT', 'WASP', 'LADYBUG', 'FIREFLY')
|
||||
>>> insects.ANT
|
||||
'Ant'
|
||||
>>> tuple(insects)
|
||||
('Ant', 'Wasp', 'Ladybug', 'Firefly')
|
||||
|
||||
... or with overwritten string counterparts...
|
||||
|
||||
::
|
||||
|
||||
>>> from stem.util import enum
|
||||
>>> pets = enum.Enum(('DOG', 'Skippy'), 'CAT', ('FISH', 'Nemo'))
|
||||
>>> pets.DOG
|
||||
'Skippy'
|
||||
>>> pets.CAT
|
||||
'Cat'
|
||||
|
||||
**Module Overview:**
|
||||
|
||||
::
|
||||
|
||||
UppercaseEnum - Provides an enum instance with capitalized values
|
||||
|
||||
Enum - Provides a basic, ordered enumeration
|
||||
|- keys - string representation of our enum keys
|
||||
|- index_of - index of an enum value
|
||||
|- next - provides the enum after a given enum value
|
||||
|- previous - provides the enum before a given value
|
||||
|- __getitem__ - provides the value for an enum key
|
||||
+- __iter__ - iterator over our enum keys
|
||||
"""
|
||||
|
||||
from stem import str_type
|
||||
|
||||
|
||||
def UppercaseEnum(*args):
|
||||
"""
|
||||
Provides an :class:`~stem.util.enum.Enum` instance where the values are
|
||||
identical to the keys. Since the keys are uppercase by convention this means
|
||||
the values are too. For instance...
|
||||
|
||||
::
|
||||
|
||||
>>> from stem.util import enum
|
||||
>>> runlevels = enum.UppercaseEnum('DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR')
|
||||
>>> runlevels.DEBUG
|
||||
'DEBUG'
|
||||
|
||||
:param list args: enum keys to initialize with
|
||||
|
||||
:returns: :class:`~stem.util.enum.Enum` instance with the given keys
|
||||
"""
|
||||
|
||||
return Enum(*[(v, v) for v in args])
|
||||
|
||||
|
||||
class Enum(object):
|
||||
"""
|
||||
Basic enumeration.
|
||||
"""
|
||||
|
||||
def __init__(self, *args):
|
||||
from stem.util.str_tools import _to_camel_case
|
||||
|
||||
# ordered listings of our keys and values
|
||||
keys, values = [], []
|
||||
|
||||
for entry in args:
|
||||
if isinstance(entry, (bytes, str_type)):
|
||||
key, val = entry, _to_camel_case(entry)
|
||||
elif isinstance(entry, tuple) and len(entry) == 2:
|
||||
key, val = entry
|
||||
else:
|
||||
raise ValueError('Unrecognized input: %s' % args)
|
||||
|
||||
keys.append(key)
|
||||
values.append(val)
|
||||
setattr(self, key, val)
|
||||
|
||||
self._keys = tuple(keys)
|
||||
self._values = tuple(values)
|
||||
|
||||
def keys(self):
|
||||
"""
|
||||
Provides an ordered listing of the enumeration keys in this set.
|
||||
|
||||
:returns: **list** with our enum keys
|
||||
"""
|
||||
|
||||
return list(self._keys)
|
||||
|
||||
def index_of(self, value):
|
||||
"""
|
||||
Provides the index of the given value in the collection.
|
||||
|
||||
:param str value: entry to be looked up
|
||||
|
||||
:returns: **int** index of the given entry
|
||||
|
||||
:raises: **ValueError** if no such element exists
|
||||
"""
|
||||
|
||||
return self._values.index(value)
|
||||
|
||||
def next(self, value):
|
||||
"""
|
||||
Provides the next enumeration after the given value.
|
||||
|
||||
:param str value: enumeration for which to get the next entry
|
||||
|
||||
:returns: enum value following the given entry
|
||||
|
||||
:raises: **ValueError** if no such element exists
|
||||
"""
|
||||
|
||||
if value not in self._values:
|
||||
raise ValueError('No such enumeration exists: %s (options: %s)' % (value, ', '.join(self._values)))
|
||||
|
||||
next_index = (self._values.index(value) + 1) % len(self._values)
|
||||
return self._values[next_index]
|
||||
|
||||
def previous(self, value):
|
||||
"""
|
||||
Provides the previous enumeration before the given value.
|
||||
|
||||
:param str value: enumeration for which to get the previous entry
|
||||
|
||||
:returns: enum value preceding the given entry
|
||||
|
||||
:raises: **ValueError** if no such element exists
|
||||
"""
|
||||
|
||||
if value not in self._values:
|
||||
raise ValueError('No such enumeration exists: %s (options: %s)' % (value, ', '.join(self._values)))
|
||||
|
||||
prev_index = (self._values.index(value) - 1) % len(self._values)
|
||||
return self._values[prev_index]
|
||||
|
||||
def __getitem__(self, item):
|
||||
"""
|
||||
Provides the values for the given key.
|
||||
|
||||
:param str item: key to be looked up
|
||||
|
||||
:returns: **str** with the value for the given key
|
||||
|
||||
:raises: **ValueError** if the key doesn't exist
|
||||
"""
|
||||
|
||||
if item in vars(self):
|
||||
return getattr(self, item)
|
||||
else:
|
||||
keys = ', '.join(self.keys())
|
||||
raise ValueError("'%s' isn't among our enumeration keys, which includes: %s" % (item, keys))
|
||||
|
||||
def __iter__(self):
|
||||
"""
|
||||
Provides an ordered listing of the enums in this set.
|
||||
"""
|
||||
|
||||
for entry in self._values:
|
||||
yield entry
|
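# Demonstrating the ordered navigation the Enum class provides, reusing the
# insect example from the module docstring.

from stem.util import enum

insects = enum.Enum('ANT', 'WASP', 'LADYBUG', 'FIREFLY')

print(insects.index_of('Wasp'))  # 1
print(insects.next('Wasp'))      # 'Ladybug'
print(insects.previous('Ant'))   # 'Firefly', navigation wraps around
print(insects['ANT'])            # 'Ant', lookup by key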
Shared/lib/python3.4/site-packages/stem/util/log.py (new file, 253 lines)
@@ -0,0 +1,253 @@
|
|||
# Copyright 2011-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Functions to aid library logging. The default logging
|
||||
:data:`~stem.util.log.Runlevel` is usually NOTICE and above.
|
||||
|
||||
**Stem users are more than welcome to listen for stem events, but these
|
||||
functions are not being vended to our users. They may change in the future; use
|
||||
them at your own risk.**
|
||||
|
||||
**Module Overview:**
|
||||
|
||||
::
|
||||
|
||||
get_logger - provides stem's Logger instance
|
||||
logging_level - converts a runlevel to its logging number
|
||||
escape - escapes special characters in a message in preparation for logging
|
||||
|
||||
log - logs a message at the given runlevel
|
||||
log_once - logs a message, deduplicating if it has already been logged
|
||||
trace - logs a message at the TRACE runlevel
|
||||
debug - logs a message at the DEBUG runlevel
|
||||
info - logs a message at the INFO runlevel
|
||||
notice - logs a message at the NOTICE runlevel
|
||||
warn - logs a message at the WARN runlevel
|
||||
error - logs a message at the ERROR runlevel
|
||||
|
||||
LogBuffer - Buffers logged events so they can be iterated over.
|
||||
|- is_empty - checks if there's events in our buffer
|
||||
+- __iter__ - iterates over and removes the buffered events
|
||||
|
||||
log_to_stdout - reports further logged events to stdout
|
||||
|
||||
.. data:: Runlevel (enum)
|
||||
|
||||
Enumeration for logging runlevels.
|
||||
|
||||
========== ===========
|
||||
Runlevel Description
|
||||
========== ===========
|
||||
**ERROR** critical issue occurred, the user needs to be notified
|
||||
**WARN** non-critical issue occurred that the user should be aware of
|
||||
**NOTICE** information that is helpful to the user
|
||||
**INFO** high level library activity
|
||||
**DEBUG** low level library activity
|
||||
**TRACE** request/reply logging
|
||||
========== ===========
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
import stem.prereq
|
||||
import stem.util.enum
|
||||
import stem.util.str_tools
|
||||
|
||||
# Logging runlevels. These are *very* commonly used so we include shorter
|
||||
# aliases (so they can be referenced as log.DEBUG, log.WARN, etc).
|
||||
|
||||
Runlevel = stem.util.enum.UppercaseEnum('TRACE', 'DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR')
|
||||
TRACE, DEBUG, INFO, NOTICE, WARN, ERR = list(Runlevel)
|
||||
|
||||
# mapping of runlevels to the logger module's values, TRACE and DEBUG aren't
|
||||
# built into the module
|
||||
|
||||
LOG_VALUES = {
|
||||
Runlevel.TRACE: logging.DEBUG - 5,
|
||||
Runlevel.DEBUG: logging.DEBUG,
|
||||
Runlevel.INFO: logging.INFO,
|
||||
Runlevel.NOTICE: logging.INFO + 5,
|
||||
Runlevel.WARN: logging.WARN,
|
||||
Runlevel.ERROR: logging.ERROR,
|
||||
}
|
||||
|
||||
logging.addLevelName(LOG_VALUES[TRACE], 'TRACE')
|
||||
logging.addLevelName(LOG_VALUES[NOTICE], 'NOTICE')
|
||||
|
||||
LOGGER = logging.getLogger('stem')
|
||||
LOGGER.setLevel(LOG_VALUES[TRACE])
|
||||
|
||||
# There's some messages that we don't want to log more than once. This set has
|
||||
# the messages IDs that we've logged which fall into this category.
|
||||
DEDUPLICATION_MESSAGE_IDS = set()
|
||||
|
||||
# Adds a default nullhandler for the stem logger, suppressing the 'No handlers
|
||||
# could be found for logger "stem"' warning as per...
|
||||
# http://docs.python.org/release/3.1.3/library/logging.html#configuring-logging-for-a-library
|
||||
|
||||
|
||||
class _NullHandler(logging.Handler):
|
||||
def emit(self, record):
|
||||
pass
|
||||
|
||||
if not LOGGER.handlers:
|
||||
LOGGER.addHandler(_NullHandler())
|
||||
|
||||
|
||||
def get_logger():
|
||||
"""
|
||||
Provides the stem logger.
|
||||
|
||||
:return: **logging.Logger** for stem
|
||||
"""
|
||||
|
||||
return LOGGER
|
||||
|
||||
|
||||
def logging_level(runlevel):
|
||||
"""
|
||||
Translates a runlevel into the value expected by the logging module.
|
||||
|
||||
:param stem.util.log.Runlevel runlevel: runlevel to be returned, no logging if **None**
|
||||
"""
|
||||
|
||||
if runlevel:
|
||||
return LOG_VALUES[runlevel]
|
||||
else:
|
||||
return logging.FATAL + 5
|
||||
|
||||
|
||||
def escape(message):
|
||||
"""
|
||||
Escapes specific sequences for logging (newlines, tabs, carriage returns). If
|
||||
the input is **bytes** then this converts it to **unicode** under python 3.x.
|
||||
|
||||
:param str message: string to be escaped
|
||||
|
||||
:returns: str that is escaped
|
||||
"""
|
||||
|
||||
if stem.prereq.is_python_3():
|
||||
message = stem.util.str_tools._to_unicode(message)
|
||||
|
||||
for pattern, replacement in (('\n', '\\n'), ('\r', '\\r'), ('\t', '\\t')):
|
||||
message = message.replace(pattern, replacement)
|
||||
|
||||
return message
|
||||
|
||||
|
||||
def log(runlevel, message):
|
||||
"""
|
||||
Logs a message at the given runlevel.
|
||||
|
||||
:param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None**
|
||||
:param str message: message to be logged
|
||||
"""
|
||||
|
||||
if runlevel:
|
||||
LOGGER.log(LOG_VALUES[runlevel], message)
|
||||
|
||||
|
||||
def log_once(message_id, runlevel, message):
|
||||
"""
|
||||
Logs a message at the given runlevel. If a message with this ID has already
|
||||
been logged then this is a no-op.
|
||||
|
||||
:param str message_id: unique message identifier to deduplicate on
|
||||
:param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None**
|
||||
:param str message: message to be logged
|
||||
|
||||
:returns: **True** if we log the message, **False** otherwise
|
||||
"""
|
||||
|
||||
if not runlevel or message_id in DEDUPLICATION_MESSAGE_IDS:
|
||||
return False
|
||||
else:
|
||||
DEDUPLICATION_MESSAGE_IDS.add(message_id)
|
||||
log(runlevel, message)
|
||||
return True
|
||||
# shorter aliases for logging at a runlevel
|
||||
|
||||
|
||||
def trace(message):
|
||||
log(Runlevel.TRACE, message)
|
||||
|
||||
|
||||
def debug(message):
|
||||
log(Runlevel.DEBUG, message)
|
||||
|
||||
|
||||
def info(message):
|
||||
log(Runlevel.INFO, message)
|
||||
|
||||
|
||||
def notice(message):
|
||||
log(Runlevel.NOTICE, message)
|
||||
|
||||
|
||||
def warn(message):
|
||||
log(Runlevel.WARN, message)
|
||||
|
||||
|
||||
def error(message):
|
||||
log(Runlevel.ERROR, message)
|
||||
|
||||
|
||||
class LogBuffer(logging.Handler):
|
||||
"""
|
||||
Basic log handler that listens for stem events and stores them so they can be
|
||||
read later. Log entries are cleared as they are read.
|
||||
|
||||
.. versionchanged:: 1.4.0
|
||||
Added the yield_records argument.
|
||||
"""
|
||||
|
||||
def __init__(self, runlevel, yield_records = False):
|
||||
# TODO: At least in python 2.6 logging.Handler has a bug in that it doesn't
|
||||
# extend object, causing our super() call to fail. When we drop python 2.6
|
||||
# support we should switch back to using super() instead.
|
||||
#
|
||||
# super(LogBuffer, self).__init__(level = logging_level(runlevel))
|
||||
|
||||
logging.Handler.__init__(self, level = logging_level(runlevel))
|
||||
|
||||
self.formatter = logging.Formatter(
|
||||
fmt = '%(asctime)s [%(levelname)s] %(message)s',
|
||||
datefmt = '%m/%d/%Y %H:%M:%S')
|
||||
|
||||
self._buffer = []
|
||||
self._yield_records = yield_records
|
||||
|
||||
def is_empty(self):
|
||||
return not bool(self._buffer)
|
||||
|
||||
def __iter__(self):
|
||||
while self._buffer:
|
||||
record = self._buffer.pop(0)
|
||||
yield record if self._yield_records else self.formatter.format(record)
|
||||
|
||||
def emit(self, record):
|
||||
self._buffer.append(record)
|
||||
|
||||
|
||||
class _StdoutLogger(logging.Handler):
|
||||
def __init__(self, runlevel):
|
||||
logging.Handler.__init__(self, level = logging_level(runlevel))
|
||||
|
||||
self.formatter = logging.Formatter(
|
||||
fmt = '%(asctime)s [%(levelname)s] %(message)s',
|
||||
datefmt = '%m/%d/%Y %H:%M:%S')
|
||||
|
||||
def emit(self, record):
|
||||
print(self.formatter.format(record))
|
||||
|
||||
|
||||
def log_to_stdout(runlevel):
|
||||
"""
|
||||
Logs further events to stdout.
|
||||
|
||||
:param stem.util.log.Runlevel runlevel: minimum runlevel a message needs to be to be logged
|
||||
"""
|
||||
|
||||
get_logger().addHandler(_StdoutLogger(runlevel))
|
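# A short sketch of capturing stem's log events with the LogBuffer handler
# defined above. The timestamp in the comment is just example output.

from stem.util import log

log_buffer = log.LogBuffer(log.Runlevel.NOTICE)
log.get_logger().addHandler(log_buffer)

log.notice('something the user should know')

for entry in log_buffer:
  print(entry)  # e.g. '04/21/2015 10:05:02 [NOTICE] something the user should know'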
Shared/lib/python3.4/site-packages/stem/util/lru_cache.py (new file, 182 lines)
@@ -0,0 +1,182 @@
|
|||
# Drop-in replacement for python 3.2's functools.lru_cache, from...
|
||||
# http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/
|
||||
#
|
||||
# ... which is under the MIT license. Stem users should *not* rely upon this
|
||||
# module. It will be removed when we drop support for python 3.2 and below.
|
||||
|
||||
"""
|
||||
Memoization decorator that caches a function's return value. If later called
|
||||
with the same arguments then the cached value is returned rather than
|
||||
reevaluated.
|
||||
|
||||
This is a python 2.x port of `functools.lru_cache
|
||||
<http://docs.python.org/3/library/functools.html#functools.lru_cache>`_. If
|
||||
using python 3.2 or later you should use that instead.
|
||||
"""
|
||||
|
||||
from collections import namedtuple
|
||||
from functools import update_wrapper
|
||||
from threading import RLock
|
||||
|
||||
_CacheInfo = namedtuple('CacheInfo', ['hits', 'misses', 'maxsize', 'currsize'])
|
||||
|
||||
|
||||
class _HashedSeq(list):
|
||||
__slots__ = 'hashvalue'
|
||||
|
||||
def __init__(self, tup, hash=hash):
|
||||
self[:] = tup
|
||||
self.hashvalue = hash(tup)
|
||||
|
||||
def __hash__(self):
|
||||
return self.hashvalue
|
||||
|
||||
|
||||
def _make_key(args, kwds, typed,
|
||||
kwd_mark = (object(),),
|
||||
fasttypes = set([int, str, frozenset, type(None)]),
|
||||
sorted=sorted, tuple=tuple, type=type, len=len):
|
||||
'Make a cache key from optionally typed positional and keyword arguments'
|
||||
key = args
|
||||
if kwds:
|
||||
sorted_items = sorted(kwds.items())
|
||||
key += kwd_mark
|
||||
for item in sorted_items:
|
||||
key += item
|
||||
if typed:
|
||||
key += tuple(type(v) for v in args)
|
||||
if kwds:
|
||||
key += tuple(type(v) for k, v in sorted_items)
|
||||
elif len(key) == 1 and type(key[0]) in fasttypes:
|
||||
return key[0]
|
||||
return _HashedSeq(key)
|
||||
|
||||
|
||||
def lru_cache(maxsize=100, typed=False):
|
||||
"""Least-recently-used cache decorator.
|
||||
|
||||
If *maxsize* is set to None, the LRU features are disabled and the cache
|
||||
can grow without bound.
|
||||
|
||||
If *typed* is True, arguments of different types will be cached separately.
|
||||
For example, f(3.0) and f(3) will be treated as distinct calls with
|
||||
distinct results.
|
||||
|
||||
Arguments to the cached function must be hashable.
|
||||
|
||||
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
|
||||
f.cache_info(). Clear the cache and statistics with f.cache_clear().
|
||||
Access the underlying function with f.__wrapped__.
|
||||
|
||||
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
|
||||
|
||||
"""
|
||||
|
||||
# Users should only access the lru_cache through its public API:
|
||||
# cache_info, cache_clear, and f.__wrapped__
|
||||
# The internals of the lru_cache are encapsulated for thread safety and
|
||||
# to allow the implementation to change (including a possible C version).
|
||||
|
||||
def decorating_function(user_function):
|
||||
|
||||
cache = dict()
|
||||
stats = [0, 0] # make statistics updateable non-locally
|
||||
HITS, MISSES = 0, 1 # names for the stats fields
|
||||
make_key = _make_key
|
||||
cache_get = cache.get # bound method to lookup key or return None
|
||||
_len = len # localize the global len() function
|
||||
lock = RLock() # because linkedlist updates aren't threadsafe
|
||||
root = [] # root of the circular doubly linked list
|
||||
root[:] = [root, root, None, None] # initialize by pointing to self
|
||||
nonlocal_root = [root] # make updateable non-locally
|
||||
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
|
||||
|
||||
if maxsize == 0:
|
||||
|
||||
def wrapper(*args, **kwds):
|
||||
# no caching, just do a statistics update after a successful call
|
||||
result = user_function(*args, **kwds)
|
||||
stats[MISSES] += 1
|
||||
return result
|
||||
|
||||
elif maxsize is None:
|
||||
|
||||
def wrapper(*args, **kwds):
|
||||
# simple caching without ordering or size limit
|
||||
key = make_key(args, kwds, typed)
|
||||
result = cache_get(key, root) # root used here as a unique not-found sentinel
|
||||
if result is not root:
|
||||
stats[HITS] += 1
|
||||
return result
|
||||
result = user_function(*args, **kwds)
|
||||
cache[key] = result
|
||||
stats[MISSES] += 1
|
||||
return result
|
||||
|
||||
else:
|
||||
|
||||
def wrapper(*args, **kwds):
|
||||
# size limited caching that tracks accesses by recency
|
||||
key = make_key(args, kwds, typed) if kwds or typed else args
|
||||
with lock:
|
||||
link = cache_get(key)
|
||||
if link is not None:
|
||||
# record recent use of the key by moving it to the front of the list
|
||||
root, = nonlocal_root
|
||||
link_prev, link_next, key, result = link
|
||||
link_prev[NEXT] = link_next
|
||||
link_next[PREV] = link_prev
|
||||
last = root[PREV]
|
||||
last[NEXT] = root[PREV] = link
|
||||
link[PREV] = last
|
||||
link[NEXT] = root
|
||||
stats[HITS] += 1
|
||||
return result
|
||||
result = user_function(*args, **kwds)
|
||||
with lock:
|
||||
root, = nonlocal_root
|
||||
if key in cache:
|
||||
# getting here means that this same key was added to the
|
||||
# cache while the lock was released. since the link
|
||||
# update is already done, we need only return the
|
||||
# computed result and update the count of misses.
|
||||
pass
|
||||
elif _len(cache) >= maxsize:
|
||||
# use the old root to store the new key and result
|
||||
oldroot = root
|
||||
oldroot[KEY] = key
|
||||
oldroot[RESULT] = result
|
||||
# empty the oldest link and make it the new root
|
||||
root = nonlocal_root[0] = oldroot[NEXT]
|
||||
oldkey = root[KEY]
|
||||
root[KEY] = root[RESULT] = None
|
||||
# now update the cache dictionary for the new links
|
||||
del cache[oldkey]
|
||||
cache[key] = oldroot
|
||||
else:
|
||||
# put result in a new link at the front of the list
|
||||
last = root[PREV]
|
||||
link = [last, root, key, result]
|
||||
last[NEXT] = root[PREV] = cache[key] = link
|
||||
stats[MISSES] += 1
|
||||
return result
|
||||
|
||||
def cache_info():
|
||||
"""Report cache statistics"""
|
||||
with lock:
|
||||
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
|
||||
|
||||
def cache_clear():
|
||||
"""Clear the cache and cache statistics"""
|
||||
with lock:
|
||||
cache.clear()
|
||||
root = nonlocal_root[0]
|
||||
root[:] = [root, root, None, None]
|
||||
stats[:] = [0, 0]
|
||||
|
||||
wrapper.__wrapped__ = user_function
|
||||
wrapper.cache_info = cache_info
|
||||
wrapper.cache_clear = cache_clear
|
||||
return update_wrapper(wrapper, user_function)
|
||||
|
||||
return decorating_function
|
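# A usage sketch for the backported decorator above; it mirrors
# functools.lru_cache from python 3.2+.

from stem.util.lru_cache import lru_cache

@lru_cache(maxsize = 100)
def fibonacci(n):
  return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)

print(fibonacci(10))           # 55, computed with memoized recursion
print(fibonacci.cache_info())  # CacheInfo(hits=8, misses=11, maxsize=100, currsize=11)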
Shared/lib/python3.4/site-packages/stem/util/ordereddict.py (new file, 133 lines)
@@ -0,0 +1,133 @@
|
|||
# Drop in replacement for python 2.7's OrderedDict, from...
|
||||
# http://pypi.python.org/pypi/ordereddict
|
||||
#
|
||||
# Stem users should *not* rely upon this module. It will be removed when we
|
||||
# drop support for python 2.6 and below.
|
||||
|
||||
# Copyright (c) 2009 Raymond Hettinger
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person
|
||||
# obtaining a copy of this software and associated documentation files
|
||||
# (the "Software"), to deal in the Software without restriction,
|
||||
# including without limitation the rights to use, copy, modify, merge,
|
||||
# publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
# and to permit persons to whom the Software is furnished to do so,
|
||||
# subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
# OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
from UserDict import DictMixin
|
||||
|
||||
|
||||
class OrderedDict(dict, DictMixin):
|
||||
def __init__(self, *args, **kwds):
|
||||
if len(args) > 1:
|
||||
raise TypeError('expected at most 1 arguments, got %d' % len(args))
|
||||
try:
|
||||
self.__end
|
||||
except AttributeError:
|
||||
self.clear()
|
||||
self.update(*args, **kwds)
|
||||
|
||||
def clear(self):
|
||||
self.__end = end = []
|
||||
end += [None, end, end] # sentinel node for doubly linked list
|
||||
self.__map = {} # key --> [key, prev, next]
|
||||
dict.clear(self)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if key not in self:
|
||||
end = self.__end
|
||||
curr = end[1]
|
||||
curr[2] = end[1] = self.__map[key] = [key, curr, end]
|
||||
dict.__setitem__(self, key, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
dict.__delitem__(self, key)
|
||||
key, prev, next = self.__map.pop(key)
|
||||
prev[2] = next
|
||||
next[1] = prev
|
||||
|
||||
def __iter__(self):
|
||||
end = self.__end
|
||||
curr = end[2]
|
||||
while curr is not end:
|
||||
yield curr[0]
|
||||
curr = curr[2]
|
||||
|
||||
def __reversed__(self):
|
||||
end = self.__end
|
||||
curr = end[1]
|
||||
while curr is not end:
|
||||
yield curr[0]
|
||||
curr = curr[1]
|
||||
|
||||
def popitem(self, last=True):
|
||||
if not self:
|
||||
raise KeyError('dictionary is empty')
|
||||
if last:
|
||||
key = reversed(self).next()
|
||||
else:
|
||||
key = iter(self).next()
|
||||
value = self.pop(key)
|
||||
return key, value
|
||||
|
||||
def __reduce__(self):
|
||||
items = [[k, self[k]] for k in self]
|
||||
tmp = self.__map, self.__end
|
||||
del self.__map, self.__end
|
||||
inst_dict = vars(self).copy()
|
||||
self.__map, self.__end = tmp
|
||||
if inst_dict:
|
||||
return (self.__class__, (items,), inst_dict)
|
||||
return self.__class__, (items,)
|
||||
|
||||
def keys(self):
|
||||
return list(self)
|
||||
|
||||
setdefault = DictMixin.setdefault
|
||||
update = DictMixin.update
|
||||
pop = DictMixin.pop
|
||||
values = DictMixin.values
|
||||
items = DictMixin.items
|
||||
iterkeys = DictMixin.iterkeys
|
||||
itervalues = DictMixin.itervalues
|
||||
iteritems = DictMixin.iteritems
|
||||
|
||||
def __repr__(self):
|
||||
if not self:
|
||||
return '%s()' % (self.__class__.__name__,)
|
||||
return '%s(%r)' % (self.__class__.__name__, self.items())
|
||||
|
||||
def copy(self):
|
||||
return self.__class__(self)
|
||||
|
||||
@classmethod
|
||||
def fromkeys(cls, iterable, value=None):
|
||||
d = cls()
|
||||
for key in iterable:
|
||||
d[key] = value
|
||||
return d
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, OrderedDict):
|
||||
if len(self) != len(other):
|
||||
return False
|
||||
for p, q in zip(self.items(), other.items()):
|
||||
if p != q:
|
||||
return False
|
||||
return True
|
||||
return dict.__eq__(self, other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
Shared/lib/python3.4/site-packages/stem/util/ports.cfg (new file, 313 lines)
@@ -0,0 +1,313 @@
|
|||
################################################################################
|
||||
#
|
||||
# Common usages for ports. This is based on...
|
||||
#
|
||||
# https://secure.wikimedia.org/wikipedia/en/wiki/List_of_TCP_and_UDP_port_numbers
|
||||
# http://isc.sans.edu/services.html
|
||||
#
|
||||
################################################################################
|
||||
|
||||
port 1 => TCPMUX
|
||||
port 2 => CompressNET
|
||||
port 3 => CompressNET
|
||||
port 5 => RJE
|
||||
port 7 => Echo
|
||||
port 9 => Discard
|
||||
port 11 => SYSTAT
|
||||
port 13 => Daytime
|
||||
port 15 => netstat
|
||||
port 17 => QOTD
|
||||
port 18 => MSP
|
||||
port 19 => CHARGEN
|
||||
port 20 => FTP
|
||||
port 21 => FTP
|
||||
port 22 => SSH
|
||||
port 23 => Telnet
|
||||
port 24 => Priv-mail
|
||||
port 25 => SMTP
|
||||
port 34 => RF
|
||||
port 35 => Printer
|
||||
port 37 => TIME
|
||||
port 39 => RLP
|
||||
port 41 => Graphics
|
||||
port 42 => WINS
|
||||
port 43 => WHOIS
|
||||
port 47 => NI FTP
|
||||
port 49 => TACACS
|
||||
port 50 => Remote Mail
|
||||
port 51 => IMP
|
||||
port 52 => XNS
|
||||
port 53 => DNS
|
||||
port 54 => XNS
|
||||
port 55 => ISI-GL
|
||||
port 56 => RAP
|
||||
port 57 => MTP
|
||||
port 58 => XNS
|
||||
port 67 => BOOTP
|
||||
port 68 => BOOTP
|
||||
port 69 => TFTP
|
||||
port 70 => Gopher
|
||||
port 79 => Finger
|
||||
port 80 => HTTP
|
||||
port 81 => HTTP Alternate
|
||||
port 82 => Torpark
|
||||
port 83 => MIT ML
|
||||
port 88 => Kerberos
|
||||
port 90 => dnsix
|
||||
port 99 => WIP
|
||||
port 101 => NIC
|
||||
port 102 => ISO-TSAP
|
||||
port 104 => ACR/NEMA
|
||||
port 105 => CCSO
|
||||
port 107 => Telnet
|
||||
port 108 => SNA
|
||||
port 109 => POP2
|
||||
port 110 => POP3
|
||||
port 111 => ONC RPC
|
||||
port 113 => ident
|
||||
port 115 => SFTP
|
||||
port 117 => UUCP
|
||||
port 118 => SQL
|
||||
port 119 => NNTP
|
||||
port 123 => NTP
|
||||
port 135 => DCE
|
||||
port 137 => NetBIOS
|
||||
port 138 => NetBIOS
|
||||
port 139 => NetBIOS
|
||||
port 143 => IMAP
|
||||
port 152 => BFTP
|
||||
port 153 => SGMP
|
||||
port 156 => SQL
|
||||
port 158 => DMSP
|
||||
port 161 => SNMP
|
||||
port 162 => SNMPTRAP
|
||||
port 170 => Print-srv
|
||||
port 177 => XDMCP
|
||||
port 179 => BGP
|
||||
port 194 => IRC
|
||||
port 199 => SMUX
|
||||
port 201 => AppleTalk
|
||||
port 209 => QMTP
|
||||
port 210 => ANSI
|
||||
port 213 => IPX
|
||||
port 218 => MPP
|
||||
port 220 => IMAP
|
||||
port 256 => 2DEV
|
||||
port 259 => ESRO
|
||||
port 264 => BGMP
|
||||
port 308 => Novastor
|
||||
port 311 => OSX Admin
|
||||
port 318 => PKIX TSP
|
||||
port 319 => PTP
|
||||
port 320 => PTP
|
||||
port 323 => IMMP
|
||||
port 350 => MATIP
|
||||
port 351 => MATIP
|
||||
port 366 => ODMR
|
||||
port 369 => Rpc2portmap
|
||||
port 370 => codaauth2
|
||||
port 371 => ClearCase
|
||||
port 383 => HP Alarm Mgr
|
||||
port 384 => ARNS
|
||||
port 387 => AURP
|
||||
port 389 => LDAP
|
||||
port 401 => UPS
|
||||
port 402 => Altiris
|
||||
port 427 => SLP
|
||||
port 443 => HTTPS
|
||||
port 444 => SNPP
|
||||
port 445 => SMB
|
||||
port 464 => Kerberos (kpasswd)
|
||||
port 465 => SMTP
|
||||
port 475 => tcpnethaspsrv
|
||||
port 497 => Retrospect
|
||||
port 500 => ISAKMP
|
||||
port 501 => STMF
|
||||
port 502 => Modbus
|
||||
port 504 => Citadel
|
||||
port 510 => FirstClass
|
||||
port 512 => Rexec
|
||||
port 513 => rlogin
|
||||
port 514 => rsh
|
||||
port 515 => LPD
|
||||
port 517 => Talk
|
||||
port 518 => NTalk
|
||||
port 520 => efs
|
||||
port 524 => NCP
|
||||
port 530 => RPC
|
||||
port 531 => AIM/IRC
|
||||
port 532 => netnews
|
||||
port 533 => netwall
|
||||
port 540 => UUCP
|
||||
port 542 => commerce
|
||||
port 543 => Kerberos (klogin)
|
||||
port 544 => Kerberos (kshell)
|
||||
port 545 => OSISoft PI
|
||||
port 546 => DHCPv6
|
||||
port 547 => DHCPv6
|
||||
port 548 => AFP
|
||||
port 550 => new-who
|
||||
port 554 => RTSP
|
||||
port 556 => RFS
|
||||
port 560 => rmonitor
|
||||
port 561 => monitor
|
||||
port 563 => NNTPS
|
||||
port 587 => SMTP
|
||||
port 591 => FileMaker
|
||||
port 593 => HTTP RPC
|
||||
port 604 => TUNNEL
|
||||
port 623 => ASF-RMCP
|
||||
port 631 => CUPS
|
||||
port 635 => RLZ DBase
|
||||
port 636 => LDAPS
|
||||
port 639 => MSDP
|
||||
port 641 => SupportSoft
|
||||
port 646 => LDP
|
||||
port 647 => DHCP
|
||||
port 648 => RRP
|
||||
port 651 => IEEE-MMS
|
||||
port 652 => DTCP
|
||||
port 653 => SupportSoft
|
||||
port 654 => MMS/MMP
|
||||
port 657 => RMC
|
||||
port 660 => OSX Admin
|
||||
port 665 => sun-dr
|
||||
port 666 => Doom
|
||||
port 674 => ACAP
|
||||
port 691 => MS Exchange
|
||||
port 692 => Hyperwave-ISP
|
||||
port 694 => Linux-HA
|
||||
port 695 => IEEE-MMS-SSL
|
||||
port 698 => OLSR
|
||||
port 699 => Access Network
|
||||
port 700 => EPP
|
||||
port 701 => LMP
|
||||
port 702 => IRIS
|
||||
port 706 => SILC
|
||||
port 711 => MPLS
|
||||
port 712 => TBRPF
|
||||
port 720 => SMQP
|
||||
port 749 => Kerberos (admin)
|
||||
port 750 => rfile
|
||||
port 751 => pump
|
||||
port 752 => qrh
|
||||
port 753 => rrh
|
||||
port 754 => tell send
|
||||
port 760 => ns
|
||||
port 782 => Conserver
|
||||
port 783 => spamd
|
||||
port 829 => CMP
|
||||
port 843 => Flash
|
||||
port 847 => DHCP
|
||||
port 860 => iSCSI
|
||||
port 873 => rsync
|
||||
port 888 => CDDB
|
||||
port 901 => SWAT
|
||||
port 902-904 => VMware
|
||||
port 911 => NCA
|
||||
port 953 => DNS RNDC
|
||||
port 981 => SofaWare Firewall
|
||||
port 989 => FTPS
|
||||
port 990 => FTPS
|
||||
port 991 => NAS
|
||||
port 992 => Telnets
|
||||
port 993 => IMAPS
|
||||
port 994 => IRCS
|
||||
port 995 => POP3S
|
||||
port 999 => ScimoreDB
|
||||
port 1001 => JtoMB
|
||||
port 1002 => cogbot
|
||||
|
||||
port 1080 => SOCKS
|
||||
port 1085 => WebObjects
|
||||
port 1109 => KPOP
|
||||
port 1169 => Tripwire
|
||||
port 1194 => OpenVPN
|
||||
port 1214 => Kazaa
|
||||
port 1220 => QuickTime
|
||||
port 1234 => VLC
|
||||
port 1241 => Nessus
|
||||
port 1270 => SCOM
|
||||
port 1293 => IPSec
|
||||
port 1433 => MSSQL
|
||||
port 1434 => MSSQL
|
||||
port 1500 => NetGuard
|
||||
port 1503 => MSN
|
||||
port 1512 => WINS
|
||||
port 1521 => Oracle
|
||||
port 1526 => Oracle
|
||||
port 1533 => Sametime
|
||||
port 1666 => Perforce
|
||||
port 1677 => GroupWise
|
||||
port 1723 => PPTP
|
||||
port 1725 => Steam
|
||||
port 1863 => MSNP
|
||||
port 2049 => NFS
|
||||
port 2082 => Infowave
|
||||
port 2083 => radsec
|
||||
port 2086 => GNUnet
|
||||
port 2087 => ELI
|
||||
port 2095 => NBX SER
|
||||
port 2096 => NBX DIR
|
||||
port 2102-2104 => Zephyr
|
||||
port 2401 => CVS
|
||||
port 2525 => SMTP
|
||||
port 2710 => BitTorrent
|
||||
port 3074 => XBox LIVE
|
||||
port 3101 => BlackBerry
|
||||
port 3128 => SQUID
|
||||
port 3306 => MySQL
|
||||
port 3389 => WBT
|
||||
port 3690 => SVN
|
||||
port 3723 => Battle.net
|
||||
port 3724 => WoW
|
||||
port 4321 => RWHOIS
|
||||
port 4643 => Virtuozzo
|
||||
port 4662 => eMule
|
||||
port 5003 => FileMaker
|
||||
port 5050 => Yahoo IM
|
||||
port 5060 => SIP
|
||||
port 5061 => SIP
|
||||
port 5190 => AIM/ICQ
|
||||
port 5222 => Jabber
|
||||
port 5223 => Jabber
|
||||
port 5228 => Android Market
|
||||
port 5269 => Jabber
|
||||
port 5298 => Jabber
|
||||
port 5432 => PostgreSQL
|
||||
port 5500 => VNC
|
||||
port 5556 => Freeciv
|
||||
port 5666 => NRPE
|
||||
port 5667 => NSCA
|
||||
port 5800 => VNC
|
||||
port 5900 => VNC
|
||||
port 6346 => gnutella
|
||||
port 6347 => gnutella
|
||||
port 6660-6669 => IRC
|
||||
port 6679 => IRC
|
||||
port 6697 => IRC
|
||||
port 6881-6999 => BitTorrent
|
||||
port 8000 => iRDMI
|
||||
port 8008 => HTTP Alternate
|
||||
port 8010 => XMPP
|
||||
port 8074 => Gadu-Gadu
|
||||
port 8080 => HTTP Proxy
|
||||
port 8087 => SPP
|
||||
port 8088 => Radan HTTP
|
||||
port 8118 => Privoxy
|
||||
port 8123 => Polipo
|
||||
port 8332-8333 => Bitcoin
|
||||
port 8443 => PCsync HTTPS
|
||||
port 8888 => NewsEDGE
|
||||
port 9030 => Tor
|
||||
port 9050 => Tor
|
||||
port 9051 => Tor
|
||||
port 9418 => Git
|
||||
port 9999 => distinct
|
||||
port 10000 => Webmin
|
||||
port 19294 => Google Voice
|
||||
port 19638 => Ensim
|
||||
port 23399 => Skype
|
||||
port 30301 => BitTorrent
|
||||
port 33434 => traceroute
|
||||
|
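The listing above is a static port-to-service table. As a rough sketch of how such a table gets consumed (PORT_USES and port_usage() are illustrative names here, not necessarily the actual API), a lookup is just a dict query:

# Hypothetical sketch of a port-to-service lookup over a table like the above.
PORT_USES = {
  179: 'BGP',
  443: 'HTTPS',
  9050: 'Tor',
  33434: 'traceroute',
}

def port_usage(port):
  # returns the service label for a port, or None if it isn't a well known one
  return PORT_USES.get(port)

print(port_usage(9050))   # 'Tor'
print(port_usage(54321))  # None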
547
Shared/lib/python3.4/site-packages/stem/util/proc.py
Normal file
|
@@ -0,0 +1,547 @@
|
|||
# Copyright 2011-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Helper functions for querying process and system information from the /proc
|
||||
contents. Fetching information this way provides huge performance benefits
|
||||
over lookups via system utilities (ps, netstat, etc). For instance, resolving
|
||||
connections this way cuts the runtime by around 90% versus the alternatives.
|
||||
These functions may not work on all platforms (only Linux?).
|
||||
|
||||
The method for reading these files (and a little code) is borrowed from
|
||||
`psutil <https://code.google.com/p/psutil/>`_, which was written by Jay Loden,
|
||||
Dave Daeschler, Giampaolo Rodola' and is under the BSD license.
|
||||
|
||||
**These functions are not being vended to stem users. They may change in the
|
||||
future, use them at your own risk.**
|
||||
|
||||
.. versionchanged:: 1.3.0
|
||||
Dropped the get_* prefix from several function names. The old names still
|
||||
work, but are deprecated aliases.
|
||||
|
||||
**Module Overview:**
|
||||
|
||||
::
|
||||
|
||||
is_available - checks if proc utilities can be used on this system
|
||||
system_start_time - unix timestamp for when the system started
|
||||
physical_memory - memory available on this system
|
||||
cwd - provides the current working directory for a process
|
||||
uid - provides the user id a process is running under
|
||||
memory_usage - provides the memory usage of a process
|
||||
stats - queries statistics about a process
|
||||
file_descriptors_used - number of file descriptors used by a process
|
||||
connections - provides the connections made by a process
|
||||
|
||||
.. data:: Stat (enum)
|
||||
|
||||
Types of data available via the :func:`~stem.util.proc.stats` function.
|
||||
|
||||
============== ===========
|
||||
Stat Description
|
||||
============== ===========
|
||||
**COMMAND** command name under which the process is running
|
||||
**CPU_UTIME** total user time spent on the process
|
||||
**CPU_STIME** total system time spent on the process
|
||||
**START_TIME** when this process began, in unix time
|
||||
============== ===========
|
||||
"""
|
||||
|
||||
import base64
|
||||
import os
|
||||
import platform
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
|
||||
import stem.util.enum
|
||||
|
||||
from stem.util import log
|
||||
|
||||
try:
|
||||
# added in python 3.2
|
||||
from functools import lru_cache
|
||||
except ImportError:
|
||||
from stem.util.lru_cache import lru_cache
|
||||
|
||||
# os.sysconf is only defined on unix
|
||||
try:
|
||||
CLOCK_TICKS = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
|
||||
except AttributeError:
|
||||
CLOCK_TICKS = None
|
||||
|
||||
Stat = stem.util.enum.Enum(
|
||||
('COMMAND', 'command'), ('CPU_UTIME', 'utime'),
|
||||
('CPU_STIME', 'stime'), ('START_TIME', 'start time')
|
||||
)
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def is_available():
|
||||
"""
|
||||
Checks if proc information is available on this platform.
|
||||
|
||||
:returns: **True** if proc contents exist on this platform, **False** otherwise
|
||||
"""
|
||||
|
||||
if platform.system() != 'Linux':
|
||||
return False
|
||||
else:
|
||||
# list of process independent proc paths we use
|
||||
proc_paths = ('/proc/stat', '/proc/meminfo', '/proc/net/tcp', '/proc/net/udp')
|
||||
|
||||
for path in proc_paths:
|
||||
if not os.path.exists(path):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def system_start_time():
|
||||
"""
|
||||
Provides the unix time (seconds since epoch) when the system started.
|
||||
|
||||
:returns: **float** for the unix time of when the system started
|
||||
|
||||
:raises: **IOError** if it can't be determined
|
||||
"""
|
||||
|
||||
start_time, parameter = time.time(), 'system start time'
|
||||
btime_line = _get_line('/proc/stat', 'btime', parameter)
|
||||
|
||||
try:
|
||||
result = float(btime_line.strip().split()[1])
|
||||
_log_runtime(parameter, '/proc/stat[btime]', start_time)
|
||||
return result
|
||||
except:
|
||||
exc = IOError('unable to parse the /proc/stat btime entry: %s' % btime_line)
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def physical_memory():
|
||||
"""
|
||||
Provides the total physical memory on the system in bytes.
|
||||
|
||||
:returns: **int** for the bytes of physical memory this system has
|
||||
|
||||
:raises: **IOError** if it can't be determined
|
||||
"""
|
||||
|
||||
start_time, parameter = time.time(), 'system physical memory'
|
||||
mem_total_line = _get_line('/proc/meminfo', 'MemTotal:', parameter)
|
||||
|
||||
try:
|
||||
result = int(mem_total_line.split()[1]) * 1024
|
||||
_log_runtime(parameter, '/proc/meminfo[MemTotal]', start_time)
|
||||
return result
|
||||
except:
|
||||
exc = IOError('unable to parse the /proc/meminfo MemTotal entry: %s' % mem_total_line)
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
|
||||
def cwd(pid):
|
||||
"""
|
||||
Provides the current working directory for the given process.
|
||||
|
||||
:param int pid: process id of the process to be queried
|
||||
|
||||
:returns: **str** with the path of the working directory for the process
|
||||
|
||||
:raises: **IOError** if it can't be determined
|
||||
"""
|
||||
|
||||
start_time, parameter = time.time(), 'cwd'
|
||||
proc_cwd_link = '/proc/%s/cwd' % pid
|
||||
|
||||
if pid == 0:
|
||||
cwd = ''
|
||||
else:
|
||||
try:
|
||||
cwd = os.readlink(proc_cwd_link)
|
||||
except OSError:
|
||||
exc = IOError('unable to read %s' % proc_cwd_link)
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
_log_runtime(parameter, proc_cwd_link, start_time)
|
||||
return cwd
|
||||
|
||||
|
||||
def uid(pid):
|
||||
"""
|
||||
Provides the user ID the given process is running under.
|
||||
|
||||
:param int pid: process id of the process to be queried
|
||||
|
||||
:returns: **int** with the user id for the owner of the process
|
||||
|
||||
:raises: **IOError** if it can't be determined
|
||||
"""
|
||||
|
||||
start_time, parameter = time.time(), 'uid'
|
||||
status_path = '/proc/%s/status' % pid
|
||||
uid_line = _get_line(status_path, 'Uid:', parameter)
|
||||
|
||||
try:
|
||||
result = int(uid_line.split()[1])
|
||||
_log_runtime(parameter, '%s[Uid]' % status_path, start_time)
|
||||
return result
|
||||
except:
|
||||
exc = IOError('unable to parse the %s Uid entry: %s' % (status_path, uid_line))
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
|
||||
def memory_usage(pid):
|
||||
"""
|
||||
Provides the memory usage in bytes for the given process.
|
||||
|
||||
:param int pid: process id of the process to be queried
|
||||
|
||||
:returns: **tuple** of two ints with the memory usage of the process, of the
|
||||
form **(resident_size, virtual_size)**
|
||||
|
||||
:raises: **IOError** if it can't be determined
|
||||
"""
|
||||
|
||||
# checks if this is the kernel process
|
||||
|
||||
if pid == 0:
|
||||
return (0, 0)
|
||||
|
||||
start_time, parameter = time.time(), 'memory usage'
|
||||
status_path = '/proc/%s/status' % pid
|
||||
mem_lines = _get_lines(status_path, ('VmRSS:', 'VmSize:'), parameter)
|
||||
|
||||
try:
|
||||
residentSize = int(mem_lines['VmRSS:'].split()[1]) * 1024
|
||||
virtualSize = int(mem_lines['VmSize:'].split()[1]) * 1024
|
||||
|
||||
_log_runtime(parameter, '%s[VmRSS|VmSize]' % status_path, start_time)
|
||||
return (residentSize, virtualSize)
|
||||
except:
|
||||
exc = IOError('unable to parse the %s VmRSS and VmSize entries: %s' % (status_path, ', '.join(mem_lines)))
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
|
||||
def stats(pid, *stat_types):
|
||||
"""
|
||||
Provides process specific information. See the :data:`~stem.util.proc.Stat`
|
||||
enum for valid options.
|
||||
|
||||
:param int pid: process id of the process to be queried
|
||||
:param Stat stat_types: information to be provided back
|
||||
|
||||
:returns: **tuple** with all of the requested statistics as strings
|
||||
|
||||
:raises: **IOError** if it can't be determined
|
||||
"""
|
||||
|
||||
if CLOCK_TICKS is None:
|
||||
raise IOError('Unable to look up SC_CLK_TCK')
|
||||
|
||||
start_time, parameter = time.time(), 'process %s' % ', '.join(stat_types)
|
||||
|
||||
# the stat file contains a single line, of the form...
|
||||
# 8438 (tor) S 8407 8438 8407 34818 8438 4202496...
|
||||
stat_path = '/proc/%s/stat' % pid
|
||||
stat_line = _get_line(stat_path, str(pid), parameter)
|
||||
|
||||
# breaks line into component values
|
||||
stat_comp = []
|
||||
cmd_start, cmd_end = stat_line.find('('), stat_line.find(')')
|
||||
|
||||
if cmd_start != -1 and cmd_end != -1:
|
||||
stat_comp.append(stat_line[:cmd_start])
|
||||
stat_comp.append(stat_line[cmd_start + 1:cmd_end])
|
||||
stat_comp += stat_line[cmd_end + 1:].split()
|
||||
|
||||
if len(stat_comp) < 44 and _is_float(stat_comp[13], stat_comp[14], stat_comp[21]):
|
||||
exc = IOError('stat file had an unexpected format: %s' % stat_path)
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
results = []
|
||||
for stat_type in stat_types:
|
||||
if stat_type == Stat.COMMAND:
|
||||
if pid == 0:
|
||||
results.append('sched')
|
||||
else:
|
||||
results.append(stat_comp[1])
|
||||
elif stat_type == Stat.CPU_UTIME:
|
||||
if pid == 0:
|
||||
results.append('0')
|
||||
else:
|
||||
results.append(str(float(stat_comp[13]) / CLOCK_TICKS))
|
||||
elif stat_type == Stat.CPU_STIME:
|
||||
if pid == 0:
|
||||
results.append('0')
|
||||
else:
|
||||
results.append(str(float(stat_comp[14]) / CLOCK_TICKS))
|
||||
elif stat_type == Stat.START_TIME:
|
||||
if pid == 0:
|
||||
return system_start_time()
|
||||
else:
|
||||
# According to documentation, starttime is in field 21 and the unit is
|
||||
# jiffies (clock ticks). We divide it by clock ticks, then add the
|
||||
# system start time to get the seconds since the epoch.
|
||||
p_start_time = float(stat_comp[21]) / CLOCK_TICKS
|
||||
results.append(str(p_start_time + system_start_time()))
|
||||
|
||||
_log_runtime(parameter, stat_path, start_time)
|
||||
return tuple(results)
|
||||
|
||||
|
||||
def file_descriptors_used(pid):
|
||||
"""
|
||||
Provides the number of file descriptors currently being used by a process.
|
||||
|
||||
.. versionadded:: 1.3.0
|
||||
|
||||
:param int pid: process id of the process to be queried
|
||||
|
||||
:returns: **int** of the number of file descriptors used
|
||||
|
||||
:raises: **IOError** if it can't be determined
|
||||
"""
|
||||
|
||||
try:
|
||||
pid = int(pid)
|
||||
|
||||
if pid < 0:
|
||||
raise IOError("Process pids can't be negative: %s" % pid)
|
||||
except (ValueError, TypeError):
|
||||
raise IOError('Process pid was non-numeric: %s' % pid)
|
||||
|
||||
try:
|
||||
return len(os.listdir('/proc/%i/fd' % pid))
|
||||
except Exception as exc:
|
||||
raise IOError('Unable to check number of file descriptors used: %s' % exc)
|
||||
|
||||
|
||||
def connections(pid):
|
||||
"""
|
||||
Queries connection related information from the proc contents. This provides
|
||||
similar results to netstat, lsof, sockstat, and other connection resolution
|
||||
utilities (though the lookup is far quicker).
|
||||
|
||||
:param int pid: process id of the process to be queried
|
||||
|
||||
:returns: A listing of connection tuples of the form **[(local_ipAddr1,
|
||||
local_port1, foreign_ipAddr1, foreign_port1, protocol), ...]** (addresses
|
||||
and protocols are strings and ports are ints)
|
||||
|
||||
:raises: **IOError** if it can't be determined
|
||||
"""
|
||||
|
||||
try:
|
||||
pid = int(pid)
|
||||
|
||||
if pid < 0:
|
||||
raise IOError("Process pids can't be negative: %s" % pid)
|
||||
except (ValueError, TypeError):
|
||||
raise IOError('Process pid was non-numeric: %s' % pid)
|
||||
|
||||
if pid == 0:
|
||||
return []
|
||||
|
||||
# fetches the inode numbers for socket file descriptors
|
||||
|
||||
start_time, parameter = time.time(), 'process connections'
|
||||
inodes = []
|
||||
|
||||
for fd in os.listdir('/proc/%s/fd' % pid):
|
||||
fd_path = '/proc/%s/fd/%s' % (pid, fd)
|
||||
|
||||
try:
|
||||
# File descriptor link, such as 'socket:[30899]'
|
||||
|
||||
fd_name = os.readlink(fd_path)
|
||||
|
||||
if fd_name.startswith('socket:['):
|
||||
inodes.append(fd_name[8:-1])
|
||||
except OSError as exc:
|
||||
if not os.path.exists(fd_path):
|
||||
continue # descriptors may shift while we're in the middle of iterating over them
|
||||
|
||||
# most likely couldn't be read due to permissions
|
||||
exc = IOError('unable to determine file descriptor destination (%s): %s' % (exc, fd_path))
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
if not inodes:
|
||||
# unable to fetch any connections for this process
|
||||
return []
|
||||
|
||||
# check for the connection information from the /proc/net contents
|
||||
|
||||
conn = []
|
||||
|
||||
for proc_file_path in ('/proc/net/tcp', '/proc/net/udp'):
|
||||
try:
|
||||
proc_file = open(proc_file_path)
|
||||
proc_file.readline() # skip the first line
|
||||
|
||||
for line in proc_file:
|
||||
_, l_addr, f_addr, status, _, _, _, _, _, inode = line.split()[:10]
|
||||
|
||||
if inode in inodes:
|
||||
# if a tcp connection, skip if it isn't yet established
|
||||
if proc_file_path.endswith('/tcp') and status != '01':
|
||||
continue
|
||||
|
||||
local_ip, local_port = _decode_proc_address_encoding(l_addr)
|
||||
foreign_ip, foreign_port = _decode_proc_address_encoding(f_addr)
|
||||
protocol = proc_file_path[10:]
|
||||
conn.append((local_ip, local_port, foreign_ip, foreign_port, protocol))
|
||||
|
||||
proc_file.close()
|
||||
except IOError as exc:
|
||||
exc = IOError("unable to read '%s': %s" % (proc_file_path, exc))
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
except Exception as exc:
|
||||
exc = IOError("unable to parse '%s': %s" % (proc_file_path, exc))
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
_log_runtime(parameter, '/proc/net/[tcp|udp]', start_time)
|
||||
return conn
|
||||
|
||||
|
||||
def _decode_proc_address_encoding(addr):
|
||||
"""
|
||||
Translates an address entry in the /proc/net/* contents to a human readable
|
||||
form (`reference <http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html>`_),
|
||||
for instance:
|
||||
|
||||
::
|
||||
|
||||
"0500000A:0016" -> ("10.0.0.5", 22)
|
||||
|
||||
:param str addr: proc address entry to be decoded
|
||||
|
||||
:returns: **tuple** of the form **(addr, port)**, with addr as a string and port an int
|
||||
"""
|
||||
|
||||
ip, port = addr.split(':')
|
||||
|
||||
# the port is represented as a two-byte hexadecimal number
|
||||
port = int(port, 16)
|
||||
|
||||
if sys.version_info >= (3,):
|
||||
ip = ip.encode('ascii')
|
||||
|
||||
# The IPv4 address portion is a little-endian four-byte hexadecimal number.
|
||||
# That is, the least significant byte is listed first, so we need to reverse
|
||||
# the order of the bytes to convert it to an IP address.
|
||||
#
|
||||
# This needs to account for the endian ordering as per...
|
||||
# http://code.google.com/p/psutil/issues/detail?id=201
|
||||
# https://trac.torproject.org/projects/tor/ticket/4777
|
||||
|
||||
if sys.byteorder == 'little':
|
||||
ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip)[::-1])
|
||||
else:
|
||||
ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip))
|
||||
|
||||
return (ip, port)
|
||||
|
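# Worked example of the decoding above, assuming a little-endian host:
#
#   '0500000A:0016'  ->  ip hex '0500000A', port hex '0016'
#   int('0016', 16)                        ->  22
#   base64.b16decode('0500000A')[::-1]     ->  b'\x0a\x00\x00\x05'
#   socket.inet_ntop(socket.AF_INET, ...)  ->  '10.0.0.5'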
||||
|
||||
def _is_float(*value):
|
||||
try:
|
||||
for v in value:
|
||||
float(v)
|
||||
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def _get_line(file_path, line_prefix, parameter):
|
||||
return _get_lines(file_path, (line_prefix, ), parameter)[line_prefix]
|
||||
|
||||
|
||||
def _get_lines(file_path, line_prefixes, parameter):
|
||||
"""
|
||||
Fetches lines with the given prefixes from a file. This only provides back
|
||||
the first instance of each prefix.
|
||||
|
||||
:param str file_path: path of the file to read
|
||||
:param tuple line_prefixes: string prefixes of the lines to return
|
||||
:param str parameter: description of the proc attribute being fetched
|
||||
|
||||
:returns: mapping of prefixes to the matching line
|
||||
|
||||
:raises: **IOError** if unable to read the file or can't find all of the prefixes
|
||||
"""
|
||||
|
||||
try:
|
||||
remaining_prefixes = list(line_prefixes)
|
||||
proc_file, results = open(file_path), {}
|
||||
|
||||
for line in proc_file:
|
||||
if not remaining_prefixes:
|
||||
break # found everything we're looking for
|
||||
|
||||
for prefix in remaining_prefixes:
|
||||
if line.startswith(prefix):
|
||||
results[prefix] = line
|
||||
remaining_prefixes.remove(prefix)
|
||||
break
|
||||
|
||||
proc_file.close()
|
||||
|
||||
if remaining_prefixes:
|
||||
if len(remaining_prefixes) == 1:
|
||||
msg = '%s did not contain a %s entry' % (file_path, remaining_prefixes[0])
|
||||
else:
|
||||
msg = '%s did not contain %s entries' % (file_path, ', '.join(remaining_prefixes))
|
||||
|
||||
raise IOError(msg)
|
||||
else:
|
||||
return results
|
||||
except IOError as exc:
|
||||
_log_failure(parameter, exc)
|
||||
raise exc
|
||||
|
||||
|
||||
def _log_runtime(parameter, proc_location, start_time):
|
||||
"""
|
||||
Logs a message indicating a successful proc query.
|
||||
|
||||
:param str parameter: description of the proc attribute being fetched
|
||||
:param str proc_location: proc files we were querying
|
||||
:param int start_time: unix time for when this query was started
|
||||
"""
|
||||
|
||||
runtime = time.time() - start_time
|
||||
log.debug('proc call (%s): %s (runtime: %0.4f)' % (parameter, proc_location, runtime))
|
||||
|
||||
|
||||
def _log_failure(parameter, exc):
|
||||
"""
|
||||
Logs a message indicating that the proc query failed.
|
||||
|
||||
:param str parameter: description of the proc attribute being fetched
|
||||
:param Exception exc: exception that we're raising
|
||||
"""
|
||||
|
||||
log.debug('proc call failed (%s): %s' % (parameter, exc))
|
||||
|
||||
# TODO: drop with stem 2.x
|
||||
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
|
||||
# names for backward compatibility.
|
||||
|
||||
get_system_start_time = system_start_time
|
||||
get_physical_memory = physical_memory
|
||||
get_cwd = cwd
|
||||
get_uid = uid
|
||||
get_memory_usage = memory_usage
|
||||
get_stats = stats
|
||||
get_connections = connections
|
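Taken together, the helpers above support queries like the following minimal sketch (it assumes a Linux host with /proc mounted, and uses the caller's own pid):

import os

import stem.util.proc

if stem.util.proc.is_available():
  pid = os.getpid()

  print('system booted at: %f' % stem.util.proc.system_start_time())
  print('physical memory: %i bytes' % stem.util.proc.physical_memory())
  print('cwd of pid %i: %s' % (pid, stem.util.proc.cwd(pid)))
  print('file descriptors in use: %i' % stem.util.proc.file_descriptors_used(pid))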
558
Shared/lib/python3.4/site-packages/stem/util/str_tools.py
Normal file
|
@@ -0,0 +1,558 @@
|
|||
# Copyright 2012-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Toolkit for various string activities.
|
||||
|
||||
.. versionchanged:: 1.3.0
|
||||
Dropped the get_* prefix from several function names. The old names still
|
||||
work, but are deprecated aliases.
|
||||
|
||||
**Module Overview:**
|
||||
|
||||
::
|
||||
|
||||
crop - shortens string to a given length
|
||||
|
||||
size_label - human readable label for a number of bytes
|
||||
time_label - human readable label for a number of seconds
|
||||
time_labels - human readable labels for each time unit
|
||||
short_time_label - condensed time label output
|
||||
parse_short_time_label - seconds represented by a short time label
|
||||
"""
|
||||
|
||||
import codecs
|
||||
import datetime
|
||||
import re
|
||||
import sys
|
||||
|
||||
import stem.prereq
|
||||
import stem.util.enum
|
||||
|
||||
from stem import str_type
|
||||
|
||||
# label conversion tuples of the form...
|
||||
# (bits / bytes / seconds, short label, long label)
|
||||
|
||||
SIZE_UNITS_BITS = (
|
||||
(140737488355328.0, ' Pb', ' Petabit'),
|
||||
(137438953472.0, ' Tb', ' Terabit'),
|
||||
(134217728.0, ' Gb', ' Gigabit'),
|
||||
(131072.0, ' Mb', ' Megabit'),
|
||||
(128.0, ' Kb', ' Kilobit'),
|
||||
(0.125, ' b', ' Bit'),
|
||||
)
|
||||
|
||||
SIZE_UNITS_BYTES = (
|
||||
(1125899906842624.0, ' PB', ' Petabyte'),
|
||||
(1099511627776.0, ' TB', ' Terabyte'),
|
||||
(1073741824.0, ' GB', ' Gigabyte'),
|
||||
(1048576.0, ' MB', ' Megabyte'),
|
||||
(1024.0, ' KB', ' Kilobyte'),
|
||||
(1.0, ' B', ' Byte'),
|
||||
)
|
||||
|
||||
TIME_UNITS = (
|
||||
(86400.0, 'd', ' day'),
|
||||
(3600.0, 'h', ' hour'),
|
||||
(60.0, 'm', ' minute'),
|
||||
(1.0, 's', ' second'),
|
||||
)
|
||||
|
||||
_timestamp_re = re.compile(r'(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})')
|
||||
|
||||
if stem.prereq.is_python_3():
|
||||
def _to_bytes_impl(msg):
|
||||
if isinstance(msg, str):
|
||||
return codecs.latin_1_encode(msg, 'replace')[0]
|
||||
else:
|
||||
return msg
|
||||
|
||||
def _to_unicode_impl(msg):
|
||||
if msg is not None and not isinstance(msg, str):
|
||||
return msg.decode('utf-8', 'replace')
|
||||
else:
|
||||
return msg
|
||||
else:
|
||||
def _to_bytes_impl(msg):
|
||||
if msg is not None and isinstance(msg, str_type):
|
||||
return codecs.latin_1_encode(msg, 'replace')[0]
|
||||
else:
|
||||
return msg
|
||||
|
||||
def _to_unicode_impl(msg):
|
||||
if msg is not None and not isinstance(msg, str_type):
|
||||
return msg.decode('utf-8', 'replace')
|
||||
else:
|
||||
return msg
|
||||
|
||||
|
||||
def _to_bytes(msg):
|
||||
"""
|
||||
Provides the ASCII bytes for the given string. This is purely to provide
|
||||
python 3 compatibility, normalizing the unicode/ASCII change in the version
|
||||
bump. For an explanation of this see...
|
||||
|
||||
http://python3porting.com/problems.html#nicer-solutions
|
||||
|
||||
:param str,unicode msg: string to be converted
|
||||
|
||||
:returns: ASCII bytes for string
|
||||
"""
|
||||
|
||||
return _to_bytes_impl(msg)
|
||||
|
||||
|
||||
def _to_unicode(msg):
|
||||
"""
|
||||
Provides the unicode string for the given ASCII bytes. This is purely to
|
||||
provide python 3 compatibility, normalizing the unicode/ASCII change in the
|
||||
version bump.
|
||||
|
||||
:param str,unicode msg: string to be converted
|
||||
|
||||
:returns: unicode conversion
|
||||
"""
|
||||
|
||||
return _to_unicode_impl(msg)
|
||||
|
||||
|
||||
def _to_camel_case(label, divider = '_', joiner = ' '):
|
||||
"""
|
||||
Converts the given string to camel case, ie:
|
||||
|
||||
::
|
||||
|
||||
>>> _to_camel_case('I_LIKE_PEPPERJACK!')
|
||||
'I Like Pepperjack!'
|
||||
|
||||
:param str label: input string to be converted
|
||||
:param str divider: word boundary
|
||||
:param str joiner: replacement for word boundaries
|
||||
|
||||
:returns: camel cased string
|
||||
"""
|
||||
|
||||
words = []
|
||||
for entry in label.split(divider):
|
||||
if len(entry) == 0:
|
||||
words.append('')
|
||||
elif len(entry) == 1:
|
||||
words.append(entry.upper())
|
||||
else:
|
||||
words.append(entry[0].upper() + entry[1:].lower())
|
||||
|
||||
return joiner.join(words)
|
||||
|
||||
|
||||
# This needs to be defined after _to_camel_case() to avoid a circular
|
||||
# dependency with the enum module.
|
||||
|
||||
Ending = stem.util.enum.Enum('ELLIPSE', 'HYPHEN')
|
||||
|
||||
|
||||
def crop(msg, size, min_word_length = 4, min_crop = 0, ending = Ending.ELLIPSE, get_remainder = False):
|
||||
"""
|
||||
Shortens a string to a given length.
|
||||
|
||||
If we crop content then a given ending is included (counting itself toward
|
||||
the size limitation). This crops on word breaks so we only include a word if
|
||||
we can display at least **min_word_length** characters of it.
|
||||
|
||||
If there isn't room for even a truncated single word (or one word plus the
|
||||
ellipse if including those) then this provides an empty string.
|
||||
|
||||
If a cropped string ends with a comma or period then it's stripped (unless
|
||||
we're providing the remainder back). For example...
|
||||
|
||||
>>> crop('This is a looooong message', 17)
|
||||
'This is a looo...'
|
||||
|
||||
>>> crop('This is a looooong message', 12)
|
||||
'This is a...'
|
||||
|
||||
>>> crop('This is a looooong message', 3)
|
||||
''
|
||||
|
||||
The whole point of this method is to provide human friendly croppings, and as
|
||||
such details of how this works might change in the future. Callers should not
|
||||
rely on the details of how this crops.
|
||||
|
||||
.. versionadded:: 1.3.0
|
||||
|
||||
:param str msg: text to be processed
|
||||
:param int size: space available for text
|
||||
:param int min_word_length: minimum characters before which a word is
|
||||
dropped, requires whole word if **None**
|
||||
:param int min_crop: minimum characters that must be dropped if a word is
|
||||
cropped
|
||||
:param Ending ending: type of ending used when truncating, no special
|
||||
truncation is used if **None**
|
||||
:param bool get_remainder: returns a tuple with the second part being the
|
||||
cropped portion of the message
|
||||
|
||||
:returns: **str** of the text truncated to the given length
|
||||
"""
|
||||
|
||||
# checks if there's room for the whole message
|
||||
|
||||
if len(msg) <= size:
|
||||
return (msg, '') if get_remainder else msg
|
||||
|
||||
if size < 0:
|
||||
raise ValueError("Crop size can't be negative (received %i)" % size)
|
||||
elif min_word_length and min_word_length < 0:
|
||||
raise ValueError("Crop's min_word_length can't be negative (received %i)" % min_word_length)
|
||||
elif min_crop < 0:
|
||||
raise ValueError("Crop's min_crop can't be negative (received %i)" % min_crop)
|
||||
|
||||
# since we're cropping, the effective space available is less with an
|
||||
# ellipse, and cropping words requires an extra space for hyphens
|
||||
|
||||
if ending == Ending.ELLIPSE:
|
||||
size -= 3
|
||||
elif min_word_length and ending == Ending.HYPHEN:
|
||||
min_word_length += 1
|
||||
|
||||
if min_word_length is None:
|
||||
min_word_length = sys.maxsize
|
||||
|
||||
# checks if there isn't the minimum space needed to include anything
|
||||
|
||||
last_wordbreak = msg.rfind(' ', 0, size + 1)
|
||||
|
||||
if last_wordbreak == -1:
|
||||
# we're splitting the first word
|
||||
|
||||
if size < min_word_length:
|
||||
return ('', msg) if get_remainder else ''
|
||||
|
||||
include_crop = True
|
||||
else:
|
||||
last_wordbreak = len(msg[:last_wordbreak].rstrip()) # drops extra ending whitespaces
|
||||
include_crop = size - last_wordbreak - 1 >= min_word_length
|
||||
|
||||
# if there's a minimum crop size then make sure we're cropping at least that many characters
|
||||
|
||||
if include_crop and min_crop:
|
||||
next_wordbreak = msg.find(' ', size)
|
||||
|
||||
if next_wordbreak == -1:
|
||||
next_wordbreak = len(msg)
|
||||
|
||||
include_crop = next_wordbreak - size + 1 >= min_crop
|
||||
|
||||
if include_crop:
|
||||
return_msg, remainder = msg[:size], msg[size:]
|
||||
|
||||
if ending == Ending.HYPHEN:
|
||||
remainder = return_msg[-1] + remainder
|
||||
return_msg = return_msg[:-1].rstrip() + '-'
|
||||
else:
|
||||
return_msg, remainder = msg[:last_wordbreak], msg[last_wordbreak:]
|
||||
|
||||
# if this is ending with a comma or period then strip it off
|
||||
|
||||
if not get_remainder and return_msg and return_msg[-1] in (',', '.'):
|
||||
return_msg = return_msg[:-1]
|
||||
|
||||
if ending == Ending.ELLIPSE:
|
||||
return_msg = return_msg.rstrip() + '...'
|
||||
|
||||
return (return_msg, remainder) if get_remainder else return_msg
|
||||
|
||||
|
||||
def size_label(byte_count, decimal = 0, is_long = False, is_bytes = True):
|
||||
"""
|
||||
Converts a number of bytes into a human readable label in its most
|
||||
significant units. For instance, 7500 bytes would return "7 KB". If the
|
||||
is_long option is used this expands unit labels to be the properly pluralized
|
||||
full word (for instance 'Kilobytes' rather than 'KB'). Units go up through
|
||||
petabytes.
|
||||
|
||||
::
|
||||
|
||||
>>> size_label(2000000)
|
||||
'1 MB'
|
||||
|
||||
>>> size_label(1050, 2)
|
||||
'1.02 KB'
|
||||
|
||||
>>> size_label(1050, 3, True)
|
||||
'1.025 Kilobytes'
|
||||
|
||||
:param int byte_count: number of bytes to be converted
|
||||
:param int decimal: number of decimal digits to be included
|
||||
:param bool is_long: expands units label
|
||||
:param bool is_bytes: provides units in bytes if **True**, bits otherwise
|
||||
|
||||
:returns: **str** with human readable representation of the size
|
||||
"""
|
||||
|
||||
if is_bytes:
|
||||
return _get_label(SIZE_UNITS_BYTES, byte_count, decimal, is_long)
|
||||
else:
|
||||
return _get_label(SIZE_UNITS_BITS, byte_count, decimal, is_long)
|
||||
|
||||
|
||||
def time_label(seconds, decimal = 0, is_long = False):
|
||||
"""
|
||||
Converts seconds into a time label truncated to its most significant units.
|
||||
For instance, 7500 seconds would return "2h". Units go up through days.
|
||||
|
||||
This defaults to presenting single character labels, but if the is_long
|
||||
option is used this expands labels to be the full word (space included and
|
||||
properly pluralized). For instance, "4h" would be "4 hours" and "1m" would
|
||||
become "1 minute".
|
||||
|
||||
::
|
||||
|
||||
>>> time_label(10000)
|
||||
'2h'
|
||||
|
||||
>>> time_label(61, 1, True)
|
||||
'1.0 minute'
|
||||
|
||||
>>> time_label(61, 2, True)
|
||||
'1.01 minutes'
|
||||
|
||||
:param int seconds: number of seconds to be converted
|
||||
:param int decimal: number of decimal digits to be included
|
||||
:param bool is_long: expands units label
|
||||
|
||||
:returns: **str** with human readable representation of the time
|
||||
"""
|
||||
|
||||
return _get_label(TIME_UNITS, seconds, decimal, is_long)
|
||||
|
||||
|
||||
def time_labels(seconds, is_long = False):
|
||||
"""
|
||||
Provides a list of label conversions for each time unit, starting with its
|
||||
most significant units on down. Any counts that evaluate to zero are omitted.
|
||||
For example...
|
||||
|
||||
::
|
||||
|
||||
>>> time_labels(400)
|
||||
['6m', '40s']
|
||||
|
||||
>>> time_labels(3640, True)
|
||||
['1 hour', '40 seconds']
|
||||
|
||||
:param int seconds: number of seconds to be converted
|
||||
:param bool is_long: expands units label
|
||||
|
||||
:returns: **list** of strings with human readable representations of the time
|
||||
"""
|
||||
|
||||
time_labels = []
|
||||
|
||||
for count_per_unit, _, _ in TIME_UNITS:
|
||||
if abs(seconds) >= count_per_unit:
|
||||
time_labels.append(_get_label(TIME_UNITS, seconds, 0, is_long))
|
||||
seconds %= count_per_unit
|
||||
|
||||
return time_labels
|
||||
|
||||
|
||||
def short_time_label(seconds):
|
||||
"""
|
||||
Provides a time in the following format:
|
||||
[[dd-]hh:]mm:ss
|
||||
|
||||
::
|
||||
|
||||
>>> short_time_label(111)
|
||||
'01:51'
|
||||
|
||||
>>> short_time_label(544100)
|
||||
'6-07:08:20'
|
||||
|
||||
:param int seconds: number of seconds to be converted
|
||||
|
||||
:returns: **str** with the short representation for the time
|
||||
|
||||
:raises: **ValueError** if the input is negative
|
||||
"""
|
||||
|
||||
if seconds < 0:
|
||||
raise ValueError("Input needs to be a non-negative integer, got '%i'" % seconds)
|
||||
|
||||
time_comp = {}
|
||||
|
||||
for amount, _, label in TIME_UNITS:
|
||||
count = int(seconds / amount)
|
||||
seconds %= amount
|
||||
time_comp[label.strip()] = count
|
||||
|
||||
label = '%02i:%02i' % (time_comp['minute'], time_comp['second'])
|
||||
|
||||
if time_comp['day']:
|
||||
label = '%i-%02i:%s' % (time_comp['day'], time_comp['hour'], label)
|
||||
elif time_comp['hour']:
|
||||
label = '%02i:%s' % (time_comp['hour'], label)
|
||||
|
||||
return label
|
||||
|
||||
|
||||
def parse_short_time_label(label):
|
||||
"""
|
||||
Provides the number of seconds corresponding to the formatting used for the
|
||||
cputime and etime fields of ps:
|
||||
[[dd-]hh:]mm:ss or mm:ss.ss
|
||||
|
||||
::
|
||||
|
||||
>>> parse_short_time_label('01:51')
|
||||
111
|
||||
|
||||
>>> parse_short_time_label('6-07:08:20')
|
||||
544100
|
||||
|
||||
:param str label: time entry to be parsed
|
||||
|
||||
:returns: **int** with the number of seconds represented by the label
|
||||
|
||||
:raises: **ValueError** if input is malformed
|
||||
"""
|
||||
|
||||
days, hours, minutes, seconds = '0', '0', '0', '0'
|
||||
|
||||
if '-' in label:
|
||||
days, label = label.split('-', 1)
|
||||
|
||||
time_comp = label.split(':')
|
||||
|
||||
if len(time_comp) == 3:
|
||||
hours, minutes, seconds = time_comp
|
||||
elif len(time_comp) == 2:
|
||||
minutes, seconds = time_comp
|
||||
else:
|
||||
raise ValueError("Invalid time format, we expected '[[dd-]hh:]mm:ss' or 'mm:ss.ss': %s" % label)
|
||||
|
||||
try:
|
||||
time_sum = int(float(seconds))
|
||||
time_sum += int(minutes) * 60
|
||||
time_sum += int(hours) * 3600
|
||||
time_sum += int(days) * 86400
|
||||
return time_sum
|
||||
except ValueError:
|
||||
raise ValueError('Non-numeric value in time entry: %s' % label)
|
||||
|
||||
|
||||
def _parse_timestamp(entry):
|
||||
"""
|
||||
Parses a date and time in a format like...
|
||||
|
||||
::
|
||||
|
||||
2012-11-08 16:48:41
|
||||
|
||||
:param str entry: timestamp to be parsed
|
||||
|
||||
:returns: **datetime** for the time represented by the timestamp
|
||||
|
||||
:raises: **ValueError** if the timestamp is malformed
|
||||
"""
|
||||
|
||||
if not isinstance(entry, (str, str_type)):
|
||||
raise ValueError('parse_timestamp() input must be a str, got a %s' % type(entry))
|
||||
|
||||
try:
|
||||
time = [int(x) for x in _timestamp_re.match(entry).groups()]
|
||||
except AttributeError:
|
||||
raise ValueError('Expected timestamp in format YYYY-MM-DD HH:MM:ss but got ' + entry)
|
||||
|
||||
return datetime.datetime(time[0], time[1], time[2], time[3], time[4], time[5])
|
||||
|
||||
|
||||
def _parse_iso_timestamp(entry):
|
||||
"""
|
||||
Parses the ISO 8601 standard that provides for timestamps like...
|
||||
|
||||
::
|
||||
|
||||
2012-11-08T16:48:41.420251
|
||||
|
||||
:param str entry: timestamp to be parsed
|
||||
|
||||
:returns: **datetime** for the time represented by the timestamp
|
||||
|
||||
:raises: **ValueError** if the timestamp is malformed
|
||||
"""
|
||||
|
||||
if not isinstance(entry, (str, str_type)):
|
||||
raise ValueError('parse_iso_timestamp() input must be a str, got a %s' % type(entry))
|
||||
|
||||
# based after suggestions from...
|
||||
# http://stackoverflow.com/questions/127803/how-to-parse-iso-formatted-date-in-python
|
||||
|
||||
if '.' in entry:
|
||||
timestamp_str, microseconds = entry.split('.')
|
||||
else:
|
||||
timestamp_str, microseconds = entry, '000000'
|
||||
|
||||
if len(microseconds) != 6 or not microseconds.isdigit():
|
||||
raise ValueError("timestamp's microseconds should be six digits")
|
||||
|
||||
if timestamp_str[10] == 'T':
|
||||
timestamp_str = timestamp_str[:10] + ' ' + timestamp_str[11:]
|
||||
else:
|
||||
raise ValueError("timestamp didn't contain delimeter 'T' between date and time")
|
||||
|
||||
timestamp = _parse_timestamp(timestamp_str)
|
||||
return timestamp + datetime.timedelta(microseconds = int(microseconds))
|
||||
|
||||
|
||||
def _get_label(units, count, decimal, is_long):
|
||||
"""
|
||||
Provides label corresponding to units of the highest significance in the
|
||||
provided set. This rounds down (ie, integer truncation after visible units).
|
||||
|
||||
:param tuple units: type of units to be used for conversion, containing
|
||||
(count_per_unit, short_label, long_label)
|
||||
:param int count: number of base units being converted
|
||||
:param int decimal: decimal precision of label
|
||||
:param bool is_long: uses the long label if **True**, short label otherwise
|
||||
"""
|
||||
|
||||
# formatted string for the requested number of digits
|
||||
label_format = '%%.%if' % decimal
|
||||
|
||||
if count < 0:
|
||||
label_format = '-' + label_format
|
||||
count = abs(count)
|
||||
elif count == 0:
|
||||
units_label = units[-1][2] + 's' if is_long else units[-1][1]
|
||||
return '%s%s' % (label_format % count, units_label)
|
||||
|
||||
for count_per_unit, short_label, long_label in units:
|
||||
if count >= count_per_unit:
|
||||
# Rounding down with a '%f' is a little clunky. Reducing the count so
|
||||
# it'll divide evenly as the rounded down value.
|
||||
|
||||
count -= count % (count_per_unit / (10 ** decimal))
|
||||
count_label = label_format % (count / count_per_unit)
|
||||
|
||||
if is_long:
|
||||
# Pluralize if any of the visible units make it greater than one. For
|
||||
# instance 1.0003 is plural but 1.000 isn't.
|
||||
|
||||
if decimal > 0:
|
||||
is_plural = count > count_per_unit
|
||||
else:
|
||||
is_plural = count >= count_per_unit * 2
|
||||
|
||||
return count_label + long_label + ('s' if is_plural else '')
|
||||
else:
|
||||
return count_label + short_label
|
||||
|
||||
# TODO: drop with stem 2.x
|
||||
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
|
||||
# names for backward compatibility.
|
||||
|
||||
get_size_label = size_label
|
||||
get_time_label = time_label
|
||||
get_time_labels = time_labels
|
||||
get_short_time_label = short_time_label
|
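A quick tour of the labeling helpers above (a sketch; the expected values are taken from the docstrings in this module):

from stem.util import str_tools

print(str_tools.size_label(2000000))                   # '1 MB'
print(str_tools.time_label(10000))                     # '2h'
print(str_tools.short_time_label(544100))              # '6-07:08:20'
print(str_tools.parse_short_time_label('6-07:08:20'))  # 544100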
1176
Shared/lib/python3.4/site-packages/stem/util/system.py
Normal file
File diff suppressed because it is too large
116
Shared/lib/python3.4/site-packages/stem/util/term.py
Normal file
|
@@ -0,0 +1,116 @@
|
|||
# Copyright 2011-2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Utilities for working with the terminal.
|
||||
|
||||
**Module Overview:**
|
||||
|
||||
::
|
||||
|
||||
format - wrap text with ANSI for the given colors or attributes
|
||||
|
||||
.. data:: Color (enum)
|
||||
.. data:: BgColor (enum)
|
||||
|
||||
Enumerations for foreground or background terminal color.
|
||||
|
||||
=========== ===========
|
||||
Color Description
|
||||
=========== ===========
|
||||
**BLACK** black color
|
||||
**BLUE** blue color
|
||||
**CYAN** cyan color
|
||||
**GREEN** green color
|
||||
**MAGENTA** magenta color
|
||||
**RED** red color
|
||||
**WHITE** white color
|
||||
**YELLOW** yellow color
|
||||
=========== ===========
|
||||
|
||||
.. data:: Attr (enum)
|
||||
|
||||
Enumerations of terminal text attributes.
|
||||
|
||||
=================== ===========
|
||||
Attr Description
|
||||
=================== ===========
|
||||
**BOLD** heavy typeface
|
||||
**HILIGHT** inverted foreground and background
|
||||
**UNDERLINE** underlined text
|
||||
**READLINE_ESCAPE** wrap encodings in `RL_PROMPT_START_IGNORE and RL_PROMPT_END_IGNORE sequences <https://stackoverflow.com/questions/9468435/look-how-to-fix-column-calculation-in-python-readline-if-use-color-prompt>`_
|
||||
=================== ===========
|
||||
"""
|
||||
|
||||
import stem.util.enum
|
||||
import stem.util.str_tools
|
||||
|
||||
TERM_COLORS = ('BLACK', 'RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'WHITE')
|
||||
|
||||
# DISABLE_COLOR_SUPPORT is *not* being vended to Stem users. This is likely to
|
||||
# go away if I can think of a more graceful method for color toggling.
|
||||
|
||||
DISABLE_COLOR_SUPPORT = False
|
||||
|
||||
Color = stem.util.enum.Enum(*TERM_COLORS)
|
||||
BgColor = stem.util.enum.Enum(*['BG_' + color for color in TERM_COLORS])
|
||||
Attr = stem.util.enum.Enum('BOLD', 'UNDERLINE', 'HILIGHT', 'READLINE_ESCAPE')
|
||||
|
||||
# mappings of terminal attribute enums to their ANSI escape encoding
|
||||
FG_ENCODING = dict([(list(Color)[i], str(30 + i)) for i in range(8)])
|
||||
BG_ENCODING = dict([(list(BgColor)[i], str(40 + i)) for i in range(8)])
|
||||
ATTR_ENCODING = {Attr.BOLD: '1', Attr.UNDERLINE: '4', Attr.HILIGHT: '7'}
|
||||
|
||||
CSI = '\x1B[%sm'
|
||||
RESET = CSI % '0'
|
||||
|
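# Worked example of the encodings above: Color.RED maps to '31' in
# FG_ENCODING and Attr.BOLD to '1' in ATTR_ENCODING, so
# format('hi', Color.RED, Attr.BOLD) emits '\x1b[31;1mhi\x1b[0m'.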
||||
|
||||
def format(msg, *attr):
|
||||
"""
|
||||
Simple terminal text formatting using `ANSI escape sequences
|
||||
<https://en.wikipedia.org/wiki/ANSI_escape_code#CSI_codes>`_.
|
||||
The following are some toolkits providing similar capabilities:
|
||||
|
||||
* `django.utils.termcolors <https://github.com/django/django/blob/master/django/utils/termcolors.py>`_
|
||||
* `termcolor <https://pypi.python.org/pypi/termcolor>`_
|
||||
* `colorama <https://pypi.python.org/pypi/colorama>`_
|
||||
|
||||
:param str msg: string to be formatted
|
||||
:param str attr: text attributes, this can be :data:`~stem.util.term.Color`,
|
||||
:data:`~stem.util.term.BgColor`, or :data:`~stem.util.term.Attr` enums
|
||||
and are case insensitive (so strings like 'red' are fine)
|
||||
|
||||
:returns: **str** wrapped with ANSI escape encodings, starting with the given
|
||||
attributes and ending with a reset
|
||||
"""
|
||||
|
||||
if DISABLE_COLOR_SUPPORT:
|
||||
return msg
|
||||
|
||||
# if we have reset sequences in the message then apply our attributes
|
||||
# after each of them
|
||||
|
||||
if RESET in msg:
|
||||
return ''.join([format(comp, *attr) for comp in msg.split(RESET)])
|
||||
|
||||
encodings = []
|
||||
|
||||
for text_attr in attr:
|
||||
text_attr, encoding = stem.util.str_tools._to_camel_case(text_attr), None
|
||||
encoding = FG_ENCODING.get(text_attr, encoding)
|
||||
encoding = BG_ENCODING.get(text_attr, encoding)
|
||||
encoding = ATTR_ENCODING.get(text_attr, encoding)
|
||||
|
||||
if encoding:
|
||||
encodings.append(encoding)
|
||||
|
||||
if encodings:
|
||||
prefix, suffix = CSI % ';'.join(encodings), RESET
|
||||
|
||||
if Attr.READLINE_ESCAPE in attr:
|
||||
prefix = '\001%s\002' % prefix
|
||||
suffix = '\001%s\002' % suffix
|
||||
|
||||
return prefix + msg + suffix
|
||||
else:
|
||||
return msg
|
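For instance, a minimal sketch using the enums defined above (the output renders colored on an ANSI-capable terminal):

from stem.util import term

print(term.format('an error', term.Color.RED, term.Attr.BOLD))
print(term.format('a note', term.BgColor.BG_YELLOW))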
341
Shared/lib/python3.4/site-packages/stem/util/test_tools.py
Normal file
|
@@ -0,0 +1,341 @@
|
|||
# Copyright 2015, Damian Johnson and The Tor Project
|
||||
# See LICENSE for licensing information
|
||||
|
||||
"""
|
||||
Helper functions for testing.
|
||||
|
||||
.. versionadded:: 1.2.0
|
||||
|
||||
::
|
||||
|
||||
clean_orphaned_pyc - delete *.pyc files without corresponding *.py
|
||||
|
||||
is_pyflakes_available - checks if pyflakes is available
|
||||
is_pep8_available - checks if pep8 is available
|
||||
|
||||
stylistic_issues - checks for PEP8 and other stylistic issues
|
||||
pyflakes_issues - static checks for problems via pyflakes
|
||||
"""
|
||||
|
||||
import collections
|
||||
import linecache
|
||||
import os
|
||||
import re
|
||||
|
||||
import stem.util.conf
|
||||
import stem.util.system
|
||||
|
||||
CONFIG = stem.util.conf.config_dict('test', {
|
||||
'pep8.ignore': [],
|
||||
'pyflakes.ignore': [],
|
||||
'exclude_paths': [],
|
||||
})
|
||||
|
||||
Issue = collections.namedtuple('Issue', [
|
||||
'line_number',
|
||||
'message',
|
||||
'line',
|
||||
])
|
||||
|
||||
|
||||
def clean_orphaned_pyc(paths):
|
||||
"""
|
||||
Deletes any file with a *.pyc extension without a corresponding *.py. This
|
||||
helps to address a common gotcha when deleting python files...
|
||||
|
||||
* You delete module 'foo.py' and run the tests to ensure that you haven't
|
||||
broken anything. They pass, however there *are* still some 'import foo'
|
||||
statements that still work because the bytecode (foo.pyc) is still around.
|
||||
|
||||
* You push your change.
|
||||
|
||||
* Another developer clones our repository and is confused because we have a
|
||||
bunch of ImportErrors.
|
||||
|
||||
:param list paths: paths to search for orphaned pyc files
|
||||
|
||||
:returns: list of absolute paths that were deleted
|
||||
"""
|
||||
|
||||
orphaned_pyc = []
|
||||
|
||||
for path in paths:
|
||||
for pyc_path in stem.util.system.files_with_suffix(path, '.pyc'):
|
||||
py_path = pyc_path[:-1]
|
||||
|
||||
# If we're running python 3 then the *.pyc files are no longer bundled
|
||||
# with the *.py. Rather, they're in a __pycache__ directory.
|
||||
|
||||
pycache = '%s__pycache__%s' % (os.path.sep, os.path.sep)
|
||||
|
||||
if pycache in pyc_path:
|
||||
directory, pycache_filename = pyc_path.split(pycache, 1)
|
||||
|
||||
if not pycache_filename.endswith('.pyc'):
|
||||
continue # should look like 'test_tools.cpython-32.pyc'
|
||||
|
||||
py_path = os.path.join(directory, pycache_filename.split('.')[0] + '.py')
|
||||
|
||||
if not os.path.exists(py_path):
|
||||
orphaned_pyc.append(pyc_path)
|
||||
os.remove(pyc_path)
|
||||
|
||||
return orphaned_pyc
|
||||
|
||||
|
||||
def is_pyflakes_available():
|
||||
"""
|
||||
Checks if pyflakes is available.
|
||||
|
||||
:returns: **True** if we can use pyflakes and **False** otherwise
|
||||
"""
|
||||
|
||||
try:
|
||||
import pyflakes.api
|
||||
import pyflakes.reporter
|
||||
return True
|
||||
except ImportError:
|
||||
return False
|
||||
|
||||
|
||||
def is_pep8_available():
|
||||
"""
|
||||
Checks if pep8 is available.
|
||||
|
||||
:returns: **True** if we can use pep8 and **False** otherwise
|
||||
"""
|
||||
|
||||
try:
|
||||
import pep8
|
||||
|
||||
if not hasattr(pep8, 'BaseReport'):
|
||||
raise ImportError()
|
||||
|
||||
return True
|
||||
except ImportError:
|
||||
return False
|
||||
|
||||
|
||||
def stylistic_issues(paths, check_two_space_indents = False, check_newlines = False, check_trailing_whitespace = False, check_exception_keyword = False, prefer_single_quotes = False):
|
||||
"""
|
||||
Checks for stylistic issues that are an issue according to the parts of PEP8
|
||||
we conform to. You can suppress PEP8 issues by making a 'test' configuration
|
||||
that sets 'pep8.ignore'.
|
||||
|
||||
For example, with a 'test/settings.cfg' of...
|
||||
|
||||
::
|
||||
|
||||
# PEP8 compliance issues that we're ignoring...
|
||||
#
|
||||
# * E111 and E121 four space indentations
|
||||
# * E501 line is over 79 characters
|
||||
|
||||
pep8.ignore E111
|
||||
pep8.ignore E121
|
||||
pep8.ignore E501
|
||||
|
||||
... you can then run tests with...
|
||||
|
||||
::
|
||||
|
||||
import stem.util.conf
|
||||
|
||||
test_config = stem.util.conf.get_config('test')
|
||||
test_config.load('test/settings.cfg')
|
||||
|
||||
issues = stylistic_issues('my_project')
|
||||
|
||||
If an 'exclude_paths' was set in our test config then we exclude any absolute
|
||||
paths matching those regexes.
|
||||
|
||||
.. versionchanged:: 1.3.0
|
||||
Renamed from get_stylistic_issues() to stylistic_issues(). The old name
|
||||
still works as an alias, but will be dropped in Stem version 2.0.0.
|
||||
|
||||
.. versionchanged:: 1.4.0
|
||||
Changing tuples in return value to be namedtuple instances, and adding the
|
||||
line that had the issue.
|
||||
|
||||
.. versionchanged:: 1.4.0
|
||||
Added the prefer_single_quotes option.
|
||||
|
||||
:param list paths: paths to search for stylistic issues
|
||||
:param bool check_two_space_indents: check for two space indentations and
|
||||
that no tabs snuck in
|
||||
:param bool check_newlines: check that we have standard newlines (\\n), not
|
||||
windows (\\r\\n) nor classic mac (\\r)
|
||||
:param bool check_trailing_whitespace: check that our lines don't end with
|
||||
trailing whitespace
|
||||
:param bool check_exception_keyword: checks that we're using 'as' for
|
||||
exceptions rather than a comma
|
||||
:param bool prefer_single_quotes: standardize on using single rather than
|
||||
double quotes for strings, when reasonable
|
||||
|
||||
:returns: **dict** of the form ``path => [(line_number, message)...]``
|
||||
"""
|
||||
|
||||
issues = {}
|
||||
|
||||
if is_pep8_available():
|
||||
import pep8
|
||||
|
||||
class StyleReport(pep8.BaseReport):
|
||||
def __init__(self, options):
|
||||
super(StyleReport, self).__init__(options)
|
||||
|
||||
def error(self, line_number, offset, text, check):
|
||||
code = super(StyleReport, self).error(line_number, offset, text, check)
|
||||
|
||||
if code:
|
||||
issues.setdefault(self.filename, []).append(Issue(line_number, '%s %s' % (code, text), text))
|
||||
|
||||
style_checker = pep8.StyleGuide(ignore = CONFIG['pep8.ignore'], reporter = StyleReport)
|
||||
style_checker.check_files(list(_python_files(paths)))
|
||||
|
||||
if check_two_space_indents or check_newlines or check_trailing_whitespace or check_exception_keyword:
|
||||
for path in _python_files(paths):
|
||||
with open(path) as f:
|
||||
file_contents = f.read()
|
||||
|
||||
lines = file_contents.split('\n')
|
||||
is_block_comment = False
|
||||
|
||||
for index, line in enumerate(lines):
|
||||
whitespace, content = re.match('^(\s*)(.*)$', line).groups()
|
||||
|
||||
# TODO: This does not check that block indentations are two spaces
|
||||
# because differentiating source from string blocks ("""foo""") is more
|
||||
# of a pita than I want to deal with right now.
|
||||
|
||||
if '"""' in content:
|
||||
is_block_comment = not is_block_comment
|
||||
|
||||
if check_two_space_indents and '\t' in whitespace:
|
||||
issues.setdefault(path, []).append(Issue(index + 1, 'indentation has a tab', line))
|
||||
elif check_newlines and '\r' in content:
|
||||
issues.setdefault(path, []).append(Issue(index + 1, 'contains a windows newline', line))
|
||||
elif check_trailing_whitespace and content != content.rstrip():
|
||||
issues.setdefault(path, []).append(Issue(index + 1, 'line has trailing whitespace', line))
|
||||
elif check_exception_keyword and content.lstrip().startswith('except') and content.endswith(', exc:'):
|
||||
# Python 2.6 - 2.7 supports two forms for exceptions...
|
||||
#
|
||||
# except ValueError, exc:
|
||||
# except ValueError as exc:
|
||||
#
|
||||
# The former is the old method and no longer supported in python 3
|
||||
# going forward.
|
||||
|
||||
# TODO: This check only works if the exception variable is called
|
||||
# 'exc'. We should generalize this via a regex so other names work
|
||||
# too.
|
||||
|
||||
issues.setdefault(path, []).append(Issue(index + 1, "except clause should use 'as', not comma", line))
|
||||
|
||||
if prefer_single_quotes and line and not is_block_comment:
|
||||
content = line.strip().split('#', 1)[0]
|
||||
|
||||
if '"' in content and "'" not in content and '"""' not in content and not content.endswith('\\'):
|
||||
# Checking if the line already has any single quotes since that
|
||||
# usually means double quotes are preferable for the content (for
|
||||
# instance "I'm hungry"). Also checking for '\' at the end since
|
||||
# that can indicate a multi-line string.
|
||||
|
||||
issues.setdefault(path, []).append(Issue(index + 1, "use single rather than double quotes", line))
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def pyflakes_issues(paths):
|
||||
"""
|
||||
Performs static checks via pyflakes. False positives can be ignored via
|
||||
'pyflakes.ignore' entries in our 'test' config. For instance...
|
||||
|
||||
::
|
||||
|
||||
pyflakes.ignore stem/util/test_tools.py => 'pyflakes' imported but unused
|
||||
pyflakes.ignore stem/util/test_tools.py => 'pep8' imported but unused
|
||||
|
||||
If an 'exclude_paths' was set in our test config then we exclude any absolute
|
||||
paths matching those regexes.
|
||||
|
||||
.. versionchanged:: 1.3.0
|
||||
Renamed from get_pyflakes_issues() to pyflakes_issues(). The old name
|
||||
still works as an alias, but will be dropped in Stem version 2.0.0.
|
||||
|
||||
.. versionchanged:: 1.4.0
|
||||
Changing tuples in return value to be namedtuple instances, and adding the
|
||||
line that had the issue.
|
||||
|
||||
:param list paths: paths to search for problems
|
||||
|
||||
:returns: dict of the form ``path => [(line_number, message)...]``
|
||||
"""
|
||||
|
||||
issues = {}
|
||||
|
||||
if is_pyflakes_available():
|
||||
import pyflakes.api
|
||||
import pyflakes.reporter
|
||||
|
||||
class Reporter(pyflakes.reporter.Reporter):
|
||||
def __init__(self):
|
||||
self._ignored_issues = {}
|
||||
|
||||
for line in CONFIG['pyflakes.ignore']:
|
||||
path, issue = line.split('=>')
|
||||
self._ignored_issues.setdefault(path.strip(), []).append(issue.strip())
|
||||
|
||||
def unexpectedError(self, filename, msg):
|
||||
self._register_issue(filename, None, msg, None)
|
||||
|
||||
def syntaxError(self, filename, msg, lineno, offset, text):
|
||||
self._register_issue(filename, lineno, msg, text)
|
||||
|
||||
def flake(self, msg):
|
||||
self._register_issue(msg.filename, msg.lineno, msg.message % msg.message_args, None)
|
||||
|
||||
def _is_ignored(self, path, issue):
|
||||
# Paths in pyflakes_ignore are relative, so we need to check to see if our
|
||||
# path ends with any of them.
|
||||
|
||||
for ignored_path, ignored_issues in self._ignored_issues.items():
|
||||
if path.endswith(ignored_path) and issue in ignored_issues:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _register_issue(self, path, line_number, issue, line):
|
||||
if not self._is_ignored(path, issue):
|
||||
if path and line_number and not line:
|
||||
line = linecache.getline(path, line_number)
|
||||
|
||||
issues.setdefault(path, []).append(Issue(line_number, issue, line))
|
||||
|
||||
reporter = Reporter()
|
||||
|
||||
for path in _python_files(paths):
|
||||
pyflakes.api.checkPath(path, reporter)
|
||||
|
||||
return issues
|
||||
|
||||
|
||||
def _python_files(paths):
|
||||
for path in paths:
|
||||
for file_path in stem.util.system.files_with_suffix(path, '.py'):
|
||||
skip = False
|
||||
|
||||
for exclude_path in CONFIG['exclude_paths']:
|
||||
if re.match(exclude_path, file_path):
|
||||
skip = True
|
||||
break
|
||||
|
||||
if not skip:
|
||||
yield file_path
|
||||
|
||||
# TODO: drop with stem 2.x
|
||||
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
|
||||
# names for backward compatibility.
|
||||
|
||||
get_stylistic_issues = stylistic_issues
|
||||
get_pyflakes_issues = pyflakes_issues
|
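Putting the checkers above together, a test harness might report issues like this (a sketch; 'my_project' is a placeholder source directory):

import stem.util.test_tools

issues = stem.util.test_tools.stylistic_issues(['my_project'], check_trailing_whitespace = True)

for path, path_issues in issues.items():
  for issue in path_issues:
    print('%s:%s %s' % (path, issue.line_number, issue.message))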
151
Shared/lib/python3.4/site-packages/stem/util/tor_tools.py
Normal file
|
@@ -0,0 +1,151 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Miscellaneous utility functions for working with tor.

.. versionadded:: 1.2.0

**Module Overview:**

::

  is_valid_fingerprint - checks if a string is a valid tor relay fingerprint
  is_valid_nickname - checks if a string is a valid tor relay nickname
  is_valid_circuit_id - checks if a string is a valid tor circuit id
  is_valid_stream_id - checks if a string is a valid tor stream id
  is_valid_connection_id - checks if a string is a valid tor connection id
  is_valid_hidden_service_address - checks if a string is a valid hidden service address
  is_hex_digits - checks if a string is only made up of hex digits
"""

import re

# The control-spec defines the following as...
#
#   Fingerprint = "$" 40*HEXDIG
#   NicknameChar = "a"-"z" / "A"-"Z" / "0" - "9"
#   Nickname = 1*19 NicknameChar
#
#   CircuitID = 1*16 IDChar
#   IDChar = ALPHA / DIGIT
#
# HEXDIG is defined in RFC 5234 as being uppercase and used in RFC 5987 as
# case insensitive. Tor doesn't define this in the spec so flipping a coin
# and going with case insensitive.

NICKNAME_PATTERN = re.compile('^[a-zA-Z0-9]{1,19}$')
CIRC_ID_PATTERN = re.compile('^[a-zA-Z0-9]{1,16}$')

# Hidden service addresses are sixteen base32 characters.

HS_ADDRESS_PATTERN = re.compile('^[a-z2-7]{16}$')
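
# A hedged illustration of what these patterns accept (doctest-style comments
# added for this writeup, not part of the original module)...
#
#   >>> bool(NICKNAME_PATTERN.match('Unnamed'))
#   True
#   >>> bool(NICKNAME_PATTERN.match('not-a-valid-nickname!'))
#   False
#   >>> bool(HS_ADDRESS_PATTERN.match('aaaaaaaaaaaaaaaa'))
#   True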


def is_valid_fingerprint(entry, check_prefix = False):
  """
  Checks if a string is a properly formatted relay fingerprint. This checks for
  a '$' prefix if check_prefix is true, otherwise this only validates the hex
  digits.

  :param str entry: string to be checked
  :param bool check_prefix: checks for a '$' prefix

  :returns: **True** if the string could be a relay fingerprint, **False** otherwise
  """

  try:
    if check_prefix:
      if not entry or entry[0] != '$':
        return False

      entry = entry[1:]

    return is_hex_digits(entry, 40)
  except TypeError:
    return False
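
# For instance (an illustrative sketch, not original source; the fingerprint
# below is just forty arbitrary hex digits)...
#
#   >>> is_valid_fingerprint('9695DFC35FFEB861329B9F1AB04C46397020CE31')
#   True
#   >>> is_valid_fingerprint('$9695DFC35FFEB861329B9F1AB04C46397020CE31', check_prefix = True)
#   True
#   >>> is_valid_fingerprint('9695DFC3')
#   False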


def is_valid_nickname(entry):
  """
  Checks if a string is a valid format for being a nickname.

  :param str entry: string to be checked

  :returns: **True** if the string could be a nickname, **False** otherwise
  """

  try:
    return bool(NICKNAME_PATTERN.match(entry))
  except TypeError:
    return False


def is_valid_circuit_id(entry):
  """
  Checks if a string is a valid format for being a circuit identifier.

  :returns: **True** if the string could be a circuit id, **False** otherwise
  """

  try:
    return bool(CIRC_ID_PATTERN.match(entry))
  except TypeError:
    return False


def is_valid_stream_id(entry):
  """
  Checks if a string is a valid format for being a stream identifier.
  Currently, this is just an alias to :func:`~stem.util.tor_tools.is_valid_circuit_id`.

  :returns: **True** if the string could be a stream id, **False** otherwise
  """

  return is_valid_circuit_id(entry)


def is_valid_connection_id(entry):
  """
  Checks if a string is a valid format for being a connection identifier.
  Currently, this is just an alias to :func:`~stem.util.tor_tools.is_valid_circuit_id`.

  :returns: **True** if the string could be a connection id, **False** otherwise
  """

  return is_valid_circuit_id(entry)


def is_valid_hidden_service_address(entry):
  """
  Checks if a string is a valid format for being a hidden service address (not
  including the '.onion' suffix).

  :returns: **True** if the string could be a hidden service address, **False** otherwise
  """

  try:
    return bool(HS_ADDRESS_PATTERN.match(entry))
  except TypeError:
    return False


def is_hex_digits(entry, count):
  """
  Checks if a string is the given number of hex digits. Digits represented by
  letters are case insensitive.

  :param str entry: string to be checked
  :param int count: number of hex digits to be checked for

  :returns: **True** if the given number of hex digits, **False** otherwise
  """

  try:
    if len(entry) != count:
      return False

    int(entry, 16)  # attempt to convert it as hex
    return True
  except (ValueError, TypeError):
    return False
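
# Likewise for is_hex_digits (an illustrative sketch, not original source)...
#
#   >>> is_hex_digits('6e89', 4)
#   True
#   >>> is_hex_digits('6e89', 6)
#   False
#   >>> is_hex_digits(None, 4)
#   False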
|
376
Shared/lib/python3.4/site-packages/stem/version.py
Normal file
376
Shared/lib/python3.4/site-packages/stem/version.py
Normal file
|
@ -0,0 +1,376 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Tor versioning information and requirements for its features. These can be
easily parsed and compared, for instance...

::

  >>> from stem.version import get_system_tor_version, Requirement
  >>> my_version = get_system_tor_version()
  >>> print(my_version)
  0.2.1.30
  >>> my_version >= Requirement.TORRC_CONTROL_SOCKET
  True

**Module Overview:**

::

  get_system_tor_version - gets the version of our system's tor installation

  Version - Tor versioning information

.. data:: Requirement (enum)

  Enumerations for the version requirements of features.

  ===================================== ===========
  Requirement                           Description
  ===================================== ===========
  **AUTH_SAFECOOKIE**                   SAFECOOKIE authentication method
  **DROPGUARDS**                        DROPGUARDS requests
  **EVENT_AUTHDIR_NEWDESCS**            AUTHDIR_NEWDESC events
  **EVENT_BUILDTIMEOUT_SET**            BUILDTIMEOUT_SET events
  **EVENT_CIRC_MINOR**                  CIRC_MINOR events
  **EVENT_CLIENTS_SEEN**                CLIENTS_SEEN events
  **EVENT_CONF_CHANGED**                CONF_CHANGED events
  **EVENT_DESCCHANGED**                 DESCCHANGED events
  **EVENT_GUARD**                       GUARD events
  **EVENT_HS_DESC_CONTENT**             HS_DESC_CONTENT events
  **EVENT_NEWCONSENSUS**                NEWCONSENSUS events
  **EVENT_NS**                          NS events
  **EVENT_SIGNAL**                      SIGNAL events
  **EVENT_STATUS**                      STATUS_GENERAL, STATUS_CLIENT, and STATUS_SERVER events
  **EVENT_STREAM_BW**                   STREAM_BW events
  **EVENT_TRANSPORT_LAUNCHED**          TRANSPORT_LAUNCHED events
  **EVENT_CONN_BW**                     CONN_BW events
  **EVENT_CIRC_BW**                     CIRC_BW events
  **EVENT_CELL_STATS**                  CELL_STATS events
  **EVENT_TB_EMPTY**                    TB_EMPTY events
  **EVENT_HS_DESC**                     HS_DESC events
  **EXTENDCIRCUIT_PATH_OPTIONAL**       EXTENDCIRCUIT queries can omit the path if the circuit is zero
  **FEATURE_EXTENDED_EVENTS**           'EXTENDED_EVENTS' optional feature
  **FEATURE_VERBOSE_NAMES**             'VERBOSE_NAMES' optional feature
  **GETINFO_CONFIG_TEXT**               'GETINFO config-text' query
  **HSFETCH**                           HSFETCH requests
  **HSPOST**                            HSPOST requests
  **ADD_ONION**                         ADD_ONION and DEL_ONION requests
  **LOADCONF**                          LOADCONF requests
  **MICRODESCRIPTOR_IS_DEFAULT**        Tor gets microdescriptors by default rather than server descriptors
  **TAKEOWNERSHIP**                     TAKEOWNERSHIP requests
  **TORRC_CONTROL_SOCKET**              'ControlSocket <path>' config option
  **TORRC_PORT_FORWARDING**             'PortForwarding' config option
  **TORRC_DISABLE_DEBUGGER_ATTACHMENT** 'DisableDebuggerAttachment' config option
  **TORRC_VIA_STDIN**                   Allow torrc options via 'tor -f -' (:trac:`13865`)
  ===================================== ===========
"""

import os
import re

import stem.util.enum
import stem.util.system

try:
  # added in python 3.2
  from functools import lru_cache
except ImportError:
  from stem.util.lru_cache import lru_cache

# cache for the get_system_tor_version function
VERSION_CACHE = {}


def get_system_tor_version(tor_cmd = 'tor'):
  """
  Queries tor for its version. This is OS dependent, only working on Linux,
  OSX, and BSD.

  :param str tor_cmd: command used to run tor

  :returns: :class:`~stem.version.Version` provided by the tor command

  :raises: **IOError** if unable to query or parse the version
  """

  if tor_cmd not in VERSION_CACHE:
    version_cmd = '%s --version' % tor_cmd

    try:
      version_output = stem.util.system.call(version_cmd)
    except OSError as exc:
      # make the error message nicer if this is due to tor being unavailable

      if 'No such file or directory' in str(exc):
        if os.path.isabs(tor_cmd):
          exc = "Unable to check tor's version. '%s' doesn't exist." % tor_cmd
        else:
          exc = "Unable to run '%s'. Maybe tor isn't in your PATH?" % version_cmd

      raise IOError(exc)

    if version_output:
      # output example:
      # Oct 21 07:19:27.438 [notice] Tor v0.2.1.30. This is experimental software. Do not rely on it for strong anonymity. (Running on Linux i686)
      # Tor version 0.2.1.30.

      last_line = version_output[-1]

      if last_line.startswith('Tor version ') and last_line.endswith('.'):
        try:
          version_str = last_line[12:-1]
          VERSION_CACHE[tor_cmd] = Version(version_str)
        except ValueError as exc:
          raise IOError(exc)
      else:
        raise IOError("Unexpected response from '%s': %s" % (version_cmd, last_line))
    else:
      raise IOError("'%s' didn't have any output" % version_cmd)

  return VERSION_CACHE[tor_cmd]
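
# Example usage, as a minimal sketch (assumes a tor binary is on the PATH and
# reports itself as 0.2.1.30)...
#
#   >>> version = get_system_tor_version()
#   >>> print(version)
#   0.2.1.30
#   >>> version >= Version('0.2.0.30')
#   True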


@lru_cache()
def _get_version(version_str):
  return Version(version_str)


class Version(object):
  """
  Comparable tor version. These are constructed from strings that conform to
  the 'new' style in the `tor version-spec
  <https://gitweb.torproject.org/torspec.git/tree/version-spec.txt>`_,
  such as "0.1.4" or "0.2.2.23-alpha (git-7dcd105be34a4f44)".

  :var int major: major version
  :var int minor: minor version
  :var int micro: micro version
  :var int patch: patch level (**None** if undefined)
  :var str status: status tag such as 'alpha' or 'beta-dev' (**None** if undefined)
  :var str extra: extra information without its parentheses such as
    'git-8be6058d8f31e578' (**None** if undefined)
  :var str git_commit: git commit id (**None** if it wasn't provided)

  :param str version_str: version to be parsed

  :raises: **ValueError** if input isn't a valid tor version
  """

  def __init__(self, version_str):
    self.version_str = version_str
    version_parts = re.match(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?( \(\S*\))?$', version_str)
    self._hash = None

    if version_parts:
      major, minor, micro, patch, status, extra = version_parts.groups()

      # The patch and status matches are optional (may be None) and have an
      # extra preceding period or dash if they exist. Stripping those off.

      if patch:
        patch = int(patch[1:])

      if status:
        status = status[1:]

      if extra:
        extra = extra[2:-1]

      self.major = int(major)
      self.minor = int(minor)
      self.micro = int(micro)
      self.patch = patch
      self.status = status
      self.extra = extra

      if extra and re.match('^git-[0-9a-f]{16}$', extra):
        self.git_commit = extra[4:]
      else:
        self.git_commit = None
    else:
      raise ValueError("'%s' isn't a properly formatted tor version" % version_str)

  def __str__(self):
    """
    Provides the string used to construct the version.
    """

    return self.version_str

  def _compare(self, other, method):
    """
    Compares version ordering according to the spec.
    """

    if not isinstance(other, Version):
      return False

    for attr in ('major', 'minor', 'micro', 'patch'):
      my_version = getattr(self, attr)
      other_version = getattr(other, attr)

      if my_version is None:
        my_version = 0

      if other_version is None:
        other_version = 0

      if my_version != other_version:
        return method(my_version, other_version)

    # According to the version spec...
    #
    #   If we *do* encounter two versions that differ only by status tag, we
    #   compare them lexically as ASCII byte strings.

    my_status = self.status if self.status else ''
    other_status = other.status if other.status else ''

    return method(my_status, other_status)

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __gt__(self, other):
    """
    Checks if this version meets the requirements for a given feature. We can
    be compared to either a :class:`~stem.version.Version` or
    :class:`~stem.version._VersionRequirements`.
    """

    if isinstance(other, _VersionRequirements):
      for rule in other.rules:
        if rule(self):
          return True

      return False

    return self._compare(other, lambda s, o: s > o)

  def __ge__(self, other):
    if isinstance(other, _VersionRequirements):
      for rule in other.rules:
        if rule(self):
          return True

      return False

    return self._compare(other, lambda s, o: s >= o)

  def __hash__(self):
    if self._hash is None:
      my_hash = 0

      for attr in ('major', 'minor', 'micro', 'patch', 'status'):
        my_hash *= 1024

        attr_value = getattr(self, attr)

        if attr_value is not None:
          my_hash += hash(attr_value)

      self._hash = my_hash

    return self._hash
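
# Comparisons go through the numeric components first, then fall back to the
# status tag; the 'extra' information is not compared (an illustrative
# sketch, not original source)...
#
#   >>> Version('0.2.3.13') > Version('0.2.2.36')
#   True
#   >>> Version('0.2.2.23-alpha') == Version('0.2.2.23-alpha (git-7dcd105be34a4f44)')
#   True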


class _VersionRequirements(object):
  """
  Series of version constraints that can be compared to. For instance, this
  allows for comparisons like 'if I'm greater than version X in the 0.2.2
  series, or greater than version Y in the 0.2.3 series'.

  This is a logical 'or' of the series of rules.
  """

  def __init__(self):
    self.rules = []

  def greater_than(self, version, inclusive = True):
    """
    Adds a constraint that we're greater than the given version.

    :param stem.version.Version version: version we're checking against
    :param bool inclusive: if comparison is inclusive or not
    """

    if inclusive:
      self.rules.append(lambda v: version <= v)
    else:
      self.rules.append(lambda v: version < v)

  def less_than(self, version, inclusive = True):
    """
    Adds a constraint that we're less than the given version.

    :param stem.version.Version version: version we're checking against
    :param bool inclusive: if comparison is inclusive or not
    """

    if inclusive:
      self.rules.append(lambda v: version >= v)
    else:
      self.rules.append(lambda v: version > v)

  def in_range(self, from_version, to_version, from_inclusive = True, to_inclusive = False):
    """
    Adds a constraint that we're within the range from one version to another.

    :param stem.version.Version from_version: beginning of the comparison range
    :param stem.version.Version to_version: end of the comparison range
    :param bool from_inclusive: if comparison is inclusive with the starting version
    :param bool to_inclusive: if comparison is inclusive with the ending version
    """

    if from_inclusive and to_inclusive:
      new_rule = lambda v: from_version <= v <= to_version
    elif from_inclusive:
      new_rule = lambda v: from_version <= v < to_version
    else:
      new_rule = lambda v: from_version < v < to_version

    self.rules.append(new_rule)

safecookie_req = _VersionRequirements()
safecookie_req.in_range(Version('0.2.2.36'), Version('0.2.3.0'))
safecookie_req.greater_than(Version('0.2.3.13'))

Requirement = stem.util.enum.Enum(
  ('AUTH_SAFECOOKIE', safecookie_req),
  ('DROPGUARDS', Version('0.2.5.1-alpha')),
  ('EVENT_AUTHDIR_NEWDESCS', Version('0.1.1.10-alpha')),
  ('EVENT_BUILDTIMEOUT_SET', Version('0.2.2.7-alpha')),
  ('EVENT_CIRC_MINOR', Version('0.2.3.11-alpha')),
  ('EVENT_CLIENTS_SEEN', Version('0.2.1.10-alpha')),
  ('EVENT_CONF_CHANGED', Version('0.2.3.3-alpha')),
  ('EVENT_DESCCHANGED', Version('0.1.2.2-alpha')),
  ('EVENT_GUARD', Version('0.1.2.5-alpha')),
  ('EVENT_HS_DESC_CONTENT', Version('0.2.7.1-alpha')),
  ('EVENT_NS', Version('0.1.2.3-alpha')),
  ('EVENT_NEWCONSENSUS', Version('0.2.1.13-alpha')),
  ('EVENT_SIGNAL', Version('0.2.3.1-alpha')),
  ('EVENT_STATUS', Version('0.1.2.3-alpha')),
  ('EVENT_STREAM_BW', Version('0.1.2.8-beta')),
  ('EVENT_TRANSPORT_LAUNCHED', Version('0.2.5.0-alpha')),
  ('EVENT_CONN_BW', Version('0.2.5.2-alpha')),
  ('EVENT_CIRC_BW', Version('0.2.5.2-alpha')),
  ('EVENT_CELL_STATS', Version('0.2.5.2-alpha')),
  ('EVENT_TB_EMPTY', Version('0.2.5.2-alpha')),
  ('EVENT_HS_DESC', Version('0.2.5.2-alpha')),
  ('EXTENDCIRCUIT_PATH_OPTIONAL', Version('0.2.2.9')),
  ('FEATURE_EXTENDED_EVENTS', Version('0.2.2.1-alpha')),
  ('FEATURE_VERBOSE_NAMES', Version('0.2.2.1-alpha')),
  ('GETINFO_CONFIG_TEXT', Version('0.2.2.7-alpha')),
  ('HSFETCH', Version('0.2.7.1-alpha')),
  ('HSPOST', Version('0.2.7.1-alpha')),
  ('ADD_ONION', Version('0.2.7.1-alpha')),
  ('LOADCONF', Version('0.2.1.1')),
  ('MICRODESCRIPTOR_IS_DEFAULT', Version('0.2.3.3')),
  ('TAKEOWNERSHIP', Version('0.2.2.28-beta')),
  ('TORRC_CONTROL_SOCKET', Version('0.2.0.30')),
  ('TORRC_PORT_FORWARDING', Version('0.2.3.1-alpha')),
  ('TORRC_DISABLE_DEBUGGER_ATTACHMENT', Version('0.2.3.9')),
  ('TORRC_VIA_STDIN', Version('0.2.6.3-alpha')),
)
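
Putting the requirement enum together with version comparison, a minimal
sketch (the version strings are illustrative):

::

  from stem.version import Requirement, Version

  # AUTH_SAFECOOKIE is an 'or' of two ranges, so 0.2.2.36 satisfies it while
  # 0.2.3.1-alpha (which falls in the gap between the ranges) does not.
  print(Version('0.2.2.36') >= Requirement.AUTH_SAFECOOKIE)       # True
  print(Version('0.2.3.1-alpha') >= Requirement.AUTH_SAFECOOKIE)  # False
  print(Version('0.2.6.3-alpha') >= Requirement.TORRC_VIA_STDIN)  # True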