run update

This commit is contained in:
j 2018-12-15 01:08:54 +01:00
commit 6806bebb7c
607 changed files with 52543 additions and 31832 deletions

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -8,14 +8,23 @@ Library for working with the tor process.
::
Endpoint - Networking endpoint.
|- ORPort - Tor relay endpoint.
+- DirPort - Descriptor mirror.
ControllerError - Base exception raised when using the controller.
|- ProtocolError - Malformed socket data.
|
|- OperationFailed - Tor was unable to successfully complete the operation.
| |- UnsatisfiableRequest - Tor was unable to satisfy a valid request.
| | +- CircuitExtensionFailed - Attempt to make or extend a circuit failed.
| |- DescriptorUnavailable - The given relay descriptor is unavailable.
| | |- CircuitExtensionFailed - Attempt to make or extend a circuit failed.
| | |- DescriptorUnavailable - The given relay descriptor is unavailable.
| | +- Timeout - Caller requested timeout was reached.
| |
| |
| +- InvalidRequest - Invalid request.
| +- InvalidArguments - Invalid request parameters.
|
+- SocketError - Communication with the socket failed.
+- SocketClosed - Socket has been shut down.
@ -37,6 +46,9 @@ Library for working with the tor process.
Signals that the tor process will accept.
.. versionchanged:: 1.3.0
Added the HEARTBEAT signal.
========================= ===========
Signal Description
========================= ===========
@ -57,37 +69,45 @@ Library for working with the tor process.
**Note:** The BADDIRECTORY flag was `removed from tor <https://gitweb.torproject.org/torspec.git/commit/dir-spec.txt?id=2f012f1>`_.
================= ===========
Flag Description
================= ===========
**AUTHORITY** relay is a directory authority
**BADEXIT** relay shouldn't be used as an exit due to being either problematic or malicious (`wiki <https://trac.torproject.org/projects/tor/wiki/doc/badRelays>`_)
**BADDIRECTORY** relay shouldn't be used for directory information
**EXIT** relay's exit policy makes it more useful as an exit rather than middle hop
**FAST** relay's suitable for high-bandwidth circuits
**GUARD** relay's suitable for being an entry guard (first hop)
**HSDIR** relay is being used as a v2 hidden service directory
**NAMED** relay can be referred to by its nickname
**RUNNING** relay is currently usable
**STABLE** relay's suitable for long-lived circuits
**UNNAMED** relay isn't currently bound to a nickname
**V2DIR** relay supports the v2 directory protocol
**VALID** relay has been validated
================= ===========
.. versionchanged:: 1.5.0
Added the NO_ED_CONSENSUS flag.
=================== ===========
Flag Description
=================== ===========
**AUTHORITY** relay is a directory authority
**BADEXIT** relay shouldn't be used as an exit due to being either problematic or malicious
**BADDIRECTORY** relay shouldn't be used for directory information
**EXIT** relay's exit policy makes it more useful as an exit rather than middle hop
**FAST** relay's suitable for high-bandwidth circuits
**GUARD** relay's suitable for being an entry guard (first hop)
**HSDIR** relay is being used as a v2 hidden service directory
**NAMED** relay can be referred to by its nickname
**NO_ED_CONSENSUS** relay's Ed25519 doesn't reflect the consensus
**RUNNING** relay is currently usable
**STABLE** relay's suitable for long-lived circuits
**UNNAMED** relay isn't currently bound to a nickname
**V2DIR** relay supports the v2 directory protocol
**VALID** relay has been validated
=================== ===========
.. data:: CircStatus (enum)
Statuses that a circuit can be in. Tor may provide statuses not in this enum.
============ ===========
CircStatus Description
============ ===========
**LAUNCHED** new circuit was created
**BUILT** circuit finished being created and can accept traffic
**EXTENDED** circuit has been extended by a hop
**FAILED** circuit construction failed
**CLOSED** circuit has been closed
============ ===========
.. versionchanged:: 1.6.0
Added the GUARD_WAIT status.
============== ===========
CircStatus Description
============== ===========
**LAUNCHED** new circuit was created
**BUILT** circuit finished being created and can accept traffic
**GUARD_WAIT** waiting to see if there's a circuit with a better guard before using
**EXTENDED** circuit has been extended by a hop
**FAILED** circuit construction failed
**CLOSED** circuit has been closed
============== ===========
.. data:: CircBuildFlag (enum)
@ -378,6 +398,8 @@ Library for working with the tor process.
The meaning behind these values is a bit unclear, pending :trac:`10086`.
.. versionadded:: 1.2.0
=============== ===========
ConnectionType Description
=============== ===========
@ -390,6 +412,8 @@ Library for working with the tor process.
Bucket categories of TB_EMPTY events.
.. versionadded:: 1.2.0
=============== ===========
TokenBucket Description
=============== ===========
@ -402,6 +426,14 @@ Library for working with the tor process.
Action being taken in a HS_DESC event.
.. versionadded:: 1.2.0
.. versionchanged:: 1.4.0
Added the UPLOAD and UPLOADED actions.
.. versionchanged:: 1.5.0
Added the CREATED action.
=============== ===========
HSDescAction Description
=============== ===========
@ -411,12 +443,21 @@ Library for working with the tor process.
**UPLOADED** descriptor was uploaded with HSPOST
**IGNORE** fetched descriptor was ignored because we already have its v0 descriptor
**FAILED** we were unable to retrieve the descriptor
**CREATED** hidden service descriptor was just created
=============== ===========
.. data:: HSDescReason (enum)
Reason for the hidden service descriptor to fail to be fetched.
.. versionadded:: 1.3.0
.. versionchanged:: 1.4.0
Added the UPLOAD_REJECTED reason.
.. versionchanged:: 1.6.0
Added the QUERY_NO_HSDIR reason.
=================== ===========
HSDescReason Description
=================== ===========
@ -424,6 +465,7 @@ Library for working with the tor process.
**QUERY_REJECTED** hidden service directory refused to provide the descriptor
**UPLOAD_REJECTED** descriptor was rejected by the hidden service directory
**NOT_FOUND** descriptor with the given identifier wasn't found
**QUERY_NO_HSDIR** no hidden service directory was found
**UNEXPECTED** failure type is unknown
=================== ===========
@ -431,6 +473,8 @@ Library for working with the tor process.
Type of authentication being used for a HS_DESC event.
.. versionadded:: 1.2.0
================= ===========
HSAuth Description
================= ===========
@ -441,18 +485,23 @@ Library for working with the tor process.
================= ===========
"""
__version__ = '1.4.0'
import stem.util
import stem.util.enum
__version__ = '1.7.0'
__author__ = 'Damian Johnson'
__contact__ = 'atagar@torproject.org'
__url__ = 'https://stem.torproject.org/'
__license__ = 'LGPLv3'
__all__ = [
'client',
'descriptor',
'response',
'util',
'connection',
'control',
'directory',
'exit_policy',
'prereq',
'process',
@ -464,6 +513,7 @@ __all__ = [
'UnsatisfiableRequest',
'CircuitExtensionFailed',
'DescriptorUnavailable',
'Timeout',
'InvalidRequest',
'InvalidArguments',
'SocketError',
@ -494,16 +544,8 @@ __all__ = [
'TimeoutSetType',
]
import stem.prereq
if stem.prereq.is_python_3():
str_type = str
int_type = int
else:
str_type = unicode
int_type = long
import stem.util.enum
# Constant that we use by default for our User-Agent when downloading descriptors
stem.USER_AGENT = 'Stem/%s' % __version__
# Constant to indicate an undefined argument default. Usually we'd use None for
# this, but users will commonly provide None as the argument so need something
@ -512,6 +554,57 @@ import stem.util.enum
UNDEFINED = '<Undefined_ >'
class Endpoint(object):
  """
  Networking endpoint tor can connect to.

  .. versionadded:: 1.7.0

  :var str address: ip address of the endpoint
  :var int port: port of the endpoint
  """

  def __init__(self, address, port):
    # accept anything that is a well formed IPv4 or IPv6 address
    if not (stem.util.connection.is_valid_ipv4_address(address) or stem.util.connection.is_valid_ipv6_address(address)):
      raise ValueError("'%s' isn't a valid IPv4 or IPv6 address" % address)
    elif not stem.util.connection.is_valid_port(port):
      raise ValueError("'%s' isn't a valid port" % port)

    self.address = address
    self.port = int(port)

  def __hash__(self):
    return stem.util._hash_attr(self, 'address', 'port', cache = True)

  def __eq__(self, other):
    # equality is delegated to our (cached) attribute hash
    if not isinstance(other, Endpoint):
      return False

    return hash(self) == hash(other)

  def __ne__(self, other):
    return not self == other
class ORPort(Endpoint):
  """
  Tor relay's ORPort. The endpoint on which Tor accepts relay traffic.

  :var list link_protocols: link protocol version we're willing to establish
  """

  def __init__(self, address, port, link_protocols = None):
    # address/port validation happens in the Endpoint constructor
    super(ORPort, self).__init__(address, port)

    self.link_protocols = link_protocols

  def __hash__(self):
    # fold our parent's 'address' and 'port' into the hash as well
    return stem.util._hash_attr(self, 'link_protocols', parent = Endpoint, cache = True)
class DirPort(Endpoint):
  """
  Tor relay's DirPort. The endpoint on which Tor provides http access for
  downloading descriptors.
  """

  # no additional state beyond what Endpoint provides
class ControllerError(Exception):
  """
  Base error for controller communication issues.
  """
@ -553,15 +646,29 @@ class CircuitExtensionFailed(UnsatisfiableRequest):
self.circ = circ
class DescriptorUnavailable(UnsatisfiableRequest):
  """
  Tor was unable to provide a descriptor for the given relay.

  .. versionchanged:: 1.7.0
     Subclassed under UnsatisfiableRequest rather than OperationFailed.

  :param str message: error description
  """

  # NOTE(review): the source contained two consecutive class headers for this
  # exception (old 'OperationFailed' parent and new 'UnsatisfiableRequest'
  # parent) — merge/diff residue. Only the new definition is kept.

  def __init__(self, message):
    super(DescriptorUnavailable, self).__init__(message = message)
class Timeout(UnsatisfiableRequest):
  """
  Timeout requested by the caller was reached.

  .. versionadded:: 1.7.0

  :param str message: error description
  """

  def __init__(self, message):
    # pass the description along as the request's message attribute
    super(Timeout, self).__init__(message = message)
class InvalidRequest(OperationFailed):
"""
Exception raised when the request was invalid or malformed.
@ -590,6 +697,7 @@ class SocketError(ControllerError):
class SocketClosed(SocketError):
  """
  Control socket was closed before completing the message.
  """
Runlevel = stem.util.enum.UppercaseEnum(
'DEBUG',
'INFO',
@ -607,6 +715,7 @@ Flag = stem.util.enum.Enum(
('GUARD', 'Guard'),
('HSDIR', 'HSDir'),
('NAMED', 'Named'),
('NO_ED_CONSENSUS', 'NoEdConsensus'),
('RUNNING', 'Running'),
('STABLE', 'Stable'),
('UNNAMED', 'Unnamed'),
@ -634,6 +743,7 @@ Signal = stem.util.enum.UppercaseEnum(
CircStatus = stem.util.enum.UppercaseEnum(
'LAUNCHED',
'BUILT',
'GUARD_WAIT',
'EXTENDED',
'FAILED',
'CLOSED',
@ -815,6 +925,7 @@ HSDescAction = stem.util.enum.UppercaseEnum(
'UPLOADED',
'IGNORE',
'FAILED',
'CREATED',
)
HSDescReason = stem.util.enum.UppercaseEnum(
@ -822,6 +933,7 @@ HSDescReason = stem.util.enum.UppercaseEnum(
'QUERY_REJECTED',
'UPLOAD_REJECTED',
'NOT_FOUND',
'QUERY_NO_HSDIR',
'UNEXPECTED',
)
@ -831,3 +943,6 @@ HSAuth = stem.util.enum.UppercaseEnum(
'STEALTH_AUTH',
'UNKNOWN',
)
import stem.util.connection # importing afterward to avoid circular dependency

View file

@ -0,0 +1,855 @@
tor_commit a42e52dded44a2c58a7200511e27a5c0e01cd78b
stem_commit 4d7cc882b5b8966f69232d8489bb5b07226abc81
header.timestamp 20180106205601
header.version 2.0.0
header.type fallback
001524DD403D729F08F7E5D77813EF12756CFA8D.address 185.13.39.197
001524DD403D729F08F7E5D77813EF12756CFA8D.or_port 443
001524DD403D729F08F7E5D77813EF12756CFA8D.dir_port 80
001524DD403D729F08F7E5D77813EF12756CFA8D.nickname Neldoreth
001524DD403D729F08F7E5D77813EF12756CFA8D.has_extrainfo false
0111BA9B604669E636FFD5B503F382A4B7AD6E80.address 176.10.104.240
0111BA9B604669E636FFD5B503F382A4B7AD6E80.or_port 443
0111BA9B604669E636FFD5B503F382A4B7AD6E80.dir_port 80
0111BA9B604669E636FFD5B503F382A4B7AD6E80.nickname DigiGesTor1e1
0111BA9B604669E636FFD5B503F382A4B7AD6E80.has_extrainfo false
025B66CEBC070FCB0519D206CF0CF4965C20C96E.address 185.100.85.61
025B66CEBC070FCB0519D206CF0CF4965C20C96E.or_port 443
025B66CEBC070FCB0519D206CF0CF4965C20C96E.dir_port 80
025B66CEBC070FCB0519D206CF0CF4965C20C96E.nickname nibbana
025B66CEBC070FCB0519D206CF0CF4965C20C96E.has_extrainfo false
0756B7CD4DFC8182BE23143FAC0642F515182CEB.address 5.9.110.236
0756B7CD4DFC8182BE23143FAC0642F515182CEB.or_port 9001
0756B7CD4DFC8182BE23143FAC0642F515182CEB.dir_port 9030
0756B7CD4DFC8182BE23143FAC0642F515182CEB.nickname rueckgrat
0756B7CD4DFC8182BE23143FAC0642F515182CEB.has_extrainfo true
0756B7CD4DFC8182BE23143FAC0642F515182CEB.orport6_address 2a01:4f8:162:51e2::2
0756B7CD4DFC8182BE23143FAC0642F515182CEB.orport6_port 9001
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.address 163.172.149.155
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.or_port 443
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.dir_port 80
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.nickname niij02
0B85617241252517E8ECF2CFC7F4C1A32DCD153F.has_extrainfo false
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.address 5.39.92.199
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.or_port 443
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.dir_port 80
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.nickname BaelorTornodePw
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.has_extrainfo false
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.orport6_address 2001:41d0:8:b1c7::1
0BEA4A88D069753218EAAAD6D22EA87B9A1319D6.orport6_port 443
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.address 163.172.25.118
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.or_port 22
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.dir_port 80
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.nickname torpidsFRonline4
0CF8F3E6590F45D50B70F2F7DA6605ECA6CD408F.has_extrainfo false
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.address 178.62.197.82
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.or_port 443
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.dir_port 80
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.nickname HY100
0D3EBA17E1C78F1E9900BABDB23861D46FCAF163.has_extrainfo false
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.address 185.100.86.100
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.or_port 443
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.dir_port 80
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.nickname saveyourprivacyex1
0E8C0C8315B66DB5F703804B3889A1DD66C67CE0.has_extrainfo false
11DF0017A43AF1F08825CD5D973297F81AB00FF3.address 37.120.174.249
11DF0017A43AF1F08825CD5D973297F81AB00FF3.or_port 443
11DF0017A43AF1F08825CD5D973297F81AB00FF3.dir_port 80
11DF0017A43AF1F08825CD5D973297F81AB00FF3.nickname gGDHjdcC6zAlM8k08lX
11DF0017A43AF1F08825CD5D973297F81AB00FF3.has_extrainfo false
11DF0017A43AF1F08825CD5D973297F81AB00FF3.orport6_address 2a03:4000:6:724c:df98:15f9:b34d:443
11DF0017A43AF1F08825CD5D973297F81AB00FF3.orport6_port 443
12AD30E5D25AA67F519780E2111E611A455FDC89.address 193.11.114.43
12AD30E5D25AA67F519780E2111E611A455FDC89.or_port 9001
12AD30E5D25AA67F519780E2111E611A455FDC89.dir_port 9030
12AD30E5D25AA67F519780E2111E611A455FDC89.nickname mdfnet1
12AD30E5D25AA67F519780E2111E611A455FDC89.has_extrainfo false
12AD30E5D25AA67F519780E2111E611A455FDC89.orport6_address 2001:6b0:30:1000::99
12AD30E5D25AA67F519780E2111E611A455FDC89.orport6_port 9050
12FD624EE73CEF37137C90D38B2406A66F68FAA2.address 37.157.195.87
12FD624EE73CEF37137C90D38B2406A66F68FAA2.or_port 443
12FD624EE73CEF37137C90D38B2406A66F68FAA2.dir_port 8030
12FD624EE73CEF37137C90D38B2406A66F68FAA2.nickname thanatosCZ
12FD624EE73CEF37137C90D38B2406A66F68FAA2.has_extrainfo false
136F9299A5009A4E0E96494E723BDB556FB0A26B.address 178.16.208.59
136F9299A5009A4E0E96494E723BDB556FB0A26B.or_port 443
136F9299A5009A4E0E96494E723BDB556FB0A26B.dir_port 80
136F9299A5009A4E0E96494E723BDB556FB0A26B.nickname bakunin2
136F9299A5009A4E0E96494E723BDB556FB0A26B.has_extrainfo false
136F9299A5009A4E0E96494E723BDB556FB0A26B.orport6_address 2a00:1c20:4089:1234:bff6:e1bb:1ce3:8dc6
136F9299A5009A4E0E96494E723BDB556FB0A26B.orport6_port 443
16102E458460349EE45C0901DAA6C30094A9BBEA.address 163.172.138.22
16102E458460349EE45C0901DAA6C30094A9BBEA.or_port 443
16102E458460349EE45C0901DAA6C30094A9BBEA.dir_port 80
16102E458460349EE45C0901DAA6C30094A9BBEA.nickname mkultra
16102E458460349EE45C0901DAA6C30094A9BBEA.has_extrainfo false
16102E458460349EE45C0901DAA6C30094A9BBEA.orport6_address 2001:bc8:4400:2100::1:3
16102E458460349EE45C0901DAA6C30094A9BBEA.orport6_port 443
175921396C7C426309AB03775A9930B6F611F794.address 178.62.60.37
175921396C7C426309AB03775A9930B6F611F794.or_port 443
175921396C7C426309AB03775A9930B6F611F794.dir_port 80
175921396C7C426309AB03775A9930B6F611F794.nickname lovejoy
175921396C7C426309AB03775A9930B6F611F794.has_extrainfo false
185663B7C12777F052B2C2D23D7A239D8DA88A0F.address 171.25.193.25
185663B7C12777F052B2C2D23D7A239D8DA88A0F.or_port 443
185663B7C12777F052B2C2D23D7A239D8DA88A0F.dir_port 80
185663B7C12777F052B2C2D23D7A239D8DA88A0F.nickname DFRI5
185663B7C12777F052B2C2D23D7A239D8DA88A0F.has_extrainfo false
185663B7C12777F052B2C2D23D7A239D8DA88A0F.orport6_address 2001:67c:289c::25
185663B7C12777F052B2C2D23D7A239D8DA88A0F.orport6_port 443
1938EBACBB1A7BFA888D9623C90061130E63BB3F.address 149.56.141.138
1938EBACBB1A7BFA888D9623C90061130E63BB3F.or_port 9001
1938EBACBB1A7BFA888D9623C90061130E63BB3F.dir_port 9030
1938EBACBB1A7BFA888D9623C90061130E63BB3F.nickname Aerodynamik04
1938EBACBB1A7BFA888D9623C90061130E63BB3F.has_extrainfo false
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.address 81.7.14.253
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.or_port 443
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.dir_port 9001
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.nickname Ichotolot60
1AE039EE0B11DB79E4B4B29CBA9F752864A0259E.has_extrainfo false
1C90D3AEADFF3BCD079810632C8B85637924A58E.address 163.172.53.84
1C90D3AEADFF3BCD079810632C8B85637924A58E.or_port 21
1C90D3AEADFF3BCD079810632C8B85637924A58E.dir_port 143
1C90D3AEADFF3BCD079810632C8B85637924A58E.nickname Multivac
1C90D3AEADFF3BCD079810632C8B85637924A58E.has_extrainfo false
1C90D3AEADFF3BCD079810632C8B85637924A58E.orport6_address 2001:bc8:24f8::
1C90D3AEADFF3BCD079810632C8B85637924A58E.orport6_port 21
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.address 46.101.151.222
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.or_port 443
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.dir_port 80
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.nickname flanders
1DBAED235E3957DE1ABD25B4206BE71406FB61F8.has_extrainfo false
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.address 91.219.237.229
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.or_port 443
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.dir_port 80
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.nickname JakeDidNothingWrong
1ECD73B936CB6E6B3CD647CC204F108D9DF2C9F7.has_extrainfo false
1F6ABD086F40B890A33C93CC4606EE68B31C9556.address 199.184.246.250
1F6ABD086F40B890A33C93CC4606EE68B31C9556.or_port 443
1F6ABD086F40B890A33C93CC4606EE68B31C9556.dir_port 80
1F6ABD086F40B890A33C93CC4606EE68B31C9556.nickname dao
1F6ABD086F40B890A33C93CC4606EE68B31C9556.has_extrainfo false
1F6ABD086F40B890A33C93CC4606EE68B31C9556.orport6_address 2620:124:1009:1::171
1F6ABD086F40B890A33C93CC4606EE68B31C9556.orport6_port 443
1FA8F638298645BE58AC905276680889CB795A94.address 185.129.249.124
1FA8F638298645BE58AC905276680889CB795A94.or_port 9001
1FA8F638298645BE58AC905276680889CB795A94.dir_port 9030
1FA8F638298645BE58AC905276680889CB795A94.nickname treadstone
1FA8F638298645BE58AC905276680889CB795A94.has_extrainfo false
20462CBA5DA4C2D963567D17D0B7249718114A68.address 212.47.229.2
20462CBA5DA4C2D963567D17D0B7249718114A68.or_port 9001
20462CBA5DA4C2D963567D17D0B7249718114A68.dir_port 9030
20462CBA5DA4C2D963567D17D0B7249718114A68.nickname scaletor
20462CBA5DA4C2D963567D17D0B7249718114A68.has_extrainfo false
20462CBA5DA4C2D963567D17D0B7249718114A68.orport6_address 2001:bc8:4400:2100::f03
20462CBA5DA4C2D963567D17D0B7249718114A68.orport6_port 9001
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.address 77.247.181.164
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.or_port 443
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.dir_port 80
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.nickname HaveHeart
204DFD2A2C6A0DC1FA0EACB495218E0B661704FD.has_extrainfo false
230A8B2A8BA861210D9B4BA97745AEC217A94207.address 163.172.176.167
230A8B2A8BA861210D9B4BA97745AEC217A94207.or_port 443
230A8B2A8BA861210D9B4BA97745AEC217A94207.dir_port 80
230A8B2A8BA861210D9B4BA97745AEC217A94207.nickname niij01
230A8B2A8BA861210D9B4BA97745AEC217A94207.has_extrainfo false
231C2B9C8C31C295C472D031E06964834B745996.address 37.200.98.5
231C2B9C8C31C295C472D031E06964834B745996.or_port 443
231C2B9C8C31C295C472D031E06964834B745996.dir_port 80
231C2B9C8C31C295C472D031E06964834B745996.nickname torpidsDEdomainf
231C2B9C8C31C295C472D031E06964834B745996.has_extrainfo false
231C2B9C8C31C295C472D031E06964834B745996.orport6_address 2a00:1158:3::11a
231C2B9C8C31C295C472D031E06964834B745996.orport6_port 993
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.address 138.201.250.33
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.or_port 9011
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.dir_port 9012
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.nickname storm
2BA2C8E96B2590E1072AECE2BDB5C48921BF8510.has_extrainfo false
2CDCFED0142B28B002E89D305CBA2E26063FADE2.address 178.16.208.56
2CDCFED0142B28B002E89D305CBA2E26063FADE2.or_port 443
2CDCFED0142B28B002E89D305CBA2E26063FADE2.dir_port 80
2CDCFED0142B28B002E89D305CBA2E26063FADE2.nickname jaures
2CDCFED0142B28B002E89D305CBA2E26063FADE2.has_extrainfo false
2CDCFED0142B28B002E89D305CBA2E26063FADE2.orport6_address 2a00:1c20:4089:1234:cd49:b58a:9ebe:67ec
2CDCFED0142B28B002E89D305CBA2E26063FADE2.orport6_port 443
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.address 97.74.237.196
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.or_port 9001
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.dir_port 9030
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.nickname Minotaur
2F0F32AB1E5B943CA7D062C03F18960C86E70D94.has_extrainfo false
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.address 64.113.32.29
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.or_port 9001
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.dir_port 9030
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.nickname Libero
30C19B81981F450C402306E2E7CFB6C3F79CB6B2.has_extrainfo false
328E54981C6DDD7D89B89E418724A4A7881E3192.address 80.127.117.180
328E54981C6DDD7D89B89E418724A4A7881E3192.or_port 443
328E54981C6DDD7D89B89E418724A4A7881E3192.dir_port 80
328E54981C6DDD7D89B89E418724A4A7881E3192.nickname sjc01
328E54981C6DDD7D89B89E418724A4A7881E3192.has_extrainfo false
328E54981C6DDD7D89B89E418724A4A7881E3192.orport6_address 2001:985:e77:10::4
328E54981C6DDD7D89B89E418724A4A7881E3192.orport6_port 443
330CD3DB6AD266DC70CDB512B036957D03D9BC59.address 185.100.84.212
330CD3DB6AD266DC70CDB512B036957D03D9BC59.or_port 443
330CD3DB6AD266DC70CDB512B036957D03D9BC59.dir_port 80
330CD3DB6AD266DC70CDB512B036957D03D9BC59.nickname TeamTardis
330CD3DB6AD266DC70CDB512B036957D03D9BC59.has_extrainfo false
330CD3DB6AD266DC70CDB512B036957D03D9BC59.orport6_address 2a06:1700:0:7::1
330CD3DB6AD266DC70CDB512B036957D03D9BC59.orport6_port 443
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.address 163.172.13.165
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.or_port 9001
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.dir_port 9030
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.nickname mullbinde9
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.has_extrainfo false
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.orport6_address 2001:bc8:38cb:201::8
33DA0CAB7C27812EFF2E22C9705630A54D101FEB.orport6_port 9001
3711E80B5B04494C971FB0459D4209AB7F2EA799.address 91.121.23.100
3711E80B5B04494C971FB0459D4209AB7F2EA799.or_port 9001
3711E80B5B04494C971FB0459D4209AB7F2EA799.dir_port 9030
3711E80B5B04494C971FB0459D4209AB7F2EA799.nickname 0x3d002
3711E80B5B04494C971FB0459D4209AB7F2EA799.has_extrainfo false
379FB450010D17078B3766C2273303C358C3A442.address 176.126.252.12
379FB450010D17078B3766C2273303C358C3A442.or_port 8080
379FB450010D17078B3766C2273303C358C3A442.dir_port 21
379FB450010D17078B3766C2273303C358C3A442.nickname aurora
379FB450010D17078B3766C2273303C358C3A442.has_extrainfo true
379FB450010D17078B3766C2273303C358C3A442.orport6_address 2a02:59e0:0:7::12
379FB450010D17078B3766C2273303C358C3A442.orport6_port 81
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.address 62.210.92.11
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.or_port 9101
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.dir_port 9130
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.nickname redjohn1
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.has_extrainfo false
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.orport6_address 2001:bc8:338c::1
387B065A38E4DAA16D9D41C2964ECBC4B31D30FF.orport6_port 9101
39F096961ED2576975C866D450373A9913AFDC92.address 198.50.191.95
39F096961ED2576975C866D450373A9913AFDC92.or_port 443
39F096961ED2576975C866D450373A9913AFDC92.dir_port 80
39F096961ED2576975C866D450373A9913AFDC92.nickname thomas
39F096961ED2576975C866D450373A9913AFDC92.has_extrainfo false
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.address 164.132.77.175
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.or_port 9001
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.dir_port 9030
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.nickname rofltor1
3B33F6FCA645AD4E91428A3AF7DC736AD9FB727B.has_extrainfo false
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.address 212.83.154.33
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.or_port 443
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.dir_port 8888
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.nickname bauruine203
3C79699D4FBC37DE1A212D5033B56DAE079AC0EF.has_extrainfo false
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.address 176.10.107.180
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.or_port 9001
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.dir_port 9030
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.nickname schokomilch
3D7E274A87D9A89AF064C13D1EE4CA1F184F2600.has_extrainfo false
3E53D3979DB07EFD736661C934A1DED14127B684.address 217.79.179.177
3E53D3979DB07EFD736661C934A1DED14127B684.or_port 9001
3E53D3979DB07EFD736661C934A1DED14127B684.dir_port 9030
3E53D3979DB07EFD736661C934A1DED14127B684.nickname Unnamed
3E53D3979DB07EFD736661C934A1DED14127B684.has_extrainfo false
3E53D3979DB07EFD736661C934A1DED14127B684.orport6_address 2001:4ba0:fff9:131:6c4f::90d3
3E53D3979DB07EFD736661C934A1DED14127B684.orport6_port 9001
4061C553CA88021B8302F0814365070AAE617270.address 185.100.85.101
4061C553CA88021B8302F0814365070AAE617270.or_port 9001
4061C553CA88021B8302F0814365070AAE617270.dir_port 9030
4061C553CA88021B8302F0814365070AAE617270.nickname TorExitRomania
4061C553CA88021B8302F0814365070AAE617270.has_extrainfo false
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.address 199.249.223.61
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.or_port 443
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.dir_port 80
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.nickname Quintex12
40E7D6CE5085E4CDDA31D51A29D1457EB53F12AD.has_extrainfo false
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.address 178.17.170.156
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.or_port 9001
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.dir_port 9030
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.nickname TorExitMoldova2
41C59606AFE1D1AA6EC6EF6719690B856F0B6587.has_extrainfo false
439D0447772CB107B886F7782DBC201FA26B92D1.address 178.62.86.96
439D0447772CB107B886F7782DBC201FA26B92D1.or_port 9001
439D0447772CB107B886F7782DBC201FA26B92D1.dir_port 9030
439D0447772CB107B886F7782DBC201FA26B92D1.nickname pablobm001
439D0447772CB107B886F7782DBC201FA26B92D1.has_extrainfo false
439D0447772CB107B886F7782DBC201FA26B92D1.orport6_address 2a03:b0c0:1:d0::3cf:7001
439D0447772CB107B886F7782DBC201FA26B92D1.orport6_port 9050
4623A9EC53BFD83155929E56D6F7B55B5E718C24.address 163.172.157.213
4623A9EC53BFD83155929E56D6F7B55B5E718C24.or_port 443
4623A9EC53BFD83155929E56D6F7B55B5E718C24.dir_port 8080
4623A9EC53BFD83155929E56D6F7B55B5E718C24.nickname Cotopaxi
4623A9EC53BFD83155929E56D6F7B55B5E718C24.has_extrainfo false
46791D156C9B6C255C2665D4D8393EC7DBAA7798.address 31.31.78.49
46791D156C9B6C255C2665D4D8393EC7DBAA7798.or_port 443
46791D156C9B6C255C2665D4D8393EC7DBAA7798.dir_port 80
46791D156C9B6C255C2665D4D8393EC7DBAA7798.nickname KrigHaBandolo
46791D156C9B6C255C2665D4D8393EC7DBAA7798.has_extrainfo false
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.address 193.70.43.76
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.or_port 9001
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.dir_port 9030
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.nickname Aerodynamik03
484A10BA2B8D48A5F0216674C8DD50EF27BC32F3.has_extrainfo false
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.address 37.187.102.186
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.or_port 9001
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.dir_port 9030
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.nickname txtfileTorNode65536
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.has_extrainfo false
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.orport6_address 2001:41d0:a:26ba::1
489D94333DF66D57FFE34D9D59CC2D97E2CB0053.orport6_port 9001
4CC9CC9195EC38645B699A33307058624F660CCF.address 51.254.101.242
4CC9CC9195EC38645B699A33307058624F660CCF.or_port 9001
4CC9CC9195EC38645B699A33307058624F660CCF.dir_port 9002
4CC9CC9195EC38645B699A33307058624F660CCF.nickname devsum
4CC9CC9195EC38645B699A33307058624F660CCF.has_extrainfo false
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.address 108.53.208.157
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.or_port 443
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.dir_port 80
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.nickname Binnacle
4F0DB7E687FC7C0AE55C8F243DA8B0EB27FBF1F2.has_extrainfo true
50586E25BE067FD1F739998550EDDCB1A14CA5B2.address 212.51.134.123
50586E25BE067FD1F739998550EDDCB1A14CA5B2.or_port 9001
50586E25BE067FD1F739998550EDDCB1A14CA5B2.dir_port 9030
50586E25BE067FD1F739998550EDDCB1A14CA5B2.nickname Jans
50586E25BE067FD1F739998550EDDCB1A14CA5B2.has_extrainfo false
51E1CF613FD6F9F11FE24743C91D6F9981807D82.address 81.7.16.182
51E1CF613FD6F9F11FE24743C91D6F9981807D82.or_port 443
51E1CF613FD6F9F11FE24743C91D6F9981807D82.dir_port 80
51E1CF613FD6F9F11FE24743C91D6F9981807D82.nickname torpidsDEisppro3
51E1CF613FD6F9F11FE24743C91D6F9981807D82.has_extrainfo false
51E1CF613FD6F9F11FE24743C91D6F9981807D82.orport6_address 2a02:180:1:1::517:10b6
51E1CF613FD6F9F11FE24743C91D6F9981807D82.orport6_port 993
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.address 85.25.159.65
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.or_port 80
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.dir_port 995
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.nickname BeastieJoy63
52BFADA8BEAA01BA46C8F767F83C18E2FE50C1B9.has_extrainfo false
587E0A9552E4274B251F29B5B2673D38442EE4BF.address 95.130.12.119
587E0A9552E4274B251F29B5B2673D38442EE4BF.or_port 443
587E0A9552E4274B251F29B5B2673D38442EE4BF.dir_port 80
587E0A9552E4274B251F29B5B2673D38442EE4BF.nickname Nuath
587E0A9552E4274B251F29B5B2673D38442EE4BF.has_extrainfo false
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.address 185.21.100.50
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.or_port 9001
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.dir_port 9030
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.nickname SamAAdams2
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.has_extrainfo false
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.orport6_address 2a00:1158:2:cd00:0:74:6f:72
58ED9C9C35E433EE58764D62892B4FFD518A3CD0.orport6_port 443
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.address 172.98.193.43
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.or_port 443
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.dir_port 80
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.nickname Backplane
5E56738E7F97AA81DEEF59AF28494293DFBFCCDF.has_extrainfo false
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.address 199.249.223.74
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.or_port 443
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.dir_port 80
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.nickname QuintexAirVPN7
5F4CD12099AF20FAF9ADFDCEC65316A376D0201C.has_extrainfo false
616081EC829593AF4232550DE6FFAA1D75B37A90.address 95.128.43.164
616081EC829593AF4232550DE6FFAA1D75B37A90.or_port 443
616081EC829593AF4232550DE6FFAA1D75B37A90.dir_port 80
616081EC829593AF4232550DE6FFAA1D75B37A90.nickname AquaRayTerminus
616081EC829593AF4232550DE6FFAA1D75B37A90.has_extrainfo false
616081EC829593AF4232550DE6FFAA1D75B37A90.orport6_address 2a02:ec0:209:10::4
616081EC829593AF4232550DE6FFAA1D75B37A90.orport6_port 443
68F175CCABE727AA2D2309BCD8789499CEE36ED7.address 163.172.139.104
68F175CCABE727AA2D2309BCD8789499CEE36ED7.or_port 443
68F175CCABE727AA2D2309BCD8789499CEE36ED7.dir_port 8080
68F175CCABE727AA2D2309BCD8789499CEE36ED7.nickname Pichincha
68F175CCABE727AA2D2309BCD8789499CEE36ED7.has_extrainfo false
6A7551EEE18F78A9813096E82BF84F740D32B911.address 85.214.62.48
6A7551EEE18F78A9813096E82BF84F740D32B911.or_port 443
6A7551EEE18F78A9813096E82BF84F740D32B911.dir_port 80
6A7551EEE18F78A9813096E82BF84F740D32B911.nickname TorMachine
6A7551EEE18F78A9813096E82BF84F740D32B911.has_extrainfo false
6EF897645B79B6CB35E853B32506375014DE3621.address 80.127.137.19
6EF897645B79B6CB35E853B32506375014DE3621.or_port 443
6EF897645B79B6CB35E853B32506375014DE3621.dir_port 80
6EF897645B79B6CB35E853B32506375014DE3621.nickname d6relay
6EF897645B79B6CB35E853B32506375014DE3621.has_extrainfo false
6EF897645B79B6CB35E853B32506375014DE3621.orport6_address 2001:981:47c1:1::6
6EF897645B79B6CB35E853B32506375014DE3621.orport6_port 443
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.address 85.235.250.88
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.or_port 443
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.dir_port 80
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.nickname TykRelay01
72B2B12A3F60408BDBC98C6DF53988D3A0B3F0EE.has_extrainfo false
7600680249A22080ECC6173FBBF64D6FCF330A61.address 81.7.14.31
7600680249A22080ECC6173FBBF64D6FCF330A61.or_port 443
7600680249A22080ECC6173FBBF64D6FCF330A61.dir_port 9001
7600680249A22080ECC6173FBBF64D6FCF330A61.nickname Ichotolot62
7600680249A22080ECC6173FBBF64D6FCF330A61.has_extrainfo false
763C9556602BD6207771A7A3D958091D44C43228.address 134.119.36.135
763C9556602BD6207771A7A3D958091D44C43228.or_port 443
763C9556602BD6207771A7A3D958091D44C43228.dir_port 80
763C9556602BD6207771A7A3D958091D44C43228.nickname torpidsDEdomainf2
763C9556602BD6207771A7A3D958091D44C43228.has_extrainfo false
763C9556602BD6207771A7A3D958091D44C43228.orport6_address 2a00:1158:3::2a8
763C9556602BD6207771A7A3D958091D44C43228.orport6_port 993
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.address 188.166.133.133
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.or_port 9001
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.dir_port 9030
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.nickname dropsy
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.has_extrainfo false
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.orport6_address 2a03:b0c0:2:d0::26c0:1
774555642FDC1E1D4FDF2E0C31B7CA9501C5C9C7.orport6_port 9001
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.address 5.196.23.64
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.or_port 9001
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.dir_port 9030
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.nickname Aerodynamik01
775B0FAFDE71AADC23FFC8782B7BEB1D5A92733E.has_extrainfo false
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.address 81.30.158.213
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.or_port 9001
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.dir_port 9030
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.nickname dumpster
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.has_extrainfo false
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.orport6_address 2001:4ba0:cafe:e84::1
789EA6C9AE9ADDD8760903171CFA9AC5741B0C70.orport6_port 9001
78E2BE744A53631B4AAB781468E94C52AB73968B.address 104.200.20.46
78E2BE744A53631B4AAB781468E94C52AB73968B.or_port 9001
78E2BE744A53631B4AAB781468E94C52AB73968B.dir_port 80
78E2BE744A53631B4AAB781468E94C52AB73968B.nickname bynumlawtor
78E2BE744A53631B4AAB781468E94C52AB73968B.has_extrainfo false
79E169B25E4C7CE99584F6ED06F379478F23E2B8.address 62.210.129.246
79E169B25E4C7CE99584F6ED06F379478F23E2B8.or_port 443
79E169B25E4C7CE99584F6ED06F379478F23E2B8.dir_port 80
79E169B25E4C7CE99584F6ED06F379478F23E2B8.nickname MilesPrower
79E169B25E4C7CE99584F6ED06F379478F23E2B8.has_extrainfo false
7A32C9519D80CA458FC8B034A28F5F6815649A98.address 82.223.21.74
7A32C9519D80CA458FC8B034A28F5F6815649A98.or_port 9001
7A32C9519D80CA458FC8B034A28F5F6815649A98.dir_port 9030
7A32C9519D80CA458FC8B034A28F5F6815649A98.nickname silentrocket
7A32C9519D80CA458FC8B034A28F5F6815649A98.has_extrainfo false
7A32C9519D80CA458FC8B034A28F5F6815649A98.orport6_address 2001:470:53e0::cafe
7A32C9519D80CA458FC8B034A28F5F6815649A98.orport6_port 9050
7BB70F8585DFC27E75D692970C0EEB0F22983A63.address 51.254.136.195
7BB70F8585DFC27E75D692970C0EEB0F22983A63.or_port 443
7BB70F8585DFC27E75D692970C0EEB0F22983A63.dir_port 80
7BB70F8585DFC27E75D692970C0EEB0F22983A63.nickname torproxy02
7BB70F8585DFC27E75D692970C0EEB0F22983A63.has_extrainfo false
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.address 77.247.181.162
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.or_port 443
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.dir_port 80
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.nickname sofia
7BFB908A3AA5B491DA4CA72CCBEE0E1F2A939B55.has_extrainfo false
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.address 185.100.84.82
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.or_port 443
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.dir_port 80
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.nickname saveyourprivacyexit
7D05A38E39FC5D29AFE6BE487B9B4DC9E635D09E.has_extrainfo false
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.address 199.249.223.69
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.or_port 443
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.dir_port 80
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.nickname Quintex20
7FA8E7E44F1392A4E40FFC3B69DB3B00091B7FD3.has_extrainfo false
80AAF8D5956A43C197104CEF2550CD42D165C6FB.address 193.11.114.45
80AAF8D5956A43C197104CEF2550CD42D165C6FB.or_port 9002
80AAF8D5956A43C197104CEF2550CD42D165C6FB.dir_port 9031
80AAF8D5956A43C197104CEF2550CD42D165C6FB.nickname mdfnet2
80AAF8D5956A43C197104CEF2550CD42D165C6FB.has_extrainfo false
8456DFA94161CDD99E480C2A2992C366C6564410.address 62.210.254.132
8456DFA94161CDD99E480C2A2992C366C6564410.or_port 443
8456DFA94161CDD99E480C2A2992C366C6564410.dir_port 80
8456DFA94161CDD99E480C2A2992C366C6564410.nickname turingmachine
8456DFA94161CDD99E480C2A2992C366C6564410.has_extrainfo false
855BC2DABE24C861CD887DB9B2E950424B49FC34.address 85.230.184.93
855BC2DABE24C861CD887DB9B2E950424B49FC34.or_port 443
855BC2DABE24C861CD887DB9B2E950424B49FC34.dir_port 9030
855BC2DABE24C861CD887DB9B2E950424B49FC34.nickname Logforme
855BC2DABE24C861CD887DB9B2E950424B49FC34.has_extrainfo false
8567AD0A6369ED08527A8A8533A5162AC00F7678.address 72.52.75.27
8567AD0A6369ED08527A8A8533A5162AC00F7678.or_port 9001
8567AD0A6369ED08527A8A8533A5162AC00F7678.dir_port 9030
8567AD0A6369ED08527A8A8533A5162AC00F7678.nickname piecoopdotnet
8567AD0A6369ED08527A8A8533A5162AC00F7678.has_extrainfo false
86C281AD135058238D7A337D546C902BE8505DDE.address 185.96.88.29
86C281AD135058238D7A337D546C902BE8505DDE.or_port 443
86C281AD135058238D7A337D546C902BE8505DDE.dir_port 80
86C281AD135058238D7A337D546C902BE8505DDE.nickname TykRelay05
86C281AD135058238D7A337D546C902BE8505DDE.has_extrainfo false
88487BDD980BF6E72092EE690E8C51C0AA4A538C.address 176.10.104.243
88487BDD980BF6E72092EE690E8C51C0AA4A538C.or_port 443
88487BDD980BF6E72092EE690E8C51C0AA4A538C.dir_port 80
88487BDD980BF6E72092EE690E8C51C0AA4A538C.nickname DigiGesTor2e1
88487BDD980BF6E72092EE690E8C51C0AA4A538C.has_extrainfo false
8C00FA7369A7A308F6A137600F0FA07990D9D451.address 163.172.194.53
8C00FA7369A7A308F6A137600F0FA07990D9D451.or_port 9001
8C00FA7369A7A308F6A137600F0FA07990D9D451.dir_port 9030
8C00FA7369A7A308F6A137600F0FA07990D9D451.nickname GrmmlLitavis
8C00FA7369A7A308F6A137600F0FA07990D9D451.has_extrainfo false
8C00FA7369A7A308F6A137600F0FA07990D9D451.orport6_address 2001:bc8:225f:142:6c69:7461:7669:73
8C00FA7369A7A308F6A137600F0FA07990D9D451.orport6_port 9001
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.address 5.189.169.190
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.or_port 8080
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.dir_port 8030
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.nickname thanatosDE
8D79F73DCD91FC4F5017422FAC70074D6DB8DD81.has_extrainfo false
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.address 151.80.42.103
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.or_port 9001
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.dir_port 9030
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.nickname matlink
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.has_extrainfo false
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.orport6_address 2001:41d0:e:f67::114
9007C1D8E4F03D506A4A011B907A9E8D04E3C605.orport6_port 9001
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.address 37.187.20.59
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.or_port 443
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.dir_port 80
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.nickname torpidsFRovh
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.has_extrainfo false
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.orport6_address 2001:41d0:a:143b::1
91D23D8A539B83D2FB56AA67ECD4D75CC093AC55.orport6_port 993
9285B22F7953D7874604EEE2B470609AD81C74E9.address 62.138.7.171
9285B22F7953D7874604EEE2B470609AD81C74E9.or_port 8001
9285B22F7953D7874604EEE2B470609AD81C74E9.dir_port 8030
9285B22F7953D7874604EEE2B470609AD81C74E9.nickname 0x3d005
9285B22F7953D7874604EEE2B470609AD81C74E9.has_extrainfo false
92CFD9565B24646CAC2D172D3DB503D69E777B8A.address 178.16.208.57
92CFD9565B24646CAC2D172D3DB503D69E777B8A.or_port 443
92CFD9565B24646CAC2D172D3DB503D69E777B8A.dir_port 80
92CFD9565B24646CAC2D172D3DB503D69E777B8A.nickname bakunin
92CFD9565B24646CAC2D172D3DB503D69E777B8A.has_extrainfo false
92CFD9565B24646CAC2D172D3DB503D69E777B8A.orport6_address 2a00:1c20:4089:1234:7825:2c5d:1ecd:c66f
92CFD9565B24646CAC2D172D3DB503D69E777B8A.orport6_port 443
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.address 91.219.237.244
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.or_port 443
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.dir_port 80
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.nickname lewwerDuarUesSlaav
92ECC9E0E2AF81BB954719B189AC362E254AD4A5.has_extrainfo false
9772EFB535397C942C3AB8804FB35CFFAD012438.address 37.153.1.10
9772EFB535397C942C3AB8804FB35CFFAD012438.or_port 9001
9772EFB535397C942C3AB8804FB35CFFAD012438.dir_port 9030
9772EFB535397C942C3AB8804FB35CFFAD012438.nickname smallsweatnode
9772EFB535397C942C3AB8804FB35CFFAD012438.has_extrainfo false
998BF3ED7F70E33D1C307247B9626D9E7573C438.address 163.172.223.200
998BF3ED7F70E33D1C307247B9626D9E7573C438.or_port 443
998BF3ED7F70E33D1C307247B9626D9E7573C438.dir_port 80
998BF3ED7F70E33D1C307247B9626D9E7573C438.nickname Outfall2
998BF3ED7F70E33D1C307247B9626D9E7573C438.has_extrainfo false
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.address 91.229.20.27
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.or_port 9001
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.dir_port 9030
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.nickname gordonkeybag
9A0D54D3A6D2E0767596BF1515E6162A75B3293F.has_extrainfo false
9A68B85A02318F4E7E87F2828039FBD5D75B0142.address 66.111.2.20
9A68B85A02318F4E7E87F2828039FBD5D75B0142.or_port 9001
9A68B85A02318F4E7E87F2828039FBD5D75B0142.dir_port 9030
9A68B85A02318F4E7E87F2828039FBD5D75B0142.nickname NYCBUG0
9A68B85A02318F4E7E87F2828039FBD5D75B0142.has_extrainfo false
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.address 185.100.86.128
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.or_port 9001
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.dir_port 9030
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.nickname TorExitFinland
9B31F1F1C1554F9FFB3455911F82E818EF7C7883.has_extrainfo false
9EC5E097663862DF861A18C32B37C5F82284B27D.address 146.185.177.103
9EC5E097663862DF861A18C32B37C5F82284B27D.or_port 9030
9EC5E097663862DF861A18C32B37C5F82284B27D.dir_port 80
9EC5E097663862DF861A18C32B37C5F82284B27D.nickname Winter
9EC5E097663862DF861A18C32B37C5F82284B27D.has_extrainfo false
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.address 199.249.223.64
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.or_port 443
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.dir_port 80
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.nickname Quintex15
9F2856F6D2B89AD4EF6D5723FAB167DB5A53519A.has_extrainfo false
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.address 46.28.110.244
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.or_port 443
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.dir_port 80
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.nickname Nivrim
9F7D6E6420183C2B76D3CE99624EBC98A21A967E.has_extrainfo false
9FBEB75E8BC142565F12CBBE078D63310236A334.address 91.121.84.137
9FBEB75E8BC142565F12CBBE078D63310236A334.or_port 4052
9FBEB75E8BC142565F12CBBE078D63310236A334.dir_port 4952
9FBEB75E8BC142565F12CBBE078D63310236A334.nickname lindon
9FBEB75E8BC142565F12CBBE078D63310236A334.has_extrainfo false
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.address 46.165.230.5
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.or_port 443
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.dir_port 80
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.nickname Dhalgren
A0F06C2FADF88D3A39AA3072B406F09D7095AC9E.has_extrainfo true
A10C4F666D27364036B562823E5830BC448E046A.address 171.25.193.77
A10C4F666D27364036B562823E5830BC448E046A.or_port 443
A10C4F666D27364036B562823E5830BC448E046A.dir_port 80
A10C4F666D27364036B562823E5830BC448E046A.nickname DFRI1
A10C4F666D27364036B562823E5830BC448E046A.has_extrainfo false
A10C4F666D27364036B562823E5830BC448E046A.orport6_address 2001:67c:289c:3::77
A10C4F666D27364036B562823E5830BC448E046A.orport6_port 443
A2E6BB5C391CD46B38C55B4329C35304540771F1.address 81.7.3.67
A2E6BB5C391CD46B38C55B4329C35304540771F1.or_port 443
A2E6BB5C391CD46B38C55B4329C35304540771F1.dir_port 993
A2E6BB5C391CD46B38C55B4329C35304540771F1.nickname BeastieJoy62
A2E6BB5C391CD46B38C55B4329C35304540771F1.has_extrainfo false
A478E421F83194C114F41E94F95999672AED51FE.address 171.25.193.78
A478E421F83194C114F41E94F95999672AED51FE.or_port 443
A478E421F83194C114F41E94F95999672AED51FE.dir_port 80
A478E421F83194C114F41E94F95999672AED51FE.nickname DFRI4
A478E421F83194C114F41E94F95999672AED51FE.has_extrainfo false
A478E421F83194C114F41E94F95999672AED51FE.orport6_address 2001:67c:289c:3::78
A478E421F83194C114F41E94F95999672AED51FE.orport6_port 443
A4C98CEA3F34E05299417E9F885A642C88EF6029.address 178.16.208.58
A4C98CEA3F34E05299417E9F885A642C88EF6029.or_port 443
A4C98CEA3F34E05299417E9F885A642C88EF6029.dir_port 80
A4C98CEA3F34E05299417E9F885A642C88EF6029.nickname jaures2
A4C98CEA3F34E05299417E9F885A642C88EF6029.has_extrainfo false
A4C98CEA3F34E05299417E9F885A642C88EF6029.orport6_address 2a00:1c20:4089:1234:cdae:1b3e:cc38:3d45
A4C98CEA3F34E05299417E9F885A642C88EF6029.orport6_port 443
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.address 163.172.149.122
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.or_port 443
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.dir_port 80
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.nickname niij03
A9406A006D6E7B5DA30F2C6D4E42A338B5E340B2.has_extrainfo false
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.address 195.154.164.243
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.or_port 443
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.dir_port 80
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.nickname torpidsFRonline3
AC66FFA4AB35A59EBBF5BF4C70008BF24D8A7A5C.has_extrainfo false
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.address 86.59.119.88
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.or_port 443
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.dir_port 80
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.nickname ph3x
ACD889D86E02EDDAB1AFD81F598C0936238DC6D0.has_extrainfo false
ACDD9E85A05B127BA010466C13C8C47212E8A38F.address 185.129.62.62
ACDD9E85A05B127BA010466C13C8C47212E8A38F.or_port 9001
ACDD9E85A05B127BA010466C13C8C47212E8A38F.dir_port 9030
ACDD9E85A05B127BA010466C13C8C47212E8A38F.nickname kramse
ACDD9E85A05B127BA010466C13C8C47212E8A38F.has_extrainfo false
ACDD9E85A05B127BA010466C13C8C47212E8A38F.orport6_address 2a06:d380:0:3700::62
ACDD9E85A05B127BA010466C13C8C47212E8A38F.orport6_port 9001
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.address 188.40.128.246
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.or_port 9001
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.dir_port 9030
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.nickname sputnik
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.has_extrainfo false
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.orport6_address 2a01:4f8:221:1ac1:dead:beef:7005:9001
AD19490C7DBB26D3A68EFC824F67E69B0A96E601.orport6_port 9001
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.address 176.126.252.11
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.or_port 9001
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.dir_port 443
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.nickname chulak
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.has_extrainfo true
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.orport6_address 2a02:59e0:0:7::11
B0279A521375F3CB2AE210BDBFC645FDD2E1973A.orport6_port 9003
B0553175AADB0501E5A61FC61CEA3970BE130FF2.address 5.9.147.226
B0553175AADB0501E5A61FC61CEA3970BE130FF2.or_port 9001
B0553175AADB0501E5A61FC61CEA3970BE130FF2.dir_port 9030
B0553175AADB0501E5A61FC61CEA3970BE130FF2.nickname zwiubel
B0553175AADB0501E5A61FC61CEA3970BE130FF2.has_extrainfo false
B0553175AADB0501E5A61FC61CEA3970BE130FF2.orport6_address 2a01:4f8:190:30e1::2
B0553175AADB0501E5A61FC61CEA3970BE130FF2.orport6_port 9001
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.address 178.17.174.14
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.or_port 9001
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.dir_port 9030
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.nickname TorExitMoldova
B06F093A3D4DFAD3E923F4F28A74901BD4F74EB1.has_extrainfo false
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.address 199.249.223.40
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.or_port 443
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.dir_port 80
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.nickname Quintex31
B0CD9F9B5B60651ADC5919C0F1EAA87DBA1D9249.has_extrainfo false
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.address 212.129.62.232
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.or_port 443
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.dir_port 80
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.nickname wardsback
B143D439B72D239A419F8DCE07B8A8EB1B486FA7.has_extrainfo false
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.address 136.243.214.137
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.or_port 443
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.dir_port 80
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.nickname TorKIT
B291D30517D23299AD7CEE3E60DFE60D0E3A4664.has_extrainfo false
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.address 212.47.233.86
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.or_port 9001
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.dir_port 9030
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.nickname netimanmu
B4CAFD9CBFB34EC5DAAC146920DC7DFAFE91EA20.has_extrainfo false
B5212DB685A2A0FCFBAE425738E478D12361710D.address 93.115.97.242
B5212DB685A2A0FCFBAE425738E478D12361710D.or_port 9001
B5212DB685A2A0FCFBAE425738E478D12361710D.dir_port 9030
B5212DB685A2A0FCFBAE425738E478D12361710D.nickname firstor
B5212DB685A2A0FCFBAE425738E478D12361710D.has_extrainfo false
B6904ADD4C0D10CDA7179E051962350A69A63243.address 81.2.209.10
B6904ADD4C0D10CDA7179E051962350A69A63243.or_port 80
B6904ADD4C0D10CDA7179E051962350A69A63243.dir_port 443
B6904ADD4C0D10CDA7179E051962350A69A63243.nickname torzabehlice
B6904ADD4C0D10CDA7179E051962350A69A63243.has_extrainfo false
B6904ADD4C0D10CDA7179E051962350A69A63243.orport6_address 2001:15e8:201:1::d10a
B6904ADD4C0D10CDA7179E051962350A69A63243.orport6_port 80
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.address 193.11.114.46
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.or_port 9003
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.dir_port 9032
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.nickname mdfnet3
B83DC1558F0D34353BB992EF93AFEAFDB226A73E.has_extrainfo false
B86137AE9681701901C6720E55C16805B46BD8E3.address 81.7.11.186
B86137AE9681701901C6720E55C16805B46BD8E3.or_port 443
B86137AE9681701901C6720E55C16805B46BD8E3.dir_port 1080
B86137AE9681701901C6720E55C16805B46BD8E3.nickname BeastieJoy60
B86137AE9681701901C6720E55C16805B46BD8E3.has_extrainfo false
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.address 197.231.221.211
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.or_port 443
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.dir_port 9030
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.nickname IPredator
BC630CBBB518BE7E9F4E09712AB0269E9DC7D626.has_extrainfo false
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.address 198.96.155.3
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.or_port 5001
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.dir_port 8080
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.nickname gurgle
BCEDF6C193AA687AE471B8A22EBF6BC57C2D285E.has_extrainfo false
BCEF908195805E03E92CCFE669C48738E556B9C5.address 128.199.55.207
BCEF908195805E03E92CCFE669C48738E556B9C5.or_port 9001
BCEF908195805E03E92CCFE669C48738E556B9C5.dir_port 9030
BCEF908195805E03E92CCFE669C48738E556B9C5.nickname EldritchReaper
BCEF908195805E03E92CCFE669C48738E556B9C5.has_extrainfo false
BCEF908195805E03E92CCFE669C48738E556B9C5.orport6_address 2a03:b0c0:2:d0::158:3001
BCEF908195805E03E92CCFE669C48738E556B9C5.orport6_port 9001
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.address 213.141.138.174
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.or_port 9001
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.dir_port 9030
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.nickname Schakalium
BD552C165E2ED2887D3F1CCE9CFF155DDA2D86E6.has_extrainfo false
BF735F669481EE1CCC348F0731551C933D1E2278.address 104.192.5.248
BF735F669481EE1CCC348F0731551C933D1E2278.or_port 9001
BF735F669481EE1CCC348F0731551C933D1E2278.dir_port 9030
BF735F669481EE1CCC348F0731551C933D1E2278.nickname Freeway11
BF735F669481EE1CCC348F0731551C933D1E2278.has_extrainfo false
C2AAB088555850FC434E68943F551072042B85F1.address 31.185.104.21
C2AAB088555850FC434E68943F551072042B85F1.or_port 443
C2AAB088555850FC434E68943F551072042B85F1.dir_port 80
C2AAB088555850FC434E68943F551072042B85F1.nickname Digitalcourage3ip3
C2AAB088555850FC434E68943F551072042B85F1.has_extrainfo false
C37BC191AC389179674578C3E6944E925FE186C2.address 213.239.217.18
C37BC191AC389179674578C3E6944E925FE186C2.or_port 1337
C37BC191AC389179674578C3E6944E925FE186C2.dir_port 1338
C37BC191AC389179674578C3E6944E925FE186C2.nickname xzdsb
C37BC191AC389179674578C3E6944E925FE186C2.has_extrainfo false
C37BC191AC389179674578C3E6944E925FE186C2.orport6_address 2a01:4f8:a0:746a:101:1:1:1
C37BC191AC389179674578C3E6944E925FE186C2.orport6_port 1337
C414F28FD2BEC1553024299B31D4E726BEB8E788.address 188.138.112.60
C414F28FD2BEC1553024299B31D4E726BEB8E788.or_port 1521
C414F28FD2BEC1553024299B31D4E726BEB8E788.dir_port 1433
C414F28FD2BEC1553024299B31D4E726BEB8E788.nickname zebra620
C414F28FD2BEC1553024299B31D4E726BEB8E788.has_extrainfo false
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.address 199.249.223.66
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.or_port 443
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.dir_port 80
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.nickname Quintex17
C5A53BCC174EF8FD0DCB223E4AA929FA557DEDB2.has_extrainfo false
CE47F0356D86CF0A1A2008D97623216D560FB0A8.address 85.25.213.211
CE47F0356D86CF0A1A2008D97623216D560FB0A8.or_port 80
CE47F0356D86CF0A1A2008D97623216D560FB0A8.dir_port 465
CE47F0356D86CF0A1A2008D97623216D560FB0A8.nickname BeastieJoy61
CE47F0356D86CF0A1A2008D97623216D560FB0A8.has_extrainfo false
CED527EAC230E7B56E5B363F839671829C3BA01B.address 51.15.13.245
CED527EAC230E7B56E5B363F839671829C3BA01B.or_port 9001
CED527EAC230E7B56E5B363F839671829C3BA01B.dir_port 9030
CED527EAC230E7B56E5B363F839671829C3BA01B.nickname 0x3d006
CED527EAC230E7B56E5B363F839671829C3BA01B.has_extrainfo false
D30E9D4D639068611D6D96861C95C2099140B805.address 46.38.237.221
D30E9D4D639068611D6D96861C95C2099140B805.or_port 9001
D30E9D4D639068611D6D96861C95C2099140B805.dir_port 9030
D30E9D4D639068611D6D96861C95C2099140B805.nickname mine
D30E9D4D639068611D6D96861C95C2099140B805.has_extrainfo false
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.address 31.171.155.108
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.or_port 9001
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.dir_port 9030
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.nickname TorNodeAlbania
D3E5EDDBE5159388704D6785BE51930AAFACEC6F.has_extrainfo false
D64366987CB39F61AD21DBCF8142FA0577B92811.address 37.221.162.226
D64366987CB39F61AD21DBCF8142FA0577B92811.or_port 9001
D64366987CB39F61AD21DBCF8142FA0577B92811.dir_port 9030
D64366987CB39F61AD21DBCF8142FA0577B92811.nickname kasperskytor01
D64366987CB39F61AD21DBCF8142FA0577B92811.has_extrainfo false
D760C5B436E42F93D77EF2D969157EEA14F9B39C.address 46.101.169.151
D760C5B436E42F93D77EF2D969157EEA14F9B39C.or_port 9001
D760C5B436E42F93D77EF2D969157EEA14F9B39C.dir_port 9030
D760C5B436E42F93D77EF2D969157EEA14F9B39C.nickname DanWin1210
D760C5B436E42F93D77EF2D969157EEA14F9B39C.has_extrainfo false
D760C5B436E42F93D77EF2D969157EEA14F9B39C.orport6_address 2a03:b0c0:3:d0::74f:a001
D760C5B436E42F93D77EF2D969157EEA14F9B39C.orport6_port 9001
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.address 85.10.201.47
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.or_port 9001
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.dir_port 9030
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.nickname sif
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.has_extrainfo false
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.orport6_address 2a01:4f8:a0:43eb::beef
D8B7A3A6542AA54D0946B9DC0257C53B6C376679.orport6_port 9001
DAA39FC00B196B353C2A271459C305C429AF09E4.address 193.35.52.53
DAA39FC00B196B353C2A271459C305C429AF09E4.or_port 9001
DAA39FC00B196B353C2A271459C305C429AF09E4.dir_port 9030
DAA39FC00B196B353C2A271459C305C429AF09E4.nickname Arne
DAA39FC00B196B353C2A271459C305C429AF09E4.has_extrainfo false
DD823AFB415380A802DCAEB9461AE637604107FB.address 178.33.183.251
DD823AFB415380A802DCAEB9461AE637604107FB.or_port 443
DD823AFB415380A802DCAEB9461AE637604107FB.dir_port 80
DD823AFB415380A802DCAEB9461AE637604107FB.nickname grenouille
DD823AFB415380A802DCAEB9461AE637604107FB.has_extrainfo false
DD823AFB415380A802DCAEB9461AE637604107FB.orport6_address 2001:41d0:2:a683::251
DD823AFB415380A802DCAEB9461AE637604107FB.orport6_port 443
DD8BD7307017407FCC36F8D04A688F74A0774C02.address 171.25.193.20
DD8BD7307017407FCC36F8D04A688F74A0774C02.or_port 443
DD8BD7307017407FCC36F8D04A688F74A0774C02.dir_port 80
DD8BD7307017407FCC36F8D04A688F74A0774C02.nickname DFRI0
DD8BD7307017407FCC36F8D04A688F74A0774C02.has_extrainfo false
DD8BD7307017407FCC36F8D04A688F74A0774C02.orport6_address 2001:67c:289c::20
DD8BD7307017407FCC36F8D04A688F74A0774C02.orport6_port 443
DED6892FF89DBD737BA689698A171B2392EB3E82.address 92.222.38.67
DED6892FF89DBD737BA689698A171B2392EB3E82.or_port 443
DED6892FF89DBD737BA689698A171B2392EB3E82.dir_port 80
DED6892FF89DBD737BA689698A171B2392EB3E82.nickname ThorExit
DED6892FF89DBD737BA689698A171B2392EB3E82.has_extrainfo false
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.address 166.70.207.2
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.or_port 9001
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.dir_port 9030
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.nickname xmission
E3DB2E354B883B59E8DC56B3E7A353DDFD457812.has_extrainfo false
E480D577F58E782A5BC4FA6F49A6650E9389302F.address 199.249.223.43
E480D577F58E782A5BC4FA6F49A6650E9389302F.or_port 443
E480D577F58E782A5BC4FA6F49A6650E9389302F.dir_port 80
E480D577F58E782A5BC4FA6F49A6650E9389302F.nickname Quintex34
E480D577F58E782A5BC4FA6F49A6650E9389302F.has_extrainfo false
E589316576A399C511A9781A73DA4545640B479D.address 46.252.26.2
E589316576A399C511A9781A73DA4545640B479D.or_port 49991
E589316576A399C511A9781A73DA4545640B479D.dir_port 45212
E589316576A399C511A9781A73DA4545640B479D.nickname marlen
E589316576A399C511A9781A73DA4545640B479D.has_extrainfo false
E781F4EC69671B3F1864AE2753E0890351506329.address 176.31.180.157
E781F4EC69671B3F1864AE2753E0890351506329.or_port 22
E781F4EC69671B3F1864AE2753E0890351506329.dir_port 143
E781F4EC69671B3F1864AE2753E0890351506329.nickname armbrust
E781F4EC69671B3F1864AE2753E0890351506329.has_extrainfo false
E781F4EC69671B3F1864AE2753E0890351506329.orport6_address 2001:41d0:8:eb9d::1
E781F4EC69671B3F1864AE2753E0890351506329.orport6_port 22
E81EF60A73B3809F8964F73766B01BAA0A171E20.address 212.47.244.38
E81EF60A73B3809F8964F73766B01BAA0A171E20.or_port 443
E81EF60A73B3809F8964F73766B01BAA0A171E20.dir_port 8080
E81EF60A73B3809F8964F73766B01BAA0A171E20.nickname Chimborazo
E81EF60A73B3809F8964F73766B01BAA0A171E20.has_extrainfo false
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.address 217.182.75.181
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.or_port 9001
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.dir_port 9030
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.nickname Aerodynamik02
EFEACD781604EB80FBC025EDEDEA2D523AEAAA2F.has_extrainfo false
F4263275CF54A6836EE7BD527B1328836A6F06E1.address 37.187.102.108
F4263275CF54A6836EE7BD527B1328836A6F06E1.or_port 443
F4263275CF54A6836EE7BD527B1328836A6F06E1.dir_port 80
F4263275CF54A6836EE7BD527B1328836A6F06E1.nickname EvilMoe
F4263275CF54A6836EE7BD527B1328836A6F06E1.has_extrainfo false
F4263275CF54A6836EE7BD527B1328836A6F06E1.orport6_address 2001:41d0:a:266c::1
F4263275CF54A6836EE7BD527B1328836A6F06E1.orport6_port 443
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.address 46.28.109.231
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.or_port 9001
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.dir_port 9030
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.nickname wedostor
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.has_extrainfo false
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.orport6_address 2a02:2b88:2:1::4205:1
F70B7C5CD72D74C7F9F2DC84FA9D20D51BA13610.orport6_port 9001
F93D8F37E35C390BCAD9F9069E13085B745EC216.address 185.96.180.29
F93D8F37E35C390BCAD9F9069E13085B745EC216.or_port 443
F93D8F37E35C390BCAD9F9069E13085B745EC216.dir_port 80
F93D8F37E35C390BCAD9F9069E13085B745EC216.nickname TykRelay06
F93D8F37E35C390BCAD9F9069E13085B745EC216.has_extrainfo false
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.address 86.59.119.83
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.or_port 443
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.dir_port 80
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.nickname ph3x
FC9AC8EA0160D88BCCFDE066940D7DD9FA45495B.has_extrainfo false
FE296180018833AF03A8EACD5894A614623D3F76.address 149.56.45.200
FE296180018833AF03A8EACD5894A614623D3F76.or_port 9001
FE296180018833AF03A8EACD5894A614623D3F76.dir_port 9030
FE296180018833AF03A8EACD5894A614623D3F76.nickname PiotrTorpotkinOne
FE296180018833AF03A8EACD5894A614623D3F76.has_extrainfo false
FE296180018833AF03A8EACD5894A614623D3F76.orport6_address 2607:5300:201:3000::17d3
FE296180018833AF03A8EACD5894A614623D3F76.orport6_port 9002

View file

@ -0,0 +1,287 @@
# Copyright 2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Interaction with a Tor relay's ORPort. :class:`~stem.client.Relay` is
a wrapper for :class:`~stem.socket.RelaySocket`, much the same way as
:class:`~stem.control.Controller` provides higher level functions for
:class:`~stem.socket.ControlSocket`.
.. versionadded:: 1.7.0
::
Relay - Connection with a tor relay's ORPort.
| +- connect - Establishes a connection with a relay.
|
|- is_alive - reports if our connection is open or closed
|- connection_time - time when we last connected or disconnected
|- close - shuts down our connection
|
+- create_circuit - establishes a new circuit
Circuit - Circuit we've established through a relay.
|- send - sends a message through this circuit
+- close - closes this circuit
"""
import hashlib
import threading
import stem
import stem.client.cell
import stem.socket
import stem.util.connection
from stem.client.datatype import ZERO, LinkProtocol, Address, KDF, split
# Public submodules of stem.client.
__all__ = [
  'cell',
  'datatype',
]

# Link protocol versions offered by default when negotiating with a relay
# (passed as the 'link_protocols' argument of Relay.connect).
DEFAULT_LINK_PROTOCOLS = (3, 4, 5)
class Relay(object):
"""
Connection with a Tor relay's ORPort.
:var int link_protocol: link protocol version we established
"""
def __init__(self, orport, link_protocol):
  """
  Wraps an established ORPort connection.

  :param stem.socket.RelaySocket orport: socket connected to the relay's ORPort
  :param int link_protocol: link protocol version negotiated with the relay
  """

  # circuit id => stem.client.Circuit mapping of the circuits we've opened
  self._circuits = {}

  # guards concurrent use of our ORPort socket
  self._orport_lock = threading.RLock()
  self._orport = orport

  self.link_protocol = LinkProtocol(link_protocol)
@staticmethod
def connect(address, port, link_protocols = DEFAULT_LINK_PROTOCOLS):
"""
Establishes a connection with the given ORPort.
:param str address: ip address of the relay
:param int port: ORPort of the relay
:param tuple link_protocols: acceptable link protocol versions
:raises:
* **ValueError** if address or port are invalid
* :class:`stem.SocketError` if we're unable to establish a connection
"""
relay_addr = Address(address)
if not stem.util.connection.is_valid_port(port):
raise ValueError("'%s' isn't a valid port" % port)
elif not link_protocols:
raise ValueError("Connection can't be established without a link protocol.")
try:
conn = stem.socket.RelaySocket(address, port)
except stem.SocketError as exc:
if 'Connection refused' in str(exc):
raise stem.SocketError("Failed to connect to %s:%i. Maybe it isn't an ORPort?" % (address, port))
# If not an ORPort (for instance, mistakenly connecting to a ControlPort
# instead) we'll likely fail during SSL negotiation. This can result
# in a variety of responses so normalizing what we can...
#
# Debian 9.5: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:661)
# Ubuntu 16.04: [SSL: UNKNOWN_PROTOCOL] unknown protocol (_ssl.c:590)
# Ubuntu 12.04: [Errno 1] _ssl.c:504: error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol
if 'unknown protocol' in str(exc) or 'wrong version number' in str(exc):
raise stem.SocketError("Failed to SSL authenticate to %s:%i. Maybe it isn't an ORPort?" % (address, port))
raise
# To negotiate our link protocol the first VERSIONS cell is expected to use
# a circuit ID field size from protocol version 1-3 for backward
# compatibility...
#
# The first VERSIONS cell, and any cells sent before the
# first VERSIONS cell, always have CIRCID_LEN == 2 for backward
# compatibility.
conn.send(stem.client.cell.VersionsCell(link_protocols).pack(2))
response = conn.recv()
# Link negotiation ends right away if we lack a common protocol
# version. (#25139)
if not response:
conn.close()
raise stem.SocketError('Unable to establish a common link protocol with %s:%i' % (address, port))
versions_reply = stem.client.cell.Cell.pop(response, 2)[0]
common_protocols = set(link_protocols).intersection(versions_reply.versions)
if not common_protocols:
conn.close()
raise stem.SocketError('Unable to find a common link protocol. We support %s but %s:%i supports %s.' % (', '.join(link_protocols), address, port, ', '.join(versions_reply.versions)))
# Establishing connections requires sending a NETINFO, but including our
# address is optional. We can revisit including it when we have a usecase
# where it would help.
link_protocol = max(common_protocols)
conn.send(stem.client.cell.NetinfoCell(relay_addr, []).pack(link_protocol))
return Relay(conn, link_protocol)
def is_alive(self):
"""
Checks if our socket is currently connected. This is a pass-through for our
socket's :func:`~stem.socket.BaseSocket.is_alive` method.
:returns: **bool** that's **True** if our socket is connected and **False** otherwise
"""
return self._orport.is_alive()
def connection_time(self):
"""
Provides the unix timestamp for when our socket was either connected or
disconnected. That is to say, the time we connected if we're currently
connected and the time we disconnected if we're not connected.
:returns: **float** for when we last connected or disconnected, zero if
we've never connected
"""
return self._orport.connection_time()
def close(self):
"""
Closes our socket connection. This is a pass-through for our socket's
:func:`~stem.socket.BaseSocket.close` method.
"""
with self._orport_lock:
return self._orport.close()
def create_circuit(self):
"""
Establishes a new circuit.
"""
with self._orport_lock:
circ_id = max(self._circuits) + 1 if self._circuits else self.link_protocol.first_circ_id
create_fast_cell = stem.client.cell.CreateFastCell(circ_id)
self._orport.send(create_fast_cell.pack(self.link_protocol))
response = stem.client.cell.Cell.unpack(self._orport.recv(), self.link_protocol)
created_fast_cells = filter(lambda cell: isinstance(cell, stem.client.cell.CreatedFastCell), response)
if not created_fast_cells:
raise ValueError('We should get a CREATED_FAST response from a CREATE_FAST request')
created_fast_cell = list(created_fast_cells)[0]
kdf = KDF.from_value(create_fast_cell.key_material + created_fast_cell.key_material)
if created_fast_cell.derivative_key != kdf.key_hash:
raise ValueError('Remote failed to prove that it knows our shared key')
circ = Circuit(self, circ_id, kdf)
self._circuits[circ.id] = circ
return circ
def __iter__(self):
with self._orport_lock:
for circ in self._circuits.values():
yield circ
def __enter__(self):
return self
def __exit__(self, exit_type, value, traceback):
self.close()
class Circuit(object):
  """
  Circuit through which requests can be made of a `Tor relay's ORPort
  <https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_.

  :var stem.client.Relay relay: relay through which this circuit has been established
  :var int id: circuit id
  :var hashlib.sha1 forward_digest: digest for forward integrity check
  :var hashlib.sha1 backward_digest: digest for backward integrity check
  :var bytes forward_key: forward encryption key
  :var bytes backward_key: backward encryption key

  :raises: **ImportError** if the cryptography module is unavailable
  """

  def __init__(self, relay, circ_id, kdf):
    # BUGFIX: import stem.prereq explicitly - this module never imported it,
    # so the attribute access below could fail if no other module had

    import stem.prereq

    if not stem.prereq.is_crypto_available():
      raise ImportError('Circuit construction requires the cryptography module')

    # Importing this module in instances where cryptography isn't available
    # would break stem, so this is imported lazily.

    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
    from cryptography.hazmat.backends import default_backend

    # AES-CTR cipher with a zeroed counter/IV

    ctr = modes.CTR(ZERO * (algorithms.AES.block_size // 8))

    self.relay = relay
    self.id = circ_id
    self.forward_digest = hashlib.sha1(kdf.forward_digest)
    self.backward_digest = hashlib.sha1(kdf.backward_digest)
    self.forward_key = Cipher(algorithms.AES(kdf.forward_key), ctr, default_backend()).encryptor()
    self.backward_key = Cipher(algorithms.AES(kdf.backward_key), ctr, default_backend()).decryptor()

  def send(self, command, data = '', stream_id = 0):
    """
    Sends a message over the circuit.

    :param stem.client.datatype.RelayCommand command: command to be issued
    :param bytes data: message payload
    :param int stream_id: specific stream this concerns

    :returns: **list** of :class:`~stem.client.cell.RelayCell` responses

    :raises: :class:`stem.ProtocolError` if the response is malformed
    """

    with self.relay._orport_lock:
      # Encrypt and send the cell. Our digest/key only updates if the cell is
      # successfully sent.

      cell = stem.client.cell.RelayCell(self.id, command, data, stream_id = stream_id)
      payload, forward_key, forward_digest = cell.encrypt(self.relay.link_protocol, self.forward_key, self.forward_digest)
      self.relay._orport.send(payload)

      self.forward_digest = forward_digest
      self.forward_key = forward_key

      # Decrypt relay cells received in response. Again, our digest/key only
      # updates when handled successfully.

      reply = self.relay._orport.recv()
      reply_cells = []

      if len(reply) % self.relay.link_protocol.fixed_cell_length != 0:
        raise stem.ProtocolError('Circuit response should be a series of RELAY cells, but received an unexpected size for a response: %i' % len(reply))

      while reply:
        encrypted_cell, reply = split(reply, self.relay.link_protocol.fixed_cell_length)
        decrypted_cell, backward_key, backward_digest = stem.client.cell.RelayCell.decrypt(self.relay.link_protocol, encrypted_cell, self.backward_key, self.backward_digest)

        if self.id != decrypted_cell.circ_id:
          raise stem.ProtocolError('Response should be for circuit id %i, not %i' % (self.id, decrypted_cell.circ_id))

        self.backward_digest = backward_digest
        self.backward_key = backward_key

        reply_cells.append(decrypted_cell)

      return reply_cells

  def close(self):
    """
    Closes this circuit by sending a DESTROY cell and dropping it from our
    relay's circuit registry.
    """

    with self.relay._orport_lock:
      self.relay._orport.send(stem.client.cell.DestroyCell(self.id).pack(self.relay.link_protocol))
      del self.relay._circuits[self.id]

  def __enter__(self):
    return self

  def __exit__(self, exit_type, value, traceback):
    self.close()

View file

@ -0,0 +1,859 @@
# Copyright 2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Messages communicated over a Tor relay's ORPort.
.. versionadded:: 1.7.0
**Module Overview:**
::
Cell - Base class for ORPort messages.
|- CircuitCell - Circuit management.
| |- CreateCell - Create a circuit. (section 5.1)
| |- CreatedCell - Acknowledge create. (section 5.1)
| |- RelayCell - End-to-end data. (section 6.1)
| |- DestroyCell - Stop using a circuit. (section 5.4)
| |- CreateFastCell - Create a circuit, no PK. (section 5.1)
| |- CreatedFastCell - Circuit created, no PK. (section 5.1)
| |- RelayEarlyCell - End-to-end data; limited. (section 5.6)
| |- Create2Cell - Extended CREATE cell. (section 5.1)
| +- Created2Cell - Extended CREATED cell. (section 5.1)
|
|- PaddingCell - Padding negotiation. (section 7.2)
|- VersionsCell - Negotiate proto version. (section 4)
|- NetinfoCell - Time and address info. (section 4.5)
|- PaddingNegotiateCell - Padding negotiation. (section 7.2)
|- VPaddingCell - Variable-length padding. (section 7.2)
|- CertsCell - Relay certificates. (section 4.2)
|- AuthChallengeCell - Challenge value. (section 4.3)
|- AuthenticateCell - Client authentication. (section 4.5)
|- AuthorizeCell - Client authorization. (not yet used)
|
|- pack - encodes cell into bytes
|- unpack - decodes series of cells
+- pop - decodes cell with remainder
"""
import copy
import datetime
import inspect
import os
import sys
import stem.util
from stem import UNDEFINED
from stem.client.datatype import HASH_LEN, ZERO, LinkProtocol, Address, Certificate, CloseReason, RelayCommand, Size, split
from stem.util import datetime_to_unix, str_tools
FIXED_PAYLOAD_LEN = 509 # PAYLOAD_LEN, per tor-spec section 0.2
AUTH_CHALLENGE_SIZE = 32
RELAY_DIGEST_SIZE = Size.LONG
STREAM_ID_REQUIRED = (
RelayCommand.BEGIN,
RelayCommand.DATA,
RelayCommand.END,
RelayCommand.CONNECTED,
RelayCommand.RESOLVE,
RelayCommand.RESOLVED,
RelayCommand.BEGIN_DIR,
)
STREAM_ID_DISALLOWED = (
RelayCommand.EXTEND,
RelayCommand.EXTENDED,
RelayCommand.TRUNCATE,
RelayCommand.TRUNCATED,
RelayCommand.DROP,
RelayCommand.EXTEND2,
RelayCommand.EXTENDED2,
)
class Cell(object):
  """
  Metadata for ORPort cells.

  Unused padding is **not** used in equality checks or hashing. If two cells
  differ only in their *unused* attribute they are functionally equal.

  The following cell types explicitly don't have *unused* content:

    * PaddingCell (we consider all content part of payload)
    * VersionsCell (all content is unpacked and treated as a version specification)
    * VPaddingCell (we consider all content part of payload)

  :var bytes unused: unused filler that padded the cell to the expected size
  """

  # subclasses overwrite these with the cell's command name and value

  NAME = 'UNKNOWN'
  VALUE = -1
  IS_FIXED_SIZE = False

  def __init__(self, unused = b''):
    super(Cell, self).__init__()
    self.unused = unused

  @staticmethod
  def by_name(name):
    """
    Provides cell attributes by its name.

    :param str name: cell command to fetch

    :returns: cell class with this name

    :raises: **ValueError** if cell type is invalid
    """

    # scan this module for the class whose NAME attribute matches

    for _, cls in inspect.getmembers(sys.modules[__name__]):
      if name == getattr(cls, 'NAME', UNDEFINED):
        return cls

    raise ValueError("'%s' isn't a valid cell type" % name)

  @staticmethod
  def by_value(value):
    """
    Provides cell attributes by its value.

    :param int value: cell value to fetch

    :returns: cell class with this value

    :raises: **ValueError** if cell type is invalid
    """

    for _, cls in inspect.getmembers(sys.modules[__name__]):
      if value == getattr(cls, 'VALUE', UNDEFINED):
        return cls

    raise ValueError("'%s' isn't a valid cell value" % value)

  def pack(self, link_protocol):
    # subclasses that support packing override this
    raise NotImplementedError('Packing not yet implemented for %s cells' % type(self).NAME)

  @staticmethod
  def unpack(content, link_protocol):
    """
    Unpacks all cells from a response.

    :param bytes content: payload to decode
    :param int link_protocol: link protocol version

    :returns: :class:`~stem.client.cell.Cell` generator

    :raises:
      * **ValueError** if content is malformed
      * **NotImplementedError** if unable to unpack any of the cell types
    """

    while content:
      cell, content = Cell.pop(content, link_protocol)
      yield cell

  @staticmethod
  def pop(content, link_protocol):
    """
    Unpacks the first cell.

    :param bytes content: payload to decode
    :param int link_protocol: link protocol version

    :returns: (:class:`~stem.client.cell.Cell`, remainder) tuple

    :raises:
      * **ValueError** if content is malformed
      * **NotImplementedError** if unable to unpack this cell type
    """

    link_protocol = LinkProtocol(link_protocol)

    # cells begin with a circuit id (whose width is protocol dependent)
    # followed by a one-byte command

    circ_id, content = link_protocol.circ_id_size.pop(content)
    command, content = Size.CHAR.pop(content)
    cls = Cell.by_value(command)

    if cls.IS_FIXED_SIZE:
      payload_len = FIXED_PAYLOAD_LEN
    else:
      # variable sized cells carry an explicit payload length
      payload_len, content = Size.SHORT.pop(content)

    if len(content) < payload_len:
      raise ValueError('%s cell should have a payload of %i bytes, but only had %i' % (cls.NAME, payload_len, len(content)))

    payload, content = split(content, payload_len)
    return cls._unpack(payload, circ_id, link_protocol), content

  @classmethod
  def _pack(cls, link_protocol, payload, unused = b'', circ_id = None):
    """
    Provides bytes that can be used on the wire for these cell attributes.
    Format of a properly packed cell depends on if it's fixed or variable
    sized...

    ::

      Fixed:    [ CircuitID ][ Command ][ Payload ][ Padding ]
      Variable: [ CircuitID ][ Command ][ Size ][ Payload ]

    :param int link_protocol: link protocol version
    :param bytes payload: cell payload
    :param bytes unused: unused filler appended after the payload
    :param int circ_id: circuit id, if a CircuitCell

    :returns: **bytes** with the encoded payload

    :raises: **ValueError** if cell type invalid or payload makes cell too large
    """

    # only CircuitCell subclasses carry a meaningful circuit id

    if issubclass(cls, CircuitCell):
      if circ_id is None:
        raise ValueError('%s cells require a circuit identifier' % cls.NAME)
      elif circ_id < 1:
        raise ValueError('Circuit identifiers must a positive integer, not %s' % circ_id)
    else:
      if circ_id is not None:
        raise ValueError('%s cells should not specify a circuit identifier' % cls.NAME)

      circ_id = 0  # cell doesn't concern a circuit, default field to zero

    link_protocol = LinkProtocol(link_protocol)

    cell = bytearray()
    cell += link_protocol.circ_id_size.pack(circ_id)
    cell += Size.CHAR.pack(cls.VALUE)
    cell += b'' if cls.IS_FIXED_SIZE else Size.SHORT.pack(len(payload) + len(unused))
    cell += payload

    # include the unused portion (typically from unpacking)
    cell += unused

    # pad fixed sized cells to the required length

    if cls.IS_FIXED_SIZE:
      if len(cell) > link_protocol.fixed_cell_length:
        raise ValueError('Cell of type %s is too large (%i bytes), must not be more than %i. Check payload size (was %i bytes)' % (cls.NAME, len(cell), link_protocol.fixed_cell_length, len(payload)))

      cell += ZERO * (link_protocol.fixed_cell_length - len(cell))

    return bytes(cell)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    """
    Subclass implementation for unpacking cell content.

    :param bytes content: payload to decode
    :param int circ_id: circuit id cell is for
    :param stem.client.datatype.LinkProtocol link_protocol: link protocol version

    :returns: instance of this cell type

    :raises: **ValueError** if content is malformed
    """

    raise NotImplementedError('Unpacking not yet implemented for %s cells' % cls.NAME)

  def __eq__(self, other):
    # equality piggybacks on the subclass' __hash__ implementation
    return hash(self) == hash(other) if isinstance(other, Cell) else False

  def __ne__(self, other):
    return not self == other
class CircuitCell(Cell):
  """
  Cell concerning circuits.

  :var int circ_id: circuit id
  """

  def __init__(self, circ_id, unused = b''):
    # subclasses must supply the circuit this cell concerns
    super(CircuitCell, self).__init__(unused)
    self.circ_id = circ_id
class PaddingCell(Cell):
  """
  Randomized filler content, used to keep activity going on a circuit.

  :var bytes payload: randomized payload
  """

  NAME = 'PADDING'
  VALUE = 0
  IS_FIXED_SIZE = True

  def __init__(self, payload = None):
    # validate a caller-provided payload, otherwise generate a random one

    if payload and len(payload) != FIXED_PAYLOAD_LEN:
      raise ValueError('Padding payload should be %i bytes, but was %i' % (FIXED_PAYLOAD_LEN, len(payload)))

    super(PaddingCell, self).__init__()
    self.payload = payload if payload else os.urandom(FIXED_PAYLOAD_LEN)

  def pack(self, link_protocol):
    return PaddingCell._pack(link_protocol, self.payload)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    # all content is treated as payload, nothing is 'unused'
    return PaddingCell(content)

  def __hash__(self):
    return stem.util._hash_attr(self, 'payload', cache = True)
class CreateCell(CircuitCell):
  # CREATE cell stub (unimplemented). NOTE: instantiating this raises
  # TypeError since CircuitCell.__init__ requires a circ_id argument that
  # this zero-argument constructor doesn't supply.

  NAME = 'CREATE'
  VALUE = 1
  IS_FIXED_SIZE = True

  def __init__(self):
    super(CreateCell, self).__init__()  # TODO: implement
class CreatedCell(CircuitCell):
  # CREATED cell stub (unimplemented). NOTE: instantiating this raises
  # TypeError since CircuitCell.__init__ requires a circ_id argument that
  # this zero-argument constructor doesn't supply.

  NAME = 'CREATED'
  VALUE = 2
  IS_FIXED_SIZE = True

  def __init__(self):
    super(CreatedCell, self).__init__()  # TODO: implement
class RelayCell(CircuitCell):
  """
  Command concerning a relay circuit.

  Our 'recognized' attribute provides a cheap (but incomplete) check for if our
  cell payload is encrypted. If non-zero our payload *IS* encrypted, but if
  zero we're *PROBABLY* fully decrypted. This uncertainty is because encrypted
  cells have a small chance of coincidently producing zero for this value as
  well.

  :var stem.client.RelayCommand command: command to be issued
  :var int command_int: integer value of our command
  :var bytes data: payload of the cell
  :var int recognized: non-zero if payload is encrypted
  :var int digest: running digest held with the relay
  :var int stream_id: specific stream this concerns
  """

  NAME = 'RELAY'
  VALUE = 3
  IS_FIXED_SIZE = True

  def __init__(self, circ_id, command, data, digest = 0, stream_id = 0, recognized = 0, unused = b''):
    # normalize the digest into an int, accepting HASH objects and packed
    # strings as well

    if 'HASH' in str(type(digest)):
      # Unfortunately hashlib generates from a dynamic private class so
      # isinstance() isn't such a great option. With python2/python3 the
      # name is 'hashlib.HASH' whereas PyPy calls it just 'HASH'.

      digest_packed = digest.digest()[:RELAY_DIGEST_SIZE.size]
      digest = RELAY_DIGEST_SIZE.unpack(digest_packed)
    elif stem.util._is_str(digest):
      digest_packed = digest[:RELAY_DIGEST_SIZE.size]
      digest = RELAY_DIGEST_SIZE.unpack(digest_packed)
    elif stem.util._is_int(digest):
      pass
    else:
      raise ValueError('RELAY cell digest must be a hash, string, or int but was a %s' % type(digest).__name__)

    super(RelayCell, self).__init__(circ_id, unused)
    self.command, self.command_int = RelayCommand.get(command)
    self.recognized = recognized
    self.stream_id = stream_id
    self.digest = digest
    self.data = str_tools._to_bytes(data)

    # Only sanity check the stream id for cells we're constructing ourselves
    # (ie. with a zero digest). Cells from unpacking can carry anything.

    if digest == 0:
      if not stream_id and self.command in STREAM_ID_REQUIRED:
        raise ValueError('%s relay cells require a stream id' % self.command)
      elif stream_id and self.command in STREAM_ID_DISALLOWED:
        raise ValueError('%s relay cells concern the circuit itself and cannot have a stream id' % self.command)

  def pack(self, link_protocol):
    # payload is the command, 'recognized' marker, stream id, digest, data
    # length, then the data itself

    payload = bytearray()
    payload += Size.CHAR.pack(self.command_int)
    payload += Size.SHORT.pack(self.recognized)
    payload += Size.SHORT.pack(self.stream_id)
    payload += Size.LONG.pack(self.digest)
    payload += Size.SHORT.pack(len(self.data))
    payload += self.data

    return RelayCell._pack(link_protocol, bytes(payload), self.unused, self.circ_id)

  @staticmethod
  def decrypt(link_protocol, content, key, digest):
    """
    Decrypts content as a relay cell addressed to us. This provides back a
    tuple of the form...

    ::

      (cell (RelayCell), new_key (CipherContext), new_digest (HASH))

    :param int link_protocol: link protocol version
    :param bytes content: cell content to be decrypted
    :param cryptography.hazmat.primitives.ciphers.CipherContext key:
      key established with the relay we received this cell from
    :param HASH digest: running digest held with the relay

    :returns: **tuple** with our decrypted cell and updated key/digest

    :raises: :class:`stem.ProtocolError` if content doesn't belong to a relay
      cell
    """

    # work on copies so the caller's key/digest are untouched on failure

    new_key = copy.copy(key)
    new_digest = digest.copy()

    if len(content) != link_protocol.fixed_cell_length:
      raise stem.ProtocolError('RELAY cells should be %i bytes, but received %i' % (link_protocol.fixed_cell_length, len(content)))

    circ_id, content = link_protocol.circ_id_size.pop(content)
    command, encrypted_payload = Size.CHAR.pop(content)

    if command != RelayCell.VALUE:
      raise stem.ProtocolError('Cannot decrypt as a RELAY cell. This had command %i instead.' % command)

    payload = new_key.update(encrypted_payload)
    cell = RelayCell._unpack(payload, circ_id, link_protocol)

    # TODO: Implement our decryption digest. It is used to support relaying
    # within multi-hop circuits. On first glance this should go something
    # like...
    #
    #   # Our updated digest is calculated based on this cell with a blanked
    #   # digest field.
    #
    #   digest_cell = RelayCell(self.circ_id, self.command, self.data, 0, self.stream_id, self.recognized, self.unused)
    #   new_digest.update(digest_cell.pack(link_protocol))
    #
    #   is_encrypted == cell.recognized != 0 or self.digest == new_digest
    #
    # ... or something like that. Until we attempt to support relaying this is
    # both moot and difficult to exercise in order to ensure we get it right.

    return cell, new_key, new_digest

  def encrypt(self, link_protocol, key, digest):
    """
    Encrypts our cell content to be sent with the given key. This provides back
    a tuple of the form...

    ::

      (payload (bytes), new_key (CipherContext), new_digest (HASH))

    :param int link_protocol: link protocol version
    :param cryptography.hazmat.primitives.ciphers.CipherContext key:
      key established with the relay we're sending this cell to
    :param HASH digest: running digest held with the relay

    :returns: **tuple** with our encrypted payload and updated key/digest
    """

    # work on copies so our caller's key/digest only advance on success

    new_key = copy.copy(key)
    new_digest = digest.copy()

    # Digests are computed from our payload, not including our header's circuit
    # id (2 or 4 bytes) and command (1 byte).

    header_size = link_protocol.circ_id_size.size + 1
    payload_without_digest = self.pack(link_protocol)[header_size:]
    new_digest.update(payload_without_digest)

    # Pack a copy of ourselves with our newly calculated digest, and encrypt
    # the payload. Header remains plaintext.

    cell = RelayCell(self.circ_id, self.command, self.data, new_digest, self.stream_id, self.recognized, self.unused)
    header, payload = split(cell.pack(link_protocol), header_size)

    return header + new_key.update(payload), new_key, new_digest

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    command, content = Size.CHAR.pop(content)
    recognized, content = Size.SHORT.pop(content)  # 'recognized' field
    stream_id, content = Size.SHORT.pop(content)
    digest, content = Size.LONG.pop(content)
    data_len, content = Size.SHORT.pop(content)
    data, unused = split(content, data_len)

    if len(data) != data_len:
      raise ValueError('%s cell said it had %i bytes of data, but only had %i' % (cls.NAME, data_len, len(data)))

    return RelayCell(circ_id, command, data, digest, stream_id, recognized, unused)

  def __hash__(self):
    return stem.util._hash_attr(self, 'command_int', 'stream_id', 'digest', 'data', cache = True)
class DestroyCell(CircuitCell):
  """
  Closes the given circuit.

  :var stem.client.CloseReason reason: reason the circuit is being closed
  :var int reason_int: integer value of our closure reason
  """

  NAME = 'DESTROY'
  VALUE = 4
  IS_FIXED_SIZE = True

  def __init__(self, circ_id, reason = CloseReason.NONE, unused = b''):
    super(DestroyCell, self).__init__(circ_id, unused)
    self.reason, self.reason_int = CloseReason.get(reason)

  def pack(self, link_protocol):
    # payload is simply the single-byte closure reason
    encoded_reason = Size.CHAR.pack(self.reason_int)
    return DestroyCell._pack(link_protocol, encoded_reason, self.unused, self.circ_id)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    reason, remainder = Size.CHAR.pop(content)
    return DestroyCell(circ_id, reason, remainder)

  def __hash__(self):
    return stem.util._hash_attr(self, 'circ_id', 'reason_int', cache = True)
class CreateFastCell(CircuitCell):
  """
  Create a circuit with our first hop. This is lighter weight than further hops
  because we've already established the relay's identity and secret key.

  :var bytes key_material: randomized key material
  """

  NAME = 'CREATE_FAST'
  VALUE = 5
  IS_FIXED_SIZE = True

  def __init__(self, circ_id, key_material = None, unused = b''):
    # caller-provided key material must be exactly HASH_LEN bytes, otherwise
    # we generate random material of that size

    if key_material and len(key_material) != HASH_LEN:
      raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))

    super(CreateFastCell, self).__init__(circ_id, unused)
    self.key_material = key_material if key_material else os.urandom(HASH_LEN)

  def pack(self, link_protocol):
    return CreateFastCell._pack(link_protocol, self.key_material, self.unused, self.circ_id)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    key_material, remainder = split(content, HASH_LEN)

    if len(key_material) != HASH_LEN:
      raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))

    return CreateFastCell(circ_id, key_material, remainder)

  def __hash__(self):
    return stem.util._hash_attr(self, 'circ_id', 'key_material', cache = True)
class CreatedFastCell(CircuitCell):
  """
  CREATE_FAST reply.

  :var bytes key_material: randomized key material
  :var bytes derivative_key: hash proving the relay knows our shared key
  """

  NAME = 'CREATED_FAST'
  VALUE = 6
  IS_FIXED_SIZE = True

  def __init__(self, circ_id, derivative_key, key_material = None, unused = b''):
    if not key_material:
      key_material = os.urandom(HASH_LEN)
    elif len(key_material) != HASH_LEN:
      raise ValueError('Key material should be %i bytes, but was %i' % (HASH_LEN, len(key_material)))

    if len(derivative_key) != HASH_LEN:
      # BUGFIX: typo in error message ('Derivatived' -> 'Derivative')
      raise ValueError('Derivative key should be %i bytes, but was %i' % (HASH_LEN, len(derivative_key)))

    super(CreatedFastCell, self).__init__(circ_id, unused)

    self.key_material = key_material
    self.derivative_key = derivative_key

  def pack(self, link_protocol):
    # payload is our key material followed by the derivative key
    return CreatedFastCell._pack(link_protocol, self.key_material + self.derivative_key, self.unused, self.circ_id)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    if len(content) < HASH_LEN * 2:
      # BUGFIX: typo in error message ('derivatived' -> 'derivative')
      raise ValueError('Key material and derivative key should be %i bytes, but was %i' % (HASH_LEN * 2, len(content)))

    key_material, content = split(content, HASH_LEN)
    derivative_key, content = split(content, HASH_LEN)

    return CreatedFastCell(circ_id, derivative_key, key_material, content)

  def __hash__(self):
    return stem.util._hash_attr(self, 'circ_id', 'derivative_key', 'key_material', cache = True)
class VersionsCell(Cell):
  """
  Link version negotiation cell.

  :var list versions: link versions
  """

  NAME = 'VERSIONS'
  VALUE = 7
  IS_FIXED_SIZE = False

  def __init__(self, versions):
    super(VersionsCell, self).__init__()
    self.versions = versions

  def pack(self, link_protocol):
    # each supported version is encoded as a two-byte integer

    encoded = bytearray()

    for version in self.versions:
      encoded += Size.SHORT.pack(version)

    return VersionsCell._pack(link_protocol, bytes(encoded))

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    supported = []

    while content:
      version, content = Size.SHORT.pop(content)
      supported.append(version)

    return VersionsCell(supported)

  def __hash__(self):
    return stem.util._hash_attr(self, 'versions', cache = True)
class NetinfoCell(Cell):
  """
  Information relays exchange about each other.

  :var datetime timestamp: current time
  :var stem.client.Address receiver_address: receiver's OR address
  :var list sender_addresses: sender's OR addresses
  """

  NAME = 'NETINFO'
  VALUE = 8
  IS_FIXED_SIZE = True

  def __init__(self, receiver_address, sender_addresses, timestamp = None, unused = b''):
    super(NetinfoCell, self).__init__(unused)
    # NOTE(review): this defaults to local time but _unpack() below decodes
    # with utcfromtimestamp() - looks inconsistent, confirm what
    # datetime_to_unix() expects before relying on the packed value
    self.timestamp = timestamp if timestamp else datetime.datetime.now()
    self.receiver_address = receiver_address
    self.sender_addresses = sender_addresses

  def pack(self, link_protocol):
    # payload is the timestamp, receiver's address, then a one-byte count of
    # our own addresses followed by each of them

    payload = bytearray()
    payload += Size.LONG.pack(int(datetime_to_unix(self.timestamp)))
    payload += self.receiver_address.pack()
    payload += Size.CHAR.pack(len(self.sender_addresses))

    for addr in self.sender_addresses:
      payload += addr.pack()

    return NetinfoCell._pack(link_protocol, bytes(payload), self.unused)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    timestamp, content = Size.LONG.pop(content)
    receiver_address, content = Address.pop(content)

    sender_addresses = []
    sender_addr_count, content = Size.CHAR.pop(content)

    for i in range(sender_addr_count):
      addr, content = Address.pop(content)
      sender_addresses.append(addr)

    # whatever bytes remain are unused padding
    return NetinfoCell(receiver_address, sender_addresses, datetime.datetime.utcfromtimestamp(timestamp), unused = content)

  def __hash__(self):
    return stem.util._hash_attr(self, 'timestamp', 'receiver_address', 'sender_addresses', cache = True)
class RelayEarlyCell(CircuitCell):
  # RELAY_EARLY cell stub (unimplemented). NOTE: instantiating this raises
  # TypeError since CircuitCell.__init__ requires a circ_id argument that
  # this zero-argument constructor doesn't supply.

  NAME = 'RELAY_EARLY'
  VALUE = 9
  IS_FIXED_SIZE = True

  def __init__(self):
    super(RelayEarlyCell, self).__init__()  # TODO: implement
class Create2Cell(CircuitCell):
  # CREATE2 cell stub (unimplemented). NOTE: instantiating this raises
  # TypeError since CircuitCell.__init__ requires a circ_id argument that
  # this zero-argument constructor doesn't supply.

  NAME = 'CREATE2'
  VALUE = 10
  IS_FIXED_SIZE = True

  def __init__(self):
    super(Create2Cell, self).__init__()  # TODO: implement
class Created2Cell(Cell):
  # CREATED2 cell stub (unimplemented)

  NAME = 'CREATED2'
  VALUE = 11
  IS_FIXED_SIZE = True

  def __init__(self):
    super(Created2Cell, self).__init__()  # TODO: implement
class PaddingNegotiateCell(Cell):
  # PADDING_NEGOTIATE cell stub (unimplemented)

  NAME = 'PADDING_NEGOTIATE'
  VALUE = 12
  IS_FIXED_SIZE = True

  def __init__(self):
    super(PaddingNegotiateCell, self).__init__()  # TODO: implement
class VPaddingCell(Cell):
  """
  Variable length randomized filler content, used to keep activity going on a
  circuit.

  :var bytes payload: randomized payload
  """

  NAME = 'VPADDING'
  VALUE = 128
  IS_FIXED_SIZE = False

  def __init__(self, size = None, payload = None):
    # accept an explicit payload, a size to randomize, or both (when
    # consistent with each other)

    if payload is None:
      if size is None:
        raise ValueError('VPaddingCell constructor must specify payload or size')
      elif size < 0:
        raise ValueError('VPaddingCell size (%s) cannot be negative' % size)

      content = os.urandom(size)
    else:
      if size is not None and size < 0:
        raise ValueError('VPaddingCell size (%s) cannot be negative' % size)
      elif size is not None and size != len(payload):
        raise ValueError('VPaddingCell constructor specified both a size of %i bytes and payload of %i bytes' % (size, len(payload)))

      content = payload

    super(VPaddingCell, self).__init__()
    self.payload = content

  def pack(self, link_protocol):
    return VPaddingCell._pack(link_protocol, self.payload)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    # all content is treated as payload, nothing is 'unused'
    return VPaddingCell(payload = content)

  def __hash__(self):
    return stem.util._hash_attr(self, 'payload', cache = True)
class CertsCell(Cell):
  """
  Certificate held by the relay we're communicating with.

  :var list certificates: :class:`~stem.client.Certificate` of the relay
  """

  NAME = 'CERTS'
  VALUE = 129
  IS_FIXED_SIZE = False

  def __init__(self, certs, unused = b''):
    super(CertsCell, self).__init__(unused)
    self.certificates = certs

  def pack(self, link_protocol):
    # payload is a one-byte certificate count followed by each encoded cert

    encoded = bytearray()
    encoded += Size.CHAR.pack(len(self.certificates))

    for cert in self.certificates:
      encoded += cert.pack()

    return CertsCell._pack(link_protocol, bytes(encoded), self.unused)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    cert_count, content = Size.CHAR.pop(content)
    certs = []

    for _ in range(cert_count):
      if not content:
        raise ValueError('CERTS cell indicates it should have %i certificates, but only contained %i' % (cert_count, len(certs)))

      cert, content = Certificate.pop(content)
      certs.append(cert)

    return CertsCell(certs, unused = content)

  def __hash__(self):
    return stem.util._hash_attr(self, 'certificates', cache = True)
class AuthChallengeCell(Cell):
  """
  First step of the authentication handshake.

  :var bytes challenge: random bytes for us to sign to authenticate
  :var list methods: authentication methods supported by the relay we're
    communicating with
  """

  NAME = 'AUTH_CHALLENGE'
  VALUE = 130
  IS_FIXED_SIZE = False

  def __init__(self, methods, challenge = None, unused = b''):
    # caller-provided challenges must be exactly AUTH_CHALLENGE_SIZE bytes,
    # otherwise we generate a random one of that size

    if challenge and len(challenge) != AUTH_CHALLENGE_SIZE:
      raise ValueError('AUTH_CHALLENGE must be %i bytes, but was %i' % (AUTH_CHALLENGE_SIZE, len(challenge)))

    super(AuthChallengeCell, self).__init__(unused)
    self.challenge = challenge if challenge else os.urandom(AUTH_CHALLENGE_SIZE)
    self.methods = methods

  def pack(self, link_protocol):
    # payload is the challenge followed by a method count and each method

    encoded = bytearray(self.challenge)
    encoded += Size.SHORT.pack(len(self.methods))

    for method in self.methods:
      encoded += Size.SHORT.pack(method)

    return AuthChallengeCell._pack(link_protocol, bytes(encoded), self.unused)

  @classmethod
  def _unpack(cls, content, circ_id, link_protocol):
    min_size = AUTH_CHALLENGE_SIZE + Size.SHORT.size

    if len(content) < min_size:
      raise ValueError('AUTH_CHALLENGE payload should be at least %i bytes, but was %i' % (min_size, len(content)))

    challenge, content = split(content, AUTH_CHALLENGE_SIZE)
    method_count, content = Size.SHORT.pop(content)

    if len(content) < method_count * Size.SHORT.size:
      raise ValueError('AUTH_CHALLENGE should have %i methods, but only had %i bytes for it' % (method_count, len(content)))

    methods = []

    for _ in range(method_count):
      method, content = Size.SHORT.pop(content)
      methods.append(method)

    return AuthChallengeCell(methods, challenge, unused = content)

  def __hash__(self):
    return stem.util._hash_attr(self, 'challenge', 'methods', cache = True)
class AuthenticateCell(Cell):
  NAME = 'AUTHENTICATE'
  VALUE = 131
  IS_FIXED_SIZE = False

  def __init__(self):
    # TODO: implement
    super(AuthenticateCell, self).__init__()
class AuthorizeCell(Cell):
  NAME = 'AUTHORIZE'
  VALUE = 132
  IS_FIXED_SIZE = False

  def __init__(self):
    # TODO: implement
    super(AuthorizeCell, self).__init__()

View file

@ -0,0 +1,558 @@
# Copyright 2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Support for `Tor's ORPort protocol
<https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_.
**This module only consists of low level components, and is not intended for
users.** See our :class:`~stem.client.Relay` for the API you probably want.
.. versionadded:: 1.7.0
::
split - splits bytes into substrings
LinkProtocol - ORPort protocol version.
Field - Packable and unpackable datatype.
|- Size - Field of a static size.
|- Address - Relay address.
|- Certificate - Relay certificate.
|
|- pack - encodes content
|- unpack - decodes content
+- pop - decodes content with remainder
KDF - KDF-TOR derived attributes
+- from_value - parses key material
.. data:: AddrType (enum)
Form an address takes.
===================== ===========
AddressType Description
===================== ===========
**HOSTNAME** relay hostname
**IPv4** IPv4 address
**IPv6** IPv6 address
**ERROR_TRANSIENT**   temporary error retrieving address
**ERROR_PERMANENT** permanent error retrieving address
**UNKNOWN** unrecognized address type
===================== ===========
.. data:: RelayCommand (enum)
Command concerning streams and circuits we've established with a relay.
Commands have two characteristics...
* **forward/backward**: **forward** commands are issued from the origin,
whereas **backward** come from the relay
* **stream/circuit**: **stream** commands concern an individual stream, whereas
**circuit** concern the entire circuit we've established with a relay
===================== ===========
RelayCommand Description
===================== ===========
**BEGIN** begin a stream (**forward**, **stream**)
**DATA** transmit data (**forward/backward**, **stream**)
**END** end a stream (**forward/backward**, **stream**)
**CONNECTED** BEGIN reply (**backward**, **stream**)
**SENDME** ready to accept more cells (**forward/backward**, **stream/circuit**)
**EXTEND** extend the circuit through another relay (**forward**, **circuit**)
**EXTENDED** EXTEND reply (**backward**, **circuit**)
**TRUNCATE** remove last circuit hop (**forward**, **circuit**)
**TRUNCATED** TRUNCATE reply (**backward**, **circuit**)
**DROP** ignorable no-op (**forward/backward**, **circuit**)
**RESOLVE** request DNS resolution (**forward**, **stream**)
**RESOLVED** RESOLVE reply (**backward**, **stream**)
**BEGIN_DIR**         request descriptor (**forward**, **stream**)
**EXTEND2** ntor EXTEND request (**forward**, **circuit**)
**EXTENDED2** EXTEND2 reply (**backward**, **circuit**)
**UNKNOWN** unrecognized command
===================== ===========
.. data:: CertType (enum)
Relay certificate type.
===================== ===========
CertType Description
===================== ===========
**LINK** link key certificate certified by RSA1024 identity
**IDENTITY** RSA1024 Identity certificate
**AUTHENTICATE** RSA1024 AUTHENTICATE cell link certificate
**UNKNOWN** unrecognized certificate type
===================== ===========
.. data:: CloseReason (enum)
Reason a relay is closed.
===================== ===========
CloseReason Description
===================== ===========
**NONE** no reason given
**PROTOCOL** tor protocol violation
**INTERNAL** internal error
**REQUESTED** client sent a TRUNCATE command
**HIBERNATING** relay suspended, trying to save bandwidth
**RESOURCELIMIT** out of memory, sockets, or circuit IDs
**CONNECTFAILED** unable to reach relay
**OR_IDENTITY** connected, but its OR identity was not as expected
**OR_CONN_CLOSED** connection that was carrying this circuit died
**FINISHED** circuit has expired for being dirty or old
**TIMEOUT** circuit construction took too long
**DESTROYED** circuit was destroyed without a client TRUNCATE
**NOSUCHSERVICE** request was for an unknown hidden service
**UNKNOWN** unrecognized reason
===================== ===========
"""
import collections
import hashlib
import struct
import stem.client.cell
import stem.prereq
import stem.util
import stem.util.connection
import stem.util.enum
ZERO = b'\x00'
HASH_LEN = 20  # length of a SHA-1 digest, as produced by KDF.from_value below
KEY_LEN = 16  # length of the symmetric keys derived by KDF-TOR
class _IntegerEnum(stem.util.enum.Enum):
  """
  Integer backed enumeration. Enumerations of this type always have an implicit
  **UNKNOWN** value for integer values that lack a mapping.
  """

  def __init__(self, *args):
    self._enum_to_int = {}
    self._int_to_enum = {}
    parent_args = []

    for entry in args:
      # entries are either (enum, int) or (enum, str, int) tuples
      if len(entry) == 3:
        enum, str_val, int_val = entry
      elif len(entry) == 2:
        enum, int_val = entry
        str_val = enum
      else:
        raise ValueError('IntegerEnums can only be constructed with two or three value tuples: %s' % repr(entry))

      self._enum_to_int[str_val] = int_val
      self._int_to_enum[int_val] = str_val
      parent_args.append((enum, str_val))

    parent_args.append(('UNKNOWN', 'UNKNOWN'))
    super(_IntegerEnum, self).__init__(*parent_args)

  def get(self, val):
    """
    Provides the (enum, int_value) tuple for a given value.
    """

    if stem.util._is_int(val):
      return self._int_to_enum.get(val, self.UNKNOWN), val
    elif val in self:
      return val, self._enum_to_int.get(val, val)

    raise ValueError("Invalid enumeration '%s', options are %s" % (val, ', '.join(self)))
# Integer mappings for the enumerations documented in this module's docstring.
# Integer values without a mapping resolve to each enum's implicit UNKNOWN.

AddrType = _IntegerEnum(
  ('HOSTNAME', 0),
  ('IPv4', 4),
  ('IPv6', 6),
  ('ERROR_TRANSIENT', 16),
  ('ERROR_PERMANENT', 17),
)

# Relay cell commands. Three-value tuples also provide the string form used on
# the wire protocol (eg. 'RELAY_BEGIN' for BEGIN).

RelayCommand = _IntegerEnum(
  ('BEGIN', 'RELAY_BEGIN', 1),
  ('DATA', 'RELAY_DATA', 2),
  ('END', 'RELAY_END', 3),
  ('CONNECTED', 'RELAY_CONNECTED', 4),
  ('SENDME', 'RELAY_SENDME', 5),
  ('EXTEND', 'RELAY_EXTEND', 6),
  ('EXTENDED', 'RELAY_EXTENDED', 7),
  ('TRUNCATE', 'RELAY_TRUNCATE', 8),
  ('TRUNCATED', 'RELAY_TRUNCATED', 9),
  ('DROP', 'RELAY_DROP', 10),
  ('RESOLVE', 'RELAY_RESOLVE', 11),
  ('RESOLVED', 'RELAY_RESOLVED', 12),
  ('BEGIN_DIR', 'RELAY_BEGIN_DIR', 13),
  ('EXTEND2', 'RELAY_EXTEND2', 14),
  ('EXTENDED2', 'RELAY_EXTENDED2', 15),
)

CertType = _IntegerEnum(
  ('LINK', 1),
  ('IDENTITY', 2),
  ('AUTHENTICATE', 3),
)

CloseReason = _IntegerEnum(
  ('NONE', 0),
  ('PROTOCOL', 1),
  ('INTERNAL', 2),
  ('REQUESTED', 3),
  ('HIBERNATING', 4),
  ('RESOURCELIMIT', 5),
  ('CONNECTFAILED', 6),
  ('OR_IDENTITY', 7),
  ('OR_CONN_CLOSED', 8),
  ('FINISHED', 9),
  ('TIMEOUT', 10),
  ('DESTROYED', 11),
  ('NOSUCHSERVICE', 12),
)
def split(content, size):
  """
  Simple split of bytes into two substrings.

  :param bytes content: string to split
  :param int size: index to split the string on

  :returns: two value tuple with the split bytes
  """

  head = content[:size]
  tail = content[size:]

  return head, tail
class LinkProtocol(int):
  """
  Constants that vary by our link protocol version.

  :var int version: link protocol version
  :var stem.client.datatype.Size circ_id_size: circuit identifier field size
  :var int fixed_cell_length: size of cells with a fixed length
  :var int first_circ_id: When creating circuits we pick an unused identifier
    from a range that's determined by our link protocol.
  """

  def __new__(cls, version):
    # no-op if we're already a LinkProtocol
    if isinstance(version, LinkProtocol):
      return version

    instance = int.__new__(cls, version)
    instance.version = version

    # link protocol 4 widened circuit ids and changed their starting range
    if version > 3:
      instance.circ_id_size = Size.LONG
      instance.first_circ_id = 0x80000000
    else:
      instance.circ_id_size = Size.SHORT
      instance.first_circ_id = 0x01

    # cell header is the circuit id (2 or 4 bytes) followed by a command byte
    header_size = instance.circ_id_size.size + 1
    instance.fixed_cell_length = header_size + stem.client.cell.FIXED_PAYLOAD_LEN

    return instance

  def __hash__(self):
    # Every attribute is derived from our version, so it alone suffices.
    # Offset by our type so we don't hash conflict with plain ints.
    return self.version * hash(str(type(self)))

  def __eq__(self, other):
    if isinstance(other, int):
      return self.version == other

    return hash(self) == hash(other) if isinstance(other, LinkProtocol) else False

  def __ne__(self, other):
    return not self == other

  def __int__(self):
    return self.version
class Field(object):
  """
  Packable and unpackable datatype.
  """

  def pack(self):
    """
    Encodes field into bytes.

    :returns: **bytes** that can be communicated over Tor's ORPort

    :raises: **ValueError** if incorrect type or size
    """

    raise NotImplementedError('Not yet available')

  @classmethod
  def unpack(cls, packed):
    """
    Decodes bytes into a field of this type.

    :param bytes packed: content to decode

    :returns: instance of this class

    :raises: **ValueError** if packed data is malformed
    """

    unpacked, leftover = cls.pop(packed)

    if leftover:
      raise ValueError('%s is the wrong size for a %s field' % (repr(packed), cls.__name__))

    return unpacked

  @staticmethod
  def pop(packed):
    """
    Decodes bytes as this field type, providing it and the remainder.

    :param bytes packed: content to decode

    :returns: tuple of the form (unpacked, remainder)

    :raises: **ValueError** if packed data is malformed
    """

    raise NotImplementedError('Not yet available')

  def __eq__(self, other):
    if not isinstance(other, Field):
      return False

    return hash(self) == hash(other)

  def __ne__(self, other):
    return not self == other
class Size(Field):
  """
  Unsigned `struct.pack format
  <https://docs.python.org/2/library/struct.html#format-characters>` for
  network-order fields.

  ==================== ===========
  Pack                 Description
  ==================== ===========
  CHAR                 Unsigned char (1 byte)
  SHORT                Unsigned short (2 bytes)
  LONG                 Unsigned long (4 bytes)
  LONG_LONG            Unsigned long long (8 bytes)
  ==================== ===========
  """

  def __init__(self, name, size, pack_format):
    # name: label used in error messages (eg. 'SHORT')
    # size: number of encoded bytes
    # format: struct format string (eg. '!H')
    self.name = name
    self.size = size
    self.format = pack_format

  @staticmethod
  def pop(packed):
    # Unlike other Fields, Size is used via its constants (Size.CHAR, etc),
    # each of which provides an instance-level pop() below.
    raise NotImplementedError("Use our constant's unpack() and pop() instead")

  def pack(self, content):
    # Encodes an unsigned integer into this field's fixed byte width,
    # raising ValueError for non-integers, negatives, or overflow.
    #
    # TODO: Python 2.6's struct module behaves a little differently in a couple
    # respsects...
    #
    #   * Invalid types raise a TypeError rather than a struct.error.
    #
    #   * Negative values are happily packed despite being unsigned fields with
    #     a message printed to stdout (!) that says...
    #
    #       stem/client/datatype.py:362: DeprecationWarning: struct integer overflow masking is deprecated
    #         packed = struct.pack(self.format, content)
    #       stem/client/datatype.py:362: DeprecationWarning: 'B' format requires 0 <= number <= 255
    #         packed = struct.pack(self.format, content)
    #
    # Rather than adjust this method to account for these differences doing
    # duplicate upfront checks just for python 2.6. When we drop 2.6 support
    # this can obviously be dropped.

    if stem.prereq._is_python_26():
      if not stem.util._is_int(content):
        raise ValueError('Size.pack encodes an integer, but was a %s' % type(content).__name__)
      elif content < 0:
        raise ValueError('Packed values must be positive (attempted to pack %i as a %s)' % (content, self.name))

    try:
      packed = struct.pack(self.format, content)
    except struct.error:
      # determine which of the two failure modes applies for the error message
      if not stem.util._is_int(content):
        raise ValueError('Size.pack encodes an integer, but was a %s' % type(content).__name__)
      elif content < 0:
        raise ValueError('Packed values must be positive (attempted to pack %i as a %s)' % (content, self.name))
      else:
        raise  # some other struct exception

    if self.size != len(packed):
      raise ValueError('%s is the wrong size for a %s field' % (repr(packed), self.name))

    return packed

  def unpack(self, packed):
    # Decodes exactly this field's byte width back into an unsigned integer.
    if self.size != len(packed):
      raise ValueError('%s is the wrong size for a %s field' % (repr(packed), self.name))

    return struct.unpack(self.format, packed)[0]

  def pop(self, packed):
    # Decodes this field's leading bytes, returning (value, remainder).
    to_unpack, remainder = split(packed, self.size)

    return self.unpack(to_unpack), remainder

  def __hash__(self):
    return stem.util._hash_attr(self, 'name', 'size', 'format', cache = True)
class Address(Field):
  """
  Relay address.

  :var stem.client.AddrType type: address type
  :var int type_int: integer value of the address type
  :var unicode value: address value
  :var bytes value_bin: encoded address value
  """

  def __init__(self, value, addr_type = None):
    # The value can be either the string representation of an address or its
    # packed bytes. In both cases we normalize to having self.value (string
    # form) and self.value_bin (packed form).

    if addr_type is None:
      # no type given, so infer it from the value's string form
      if stem.util.connection.is_valid_ipv4_address(value):
        addr_type = AddrType.IPv4
      elif stem.util.connection.is_valid_ipv6_address(value):
        addr_type = AddrType.IPv6
      else:
        raise ValueError("'%s' isn't an IPv4 or IPv6 address" % value)

    self.type, self.type_int = AddrType.get(addr_type)

    if self.type == AddrType.IPv4:
      if stem.util.connection.is_valid_ipv4_address(value):
        # string form: pack each dotted-quad octet as a byte
        self.value = value
        self.value_bin = b''.join([Size.CHAR.pack(int(v)) for v in value.split('.')])
      else:
        # packed form: four bytes, one per octet
        if len(value) != 4:
          raise ValueError('Packed IPv4 addresses should be four bytes, but was: %s' % repr(value))

        self.value = '.'.join([str(Size.CHAR.unpack(value[i:i + 1])) for i in range(4)])
        self.value_bin = value
    elif self.type == AddrType.IPv6:
      if stem.util.connection.is_valid_ipv6_address(value):
        # string form: normalize to the expanded lowercase representation
        self.value = stem.util.connection.expand_ipv6_address(value).lower()
        self.value_bin = b''.join([Size.SHORT.pack(int(v, 16)) for v in self.value.split(':')])
      else:
        # packed form: sixteen bytes, two per colon-delimited group
        if len(value) != 16:
          raise ValueError('Packed IPv6 addresses should be sixteen bytes, but was: %s' % repr(value))

        self.value = ':'.join(['%04x' % Size.SHORT.unpack(value[i * 2:(i + 1) * 2]) for i in range(8)])
        self.value_bin = value
    else:
      # The spec doesn't really tell us what form to expect errors to be. For
      # now just leaving the value unset so we can fill it in later when we
      # know what would be most useful.

      self.value = None
      self.value_bin = value

  def pack(self):
    # encoded as a type byte, a length byte, then the packed address
    cell = bytearray()
    cell += Size.CHAR.pack(self.type_int)
    cell += Size.CHAR.pack(len(self.value_bin))
    cell += self.value_bin
    return bytes(cell)

  @staticmethod
  def pop(content):
    addr_type, content = Size.CHAR.pop(content)
    addr_length, content = Size.CHAR.pop(content)

    if len(content) < addr_length:
      raise ValueError('Address specified a payload of %i bytes, but only had %i' % (addr_length, len(content)))

    addr_value, content = split(content, addr_length)

    return Address(addr_value, addr_type), content

  def __hash__(self):
    return stem.util._hash_attr(self, 'type_int', 'value_bin', cache = True)
class Certificate(Field):
  """
  Relay certificate as defined in tor-spec section 4.2.

  :var stem.client.CertType type: certificate type
  :var int type_int: integer value of the certificate type
  :var bytes value: certificate value
  """

  def __init__(self, cert_type, value):
    self.type, self.type_int = CertType.get(cert_type)
    self.value = value

  def pack(self):
    # encoded as a type byte, a two byte length, then the certificate
    return b''.join((
      Size.CHAR.pack(self.type_int),
      Size.SHORT.pack(len(self.value)),
      self.value,
    ))

  @staticmethod
  def pop(content):
    cert_type, content = Size.CHAR.pop(content)
    cert_size, content = Size.SHORT.pop(content)

    if len(content) < cert_size:
      raise ValueError('CERTS cell should have a certificate with %i bytes, but only had %i remaining' % (cert_size, len(content)))

    cert_bytes, remainder = split(content, cert_size)

    return Certificate(cert_type, cert_bytes), remainder

  def __hash__(self):
    return stem.util._hash_attr(self, 'type_int', 'value')
class KDF(collections.namedtuple('KDF', ['key_hash', 'forward_digest', 'backward_digest', 'forward_key', 'backward_key'])):
  """
  Computed KDF-TOR derived values for TAP, CREATE_FAST handshakes, and hidden
  service protocols as defined tor-spec section 5.2.1.

  :var bytes key_hash: hash that proves knowledge of our shared key
  :var bytes forward_digest: forward digest hash seed
  :var bytes backward_digest: backward digest hash seed
  :var bytes forward_key: forward encryption key
  :var bytes backward_key: backward encryption key
  """

  @staticmethod
  def from_value(key_material):
    # Derived key material, as per...
    #
    #   K = H(K0 | [00]) | H(K0 | [01]) | H(K0 | [02]) | ...

    required_len = KEY_LEN * 2 + HASH_LEN * 3
    derived_key = b''
    counter = 0

    while len(derived_key) < required_len:
      derived_key += hashlib.sha1(key_material + Size.CHAR.pack(counter)).digest()
      counter += 1

    # carve the derived material into its five fixed-width attributes

    attr_widths = (HASH_LEN, HASH_LEN, HASH_LEN, KEY_LEN, KEY_LEN)
    attrs = []

    for width in attr_widths:
      attr, derived_key = split(derived_key, width)
      attrs.append(attr)

    return KDF(*attrs)
# Attach our constants to the Size class. These can't be class attributes in
# the definition itself because each is an instance of Size.

setattr(Size, 'CHAR', Size('CHAR', 1, '!B'))
setattr(Size, 'SHORT', Size('SHORT', 2, '!H'))
setattr(Size, 'LONG', Size('LONG', 4, '!L'))
setattr(Size, 'LONG_LONG', Size('LONG_LONG', 8, '!Q'))

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -198,8 +198,14 @@ CONNECT_MESSAGES = {
'wrong_socket_type': WRONG_SOCKET_TYPE_MSG.strip(),
}
COMMON_TOR_COMMANDS = (
'tor',
'tor.real', # TBB command ran
'/usr/local/bin/tor', # FreeBSD expands the whole path, this is the default location
)
def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/control', password = None, password_prompt = False, chroot_path = None, controller = stem.control.Controller):
def connect(control_port = ('127.0.0.1', 'default'), control_socket = '/var/run/tor/control', password = None, password_prompt = False, chroot_path = None, controller = stem.control.Controller):
"""
Convenience function for quickly getting a control connection. This is very
handy for debugging or CLI setup, handling setup and prompting for a password
@ -214,8 +220,15 @@ def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/c
details of how this works. Messages and details of this function's behavior
could change in the future.
If the **port** is **'default'** then this checks on both 9051 (default for
relays) and 9151 (default for the Tor Browser). This default may change in
the future.
.. versionadded:: 1.2.0
.. versionchanged:: 1.5.0
Use both port 9051 and 9151 by default.
:param tuple contol_port: address and port tuple, for instance **('127.0.0.1', 9051)**
:param str path: path where the control socket is located
:param str password: passphrase to authenticate to the socket
@ -238,7 +251,7 @@ def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/c
raise ValueError('The control_port argument for connect() should be an (address, port) tuple.')
elif not stem.util.connection.is_valid_ipv4_address(control_port[0]):
raise ValueError("'%s' isn't a vaid IPv4 address" % control_port[0])
elif not stem.util.connection.is_valid_port(control_port[1]):
elif control_port[1] != 'default' and not stem.util.connection.is_valid_port(control_port[1]):
raise ValueError("'%s' isn't a valid port" % control_port[1])
control_connection, error_msg = None, ''
@ -256,19 +269,20 @@ def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/c
address, port = control_port
try:
control_connection = stem.socket.ControlPort(address, port)
if port == 'default':
control_connection = _connection_for_default_port(address)
else:
control_connection = stem.socket.ControlPort(address, int(port))
except stem.SocketError as exc:
error_msg = CONNECT_MESSAGES['unable_to_use_port'].format(address = address, port = port, error = exc)
# If unable to connect to either a control socket or port then finally fail
# out. If we only attempted to connect to one of them then provide the error
# output from that. Otherwise we provide a more generic error message.
#
# We check for a 'tor.real' process name because that's what TBB uses.
if not control_connection:
if control_socket and control_port:
is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real')
is_tor_running = stem.util.system.is_running(COMMON_TOR_COMMANDS)
error_msg = CONNECT_MESSAGES['no_control_port'] if is_tor_running else CONNECT_MESSAGES['tor_isnt_running']
print(error_msg)
@ -361,7 +375,7 @@ def _connect_auth(control_socket, password, password_prompt, chroot_path, contro
return controller(control_socket, is_authenticated = True)
except IncorrectSocketType:
if isinstance(control_socket, stem.socket.ControlPort):
print(CONNECT_MESSAGES['wrong_port_type'].format(port = control_socket.get_port()))
print(CONNECT_MESSAGES['wrong_port_type'].format(port = control_socket.port))
else:
print(CONNECT_MESSAGES['wrong_socket_type'])
@ -574,6 +588,9 @@ def authenticate(controller, password = None, chroot_path = None, protocolinfo_r
else:
authenticate_cookie(controller, cookie_path, False)
if isinstance(controller, stem.control.BaseController):
controller._post_authentication()
return # success!
except OpenAuthRejected as exc:
auth_exceptions.append(exc)
@ -655,7 +672,7 @@ def authenticate_none(controller, suppress_ctl_errors = True):
pass
if not suppress_ctl_errors:
raise exc
raise
else:
raise OpenAuthRejected('Socket failed (%s)' % exc)
@ -725,7 +742,7 @@ def authenticate_password(controller, password, suppress_ctl_errors = True):
pass
if not suppress_ctl_errors:
raise exc
raise
else:
raise PasswordAuthRejected('Socket failed (%s)' % exc)
@ -815,7 +832,7 @@ def authenticate_cookie(controller, cookie_path, suppress_ctl_errors = True):
pass
if not suppress_ctl_errors:
raise exc
raise
else:
raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, False)
@ -912,7 +929,7 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True)
pass
if not suppress_ctl_errors:
raise exc
raise
else:
raise AuthChallengeFailed('Socket failed (%s)' % exc, cookie_path, True)
@ -920,7 +937,7 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True)
stem.response.convert('AUTHCHALLENGE', authchallenge_response)
except stem.ProtocolError as exc:
if not suppress_ctl_errors:
raise exc
raise
else:
raise AuthChallengeFailed('Unable to parse AUTHCHALLENGE response: %s' % exc, cookie_path)
@ -944,7 +961,7 @@ def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True)
pass
if not suppress_ctl_errors:
raise exc
raise
else:
raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, True, auth_response)
@ -972,11 +989,6 @@ def get_protocolinfo(controller):
the tor process running on it. If the socket is already closed then it is
first reconnected.
According to the control spec the cookie_file is an absolute path. However,
this often is not the case (especially for the Tor Browser Bundle). If the
path is relative then we'll make an attempt (which may not work) to correct
this (:trac:`1101`).
This can authenticate to either a :class:`~stem.control.BaseController` or
:class:`~stem.socket.ControlSocket`.
@ -1008,27 +1020,6 @@ def get_protocolinfo(controller):
raise stem.SocketError(exc)
stem.response.convert('PROTOCOLINFO', protocolinfo_response)
# attempt to expand relative cookie paths
if protocolinfo_response.cookie_path:
_expand_cookie_path(protocolinfo_response, stem.util.system.pid_by_name, 'tor')
# attempt to expand relative cookie paths via the control port or socket file
if isinstance(controller, stem.socket.ControlSocket):
control_socket = controller
else:
control_socket = controller.get_socket()
if isinstance(control_socket, stem.socket.ControlPort):
if control_socket.get_address() == '127.0.0.1':
pid_method = stem.util.system.pid_by_port
_expand_cookie_path(protocolinfo_response, pid_method, control_socket.get_port())
elif isinstance(control_socket, stem.socket.ControlSocketFile):
pid_method = stem.util.system.pid_by_open_file
_expand_cookie_path(protocolinfo_response, pid_method, control_socket.get_socket_path())
return protocolinfo_response
@ -1045,6 +1036,28 @@ def _msg(controller, message):
return controller.msg(message)
def _connection_for_default_port(address):
  """
  Attempts to provide a controller connection for either port 9051 (default for
  relays) or 9151 (default for Tor Browser). If both fail then this raises the
  exception for port 9051.

  :param str address: address to connect to

  :returns: :class:`~stem.socket.ControlPort` for the controller connection

  :raises: :class:`stem.SocketError` if we're unable to establish a connection
  """

  relay_port_error = None

  for port in (9051, 9151):
    try:
      return stem.socket.ControlPort(address, port)
    except stem.SocketError as exc:
      # remember the first failure so it's what we raise if both ports fail
      if relay_port_error is None:
        relay_port_error = exc

  raise relay_port_error
def _read_cookie(cookie_path, is_safecookie):
"""
Provides the contents of a given cookie file.
@ -1087,40 +1100,6 @@ def _read_cookie(cookie_path, is_safecookie):
raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie)
def _expand_cookie_path(protocolinfo_response, pid_resolver, pid_resolution_arg):
  """
  Attempts to expand a relative cookie path with the given pid resolver. This
  leaves the cookie_path alone if it's already absolute, **None**, or the
  system calls fail.

  The response's cookie_path attribute is updated in-place; nothing is
  returned.

  :param protocolinfo_response: PROTOCOLINFO response whose cookie_path we
    adjust
  :param pid_resolver: function that provides tor's pid (eg.
    stem.util.system.pid_by_name)
  :param pid_resolution_arg: argument passed to the pid_resolver
  """

  cookie_path = protocolinfo_response.cookie_path

  if cookie_path and not os.path.isabs(cookie_path):
    try:
      # resolve tor's cwd so we can anchor the relative path against it

      tor_pid = pid_resolver(pid_resolution_arg)

      if not tor_pid:
        raise IOError('pid lookup failed')

      tor_cwd = stem.util.system.cwd(tor_pid)

      if not tor_cwd:
        raise IOError('cwd lookup failed')

      cookie_path = stem.util.system.expand_path(cookie_path, tor_cwd)
    except IOError as exc:
      # best-effort: log which resolution method failed and keep the
      # relative path as-is

      resolver_labels = {
        stem.util.system.pid_by_name: ' by name',
        stem.util.system.pid_by_port: ' by port',
        stem.util.system.pid_by_open_file: ' by socket file',
      }

      pid_resolver_label = resolver_labels.get(pid_resolver, '')
      log.debug('unable to expand relative tor cookie path%s: %s' % (pid_resolver_label, exc))

  protocolinfo_response.cookie_path = cookie_path
class AuthenticationFailure(Exception):
"""
Base error for authentication failures.
@ -1265,7 +1244,9 @@ class NoAuthCookie(MissingAuthInfo):
super(NoAuthCookie, self).__init__(message)
self.is_safecookie = is_safecookie
# authentication exceptions ordered as per the authenticate function's pydocs
AUTHENTICATE_EXCEPTIONS = (
IncorrectSocketType,
UnrecognizedAuthMethods,

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -9,6 +9,8 @@ Package for parsing and processing descriptor data.
::
parse_file - Parses the descriptors in a file.
create - Creates a new custom descriptor.
create_signing_key - Cretes a signing key that can be used for creating descriptors.
Descriptor - Common parent for all descriptor file types.
|- get_path - location of the descriptor on disk if it came from a file
@ -27,6 +29,24 @@ Package for parsing and processing descriptor data.
and upfront runtime. However, if read time and memory aren't a concern then
**DOCUMENT** can provide you with a fully populated document.
Handlers don't change the fact that most methods that provide
descriptors return an iterator. In the case of **DOCUMENT** and
**BARE_DOCUMENT** that iterator would have just a single item -
the document itself.
Simple way to handle this is to call **next()** to get the iterator's one and
only value...
::
import stem.descriptor.remote
from stem.descriptor import DocumentHandler
consensus = next(stem.descriptor.remote.get_consensus(
document_handler = DocumentHandler.BARE_DOCUMENT,
)
=================== ===========
DocumentHandler Description
=================== ===========
@ -36,6 +56,29 @@ Package for parsing and processing descriptor data.
=================== ===========
"""
import base64
import codecs
import collections
import copy
import hashlib
import os
import random
import re
import string
import tarfile
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.str_tools
import stem.util.system
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
__all__ = [
'export',
'reader',
@ -50,26 +93,12 @@ __all__ = [
'Descriptor',
]
import base64
import codecs
import copy
import hashlib
import os
import re
import tarfile
UNSEEKABLE_MSG = """\
File object isn't seekable. Try wrapping it with a BytesIO instead...
import stem.prereq
import stem.util.enum
import stem.util.str_tools
import stem.util.system
from stem import str_type
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
content = my_file.read()
parsed_descriptors = stem.descriptor.parse_file(io.BytesIO(content))
"""
KEYWORD_CHAR = 'a-zA-Z0-9-'
WHITESPACE = ' \t'
@ -77,6 +106,17 @@ KEYWORD_LINE = re.compile('^([%s]+)(?:[%s]+(.*))?$' % (KEYWORD_CHAR, WHITESPACE)
SPECIFIC_KEYWORD_LINE = '^(%%s)(?:[%s]+(.*))?$' % WHITESPACE
PGP_BLOCK_START = re.compile('^-----BEGIN ([%s%s]+)-----$' % (KEYWORD_CHAR, WHITESPACE))
PGP_BLOCK_END = '-----END %s-----'
EMPTY_COLLECTION = ([], {}, set())
DIGEST_TYPE_INFO = b'\x00\x01'
DIGEST_PADDING = b'\xFF'
DIGEST_SEPARATOR = b'\x00'
CRYPTO_BLOB = """
MIGJAoGBAJv5IIWQ+WDWYUdyA/0L8qbIkEVH/cwryZWoIaPAzINfrw1WfNZGtBmg
skFtXhOHHqTRN4GPPrZsAIUOQGzQtGb66IQgT4tO/pj+P6QmSCCdTfhvGfgTCsC+
WPi4Fl2qryzTb3QO5r5x7T8OsG2IBUET1bLQzmtbC560SYR49IvVAgMBAAE=
"""
DocumentHandler = stem.util.enum.UppercaseEnum(
'ENTRIES',
@ -85,7 +125,19 @@ DocumentHandler = stem.util.enum.UppercaseEnum(
)
def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, **kwargs):
class SigningKey(collections.namedtuple('SigningKey', ['private', 'public', 'public_digest'])):
"""
Key used by relays to sign their server and extrainfo descriptors.
.. versionadded:: 1.6.0
:var cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private: private key
:var cryptography.hazmat.backends.openssl.rsa._RSAPublicKey public: public key
:var bytes public_digest: block that can be used for the a server descrptor's 'signing-key' field
"""
def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, normalize_newlines = None, **kwargs):
"""
Simple function to read the descriptor contents from a file, providing an
iterator for its :class:`~stem.descriptor.__init__.Descriptor` contents.
@ -94,7 +146,7 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume
tries to determine the descriptor type based on the following...
* The @type annotation on the first line. These are generally only found in
the `CollecTor archives <https://collector.torproject.org/formats.html#relay-descriptors>`_.
the `CollecTor archives <https://metrics.torproject.org/collector.html#relay-descriptors>`_.
* The filename if it matches something from tor's data directory. For
instance, tor's 'cached-descriptors' contains server descriptors.
@ -138,11 +190,13 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume
my_descriptor_file = open(descriptor_path, 'rb')
:param str,file,tarfile descriptor_file: path or opened file with the descriptor contents
:param str descriptor_type: `descriptor type <https://collector.torproject.org/formats.html>`_, this is guessed if not provided
:param str descriptor_type: `descriptor type <https://metrics.torproject.org/collector.html#data-formats>`_, this is guessed if not provided
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse the :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param bool normalize_newlines: converts windows newlines (CRLF), this is the
default when reading data directories on windows
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.__init__.Descriptor` instances in the file
@ -157,7 +211,7 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume
handler = None
if isinstance(descriptor_file, (bytes, str_type)):
if stem.util._is_str(descriptor_file):
if stem.util.system.is_tarfile(descriptor_file):
handler = _parse_file_for_tar_path
else:
@ -171,6 +225,16 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume
return
# Not all files are seekable. If unseekable then advising the user.
#
# Python 3.x adds an io.seekable() method, but not an option with python 2.x
# so using an experimental call to tell() to determine this.
try:
descriptor_file.tell()
except IOError:
raise IOError(UNSEEKABLE_MSG)
# The tor descriptor specifications do not provide a reliable method for
# identifying a descriptor file's type and version so we need to guess
# based on its filename. Metrics descriptors, however, can be identified
@ -186,47 +250,48 @@ def parse_file(descriptor_file, descriptor_type = None, validate = False, docume
descriptor_path = getattr(descriptor_file, 'name', None)
filename = '<undefined>' if descriptor_path is None else os.path.basename(descriptor_file.name)
file_parser = None
if descriptor_type is not None:
descriptor_type_match = re.match('^(\S+) (\d+).(\d+)$', descriptor_type)
def parse(descriptor_file):
if normalize_newlines:
descriptor_file = NewlineNormalizer(descriptor_file)
if descriptor_type_match:
desc_type, major_version, minor_version = descriptor_type_match.groups()
file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs)
if descriptor_type is not None:
descriptor_type_match = re.match('^(\S+) (\d+).(\d+)$', descriptor_type)
if descriptor_type_match:
desc_type, major_version, minor_version = descriptor_type_match.groups()
return _parse_metrics_file(desc_type, int(major_version), int(minor_version), descriptor_file, validate, document_handler, **kwargs)
else:
raise ValueError("The descriptor_type must be of the form '<type> <major_version>.<minor_version>'")
elif metrics_header_match:
# Metrics descriptor handling
desc_type, major_version, minor_version = metrics_header_match.groups()
return _parse_metrics_file(desc_type, int(major_version), int(minor_version), descriptor_file, validate, document_handler, **kwargs)
else:
raise ValueError("The descriptor_type must be of the form '<type> <major_version>.<minor_version>'")
elif metrics_header_match:
# Metrics descriptor handling
# Cached descriptor handling. These contain multiple descriptors per file.
desc_type, major_version, minor_version = metrics_header_match.groups()
file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs)
else:
# Cached descriptor handling. These contain multiple descriptors per file.
if normalize_newlines is None and stem.util.system.is_windows():
descriptor_file = NewlineNormalizer(descriptor_file)
if filename == 'cached-descriptors' or filename == 'cached-descriptors.new':
file_parser = lambda f: stem.descriptor.server_descriptor._parse_file(f, validate = validate, **kwargs)
elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new':
file_parser = lambda f: stem.descriptor.extrainfo_descriptor._parse_file(f, validate = validate, **kwargs)
elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new':
file_parser = lambda f: stem.descriptor.microdescriptor._parse_file(f, validate = validate, **kwargs)
elif filename == 'cached-consensus':
file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, validate = validate, document_handler = document_handler, **kwargs)
elif filename == 'cached-microdesc-consensus':
file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs)
if filename == 'cached-descriptors' or filename == 'cached-descriptors.new':
return stem.descriptor.server_descriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new':
return stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new':
return stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs)
elif filename == 'cached-consensus':
return stem.descriptor.networkstatus._parse_file(descriptor_file, validate = validate, document_handler = document_handler, **kwargs)
elif filename == 'cached-microdesc-consensus':
return stem.descriptor.networkstatus._parse_file(descriptor_file, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs)
else:
raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line))
if file_parser:
for desc in file_parser(descriptor_file):
if descriptor_path is not None:
desc._set_path(os.path.abspath(descriptor_path))
for desc in parse(descriptor_file):
if descriptor_path is not None:
desc._set_path(os.path.abspath(descriptor_path))
yield desc
return
# Not recognized as a descriptor file.
raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line))
yield desc
def _parse_file_for_path(descriptor_file, *args, **kwargs):
@ -253,6 +318,9 @@ def _parse_file_for_tarfile(descriptor_file, *args, **kwargs):
if tar_entry.isfile():
entry = descriptor_file.extractfile(tar_entry)
if tar_entry.size == 0:
continue
try:
for desc in parse_file(entry, *args, **kwargs):
desc._set_archive_path(entry.name)
@ -320,6 +388,78 @@ def _parse_metrics_file(descriptor_type, major_version, minor_version, descripto
raise TypeError("Unrecognized metrics descriptor format. type: '%s', version: '%i.%i'" % (descriptor_type, major_version, minor_version))
def _descriptor_content(attr = None, exclude = (), header_template = (), footer_template = ()):
  """
  Constructs minimal descriptor content from the given attributes. Output is
  assembled as...

  * header_template entries (with any matching attr values substituted)
  * leftover attr entries
  * footer_template entries (with any matching attr values substituted)

  For example, a header_template of (('nickname', 'foobar'), ('fingerprint',
  '12345')) with attr {'nickname': 'caerSidi', 'contact': 'atagar'} yields...

  ::

    nickname caerSidi
    fingerprint 12345
    contact atagar

  :param dict attr: keyword/value mappings to be included in the descriptor
  :param list exclude: mandatory keywords to exclude from the descriptor
  :param tuple header_template: key/value pairs for mandatory fields before unrecognized content
  :param tuple footer_template: key/value pairs for mandatory fields after unrecognized content

  :returns: bytes with the requested descriptor content
  """

  attr = {} if attr is None else OrderedDict(attr)  # shallow copy since we pop entries below
  header_lines = []
  footer_lines = []

  for lines, template in ((header_lines, header_template), (footer_lines, footer_template)):
    for keyword, default in template:
      if keyword in exclude:
        continue

      value = stem.util.str_tools._to_unicode(attr.pop(keyword, default))

      if value is None:
        continue
      elif isinstance(value, (tuple, list)):
        lines.extend('%s %s' % (keyword, entry) for entry in value)
      elif value == '':
        lines.append(keyword)
      elif value.startswith('\n'):
        # values such as crypto blocks start on the following line
        lines.append('%s%s' % (keyword, value))
      else:
        lines.append('%s %s' % (keyword, value))

  extra_lines = []

  for keyword, value in attr.items():
    if isinstance(value, (tuple, list)):
      extra_lines.extend('%s %s' % (keyword, entry) for entry in value)
    else:
      extra_lines.append('%s %s' % (keyword, value))

  return stem.util.str_tools._to_bytes('\n'.join(header_lines + extra_lines + footer_lines))
def _value(line, entries):
return entries[line][0][0]
@ -328,13 +468,18 @@ def _values(line, entries):
return [entry[0] for entry in entries[line]]
def _parse_simple_line(keyword, attribute):
def _parse_simple_line(keyword, attribute, func = None):
def _parse(descriptor, entries):
setattr(descriptor, attribute, _value(keyword, entries))
value = _value(keyword, entries)
setattr(descriptor, attribute, func(value) if func else value)
return _parse
def _parse_if_present(keyword, attribute):
return lambda descriptor, entries: setattr(descriptor, attribute, keyword in entries)
def _parse_bytes_line(keyword, attribute):
def _parse(descriptor, entries):
line_match = re.search(stem.util.str_tools._to_bytes('^(opt )?%s(?:[%s]+(.*))?$' % (keyword, WHITESPACE)), descriptor.get_bytes(), re.MULTILINE)
@ -377,6 +522,37 @@ def _parse_forty_character_hex(keyword, attribute):
return _parse
def _parse_protocol_line(keyword, attribute):
  def _parse(descriptor, entries):
    # Parses protocol version listings such as...
    #
    #   Cons=1-2 Desc=1-2 DirCache=1 HSDir=1
    #
    # Each value is either a single version or an inclusive 'min-max' range,
    # expanded here into a flat list of versions.

    value = _value(keyword, entries)
    protocols = OrderedDict()

    for proto, ranges in _mappings_for(keyword, value):
      versions = []

      if not ranges:
        continue

      for piece in ranges.split(','):
        if '-' in piece:
          low, high = piece.split('-', 1)
        else:
          low = high = piece

        if not (low.isdigit() and high.isdigit()):
          raise ValueError('Protocol values should be a number or number range, but was: %s %s' % (keyword, value))

        versions.extend(range(int(low), int(high) + 1))

      protocols[proto] = versions

    setattr(descriptor, attribute, protocols)

  return _parse
def _parse_key_block(keyword, attribute, expected_block_type, value_attribute = None):
def _parse(descriptor, entries):
value, block_type, block_contents = entries[keyword][0]
@ -392,6 +568,48 @@ def _parse_key_block(keyword, attribute, expected_block_type, value_attribute =
return _parse
def _mappings_for(keyword, value, require_value = False, divider = ' '):
"""
Parses an attribute as a series of 'key=value' mappings. Unlike _parse_*
functions this is a helper, returning the attribute value rather than setting
a descriptor field. This way parsers can perform additional validations.
:param str keyword: descriptor field being parsed
:param str value: 'attribute => values' mappings to parse
:param str divider: separator between the key/value mappings
:param bool require_value: validates that values are not empty
:returns: **generator** with the key/value of the map attribute
:raises: **ValueError** if descriptor content is invalid
"""
if value is None:
return # no descripoter value to process
elif value == '':
return # descriptor field was present, but blank
for entry in value.split(divider):
if '=' not in entry:
raise ValueError("'%s' should be a series of 'key=value' pairs but was: %s" % (keyword, value))
k, v = entry.split('=', 1)
if require_value and not v:
raise ValueError("'%s' line's %s mapping had a blank value: %s" % (keyword, k, value))
yield k, v
def _copy(default):
  # Copies a default attribute value so callers can't mutate the shared
  # template.

  if default is None or isinstance(default, (bool, stem.exit_policy.ExitPolicy)):
    return default  # treated as immutable, safe to share

  if default in EMPTY_COLLECTION:
    return type(default)()  # constructing an empty collection beats copying one

  return copy.copy(default)
class Descriptor(object):
"""
Common parent for all types of descriptors.
@ -408,6 +626,55 @@ class Descriptor(object):
self._entries = {}
self._unrecognized_lines = []
  @classmethod
  def content(cls, attr = None, exclude = (), sign = False):
    """
    Creates descriptor content with the given attributes. Mandatory fields are
    filled with dummy information unless data is supplied. This doesn't yet
    create a valid signature.

    .. versionadded:: 1.6.0

    :param dict attr: keyword/value mappings to be included in the descriptor
    :param list exclude: mandatory keywords to exclude from the descriptor, this
      results in an invalid descriptor
    :param bool sign: includes cryptographic signatures and digests if True

    :returns: **str** with the content of a descriptor

    :raises:
      * **ImportError** if cryptography is unavailable and sign is True
      * **NotImplementedError** if not implemented for this descriptor type
    """

    # Subclasses that support creation override this; the base implementation
    # only raises (create() below delegates to this method).
    raise NotImplementedError("The create and content methods haven't been implemented for %s" % cls.__name__)
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False):
"""
Creates a descriptor with the given attributes. Mandatory fields are filled
with dummy information unless data is supplied. This doesn't yet create a
valid signature.
.. versionadded:: 1.6.0
:param dict attr: keyword/value mappings to be included in the descriptor
:param list exclude: mandatory keywords to exclude from the descriptor, this
results in an invalid descriptor
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param bool sign: includes cryptographic signatures and digests if True
:returns: :class:`~stem.descriptor.Descriptor` subclass
:raises:
* **ValueError** if the contents is malformed and validate is True
* **ImportError** if cryptography is unavailable and sign is True
* **NotImplementedError** if not implemented for this descriptor type
"""
return cls(cls.content(attr, exclude, sign), validate = validate)
def get_path(self):
"""
Provides the absolute path that we loaded this descriptor from.
@ -471,12 +738,6 @@ class Descriptor(object):
if parser_for_line is None:
parser_for_line = self.PARSER_FOR_LINE
# set defaults
for attr in self.ATTRIBUTES:
if not hasattr(self, attr):
setattr(self, attr, copy.copy(self.ATTRIBUTES[attr][0]))
for keyword, values in list(entries.items()):
try:
if keyword in parser_for_line:
@ -489,9 +750,9 @@ class Descriptor(object):
line += '\n%s' % block_contents
self._unrecognized_lines.append(line)
except ValueError as exc:
except ValueError:
if validate:
raise exc
raise
  def _set_path(self, path):
    # Records the filesystem path this descriptor was read from, as reported
    # back by get_path(). Set by parse_file() with an absolute path.
    self._path = path
@ -515,28 +776,25 @@ class Descriptor(object):
"""
if not stem.prereq.is_crypto_available():
raise ValueError('Generating the signed digest requires pycrypto')
raise ValueError('Generating the signed digest requires the cryptography module')
from Crypto.Util import asn1
from Crypto.Util.number import bytes_to_long, long_to_bytes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import load_der_public_key
from cryptography.utils import int_to_bytes, int_from_bytes
# get the ASN.1 sequence
seq = asn1.DerSequence()
seq.decode(_bytes_for_block(signing_key))
modulus, public_exponent = seq[0], seq[1]
key = load_der_public_key(_bytes_for_block(signing_key), default_backend())
modulus = key.public_numbers().n
public_exponent = key.public_numbers().e
sig_as_bytes = _bytes_for_block(signature)
sig_as_long = bytes_to_long(sig_as_bytes) # convert signature to an int
blocksize = 128 # block size will always be 128 for a 1024 bit key
sig_as_long = int_from_bytes(sig_as_bytes, byteorder='big') # convert signature to an int
blocksize = len(sig_as_bytes) # 256B for NetworkStatusDocuments, 128B for others
# use the public exponent[e] & the modulus[n] to decrypt the int
decrypted_int = pow(sig_as_long, public_exponent, modulus)
# convert the int to a byte array
decrypted_bytes = long_to_bytes(decrypted_int, blocksize)
decrypted_bytes = int_to_bytes(decrypted_int, blocksize)
############################################################################
# The decrypted bytes should have a structure exactly along these lines.
@ -551,7 +809,7 @@ class Descriptor(object):
############################################################################
try:
if decrypted_bytes.index(b'\x00\x01') != 0:
if decrypted_bytes.index(DIGEST_TYPE_INFO) != 0:
raise ValueError('Verification failed, identifier missing')
except ValueError:
raise ValueError('Verification failed, malformed data')
@ -560,7 +818,7 @@ class Descriptor(object):
identifier_offset = 2
# find the separator
seperator_index = decrypted_bytes.index(b'\x00', identifier_offset)
seperator_index = decrypted_bytes.index(DIGEST_SEPARATOR, identifier_offset)
except ValueError:
raise ValueError('Verification failed, seperator not found')
@ -594,19 +852,38 @@ class Descriptor(object):
return stem.util.str_tools._to_unicode(digest_hash.hexdigest().upper())
def __getattr__(self, name):
# If attribute isn't already present we might be lazy loading it...
# We can't use standard hasattr() since it calls this function, recursing.
# Doing so works since it stops recursing after several dozen iterations
# (not sure why), but horrible in terms of performance.
if self._lazy_loading and name in self.ATTRIBUTES:
def has_attr(attr):
try:
super(Descriptor, self).__getattribute__(attr)
return True
except:
return False
# If an attribute we should have isn't present it means either...
#
# a. we still need to lazy load this
# b. we read the whole descriptor but it wasn't present, so needs the default
if name in self.ATTRIBUTES and not has_attr(name):
default, parsing_function = self.ATTRIBUTES[name]
try:
parsing_function(self, self._entries)
except (ValueError, KeyError):
if self._lazy_loading:
try:
# despite having a validation failure check to see if we set something
return super(Descriptor, self).__getattribute__(name)
except AttributeError:
setattr(self, name, copy.copy(default))
parsing_function(self, self._entries)
except (ValueError, KeyError):
# Set defaults for anything the parsing function should've covered.
# Despite having a validation failure some attributes might be set in
# which case we keep them.
for attr_name, (attr_default, attr_parser) in self.ATTRIBUTES.items():
if parsing_function == attr_parser and not has_attr(attr_name):
setattr(self, attr_name, _copy(attr_default))
else:
setattr(self, name, _copy(default))
return super(Descriptor, self).__getattribute__(name)
@ -617,6 +894,31 @@ class Descriptor(object):
return self._raw_contents
class NewlineNormalizer(object):
  """
  File wrapper that normalizes CRLF line endings to unix-style newlines,
  mirroring the read methods and seek/tell of the wrapped binary file.
  """

  def __init__(self, wrapped_file):
    self._wrapped_file = wrapped_file
    self.name = getattr(wrapped_file, 'name', None)  # mimic file objects; None when unnamed (eg. BytesIO)

  def read(self, *args):
    return self._wrapped_file.read(*args).replace(b'\r\n', b'\n')

  def readline(self, *args):
    return self._wrapped_file.readline(*args).replace(b'\r\n', b'\n')

  def readlines(self, *args):
    # Bugfix: readlines() splits on '\n', so CRLF lines end with b'\r\n' and
    # the prior rstrip(b'\r') never removed the carriage return (it wasn't the
    # final byte). Use the same replacement as read()/readline() so all three
    # methods normalize consistently.
    return [line.replace(b'\r\n', b'\n') for line in self._wrapped_file.readlines(*args)]

  def seek(self, *args):
    return self._wrapped_file.seek(*args)

  def tell(self, *args):
    return self._wrapped_file.tell(*args)
def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_first = False, skip = False, end_position = None, include_ending_keyword = False):
"""
Reads from the descriptor file until we get to one of the given keywords or reach the
@ -636,23 +938,17 @@ def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_fi
**True**
"""
if skip:
content = None
content_append = lambda x: None
else:
content = []
content_append = content.append
content = None if skip else []
ending_keyword = None
if isinstance(keywords, (bytes, str_type)):
if stem.util._is_str(keywords):
keywords = (keywords,)
if ignore_first:
first_line = descriptor_file.readline()
if first_line:
content_append(first_line)
if first_line and content is not None:
content.append(first_line)
keyword_match = re.compile(SPECIFIC_KEYWORD_LINE % '|'.join(keywords))
@ -674,12 +970,12 @@ def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_fi
if not inclusive:
descriptor_file.seek(last_position)
else:
content_append(line)
elif content is not None:
content.append(line)
break
else:
content_append(line)
elif content is not None:
content.append(line)
if include_ending_keyword:
return (content, ending_keyword)
@ -741,7 +1037,109 @@ def _get_pseudo_pgp_block(remaining_contents):
return None
def _get_descriptor_components(raw_contents, validate, extra_keywords = ()):
def create_signing_key(private_key = None):
  """
  Serializes a signing key if we have one. Otherwise this creates a new signing
  key we can use to create descriptors.

  .. versionadded:: 1.6.0

  :param cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private_key: private key

  :returns: :class:`~stem.descriptor.__init__.SigningKey` that can be used to
    create descriptors

  :raises: **ImportError** if the cryptography module is unavailable
  """

  if not stem.prereq.is_crypto_available():
    raise ImportError('Signing requires the cryptography module')

  # deferred import so the module loads without cryptography installed

  from cryptography.hazmat.backends import default_backend
  from cryptography.hazmat.primitives import serialization
  from cryptography.hazmat.primitives.asymmetric import rsa

  if private_key is None:
    # generate a fresh 1024-bit RSA key when the caller didn't supply one

    private_key = rsa.generate_private_key(
      public_exponent = 65537,
      key_size = 1024,
      backend = default_backend(),
    )

    # When signing the cryptography module includes a constant indicating
    # the hash algorithm used. Tor doesn't. This causes signature
    # validation failures and unfortunately cryptography have no nice way
    # of excluding these so we need to mock out part of their internals...
    #
    # https://github.com/pyca/cryptography/issues/3713

    def no_op(*args, **kwargs):
      return 1

    private_key._backend._lib.EVP_PKEY_CTX_set_signature_md = no_op
    private_key._backend.openssl_assert = no_op

  public_key = private_key.public_key()

  # PEM-encoded public key, prefixed with a newline so it can follow a
  # keyword on descriptor lines

  public_digest = b'\n' + public_key.public_bytes(
    encoding = serialization.Encoding.PEM,
    format = serialization.PublicFormat.PKCS1,
  ).strip()

  return SigningKey(private_key, public_key, public_digest)
def _append_router_signature(content, private_key):
  """
  Appends a router signature to a server or extrainfo descriptor.

  :param bytes content: descriptor content up through 'router-signature\\n'
  :param cryptography.hazmat.backends.openssl.rsa._RSAPrivateKey private_key:
    private relay signing key

  :returns: **bytes** with the signed descriptor content
  """

  if not stem.prereq.is_crypto_available():
    raise ImportError('Signing requires the cryptography module')

  from cryptography.hazmat.primitives import hashes
  from cryptography.hazmat.primitives.asymmetric import padding

  # PKCS1 v1.5 signature of the content, base64 encoded and wrapped at 64
  # columns between BEGIN/END armor lines

  signature = base64.b64encode(private_key.sign(content, padding.PKCS1v15(), hashes.SHA1()))

  sig_lines = [b'-----BEGIN SIGNATURE-----']
  sig_lines += stem.util.str_tools._split_by_length(signature, 64)
  sig_lines.append(b'-----END SIGNATURE-----\n')

  return content + b'\n'.join(sig_lines)
def _random_nickname():
return ('Unnamed%i' % random.randint(0, 100000000000000))[:19]
def _random_fingerprint():
return ('%040x' % random.randrange(16 ** 40)).upper()
def _random_ipv4_address():
return '%i.%i.%i.%i' % (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def _random_date():
return '%i-%02i-%02i %02i:%02i:%02i' % (random.randint(2000, 2015), random.randint(1, 12), random.randint(1, 20), random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))
def _random_crypto_blob(block_type = None):
  """
  Provides a random string that can be used for crypto blocks, optionally
  wrapped in '-----BEGIN/END-----' armor lines.
  """

  # 140 random bytes, base64 encoded and wrapped at 64 columns

  encoded = base64.b64encode(os.urandom(140))
  blob = '\n'.join(stem.util.str_tools._split_by_length(stem.util.str_tools._to_unicode(encoded), 64))

  if not block_type:
    return blob

  return '\n-----BEGIN %s-----\n%s\n-----END %s-----' % (block_type, blob, block_type)
def _descriptor_components(raw_contents, validate, extra_keywords = (), non_ascii_fields = ()):
"""
Initial breakup of the server descriptor contents to make parsing easier.
@ -760,6 +1158,7 @@ def _get_descriptor_components(raw_contents, validate, extra_keywords = ()):
True, skips these checks otherwise
:param list extra_keywords: entity keywords to put into a separate listing
with ordering intact
:param list non_ascii_fields: fields containing non-ascii content
:returns:
**collections.OrderedDict** with the 'keyword => (value, pgp key) entries'
@ -815,11 +1214,18 @@ def _get_descriptor_components(raw_contents, validate, extra_keywords = ()):
block_type, block_contents = block_attr
else:
block_type, block_contents = None, None
except ValueError as exc:
except ValueError:
if not validate:
continue
raise exc
raise
if validate and keyword not in non_ascii_fields:
try:
value.encode('ascii')
except UnicodeError:
replaced = ''.join([(char if char in string.printable else '?') for char in value])
raise ValueError("'%s' line had non-ascii content: %s" % (keyword, replaced))
if keyword in extra_keywords:
extra_entries.append('%s %s' % (keyword, value))
@ -831,6 +1237,7 @@ def _get_descriptor_components(raw_contents, validate, extra_keywords = ()):
else:
return entries
# importing at the end to avoid circular dependencies on our Descriptor class
import stem.descriptor.server_descriptor

View file

@ -0,0 +1,271 @@
# Copyright 2017-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for `Tor Ed25519 certificates
<https://gitweb.torproject.org/torspec.git/tree/cert-spec.txt>`_, which are
used to validate the key used to sign server descriptors.
.. versionadded:: 1.6.0
**Module Overview:**
::
Ed25519Certificate - Ed25519 signing key certificate
| +- Ed25519CertificateV1 - version 1 Ed25519 certificate
| |- is_expired - checks if certificate is presently expired
| +- validate - validates signature of a server descriptor
|
+- parse - reads base64 encoded certificate data
Ed25519Extension - extension included within an Ed25519Certificate
.. data:: CertType (enum)
Purpose of Ed25519 certificate. As new certificate versions are added this
enumeration will expand.
============== ===========
CertType Description
============== ===========
**SIGNING** signing a signing key with an identity key
**LINK_CERT** TLS link certificate signed with ed25519 signing key
**AUTH** authentication key signed with ed25519 signing key
============== ===========
.. data:: ExtensionType (enum)
Recognized extension types.
==================== ===========
ExtensionType Description
==================== ===========
**HAS_SIGNING_KEY** includes key used to sign the certificate
==================== ===========
.. data:: ExtensionFlag (enum)
Flags that can be assigned to Ed25519 certificate extensions.
====================== ===========
ExtensionFlag Description
====================== ===========
**AFFECTS_VALIDATION** extension affects whether the certificate is valid
**UNKNOWN** extension includes flags not yet recognized by stem
====================== ===========
"""
import base64
import binascii
import collections
import datetime
import hashlib
import stem.prereq
import stem.util.enum
import stem.util.str_tools
ED25519_HEADER_LENGTH = 40
ED25519_SIGNATURE_LENGTH = 64
ED25519_ROUTER_SIGNATURE_PREFIX = b'Tor router descriptor signature v1'
CertType = stem.util.enum.UppercaseEnum('SIGNING', 'LINK_CERT', 'AUTH')
ExtensionType = stem.util.enum.Enum(('HAS_SIGNING_KEY', 4),)
ExtensionFlag = stem.util.enum.UppercaseEnum('AFFECTS_VALIDATION', 'UNKNOWN')
class Ed25519Extension(collections.namedtuple('Ed25519Extension', ['type', 'flags', 'flag_int', 'data'])):
  """
  Extension within an Ed25519 certificate. These are immutable namedtuples,
  constructed while parsing a version 1 certificate's extension fields.

  :var int type: extension type
  :var list flags: extension attribute flags
  :var int flag_int: integer encoding of the extension attribute flags
  :var bytes data: data the extension concerns
  """
class Ed25519Certificate(object):
  """
  Base class for an Ed25519 certificate.

  :var int version: certificate format version
  :var str encoded: base64 encoded ed25519 certificate
  """

  def __init__(self, version, encoded):
    self.version = version
    self.encoded = encoded

  @staticmethod
  def parse(content):
    """
    Parses the given base64 encoded data as an Ed25519 certificate.

    :param str content: base64 encoded certificate

    :returns: :class:`~stem.descriptor.certificate.Ed25519Certificate` subclass
      for the given certificate

    :raises: **ValueError** if content is malformed
    """

    try:
      decoded = base64.b64decode(stem.util.str_tools._to_bytes(content))

      if not decoded:
        raise TypeError('empty')  # reuse the malformed-base64 error path below
    except (TypeError, binascii.Error) as exc:
      # fix: 'propoerly' typo in the user-facing error message
      raise ValueError("Ed25519 certificate wasn't properly base64 encoded (%s):\n%s" % (exc, content))

    # first byte of the decoded certificate is its format version

    version = stem.util.str_tools._to_int(decoded[0:1])

    if version == 1:
      return Ed25519CertificateV1(version, content, decoded)
    else:
      raise ValueError('Ed25519 certificate is version %i. Parser presently only supports version 1.' % version)
class Ed25519CertificateV1(Ed25519Certificate):
  """
  Version 1 Ed25519 certificate, which are used for signing tor server
  descriptors.

  :var CertType type: certificate purpose
  :var datetime expiration: expiration of the certificate
  :var int key_type: format of the key
  :var bytes key: key content
  :var list extensions: :class:`~stem.descriptor.certificate.Ed25519Extension` in this certificate
  :var bytes signature: certificate signature
  """

  def __init__(self, version, encoded, decoded):
    # Certificate layout parsed below...
    #
    #   byte 0      version (already checked by Ed25519Certificate.parse)
    #   byte 1      certificate type
    #   bytes 2-5   expiration, in hours since the unix epoch
    #   byte 6      key type
    #   bytes 7-38  key content (32 bytes)
    #   byte 39     extension count
    #   ...         extensions
    #   last 64     signature

    super(Ed25519CertificateV1, self).__init__(version, encoded)

    if len(decoded) < ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH:
      raise ValueError('Ed25519 certificate was %i bytes, but should be at least %i' % (len(decoded), ED25519_HEADER_LENGTH + ED25519_SIGNATURE_LENGTH))

    cert_type = stem.util.str_tools._to_int(decoded[1:2])

    # types 0-3 and 7 are reserved; only 4-6 are valid certificate purposes

    if cert_type in (0, 1, 2, 3):
      raise ValueError('Ed25519 certificate cannot have a type of %i. This is reserved to avoid conflicts with tor CERTS cells.' % cert_type)
    elif cert_type == 4:
      self.type = CertType.SIGNING
    elif cert_type == 5:
      self.type = CertType.LINK_CERT
    elif cert_type == 6:
      self.type = CertType.AUTH
    elif cert_type == 7:
      raise ValueError('Ed25519 certificate cannot have a type of 7. This is reserved for RSA identity cross-certification.')
    else:
      raise ValueError("BUG: Ed25519 certificate type is decoded from one byte. It shouldn't be possible to have a value of %i." % cert_type)

    # expiration time is in hours since epoch

    try:
      self.expiration = datetime.datetime.utcfromtimestamp(stem.util.str_tools._to_int(decoded[2:6]) * 3600)
    except ValueError as exc:
      raise ValueError('Invalid expiration timestamp (%s): %s' % (exc, stem.util.str_tools._to_int(decoded[2:6]) * 3600))

    self.key_type = stem.util.str_tools._to_int(decoded[6:7])
    self.key = decoded[7:39]
    self.signature = decoded[-ED25519_SIGNATURE_LENGTH:]

    # Extensions occupy the span between the fixed header and the trailing
    # signature. Each has a four byte header: size (2 bytes), type (1 byte),
    # and flags (1 byte), followed by 'size' bytes of data.

    self.extensions = []
    extension_count = stem.util.str_tools._to_int(decoded[39:40])
    remaining_data = decoded[40:-ED25519_SIGNATURE_LENGTH]

    for i in range(extension_count):
      if len(remaining_data) < 4:
        raise ValueError('Ed25519 extension is missing header field data')

      extension_length = stem.util.str_tools._to_int(remaining_data[:2])
      extension_type = stem.util.str_tools._to_int(remaining_data[2:3])
      extension_flags = stem.util.str_tools._to_int(remaining_data[3:4])
      extension_data = remaining_data[4:4 + extension_length]

      if extension_length != len(extension_data):
        raise ValueError("Ed25519 extension is truncated. It should have %i bytes of data but there's only %i." % (extension_length, len(extension_data)))

      # bit 0 of the flags is AFFECTS_VALIDATION; any other set bit is one we
      # don't yet recognize

      flags, remaining_flags = [], extension_flags

      if remaining_flags % 2 == 1:
        flags.append(ExtensionFlag.AFFECTS_VALIDATION)
        remaining_flags -= 1

      if remaining_flags:
        flags.append(ExtensionFlag.UNKNOWN)

      if extension_type == ExtensionType.HAS_SIGNING_KEY and len(extension_data) != 32:
        raise ValueError('Ed25519 HAS_SIGNING_KEY extension must be 32 bytes, but was %i.' % len(extension_data))

      self.extensions.append(Ed25519Extension(extension_type, flags, extension_flags, extension_data))
      remaining_data = remaining_data[4 + extension_length:]

    if remaining_data:
      raise ValueError('Ed25519 certificate had %i bytes of unused extension data' % len(remaining_data))

  def is_expired(self):
    """
    Checks if this certificate is presently expired or not.

    :returns: **True** if the certificate has expired, **False** otherwise
    """

    # NOTE(review): self.expiration is derived via utcfromtimestamp() (UTC)
    # but compared against the local-time datetime.now() — confirm whether
    # utcnow() was intended here.
    return datetime.datetime.now() > self.expiration

  def validate(self, server_descriptor):
    """
    Validates our signing key and that the given descriptor content matches its
    Ed25519 signature.

    :param stem.descriptor.server_descriptor.Ed25519 server_descriptor: relay
      server descriptor to validate

    :raises:
      * **ValueError** if signing key or descriptor are invalid
      * **ImportError** if pynacl module is unavailable
    """

    if not stem.prereq._is_pynacl_available():
      raise ImportError('Certificate validation requires the pynacl module')

    import nacl.signing
    import nacl.encoding
    from nacl.exceptions import BadSignatureError

    descriptor_content = server_descriptor.get_bytes()
    signing_key = None

    # The signing key comes either from the descriptor's own master key or a
    # HAS_SIGNING_KEY extension of this certificate.

    if server_descriptor.ed25519_master_key:
      signing_key = nacl.signing.VerifyKey(stem.util.str_tools._to_bytes(server_descriptor.ed25519_master_key) + b'=', encoder = nacl.encoding.Base64Encoder)
    else:
      for extension in self.extensions:
        if extension.type == ExtensionType.HAS_SIGNING_KEY:
          signing_key = nacl.signing.VerifyKey(extension.data)
          break

    if not signing_key:
      raise ValueError('Server descriptor missing an ed25519 signing key')

    # first check this certificate's own signature (everything but the
    # trailing 64 signature bytes is what was signed)

    try:
      signing_key.verify(base64.b64decode(stem.util.str_tools._to_bytes(self.encoded))[:-ED25519_SIGNATURE_LENGTH], self.signature)
    except BadSignatureError as exc:
      raise ValueError('Ed25519KeyCertificate signing key is invalid (%s)' % exc)

    # ed25519 signature validates descriptor content up until the signature itself

    if b'router-sig-ed25519 ' not in descriptor_content:
      raise ValueError("Descriptor doesn't have a router-sig-ed25519 entry.")

    # signed span runs through 'router-sig-ed25519 ' inclusive (19 bytes)

    signed_content = descriptor_content[:descriptor_content.index(b'router-sig-ed25519 ') + 19]
    descriptor_sha256_digest = hashlib.sha256(ED25519_ROUTER_SIGNATURE_PREFIX + signed_content).digest()

    # descriptor signatures drop base64 padding, so restore it before decoding

    missing_padding = len(server_descriptor.ed25519_signature) % 4
    signature_bytes = base64.b64decode(stem.util.str_tools._to_bytes(server_descriptor.ed25519_signature) + b'=' * missing_padding)

    try:
      verify_key = nacl.signing.VerifyKey(self.key)
      verify_key.verify(descriptor_sha256_digest, signature_bytes)
    except BadSignatureError as exc:
      raise ValueError('Descriptor Ed25519 certificate signature invalid (%s)' % exc)

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -10,6 +10,11 @@ Toolkit for exporting descriptors to other formats.
export_csv - Exports descriptors to a CSV
export_csv_file - Writes exported CSV output to a file
.. deprecated:: 1.7.0
This module will likely be removed in Stem 2.0 due to lack of usage. If you
use this module please `let me know <https://www.atagar.com/contact/>`_.
"""
import csv
@ -98,7 +103,7 @@ def export_csv_file(output_file, descriptors, included_fields = (), excluded_fie
writer = csv.DictWriter(output_file, included_fields, dialect = _ExportDialect(), extrasaction='ignore')
if header and stem.prereq.is_python_27():
if header and not stem.prereq._is_python_26():
writer.writeheader()
for desc in descriptors:

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -7,7 +7,7 @@ their server descriptor is published and have a similar format. However, unlike
server descriptors these don't contain information that Tor clients require to
function and as such aren't fetched by default.
Defined in section 2.2 of the `dir-spec
Defined in section 2.1.2 of the `dir-spec
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_,
extra-info descriptors contain interesting but non-vital information such as
usage statistics. Tor clients cannot request these documents for bridges.
@ -19,8 +19,7 @@ Extra-info descriptors are available from a few sources...
* control port via 'GETINFO extra-info/digest/\*' queries
* the 'cached-extrainfo' file in tor's data directory
* Archived descriptors provided by CollecTor
(https://collector.torproject.org/).
* Archived descriptors provided by `CollecTor <https://metrics.torproject.org/collector.html>`_.
* Directory authorities and mirrors via their DirPort.
@ -72,6 +71,7 @@ import functools
import hashlib
import re
import stem.prereq
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
@ -79,19 +79,27 @@ import stem.util.str_tools
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
create_signing_key,
_descriptor_content,
_read_until_keywords,
_get_descriptor_components,
_descriptor_components,
_value,
_values,
_parse_simple_line,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_key_block,
_mappings_for,
_append_router_signature,
_random_nickname,
_random_fingerprint,
_random_date,
_random_crypto_blob,
)
try:
# added in python 3.2
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
except ImportError:
else:
from stem.util.lru_cache import lru_cache
# known statuses for dirreq-v2-resp and dirreq-v3-resp...
@ -154,7 +162,6 @@ SINGLE_FIELDS = (
'exit-streams-opened',
)
_timestamp_re = re.compile('^(.*) \(([0-9]+) s\)( .*)?$')
_locale_re = re.compile('^[a-zA-Z0-9\?]{2}$')
@ -280,14 +287,15 @@ def _parse_transport_line(descriptor, entries):
raise ValueError("Transport line's address:port entry is missing a colon: transport %s" % value)
name = value_comp[0]
address, port_str = value_comp[1].split(':', 1)
address, port_str = value_comp[1].rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address) or \
stem.util.connection.is_valid_ipv6_address(address):
stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError('Transport line has a malformed address: transport %s' % value)
elif not stem.util.connection.is_valid_port(port_str):
raise ValueError('Transport line has a malformed port: transport %s' % value)
address.lstrip('[').rstrip(']')
port = int(port_str)
args = value_comp[2:] if len(value_comp) >= 3 else []
@ -309,6 +317,21 @@ def _parse_cell_circuits_per_decline_line(descriptor, entries):
descriptor.cell_circuits_per_decile = int(value)
def _parse_padding_counts_line(descriptor, entries):
# "padding-counts" YYYY-MM-DD HH:MM:SS (NSEC s) key=val key=val...
value = _value('padding-counts', entries)
timestamp, interval, remainder = _parse_timestamp_and_interval('padding-counts', value)
counts = {}
for k, v in _mappings_for('padding-counts', remainder, require_value = True):
counts[k] = int(v) if v.isdigit() else v
setattr(descriptor, 'padding_counts_end', timestamp)
setattr(descriptor, 'padding_counts_interval', interval)
setattr(descriptor, 'padding_counts', counts)
def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr, descriptor, entries):
value = _value(keyword, entries)
@ -319,22 +342,15 @@ def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr
key_set = DirResponse if is_response_stats else DirStat
key_type = 'STATUS' if is_response_stats else 'STAT'
error_msg = '%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value)
if value:
for entry in value.split(','):
if '=' not in entry:
raise ValueError(error_msg)
for status, count in _mappings_for(keyword, value, divider = ','):
if not count.isdigit():
raise ValueError('%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value))
status, count = entry.split('=', 1)
if count.isdigit():
if status in key_set:
recognized_counts[status] = int(count)
else:
unrecognized_counts[status] = int(count)
else:
raise ValueError(error_msg)
if status in key_set:
recognized_counts[status] = int(count)
else:
unrecognized_counts[status] = int(count)
setattr(descriptor, recognized_counts_attr, recognized_counts)
setattr(descriptor, unrecognized_counts_attr, unrecognized_counts)
@ -423,22 +439,13 @@ def _parse_port_count_line(keyword, attribute, descriptor, entries):
# "<keyword>" port=N,port=N,...
value, port_mappings = _value(keyword, entries), {}
error_msg = 'Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value)
if value:
for entry in value.split(','):
if '=' not in entry:
raise ValueError(error_msg)
for port, stat in _mappings_for(keyword, value, divider = ','):
if (port != 'other' and not stem.util.connection.is_valid_port(port)) or not stat.isdigit():
raise ValueError('Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value))
port, stat = entry.split('=', 1)
if (port == 'other' or stem.util.connection.is_valid_port(port)) and stat.isdigit():
if port != 'other':
port = int(port)
port_mappings[port] = int(stat)
else:
raise ValueError(error_msg)
port = int(port) if port.isdigit() else port
port_mappings[port] = int(stat)
setattr(descriptor, attribute, port_mappings)
@ -453,19 +460,12 @@ def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries):
# ??,"Unknown"
value, locale_usage = _value(keyword, entries), {}
error_msg = 'Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value)
if value:
for entry in value.split(','):
if '=' not in entry:
raise ValueError(error_msg)
for locale, count in _mappings_for(keyword, value, divider = ','):
if not _locale_re.match(locale) or not count.isdigit():
raise ValueError('Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value))
locale, count = entry.split('=', 1)
if _locale_re.match(locale) and count.isdigit():
locale_usage[locale] = int(count)
else:
raise ValueError(error_msg)
locale_usage[locale] = int(count)
setattr(descriptor, attribute, locale_usage)
@ -473,17 +473,11 @@ def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries):
def _parse_bridge_ip_versions_line(descriptor, entries):
value, ip_versions = _value('bridge-ip-versions', entries), {}
if value:
for entry in value.split(','):
if '=' not in entry:
raise stem.ProtocolError("The bridge-ip-versions should be a comma separated listing of '<protocol>=<count>' mappings: bridge-ip-versions %s" % value)
for protocol, count in _mappings_for('bridge-ip-versions', value, divider = ','):
if not count.isdigit():
raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value))
protocol, count = entry.split('=', 1)
if not count.isdigit():
raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value))
ip_versions[protocol] = int(count)
ip_versions[protocol] = int(count)
descriptor.ip_versions = ip_versions
@ -491,17 +485,11 @@ def _parse_bridge_ip_versions_line(descriptor, entries):
def _parse_bridge_ip_transports_line(descriptor, entries):
value, ip_transports = _value('bridge-ip-transports', entries), {}
if value:
for entry in value.split(','):
if '=' not in entry:
raise stem.ProtocolError("The bridge-ip-transports should be a comma separated listing of '<protocol>=<count>' mappings: bridge-ip-transports %s" % value)
for protocol, count in _mappings_for('bridge-ip-transports', value, divider = ','):
if not count.isdigit():
raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value))
protocol, count = entry.split('=', 1)
if not count.isdigit():
raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value))
ip_transports[protocol] = int(count)
ip_transports[protocol] = int(count)
descriptor.ip_transports = ip_transports
@ -511,28 +499,30 @@ def _parse_hs_stats(keyword, stat_attribute, extra_attribute, descriptor, entrie
value, stat, extra = _value(keyword, entries), None, {}
if value is not None:
value_comp = value.split()
if not value_comp:
raise ValueError("'%s' line was blank" % keyword)
if value is None:
pass # not in the descriptor
elif value == '':
raise ValueError("'%s' line was blank" % keyword)
else:
if ' ' in value:
stat_value, remainder = value.split(' ', 1)
else:
stat_value, remainder = value, None
try:
stat = int(value_comp[0])
stat = int(stat_value)
except ValueError:
raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, value_comp[0], keyword, value))
raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, stat_value, keyword, value))
for entry in value_comp[1:]:
if '=' not in entry:
raise ValueError('Entries after the stat in %s lines should only be key=val entries: %s %s' % (keyword, keyword, value))
key, val = entry.split('=', 1)
for key, val in _mappings_for(keyword, remainder):
extra[key] = val
setattr(descriptor, stat_attribute, stat)
setattr(descriptor, extra_attribute, extra)
_parse_identity_ed25519_line = _parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT')
_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash')
_parse_geoip_db_digest_line = _parse_forty_character_hex('geoip-db-digest', 'geoip_db_digest')
_parse_geoip6_db_digest_line = _parse_forty_character_hex('geoip6-db-digest', 'geoip6_db_digest')
_parse_dirreq_v2_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-resp', 'dir_v2_responses', 'dir_v2_responses_unknown')
@ -570,6 +560,8 @@ _parse_dirreq_v3_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirr
_parse_geoip_client_origins_line = functools.partial(_parse_geoip_to_count_line, 'geoip-client-origins', 'geoip_client_origins')
_parse_entry_ips_line = functools.partial(_parse_geoip_to_count_line, 'entry-ips', 'entry_ips')
_parse_bridge_ips_line = functools.partial(_parse_geoip_to_count_line, 'bridge-ips', 'bridge_ips')
_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature')
_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')
@ -673,6 +665,12 @@ class ExtraInfoDescriptor(Descriptor):
:var int hs_dir_onions_seen: rounded count of the identities seen
:var int hs_dir_onions_seen_attr: **\*** attributes provided for the hs_dir_onions_seen
**Padding Count Attributes:**
:var dict padding_counts: **\*** padding parameters
:var datetime padding_counts_end: end of the period when padding data is being collected
:var int padding_counts_interval: length in seconds of the interval
**Bridge Attributes:**
:var datetime bridge_stats_end: end of the period when stats were gathered
@ -689,6 +687,10 @@ class ExtraInfoDescriptor(Descriptor):
.. versionchanged:: 1.4.0
Added the hs_stats_end, hs_rend_cells, hs_rend_cells_attr,
hs_dir_onions_seen, and hs_dir_onions_seen_attr attributes.
.. versionchanged:: 1.6.0
Added the padding_counts, padding_counts_end, and padding_counts_interval
attributes.
"""
ATTRIBUTES = {
@ -766,6 +768,10 @@ class ExtraInfoDescriptor(Descriptor):
'hs_dir_onions_seen': (None, _parse_hidden_service_dir_onions_seen_line),
'hs_dir_onions_seen_attr': ({}, _parse_hidden_service_dir_onions_seen_line),
'padding_counts': ({}, _parse_padding_counts_line),
'padding_counts_end': (None, _parse_padding_counts_line),
'padding_counts_interval': (None, _parse_padding_counts_line),
'bridge_stats_end': (None, _parse_bridge_stats_end_line),
'bridge_stats_interval': (None, _parse_bridge_stats_end_line),
'bridge_ips': (None, _parse_bridge_ips_line),
@ -811,6 +817,7 @@ class ExtraInfoDescriptor(Descriptor):
'hidserv-stats-end': _parse_hidden_service_stats_end_line,
'hidserv-rend-relayed-cells': _parse_hidden_service_rend_relayed_cells_line,
'hidserv-dir-onions-seen': _parse_hidden_service_dir_onions_seen_line,
'padding-counts': _parse_padding_counts_line,
'dirreq-v2-ips': _parse_dirreq_v2_ips_line,
'dirreq-v3-ips': _parse_dirreq_v3_ips_line,
'dirreq-v2-reqs': _parse_dirreq_v2_reqs_line,
@ -836,7 +843,7 @@ class ExtraInfoDescriptor(Descriptor):
"""
super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load = not validate)
entries = _get_descriptor_components(raw_contents, validate)
entries = _descriptor_components(raw_contents, validate)
if validate:
for keyword in self._required_fields():
@ -886,19 +893,56 @@ class RelayExtraInfoDescriptor(ExtraInfoDescriptor):
'GETINFO extra-info/digest/\*', cached descriptors, and metrics
(`specification <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_).
:var ed25519_certificate str: base64 encoded ed25519 certificate
:var ed25519_signature str: signature of this document using ed25519
:var str signature: **\*** signature for this extrainfo descriptor
**\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.5.0
Added the ed25519_certificate and ed25519_signature attributes.
"""
ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
'ed25519_certificate': (None, _parse_identity_ed25519_line),
'ed25519_signature': (None, _parse_router_sig_ed25519_line),
'signature': (None, _parse_router_signature_line),
})
PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
'identity-ed25519': _parse_identity_ed25519_line,
'router-sig-ed25519': _parse_router_sig_ed25519_line,
'router-signature': _parse_router_signature_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False, signing_key = None):
base_header = (
('extra-info', '%s %s' % (_random_nickname(), _random_fingerprint())),
('published', _random_date()),
)
if signing_key:
sign = True
if sign:
if attr and 'router-signature' in attr:
raise ValueError('Cannot sign the descriptor if a router-signature has been provided')
if signing_key is None:
signing_key = create_signing_key()
content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n'
return _append_router_signature(content, signing_key.private)
else:
return _descriptor_content(attr, exclude, base_header, (
('router-signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None):
return cls(cls.content(attr, exclude, sign, signing_key), validate = validate)
@lru_cache()
def digest(self):
# our digest is calculated from everything except our signature
@ -910,17 +954,39 @@ class RelayExtraInfoDescriptor(ExtraInfoDescriptor):
class BridgeExtraInfoDescriptor(ExtraInfoDescriptor):
"""
Bridge extra-info descriptor (`bridge descriptor specification
<https://collector.torproject.org/formats.html#bridge-descriptors>`_)
<https://metrics.torproject.org/collector.html#bridge-descriptors>`_)
:var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519
:var str router_digest_sha256: sha256 digest of this document
.. versionchanged:: 1.5.0
Added the ed25519_certificate_hash and router_digest_sha256 attributes.
"""
ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{
'ed25519_certificate_hash': (None, _parse_master_key_ed25519_line),
'router_digest_sha256': (None, _parse_router_digest_sha256_line),
'_digest': (None, _parse_router_digest_line),
})
PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{
'master-key-ed25519': _parse_master_key_ed25519_line,
'router-digest-sha256': _parse_router_digest_sha256_line,
'router-digest': _parse_router_digest_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('extra-info', 'ec2bridgereaac65a3 %s' % _random_fingerprint()),
('published', _random_date()),
), (
('router-digest', _random_fingerprint()),
))
def digest(self):
return self._digest

View file

@ -1,4 +1,4 @@
# Copyright 2015, Damian Johnson and The Tor Project
# Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -9,6 +9,9 @@ Unlike other descriptor types these describe a hidden service rather than a
relay. They're created by the service, and can only be fetched via relays with
the HSDir flag.
These are only available through the Controller's
:func:`~stem.control.get_hidden_service_descriptor` method.
**Module Overview:**
::
@ -18,34 +21,34 @@ the HSDir flag.
.. versionadded:: 1.4.0
"""
# TODO: Add a description for how to retrieve them when tor supports that
# (#14847) and then update #15009.
import base64
import binascii
import collections
import hashlib
import io
import stem.prereq
import stem.util.connection
import stem.util.str_tools
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
_get_descriptor_components,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_bytes_for_block,
_value,
_parse_simple_line,
_parse_timestamp_line,
_parse_key_block,
_random_date,
_random_crypto_blob,
)
try:
# added in python 3.2
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
except ImportError:
else:
from stem.util.lru_cache import lru_cache
REQUIRED_FIELDS = (
@ -80,7 +83,17 @@ SINGLE_INTRODUCTION_POINT_FIELDS = [
BASIC_AUTH = 1
STEALTH_AUTH = 2
IntroductionPoint = collections.namedtuple('IntroductionPoints', INTRODUCTION_POINTS_ATTR.keys())
class IntroductionPoints(collections.namedtuple('IntroductionPoints', INTRODUCTION_POINTS_ATTR.keys())):
"""
:var str identifier: hash of this introduction point's identity key
:var str address: address of this introduction point
:var int port: port where this introduction point is listening
:var str onion_key: public key for communicating with this introduction point
:var str service_key: public key for communicating with this hidden service
:var list intro_authentication: tuples of the form (auth_type, auth_data) for
establishing a connection
"""
class DecryptionFailure(Exception):
@ -153,25 +166,13 @@ def _parse_introduction_points_line(descriptor, entries):
raise ValueError("'introduction-points' should be followed by a MESSAGE block, but was a %s" % block_type)
descriptor.introduction_points_encoded = block_contents
descriptor.introduction_points_auth = [] # field was never implemented in tor (#15190)
try:
decoded_field = _bytes_for_block(block_contents)
descriptor.introduction_points_content = _bytes_for_block(block_contents)
except TypeError:
raise ValueError("'introduction-points' isn't base64 encoded content:\n%s" % block_contents)
auth_types = []
while decoded_field.startswith(b'service-authentication ') and b'\n' in decoded_field:
auth_line, decoded_field = decoded_field.split(b'\n', 1)
auth_line_comp = auth_line.split(b' ')
if len(auth_line_comp) < 3:
raise ValueError("Within introduction-points we expected 'service-authentication [auth_type] [auth_data]', but had '%s'" % auth_line)
auth_types.append((auth_line_comp[1], auth_line_comp[2]))
descriptor.introduction_points_auth = auth_types
descriptor.introduction_points_content = decoded_field
_parse_rendezvous_service_descriptor_line = _parse_simple_line('rendezvous-service-descriptor', 'descriptor_id')
_parse_permanent_key_line = _parse_key_block('permanent-key', 'permanent_key', 'RSA PUBLIC KEY')
@ -194,6 +195,7 @@ class HiddenServiceDescriptor(Descriptor):
:var str introduction_points_encoded: raw introduction points blob
:var list introduction_points_auth: **\*** tuples of the form
(auth_method, auth_data) for our introduction_points_content
(**deprecated**, always **[]**)
:var bytes introduction_points_content: decoded introduction-points content
without authentication data, if using cookie authentication this is
encrypted
@ -201,6 +203,14 @@ class HiddenServiceDescriptor(Descriptor):
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.6.0
Moved from the deprecated `pycrypto
<https://www.dlitz.net/software/pycrypto/>`_ module to `cryptography
<https://pypi.python.org/pypi/cryptography>`_ for validating signatures.
.. versionchanged:: 1.6.0
Added the **skip_crypto_validation** constructor argument.
"""
ATTRIBUTES = {
@ -227,9 +237,30 @@ class HiddenServiceDescriptor(Descriptor):
'signature': _parse_signature_line,
}
def __init__(self, raw_contents, validate = False):
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('rendezvous-service-descriptor', 'y3olqqblqw2gbh6phimfuiroechjjafa'),
('version', '2'),
('permanent-key', _random_crypto_blob('RSA PUBLIC KEY')),
('secret-id-part', 'e24kgecavwsznj7gpbktqsiwgvngsf4e'),
('publication-time', _random_date()),
('protocol-versions', '2,3'),
('introduction-points', '\n-----BEGIN MESSAGE-----\n-----END MESSAGE-----'),
), (
('signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False):
return cls(cls.content(attr, exclude, sign), validate = validate, skip_crypto_validation = not sign)
def __init__(self, raw_contents, validate = False, skip_crypto_validation = False):
super(HiddenServiceDescriptor, self).__init__(raw_contents, lazy_load = not validate)
entries = _get_descriptor_components(raw_contents, validate)
entries = _descriptor_components(raw_contents, validate, non_ascii_fields = ('introduction-points'))
if validate:
for keyword in REQUIRED_FIELDS:
@ -245,7 +276,7 @@ class HiddenServiceDescriptor(Descriptor):
self._parse(entries, validate)
if stem.prereq.is_crypto_available():
if not skip_crypto_validation and stem.prereq.is_crypto_available():
signed_digest = self._digest_for_signature(self.permanent_key, self.signature)
content_digest = self._digest_for_content(b'rendezvous-service-descriptor ', b'\nsignature\n')
@ -257,21 +288,9 @@ class HiddenServiceDescriptor(Descriptor):
@lru_cache()
def introduction_points(self, authentication_cookie = None):
"""
Provided this service's introduction points. This provides a list of
IntroductionPoint instances, which have the following attributes...
Provided this service's introduction points.
* **identifier** (str): hash of this introduction point's identity key
* **address** (str): address of this introduction point
* **port** (int): port where this introduction point is listening
* **onion_key** (str): public key for communicating with this introduction point
* **service_key** (str): public key for communicating with this hidden service
* **intro_authentication** (list): tuples of the form (auth_type, auth_data)
for establishing a connection
:param str authentication_cookie: cookie to decrypt the introduction-points
if it's encrypted
:returns: **list** of IntroductionPoints instances
:returns: **list** of :class:`~stem.descriptor.hidden_service_descriptor.IntroductionPoints`
:raises:
* **ValueError** if the our introduction-points is malformed
@ -284,7 +303,7 @@ class HiddenServiceDescriptor(Descriptor):
return []
elif authentication_cookie:
if not stem.prereq.is_crypto_available():
raise DecryptionFailure('Decrypting introduction-points requires pycrypto')
raise DecryptionFailure('Decrypting introduction-points requires the cryptography module')
try:
missing_padding = len(authentication_cookie) % 4
@ -310,9 +329,8 @@ class HiddenServiceDescriptor(Descriptor):
@staticmethod
def _decrypt_basic_auth(content, authentication_cookie):
from Crypto.Cipher import AES
from Crypto.Util import Counter
from Crypto.Util.number import bytes_to_long
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
try:
client_blocks = int(binascii.hexlify(content[1:2]), 16)
@ -336,15 +354,15 @@ class HiddenServiceDescriptor(Descriptor):
# try decrypting the session key
counter = Counter.new(128, initial_value = 0)
cipher = AES.new(authentication_cookie, AES.MODE_CTR, counter = counter)
session_key = cipher.decrypt(encrypted_session_key)
cipher = Cipher(algorithms.AES(authentication_cookie), modes.CTR(b'\x00' * len(iv)), default_backend())
decryptor = cipher.decryptor()
session_key = decryptor.update(encrypted_session_key) + decryptor.finalize()
# attempt to decrypt the intro points with the session key
counter = Counter.new(128, initial_value = bytes_to_long(iv))
cipher = AES.new(session_key, AES.MODE_CTR, counter = counter)
decrypted = cipher.decrypt(encrypted)
cipher = Cipher(algorithms.AES(session_key), modes.CTR(iv), default_backend())
decryptor = cipher.decryptor()
decrypted = decryptor.update(encrypted) + decryptor.finalize()
# check if the decryption looks correct
@ -355,22 +373,20 @@ class HiddenServiceDescriptor(Descriptor):
@staticmethod
def _decrypt_stealth_auth(content, authentication_cookie):
from Crypto.Cipher import AES
from Crypto.Util import Counter
from Crypto.Util.number import bytes_to_long
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
# byte 1 = authentication type, 2-17 = input vector, 18 on = encrypted content
iv, encrypted = content[1:17], content[17:]
counter = Counter.new(128, initial_value = bytes_to_long(iv))
cipher = AES.new(authentication_cookie, AES.MODE_CTR, counter = counter)
cipher = Cipher(algorithms.AES(authentication_cookie), modes.CTR(iv), default_backend())
decryptor = cipher.decryptor()
return cipher.decrypt(encrypted)
return decryptor.update(encrypted) + decryptor.finalize()
@staticmethod
def _parse_introduction_points(content):
"""
Provides the parsed list of IntroductionPoint for the unencrypted content.
Provides the parsed list of IntroductionPoints for the unencrypted content.
"""
introduction_points = []
@ -383,7 +399,7 @@ class HiddenServiceDescriptor(Descriptor):
break # reached the end
attr = dict(INTRODUCTION_POINTS_ATTR)
entries = _get_descriptor_components(content, False)
entries = _descriptor_components(content, False)
for keyword, values in list(entries.items()):
value, block_type, block_contents = values[0]
@ -417,6 +433,6 @@ class HiddenServiceDescriptor(Descriptor):
auth_type, auth_data = auth_value.split(' ')[:2]
auth_entries.append((auth_type, auth_data))
introduction_points.append(IntroductionPoint(**attr))
introduction_points.append(IntroductionPoints(**attr))
return introduction_points

View file

@ -1,4 +1,4 @@
# Copyright 2013-2015, Damian Johnson and The Tor Project
# Copyright 2013-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -19,7 +19,7 @@ corresponding router status entry. For added fun as of this writing the
controller doesn't even surface those router status entries
(:trac:`7953`).
For instance, here's an example that prints the nickname and fignerprints of
For instance, here's an example that prints the nickname and fingerprints of
the exit relays.
::
@ -67,14 +67,18 @@ Doing the same is trivial with server descriptors...
import hashlib
import stem.exit_policy
import stem.prereq
from stem.descriptor import (
Descriptor,
_get_descriptor_components,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_value,
_values,
_parse_simple_line,
_parse_protocol_line,
_parse_key_block,
_random_crypto_blob,
)
from stem.descriptor.router_status_entry import (
@ -82,10 +86,9 @@ from stem.descriptor.router_status_entry import (
_parse_p_line,
)
try:
# added in python 3.2
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
except ImportError:
else:
from stem.util.lru_cache import lru_cache
REQUIRED_FIELDS = (
@ -98,6 +101,7 @@ SINGLE_FIELDS = (
'family',
'p',
'p6',
'pr',
)
@ -159,21 +163,35 @@ def _parse_file(descriptor_file, validate = False, **kwargs):
def _parse_id_line(descriptor, entries):
value = _value('id', entries)
value_comp = value.split()
identities = {}
if len(value_comp) >= 2:
descriptor.identifier_type = value_comp[0]
descriptor.identifier = value_comp[1]
else:
raise ValueError("'id' lines should contain both the key type and digest: id %s" % value)
for entry in _values('id', entries):
entry_comp = entry.split()
if len(entry_comp) >= 2:
key_type, key_value = entry_comp[0], entry_comp[1]
if key_type in identities:
raise ValueError("There can only be one 'id' line per a key type, but '%s' appeared multiple times" % key_type)
descriptor.identifier_type = key_type
descriptor.identifier = key_value
identities[key_type] = key_value
else:
raise ValueError("'id' lines should contain both the key type and digest: id %s" % entry)
descriptor.identifiers = identities
def _parse_digest(descriptor, entries):
setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper())
_parse_digest = lambda descriptor, entries: setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper())
# Parser callables for single-line microdescriptor fields, each taking
# (descriptor, entries). Duplicate pre-refactor lambda definitions of the
# family and p6 parsers are removed in favor of the _parse_simple_line form.

_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
_parse_family_line = _parse_simple_line('family', 'family', func = lambda v: v.split(' '))
_parse_p6_line = _parse_simple_line('p6', 'exit_policy_v6', func = lambda v: stem.exit_policy.MicroExitPolicy(v))
_parse_pr_line = _parse_protocol_line('pr', 'protocols')
class Microdescriptor(Descriptor):
@ -192,13 +210,27 @@ class Microdescriptor(Descriptor):
:var list family: **\*** nicknames or fingerprints of declared family
:var stem.exit_policy.MicroExitPolicy exit_policy: **\*** relay's exit policy
:var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6
:var str identifier_type: identity digest key type
:var str identifier: base64 encoded identity digest, this is only used for collision prevention (:trac:`11743`)
:var hash identifiers: mapping of key types (like rsa1024 or ed25519) to
their base64 encoded identity, this is only used for collision prevention
(:trac:`11743`)
:var dict protocols: mapping of protocols to their supported versions
:var str identifier: base64 encoded identity digest (**deprecated**, use
identifiers instead)
:var str identifier_type: identity digest key type (**deprecated**, use
identifiers instead)
**\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.1.0
Added the identifier and identifier_type attributes.
.. versionchanged:: 1.5.0
Added the identifiers attribute, and deprecated identifier and
identifier_type since the field can now appear multiple times.
.. versionchanged:: 1.6.0
Added the protocols attribute.
"""
ATTRIBUTES = {
@ -208,8 +240,10 @@ class Microdescriptor(Descriptor):
'family': ([], _parse_family_line),
'exit_policy': (stem.exit_policy.MicroExitPolicy('reject 1-65535'), _parse_p_line),
'exit_policy_v6': (None, _parse_p6_line),
'identifier_type': (None, _parse_id_line),
'identifier': (None, _parse_id_line),
'identifier_type': (None, _parse_id_line), # deprecated in favor of identifiers
'identifier': (None, _parse_id_line), # deprecated in favor of identifiers
'identifiers': ({}, _parse_id_line),
'protocols': ({}, _parse_pr_line),
'digest': (None, _parse_digest),
}
@ -220,13 +254,23 @@ class Microdescriptor(Descriptor):
'family': _parse_family_line,
'p': _parse_p_line,
'p6': _parse_p6_line,
'pr': _parse_pr_line,
'id': _parse_id_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
  # Template content for a microdescriptor; signing isn't supported.

  if sign:
    raise NotImplementedError('Signing of %s not implemented' % cls.__name__)

  base_fields = (
    ('onion-key', _random_crypto_blob('RSA PUBLIC KEY')),
  )

  return _descriptor_content(attr, exclude, base_fields)
def __init__(self, raw_contents, validate = False, annotations = None):
super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate)
self._annotation_lines = annotations if annotations else []
entries = _get_descriptor_components(raw_contents, validate)
entries = _descriptor_components(raw_contents, validate)
if validate:
self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper()
@ -307,6 +351,9 @@ class Microdescriptor(Descriptor):
def __eq__(self, other):
  # Equality delegates to _compare() — presumably a content-based comparison
  # defined on the Descriptor base class; confirm there.
  return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
  # Python 2 doesn't derive __ne__ from __eq__, so it's defined explicitly.
  return not self == other
def __lt__(self, other):
  # Ordering delegates to the same _compare() helper used by __eq__.
  return self._compare(other, lambda s, o: s < o)

View file

@ -1,14 +1,15 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Parsing for Tor network status documents. This supports both the v2 and v3
dir-spec. Documents can be obtained from a few sources...
`dir-spec <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_.
Documents can be obtained from a few sources...
* The 'cached-consensus' file in Tor's data directory.
* Archived descriptors provided by CollecTor
(https://collector.torproject.org/).
* Archived descriptors provided by `CollecTor
<https://metrics.torproject.org/collector.html>`_.
* Directory authorities and mirrors via their DirPort.
@ -19,6 +20,10 @@ dir-spec. Documents can be obtained from a few sources...
* list of :class:`stem.descriptor.router_status_entry.RouterStatusEntry`
* document footer
**For a great graphical overview see** `Jordan Wright's chart describing the
anatomy of the consensus
<https://jordan-wright.github.io/images/blog/how_tor_works/consensus.png>`_.
Of these, the router status entry section can be quite large (on the order of
hundreds of kilobytes). As such we provide a couple of methods for reading
network status documents through :func:`~stem.descriptor.__init__.parse_file`.
@ -47,16 +52,6 @@ For more information see :func:`~stem.descriptor.__init__.DocumentHandler`...
KeyCertificate - Certificate used to authenticate an authority
DocumentSignature - Signature of a document by a directory authority
DirectoryAuthority - Directory authority as defined in a v3 network status document
.. data:: PackageVersion
Latest recommended version of a package that's available.
:var str name: name of the package
:var str version: latest recommended version
:var str url: package's url
:var dict digests: mapping of digest types to their value
"""
import collections
@ -71,13 +66,22 @@ from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
DocumentHandler,
_get_descriptor_components,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_value,
_parse_simple_line,
_parse_if_present,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_protocol_line,
_parse_key_block,
_mappings_for,
_random_nickname,
_random_fingerprint,
_random_ipv4_address,
_random_date,
_random_crypto_blob,
)
from stem.descriptor.router_status_entry import (
@ -86,13 +90,6 @@ from stem.descriptor.router_status_entry import (
RouterStatusEntryMicroV3,
)
PackageVersion = collections.namedtuple('PackageVersion', [
'name',
'version',
'url',
'digests',
])
# Version 2 network status document fields, tuples of the form...
# (keyword, is_mandatory)
@ -130,6 +127,15 @@ HEADER_STATUS_DOCUMENT_FIELDS = (
('package', True, True, False),
('known-flags', True, True, True),
('flag-thresholds', True, False, False),
('shared-rand-participate', True, False, False),
('shared-rand-commit', True, False, False),
('shared-rand-previous-value', True, True, False),
('shared-rand-current-value', True, True, False),
('bandwidth-file-headers', True, False, False),
('recommended-client-protocols', True, True, False),
('recommended-relay-protocols', True, True, False),
('required-client-protocols', True, True, False),
('required-relay-protocols', True, True, False),
('params', True, True, False),
)
@ -139,9 +145,6 @@ FOOTER_STATUS_DOCUMENT_FIELDS = (
('directory-signature', True, True, True),
)
HEADER_FIELDS = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS]
FOOTER_FIELDS = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS]
AUTH_START = 'dir-source'
ROUTERS_START = 'r'
FOOTER_START = 'directory-footer'
@ -159,8 +162,17 @@ DEFAULT_PARAMS = {
'cbttestfreq': 60,
'cbtmintimeout': 2000,
'cbtinitialtimeout': 60000,
'cbtlearntimeout': 180,
'cbtmaxopencircs': 10,
'UseOptimisticData': 1,
'Support022HiddenServices': 1,
'usecreatefast': 1,
'max-consensuses-age-to-cache-for-diff': 72,
'try-diff-for-consensus-newer-than': 72,
'onion-key-rotation-days': 28,
'onion-key-grace-period-days': 7,
'hs_service_max_rdv_failures': 2,
'circ_max_cell_queue_size': 50000,
}
# KeyCertificate fields, tuple is of the form...
@ -197,6 +209,8 @@ PARAM_RANGE = {
'cbtclosequantile': (MIN_PARAM, 99),
'cbttestfreq': (1, MAX_PARAM),
'cbtmintimeout': (500, MAX_PARAM),
'cbtlearntimeout': (10, 60000),
'cbtmaxopencircs': (0, 14),
'UseOptimisticData': (0, 1),
'Support022HiddenServices': (0, 1),
'usecreatefast': (0, 1),
@ -207,9 +221,40 @@ PARAM_RANGE = {
'GuardLifetime': (2592000, 157766400), # min: 30 days, max: 1826 days
'NumNTorsPerTAP': (1, 100000),
'AllowNonearlyExtend': (0, 1),
'AuthDirNumSRVAgreements': (1, MAX_PARAM),
'max-consensuses-age-to-cache-for-diff': (0, 8192),
'try-diff-for-consensus-newer-than': (0, 8192),
'onion-key-rotation-days': (1, 90),
'onion-key-grace-period-days': (1, 90), # max is the highest onion-key-rotation-days
'hs_service_max_rdv_failures': (1, 10),
'circ_max_cell_queue_size': (1000, 4294967295),
}
class PackageVersion(collections.namedtuple('PackageVersion', ['name', 'version', 'url', 'digests'])):
  """
  Latest recommended version of a package that's available.

  Parsed from a vote's 'package' lines.

  :var str name: name of the package
  :var str version: latest recommended version
  :var str url: package's url
  :var dict digests: mapping of digest types to their value
  """
class SharedRandomnessCommitment(collections.namedtuple('SharedRandomnessCommitment', ['version', 'algorithm', 'identity', 'commit', 'reveal'])):
  """
  Directory authority's commitment for generating the next shared random value.

  Parsed from a vote's 'shared-rand-commit' lines.

  :var int version: shared randomness protocol version
  :var str algorithm: hash algorithm used to make the commitment
  :var str identity: authority's sha1 identity fingerprint
  :var str commit: base64 encoded commitment hash to the shared random value
  :var str reveal: base64 encoded commitment to the shared random value,
    **None** if not provided
  """
def _parse_file(document_file, document_type = None, validate = False, is_microdescriptor = False, document_handler = DocumentHandler.ENTRIES, **kwargs):
"""
Parses a network status and iterates over the RouterStatusEntry in it. The
@ -361,10 +406,10 @@ _parse_network_status_version_line = _parse_version_line('network-status-version
_parse_fingerprint_line = _parse_forty_character_hex('fingerprint', 'fingerprint')
_parse_contact_line = _parse_simple_line('contact', 'contact')
_parse_dir_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY')
# Parser callables for v2 network status document lines, each taking
# (descriptor, entries). Duplicate pre-refactor lambda definitions are
# removed in favor of the _parse_simple_line form.

_parse_client_versions_line = _parse_simple_line('client-versions', 'client_versions', func = lambda v: v.split(','))
_parse_server_versions_line = _parse_simple_line('server-versions', 'server_versions', func = lambda v: v.split(','))
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_dir_options_line = _parse_simple_line('dir-options', 'options', func = lambda v: v.split())
_parse_directory_signature_line = _parse_key_block('directory-signature', 'signature', 'SIGNATURE', value_attribute = 'signing_authority')
@ -428,6 +473,22 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
'directory-signature': _parse_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
  # Template content for a v2 network status document; signing isn't
  # supported.

  if sign:
    raise NotImplementedError('Signing of %s not implemented' % cls.__name__)

  header_fields = (
    ('network-status-version', '2'),
    ('dir-source', '%s %s 80' % (_random_ipv4_address(), _random_ipv4_address())),
    ('fingerprint', _random_fingerprint()),
    ('contact', 'arma at mit dot edu'),
    ('published', _random_date()),
    ('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
  )

  footer_fields = (
    ('directory-signature', 'moria2' + _random_crypto_blob('SIGNATURE')),
  )

  return _descriptor_content(attr, exclude, header_fields, footer_fields)
def __init__(self, raw_content, validate = False):
super(NetworkStatusDocumentV2, self).__init__(raw_content, lazy_load = not validate)
@ -450,7 +511,7 @@ class NetworkStatusDocumentV2(NetworkStatusDocument):
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
entries = _get_descriptor_components(document_content + b'\n' + document_file.read(), validate)
entries = _descriptor_components(document_content + b'\n' + document_file.read(), validate)
if validate:
self._check_constraints(entries)
@ -582,26 +643,20 @@ def _parse_header_flag_thresholds_line(descriptor, entries):
def _parse_header_flag_thresholds_line(descriptor, entries):
  # "flag-thresholds" SP THRESHOLDS NL
  #
  # Vote-only line with the internal performance thresholds the authority
  # used while voting. Values become ints, floats, or (for percentages)
  # float fractions.
  #
  # NOTE(review): this span contained interleaved pre/post diff residue; it's
  # consolidated here into the _mappings_for based implementation.

  value, thresholds = _value('flag-thresholds', entries).strip(), {}

  for key, val in _mappings_for('flag-thresholds', value):
    try:
      if val.endswith('%'):
        # opting for string manipulation rather than just 'float(val) / 100'
        # because floating point arithmetic will lose precision
        thresholds[key] = float('0.' + val[:-1].replace('.', '', 1))
      elif '.' in val:
        thresholds[key] = float(val)
      else:
        thresholds[key] = int(val)
    except ValueError:
      raise ValueError("Network status document's 'flag-thresholds' line is expected to have float values, got: flag-thresholds %s" % value)

  descriptor.flag_thresholds = thresholds
@ -617,11 +672,6 @@ def _parse_header_parameters_line(descriptor, entries):
value = _value('params', entries)
# should only appear in consensus-method 7 or later
if not descriptor.meets_consensus_method(7):
raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
if value != '':
descriptor.params = _parse_int_mappings('params', value, True)
descriptor._check_params_constraints()
@ -661,7 +711,7 @@ def _parse_package_line(descriptor, entries):
package_versions = []
for value, _, _ in entries['package']:
value_comp = value.split()
value_comp = value.split(' ', 3)
if len(value_comp) < 3:
raise ValueError("'package' must at least have a 'PackageName Version URL': %s" % value)
@ -669,33 +719,98 @@ def _parse_package_line(descriptor, entries):
name, version, url = value_comp[:3]
digests = {}
for digest_entry in value_comp[3:]:
if '=' not in digest_entry:
raise ValueError("'package' digest entries should be 'key=value' pairs: %s" % value)
key, value = digest_entry.split('=', 1)
digests[key] = value
if len(value_comp) == 4:
for key, val in _mappings_for('package', value_comp[3]):
digests[key] = val
package_versions.append(PackageVersion(name, version, url, digests))
descriptor.packages = package_versions
def _parsed_shared_rand_commit(descriptor, entries):
  # "shared-rand-commit" Version AlgName Identity Commit [Reveal]
  #
  # One entry per commitment an authority makes toward the next shared
  # random value; Reveal is optional.

  parsed_commitments = []

  for value, _, _ in entries['shared-rand-commit']:
    fields = value.split()

    if len(fields) < 4:
      raise ValueError("'shared-rand-commit' must at least have a 'Version AlgName Identity Commit': %s" % value)

    version = fields[0]

    if not version.isdigit():
      raise ValueError("The version on our 'shared-rand-commit' line wasn't an integer: %s" % value)

    algorithm, identity, commit = fields[1:4]
    reveal = fields[4] if len(fields) > 4 else None
    parsed_commitments.append(SharedRandomnessCommitment(int(version), algorithm, identity, commit, reveal))

  descriptor.shared_randomness_commitments = parsed_commitments
def _parse_shared_rand_previous_value(descriptor, entries):
  # "shared-rand-previous-value" NumReveals Value
  #
  # NumReveals is the number of commitments behind the last shared random
  # value, Value is that base64 encoded value.

  fields = _value('shared-rand-previous-value', entries).split(' ')

  if len(fields) != 2 or not fields[0].isdigit():
    raise ValueError("A network status document's 'shared-rand-previous-value' line must be a pair of values, the first an integer but was '%s'" % ' '.join(fields))

  descriptor.shared_randomness_previous_reveal_count = int(fields[0])
  descriptor.shared_randomness_previous_value = fields[1]
def _parse_shared_rand_current_value(descriptor, entries):
  # "shared-rand-current-value" NumReveals Value
  #
  # NumReveals is the number of commitments behind the current shared random
  # value, Value is that base64 encoded value.

  fields = _value('shared-rand-current-value', entries).split(' ')

  if len(fields) != 2 or not fields[0].isdigit():
    raise ValueError("A network status document's 'shared-rand-current-value' line must be a pair of values, the first an integer but was '%s'" % ' '.join(fields))

  descriptor.shared_randomness_current_reveal_count = int(fields[0])
  descriptor.shared_randomness_current_value = fields[1]
def _parse_bandwidth_file_headers(descriptor, entries):
  # "bandwidth-file-headers" KeyValues
  # KeyValues ::= "" | KeyValue | KeyValues SP KeyValue
  # KeyValue ::= Keyword '=' Value
  # Value ::= ArgumentChar+
  #
  # Headers from the bandwidth authority file that informed this vote.

  header_line = _value('bandwidth-file-headers', entries)
  descriptor.bandwidth_file_headers = dict(_mappings_for('bandwidth-file-headers', header_line))
# Parser callables for v3 network status document header/footer lines, each
# taking (descriptor, entries). Duplicate pre-refactor lambda definitions of
# the known-flags and bandwidth-weights parsers are removed in favor of the
# _parse_simple_line form.

_parse_header_valid_after_line = _parse_timestamp_line('valid-after', 'valid_after')
_parse_header_fresh_until_line = _parse_timestamp_line('fresh-until', 'fresh_until')
_parse_header_valid_until_line = _parse_timestamp_line('valid-until', 'valid_until')
_parse_header_client_versions_line = _parse_versions_line('client-versions', 'client_versions')
_parse_header_server_versions_line = _parse_versions_line('server-versions', 'server_versions')
_parse_header_known_flags_line = _parse_simple_line('known-flags', 'known_flags', func = lambda v: [entry for entry in v.split(' ') if entry])
_parse_footer_bandwidth_weights_line = _parse_simple_line('bandwidth-weights', 'bandwidth_weights', func = lambda v: _parse_int_mappings('bandwidth-weights', v, True))
_parse_shared_rand_participate_line = _parse_if_present('shared-rand-participate', 'is_shared_randomness_participate')
_parse_recommended_client_protocols_line = _parse_protocol_line('recommended-client-protocols', 'recommended_client_protocols')
_parse_recommended_relay_protocols_line = _parse_protocol_line('recommended-relay-protocols', 'recommended_relay_protocols')
_parse_required_client_protocols_line = _parse_protocol_line('required-client-protocols', 'required_client_protocols')
_parse_required_relay_protocols_line = _parse_protocol_line('required-relay-protocols', 'required_relay_protocols')
class NetworkStatusDocumentV3(NetworkStatusDocument):
"""
Version 3 network status document. This could be either a vote or consensus.
:var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
contained in the document
:var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
mapping for relays contained in the document
:var int version: **\*** document version
:var str version_flavor: **\*** flavor associated with the document (such as 'microdesc')
@ -725,17 +840,59 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
:var int consensus_method: method version used to generate this consensus
:var dict bandwidth_weights: dict of weight(str) => value(int) mappings
:var int shared_randomness_current_reveal_count: number of commitments
used to generate the current shared random value
:var str shared_randomness_current_value: base64 encoded current shared
random value
:var int shared_randomness_previous_reveal_count: number of commitments
used to generate the last shared random value
:var str shared_randomness_previous_value: base64 encoded last shared random
value
**Vote Attributes:**
:var list consensus_methods: list of ints for the supported method versions
:var datetime published: time when the document was published
:var dict flag_thresholds: **\*** mapping of internal performance thresholds used while making the vote, values are **ints** or **floats**
:var dict recommended_client_protocols: recommended protocols for clients
:var dict recommended_relay_protocols: recommended protocols for relays
:var dict required_client_protocols: required protocols for clients
:var dict required_relay_protocols: required protocols for relays
:var dict bandwidth_file_headers: headers from the bandwidth authority that
generated this vote
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as None if undefined
.. versionchanged:: 1.4.0
Added the packages attribute.
.. versionchanged:: 1.5.0
Added the is_shared_randomness_participate, shared_randomness_commitments,
shared_randomness_previous_reveal_count,
shared_randomness_previous_value,
shared_randomness_current_reveal_count, and
shared_randomness_current_value attributes.
.. versionchanged:: 1.6.0
Added the recommended_client_protocols, recommended_relay_protocols,
required_client_protocols, and required_relay_protocols attributes.
.. versionchanged:: 1.6.0
The is_shared_randomness_participate and shared_randomness_commitments
were misdocumented in the tor spec and as such never set. They're now an
attribute of votes in the **directory_authorities**.
.. versionchanged:: 1.7.0
The shared_randomness_current_reveal_count and
shared_randomness_previous_reveal_count attributes were undocumented and
not provided properly if retrieved before their shared_randomness_*_value
counterpart.
.. versionchanged:: 1.7.0
Added the bandwidth_file_headers attribute.
"""
ATTRIBUTES = {
@ -757,7 +914,16 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
'packages': ([], _parse_package_line),
'known_flags': ([], _parse_header_known_flags_line),
'flag_thresholds': ({}, _parse_header_flag_thresholds_line),
'recommended_client_protocols': ({}, _parse_recommended_client_protocols_line),
'recommended_relay_protocols': ({}, _parse_recommended_relay_protocols_line),
'required_client_protocols': ({}, _parse_required_client_protocols_line),
'required_relay_protocols': ({}, _parse_required_relay_protocols_line),
'params': ({}, _parse_header_parameters_line),
'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value),
'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value),
'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value),
'shared_randomness_current_value': (None, _parse_shared_rand_current_value),
'bandwidth_file_headers': ({}, _parse_bandwidth_file_headers),
'signatures': ([], _parse_footer_directory_signature_line),
'bandwidth_weights': ({}, _parse_footer_bandwidth_weights_line),
@ -778,7 +944,14 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
'package': _parse_package_line,
'known-flags': _parse_header_known_flags_line,
'flag-thresholds': _parse_header_flag_thresholds_line,
'recommended-client-protocols': _parse_recommended_client_protocols_line,
'recommended-relay-protocols': _parse_recommended_relay_protocols_line,
'required-client-protocols': _parse_required_client_protocols_line,
'required-relay-protocols': _parse_required_relay_protocols_line,
'params': _parse_header_parameters_line,
'shared-rand-previous-value': _parse_shared_rand_previous_value,
'shared-rand-current-value': _parse_shared_rand_current_value,
'bandwidth-file-headers': _parse_bandwidth_file_headers,
}
FOOTER_PARSER_FOR_LINE = {
@ -787,6 +960,85 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
'directory-signature': _parse_footer_directory_signature_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, authorities = None, routers = None):
  # Generates template v3 network status document content.
  #
  # :param dict attr: keyword/value mappings to include
  # :param tuple exclude: mandatory fields to omit
  # :param bool sign: unsupported, raises NotImplementedError if True
  # :param list authorities: DirectoryAuthority instances to embed
  # :param list routers: router status entries to embed

  if sign:
    raise NotImplementedError('Signing of %s not implemented' % cls.__name__)

  attr = {} if attr is None else dict(attr)

  # votes and consensuses default different fields
  is_vote = attr.get('vote-status') == 'vote'

  if is_vote:
    extra_defaults = {'consensus-methods': '1 9', 'published': _random_date()}
  else:
    extra_defaults = {'consensus-method': '9'}

  # a vote needs at least one authority section

  if is_vote and authorities is None:
    authorities = [DirectoryAuthority.create(is_vote = is_vote)]

  for k, v in extra_defaults.items():
    if exclude and k in exclude:
      continue  # explicitly excluding this field
    elif k not in attr:
      attr[k] = v

  desc_content = _descriptor_content(attr, exclude, (
    ('network-status-version', '3'),
    ('vote-status', 'consensus'),
    ('consensus-methods', None),
    ('consensus-method', None),
    ('published', None),
    ('valid-after', _random_date()),
    ('fresh-until', _random_date()),
    ('valid-until', _random_date()),
    ('voting-delay', '300 300'),
    ('client-versions', None),
    ('server-versions', None),
    ('package', None),
    ('known-flags', 'Authority BadExit Exit Fast Guard HSDir Named Running Stable Unnamed V2Dir Valid'),
    ('params', None),
  ), (
    ('directory-footer', ''),
    ('bandwidth-weights', None),
    ('directory-signature', '%s %s%s' % (_random_fingerprint(), _random_fingerprint(), _random_crypto_blob('SIGNATURE'))),
  ))

  # inject the authorities and/or routers between the header and footer

  if authorities:
    # find the byte offset of the footer (or document end) to insert before
    if b'directory-footer' in desc_content:
      footer_div = desc_content.find(b'\ndirectory-footer') + 1
    elif b'directory-signature' in desc_content:
      footer_div = desc_content.find(b'\ndirectory-signature') + 1
    else:
      if routers:
        desc_content += b'\n'

      footer_div = len(desc_content) + 1

    authority_content = stem.util.str_tools._to_bytes('\n'.join([str(a) for a in authorities]) + '\n')
    desc_content = desc_content[:footer_div] + authority_content + desc_content[footer_div:]

  if routers:
    # recompute the insertion point since authority injection may have moved it
    if b'directory-footer' in desc_content:
      footer_div = desc_content.find(b'\ndirectory-footer') + 1
    elif b'directory-signature' in desc_content:
      footer_div = desc_content.find(b'\ndirectory-signature') + 1
    else:
      if routers:
        desc_content += b'\n'

      footer_div = len(desc_content) + 1

    router_content = stem.util.str_tools._to_bytes('\n'.join([str(r) for r in routers]) + '\n')
    desc_content = desc_content[:footer_div] + router_content + desc_content[footer_div:]

  return desc_content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, authorities = None, routers = None):
  # Convenience constructor: generate template content, then parse it.
  document_bytes = cls.content(attr, exclude, sign, authorities, routers)
  return cls(document_bytes, validate = validate)
def __init__(self, raw_content, validate = False, default_params = True):
"""
Parse a v3 network status document.
@ -802,6 +1054,13 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
super(NetworkStatusDocumentV3, self).__init__(raw_content, lazy_load = not validate)
document_file = io.BytesIO(raw_content)
# TODO: Tor misdocumented these as being in the header rather than the
# authority section. As such these have never been set but we need the
# attributes for stem 1.5 compatability. Drop these in 2.0.
self.is_shared_randomness_participate = False
self.shared_randomness_commitments = []
self._default_params = default_params
self._header(document_file, validate)
@ -829,6 +1088,39 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
self.routers = dict((desc.fingerprint, desc) for desc in router_iter)
self._footer(document_file, validate)
def validate_signatures(self, key_certs):
  """
  Validates we're properly signed by the signing certificates.

  .. versionadded:: 1.6.0

  :param list key_certs: :class:`~stem.descriptor.networkstatus.KeyCertificate`
    instances to validate the consensus against

  :raises: **ValueError** if an insufficient number of valid signatures are present.
  """

  # sha1 hash of the body and header
  local_digest = self._digest_for_content(b'network-status-version', b'directory-signature ')

  # over half of our signatures must validate
  required_digests = len(self.signatures) / 2.0
  signing_keys = {cert.fingerprint: cert.signing_key for cert in key_certs}

  valid_digests, total_digests = 0, 0

  for sig in self.signatures:
    if sig.identity not in signing_keys:
      continue  # no certificate for this signature's authority

    signed_digest = self._digest_for_signature(signing_keys[sig.identity], sig.signature)
    total_digests += 1

    if signed_digest == local_digest:
      valid_digests += 1

  if valid_digests < required_digests:
    raise ValueError('Network Status Document has %i valid signatures out of %i total, needed %i' % (valid_digests, total_digests, required_digests))
def get_unrecognized_lines(self):
if self._lazy_loading:
self._parse(self._header_entries, False, parser_for_line = self.HEADER_PARSER_FOR_LINE)
@ -863,13 +1155,14 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
def _header(self, document_file, validate):
content = bytes.join(b'', _read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file))
entries = _get_descriptor_components(content, validate)
entries = _descriptor_components(content, validate)
header_fields = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS]
if validate:
# all known header fields can only appear once except
for keyword, values in list(entries.items()):
if len(values) > 1 and keyword in HEADER_FIELDS and keyword != 'package':
if len(values) > 1 and keyword in header_fields and keyword != 'package' and keyword != 'shared-rand-commit':
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
if self._default_params:
@ -877,8 +1170,12 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
self._parse(entries, validate, parser_for_line = self.HEADER_PARSER_FOR_LINE)
# should only appear in consensus-method 7 or later
if not self.meets_consensus_method(7) and 'params' in list(entries.keys()):
raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later")
_check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS)
_check_for_misordered_fields(entries, HEADER_FIELDS)
# default consensus_method and consensus_methods based on if we're a consensus or vote
@ -891,14 +1188,15 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
self._entries.update(entries)
def _footer(self, document_file, validate):
entries = _get_descriptor_components(document_file.read(), validate)
entries = _descriptor_components(document_file.read(), validate)
footer_fields = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS]
if validate:
for keyword, values in list(entries.items()):
# all known footer fields can only appear once except...
# * 'directory-signature' in a consensus
if len(values) > 1 and keyword in FOOTER_FIELDS:
if len(values) > 1 and keyword in footer_fields:
if not (keyword == 'directory-signature' and self.is_consensus):
raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values)))
@ -917,7 +1215,6 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
raise ValueError("Network status document's footer should start with a 'directory-signature' line prior to consensus-method 9")
_check_for_missing_and_disallowed_fields(self, entries, FOOTER_STATUS_DOCUMENT_FIELDS)
_check_for_misordered_fields(entries, FOOTER_FIELDS)
else:
self._footer_entries = entries
self._entries.update(entries)
@ -946,6 +1243,9 @@ class NetworkStatusDocumentV3(NetworkStatusDocument):
def __eq__(self, other):
  # Equality delegates to _compare() — presumably a content-based comparison
  # defined on the Descriptor base class; confirm there.
  return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
  # Python 2 doesn't derive __ne__ from __eq__, so it's defined explicitly.
  return not self == other
def __lt__(self, other):
  # Ordering delegates to the same _compare() helper used by __eq__.
  return self._compare(other, lambda s, o: s < o)
@ -986,71 +1286,32 @@ def _check_for_missing_and_disallowed_fields(document, entries, fields):
raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields))
def _check_for_misordered_fields(entries, expected):
"""
To be valid a network status document's fiends need to appear in a specific
order. Checks that known fields appear in that order (unrecognized fields
are ignored).
:param dict entries: ordered keyword/value mappings of the header or footer
:param list expected: ordered list of expected fields (either
**HEADER_FIELDS** or **FOOTER_FIELDS**)
:raises: **ValueError** if entries aren't properly ordered
"""
# Earlier validation has ensured that our fields either belong to our
# document type or are unknown. Remove the unknown fields since they
# reflect a spec change and can appear anywhere in the document.
actual = [field for field in entries.keys() if field in expected]
# Narrow the expected to just what we have. If the lists then match then the
# order's valid.
expected = [field for field in expected if field in actual]
if actual != expected:
actual_label = ', '.join(actual)
expected_label = ', '.join(expected)
raise ValueError("The fields in a section of the document are misordered. It should be '%s' but was '%s'" % (actual_label, expected_label))
def _parse_int_mappings(keyword, value, validate):
# Parse a series of 'key=value' entries, checking the following:
# - values are integers
# - keys are sorted in lexical order
results, seen_keys = {}, []
for entry in value.split(' '):
error_template = "Unable to parse network status document's '%s' line (%%s): %s'" % (keyword, value)
for key, val in _mappings_for(keyword, value):
if validate:
# parameters should be in ascending order by their key
for prior_key in seen_keys:
if prior_key > key:
raise ValueError(error_template % 'parameters must be sorted by their key')
try:
if '=' not in entry:
raise ValueError("must only have 'key=value' entries")
# the int() function accepts things like '+123', but we don't want to
entry_key, entry_value = entry.split('=', 1)
if val.startswith('+'):
raise ValueError()
try:
# the int() function accepts things like '+123', but we don't want to
if entry_value.startswith('+'):
raise ValueError()
results[key] = int(val)
except ValueError:
raise ValueError(error_template % ("'%s' is a non-numeric value" % val))
entry_value = int(entry_value)
except ValueError:
raise ValueError("'%s' is a non-numeric value" % entry_value)
if validate:
# parameters should be in ascending order by their key
for prior_key in seen_keys:
if prior_key > entry_key:
raise ValueError('parameters must be sorted by their key')
results[entry_key] = entry_value
seen_keys.append(entry_key)
except ValueError as exc:
if not validate:
continue
raise ValueError("Unable to parse network status document's '%s' line (%s): %s'" % (keyword, exc, value))
seen_keys.append(key)
return results
@ -1120,11 +1381,31 @@ class DirectoryAuthority(Descriptor):
:var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\***
authority's key certificate
:var bool is_shared_randomness_participate: **\*** **True** if this authority
participates in establishing a shared random value, **False** otherwise
:var list shared_randomness_commitments: **\*** list of
:data:`~stem.descriptor.networkstatus.SharedRandomnessCommitment` entries
:var int shared_randomness_previous_reveal_count: number of commitments
used to generate the last shared random value
:var str shared_randomness_previous_value: base64 encoded last shared random
value
:var int shared_randomness_current_reveal_count: number of commitments
used to generate the current shared random value
:var str shared_randomness_current_value: base64 encoded current shared
random value
**\*** mandatory attribute
.. versionchanged:: 1.4.0
Renamed our 'fingerprint' attribute to 'v3ident' (prior attribute exists
for backward compatability, but is deprecated).
.. versionchanged:: 1.6.0
Added the is_shared_randomness_participate, shared_randomness_commitments,
shared_randomness_previous_reveal_count,
shared_randomness_previous_value,
shared_randomness_current_reveal_count, and
shared_randomness_current_value attributes.
"""
ATTRIBUTES = {
@ -1138,6 +1419,12 @@ class DirectoryAuthority(Descriptor):
'contact': (None, _parse_contact_line),
'vote_digest': (None, _parse_vote_digest_line),
'legacy_dir_key': (None, _parse_legacy_dir_key_line),
'is_shared_randomness_participate': (False, _parse_shared_rand_participate_line),
'shared_randomness_commitments': ([], _parsed_shared_rand_commit),
'shared_randomness_previous_reveal_count': (None, _parse_shared_rand_previous_value),
'shared_randomness_previous_value': (None, _parse_shared_rand_previous_value),
'shared_randomness_current_reveal_count': (None, _parse_shared_rand_current_value),
'shared_randomness_current_value': (None, _parse_shared_rand_current_value),
}
PARSER_FOR_LINE = {
@ -1145,8 +1432,38 @@ class DirectoryAuthority(Descriptor):
'contact': _parse_contact_line,
'legacy-dir-key': _parse_legacy_dir_key_line,
'vote-digest': _parse_vote_digest_line,
'shared-rand-participate': _parse_shared_rand_participate_line,
'shared-rand-commit': _parsed_shared_rand_commit,
'shared-rand-previous-value': _parse_shared_rand_previous_value,
'shared-rand-current-value': _parse_shared_rand_current_value,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False, is_vote = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
attr = {} if attr is None else dict(attr)
# include mandatory 'vote-digest' if a consensus
if not is_vote and not ('vote-digest' in attr or (exclude and 'vote-digest' in exclude)):
attr['vote-digest'] = _random_fingerprint()
content = _descriptor_content(attr, exclude, (
('dir-source', '%s %s no.place.com %s 9030 9090' % (_random_nickname(), _random_fingerprint(), _random_ipv4_address())),
('contact', 'Mike Perry <email>'),
))
if is_vote:
content += b'\n' + KeyCertificate.content()
return content
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, is_vote = False):
return cls(cls.content(attr, exclude, sign, is_vote), validate = validate, is_vote = is_vote)
def __init__(self, raw_content, validate = False, is_vote = False):
"""
Parse a directory authority entry in a v3 network status document.
@ -1171,7 +1488,7 @@ class DirectoryAuthority(Descriptor):
else:
self.key_certificate = None
entries = _get_descriptor_components(content, validate)
entries = _descriptor_components(content, validate)
if validate and 'dir-source' != list(entries.keys())[0]:
raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content))
@ -1233,9 +1550,15 @@ class DirectoryAuthority(Descriptor):
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
@ -1251,7 +1574,7 @@ def _parse_dir_address_line(descriptor, entries):
if ':' not in value:
raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: dir-address %s" % value)
address, dirport = value.split(':', 1)
address, dirport = value.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("Key certificate's address isn't a valid IPv4 address: dir-address %s" % value)
@ -1315,9 +1638,25 @@ class KeyCertificate(Descriptor):
'dir-key-certification': _parse_dir_key_certification_line,
}
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('dir-key-certificate-version', '3'),
('fingerprint', _random_fingerprint()),
('dir-key-published', _random_date()),
('dir-key-expires', _random_date()),
('dir-identity-key', _random_crypto_blob('RSA PUBLIC KEY')),
('dir-signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
), (
('dir-key-certification', _random_crypto_blob('SIGNATURE')),
))
def __init__(self, raw_content, validate = False):
super(KeyCertificate, self).__init__(raw_content, lazy_load = not validate)
entries = _get_descriptor_components(raw_content, validate)
entries = _descriptor_components(raw_content, validate)
if validate:
if 'dir-key-certificate-version' != list(entries.keys())[0]:
@ -1346,9 +1685,15 @@ class KeyCertificate(Descriptor):
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
@ -1395,9 +1740,15 @@ class DocumentSignature(object):
return method(True, True) # we're equal
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
@ -1410,8 +1761,8 @@ class BridgeNetworkStatusDocument(NetworkStatusDocument):
Network status document containing bridges. This is only available through
the metrics site.
:var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2`
contained in the document
:var dict routers: fingerprint to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
mapping for relays contained in the document
:var datetime published: time when the document was published
"""

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -89,10 +89,10 @@ except ImportError:
import stem.descriptor
import stem.prereq
import stem.util
import stem.util.str_tools
import stem.util.system
from stem import str_type
# flag to indicate when the reader thread is out of descriptor files to read
FINISHED = 'DONE'
@ -179,9 +179,9 @@ def load_processed_files(path):
processed_files = {}
with open(path) as input_file:
with open(path, 'rb') as input_file:
for line in input_file.readlines():
line = line.strip()
line = stem.util.str_tools._to_unicode(line.strip())
if not line:
continue # skip blank lines
@ -218,6 +218,7 @@ def save_processed_files(path, processed_files):
"""
# makes the parent directory if it doesn't already exist
try:
path_dir = os.path.dirname(path)
@ -264,10 +265,7 @@ class DescriptorReader(object):
"""
def __init__(self, target, validate = False, follow_links = False, buffer_size = 100, persistence_path = None, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs):
if isinstance(target, (bytes, str_type)):
self._targets = [target]
else:
self._targets = target
self._targets = [target] if stem.util._is_str(target) else target
# expand any relative paths we got
@ -388,7 +386,7 @@ class DescriptorReader(object):
raise ValueError('Already running, you need to call stop() first')
else:
self._is_stopped.clear()
self._reader_thread = threading.Thread(target = self._read_descriptor_files, name='Descriptor Reader')
self._reader_thread = threading.Thread(target = self._read_descriptor_files, name='Descriptor reader')
self._reader_thread.setDaemon(True)
self._reader_thread.start()
@ -514,7 +512,7 @@ class DescriptorReader(object):
self._unreturned_descriptors.put(desc)
self._iter_notice.set()
except TypeError as exc:
except TypeError:
self._notify_skip_listeners(target, UnrecognizedType(mime_type))
except ValueError as exc:
self._notify_skip_listeners(target, ParsingFailure(exc))

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -29,12 +29,19 @@ import stem.util.str_tools
from stem.descriptor import (
KEYWORD_LINE,
Descriptor,
_descriptor_content,
_value,
_values,
_get_descriptor_components,
_descriptor_components,
_parse_protocol_line,
_read_until_keywords,
_random_nickname,
_random_ipv4_address,
_random_date,
)
_parse_pr_line = _parse_protocol_line('pr', 'protocols')
def _parse_file(document_file, validate, entry_class, entry_keyword = 'r', start_position = None, end_position = None, section_end_keywords = (), extra_args = ()):
"""
@ -166,17 +173,12 @@ def _parse_a_line(descriptor, entries):
raise ValueError("%s 'a' line must be of the form '[address]:[ports]': a %s" % (descriptor._name(), value))
address, port = value.rsplit(':', 1)
is_ipv6 = address.startswith('[') and address.endswith(']')
if is_ipv6:
address = address[1:-1] # remove brackets
if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or
(is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))):
if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError("%s 'a' line must start with an IPv6 address: a %s" % (descriptor._name(), value))
if stem.util.connection.is_valid_port(port):
or_addresses.append((address, int(port), is_ipv6))
or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True)))
else:
raise ValueError("%s 'a' line had an invalid port (%s): a %s" % (descriptor._name(), port, value))
@ -228,6 +230,11 @@ def _parse_w_line(descriptor, entries):
elif not w_comp[0].startswith('Bandwidth='):
raise ValueError("%s 'w' line needs to start with a 'Bandwidth=' entry: w %s" % (descriptor._name(), value))
bandwidth = None
measured = None
is_unmeasured = False
unrecognized_bandwidth_entries = []
for w_entry in w_comp:
if '=' in w_entry:
w_key, w_value = w_entry.split('=', 1)
@ -238,25 +245,33 @@ def _parse_w_line(descriptor, entries):
if not (w_value and w_value.isdigit()):
raise ValueError("%s 'Bandwidth=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))
descriptor.bandwidth = int(w_value)
bandwidth = int(w_value)
elif w_key == 'Measured':
if not (w_value and w_value.isdigit()):
raise ValueError("%s 'Measured=' entry needs to have a numeric value: w %s" % (descriptor._name(), value))
descriptor.measured = int(w_value)
measured = int(w_value)
elif w_key == 'Unmeasured':
if w_value != '1':
raise ValueError("%s 'Unmeasured=' should only have the value of '1': w %s" % (descriptor._name(), value))
descriptor.is_unmeasured = True
is_unmeasured = True
else:
descriptor.unrecognized_bandwidth_entries.append(w_entry)
unrecognized_bandwidth_entries.append(w_entry)
descriptor.bandwidth = bandwidth
descriptor.measured = measured
descriptor.is_unmeasured = is_unmeasured
descriptor.unrecognized_bandwidth_entries = unrecognized_bandwidth_entries
def _parse_p_line(descriptor, entries):
# "p" ("accept" / "reject") PortList
# p reject 1-65535
# example: p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001
#
# examples:
#
# p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001
# p reject 1-65535
value = _value('p', entries)
@ -266,6 +281,29 @@ def _parse_p_line(descriptor, entries):
raise ValueError('%s exit policy is malformed (%s): p %s' % (descriptor._name(), exc, value))
def _parse_id_line(descriptor, entries):
# "id" "ed25519" ed25519-identity
#
# examples:
#
# id ed25519 none
# id ed25519 8RH34kO07Pp+XYwzdoATVyCibIvmbslUjRkAm7J4IA8
value = _value('id', entries)
if value:
if descriptor.document and not descriptor.document.is_vote:
raise ValueError("%s 'id' line should only appear in votes: id %s" % (descriptor._name(), value))
value_comp = value.split()
if len(value_comp) >= 2:
descriptor.identifier_type = value_comp[0]
descriptor.identifier = value_comp[1]
else:
raise ValueError("'id' lines should contain both the key type and digest: id %s" % value)
def _parse_m_line(descriptor, entries):
# "m" methods 1*(algorithm "=" digest)
# example: m 8,9,10,11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs
@ -333,7 +371,7 @@ def _base64_to_hex(identity, check_if_fingerprint = True):
except (TypeError, binascii.Error):
raise ValueError("Unable to decode identity string '%s'" % identity)
fingerprint = binascii.b2a_hex(identity_decoded).upper()
fingerprint = binascii.hexlify(identity_decoded).upper()
if stem.prereq.is_python_3():
fingerprint = stem.util.str_tools._to_unicode(fingerprint)
@ -400,7 +438,7 @@ class RouterStatusEntry(Descriptor):
super(RouterStatusEntry, self).__init__(content, lazy_load = not validate)
self.document = document
entries = _get_descriptor_components(content, validate)
entries = _descriptor_components(content, validate)
if validate:
for keyword in self._required_fields():
@ -445,9 +483,15 @@ class RouterStatusEntry(Descriptor):
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
@ -470,6 +514,15 @@ class RouterStatusEntryV2(RouterStatusEntry):
'digest': (None, _parse_r_line),
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE %s %s 9001 0' % (_random_nickname(), _random_date(), _random_ipv4_address())),
))
def _name(self, is_plural = False):
return 'Router status entries (v2)' if is_plural else 'Router status entry (v2)'
@ -485,9 +538,15 @@ class RouterStatusEntryV2(RouterStatusEntry):
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
@ -502,18 +561,21 @@ class RouterStatusEntryV3(RouterStatusEntry):
:var list or_addresses: **\*** relay's OR addresses, this is a tuple listing
of the form (address (**str**), port (**int**), is_ipv6 (**bool**))
:var str identifier_type: identity digest key type
:var str identifier: base64 encoded identity digest
:var str digest: **\*** router's upper-case hex digest
:var int bandwidth: bandwidth claimed by the relay (in kb/s)
:var int measured: bandwidth measured to be available by the relay, this is a
:var int bandwidth: bandwidth measured to be available by the relay, this is a
unit-less heuristic generated by the Bandwidth authoritites to weight relay
selection
:var bool is_unmeasured: bandwidth measurement isn't based on three or more
:var int measured: *bandwidth* vote provided by a bandwidth authority
:var bool is_unmeasured: *bandwidth* measurement isn't based on three or more
measurements
:var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
information that isn't yet recognized
:var stem.exit_policy.MicroExitPolicy exit_policy: router's exit policy
:var dict protocols: mapping of protocols to their supported versions
:var list microdescriptor_hashes: **\*** tuples of two values, the list of
consensus methods for generating a set of digests and the 'algorithm =>
@ -521,11 +583,19 @@ class RouterStatusEntryV3(RouterStatusEntry):
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.5.0
Added the identifier and identifier_type attributes.
.. versionchanged:: 1.6.0
Added the protocols attribute.
"""
ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
'digest': (None, _parse_r_line),
'or_addresses': ([], _parse_a_line),
'identifier_type': (None, _parse_id_line),
'identifier': (None, _parse_id_line),
'bandwidth': (None, _parse_w_line),
'measured': (None, _parse_w_line),
@ -533,6 +603,7 @@ class RouterStatusEntryV3(RouterStatusEntry):
'unrecognized_bandwidth_entries': ([], _parse_w_line),
'exit_policy': (None, _parse_p_line),
'protocols': ({}, _parse_pr_line),
'microdescriptor_hashes': ([], _parse_m_line),
})
@ -540,9 +611,21 @@ class RouterStatusEntryV3(RouterStatusEntry):
'a': _parse_a_line,
'w': _parse_w_line,
'p': _parse_p_line,
'pr': _parse_pr_line,
'id': _parse_id_line,
'm': _parse_m_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s p1aag7VwarGxqctS7/fS0y5FU+s oQZFLYe9e4A7bOkWKR7TaNxb0JE %s %s 9001 0' % (_random_nickname(), _random_date(), _random_ipv4_address())),
('s', 'Fast Named Running Stable Valid'),
))
def _name(self, is_plural = False):
return 'Router status entries (v3)' if is_plural else 'Router status entry (v3)'
@ -550,7 +633,7 @@ class RouterStatusEntryV3(RouterStatusEntry):
return ('r', 's')
def _single_fields(self):
return ('r', 's', 'v', 'w', 'p')
return ('r', 's', 'v', 'w', 'p', 'pr')
def _compare(self, other, method):
if not isinstance(other, RouterStatusEntryV3):
@ -558,9 +641,15 @@ class RouterStatusEntryV3(RouterStatusEntry):
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
@ -573,33 +662,57 @@ class RouterStatusEntryMicroV3(RouterStatusEntry):
Information about an individual router stored within a microdescriptor
flavored network status document.
:var list or_addresses: **\*** relay's OR addresses, this is a tuple listing
of the form (address (**str**), port (**int**), is_ipv6 (**bool**))
:var int bandwidth: bandwidth claimed by the relay (in kb/s)
:var int measured: bandwidth measured to be available by the relay
:var bool is_unmeasured: bandwidth measurement isn't based on three or more
measurements
:var list unrecognized_bandwidth_entries: **\*** bandwidth weighting
information that isn't yet recognized
:var dict protocols: mapping of protocols to their supported versions
:var str digest: **\*** router's hex encoded digest of our corresponding microdescriptor
.. versionchanged:: 1.6.0
Added the protocols attribute.
.. versionchanged:: 1.7.0
Added the or_addresses attribute.
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
"""
ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{
'or_addresses': ([], _parse_a_line),
'bandwidth': (None, _parse_w_line),
'measured': (None, _parse_w_line),
'is_unmeasured': (False, _parse_w_line),
'unrecognized_bandwidth_entries': ([], _parse_w_line),
'protocols': ({}, _parse_pr_line),
'digest': (None, _parse_microdescriptor_m_line),
})
PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{
'a': _parse_a_line,
'w': _parse_w_line,
'm': _parse_microdescriptor_m_line,
'pr': _parse_pr_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('r', '%s ARIJF2zbqirB9IwsW0mQznccWww %s %s 9001 9030' % (_random_nickname(), _random_date(), _random_ipv4_address())),
('m', 'aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70'),
('s', 'Fast Guard HSDir Named Running Stable V2Dir Valid'),
))
def _name(self, is_plural = False):
return 'Router status entries (micro v3)' if is_plural else 'Router status entry (micro v3)'
@ -607,7 +720,7 @@ class RouterStatusEntryMicroV3(RouterStatusEntry):
return ('r', 's', 'm')
def _single_fields(self):
return ('r', 's', 'v', 'w', 'm')
return ('r', 's', 'v', 'w', 'm', 'pr')
def _compare(self, other, method):
if not isinstance(other, RouterStatusEntryMicroV3):
@ -615,9 +728,15 @@ class RouterStatusEntryMicroV3(RouterStatusEntry):
return method(str(self).strip(), str(other).strip())
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -10,8 +10,7 @@ etc). This information is provided from a few sources...
* The 'cached-descriptors' file in Tor's data directory.
* Archived descriptors provided by CollecTor
(https://collector.torproject.org/).
* Archived descriptors provided by `CollecTor <https://metrics.torproject.org/collector.html>`_.
* Directory authorities and mirrors via their DirPort.
@ -21,6 +20,7 @@ etc). This information is provided from a few sources...
ServerDescriptor - Tor server descriptor.
|- RelayDescriptor - Server descriptor for a relay.
| +- make_router_status_entry - Creates a router status entry for this descriptor.
|
|- BridgeDescriptor - Scrubbed server descriptor for a bridge.
| |- is_scrubbed - checks if our content has been properly scrubbed
@ -29,41 +29,69 @@ etc). This information is provided from a few sources...
|- digest - calculates the upper-case hex digest value for our content
|- get_annotations - dictionary of content prior to the descriptor entry
+- get_annotation_lines - lines that provided the annotations
.. data:: BridgeDistribution (enum)
Preferred method of distributing this relay if a bridge.
.. versionadded:: 1.6.0
===================== ===========
BridgeDistribution Description
===================== ===========
**ANY** No proference, BridgeDB will pick how the bridge is distributed.
**HTTPS** Provided via the `web interface <https://bridges.torproject.org>`_.
**EMAIL** Provided in response to emails to bridges@torproject.org.
**MOAT** Provided in interactive menus within Tor Browser.
**HYPHAE** Provided via a cryptographic invitation-based system.
===================== ===========
"""
import base64
import binascii
import functools
import hashlib
import re
import stem.descriptor.certificate
import stem.descriptor.extrainfo_descriptor
import stem.exit_policy
import stem.prereq
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
import stem.util.tor_tools
import stem.version
from stem import str_type
from stem.descriptor.router_status_entry import RouterStatusEntryV3
from stem.descriptor import (
PGP_BLOCK_END,
Descriptor,
_get_descriptor_components,
create_signing_key,
_descriptor_content,
_descriptor_components,
_read_until_keywords,
_bytes_for_block,
_value,
_values,
_parse_simple_line,
_parse_if_present,
_parse_bytes_line,
_parse_timestamp_line,
_parse_forty_character_hex,
_parse_protocol_line,
_parse_key_block,
_append_router_signature,
_random_nickname,
_random_ipv4_address,
_random_date,
_random_crypto_blob,
)
try:
# added in python 3.2
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
except ImportError:
else:
from stem.util.lru_cache import lru_cache
# relay descriptors must have exactly one of the following
@ -78,6 +106,8 @@ REQUIRED_FIELDS = (
# optional entries that can appear at most once
SINGLE_FIELDS = (
'identity-ed25519',
'master-key-ed25519',
'platform',
'fingerprint',
'hibernating',
@ -86,17 +116,36 @@ SINGLE_FIELDS = (
'read-history',
'write-history',
'eventdns',
'bridge-distribution-request',
'family',
'caches-extra-info',
'extra-info-digest',
'hidden-service-dir',
'protocols',
'allow-single-hop-exits',
'tunnelled-dir-server',
'proto',
'onion-key-crosscert',
'ntor-onion-key',
'ntor-onion-key-crosscert',
'router-sig-ed25519',
)
BridgeDistribution = stem.util.enum.Enum(
('ANY', 'any'),
('HTTPS', 'https'),
('EMAIL', 'email'),
('MOAT', 'moat'),
('HYPHAE', 'hyphae'),
)
DEFAULT_IPV6_EXIT_POLICY = stem.exit_policy.MicroExitPolicy('reject 1-65535')
REJECT_ALL_POLICY = stem.exit_policy.ExitPolicy('reject *:*')
DEFAULT_BRIDGE_DISTRIBUTION = 'any'
def _truncated_b64encode(content):
return stem.util.str_tools._to_unicode(base64.b64encode(content).rstrip(b'='))
def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs):
@ -265,6 +314,17 @@ def _parse_fingerprint_line(descriptor, entries):
descriptor.fingerprint = fingerprint
def _parse_extrainfo_digest_line(descriptor, entries):
value = _value('extra-info-digest', entries)
digest_comp = value.split(' ')
if not stem.util.tor_tools.is_hex_digits(digest_comp[0], 40):
raise ValueError('extra-info-digest should be 40 hex characters: %s' % digest_comp[0])
descriptor.extra_info_digest = digest_comp[0]
descriptor.extra_info_sha256_digest = digest_comp[1] if len(digest_comp) >= 2 else None
def _parse_hibernating_line(descriptor, entries):
# "hibernating" 0|1 (in practice only set if one)
@ -276,15 +336,6 @@ def _parse_hibernating_line(descriptor, entries):
descriptor.hibernating = value == '1'
def _parse_hidden_service_dir_line(descriptor, entries):
value = _value('hidden-service-dir', entries)
if value:
descriptor.hidden_service_dir = value.split(' ')
else:
descriptor.hidden_service_dir = ['2']
def _parse_uptime_line(descriptor, entries):
# We need to be tolerant of negative uptimes to accommodate a past tor
# bug...
@ -328,19 +379,14 @@ def _parse_or_address_line(descriptor, entries):
raise ValueError('or-address line missing a colon: %s' % line)
address, port = entry.rsplit(':', 1)
is_ipv6 = address.startswith('[') and address.endswith(']')
if is_ipv6:
address = address[1:-1] # remove brackets
if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or
(is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))):
if not stem.util.connection.is_valid_ipv4_address(address) and not stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
raise ValueError('or-address line has a malformed address: %s' % line)
if not stem.util.connection.is_valid_port(port):
raise ValueError('or-address line has a malformed port: %s' % line)
or_addresses.append((address, int(port), is_ipv6))
or_addresses.append((address.lstrip('[').rstrip(']'), int(port), stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True)))
descriptor.or_addresses = or_addresses
@ -364,7 +410,7 @@ def _parse_history_line(keyword, history_end_attribute, history_interval_attribu
def _parse_exit_policy(descriptor, entries):
if hasattr(descriptor, '_unparsed_exit_policy'):
if descriptor._unparsed_exit_policy == [str_type('reject *:*')]:
if descriptor._unparsed_exit_policy and stem.util.str_tools._to_unicode(descriptor._unparsed_exit_policy[0]) == 'reject *:*':
descriptor.exit_policy = REJECT_ALL_POLICY
else:
descriptor.exit_policy = stem.exit_policy.ExitPolicy(*descriptor._unparsed_exit_policy)
@ -372,20 +418,39 @@ def _parse_exit_policy(descriptor, entries):
del descriptor._unparsed_exit_policy
def _parse_identity_ed25519_line(descriptor, entries):
_parse_key_block('identity-ed25519', 'ed25519_certificate', 'ED25519 CERT')(descriptor, entries)
if descriptor.ed25519_certificate:
cert_lines = descriptor.ed25519_certificate.split('\n')
if cert_lines[0] == '-----BEGIN ED25519 CERT-----' and cert_lines[-1] == '-----END ED25519 CERT-----':
descriptor.certificate = stem.descriptor.certificate.Ed25519Certificate.parse(''.join(cert_lines[1:-1]))
_parse_master_key_ed25519_line = _parse_simple_line('master-key-ed25519', 'ed25519_master_key')
_parse_master_key_ed25519_for_hash_line = _parse_simple_line('master-key-ed25519', 'ed25519_certificate_hash')
_parse_contact_line = _parse_bytes_line('contact', 'contact')
_parse_published_line = _parse_timestamp_line('published', 'published')
_parse_extrainfo_digest_line = _parse_forty_character_hex('extra-info-digest', 'extra_info_digest')
_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values')
_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values')
_parse_ipv6_policy_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('ipv6-policy', entries)))
_parse_allow_single_hop_exits_line = lambda descriptor, entries: setattr(descriptor, 'allow_single_hop_exits', 'allow_single_hop_exits' in entries)
_parse_caches_extra_info_line = lambda descriptor, entries: setattr(descriptor, 'extra_info_cache', 'extra_info_cache' in entries)
_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', set(_value('family', entries).split(' ')))
_parse_eventdns_line = lambda descriptor, entries: setattr(descriptor, 'eventdns', _value('eventdns', entries) == '1')
_parse_ipv6_policy_line = _parse_simple_line('ipv6-policy', 'exit_policy_v6', func = lambda v: stem.exit_policy.MicroExitPolicy(v))
_parse_allow_single_hop_exits_line = _parse_if_present('allow-single-hop-exits', 'allow_single_hop_exits')
_parse_tunneled_dir_server_line = _parse_if_present('tunnelled-dir-server', 'allow_tunneled_dir_requests')
_parse_proto_line = _parse_protocol_line('proto', 'protocols')
_parse_hidden_service_dir_line = _parse_if_present('hidden-service-dir', 'is_hidden_service_dir')
_parse_caches_extra_info_line = _parse_if_present('caches-extra-info', 'extra_info_cache')
_parse_bridge_distribution_request_line = _parse_simple_line('bridge-distribution-request', 'bridge_distribution')
_parse_family_line = _parse_simple_line('family', 'family', func = lambda v: set(v.split(' ')))
_parse_eventdns_line = _parse_simple_line('eventdns', 'eventdns', func = lambda v: v == '1')
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
_parse_onion_key_crosscert_line = _parse_key_block('onion-key-crosscert', 'onion_key_crosscert', 'CROSSCERT')
_parse_signing_key_line = _parse_key_block('signing-key', 'signing_key', 'RSA PUBLIC KEY')
_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE')
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
_parse_ntor_onion_key_crosscert_line = _parse_key_block('ntor-onion-key-crosscert', 'ntor_onion_key_crosscert', 'ED25519 CERT', 'ntor_onion_key_crosscert_sign')
_parse_router_sig_ed25519_line = _parse_simple_line('router-sig-ed25519', 'ed25519_signature')
_parse_router_digest_sha256_line = _parse_simple_line('router-digest-sha256', 'router_digest_sha256')
_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest')
@ -399,7 +464,7 @@ class ServerDescriptor(Descriptor):
:var str address: **\*** IPv4 address of the relay
:var int or_port: **\*** port used for relaying
:var int socks_port: **\*** port used as client (deprecated, always **None**)
:var int socks_port: **\*** port used as client (**deprecated**, always **None**)
:var int dir_port: **\*** port used for descriptor mirroring
:var bytes platform: line with operating system and tor version
@ -409,6 +474,8 @@ class ServerDescriptor(Descriptor):
:var bytes contact: contact information
:var stem.exit_policy.ExitPolicy exit_policy: **\*** stated exit policy
:var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6
:var BridgeDistribution bridge_distribution: **\*** preferred method of providing this relay's
address if a bridge
:var set family: **\*** nicknames or fingerprints of declared family
:var int average_bandwidth: **\*** average rate it's willing to relay in bytes/s
@ -417,16 +484,23 @@ class ServerDescriptor(Descriptor):
:var list link_protocols: link protocols supported by the relay
:var list circuit_protocols: circuit protocols supported by the relay
:var bool is_hidden_service_dir: **\*** indicates if the relay serves hidden
service descriptors
:var bool hibernating: **\*** hibernating when published
:var bool allow_single_hop_exits: **\*** flag if single hop exiting is allowed
:var bool allow_tunneled_dir_requests: **\*** flag if tunneled directory
requests are accepted
:var bool extra_info_cache: **\*** flag if a mirror for extra-info documents
:var str extra_info_digest: upper-case hex encoded digest of our extra-info document
:var bool eventdns: flag for evdns backend (deprecated, always unset)
:var str extra_info_sha256_digest: base64 encoded sha256 digest of our extra-info document
:var bool eventdns: flag for evdns backend (**deprecated**, always unset)
:var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
:var list or_addresses: **\*** alternative for our address/or_port
attributes, each entry is a tuple of the form (address (**str**), port
(**int**), is_ipv6 (**bool**))
:var dict protocols: mapping of protocols to their supported versions
Deprecated, moved to extra-info descriptor...
**Deprecated**, moved to extra-info descriptor...
:var datetime read_history_end: end of the sampling interval
:var int read_history_interval: seconds per interval
@ -438,6 +512,20 @@ class ServerDescriptor(Descriptor):
**\*** attribute is either required when we're parsed with validation or has
a default value, others are left as **None** if undefined
.. versionchanged:: 1.5.0
Added the allow_tunneled_dir_requests attribute.
.. versionchanged:: 1.6.0
Added the extra_info_sha256_digest, protocols, and bridge_distribution
attributes.
.. versionchanged:: 1.7.0
Added the is_hidden_service_dir attribute.
.. versionchanged:: 1.7.0
Deprecated the hidden_service_dir field, it's never been populated
(:spec:`43c2f78`). This field will be removed in Stem 2.0.
"""
ATTRIBUTES = {
@ -457,6 +545,7 @@ class ServerDescriptor(Descriptor):
'operating_system': (None, _parse_platform_line),
'uptime': (None, _parse_uptime_line),
'exit_policy_v6': (DEFAULT_IPV6_EXIT_POLICY, _parse_ipv6_policy_line),
'bridge_distribution': (DEFAULT_BRIDGE_DISTRIBUTION, _parse_bridge_distribution_request_line),
'family': (set(), _parse_family_line),
'average_bandwidth': (None, _parse_bandwidth_line),
@ -465,12 +554,16 @@ class ServerDescriptor(Descriptor):
'link_protocols': (None, _parse_protocols_line),
'circuit_protocols': (None, _parse_protocols_line),
'is_hidden_service_dir': (False, _parse_hidden_service_dir_line),
'hibernating': (False, _parse_hibernating_line),
'allow_single_hop_exits': (False, _parse_allow_single_hop_exits_line),
'allow_tunneled_dir_requests': (False, _parse_tunneled_dir_server_line),
'protocols': ({}, _parse_proto_line),
'extra_info_cache': (False, _parse_caches_extra_info_line),
'extra_info_digest': (None, _parse_extrainfo_digest_line),
'hidden_service_dir': (None, _parse_hidden_service_dir_line),
'extra_info_sha256_digest': (None, _parse_extrainfo_digest_line),
'eventdns': (None, _parse_eventdns_line),
'ntor_onion_key': (None, _parse_ntor_onion_key_line),
'or_addresses': ([], _parse_or_address_line),
'read_history_end': (None, _parse_read_history_line),
@ -494,12 +587,16 @@ class ServerDescriptor(Descriptor):
'hidden-service-dir': _parse_hidden_service_dir_line,
'uptime': _parse_uptime_line,
'protocols': _parse_protocols_line,
'ntor-onion-key': _parse_ntor_onion_key_line,
'or-address': _parse_or_address_line,
'read-history': _parse_read_history_line,
'write-history': _parse_write_history_line,
'ipv6-policy': _parse_ipv6_policy_line,
'allow-single-hop-exits': _parse_allow_single_hop_exits_line,
'tunnelled-dir-server': _parse_tunneled_dir_server_line,
'proto': _parse_proto_line,
'caches-extra-info': _parse_caches_extra_info_line,
'bridge-distribution-request': _parse_bridge_distribution_request_line,
'family': _parse_family_line,
'eventdns': _parse_eventdns_line,
}
@ -533,7 +630,13 @@ class ServerDescriptor(Descriptor):
# influences the resulting exit policy, but for everything else the order
# does not matter so breaking it into key / value pairs.
entries, self._unparsed_exit_policy = _get_descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, ('accept', 'reject'))
entries, self._unparsed_exit_policy = _descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, extra_keywords = ('accept', 'reject'), non_ascii_fields = ('contact', 'platform'))
# TODO: Remove the following field in Stem 2.0. It has never been populated...
#
# https://gitweb.torproject.org/torspec.git/commit/?id=43c2f78
self.hidden_service_dir = ['2']
if validate:
self._parse(entries, validate)
@ -624,6 +727,12 @@ class ServerDescriptor(Descriptor):
if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]:
raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword)
if 'identity-ed25519' in entries.keys():
if 'router-sig-ed25519' not in entries.keys():
raise ValueError('Descriptor must have router-sig-ed25519 entry to accompany identity-ed25519')
elif 'router-sig-ed25519' not in list(entries.keys())[-2:]:
raise ValueError("Descriptor must have 'router-sig-ed25519' as the next-to-last entry")
if not self.exit_policy:
raise ValueError("Descriptor must have at least one 'accept' or 'reject' entry")
@ -648,29 +757,68 @@ class RelayDescriptor(ServerDescriptor):
Server descriptor (`descriptor specification
<https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_)
:var stem.certificate.Ed25519Certificate certificate: ed25519 certificate
:var str ed25519_certificate: base64 encoded ed25519 certificate
:var str ed25519_master_key: base64 encoded master key for our ed25519 certificate
:var str ed25519_signature: signature of this document using ed25519
:var str onion_key: **\*** key used to encrypt EXTEND cells
:var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
:var str onion_key_crosscert: signature generated using the onion_key
:var str ntor_onion_key_crosscert: signature generated using the ntor-onion-key
:var str ntor_onion_key_crosscert_sign: sign of the corresponding ed25519 public key
:var str signing_key: **\*** relay's long-term identity key
:var str signature: **\*** signature for this descriptor
**\*** attribute is required when we're parsed with validation
.. versionchanged:: 1.5.0
Added the ed25519_certificate, ed25519_master_key, ed25519_signature,
onion_key_crosscert, ntor_onion_key_crosscert, and
ntor_onion_key_crosscert_sign attributes.
.. versionchanged:: 1.6.0
Moved from the deprecated `pycrypto
<https://www.dlitz.net/software/pycrypto/>`_ module to `cryptography
<https://pypi.python.org/pypi/cryptography>`_ for validating signatures.
.. versionchanged:: 1.6.0
Added the certificate attribute.
.. deprecated:: 1.6.0
Our **ed25519_certificate** is deprecated in favor of our new
**certificate** attribute. The base64 encoded certificate is available via
the certificate's **encoded** attribute.
.. versionchanged:: 1.6.0
Added the **skip_crypto_validation** constructor argument.
"""
ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{
'certificate': (None, _parse_identity_ed25519_line),
'ed25519_certificate': (None, _parse_identity_ed25519_line),
'ed25519_master_key': (None, _parse_master_key_ed25519_line),
'ed25519_signature': (None, _parse_router_sig_ed25519_line),
'onion_key': (None, _parse_onion_key_line),
'ntor_onion_key': (None, _parse_ntor_onion_key_line),
'onion_key_crosscert': (None, _parse_onion_key_crosscert_line),
'ntor_onion_key_crosscert': (None, _parse_ntor_onion_key_crosscert_line),
'ntor_onion_key_crosscert_sign': (None, _parse_ntor_onion_key_crosscert_line),
'signing_key': (None, _parse_signing_key_line),
'signature': (None, _parse_router_signature_line),
})
PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{
'identity-ed25519': _parse_identity_ed25519_line,
'master-key-ed25519': _parse_master_key_ed25519_line,
'router-sig-ed25519': _parse_router_sig_ed25519_line,
'onion-key': _parse_onion_key_line,
'ntor-onion-key': _parse_ntor_onion_key_line,
'onion-key-crosscert': _parse_onion_key_crosscert_line,
'ntor-onion-key-crosscert': _parse_ntor_onion_key_crosscert_line,
'signing-key': _parse_signing_key_line,
'router-signature': _parse_router_signature_line,
})
def __init__(self, raw_contents, validate = False, annotations = None):
def __init__(self, raw_contents, validate = False, annotations = None, skip_crypto_validation = False):
super(RelayDescriptor, self).__init__(raw_contents, validate, annotations)
if validate:
@ -680,12 +828,65 @@ class RelayDescriptor(ServerDescriptor):
if key_hash != self.fingerprint.lower():
raise ValueError('Fingerprint does not match the hash of our signing key (fingerprint: %s, signing key hash: %s)' % (self.fingerprint.lower(), key_hash))
if stem.prereq.is_crypto_available():
if not skip_crypto_validation and stem.prereq.is_crypto_available():
signed_digest = self._digest_for_signature(self.signing_key, self.signature)
if signed_digest != self.digest():
raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, self.digest()))
if self.onion_key_crosscert and stem.prereq.is_crypto_available():
onion_key_crosscert_digest = self._digest_for_signature(self.onion_key, self.onion_key_crosscert)
if onion_key_crosscert_digest != self._onion_key_crosscert_digest():
raise ValueError('Decrypted onion-key-crosscert digest does not match local digest (calculated: %s, local: %s)' % (onion_key_crosscert_digest, self._onion_key_crosscert_digest()))
if stem.prereq._is_pynacl_available() and self.certificate:
self.certificate.validate(self)
@classmethod
def content(cls, attr = None, exclude = (), sign = False, signing_key = None):
if signing_key:
sign = True
if attr is None:
attr = {}
base_header = (
('router', '%s %s 9001 0 0' % (_random_nickname(), _random_ipv4_address())),
('published', _random_date()),
('bandwidth', '153600 256000 104590'),
('reject', '*:*'),
('onion-key', _random_crypto_blob('RSA PUBLIC KEY')),
('signing-key', _random_crypto_blob('RSA PUBLIC KEY')),
)
if sign:
if attr and 'signing-key' in attr:
raise ValueError('Cannot sign the descriptor if a signing-key has been provided')
elif attr and 'router-signature' in attr:
raise ValueError('Cannot sign the descriptor if a router-signature has been provided')
if signing_key is None:
signing_key = create_signing_key()
if 'fingerprint' not in attr:
fingerprint = hashlib.sha1(_bytes_for_block(stem.util.str_tools._to_unicode(signing_key.public_digest.strip()))).hexdigest().upper()
attr['fingerprint'] = ' '.join(stem.util.str_tools._split_by_length(fingerprint, 4))
attr['signing-key'] = signing_key.public_digest
content = _descriptor_content(attr, exclude, base_header) + b'\nrouter-signature\n'
return _append_router_signature(content, signing_key.private)
else:
return _descriptor_content(attr, exclude, base_header, (
('router-sig-ed25519', None),
('router-signature', _random_crypto_blob('SIGNATURE')),
))
@classmethod
def create(cls, attr = None, exclude = (), validate = True, sign = False, signing_key = None):
return cls(cls.content(attr, exclude, sign, signing_key), validate = validate, skip_crypto_validation = not sign)
@lru_cache()
def digest(self):
"""
@ -693,23 +894,88 @@ class RelayDescriptor(ServerDescriptor):
:returns: the digest string encoded in uppercase hex
:raises: ValueError if the digest canot be calculated
:raises: ValueError if the digest cannot be calculated
"""
return self._digest_for_content(b'router ', b'\nrouter-signature\n')
def make_router_status_entry(self):
"""
Provides a RouterStatusEntryV3 for this descriptor content.
.. versionadded:: 1.6.0
:returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`
that would be in the consensus
"""
if not self.fingerprint:
raise ValueError('Server descriptor lacks a fingerprint. This is an optional field, but required to make a router status entry.')
attr = {
'r': ' '.join([
self.nickname,
_truncated_b64encode(binascii.unhexlify(stem.util.str_tools._to_bytes(self.fingerprint))),
_truncated_b64encode(binascii.unhexlify(stem.util.str_tools._to_bytes(self.digest()))),
self.published.strftime('%Y-%m-%d %H:%M:%S'),
self.address,
str(self.or_port),
str(self.dir_port) if self.dir_port else '0',
]),
'w': 'Bandwidth=%i' % self.average_bandwidth,
'p': self.exit_policy.summary().replace(', ', ','),
}
if self.tor_version:
attr['v'] = 'Tor %s' % self.tor_version
if self.or_addresses:
attr['a'] = ['%s:%s' % (addr, port) for addr, port, _ in self.or_addresses]
if self.certificate:
attr['id'] = 'ed25519 %s' % _truncated_b64encode(self.certificate.key)
return RouterStatusEntryV3.create(attr)
@lru_cache()
def _onion_key_crosscert_digest(self):
"""
Provides the digest of the onion-key-crosscert data. This consists of the
RSA identity key sha1 and ed25519 identity key.
:returns: **unicode** digest encoded in uppercase hex
:raises: ValueError if the digest cannot be calculated
"""
signing_key_digest = hashlib.sha1(_bytes_for_block(self.signing_key)).digest()
data = signing_key_digest + base64.b64decode(stem.util.str_tools._to_bytes(self.ed25519_master_key) + b'=')
return stem.util.str_tools._to_unicode(binascii.hexlify(data).upper())
def _compare(self, other, method):
if not isinstance(other, RelayDescriptor):
return False
return method(str(self).strip(), str(other).strip())
def _check_constraints(self, entries):
super(RelayDescriptor, self)._check_constraints(entries)
if self.ed25519_certificate:
if not self.onion_key_crosscert:
raise ValueError("Descriptor must have a 'onion-key-crosscert' when identity-ed25519 is present")
elif not self.ed25519_signature:
raise ValueError("Descriptor must have a 'router-sig-ed25519' when identity-ed25519 is present")
def __hash__(self):
return hash(str(self).strip())
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
@ -720,17 +986,42 @@ class RelayDescriptor(ServerDescriptor):
class BridgeDescriptor(ServerDescriptor):
"""
Bridge descriptor (`bridge descriptor specification
<https://collector.torproject.org/formats.html#bridge-descriptors>`_)
<https://metrics.torproject.org/collector.html#bridge-descriptors>`_)
:var str ed25519_certificate_hash: sha256 hash of the original identity-ed25519
:var str router_digest_sha256: sha256 digest of this document
.. versionchanged:: 1.5.0
Added the ed25519_certificate_hash and router_digest_sha256 attributes.
Also added ntor_onion_key (previously this only belonged to unsanitized
descriptors).
"""
ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{
'ed25519_certificate_hash': (None, _parse_master_key_ed25519_for_hash_line),
'router_digest_sha256': (None, _parse_router_digest_sha256_line),
'_digest': (None, _parse_router_digest_line),
})
PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{
'master-key-ed25519': _parse_master_key_ed25519_for_hash_line,
'router-digest-sha256': _parse_router_digest_sha256_line,
'router-digest': _parse_router_digest_line,
})
@classmethod
def content(cls, attr = None, exclude = (), sign = False):
if sign:
raise NotImplementedError('Signing of %s not implemented' % cls.__name__)
return _descriptor_content(attr, exclude, (
('router', '%s %s 9001 0 0' % (_random_nickname(), _random_ipv4_address())),
('router-digest', '006FD96BA35E7785A6A3B8B75FE2E2435A13BDB4'),
('published', _random_date()),
('bandwidth', '409600 819200 5120'),
('reject', '*:*'),
))
def digest(self):
return self._digest
@ -738,7 +1029,7 @@ class BridgeDescriptor(ServerDescriptor):
"""
Checks if we've been properly scrubbed in accordance with the `bridge
descriptor specification
<https://collector.torproject.org/formats.html#bridge-descriptors>`_.
<https://metrics.torproject.org/collector.html#bridge-descriptors>`_.
Validation is a moving target so this may not be fully up to date.
:returns: **True** if we're scrubbed, **False** otherwise
@ -815,6 +1106,9 @@ class BridgeDescriptor(ServerDescriptor):
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)

View file

@ -1,4 +1,4 @@
# Copyright 2013-2015, Damian Johnson and The Tor Project
# Copyright 2013-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -17,7 +17,7 @@ import stem.util.tor_tools
from stem.descriptor import (
Descriptor,
_read_until_keywords,
_get_descriptor_components,
_descriptor_components,
)
@ -63,7 +63,7 @@ class TorDNSEL(Descriptor):
def __init__(self, raw_contents, validate):
super(TorDNSEL, self).__init__(raw_contents)
raw_contents = stem.util.str_tools._to_unicode(raw_contents)
entries = _get_descriptor_components(raw_contents, validate)
entries = _descriptor_components(raw_contents, validate)
self.fingerprint = None
self.published = None

View file

@ -0,0 +1,659 @@
# Copyright 2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Directories that provide `relay descriptor information
<../tutorials/mirror_mirror_on_the_wall.html>`_. At a very high level tor works
as follows...
1. Volunteer starts a new tor relay, during which it sends a `server
descriptor <descriptor/server_descriptor.html>`_ to each of the directory
authorities.
2. Each hour the directory authorities make a `vote
<descriptor/networkstatus.html>`_ that says who they think the active
relays are in the network and some attributes about them.
3. The directory authorities send each other their votes, and compile that
into the `consensus <descriptor/networkstatus.html>`_. This document is very
similar to the votes, the only difference being that the majority of the
authorities agree upon and sign this document. The idividual relay entries
in the vote or consensus is called `router status entries
<descriptor/router_status_entry.html>`_.
4. Tor clients (people using the service) download the consensus from an
authority, fallback, or other mirror to determine who the active relays in
the network are. They then use this to construct circuits and use the
network.
::
Directory - Relay we can retrieve descriptor information from
| |- from_cache - Provides cached information bundled with Stem.
| +- from_remote - Downloads the latest directory information from tor.
|
|- Authority - Tor directory authority
+- Fallback - Mirrors that can be used instead of the authorities
.. versionadded:: 1.7.0
"""
import os
import re
import stem.util
import stem.util.conf
from stem.util import connection, str_tools, tor_tools
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
GITWEB_AUTHORITY_URL = 'https://gitweb.torproject.org/tor.git/plain/src/app/config/auth_dirs.inc'
GITWEB_FALLBACK_URL = 'https://gitweb.torproject.org/tor.git/plain/src/app/config/fallback_dirs.inc'
FALLBACK_CACHE_PATH = os.path.join(os.path.dirname(__file__), 'cached_fallbacks.cfg')
AUTHORITY_NAME = re.compile('"(\S+) orport=(\d+) .*"')
AUTHORITY_V3IDENT = re.compile('"v3ident=([\dA-F]{40}) "')
AUTHORITY_IPV6 = re.compile('"ipv6=\[([\da-f:]+)\]:(\d+) "')
AUTHORITY_ADDR = re.compile('"([\d\.]+):(\d+) ([\dA-F ]{49})",')
FALLBACK_DIV = '/* ===== */'
FALLBACK_MAPPING = re.compile('/\*\s+(\S+)=(\S*)\s+\*/')
FALLBACK_ADDR = re.compile('"([\d\.]+):(\d+) orport=(\d+) id=([\dA-F]{40}).*')
FALLBACK_NICKNAME = re.compile('/\* nickname=(\S+) \*/')
FALLBACK_EXTRAINFO = re.compile('/\* extrainfo=([0-1]) \*/')
FALLBACK_IPV6 = re.compile('" ipv6=\[([\da-f:]+)\]:(\d+)"')
def _match_with(lines, regexes, required = None):
"""
Scans the given content against a series of regex matchers, providing back a
mapping of regexes to their capture groups. This maping is with the value if
the regex has just a single capture group, and a tuple otherwise.
:param list lines: text to parse
:param list regexes: regexes to match against
:param list required: matches that must be in the content
:returns: **dict** mapping matchers against their capture groups
:raises: **ValueError** if a required match is not present
"""
matches = {}
for line in lines:
for matcher in regexes:
m = matcher.search(str_tools._to_unicode(line))
if m:
match_groups = m.groups()
matches[matcher] = match_groups if len(match_groups) > 1 else match_groups[0]
if required:
for required_matcher in required:
if required_matcher not in matches:
raise ValueError('Failed to parse mandatory data from:\n\n%s' % '\n'.join(lines))
return matches
def _directory_entries(lines, pop_section_func, regexes, required = None):
next_section = pop_section_func(lines)
while next_section:
yield _match_with(next_section, regexes, required)
next_section = pop_section_func(lines)
class Directory(object):
"""
Relay we can contact for descriptor information.
Our :func:`~stem.directory.Directory.from_cache` and
:func:`~stem.directory.Directory.from_remote` functions key off a
different identifier based on our subclass...
* :class:`~stem.directory.Authority` keys off the nickname.
* :class:`~stem.directory.Fallback` keys off fingerprints.
This is because authorities are highly static and canonically known by their
names, whereas fallbacks vary more and don't necessarily have a nickname to
key off of.
:var str address: IPv4 address of the directory
:var int or_port: port on which the relay services relay traffic
:var int dir_port: port on which directory information is available
:var str fingerprint: relay fingerprint
:var str nickname: relay nickname
:var str orport_v6: **(address, port)** tuple for the directory's IPv6
ORPort, or **None** if it doesn't have one
"""
def __init__(self, address, or_port, dir_port, fingerprint, nickname, orport_v6):
identifier = '%s (%s)' % (fingerprint, nickname) if nickname else fingerprint
if not connection.is_valid_ipv4_address(address):
raise ValueError('%s has an invalid IPv4 address: %s' % (identifier, address))
elif not connection.is_valid_port(or_port):
raise ValueError('%s has an invalid ORPort: %s' % (identifier, or_port))
elif not connection.is_valid_port(dir_port):
raise ValueError('%s has an invalid DirPort: %s' % (identifier, dir_port))
elif not tor_tools.is_valid_fingerprint(fingerprint):
raise ValueError('%s has an invalid fingerprint: %s' % (identifier, fingerprint))
elif nickname and not tor_tools.is_valid_nickname(nickname):
raise ValueError('%s has an invalid nickname: %s' % (fingerprint, nickname))
if orport_v6:
if not isinstance(orport_v6, tuple) or len(orport_v6) != 2:
raise ValueError('%s orport_v6 should be a two value tuple: %s' % (identifier, str(orport_v6)))
elif not connection.is_valid_ipv6_address(orport_v6[0]):
raise ValueError('%s has an invalid IPv6 address: %s' % (identifier, orport_v6[0]))
elif not connection.is_valid_port(orport_v6[1]):
raise ValueError('%s has an invalid IPv6 port: %s' % (identifier, orport_v6[1]))
self.address = address
self.or_port = int(or_port)
self.dir_port = int(dir_port)
self.fingerprint = fingerprint
self.nickname = nickname
self.orport_v6 = (orport_v6[0], int(orport_v6[1])) if orport_v6 else None
@staticmethod
def from_cache():
"""
Provides cached Tor directory information. This information is hardcoded
into Tor and occasionally changes, so the information provided by this
method may not necessarily match the latest version of tor.
.. versionadded:: 1.5.0
.. versionchanged:: 1.7.0
Support added to the :class:`~stem.directory.Authority` class.
:returns: **dict** of **str** identifiers to
:class:`~stem.directory.Directory` instances
"""
raise NotImplementedError('Unsupported Operation: this should be implemented by the Directory subclass')
@staticmethod
def from_remote(timeout = 60):
"""
Reads and parses tor's directory data `from gitweb.torproject.org <https://gitweb.torproject.org/>`_.
Note that while convenient, this reliance on GitWeb means you should alway
call with a fallback, such as...
::
try:
authorities = stem.directory.Authority.from_remote()
except IOError:
authorities = stem.directory.Authority.from_cache()
.. versionadded:: 1.5.0
.. versionchanged:: 1.7.0
Support added to the :class:`~stem.directory.Authority` class.
:param int timeout: seconds to wait before timing out the request
:returns: **dict** of **str** identifiers to their
:class:`~stem.directory.Directory`
:raises: **IOError** if unable to retrieve the fallback directories
"""
raise NotImplementedError('Unsupported Operation: this should be implemented by the Directory subclass')
def __hash__(self):
return stem.util._hash_attr(self, 'address', 'or_port', 'dir_port', 'fingerprint', 'nickname', 'orport_v6')
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, Directory) else False
def __ne__(self, other):
return not self == other
class Authority(Directory):
"""
Tor directory authority, a special type of relay `hardcoded into tor
<https://gitweb.torproject.org/tor.git/plain/src/or/auth_dirs.inc>`_
to enumerate the relays in the network.
.. versionchanged:: 1.3.0
Added the is_bandwidth_authority attribute.
.. versionchanged:: 1.7.0
Added the orport_v6 attribute.
.. deprecated:: 1.7.0
The is_bandwidth_authority attribute is deprecated and will be removed in
the future.
:var str v3ident: identity key fingerprint used to sign votes and consensus
"""
def __init__(self, address = None, or_port = None, dir_port = None, fingerprint = None, nickname = None, orport_v6 = None, v3ident = None, is_bandwidth_authority = False):
super(Authority, self).__init__(address, or_port, dir_port, fingerprint, nickname, orport_v6)
if v3ident and not tor_tools.is_valid_fingerprint(v3ident):
identifier = '%s (%s)' % (fingerprint, nickname) if nickname else fingerprint
raise ValueError('%s has an invalid v3ident: %s' % (identifier, v3ident))
self.v3ident = v3ident
self.is_bandwidth_authority = is_bandwidth_authority
@staticmethod
def from_cache():
return dict(DIRECTORY_AUTHORITIES)
@staticmethod
def from_remote(timeout = 60):
try:
lines = str_tools._to_unicode(urllib.urlopen(GITWEB_AUTHORITY_URL, timeout = timeout).read()).splitlines()
except Exception as exc:
raise IOError("Unable to download tor's directory authorities from %s: %s" % (GITWEB_AUTHORITY_URL, exc))
if not lines:
raise IOError('%s did not have any content' % GITWEB_AUTHORITY_URL)
# Entries look like...
#
# "moria1 orport=9101 "
# "v3ident=D586D18309DED4CD6D57C18FDB97EFA96D330566 "
# "128.31.0.39:9131 9695 DFC3 5FFE B861 329B 9F1A B04C 4639 7020 CE31",
try:
results = {}
for matches in _directory_entries(lines, Authority._pop_section, (AUTHORITY_NAME, AUTHORITY_V3IDENT, AUTHORITY_IPV6, AUTHORITY_ADDR), required = (AUTHORITY_NAME, AUTHORITY_ADDR)):
nickname, or_port = matches.get(AUTHORITY_NAME)
address, dir_port, fingerprint = matches.get(AUTHORITY_ADDR)
results[nickname] = Authority(
address = address,
or_port = or_port,
dir_port = dir_port,
fingerprint = fingerprint.replace(' ', ''),
nickname = nickname,
orport_v6 = matches.get(AUTHORITY_IPV6),
v3ident = matches.get(AUTHORITY_V3IDENT),
)
except ValueError as exc:
raise IOError(str(exc))
return results
@staticmethod
def _pop_section(lines):
"""
Provides the next authority entry.
"""
section_lines = []
if lines:
section_lines.append(lines.pop(0))
while lines and lines[0].startswith(' '):
section_lines.append(lines.pop(0))
return section_lines
def __hash__(self):
  # hash on our authority-specific attributes, deferring the rest to Directory

  authority_attrs = ('v3ident', 'is_bandwidth_authority')
  return stem.util._hash_attr(self, *authority_attrs, parent = Directory, cache = True)
def __eq__(self, other):
  # equality is hash based, mirroring the other stem.directory classes

  if not isinstance(other, Authority):
    return False

  return hash(self) == hash(other)
def __ne__(self, other):
  # python 2.x doesn't derive this from __eq__, so define it explicitly

  return not (self == other)
class Fallback(Directory):
  """
  Particularly stable relays tor can use instead of authorities when
  bootstrapping. These relays are `hardcoded in tor
  <https://gitweb.torproject.org/tor.git/tree/src/or/fallback_dirs.inc>`_.

  For example, the following checks the performance of tor's fallback directories...

  ::

    import time
    from stem.descriptor.remote import get_consensus
    from stem.directory import Fallback

    for fallback in Fallback.from_cache().values():
      start = time.time()
      get_consensus(endpoints = [(fallback.address, fallback.dir_port)]).run()
      print('Downloading the consensus took %0.2f from %s' % (time.time() - start, fallback.fingerprint))

  ::

    % python example.py
    Downloading the consensus took 5.07 from 0AD3FA884D18F89EEA2D89C019379E0E7FD94417
    Downloading the consensus took 3.59 from C871C91489886D5E2E94C13EA1A5FDC4B6DC5204
    Downloading the consensus took 4.16 from 74A910646BCEEFBCD2E874FC1DC997430F968145
    ...

  .. versionadded:: 1.5.0

  .. versionchanged:: 1.7.0
     Added the has_extrainfo and header attributes which are part of
     the `second version of the fallback directories
     <https://lists.torproject.org/pipermail/tor-dev/2017-December/012721.html>`_.

  :var bool has_extrainfo: **True** if the relay should be able to provide
    extrainfo descriptors, **False** otherwise.
  :var collections.OrderedDict header: metadata about the fallback directory file this originated from
  """

  def __init__(self, address = None, or_port = None, dir_port = None, fingerprint = None, nickname = None, has_extrainfo = False, orport_v6 = None, header = None):
    super(Fallback, self).__init__(address, or_port, dir_port, fingerprint, nickname, orport_v6)
    self.has_extrainfo = has_extrainfo
    # copy so callers mutating their dict don't affect us; always an OrderedDict
    self.header = OrderedDict(header) if header else OrderedDict()

  @staticmethod
  def from_cache(path = FALLBACK_CACHE_PATH):
    """
    Provides fallback directory information cached with stem.

    :param str path: cache file to load from

    :returns: **dict** of fingerprints to their
      :class:`~stem.directory.Fallback`

    :raises: **IOError** if the cache file is missing a required attribute
    """

    conf = stem.util.conf.Config()
    conf.load(path)

    # 'header.<key>' entries are file-level metadata, shared by every fallback

    headers = OrderedDict([(k.split('.', 1)[1], conf.get(k)) for k in conf.keys() if k.startswith('header.')])

    results = {}

    # Remaining keys are of the form '<fingerprint>.<attribute>', aside from a
    # few non-relay bookkeeping entries we skip below.

    for fingerprint in set([key.split('.')[0] for key in conf.keys()]):
      if fingerprint in ('tor_commit', 'stem_commit', 'header'):
        continue

      attr = {}

      for attr_name in ('address', 'or_port', 'dir_port', 'nickname', 'has_extrainfo', 'orport6_address', 'orport6_port'):
        key = '%s.%s' % (fingerprint, attr_name)
        attr[attr_name] = conf.get(key)

        # only the nickname, extrainfo flag, and IPv6 ORPort are optional

        if not attr[attr_name] and attr_name not in ('nickname', 'has_extrainfo', 'orport6_address', 'orport6_port'):
          raise IOError("'%s' is missing from %s" % (key, FALLBACK_CACHE_PATH))

      if attr['orport6_address'] and attr['orport6_port']:
        orport_v6 = (attr['orport6_address'], int(attr['orport6_port']))
      else:
        orport_v6 = None

      results[fingerprint] = Fallback(
        address = attr['address'],
        or_port = int(attr['or_port']),
        dir_port = int(attr['dir_port']),
        fingerprint = fingerprint,
        nickname = attr['nickname'],
        has_extrainfo = attr['has_extrainfo'] == 'true',
        orport_v6 = orport_v6,
        header = headers,
      )

    return results

  @staticmethod
  def from_remote(timeout = 60):
    """
    Downloads tor's present fallback directories from gitweb.

    :param int timeout: seconds before the download is aborted

    :returns: **dict** of fingerprints to their
      :class:`~stem.directory.Fallback`

    :raises: **IOError** if unable to download or parse the file
    """

    try:
      lines = str_tools._to_unicode(urllib.urlopen(GITWEB_FALLBACK_URL, timeout = timeout).read()).splitlines()
    except Exception as exc:
      raise IOError("Unable to download tor's fallback directories from %s: %s" % (GITWEB_FALLBACK_URL, exc))

    if not lines:
      raise IOError('%s did not have any content' % GITWEB_FALLBACK_URL)
    elif lines[0] != '/* type=fallback */':
      raise IOError('%s does not have a type field indicating it is fallback directory metadata' % GITWEB_FALLBACK_URL)

    # header metadata

    header = {}

    for line in Fallback._pop_section(lines):
      mapping = FALLBACK_MAPPING.match(line)

      if mapping:
        header[mapping.group(1)] = mapping.group(2)
      else:
        raise IOError('Malformed fallback directory header line: %s' % line)

    Fallback._pop_section(lines)  # skip human readable comments

    # Entries look like...
    #
    # "5.9.110.236:9030 orport=9001 id=0756B7CD4DFC8182BE23143FAC0642F515182CEB"
    # " ipv6=[2a01:4f8:162:51e2::2]:9001"
    # /* nickname=rueckgrat */
    # /* extrainfo=1 */

    try:
      results = {}

      for matches in _directory_entries(lines, Fallback._pop_section, (FALLBACK_ADDR, FALLBACK_NICKNAME, FALLBACK_EXTRAINFO, FALLBACK_IPV6), required = (FALLBACK_ADDR,)):
        address, dir_port, or_port, fingerprint = matches[FALLBACK_ADDR]

        results[fingerprint] = Fallback(
          address = address,
          or_port = int(or_port),
          dir_port = int(dir_port),
          fingerprint = fingerprint,
          nickname = matches.get(FALLBACK_NICKNAME),
          has_extrainfo = matches.get(FALLBACK_EXTRAINFO) == '1',
          orport_v6 = matches.get(FALLBACK_IPV6),
          header = header,
        )
    except ValueError as exc:
      raise IOError(str(exc))

    return results

  @staticmethod
  def _pop_section(lines):
    """
    Provides lines up through the next divider. This excludes lines with just a
    comma since they're an artifact of these being C strings. Mutates **lines**.
    """

    section_lines = []

    if lines:
      line = lines.pop(0)

      while lines and line != FALLBACK_DIV:
        if line.strip() != ',':
          section_lines.append(line)

        line = lines.pop(0)

    return section_lines

  @staticmethod
  def _write(fallbacks, tor_commit, stem_commit, headers, path = FALLBACK_CACHE_PATH):
    """
    Persists fallback directories to a location in a way that can be read by
    from_cache().

    :param dict fallbacks: mapping of fingerprints to their fallback directory
    :param str tor_commit: tor commit the fallbacks came from
    :param str stem_commit: stem commit the fallbacks came from
    :param dict headers: metadata about the file these came from
    :param str path: location fallbacks will be persisted to
    """

    conf = stem.util.conf.Config()
    conf.set('tor_commit', tor_commit)
    conf.set('stem_commit', stem_commit)

    for k, v in headers.items():
      conf.set('header.%s' % k, v)

    # sorted by fingerprint so the cache file is deterministic

    for directory in sorted(fallbacks.values(), key = lambda x: x.fingerprint):
      fingerprint = directory.fingerprint
      conf.set('%s.address' % fingerprint, directory.address)
      conf.set('%s.or_port' % fingerprint, str(directory.or_port))
      conf.set('%s.dir_port' % fingerprint, str(directory.dir_port))
      conf.set('%s.nickname' % fingerprint, directory.nickname)
      conf.set('%s.has_extrainfo' % fingerprint, 'true' if directory.has_extrainfo else 'false')

      if directory.orport_v6:
        conf.set('%s.orport6_address' % fingerprint, str(directory.orport_v6[0]))
        conf.set('%s.orport6_port' % fingerprint, str(directory.orport_v6[1]))

    conf.save(path)

  def __hash__(self):
    # hash on our fallback-specific attributes, deferring the rest to Directory
    return stem.util._hash_attr(self, 'has_extrainfo', 'header', parent = Directory, cache = True)

  def __eq__(self, other):
    return hash(self) == hash(other) if isinstance(other, Fallback) else False

  def __ne__(self, other):
    return not self == other
def _fallback_directory_differences(previous_directories, new_directories):
"""
Provides a description of how fallback directories differ.
"""
lines = []
added_fp = set(new_directories.keys()).difference(previous_directories.keys())
removed_fp = set(previous_directories.keys()).difference(new_directories.keys())
for fp in added_fp:
directory = new_directories[fp]
orport_v6 = '%s:%s' % directory.orport_v6 if directory.orport_v6 else '[none]'
lines += [
'* Added %s as a new fallback directory:' % directory.fingerprint,
' address: %s' % directory.address,
' or_port: %s' % directory.or_port,
' dir_port: %s' % directory.dir_port,
' nickname: %s' % directory.nickname,
' has_extrainfo: %s' % directory.has_extrainfo,
' orport_v6: %s' % orport_v6,
'',
]
for fp in removed_fp:
lines.append('* Removed %s as a fallback directory' % fp)
for fp in new_directories:
if fp in added_fp or fp in removed_fp:
continue # already discussed these
previous_directory = previous_directories[fp]
new_directory = new_directories[fp]
if previous_directory != new_directory:
for attr in ('address', 'or_port', 'dir_port', 'fingerprint', 'orport_v6'):
old_attr = getattr(previous_directory, attr)
new_attr = getattr(new_directory, attr)
if old_attr != new_attr:
lines.append('* Changed the %s of %s from %s to %s' % (attr, fp, old_attr, new_attr))
return '\n'.join(lines)
# Hardcoded directory authority information, keyed by nickname.
#
# NOTE(review): these values presumably mirror tor's own hardcoded authority
# list -- confirm against tor's source when updating.

DIRECTORY_AUTHORITIES = {
  'moria1': Authority(
    nickname = 'moria1',
    address = '128.31.0.39',
    or_port = 9101,
    dir_port = 9131,
    fingerprint = '9695DFC35FFEB861329B9F1AB04C46397020CE31',
    v3ident = 'D586D18309DED4CD6D57C18FDB97EFA96D330566',
  ),
  'tor26': Authority(
    nickname = 'tor26',
    address = '86.59.21.38',
    or_port = 443,
    dir_port = 80,
    fingerprint = '847B1F850344D7876491A54892F904934E4EB85D',
    orport_v6 = ('2001:858:2:2:aabb:0:563b:1526', 443),
    v3ident = '14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4',
  ),
  'dizum': Authority(
    nickname = 'dizum',
    address = '194.109.206.212',
    or_port = 443,
    dir_port = 80,
    fingerprint = '7EA6EAD6FD83083C538F44038BBFA077587DD755',
    v3ident = 'E8A9C45EDE6D711294FADF8E7951F4DE6CA56B58',
  ),
  'gabelmoo': Authority(
    nickname = 'gabelmoo',
    address = '131.188.40.189',
    or_port = 443,
    dir_port = 80,
    fingerprint = 'F2044413DAC2E02E3D6BCF4735A19BCA1DE97281',
    orport_v6 = ('2001:638:a000:4140::ffff:189', 443),
    v3ident = 'ED03BB616EB2F60BEC80151114BB25CEF515B226',
  ),
  'dannenberg': Authority(
    nickname = 'dannenberg',
    address = '193.23.244.244',
    or_port = 443,
    dir_port = 80,
    orport_v6 = ('2001:678:558:1000::244', 443),
    fingerprint = '7BE683E65D48141321C5ED92F075C55364AC7123',
    v3ident = '0232AF901C31A04EE9848595AF9BB7620D4C5B2E',
  ),
  'maatuska': Authority(
    nickname = 'maatuska',
    address = '171.25.193.9',
    or_port = 80,
    dir_port = 443,
    fingerprint = 'BD6A829255CB08E66FBE7D3748363586E46B3810',
    orport_v6 = ('2001:67c:289c::9', 80),
    v3ident = '49015F787433103580E3B66A1707A00E60F2D15B',
  ),
  'Faravahar': Authority(
    nickname = 'Faravahar',
    address = '154.35.175.225',
    or_port = 443,
    dir_port = 80,
    fingerprint = 'CF6D0AAFB385BE71B8E111FC5CFF4B47923733BC',
    v3ident = 'EFCBE720AB3A82B99F9E953CD5BF50F7EEFC7B97',
  ),
  'longclaw': Authority(
    nickname = 'longclaw',
    address = '199.58.81.140',
    or_port = 443,
    dir_port = 80,
    fingerprint = '74A910646BCEEFBCD2E874FC1DC997430F968145',
    v3ident = '23D15D965BC35114467363C165C4F724B64B4F66',
  ),
  'bastet': Authority(
    nickname = 'bastet',
    address = '204.13.164.118',
    or_port = 443,
    dir_port = 80,
    fingerprint = '24E2F139121D4394C54B5BCC368B3B411857C413',
    orport_v6 = ('2620:13:4000:6000::1000:118', 443),
    v3ident = '27102BC123E7AF1D4741AE047E160C91ADC76B21',
  ),
  'Serge': Authority(
    nickname = 'Serge',
    address = '66.111.2.131',
    or_port = 9001,
    dir_port = 9030,
    fingerprint = 'BA44A889E64B93FAA2B114E02C2A279A8555C533',
    v3ident = None,  # does not vote in the consensus
  ),
}

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -67,20 +67,19 @@ exiting to a destination is permissible or not. For instance...
from __future__ import absolute_import
import re
import socket
import zlib
import stem.prereq
import stem.util
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
from stem import str_type
try:
# added in python 3.2
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
except ImportError:
else:
from stem.util.lru_cache import lru_cache
AddressType = stem.util.enum.Enum(('WILDCARD', 'Wildcard'), ('IPv4', 'IPv4'), ('IPv6', 'IPv6'))
@ -109,6 +108,12 @@ def get_config_policy(rules, ip_address = None):
* ports being optional
* the 'private' keyword
.. deprecated:: 1.7.0
Tor's torrc parameters lack a formal spec, making it difficult for this
method to be reliable. Callers are encouraged to move to
:func:`~stem.control.Controller.get_exit_policy` instead.
:param str,list rules: comma separated rules or list to be converted
:param str ip_address: this relay's IP address for the 'private' policy if
it's present, this defaults to the local address
@ -118,10 +123,12 @@ def get_config_policy(rules, ip_address = None):
:raises: **ValueError** if input isn't a valid tor exit policy
"""
if ip_address and not (stem.util.connection.is_valid_ipv4_address(ip_address) or stem.util.connection.is_valid_ipv6_address(ip_address)):
if ip_address and not (stem.util.connection.is_valid_ipv4_address(ip_address) or stem.util.connection.is_valid_ipv6_address(ip_address, allow_brackets = True)):
raise ValueError("%s isn't a valid IP address" % ip_address)
elif ip_address and stem.util.connection.is_valid_ipv6_address(ip_address, allow_brackets = True) and not (ip_address[0] == '[' and ip_address[-1] == ']'):
ip_address = '[%s]' % ip_address # ExitPolicy validation expects IPv6 addresses to be bracketed
if isinstance(rules, (bytes, str_type)):
if stem.util._is_str(rules):
rules = rules.split(',')
result = []
@ -132,12 +139,12 @@ def get_config_policy(rules, ip_address = None):
if not rule:
continue
if ':' not in rule:
if not re.search(':[\d\-\*]+$', rule):
rule = '%s:*' % rule
if 'private' in rule:
acceptance = rule.split(' ', 1)[0]
port = rule.split(':', 1)[1]
port = rule.rsplit(':', 1)[1]
addresses = list(PRIVATE_ADDRESSES)
if ip_address:
@ -153,12 +160,6 @@ def get_config_policy(rules, ip_address = None):
else:
result.append(ExitPolicyRule(rule))
# torrc policies can apply to IPv4 or IPv6, so we need to make sure /0
# addresses aren't treated as being a full wildcard
for rule in result:
rule._submask_wildcard = False
return ExitPolicy(*result)
@ -169,10 +170,10 @@ def _flag_private_rules(rules):
series of rules exactly matching it.
"""
matches = []
matches = [] # find all possible starting indexes
for i, rule in enumerate(rules):
if i + len(PRIVATE_ADDRESSES) + 1 > len(rules):
if i + len(PRIVATE_ADDRESSES) > len(rules):
break
rule_str = '%s/%s' % (rule.address, rule.get_masked_bits())
@ -184,32 +185,35 @@ def _flag_private_rules(rules):
# To match the private policy the following must all be true...
#
# * series of addresses and bit masks match PRIVATE_ADDRESSES
# * all rules have the same port range and acceptance
# * all rules have the same port range
# * all rules have the same acceptance (all accept or reject entries)
#
# The last rule is dynamically based on the relay's public address. It may
# not be present if get_config_policy() created this policy and we couldn't
# resolve our address.
rule_set = rules[start_index:start_index + len(PRIVATE_ADDRESSES) + 1]
last_index = start_index + len(PRIVATE_ADDRESSES)
rule_set = rules[start_index:last_index]
last_rule = rules[last_index] if len(rules) > last_index else None
is_match = True
min_port, max_port = rule_set[0].min_port, rule_set[0].max_port
is_accept = rule_set[0].is_accept
for i, rule in enumerate(rule_set[:-1]):
for i, rule in enumerate(rule_set):
rule_str = '%s/%s' % (rule.address, rule.get_masked_bits())
if rule_str != PRIVATE_ADDRESSES[i] or rule.min_port != min_port or rule.max_port != max_port or rule.is_accept != is_accept:
is_match = False
break
# The last rule is for the relay's public address, so it's dynamic.
last_rule = rule_set[-1]
if last_rule.is_address_wildcard() or last_rule.min_port != min_port or last_rule.max_port != max_port or last_rule.is_accept != is_accept:
is_match = False
if is_match:
for rule in rule_set:
rule._is_private = True
if last_rule and not last_rule.is_address_wildcard() and last_rule.min_port == min_port and last_rule.max_port == max_port and last_rule.is_accept == is_accept:
last_rule._is_private = True
def _flag_default_rules(rules):
"""
@ -238,7 +242,7 @@ class ExitPolicy(object):
# sanity check the types
for rule in rules:
if not isinstance(rule, (bytes, str_type, ExitPolicyRule)):
if not stem.util._is_str(rule) and not isinstance(rule, ExitPolicyRule):
raise TypeError('Exit policy rules can only contain strings or ExitPolicyRules, got a %s (%s)' % (type(rule), rules))
# Unparsed representation of the rules we were constructed with. Our
@ -249,7 +253,7 @@ class ExitPolicy(object):
is_all_str = True
for rule in rules:
if not isinstance(rule, (bytes, str_type)):
if not stem.util._is_str(rule):
is_all_str = False
if rules and is_all_str:
@ -282,6 +286,9 @@ class ExitPolicy(object):
:returns: **True** if exiting to this destination is allowed, **False** otherwise
"""
if not self.is_exiting_allowed():
return False
for rule in self._get_rules():
if rule.is_match(address, port, strict):
return rule.is_accept
@ -458,7 +465,10 @@ class ExitPolicy(object):
if isinstance(rule, bytes):
rule = stem.util.str_tools._to_unicode(rule)
if isinstance(rule, str_type):
if stem.util._is_str(rule):
if not rule.strip():
continue
rule = ExitPolicyRule(rule.strip())
if rule.is_accept:
@ -522,10 +532,10 @@ class ExitPolicy(object):
return self._hash
def __eq__(self, other):
if isinstance(other, ExitPolicy):
return self._get_rules() == list(other)
else:
return False
return hash(self) == hash(other) if isinstance(other, ExitPolicy) else False
def __ne__(self, other):
return not self == other
class MicroExitPolicy(ExitPolicy):
@ -575,10 +585,10 @@ class MicroExitPolicy(ExitPolicy):
policy = policy[6:]
if not policy.startswith(' ') or (len(policy) - 1 != len(policy.lstrip())):
if not policy.startswith(' '):
raise ValueError('A microdescriptor exit policy should have a space separating accept/reject from its port list: %s' % self._policy)
policy = policy[1:]
policy = policy.lstrip()
# convert our port list into MicroExitPolicyRule
rules = []
@ -605,10 +615,10 @@ class MicroExitPolicy(ExitPolicy):
return hash(str(self))
def __eq__(self, other):
if isinstance(other, MicroExitPolicy):
return str(self) == str(other)
else:
return False
return hash(self) == hash(other) if isinstance(other, MicroExitPolicy) else False
def __ne__(self, other):
return not self == other
class ExitPolicyRule(object):
@ -626,6 +636,9 @@ class ExitPolicyRule(object):
This should be treated as an immutable object.
.. versionchanged:: 1.5.0
Support for 'accept6/reject6' entries and '\*4/6' wildcards.
:var bool is_accept: indicates if exiting is allowed or disallowed
:var str address: address that this rule is for
@ -639,24 +652,27 @@ class ExitPolicyRule(object):
"""
def __init__(self, rule):
# policy ::= "accept" exitpattern | "reject" exitpattern
# policy ::= "accept[6]" exitpattern | "reject[6]" exitpattern
# exitpattern ::= addrspec ":" portspec
if rule.startswith('accept'):
self.is_accept = True
elif rule.startswith('reject'):
self.is_accept = False
rule = stem.util.str_tools._to_unicode(rule)
self.is_accept = rule.startswith('accept')
is_ipv6_only = rule.startswith('accept6') or rule.startswith('reject6')
if rule.startswith('accept6') or rule.startswith('reject6'):
exitpattern = rule[7:]
elif rule.startswith('accept') or rule.startswith('reject'):
exitpattern = rule[6:]
else:
raise ValueError("An exit policy must start with either 'accept' or 'reject': %s" % rule)
raise ValueError("An exit policy must start with either 'accept[6]' or 'reject[6]': %s" % rule)
exitpattern = rule[6:]
if not exitpattern.startswith(' ') or (len(exitpattern) - 1 != len(exitpattern.lstrip())):
if not exitpattern.startswith(' '):
raise ValueError('An exit policy should have a space separating its accept/reject from the exit pattern: %s' % rule)
exitpattern = exitpattern[1:]
exitpattern = exitpattern.lstrip()
if ':' not in exitpattern:
if ':' not in exitpattern or ']' in exitpattern.rsplit(':', 1)[1]:
raise ValueError("An exitpattern must be of the form 'addrspec:portspec': %s" % rule)
self.address = None
@ -671,15 +687,18 @@ class ExitPolicyRule(object):
self._mask = None
# Malformed exit policies are rejected, but there's an exception where it's
# just skipped: when an accept6/reject6 rule has an IPv4 address...
#
# "Using an IPv4 address with accept6 or reject6 is ignored and generates
# a warning."
self._skip_rule = False
addrspec, portspec = exitpattern.rsplit(':', 1)
self._apply_addrspec(rule, addrspec)
self._apply_addrspec(rule, addrspec, is_ipv6_only)
self._apply_portspec(rule, portspec)
# If true then a submask of /0 is treated by is_address_wildcard() as being
# a wildcard.
self._submask_wildcard = True
# Flags to indicate if this rule seems to be expanded from the 'private'
# keyword or tor's default policy suffix.
@ -688,20 +707,14 @@ class ExitPolicyRule(object):
def is_address_wildcard(self):
"""
**True** if we'll match against any address, **False** otherwise.
**True** if we'll match against **any** address, **False** otherwise.
Note that if this policy can apply to both IPv4 and IPv6 then this is
different from being for a /0 (since, for instance, 0.0.0.0/0 wouldn't
match against an IPv6 address). That said, /0 addresses are highly unusual
and most things citing exit policies are IPv4 specific anyway, making this
moot.
Note that this is different than \*4, \*6, or '/0' address which are
wildcards for only either IPv4 or IPv6.
:returns: **bool** for if our address matching is a wildcard
"""
if self._submask_wildcard and self.get_masked_bits() == 0:
return True
return self._address_type == _address_type_to_int(AddressType.WILDCARD)
def is_port_wildcard(self):
@ -729,6 +742,9 @@ class ExitPolicyRule(object):
:raises: **ValueError** if provided with a malformed address or port
"""
if self._skip_rule:
return False
# validate our input and check if the argument doesn't match our address type
if address is not None:
@ -764,7 +780,7 @@ class ExitPolicyRule(object):
if address is None:
fuzzy_match = True
else:
comparison_addr_bin = int(stem.util.connection._get_address_binary(address), 2)
comparison_addr_bin = stem.util.connection.address_to_int(address)
comparison_addr_bin &= self._get_mask_bin()
if self._get_address_bin() != comparison_addr_bin:
@ -800,8 +816,8 @@ class ExitPolicyRule(object):
:returns: str of our subnet mask for the address (ex. '255.255.255.0')
"""
# Lazy loading our mask because it very infrequently requested. There's
# no reason to usually usse memory for it.
# Lazy loading our mask because it is very infrequently requested. There's
# no reason to usually use memory for it.
if not self._mask:
address_type = self.get_address_type()
@ -896,41 +912,30 @@ class ExitPolicyRule(object):
return label
def __hash__(self):
if self._hash is None:
my_hash = 0
for attr in ('is_accept', 'address', 'min_port', 'max_port'):
my_hash *= 1024
attr_value = getattr(self, attr)
if attr_value is not None:
my_hash += hash(attr_value)
my_hash *= 1024
my_hash += hash(self.get_mask(False))
self._hash = my_hash
return self._hash
@lru_cache()
def _get_mask_bin(self):
# provides an integer representation of our mask
return int(stem.util.connection._get_address_binary(self.get_mask(False)), 2)
return int(stem.util.connection._address_to_binary(self.get_mask(False)), 2)
@lru_cache()
def _get_address_bin(self):
# provides an integer representation of our address
return int(stem.util.connection._get_address_binary(self.address), 2) & self._get_mask_bin()
return stem.util.connection.address_to_int(self.address) & self._get_mask_bin()
def _apply_addrspec(self, rule, addrspec):
def _apply_addrspec(self, rule, addrspec, is_ipv6_only):
# Parses the addrspec...
# addrspec ::= "*" | ip4spec | ip6spec
# Expand IPv4 and IPv6 specific wildcards into /0 entries so we have one
# fewer bizarre special case headaches to deal with.
if addrspec == '*4':
addrspec = '0.0.0.0/0'
elif addrspec == '*6' or (addrspec == '*' and is_ipv6_only):
addrspec = '[0000:0000:0000:0000:0000:0000:0000:0000]/0'
if '/' in addrspec:
self.address, addr_extra = addrspec.split('/', 1)
else:
@ -945,6 +950,9 @@ class ExitPolicyRule(object):
# ip4mask ::= an IPv4 mask in dotted-quad format
# num_ip4_bits ::= an integer between 0 and 32
if is_ipv6_only:
self._skip_rule = True
self._address_type = _address_type_to_int(AddressType.IPv4)
if addr_extra is None:
@ -985,7 +993,7 @@ class ExitPolicyRule(object):
else:
raise ValueError("The '%s' isn't a number of bits: %s" % (addr_extra, rule))
else:
raise ValueError("Address isn't a wildcard, IPv4, or IPv6 address: %s" % rule)
raise ValueError("'%s' isn't a wildcard, IPv4, or IPv6 address: %s" % (addrspec, rule))
def _apply_portspec(self, rule, portspec):
# Parses the portspec...
@ -1018,16 +1026,17 @@ class ExitPolicyRule(object):
else:
raise ValueError("Port value isn't a wildcard, integer, or range: %s" % rule)
def __eq__(self, other):
if isinstance(other, ExitPolicyRule):
# Our string representation encompasses our effective policy. Technically
# this isn't quite right since our rule attribute may differ (ie, 'accept
# 0.0.0.0/0' == 'accept 0.0.0.0/0.0.0.0' will be True), but these
# policies are effectively equivalent.
def __hash__(self):
if self._hash is None:
self._hash = stem.util._hash_attr(self, 'is_accept', 'address', 'min_port', 'max_port') * 1024 + hash(self.get_mask(False))
return hash(self) == hash(other)
else:
return False
return self._hash
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, ExitPolicyRule) else False
def __ne__(self, other):
return not self == other
def _address_type_to_int(address_type):
@ -1048,7 +1057,7 @@ class MicroExitPolicyRule(ExitPolicyRule):
self.address = None # wildcard address
self.min_port = min_port
self.max_port = max_port
self._hash = None
self._skip_rule = False
def is_address_wildcard(self):
return True
@ -1063,20 +1072,13 @@ class MicroExitPolicyRule(ExitPolicyRule):
return None
def __hash__(self):
if self._hash is None:
my_hash = 0
return stem.util._hash_attr(self, 'is_accept', 'min_port', 'max_port', cache = True)
for attr in ('is_accept', 'min_port', 'max_port'):
my_hash *= 1024
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, MicroExitPolicyRule) else False
attr_value = getattr(self, attr)
if attr_value is not None:
my_hash += hash(attr_value)
self._hash = my_hash
return self._hash
def __ne__(self, other):
return not self == other
DEFAULT_POLICY_RULES = tuple([ExitPolicyRule(rule) for rule in (

View file

@ -1,4 +1,4 @@
# Copyright 2015, Damian Johnson and The Tor Project
# Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -6,13 +6,6 @@ Interactive interpreter for interacting with Tor directly. This adds usability
features such as tab completion, history, and IRC-style functions (like /help).
"""
__all__ = [
'arguments',
'autocomplete',
'commands',
'help',
]
import os
import sys
@ -26,13 +19,20 @@ import stem.util.term
from stem.util.term import Attr, Color, format
__all__ = [
'arguments',
'autocomplete',
'commands',
'help',
]
PROMPT = format('>>> ', Color.GREEN, Attr.BOLD, Attr.READLINE_ESCAPE)
STANDARD_OUTPUT = (Color.BLUE, )
BOLD_OUTPUT = (Color.BLUE, Attr.BOLD)
HEADER_OUTPUT = (Color.GREEN, )
HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD)
ERROR_OUTPUT = (Attr.BOLD, Color.RED)
STANDARD_OUTPUT = (Color.BLUE, Attr.LINES)
BOLD_OUTPUT = (Color.BLUE, Attr.BOLD, Attr.LINES)
HEADER_OUTPUT = (Color.GREEN, Attr.LINES)
HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD, Attr.LINES)
ERROR_OUTPUT = (Attr.BOLD, Color.RED, Attr.LINES)
settings_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')
uses_settings = stem.util.conf.uses_settings('stem_interpreter', settings_path)
@ -60,7 +60,7 @@ def main():
print(stem.interpreter.arguments.get_help())
sys.exit()
if args.disable_color:
if args.disable_color or not sys.stdout.isatty():
global PROMPT
stem.util.term.DISABLE_COLOR_SUPPORT = True
PROMPT = '>>> '
@ -72,22 +72,30 @@ def main():
is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real')
if not is_tor_running:
if not stem.util.system.is_available('tor'):
if args.tor_path == 'tor' and not stem.util.system.is_available('tor'):
print(format(msg('msg.tor_unavailable'), *ERROR_OUTPUT))
sys.exit(1)
else:
print(format(msg('msg.starting_tor'), *HEADER_OUTPUT))
if not args.run_cmd and not args.run_path:
print(format(msg('msg.starting_tor'), *HEADER_OUTPUT))
stem.process.launch_tor_with_config(
config = {
'SocksPort': '0',
'ControlPort': str(args.control_port),
'CookieAuthentication': '1',
'ExitPolicy': 'reject *:*',
},
completion_percent = 5,
take_ownership = True,
)
control_port = '9051' if args.control_port == 'default' else str(args.control_port)
try:
stem.process.launch_tor_with_config(
config = {
'SocksPort': '0',
'ControlPort': control_port,
'CookieAuthentication': '1',
'ExitPolicy': 'reject *:*',
},
tor_cmd = args.tor_path,
completion_percent = 5,
take_ownership = True,
)
except OSError as exc:
print(format(msg('msg.unable_to_start_tor', error = exc), *ERROR_OUTPUT))
sys.exit(1)
control_port = (args.control_address, args.control_port)
control_socket = args.control_socket
@ -115,27 +123,64 @@ def main():
readline.set_completer(autocompleter.complete)
readline.set_completer_delims('\n')
interpreter = stem.interpreter.commands.ControlInterpretor(controller)
interpreter = stem.interpreter.commands.ControlInterpreter(controller)
showed_close_confirmation = False
for line in msg('msg.startup_banner').splitlines():
line_format = HEADER_BOLD_OUTPUT if line.startswith(' ') else HEADER_OUTPUT
print(format(line, *line_format))
if args.run_cmd:
if args.run_cmd.upper().startswith('SETEVENTS '):
# TODO: we can use a lambda here when dropping python 2.x support, but
# until then print's status as a keyword prevents it from being used in
# lambdas
print('')
def handle_event(event_message):
print(format(str(event_message), *STANDARD_OUTPUT))
while True:
controller._handle_event = handle_event
if sys.stdout.isatty():
events = args.run_cmd.upper().split(' ', 1)[1]
print(format('Listening to %s events. Press any key to quit.\n' % events, *HEADER_BOLD_OUTPUT))
controller.msg(args.run_cmd)
try:
raw_input()
except (KeyboardInterrupt, stem.SocketClosed):
pass
else:
interpreter.run_command(args.run_cmd, print_response = True)
elif args.run_path:
try:
prompt = '... ' if interpreter.is_multiline_context else PROMPT
for line in open(args.run_path).readlines():
interpreter.run_command(line.strip(), print_response = True)
except IOError as exc:
print(format(msg('msg.unable_to_read_file', path = args.run_path, error = exc), *ERROR_OUTPUT))
sys.exit(1)
if stem.prereq.is_python_3():
user_input = input(prompt)
else:
user_input = raw_input(prompt)
else:
for line in msg('msg.startup_banner').splitlines():
line_format = HEADER_BOLD_OUTPUT if line.startswith(' ') else HEADER_OUTPUT
print(format(line, *line_format))
response = interpreter.run_command(user_input)
print('')
if response is not None:
print(response)
except (KeyboardInterrupt, EOFError, stem.SocketClosed) as exc:
print('') # move cursor to the following line
break
while True:
try:
prompt = '... ' if interpreter.is_multiline_context else PROMPT
user_input = input(prompt) if stem.prereq.is_python_3() else raw_input(prompt)
interpreter.run_command(user_input, print_response = True)
except stem.SocketClosed:
if showed_close_confirmation:
print(format('Unable to run tor commands. The control connection has been closed.', *ERROR_OUTPUT))
else:
prompt = format("Tor's control port has closed. Do you want to continue this interpreter? (y/n) ", *HEADER_BOLD_OUTPUT)
user_input = input(prompt) if stem.prereq.is_python_3() else raw_input(prompt)
print('') # blank line
if user_input.lower() in ('y', 'yes'):
showed_close_confirmation = True
else:
break
except (KeyboardInterrupt, EOFError, stem.SocketClosed):
print('') # move cursor to the following line
break

View file

@ -1,4 +1,4 @@
# Copyright 2015, Damian Johnson and The Tor Project
# Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -7,22 +7,26 @@ Commandline argument parsing for our interpreter prompt.
import collections
import getopt
import os
import stem.interpreter
import stem.util.connection
DEFAULT_ARGS = {
'control_address': '127.0.0.1',
'control_port': 9051,
'control_port': 'default',
'user_provided_port': False,
'control_socket': '/var/run/tor/control',
'user_provided_socket': False,
'tor_path': 'tor',
'run_cmd': None,
'run_path': None,
'disable_color': False,
'print_help': False,
}
OPT = 'i:s:h'
OPT_EXPANDED = ['interface=', 'socket=', 'no-color', 'help']
OPT_EXPANDED = ['interface=', 'socket=', 'tor=', 'run=', 'no-color', 'help']
def parse(argv):
@ -50,7 +54,7 @@ def parse(argv):
for opt, arg in recognized_args:
if opt in ('-i', '--interface'):
if ':' in arg:
address, port = arg.split(':', 1)
address, port = arg.rsplit(':', 1)
else:
address, port = None, arg
@ -68,6 +72,13 @@ def parse(argv):
elif opt in ('-s', '--socket'):
args['control_socket'] = arg
args['user_provided_socket'] = True
elif opt in ('--tor'):
args['tor_path'] = arg
elif opt in ('--run'):
if os.path.exists(arg):
args['run_path'] = arg
else:
args['run_cmd'] = arg
elif opt == '--no-color':
args['disable_color'] = True
elif opt in ('-h', '--help'):

View file

@ -1,16 +1,17 @@
# Copyright 2014-2015, Damian Johnson and The Tor Project
# Copyright 2014-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Tab completion for our interpreter prompt.
"""
import stem.prereq
from stem.interpreter import uses_settings
try:
# added in python 3.2
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
except ImportError:
else:
from stem.util.lru_cache import lru_cache

View file

@ -1,4 +1,4 @@
# Copyright 2014-2015, Damian Johnson and The Tor Project
# Copyright 2014-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -6,7 +6,9 @@ Handles making requests and formatting the responses.
"""
import code
import contextlib
import socket
import sys
import stem
import stem.control
@ -19,6 +21,13 @@ import stem.util.tor_tools
from stem.interpreter import STANDARD_OUTPUT, BOLD_OUTPUT, ERROR_OUTPUT, uses_settings, msg
from stem.util.term import format
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
MAX_EVENTS = 100
def _get_fingerprint(arg, controller):
"""
@ -51,7 +60,7 @@ def _get_fingerprint(arg, controller):
raise ValueError("Unable to find a relay with the nickname of '%s'" % arg)
elif ':' in arg or stem.util.connection.is_valid_ipv4_address(arg):
if ':' in arg:
address, port = arg.split(':', 1)
address, port = arg.rsplit(':', 1)
if not stem.util.connection.is_valid_ipv4_address(address):
raise ValueError("'%s' isn't a valid IPv4 address" % address)
@ -84,7 +93,18 @@ def _get_fingerprint(arg, controller):
raise ValueError("'%s' isn't a fingerprint, nickname, or IP address" % arg)
class ControlInterpretor(code.InteractiveConsole):
@contextlib.contextmanager
def redirect(stdout, stderr):
original = sys.stdout, sys.stderr
sys.stdout, sys.stderr = stdout, stderr
try:
yield
finally:
sys.stdout, sys.stderr = original
class ControlInterpreter(code.InteractiveConsole):
"""
Handles issuing requests and providing nicely formed responses, with support
for special irc style subcommands.
@ -115,7 +135,10 @@ class ControlInterpretor(code.InteractiveConsole):
def handle_event_wrapper(event_message):
handle_event_real(event_message)
self._received_events.append(event_message)
self._received_events.insert(0, event_message)
if len(self._received_events) > MAX_EVENTS:
self._received_events.pop()
self._controller._handle_event = handle_event_wrapper
@ -276,13 +299,14 @@ class ControlInterpretor(code.InteractiveConsole):
return format(response, *STANDARD_OUTPUT)
@uses_settings
def run_command(self, command, config):
def run_command(self, command, config, print_response = False):
"""
Runs the given command. Requests starting with a '/' are special commands
to the interpreter, and anything else is sent to the control port.
:param stem.control.Controller controller: tor control connection
:param str command: command to be processed
:param bool print_response: prints the response to stdout if true
:returns: **list** out output lines, each line being a list of
(msg, format) tuples
@ -290,12 +314,9 @@ class ControlInterpretor(code.InteractiveConsole):
:raises: **stem.SocketClosed** if the control connection has been severed
"""
if not self._controller.is_alive():
raise stem.SocketClosed()
# Commands fall into three categories:
#
# * Interpretor commands. These start with a '/'.
# * Interpreter commands. These start with a '/'.
#
# * Controller commands stem knows how to handle. We use our Controller's
# methods for these to take advantage of caching and present nicer
@ -338,17 +359,25 @@ class ControlInterpretor(code.InteractiveConsole):
is_tor_command = cmd in config.get('help.usage', {}) and cmd.lower() != 'events'
if self._run_python_commands and not is_tor_command:
self.is_multiline_context = code.InteractiveConsole.push(self, command)
return
console_output = StringIO()
with redirect(console_output, console_output):
self.is_multiline_context = code.InteractiveConsole.push(self, command)
output = console_output.getvalue().strip()
else:
try:
output = format(self._controller.msg(command).raw_content().strip(), *STANDARD_OUTPUT)
except stem.ControllerError as exc:
if isinstance(exc, stem.SocketClosed):
raise exc
raise
else:
output = format(str(exc), *ERROR_OUTPUT)
output += '\n' # give ourselves an extra line before the next prompt
if output:
output += '\n' # give ourselves an extra line before the next prompt
if print_response:
print(output)
return output

View file

@ -1,10 +1,12 @@
# Copyright 2014-2015, Damian Johnson and The Tor Project
# Copyright 2014-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Provides our /help responses.
"""
import stem.prereq
from stem.interpreter import (
STANDARD_OUTPUT,
BOLD_OUTPUT,
@ -15,10 +17,9 @@ from stem.interpreter import (
from stem.util.term import format
try:
# added in python 3.2
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
except ImportError:
else:
from stem.util.lru_cache import lru_cache

View file

@ -17,6 +17,8 @@ msg.help
| -i, --interface [ADDRESS:]PORT change control interface from {address}:{port}
| -s, --socket SOCKET_PATH attach using unix domain socket if present,
| SOCKET_PATH defaults to: {socket}
| --tor PATH tor binary if tor isn't already running
| --run executes the given command or file of commands
| --no-color disables colorized output
| -h, --help presents this help
|
@ -41,6 +43,8 @@ msg.startup_banner
|
msg.tor_unavailable Tor isn't running and the command currently isn't in your PATH.
msg.unable_to_start_tor Unable to start tor: {error}
msg.unable_to_read_file Unable to read {path}: {error}
msg.starting_tor
|Tor isn't running. Starting a temporary Tor instance for our interpreter to
@ -57,7 +61,7 @@ msg.starting_tor
# Response for the '/help' command without any arguments.
help.general
|Interpretor commands include:
|Interpreter commands include:
| /help - provides information for interpreter and tor commands
| /events - prints events that we've received
| /info - general information for a relay
@ -319,7 +323,9 @@ autocomplete AUTHCHALLENGE
autocomplete DROPGUARDS
autocomplete ADD_ONION NEW:BEST
autocomplete ADD_ONION NEW:RSA1024
autocomplete ADD_ONION NEW:ED25519-V3
autocomplete ADD_ONION RSA1024:
autocomplete ADD_ONION ED25519-V3:
autocomplete DEL_ONION
autocomplete HSFETCH
autocomplete HSPOST

View file

@ -0,0 +1,810 @@
# Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Information available about Tor from `its manual
<https://www.torproject.org/docs/tor-manual.html.en>`_. This provides three
methods of getting this information...
* :func:`~stem.manual.Manual.from_cache` provides manual content bundled with
Stem. This is the fastest and most reliable method but only as up-to-date as
Stem's release.
* :func:`~stem.manual.Manual.from_man` reads Tor's local man page for
information about it.
* :func:`~stem.manual.Manual.from_remote` fetches the latest manual information
remotely. This is the slowest and least reliable method but provides the most
recent information about Tor.
Manual information includes arguments, signals, and probably most usefully the
torrc configuration options. For example, say we want a little script that told
us what our torrc options do...
.. literalinclude:: /_static/example/manual_config_options.py
:language: python
|
.. image:: /_static/manual_output.png
|
**Module Overview:**
::
query - performs a query on our cached sqlite manual information
is_important - Indicates if a configuration option is of particularly common importance.
download_man_page - Downloads tor's latest man page.
Manual - Information about Tor available from its manual.
| |- from_cache - Provides manual information cached with Stem.
| |- from_man - Retrieves manual information from its man page.
| +- from_remote - Retrieves manual information remotely from tor's latest manual.
|
+- save - writes the manual contents to a given location
.. versionadded:: 1.5.0
"""
import os
import shutil
import sys
import tempfile
import stem.prereq
import stem.util
import stem.util.conf
import stem.util.enum
import stem.util.log
import stem.util.system
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
try:
# account for urllib's change between python 2.x and 3.x
import urllib.request as urllib
except ImportError:
import urllib2 as urllib
Category = stem.util.enum.Enum('GENERAL', 'CLIENT', 'RELAY', 'DIRECTORY', 'AUTHORITY', 'HIDDEN_SERVICE', 'DENIAL_OF_SERVICE', 'TESTING', 'UNKNOWN')
GITWEB_MANUAL_URL = 'https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt'
CACHE_PATH = os.path.join(os.path.dirname(__file__), 'cached_manual.sqlite')
DATABASE = None # cache database connections
HAS_ENCODING_ARG = not stem.util.system.is_mac() and not stem.util.system.is_bsd() and not stem.util.system.is_slackware()
SCHEMA_VERSION = 1 # version of our scheme, bump this if you change the following
SCHEMA = (
'CREATE TABLE schema(version INTEGER)',
'INSERT INTO schema(version) VALUES (%i)' % SCHEMA_VERSION,
'CREATE TABLE metadata(name TEXT, synopsis TEXT, description TEXT, man_commit TEXT, stem_commit TEXT)',
'CREATE TABLE commandline(name TEXT PRIMARY KEY, description TEXT)',
'CREATE TABLE signals(name TEXT PRIMARY KEY, description TEXT)',
'CREATE TABLE files(name TEXT PRIMARY KEY, description TEXT)',
'CREATE TABLE torrc(key TEXT PRIMARY KEY, name TEXT, category TEXT, usage TEXT, summary TEXT, description TEXT, position INTEGER)',
)
CATEGORY_SECTIONS = OrderedDict((
('GENERAL OPTIONS', Category.GENERAL),
('CLIENT OPTIONS', Category.CLIENT),
('SERVER OPTIONS', Category.RELAY),
('DIRECTORY SERVER OPTIONS', Category.DIRECTORY),
('DIRECTORY AUTHORITY SERVER OPTIONS', Category.AUTHORITY),
('HIDDEN SERVICE OPTIONS', Category.HIDDEN_SERVICE),
('DENIAL OF SERVICE MITIGATION OPTIONS', Category.DENIAL_OF_SERVICE),
('TESTING NETWORK OPTIONS', Category.TESTING),
))
class SchemaMismatch(IOError):
  """
  Raised when a manual cache's schema version isn't one this version of Stem
  can read.

  .. versionadded:: 1.6.0

  :var int database_schema: schema of the database
  :var tuple library_schema: schemas the library supports
  """

  def __init__(self, message, database_schema, library_schema):
    # single inheritance, so invoking IOError directly is equivalent to super()
    IOError.__init__(self, message)

    self.library_schema = library_schema
    self.database_schema = database_schema
def query(query, *param):
  """
  Performs the given query on our sqlite manual cache. This database should
  be treated as being read-only. File permissions generally enforce this, and
  in the future will be enforced by this function as well.

  ::

    >>> import stem.manual
    >>> print(stem.manual.query('SELECT description FROM torrc WHERE key=?', 'CONTROLSOCKET').fetchone()[0])
    Like ControlPort, but listens on a Unix domain socket, rather than a TCP socket. 0 disables ControlSocket. (Unix and Unix-like systems only.) (Default: 0)

  .. versionadded:: 1.6.0

  :param str query: query to run on the cache
  :param list param: query parameters

  :returns: :class:`sqlite3.Cursor` with the query results

  :raises:
    * **ImportError** if the sqlite3 module is unavailable
    * **sqlite3.OperationalError** if query fails
  """

  if not stem.prereq.is_sqlite_available():
    raise ImportError('Querying requires the sqlite3 module')

  # deferred import so the module loads even without sqlite3 available

  import sqlite3

  # The only reason to explicitly close the sqlite connection is to ensure
  # transactions are committed. Since we're only using read-only access this
  # doesn't matter, and can allow interpreter shutdown to do the needful.
  #
  # TODO: When we only support python 3.4+ we can use sqlite's uri argument
  # to enforce a read-only connection...
  #
  #   https://docs.python.org/3/library/sqlite3.html#sqlite3.connect

  global DATABASE

  # lazily open a single module-level connection, reused by later calls

  if DATABASE is None:
    DATABASE = sqlite3.connect(CACHE_PATH)

  return DATABASE.execute(query, param)
class ConfigOption(object):
  """
  Tor configuration attribute found in its torrc.

  :var str name: name of the configuration option
  :var stem.manual.Category category: category the config option was listed
    under, this is Category.UNKNOWN if we didn't recognize the category
  :var str usage: arguments accepted by the option
  :var str summary: brief description of what the option does
  :var str description: longer manual description with details
  """

  def __init__(self, name, category = Category.UNKNOWN, usage = '', summary = '', description = ''):
    self.name = name
    self.category = category
    self.usage = usage
    self.summary = summary
    self.description = description

  def __hash__(self):
    # hash is cached by _hash_attr — assumes attributes aren't mutated after
    # construction, which nothing in this module does

    return stem.util._hash_attr(self, 'name', 'category', 'usage', 'summary', 'description', cache = True)

  def __eq__(self, other):
    # equality is defined via the attribute hash; non-ConfigOptions never match

    return hash(self) == hash(other) if isinstance(other, ConfigOption) else False

  def __ne__(self, other):
    return not self == other
@lru_cache()
def _config(lowercase = True):
  """
  Provides a dictionary for our settings.cfg. This has a couple categories...

    * manual.important (list) - configuration options considered to be important
    * manual.summary.* (str) - summary descriptions of config options

  :param bool lowercase: uses lowercase keys if **True** to allow for case
    insensitive lookups

  :returns: **dict** with the loaded settings, or an empty dict if our bundled
    settings.cfg couldn't be read
  """

  config = stem.util.conf.Config()
  config_path = os.path.join(os.path.dirname(__file__), 'settings.cfg')

  try:
    config.load(config_path)

    # keep only the summary entries, then append the 'manual.important' list

    config_dict = dict([(key.lower() if lowercase else key, config.get_value(key)) for key in config.keys() if key.startswith('manual.summary.')])
    config_dict['manual.important'] = [name.lower() if lowercase else name for name in config.get_value('manual.important', [], multiple = True)]
    return config_dict
  except Exception as exc:
    # settings.cfg ships with stem, so failing to read it is a bug on our end

    stem.util.log.warn("BUG: stem failed to load its internal manual information from '%s': %s" % (config_path, exc))
    return {}
def _manual_differences(previous_manual, new_manual):
"""
Provides a description of how two manuals differ.
"""
lines = []
for attr in ('name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options'):
previous_attr = getattr(previous_manual, attr)
new_attr = getattr(new_manual, attr)
if previous_attr != new_attr:
lines.append("* Manual's %s attribute changed\n" % attr)
if attr in ('name', 'synopsis', 'description'):
lines.append(' Previously...\n\n%s\n' % previous_attr)
lines.append(' Updating to...\n\n%s' % new_attr)
elif attr == 'config_options':
for config_name, config_attr in new_attr.items():
previous = previous_attr.get(config_name)
if previous is None:
lines.append(' adding new config option => %s' % config_name)
elif config_attr != previous:
for attr in ('name', 'category', 'usage', 'summary', 'description'):
if getattr(config_attr, attr) != getattr(previous, attr):
lines.append(' modified %s (%s) => %s' % (config_name, attr, getattr(config_attr, attr)))
for config_name in set(previous_attr.keys()).difference(new_attr.keys()):
lines.append(' removing config option => %s' % config_name)
else:
added_items = set(new_attr.items()).difference(previous_attr.items())
removed_items = set(previous_attr.items()).difference(new_attr.items())
for added_item in added_items:
lines.append(' adding %s => %s' % added_item)
for removed_item in removed_items:
lines.append(' removing %s => %s' % removed_item)
lines.append('\n')
return '\n'.join(lines)
def is_important(option):
  """
  Indicates if a configuration option of particularly common importance or not.

  :param str option: tor configuration option to check

  :returns: **bool** that's **True** if this is an important option and
    **False** otherwise
  """

  important_options = _config()['manual.important']
  return option.lower() in important_options
def download_man_page(path = None, file_handle = None, url = GITWEB_MANUAL_URL, timeout = 20):
  """
  Downloads tor's latest man page from `gitweb.torproject.org
  <https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt>`_. This method is
  both slow and unreliable - please see the warnings on
  :func:`~stem.manual.Manual.from_remote`.

  :param str path: path to save tor's man page to
  :param file file_handle: file handler to save tor's man page to
  :param str url: url to download tor's asciidoc manual from
  :param int timeout: seconds to wait before timing out the request

  :raises: **IOError** if unable to retrieve the manual
  """

  if not path and not file_handle:
    raise ValueError("Either the path or file_handle we're saving to must be provided")
  elif not stem.util.system.is_available('a2x'):
    raise IOError('We require a2x from asciidoc to provide a man page')

  dirpath = tempfile.mkdtemp()
  asciidoc_path = os.path.join(dirpath, 'tor.1.txt')
  manual_path = os.path.join(dirpath, 'tor.1')

  try:
    try:
      with open(asciidoc_path, 'wb') as asciidoc_file:
        request = urllib.urlopen(url, timeout = timeout)
        shutil.copyfileobj(request, asciidoc_file)
    except Exception as exc:
      # catch Exception rather than using a bare 'except' so KeyboardInterrupt
      # and SystemExit still propagate instead of becoming an IOError

      raise IOError("Unable to download tor's manual from %s to %s: %s" % (url, asciidoc_path, exc))

    try:
      stem.util.system.call('a2x -f manpage %s' % asciidoc_path)

      if not os.path.exists(manual_path):
        raise OSError('no man page was generated')
    except stem.util.system.CallError as exc:
      raise IOError("Unable to run '%s': %s" % (exc.command, exc.stderr))

    if path:
      try:
        # ensure the destination directory exists before copying into it

        path_dir = os.path.dirname(path)

        if not os.path.exists(path_dir):
          os.makedirs(path_dir)

        shutil.copyfile(manual_path, path)
      except OSError as exc:
        raise IOError(exc)

    if file_handle:
      with open(manual_path, 'rb') as manual_file:
        shutil.copyfileobj(manual_file, file_handle)
        file_handle.flush()
  finally:
    shutil.rmtree(dirpath)
class Manual(object):
  """
  Parsed tor man page. Tor makes no guarantees about its man page format so
  this may not always be compatible. If not you can use the cached manual
  information stored with Stem.

  This does not include every bit of information from the tor manual. For
  instance, I've excluded the 'THE CONFIGURATION FILE FORMAT' section. If
  there's a part you'd find useful then `file an issue
  <https://trac.torproject.org/projects/tor/wiki/doc/stem/bugs>`_ and we can
  add it.

  :var str name: brief description of the tor command
  :var str synopsis: brief tor command usage
  :var str description: general description of what tor does
  :var collections.OrderedDict commandline_options: mapping of commandline arguments to their description
  :var collections.OrderedDict signals: mapping of signals tor accepts to their description
  :var collections.OrderedDict files: mapping of file paths to their description
  :var collections.OrderedDict config_options: :class:`~stem.manual.ConfigOption` tuples for tor configuration options
  :var str man_commit: latest tor commit editing the man page when this
    information was cached
  :var str stem_commit: stem commit to cache this manual information
  """

  def __init__(self, name, synopsis, description, commandline_options, signals, files, config_options):
    self.name = name
    self.synopsis = synopsis
    self.description = description
    self.commandline_options = OrderedDict(commandline_options)
    self.signals = OrderedDict(signals)
    self.files = OrderedDict(files)
    self.config_options = OrderedDict(config_options)
    self.man_commit = None  # populated when loaded from a cache
    self.stem_commit = None  # populated when loaded from a cache
    self.schema = None  # sqlite cache schema version this was read from, if any

  @staticmethod
  def from_cache(path = None):
    """
    Provides manual information cached with Stem. Unlike
    :func:`~stem.manual.Manual.from_man` and
    :func:`~stem.manual.Manual.from_remote` this doesn't have any system
    requirements, and is faster too. Only drawback is that this manual
    content is only as up to date as the Stem release we're using.

    .. versionchanged:: 1.6.0
       Added support for sqlite cache. Support for
       :class:`~stem.util.conf.Config` caches will be dropped in Stem 2.x.

    :param str path: cached manual content to read, if not provided this uses
      the bundled manual information

    :returns: :class:`~stem.manual.Manual` with our bundled manual information

    :raises:
      * **ImportError** if cache is sqlite and the sqlite3 module is
        unavailable
      * **IOError** if a **path** was provided and we were unable to read
        it or the schema is out of date
    """

    # TODO: drop _from_config_cache() with stem 2.x

    if path is None:
      path = CACHE_PATH

    # cache format is determined by the file extension

    if path is not None and path.endswith('.sqlite'):
      return Manual._from_sqlite_cache(path)
    else:
      return Manual._from_config_cache(path)

  @staticmethod
  def _from_sqlite_cache(path):
    # Reads a Manual from a sqlite cache, raising IOError if the file is
    # missing, unreadable, or has an unsupported schema version.

    if not stem.prereq.is_sqlite_available():
      raise ImportError('Reading a sqlite cache requires the sqlite3 module')

    import sqlite3

    if not os.path.exists(path):
      raise IOError("%s doesn't exist" % path)

    with sqlite3.connect(path) as conn:
      try:
        schema = conn.execute('SELECT version FROM schema').fetchone()[0]

        if schema != SCHEMA_VERSION:
          raise SchemaMismatch("Stem's current manual schema version is %s, but %s was version %s" % (SCHEMA_VERSION, path, schema), schema, (SCHEMA_VERSION,))

        name, synopsis, description, man_commit, stem_commit = conn.execute('SELECT name, synopsis, description, man_commit, stem_commit FROM metadata').fetchone()
      except sqlite3.OperationalError as exc:
        raise IOError('Failed to read database metadata from %s: %s' % (path, exc))

      commandline = dict(conn.execute('SELECT name, description FROM commandline').fetchall())
      signals = dict(conn.execute('SELECT name, description FROM signals').fetchall())
      files = dict(conn.execute('SELECT name, description FROM files').fetchall())

      config_options = OrderedDict()

      # 'position' preserves the ordering options had in the man page

      for entry in conn.execute('SELECT name, category, usage, summary, description FROM torrc ORDER BY position').fetchall():
        option, category, usage, summary, option_description = entry
        config_options[option] = ConfigOption(option, category, usage, summary, option_description)

      manual = Manual(name, synopsis, description, commandline, signals, files, config_options)
      manual.man_commit = man_commit
      manual.stem_commit = stem_commit
      manual.schema = schema

      return manual

  @staticmethod
  def _from_config_cache(path):
    # Reads a Manual from a legacy stem.util.conf cache. To be dropped with
    # the rest of the config cache support in stem 2.x.

    conf = stem.util.conf.Config()
    conf.load(path, commenting = False)

    config_options = OrderedDict()

    for key in conf.keys():
      if key.startswith('config_options.'):
        key = key.split('.')[1]

        if key not in config_options:
          config_options[key] = ConfigOption(
            conf.get('config_options.%s.name' % key, ''),
            conf.get('config_options.%s.category' % key, ''),
            conf.get('config_options.%s.usage' % key, ''),
            conf.get('config_options.%s.summary' % key, ''),
            conf.get('config_options.%s.description' % key, '')
          )

    manual = Manual(
      conf.get('name', ''),
      conf.get('synopsis', ''),
      conf.get('description', ''),
      conf.get('commandline_options', OrderedDict()),
      conf.get('signals', OrderedDict()),
      conf.get('files', OrderedDict()),
      config_options,
    )

    manual.man_commit = conf.get('man_commit', None)
    manual.stem_commit = conf.get('stem_commit', None)

    return manual

  @staticmethod
  def from_man(man_path = 'tor'):
    """
    Reads and parses a given man page.

    On OSX the man command doesn't have an '--encoding' argument so its results
    may not quite match other platforms. For instance, it normalizes long
    dashes into '--'.

    :param str man_path: path argument for 'man', for example you might want
      '/path/to/tor/doc/tor.1' to read from tor's git repository

    :returns: :class:`~stem.manual.Manual` for the system's man page

    :raises: **IOError** if unable to retrieve the manual
    """

    man_cmd = 'man %s -P cat %s' % ('--encoding=ascii' if HAS_ENCODING_ARG else '', man_path)

    try:
      # MANWIDTH keeps man from wrapping lines so each stays intact for parsing

      man_output = stem.util.system.call(man_cmd, env = {'MANWIDTH': '10000000'})
    except OSError as exc:
      raise IOError("Unable to run '%s': %s" % (man_cmd, exc))

    categories, config_options = _get_categories(man_output), OrderedDict()

    # parse the known option sections first, in their canonical order...

    for category_header, category_enum in CATEGORY_SECTIONS.items():
      _add_config_options(config_options, category_enum, categories.get(category_header, []))

    # ... then any unrecognized '* OPTIONS' sections tor may have added

    for category in categories:
      if category.endswith(' OPTIONS') and category not in CATEGORY_SECTIONS and category not in ('COMMAND-LINE OPTIONS', 'NON-PERSISTENT OPTIONS'):
        _add_config_options(config_options, Category.UNKNOWN, categories.get(category, []))

    return Manual(
      _join_lines(categories.get('NAME', [])),
      _join_lines(categories.get('SYNOPSIS', [])),
      _join_lines(categories.get('DESCRIPTION', [])),
      _get_indented_descriptions(categories.get('COMMAND-LINE OPTIONS', [])),
      _get_indented_descriptions(categories.get('SIGNALS', [])),
      _get_indented_descriptions(categories.get('FILES', [])),
      config_options,
    )

  @staticmethod
  def from_remote(timeout = 60):
    """
    Reads and parses the latest tor man page `from gitweb.torproject.org
    <https://gitweb.torproject.org/tor.git/plain/doc/tor.1.txt>`_. Note that
    while convenient, this reliance on GitWeb means you should always call with
    a fallback, such as...

    ::

      try:
        manual = stem.manual.from_remote()
      except IOError:
        manual = stem.manual.from_cache()

    In addition to our GitWeb dependency this requires 'a2x' which is part of
    `asciidoc <http://asciidoc.org/INSTALL.html>`_ and... isn't quick.
    Personally this takes ~7.41s, breaking down for me as follows...

      * 1.67s to download tor.1.txt
      * 5.57s to convert the asciidoc to a man page
      * 0.17s for stem to read and parse the manual

    :param int timeout: seconds to wait before timing out the request

    :returns: latest :class:`~stem.manual.Manual` available for tor

    :raises: **IOError** if unable to retrieve the manual
    """

    with tempfile.NamedTemporaryFile() as tmp:
      download_man_page(file_handle = tmp, timeout = timeout)
      return Manual.from_man(tmp.name)

  def save(self, path):
    """
    Persists the manual content to a given location.

    .. versionchanged:: 1.6.0
       Added support for sqlite cache. Support for
       :class:`~stem.util.conf.Config` caches will be dropped in Stem 2.x.

    :param str path: path to save our manual content to

    :raises:
      * **ImportError** if saving as sqlite and the sqlite3 module is
        unavailable
      * **IOError** if unsuccessful
    """

    # TODO: drop _save_as_config() with stem 2.x

    if path.endswith('.sqlite'):
      return self._save_as_sqlite(path)
    else:
      return self._save_as_config(path)

  def _save_as_sqlite(self, path):
    if not stem.prereq.is_sqlite_available():
      raise ImportError('Saving a sqlite cache requires the sqlite3 module')

    import sqlite3

    # write to a scratch file then swap it into place, so any prior cache is
    # only replaced once the new one is fully written

    tmp_path = path + '.new'

    if os.path.exists(tmp_path):
      os.remove(tmp_path)

    with sqlite3.connect(tmp_path) as conn:
      for cmd in SCHEMA:
        conn.execute(cmd)

      conn.execute('INSERT INTO metadata(name, synopsis, description, man_commit, stem_commit) VALUES (?,?,?,?,?)', (self.name, self.synopsis, self.description, self.man_commit, self.stem_commit))

      for k, v in self.commandline_options.items():
        conn.execute('INSERT INTO commandline(name, description) VALUES (?,?)', (k, v))

      for k, v in self.signals.items():
        conn.execute('INSERT INTO signals(name, description) VALUES (?,?)', (k, v))

      for k, v in self.files.items():
        conn.execute('INSERT INTO files(name, description) VALUES (?,?)', (k, v))

      # 'position' records man page ordering so reads can restore it

      for i, v in enumerate(self.config_options.values()):
        conn.execute('INSERT INTO torrc(key, name, category, usage, summary, description, position) VALUES (?,?,?,?,?,?,?)', (v.name.upper(), v.name, v.category, v.usage, v.summary, v.description, i))

    if os.path.exists(path):
      os.remove(path)

    os.rename(tmp_path, path)

  def _save_as_config(self, path):
    # Persists to a legacy stem.util.conf cache. To be dropped in stem 2.x.

    conf = stem.util.conf.Config()
    conf.set('name', self.name)
    conf.set('synopsis', self.synopsis)
    conf.set('description', self.description)

    if self.man_commit:
      conf.set('man_commit', self.man_commit)

    if self.stem_commit:
      conf.set('stem_commit', self.stem_commit)

    for k, v in self.commandline_options.items():
      conf.set('commandline_options', '%s => %s' % (k, v), overwrite = False)

    for k, v in self.signals.items():
      conf.set('signals', '%s => %s' % (k, v), overwrite = False)

    for k, v in self.files.items():
      conf.set('files', '%s => %s' % (k, v), overwrite = False)

    for k, v in self.config_options.items():
      conf.set('config_options.%s.category' % k, v.category)
      conf.set('config_options.%s.name' % k, v.name)
      conf.set('config_options.%s.usage' % k, v.usage)
      conf.set('config_options.%s.summary' % k, v.summary)
      conf.set('config_options.%s.description' % k, v.description)

    conf.save(path)

  def __hash__(self):
    # man_commit/stem_commit/schema are metadata, deliberately excluded

    return stem.util._hash_attr(self, 'name', 'synopsis', 'description', 'commandline_options', 'signals', 'files', 'config_options', cache = True)

  def __eq__(self, other):
    return hash(self) == hash(other) if isinstance(other, Manual) else False

  def __ne__(self, other):
    return not self == other
def _get_categories(content):
  """
  The man page is headers followed by an indented section. First pass gets
  the mapping of category titles to their lines.

  :param list content: lines of man page output

  :returns: **collections.OrderedDict** mapping section titles (e.g.
    'DESCRIPTION') to the list of lines within them
  """

  # skip header and footer lines

  if content and 'TOR(1)' in content[0]:
    content = content[1:]

  if content and content[-1].startswith('Tor'):
    content = content[:-1]

  categories = OrderedDict()
  category, lines = None, []

  for line in content:
    # replace non-ascii characters
    #
    #   \u2019 - smart single quote
    #   \u2014 - extra long dash
    #   \xb7 - centered dot

    char_for = chr if stem.prereq.is_python_3() else unichr
    line = line.replace(char_for(0x2019), "'").replace(char_for(0x2014), '-').replace(char_for(0xb7), '*')

    if line and not line.startswith(' '):
      # an unindented line starts a new section header, flush the prior one

      if category:
        if lines and lines[-1] == '':
          lines = lines[:-1]  # sections end with an extra empty line

        categories[category] = lines

      category, lines = line.strip(), []
    else:
      if line.startswith(' '):
        line = line[7:]  # contents of a section have a seven space indentation

      lines.append(line)

  # flush the final section

  if category:
    categories[category] = lines

  return categories
def _get_indented_descriptions(lines):
"""
Parses the commandline argument and signal sections. These are options
followed by an indented description. For example...
::
-f FILE
Specify a new configuration file to contain further Tor configuration
options OR pass - to make Tor read its configuration from standard
input. (Default: /usr/local/etc/tor/torrc, or $HOME/.torrc if that file
is not found)
There can be additional paragraphs not related to any particular argument but
ignoring those.
"""
options, last_arg = OrderedDict(), None
for line in lines:
if line and not line.startswith(' '):
options[line], last_arg = [], line
elif last_arg and line.startswith(' '):
options[last_arg].append(line[4:])
return dict([(arg, ' '.join(desc_lines)) for arg, desc_lines in options.items() if desc_lines])
def _add_config_options(config_options, category, lines):
  """
  Parses a section of tor configuration options. These have usage information,
  followed by an indented description. For instance...

  ::

    ConnLimit NUM
        The minimum number of file descriptors that must be available to the
        Tor process before it will start. Tor will ask the OS for as many file
        descriptors as the OS will allow (you can find this by "ulimit -H -n").
        If this number is less than ConnLimit, then Tor will refuse to start.
        You probably don't need to adjust this. It has no effect on Windows
        since that platform lacks getrlimit(). (Default: 1000)

  :param collections.OrderedDict config_options: mapping that parsed
    :class:`~stem.manual.ConfigOption` entries are added to
  :param stem.manual.Category category: category these options belong to
  :param list lines: lines of the section to parse
  """

  last_option, usage, description = None, None, []

  # Drop the section description. Each ends with a paragraph saying 'The
  # following options...'.

  desc_paragraph_index = None

  for i, line in enumerate(lines):
    if 'The following options' in line:
      desc_paragraph_index = i
      break

  if desc_paragraph_index is not None:
    lines = lines[desc_paragraph_index:]  # trim to the description paragraph
    lines = lines[lines.index(''):]  # drop the paragraph

  for line in lines:
    if line and not line.startswith(' '):
      # an unindented line starts a new option, flush the prior one

      if last_option:
        summary = _config().get('manual.summary.%s' % last_option.lower(), '')
        config_options[last_option] = ConfigOption(last_option, category, usage, summary, _join_lines(description).strip())

      # usage is whatever follows the option name on the same line

      if ' ' in line:
        last_option, usage = line.split(' ', 1)
      else:
        last_option, usage = line, ''

      description = []
    else:
      if line.startswith(' '):
        line = line[4:]

      description.append(line)

  # flush the final option

  if last_option:
    summary = _config().get('manual.summary.%s' % last_option.lower(), '')
    config_options[last_option] = ConfigOption(last_option, category, usage, summary, _join_lines(description).strip())
def _join_lines(lines):
  """
  Joins manual lines into a single string. Each non-empty line contributes its
  content followed by a newline, while runs of empty lines collapse into a
  single blank line.

  :param list lines: lines to be joined

  :returns: **str** with the joined content
  """

  joined = []

  for entry in lines:
    if entry:
      joined.append(entry + '\n')
    elif joined and joined[-1] != '\n':
      # blank line: keep a single newline, skipping repeats
      joined.append('\n')

  return ''.join(joined).strip()

View file

@ -1,34 +1,35 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Checks for stem dependencies. We require python 2.6 or greater (including the
3.x series). Other requirements for complete functionality are...
3.x series), but note we'll be bumping our requirements to python 2.7 in stem
2.0. Other requirements for complete functionality are...
* pycrypto module
* cryptography module
* validating descriptor signature integrity
::
check_requirements - checks for minimum requirements for running stem
is_python_27 - checks if python 2.7 or later is available
is_python_3 - checks if python 3.0 or later is available
is_crypto_available - checks if the pycrypto module is available
is_sqlite_available - checks if the sqlite3 module is available
is_crypto_available - checks if the cryptography module is available
is_zstd_available - checks if the zstd module is available
is_lzma_available - checks if the lzma module is available
is_mock_available - checks if the mock module is available
"""
import functools
import inspect
import platform
import sys
try:
# added in python 3.2
from functools import lru_cache
except ImportError:
from stem.util.lru_cache import lru_cache
CRYPTO_UNAVAILABLE = "Unable to import the pycrypto module. Because of this we'll be unable to verify descriptor signature integrity. You can get pycrypto from: https://www.dlitz.net/software/pycrypto/"
CRYPTO_UNAVAILABLE = "Unable to import the cryptography module. Because of this we'll be unable to verify descriptor signature integrity. You can get cryptography from: https://pypi.python.org/pypi/cryptography"
ZSTD_UNAVAILABLE = 'ZSTD compression requires the zstandard module (https://pypi.python.org/pypi/zstandard)'
LZMA_UNAVAILABLE = 'LZMA compression requires the lzma module (https://docs.python.org/3/library/lzma.html)'
PYNACL_UNAVAILABLE = "Unable to import the pynacl module. Because of this we'll be unable to verify descriptor ed25519 certificate integrity. You can get pynacl from https://pypi.python.org/pypi/PyNaCl/"
def check_requirements():
@ -46,10 +47,26 @@ def check_requirements():
raise ImportError('stem requires python version 2.6 or greater')
def _is_python_26():
  """
  Checks if we're running python 2.6. This isn't for users as it'll be removed
  in stem 2.0 (when python 2.6 support goes away).

  :returns: **True** if we're running python 2.6, **False** otherwise
  """

  # compare (major, minor) as a tuple rather than unpacking the components

  return sys.version_info[:2] == (2, 6)
def is_python_27():
"""
Checks if we're running python 2.7 or above (including the 3.x series).
.. deprecated:: 1.5.0
Function lacks much utility and will be eventually removed.
:returns: **True** if we meet this requirement and **False** otherwise
"""
@ -68,28 +85,103 @@ def is_python_3():
return sys.version_info[0] == 3
def is_pypy():
  """
  Checks if we're running PyPy.

  .. versionadded:: 1.7.0

  :returns: **True** if running pypy, **False** otherwise
  """

  # CPython reports 'CPython' here, PyPy reports 'PyPy'

  return platform.python_implementation() == 'PyPy'
def is_sqlite_available():
  """
  Checks if the sqlite3 module is available. Usually this is built in, but some
  platforms such as FreeBSD and Gentoo exclude it by default.

  .. versionadded:: 1.6.0

  :returns: **True** if we can use the sqlite3 module and **False** otherwise
  """

  # Only sqlite3 matters here; availability of this module must not depend on
  # any crypto libraries.

  try:
    import sqlite3

    return True
  except ImportError:
    return False
def is_crypto_available():
  """
  Checks if the cryptography functions we use are available. This is used for
  verifying relay descriptor signatures.

  :returns: **True** if we can use the cryptography module and **False**
    otherwise
  """

  try:
    from cryptography.utils import int_from_bytes, int_to_bytes
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
    from cryptography.hazmat.primitives.serialization import load_der_public_key

    # require a cryptography release recent enough to provide the sign() api

    if not hasattr(rsa.RSAPrivateKey, 'sign'):
      raise ImportError()
  except ImportError:
    from stem.util import log
    log.log_once('stem.prereq.is_crypto_available', log.INFO, CRYPTO_UNAVAILABLE)
    return False
  else:
    return True
@lru_cache()
def is_zstd_available():
  """
  Checks if the `zstd module <https://pypi.python.org/pypi/zstandard>`_ is
  available.

  .. versionadded:: 1.7.0

  :returns: **True** if we can use the zstd module and **False** otherwise
  """

  try:
    import zstd
  except ImportError:
    from stem.util import log
    log.log_once('stem.prereq.is_zstd_available', log.INFO, ZSTD_UNAVAILABLE)
    return False
  else:
    # The zstandard and zstd pypi projects both install a module named 'zstd',
    # so distinguish them by an attribute only zstandard provides.

    return hasattr(zstd, 'ZstdDecompressor')
def is_lzma_available():
  """
  Checks if the `lzma module <https://docs.python.org/3/library/lzma.html>`_ is
  available. This was added as a builtin in Python 3.3.

  .. versionadded:: 1.7.0

  :returns: **True** if we can use the lzma module and **False** otherwise
  """

  try:
    import lzma
  except ImportError:
    from stem.util import log
    log.log_once('stem.prereq.is_lzma_available', log.INFO, LZMA_UNAVAILABLE)
    return False
  else:
    return True
def is_mock_available():
"""
Checks if the mock module is available. In python 3.3 and up it is a builtin
@ -130,3 +222,37 @@ def is_mock_available():
return True
except ImportError:
return False
def _is_lru_cache_available():
  """
  Functools added lru_cache to the standard library in Python 3.2. Prior to
  this using a bundled implementation. We're also using this with Python 3.5
  due to a buggy implementation. (:trac:`26412`)

  :returns: **True** if we can use functools' lru_cache, **False** otherwise
  """

  # Python 3.5's lru_cache is unusable for our purposes, so report it as
  # unavailable there even though the stdlib provides it.

  if sys.version_info[:2] == (3, 5):
    return False

  return hasattr(functools, 'lru_cache')
def _is_pynacl_available():
  """
  Checks if the pynacl functions we use are available. This is used for
  verifying ed25519 certificates in relay descriptor signatures.

  :returns: **True** if we can use pynacl and **False** otherwise
  """

  from stem.util import log

  try:
    from nacl import encoding, signing
  except ImportError:
    log.log_once('stem.prereq._is_pynacl_available', log.INFO, PYNACL_UNAVAILABLE)
    return False
  else:
    return True

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -23,6 +23,7 @@ import re
import signal
import subprocess
import tempfile
import threading
import stem.prereq
import stem.util.str_tools
@ -33,7 +34,7 @@ NO_TORRC = '<no torrc>'
DEFAULT_INIT_TIMEOUT = 90
def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, stdin = None):
def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True, stdin = None):
"""
Initializes a tor process. This blocks until initialization completes or we
error out.
@ -47,8 +48,14 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce
default, but if you have a 'Log' entry in your torrc then you'll also need
'Log NOTICE stdout'.
Note: The timeout argument does not work on Windows, and relies on the global
state of the signal module.
Note: The timeout argument does not work on Windows or when outside the
main thread, and relies on the global state of the signal module.
.. versionchanged:: 1.6.0
Allowing the timeout argument to be a float.
.. versionchanged:: 1.7.0
Added the **close_output** argument.
:param str tor_cmd: command for starting tor
:param list args: additional arguments for tor
@ -62,6 +69,8 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce
:param bool take_ownership: asserts ownership over the tor process so it
aborts if this python process terminates or a :class:`~stem.control.Controller`
we establish to it disconnects
:param bool close_output: closes tor's stdout and stderr streams when
bootstrapping is complete if true
:param str stdin: content to provide on stdin
:returns: **subprocess.Popen** instance for the tor subprocess
@ -71,6 +80,14 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce
"""
if stem.util.system.is_windows():
if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT:
raise OSError('You cannot launch tor with a timeout on Windows')
timeout = None
elif threading.current_thread().__class__.__name__ != '_MainThread':
if timeout is not None and timeout != DEFAULT_INIT_TIMEOUT:
raise OSError('Launching tor with a timeout can only be done in the main thread')
timeout = None
# sanity check that we got a tor binary
@ -105,27 +122,26 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce
if take_ownership:
runtime_args += ['__OwningControllerProcess', str(os.getpid())]
tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE)
if stdin:
tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin))
tor_process.stdin.close()
if timeout:
def timeout_handler(signum, frame):
# terminates the uninitialized tor process and raise on timeout
tor_process.kill()
raise OSError('reached a %i second timeout without success' % timeout)
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(timeout)
bootstrap_line = re.compile('Bootstrapped ([0-9]+)%: ')
problem_line = re.compile('\[(warn|err)\] (.*)$')
last_problem = 'Timed out'
tor_process = None
try:
tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE)
if stdin:
tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin))
tor_process.stdin.close()
if timeout:
def timeout_handler(signum, frame):
raise OSError('reached a %i second timeout without success' % timeout)
signal.signal(signal.SIGALRM, timeout_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
bootstrap_line = re.compile('Bootstrapped ([0-9]+)%: ')
problem_line = re.compile('\[(warn|err)\] (.*)$')
last_problem = 'Timed out'
while True:
# Tor's stdout will be read as ASCII bytes. This is fine for python 2, but
# in python 3 that means it'll mismatch with other operations (for instance
@ -139,7 +155,6 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce
# this will provide empty results if the process is terminated
if not init_line:
tor_process.kill() # ... but best make sure
raise OSError('Process terminated: %s' % last_problem)
# provide the caller with the initialization message if they want it
@ -162,12 +177,22 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce
msg = msg.split(': ')[-1].strip()
last_problem = msg
except:
if tor_process:
tor_process.kill() # don't leave a lingering process
tor_process.wait()
raise
finally:
if timeout:
signal.alarm(0) # stop alarm
tor_process.stdout.close()
tor_process.stderr.close()
if tor_process and close_output:
if tor_process.stdout:
tor_process.stdout.close()
if tor_process.stderr:
tor_process.stderr.close()
if temp_file:
try:
@ -176,7 +201,7 @@ def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_perce
pass
def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False):
def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, close_output = True):
"""
Initializes a tor process, like :func:`~stem.process.launch_tor`, but with a
customized configuration. This writes a temporary torrc to disk, launches
@ -196,6 +221,9 @@ def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, in
},
)
.. versionchanged:: 1.7.0
Added the **close_output** argument.
:param dict config: configuration options, such as "{'ControlPort': '9051'}",
values can either be a **str** or **list of str** if for multiple values
:param str tor_cmd: command for starting tor
@ -208,6 +236,8 @@ def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, in
:param bool take_ownership: asserts ownership over the tor process so it
aborts if this python process terminates or a :class:`~stem.control.Controller`
we establish to it disconnects
:param bool close_output: closes tor's stdout and stderr streams when
bootstrapping is complete if true
:returns: **subprocess.Popen** instance for the tor subprocess
@ -252,7 +282,7 @@ def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, in
config_str += '%s %s\n' % (key, value)
if use_stdin:
return launch_tor(tor_cmd, ['-f', '-'], None, completion_percent, init_msg_handler, timeout, take_ownership, stdin = config_str)
return launch_tor(tor_cmd, ['-f', '-'], None, completion_percent, init_msg_handler, timeout, take_ownership, close_output, stdin = config_str)
else:
torrc_descriptor, torrc_path = tempfile.mkstemp(prefix = 'torrc-', text = True)

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -16,9 +16,7 @@ Parses replies from the control socket.
|- from_str - provides a ControlMessage for the given string
|- is_ok - response had a 250 status
|- content - provides the parsed message content
|- raw_content - unparsed socket data
|- __str__ - content stripped of protocol formatting
+- __iter__ - ControlLine entries for the content of the message
+- raw_content - unparsed socket data
ControlLine - String subclass with methods for parsing controller responses.
|- remainder - provides the unparsed content
@ -30,6 +28,15 @@ Parses replies from the control socket.
+- pop_mapping - removes and returns the next entry as a KEY=VALUE mapping
"""
import codecs
import io
import re
import threading
import stem.socket
import stem.util
import stem.util.str_tools
__all__ = [
'add_onion',
'events',
@ -43,28 +50,8 @@ __all__ = [
'SingleLineResponse',
]
import re
import threading
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import stem.socket
KEY_ARG = re.compile('^(\S+)=')
# Escape sequences from the 'esc_for_log' function of tor's 'common/util.c'.
# It's hard to tell what controller functions use this in practice, but direct
# users are...
# - 'COOKIEFILE' field of PROTOCOLINFO responses
# - logged messages about bugs
# - the 'getinfo_helper_listeners' function of control.c
CONTROL_ESCAPES = {r'\\': '\\', r'\"': '\"', r'\'': '\'',
r'\r': '\r', r'\n': '\n', r'\t': '\t'}
def convert(response_type, message, **kwargs):
"""
@ -76,12 +63,13 @@ def convert(response_type, message, **kwargs):
=================== =====
response_type Class
=================== =====
**GETINFO** :class:`stem.response.getinfo.GetInfoResponse`
**GETCONF** :class:`stem.response.getconf.GetConfResponse`
**MAPADDRESS** :class:`stem.response.mapaddress.MapAddressResponse`
**EVENT** :class:`stem.response.events.Event` subclass
**PROTOCOLINFO** :class:`stem.response.protocolinfo.ProtocolInfoResponse`
**ADD_ONION** :class:`stem.response.add_onion.AddOnionResponse`
**AUTHCHALLENGE** :class:`stem.response.authchallenge.AuthChallengeResponse`
**EVENT** :class:`stem.response.events.Event` subclass
**GETCONF** :class:`stem.response.getconf.GetConfResponse`
**GETINFO** :class:`stem.response.getinfo.GetInfoResponse`
**MAPADDRESS** :class:`stem.response.mapaddress.MapAddressResponse`
**PROTOCOLINFO** :class:`stem.response.protocolinfo.ProtocolInfoResponse`
**SINGLELINE** :class:`stem.response.SingleLineResponse`
=================== =====
@ -119,11 +107,11 @@ def convert(response_type, message, **kwargs):
'ADD_ONION': stem.response.add_onion.AddOnionResponse,
'AUTHCHALLENGE': stem.response.authchallenge.AuthChallengeResponse,
'EVENT': stem.response.events.Event,
'GETINFO': stem.response.getinfo.GetInfoResponse,
'GETCONF': stem.response.getconf.GetConfResponse,
'GETINFO': stem.response.getinfo.GetInfoResponse,
'MAPADDRESS': stem.response.mapaddress.MapAddressResponse,
'SINGLELINE': SingleLineResponse,
'PROTOCOLINFO': stem.response.protocolinfo.ProtocolInfoResponse,
'SINGLELINE': SingleLineResponse,
}
try:
@ -140,23 +128,37 @@ class ControlMessage(object):
Message from the control socket. This is iterable and can be stringified for
individual message components stripped of protocol formatting. Messages are
never empty.
.. versionchanged:: 1.7.0
Implemented equality and hashing.
"""
@staticmethod
def from_str(content, msg_type = None, **kwargs):
def from_str(content, msg_type = None, normalize = False, **kwargs):
"""
Provides a ControlMessage for the given content.
.. versionadded:: 1.1.0
.. versionchanged:: 1.6.0
Added the normalize argument.
:param str content: message to construct the message from
:param str msg_type: type of tor reply to parse the content as
:param bool normalize: ensures expected carriage return and ending newline
are present
:param kwargs: optional keyword arguments to be passed to the parser method
:returns: stem.response.ControlMessage instance
"""
msg = stem.socket.recv_message(StringIO(content))
if normalize:
if not content.endswith('\n'):
content += '\n'
content = re.sub('([\r]?)\n', '\r\n', content)
msg = stem.socket.recv_message(io.BytesIO(stem.util.str_tools._to_bytes(content)))
if msg_type is not None:
convert(msg_type, msg, **kwargs)
@ -169,6 +171,8 @@ class ControlMessage(object):
self._parsed_content = parsed_content
self._raw_content = raw_content
self._str = None
self._hash = stem.util._hash_attr(self, '_raw_content')
def is_ok(self):
"""
@ -245,7 +249,10 @@ class ControlMessage(object):
formatting.
"""
return '\n'.join(list(self))
if self._str is None:
self._str = '\n'.join(list(self))
return self._str
def __iter__(self):
"""
@ -295,6 +302,15 @@ class ControlMessage(object):
return ControlLine(content)
def __hash__(self):
return self._hash
def __eq__(self, other):
return hash(self) == hash(other) if isinstance(other, ControlMessage) else False
def __ne__(self, other):
return not self == other
class ControlLine(str):
"""
@ -336,7 +352,7 @@ class ControlLine(str):
"""
Checks if our next entry is a quoted value or not.
:param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
:param bool escaped: unescapes the string
:returns: **True** if the next entry can be parsed as a quoted value, **False** otherwise
"""
@ -350,7 +366,7 @@ class ControlLine(str):
:param str key: checks that the key matches this value, skipping the check if **None**
:param bool quoted: checks that the mapping is to a quoted value
:param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
:param bool escaped: unescapes the string
:returns: **True** if the next entry can be parsed as a key=value mapping,
**False** otherwise
@ -408,7 +424,7 @@ class ControlLine(str):
"this has a \\" and \\\\ in it"
:param bool quoted: parses the next entry as a quoted value, removing the quotes
:param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
:param bool escaped: unescapes the string
:returns: **str** of the next space separated entry
@ -418,17 +434,21 @@ class ControlLine(str):
"""
with self._remainder_lock:
next_entry, remainder = _parse_entry(self._remainder, quoted, escaped)
next_entry, remainder = _parse_entry(self._remainder, quoted, escaped, False)
self._remainder = remainder
return next_entry
def pop_mapping(self, quoted = False, escaped = False):
def pop_mapping(self, quoted = False, escaped = False, get_bytes = False):
"""
Parses the next space separated entry as a KEY=VALUE mapping, removing it
and the space from our remaining content.
.. versionchanged:: 1.6.0
Added the get_bytes argument.
:param bool quoted: parses the value as being quoted, removing the quotes
:param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
:param bool escaped: unescapes the string
:param bool get_bytes: provides **bytes** for the **value** rather than a **str**
:returns: **tuple** of the form (key, value)
@ -450,18 +470,18 @@ class ControlLine(str):
key = key_match.groups()[0]
remainder = self._remainder[key_match.end():]
next_entry, remainder = _parse_entry(remainder, quoted, escaped)
next_entry, remainder = _parse_entry(remainder, quoted, escaped, get_bytes)
self._remainder = remainder
return (key, next_entry)
def _parse_entry(line, quoted, escaped):
def _parse_entry(line, quoted, escaped, get_bytes):
"""
Parses the next entry from the given space separated content.
:param str line: content to be parsed
:param bool quoted: parses the next entry as a quoted value, removing the quotes
:param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
:param bool escaped: unescapes the string
:returns: **tuple** of the form (entry, remainder)
@ -491,7 +511,26 @@ def _parse_entry(line, quoted, escaped):
next_entry, remainder = remainder, ''
if escaped:
next_entry = _unescape(next_entry)
# Tor does escaping in its 'esc_for_log' function of 'common/util.c'. It's
# hard to tell what controller functions use this in practice, but direct
# users are...
#
# * 'COOKIEFILE' field of PROTOCOLINFO responses
# * logged messages about bugs
# * the 'getinfo_helper_listeners' function of control.c
#
# Ideally we'd use "next_entry.decode('string_escape')" but it was removed
# in python 3.x and 'unicode_escape' isn't quite the same...
#
# https://stackoverflow.com/questions/14820429/how-do-i-decodestring-escape-in-python3
next_entry = codecs.escape_decode(next_entry)[0]
if stem.prereq.is_python_3() and not get_bytes:
next_entry = stem.util.str_tools._to_unicode(next_entry) # normalize back to str
if get_bytes:
next_entry = stem.util.str_tools._to_bytes(next_entry)
return (next_entry, remainder.lstrip())
@ -501,7 +540,7 @@ def _get_quote_indices(line, escaped):
Provides the indices of the next two quotes in the given content.
:param str line: content to be parsed
:param bool escaped: unescapes the CONTROL_ESCAPES escape sequences
:param bool escaped: unescapes the string
:returns: **tuple** of two ints, indices being -1 if a quote doesn't exist
"""
@ -522,34 +561,6 @@ def _get_quote_indices(line, escaped):
return tuple(indices)
def _unescape(entry):
# Unescapes the given string with the mappings in CONTROL_ESCAPES.
#
# This can't be a simple series of str.replace() calls because replacements
# need to be excluded from consideration for further unescaping. For
# instance, '\\t' should be converted to '\t' rather than a tab.
def _pop_with_unescape(entry):
# Pop either the first character or the escape sequence conversion the
# entry starts with. This provides a tuple of...
#
# (unescaped prefix, remaining entry)
for esc_sequence, replacement in CONTROL_ESCAPES.items():
if entry.startswith(esc_sequence):
return (replacement, entry[len(esc_sequence):])
return (entry[0], entry[1:])
result = []
while entry:
prefix, entry = _pop_with_unescape(entry)
result.append(prefix)
return ''.join(result)
class SingleLineResponse(ControlMessage):
"""
Reply to a request that performs an action rather than querying data. These

View file

@ -1,4 +1,4 @@
# Copyright 2015, Damian Johnson and The Tor Project
# Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import stem.response
@ -12,17 +12,20 @@ class AddOnionResponse(stem.response.ControlMessage):
:var str private_key: base64 encoded hidden service private key
:var str private_key_type: crypto used to generate the hidden service private
key (such as RSA1024)
:var dict client_auth: newly generated client credentials the service accepts
"""
def _parse_message(self):
# Example:
# 250-ServiceID=gfzprpioee3hoppz
# 250-PrivateKey=RSA1024:MIICXgIBAAKBgQDZvYVxv...
# 250-ClientAuth=bob:l4BT016McqV2Oail+Bwe6w
# 250 OK
self.service_id = None
self.private_key = None
self.private_key_type = None
self.client_auth = {}
if not self.is_ok():
raise stem.ProtocolError("ADD_ONION response didn't have an OK status: %s" % self)
@ -41,3 +44,9 @@ class AddOnionResponse(stem.response.ControlMessage):
raise stem.ProtocolError("ADD_ONION PrivateKey lines should be of the form 'PrivateKey=[type]:[key]: %s" % self)
self.private_key_type, self.private_key = value.split(':', 1)
elif key == 'ClientAuth':
if ':' not in value:
raise stem.ProtocolError("ADD_ONION ClientAuth lines should be of the form 'ClientAuth=[username]:[credential]: %s" % self)
username, credential = value.split(':', 1)
self.client_auth[username] = credential

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import binascii
@ -41,7 +41,7 @@ class AuthChallengeResponse(stem.response.ControlMessage):
if not stem.util.tor_tools.is_hex_digits(value, 64):
raise stem.ProtocolError('SERVERHASH has an invalid value: %s' % value)
self.server_hash = binascii.a2b_hex(stem.util.str_tools._to_bytes(value))
self.server_hash = binascii.unhexlify(stem.util.str_tools._to_bytes(value))
else:
raise stem.ProtocolError('Missing SERVERHASH mapping: %s' % line)
@ -51,6 +51,6 @@ class AuthChallengeResponse(stem.response.ControlMessage):
if not stem.util.tor_tools.is_hex_digits(value, 64):
raise stem.ProtocolError('SERVERNONCE has an invalid value: %s' % value)
self.server_nonce = binascii.a2b_hex(stem.util.str_tools._to_bytes(value))
self.server_nonce = binascii.unhexlify(stem.util.str_tools._to_bytes(value))
else:
raise stem.ProtocolError('Missing SERVERNONCE mapping: %s' % line)

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import io
@ -8,10 +8,11 @@ import time
import stem
import stem.control
import stem.descriptor.router_status_entry
import stem.prereq
import stem.response
import stem.util
import stem.version
from stem import str_type, int_type
from stem.util import connection, log, str_tools, tor_tools
# Matches keyword=value arguments. This can't be a simple "(.*)=(.*)" pattern
@ -21,6 +22,11 @@ from stem.util import connection, log, str_tools, tor_tools
KW_ARG = re.compile('^(.*) ([A-Za-z0-9_]+)=(\S*)$')
QUOTED_KW_ARG = re.compile('^(.*) ([A-Za-z0-9_]+)="(.*)"$')
CELL_TYPE = re.compile('^[a-z0-9_]+$')
PARSE_NEWCONSENSUS_EVENTS = True
# TODO: We can remove the following when we drop python2.6 support.
INT_TYPE = int if stem.prereq.is_python_3() else long
class Event(stem.response.ControlMessage):
@ -65,6 +71,9 @@ class Event(stem.response.ControlMessage):
self._parse()
def __hash__(self):
return stem.util._hash_attr(self, 'arrived_at', parent = stem.response.ControlMessage, cache = True)
def _parse_standard_attr(self):
"""
Most events are of the form...
@ -126,6 +135,25 @@ class Event(stem.response.ControlMessage):
for controller_attr_name, attr_name in self._KEYWORD_ARGS.items():
setattr(self, attr_name, self.keyword_args.get(controller_attr_name))
def _iso_timestamp(self, timestamp):
"""
Parses an iso timestamp (ISOTime2Frac in the control-spec).
:param str timestamp: timestamp to parse
:returns: **datetime** with the parsed timestamp
:raises: :class:`stem.ProtocolError` if timestamp is malformed
"""
if timestamp is None:
return None
try:
return str_tools._parse_iso_timestamp(timestamp)
except ValueError as exc:
raise stem.ProtocolError('Unable to parse timestamp (%s): %s' % (exc, self))
# method overwritten by our subclasses for special handling that they do
def _parse(self):
pass
@ -142,7 +170,7 @@ class Event(stem.response.ControlMessage):
attr_values = getattr(self, attr)
if attr_values:
if isinstance(attr_values, (bytes, str_type)):
if stem.util._is_str(attr_values):
attr_values = [attr_values]
for value in attr_values:
@ -163,7 +191,7 @@ class AddrMapEvent(Event):
Added the cached attribute.
:var str hostname: address being resolved
:var str destination: destionation of the resolution, this is usually an ip,
:var str destination: destination of the resolution, this is usually an ip,
but could be a hostname if TrackHostExits is enabled or **NONE** if the
resolution failed
:var datetime expiry: expiration time of the resolution in local time
@ -212,7 +240,11 @@ class AuthDirNewDescEvent(Event):
descriptors. The descriptor type contained within this event is unspecified
so the descriptor contents are left unparsed.
The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha.
The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha and
removed in 0.3.2.1-alpha. (:spec:`6e887ba`)
.. deprecated:: 1.6.0
Tor dropped this event as of version 0.3.2.1. (:spec:`6e887ba`)
:var stem.AuthDescriptorAction action: what is being done with the descriptor
:var str message: explanation of why we chose this action
@ -245,8 +277,8 @@ class BandwidthEvent(Event):
The BW event was one of the first Control Protocol V1 events and was
introduced in tor version 0.1.1.1-alpha.
:var long read: bytes received by tor that second
:var long written: bytes sent by tor that second
:var int read: bytes received by tor that second
:var int written: bytes sent by tor that second
"""
_POSITIONAL_ARGS = ('read', 'written')
@ -259,8 +291,8 @@ class BandwidthEvent(Event):
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
self.read = int_type(self.read)
self.written = int_type(self.written)
self.read = INT_TYPE(self.read)
self.written = INT_TYPE(self.written)
class BuildTimeoutSetEvent(Event):
@ -365,16 +397,11 @@ class CircuitEvent(Event):
def _parse(self):
self.path = tuple(stem.control._parse_circ_path(self.path))
self.created = self._iso_timestamp(self.created)
if self.build_flags is not None:
self.build_flags = tuple(self.build_flags.split(','))
if self.created is not None:
try:
self.created = str_tools._parse_iso_timestamp(self.created)
except ValueError as exc:
raise stem.ProtocolError('Unable to parse create date (%s): %s' % (exc, self))
if not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
@ -386,35 +413,15 @@ class CircuitEvent(Event):
self._log_if_unrecognized('remote_reason', stem.CircClosureReason)
def _compare(self, other, method):
# sorting circuit events by their identifier
if not isinstance(other, CircuitEvent):
return False
for attr in ('id', 'status', 'path', 'build_flags', 'purpose', 'hs_state', 'rend_query', 'created', 'reason', 'remote_reason', 'socks_username', 'socks_port'):
my_attr = getattr(self, attr)
other_attr = getattr(other, attr)
my_id = getattr(self, 'id')
their_id = getattr(other, 'id')
# Our id attribute is technically a string, but Tor conventionally uses
# ints. Attempt to handle as ints if that's the case so we get numeric
# ordering.
if attr == 'id' and my_attr and other_attr:
if my_attr.isdigit() and other_attr.isdigit():
my_attr = int(my_attr)
other_attr = int(other_attr)
if my_attr is None:
my_attr = ''
if other_attr is None:
other_attr = ''
if my_attr != other_attr:
return method(my_attr, other_attr)
return True
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
return method(my_id, their_id) if my_id != their_id else method(hash(self), hash(other))
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
@ -458,16 +465,11 @@ class CircMinorEvent(Event):
def _parse(self):
self.path = tuple(stem.control._parse_circ_path(self.path))
self.created = self._iso_timestamp(self.created)
if self.build_flags is not None:
self.build_flags = tuple(self.build_flags.split(','))
if self.created is not None:
try:
self.created = str_tools._parse_iso_timestamp(self.created)
except ValueError as exc:
raise stem.ProtocolError('Unable to parse create date (%s): %s' % (exc, self))
if not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
@ -545,15 +547,26 @@ class ConfChangedEvent(Event):
The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha.
:var dict config: mapping of configuration options to their new values
(**None** if the option is being unset)
.. deprecated:: 1.7.0
Deprecated the *config* attribute. Some tor configuration options (like
ExitPolicy) can have multiple values, so a simple 'str => str' mapping
meant that we only provided the last.
.. versionchanged:: 1.7.0
Added the changed and unset attributes.
:var dict changed: mapping of configuration options to a list of their new
values
:var list unset: configuration options that have been unset
"""
_SKIP_PARSING = True
_VERSION_ADDED = stem.version.Requirement.EVENT_CONF_CHANGED
def _parse(self):
self.config = {}
self.changed = {}
self.unset = []
self.config = {} # TODO: remove in stem 2.0
# Skip first and last line since they're the header and footer. For
# instance...
@ -567,8 +580,10 @@ class ConfChangedEvent(Event):
for line in str(self).splitlines()[1:-1]:
if '=' in line:
key, value = line.split('=', 1)
self.changed.setdefault(key, []).append(value)
else:
key, value = line, None
self.unset.append(key)
self.config[key] = value
@ -630,6 +645,12 @@ class HSDescEvent(Event):
.. versionchanged:: 1.3.0
Added the reason attribute.
.. versionchanged:: 1.5.0
Added the replica attribute.
.. versionchanged:: 1.7.0
Added the index attribute.
:var stem.HSDescAction action: what is happening with the descriptor
:var str address: hidden service address
:var stem.HSAuth authentication: service's authentication method
@ -638,21 +659,30 @@ class HSDescEvent(Event):
:var str directory_nickname: hidden service directory's nickname if it was provided
:var str descriptor_id: descriptor identifier
:var stem.HSDescReason reason: reason the descriptor failed to be fetched
:var int replica: replica number the descriptor involves
:var str index: computed index of the HSDir the descriptor was uploaded to or fetched from
"""
_VERSION_ADDED = stem.version.Requirement.EVENT_HS_DESC
_POSITIONAL_ARGS = ('action', 'address', 'authentication', 'directory', 'descriptor_id')
_KEYWORD_ARGS = {'REASON': 'reason'}
_KEYWORD_ARGS = {'REASON': 'reason', 'REPLICA': 'replica', 'HSDIR_INDEX': 'index'}
def _parse(self):
self.directory_fingerprint = None
self.directory_nickname = None
try:
self.directory_fingerprint, self.directory_nickname = \
stem.control._parse_circ_entry(self.directory)
except stem.ProtocolError:
raise stem.ProtocolError("HS_DESC's directory doesn't match a ServerSpec: %s" % self)
if self.directory != 'UNKNOWN':
try:
self.directory_fingerprint, self.directory_nickname = \
stem.control._parse_circ_entry(self.directory)
except stem.ProtocolError:
raise stem.ProtocolError("HS_DESC's directory doesn't match a ServerSpec: %s" % self)
if self.replica is not None:
if not self.replica.isdigit():
raise stem.ProtocolError('HS_DESC event got a non-numeric replica count (%s): %s' % (self.replica, self))
self.replica = int(self.replica)
self._log_if_unrecognized('action', stem.HSDescAction)
self._log_if_unrecognized('authentication', stem.HSAuth)
@ -744,11 +774,27 @@ class NetworkStatusEvent(Event):
self.desc = list(stem.descriptor.router_status_entry._parse_file(
io.BytesIO(str_tools._to_bytes(content)),
True,
False,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
class NetworkLivenessEvent(Event):
"""
Event for when the network becomes reachable or unreachable.
The NETWORK_LIVENESS event was introduced in tor version 0.2.7.2-alpha.
.. versionadded:: 1.5.0
:var str status: status of the network ('UP', 'DOWN', or possibly other
statuses in the future)
"""
_VERSION_ADDED = stem.version.Requirement.EVENT_NETWORK_LIVENESS
_POSITIONAL_ARGS = ('status',)
class NewConsensusEvent(Event):
"""
Event for when we have a new consensus. This is similar to
@ -758,6 +804,19 @@ class NewConsensusEvent(Event):
The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha.
.. versionchanged:: 1.6.0
Added the consensus_content attribute.
.. deprecated:: 1.6.0
In Stem 2.0 we'll remove the desc attribute, so this event only provides
the unparsed consensus. Callers can then parse it if they'd like. To drop
parsing before then you can set...
::
stem.response.events.PARSE_NEWCONSENSUS_EVENTS = False
:var str consensus_content: consensus content
:var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
"""
@ -765,16 +824,19 @@ class NewConsensusEvent(Event):
_VERSION_ADDED = stem.version.Requirement.EVENT_NEWCONSENSUS
def _parse(self):
content = str(self).lstrip('NEWCONSENSUS\n').rstrip('\nOK')
self.consensus_content = str(self).lstrip('NEWCONSENSUS\n').rstrip('\nOK')
# TODO: For stem 2.0.0 consider changing 'desc' to 'descriptors' to match
# our other events.
self.desc = list(stem.descriptor.router_status_entry._parse_file(
io.BytesIO(str_tools._to_bytes(content)),
True,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
if PARSE_NEWCONSENSUS_EVENTS:
self.desc = list(stem.descriptor.router_status_entry._parse_file(
io.BytesIO(str_tools._to_bytes(self.consensus_content)),
False,
entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
))
else:
self.desc = None
class NewDescEvent(Event):
@ -846,7 +908,7 @@ class ORConnEvent(Event):
if ':' not in self.endpoint:
raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self)
address, port = self.endpoint.split(':', 1)
address, port = self.endpoint.rsplit(':', 1)
if not connection.is_valid_port(port):
raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self)
@ -993,7 +1055,7 @@ class StreamEvent(Event):
if ':' not in self.source_addr:
raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self)
address, port = self.source_addr.split(':', 1)
address, port = self.source_addr.rsplit(':', 1)
if not connection.is_valid_port(port, allow_zero = True):
raise stem.ProtocolError("Source location's port is invalid: %s" % self)
@ -1018,12 +1080,16 @@ class StreamBwEvent(Event):
The STREAM_BW event was introduced in tor version 0.1.2.8-beta.
.. versionchanged:: 1.6.0
Added the time attribute.
:var str id: stream identifier
:var long written: bytes sent by the application
:var long read: bytes received by the application
:var int written: bytes sent by the application
:var int read: bytes received by the application
:var datetime time: time when the measurement was recorded
"""
_POSITIONAL_ARGS = ('id', 'written', 'read')
_POSITIONAL_ARGS = ('id', 'written', 'read', 'time')
_VERSION_ADDED = stem.version.Requirement.EVENT_STREAM_BW
def _parse(self):
@ -1036,8 +1102,9 @@ class StreamBwEvent(Event):
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
self.read = int_type(self.read)
self.written = int_type(self.written)
self.read = INT_TYPE(self.read)
self.written = INT_TYPE(self.written)
self.time = self._iso_timestamp(self.time)
class TransportLaunchedEvent(Event):
@ -1081,15 +1148,19 @@ class ConnectionBandwidthEvent(Event):
.. versionadded:: 1.2.0
.. versionchanged:: 1.6.0
 Renamed the 'type' attribute to 'conn_type' so it wouldn't override the
 parent class attribute with the same name.
:var str id: connection identifier
:var stem.ConnectionType type: connection type
:var long read: bytes received by tor that second
:var long written: bytes sent by tor that second
:var stem.ConnectionType conn_type: connection type
:var int read: bytes received by tor that second
:var int written: bytes sent by tor that second
"""
_KEYWORD_ARGS = {
'ID': 'id',
'TYPE': 'type',
'TYPE': 'conn_type',
'READ': 'read',
'WRITTEN': 'written',
}
@ -1099,8 +1170,8 @@ class ConnectionBandwidthEvent(Event):
def _parse(self):
if not self.id:
raise stem.ProtocolError('CONN_BW event is missing its id')
elif not self.type:
raise stem.ProtocolError('CONN_BW event is missing its type')
elif not self.conn_type:
raise stem.ProtocolError('CONN_BW event is missing its connection type')
elif not self.read:
raise stem.ProtocolError('CONN_BW event is missing its read value')
elif not self.written:
@ -1110,10 +1181,10 @@ class ConnectionBandwidthEvent(Event):
elif not tor_tools.is_valid_connection_id(self.id):
raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self.read = int_type(self.read)
self.written = int_type(self.written)
self.read = INT_TYPE(self.read)
self.written = INT_TYPE(self.written)
self._log_if_unrecognized('type', stem.ConnectionType)
self._log_if_unrecognized('conn_type', stem.ConnectionType)
class CircuitBandwidthEvent(Event):
@ -1125,15 +1196,32 @@ class CircuitBandwidthEvent(Event):
.. versionadded:: 1.2.0
.. versionchanged:: 1.6.0
Added the time attribute.
.. versionchanged:: 1.7.0
Added the delivered_read, delivered_written, overhead_read, and
overhead_written attributes.
:var str id: circuit identifier
:var long read: bytes received by tor that second
:var long written: bytes sent by tor that second
:var int read: bytes received by tor that second
:var int written: bytes sent by tor that second
:var int delivered_read: user payload received by tor that second
:var int delivered_written: user payload sent by tor that second
:var int overhead_read: padding so read cells will have a fixed length
:var int overhead_written: padding so written cells will have a fixed length
:var datetime time: time when the measurement was recorded
"""
_KEYWORD_ARGS = {
'ID': 'id',
'READ': 'read',
'WRITTEN': 'written',
'DELIVERED_READ': 'delivered_read',
'DELIVERED_WRITTEN': 'delivered_written',
'OVERHEAD_READ': 'overhead_read',
'OVERHEAD_WRITTEN': 'overhead_written',
'TIME': 'time',
}
_VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_BW
@ -1145,13 +1233,28 @@ class CircuitBandwidthEvent(Event):
raise stem.ProtocolError('CIRC_BW event is missing its read value')
elif not self.written:
raise stem.ProtocolError('CIRC_BW event is missing its written value')
elif not self.read.isdigit() or not self.written.isdigit():
raise stem.ProtocolError("A CIRC_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
elif not self.read.isdigit():
raise stem.ProtocolError("A CIRC_BW event's bytes received should be a positive numeric value, received: %s" % self)
elif not self.written.isdigit():
raise stem.ProtocolError("A CIRC_BW event's bytes sent should be a positive numeric value, received: %s" % self)
elif self.delivered_read and not self.delivered_read.isdigit():
raise stem.ProtocolError("A CIRC_BW event's delivered bytes received should be a positive numeric value, received: %s" % self)
elif self.delivered_written and not self.delivered_written.isdigit():
raise stem.ProtocolError("A CIRC_BW event's delivered bytes sent should be a positive numeric value, received: %s" % self)
elif self.overhead_read and not self.overhead_read.isdigit():
raise stem.ProtocolError("A CIRC_BW event's overhead bytes received should be a positive numeric value, received: %s" % self)
elif self.overhead_written and not self.overhead_written.isdigit():
raise stem.ProtocolError("A CIRC_BW event's overhead bytes sent should be a positive numeric value, received: %s" % self)
elif not tor_tools.is_valid_circuit_id(self.id):
raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
self.read = int_type(self.read)
self.written = int_type(self.written)
self.time = self._iso_timestamp(self.time)
for attr in ('read', 'written', 'delivered_read', 'delivered_written', 'overhead_read', 'overhead_written'):
value = getattr(self, attr)
if value:
setattr(self, attr, INT_TYPE(value))
class CellStatsEvent(Event):
@ -1280,7 +1383,7 @@ def _parse_cell_type_mapping(mapping):
if ':' not in entry:
raise stem.ProtocolError("Mappings are expected to be of the form 'key:value', got '%s': %s" % (entry, mapping))
key, value = entry.split(':', 1)
key, value = entry.rsplit(':', 1)
if not CELL_TYPE.match(key):
raise stem.ProtocolError("Key had invalid characters, got '%s': %s" % (key, mapping))
@ -1311,6 +1414,7 @@ EVENT_TYPE_TO_CLASS = {
'HS_DESC': HSDescEvent,
'HS_DESC_CONTENT': HSDescContentEvent,
'INFO': LogEvent,
'NETWORK_LIVENESS': NetworkLivenessEvent,
'NEWCONSENSUS': NewConsensusEvent,
'NEWDESC': NewDescEvent,
'NOTICE': LogEvent,

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import stem.response
@ -48,6 +48,14 @@ class GetConfResponse(stem.response.ControlMessage):
else:
key, value = (line.pop(), None)
# Tor's CommaList and RouterList have a bug where they map to an empty
# string when undefined rather than None...
#
# https://trac.torproject.org/projects/tor/ticket/18263
if value == '':
value = None
if key not in self.entries:
self.entries[key] = []

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import stem.response
@ -30,12 +30,20 @@ class GetInfoResponse(stem.response.ControlMessage):
if not self.is_ok() or not remaining_lines.pop() == b'OK':
unrecognized_keywords = []
error_code, error_msg = None, None
for code, _, line in self.content():
if code != '250':
error_code = code
error_msg = line
if code == '552' and line.startswith('Unrecognized key "') and line.endswith('"'):
unrecognized_keywords.append(line[18:-1])
if unrecognized_keywords:
raise stem.InvalidArguments('552', 'GETINFO request contained unrecognized keywords: %s\n' % ', '.join(unrecognized_keywords), unrecognized_keywords)
elif error_code:
raise stem.OperationFailed(error_code, error_msg)
else:
raise stem.ProtocolError("GETINFO response didn't have an OK status:\n%s" % self)

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import stem.response

View file

@ -1,9 +1,13 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import sys
import stem.prereq
import stem.response
import stem.socket
import stem.version
import stem.util.str_tools
from stem.connection import AuthMethod
from stem.util import log
@ -101,8 +105,12 @@ class ProtocolInfoResponse(stem.response.ControlMessage):
auth_methods.append(AuthMethod.UNKNOWN)
# parse optional COOKIEFILE mapping (quoted and can have escapes)
if line.is_next_mapping('COOKIEFILE', True, True):
self.cookie_path = line.pop_mapping(True, True)[1]
self.cookie_path = line.pop_mapping(True, True, get_bytes = True)[1].decode(sys.getfilesystemencoding())
if stem.prereq.is_python_3():
self.cookie_path = stem.util.str_tools._to_unicode(self.cookie_path) # normalize back to str
elif line_type == 'VERSION':
# Line format:
# VersionLine = "250-VERSION" SP "Tor=" TorVersion OptArguments CRLF

View file

@ -0,0 +1,413 @@
################################################################################
#
# Information related to tor configuration options and events...
#
# * manual.important Most commonly used configuration options.
# * manual.summary Short summary describing the option.
# * event.description Descriptions for the events.
#
################################################################################
manual.important BandwidthRate
manual.important BandwidthBurst
manual.important RelayBandwidthRate
manual.important RelayBandwidthBurst
manual.important ControlPort
manual.important HashedControlPassword
manual.important CookieAuthentication
manual.important DataDirectory
manual.important Log
manual.important RunAsDaemon
manual.important User
manual.important Bridge
manual.important ExcludeNodes
manual.important MaxCircuitDirtiness
manual.important SocksPort
manual.important UseBridges
manual.important BridgeRelay
manual.important ContactInfo
manual.important ExitPolicy
manual.important MyFamily
manual.important Nickname
manual.important ORPort
manual.important AccountingMax
manual.important AccountingStart
manual.important DirPortFrontPage
manual.important DirPort
manual.important HiddenServiceDir
manual.important HiddenServicePort
# General Config Options
manual.summary.BandwidthRate Average bandwidth usage limit
manual.summary.BandwidthBurst Maximum bandwidth usage limit
manual.summary.MaxAdvertisedBandwidth Limit for the bandwidth we advertise as being available for relaying
manual.summary.RelayBandwidthRate Average bandwidth usage limit for relaying
manual.summary.RelayBandwidthBurst Maximum bandwidth usage limit for relaying
manual.summary.PerConnBWRate Average relayed bandwidth limit per connection
manual.summary.PerConnBWBurst Maximum relayed bandwidth limit per connection
manual.summary.ClientTransportPlugin Proxy when establishing bridge connections
manual.summary.ServerTransportPlugin Proxy when servicing bridge connections
manual.summary.ServerTransportListenAddr Endpoint for bridge's pluggable transport proxy
manual.summary.ServerTransportOptions Additional arguments for bridge's proxy
manual.summary.ExtORPort Endpoint for extended ORPort connections
manual.summary.ExtORPortCookieAuthFile Location of the ExtORPort's authentication cookie
manual.summary.ExtORPortCookieAuthFileGroupReadable Group read permissions for the ExtORPort's authentication cookie
manual.summary.ConnLimit Minimum number of file descriptors for Tor to start
manual.summary.DisableNetwork Don't accept non-controller connections
manual.summary.ConstrainedSockets Shrinks sockets to ConstrainedSockSize
manual.summary.ConstrainedSockSize Limit for the received and transmit buffers of sockets
manual.summary.ControlPort Port providing access to tor controllers (nyx, vidalia, etc)
manual.summary.ControlSocket Socket providing controller access
manual.summary.ControlSocketsGroupWritable Group read permissions for the control socket
manual.summary.HashedControlPassword Hash of the password for authenticating to the control port
manual.summary.CookieAuthentication If set, authenticates controllers via a cookie
manual.summary.CookieAuthFile Location of the authentication cookie
manual.summary.CookieAuthFileGroupReadable Group read permissions for the authentication cookie
manual.summary.ControlPortWriteToFile Path for a file tor writes containing its control port
manual.summary.ControlPortFileGroupReadable Group read permissions for the control port file
manual.summary.DataDirectory Location for storing runtime data (state, keys, etc)
manual.summary.DataDirectoryGroupReadable Group read permissions for the data directory
manual.summary.CacheDirectory Directory where information is cached
manual.summary.CacheDirectoryGroupReadable Group read permissions for the cache directory
manual.summary.FallbackDir Fallback when unable to retrieve descriptor information
manual.summary.UseDefaultFallbackDirs Use hard-coded fallback directory authorities when needed
manual.summary.DirAuthority Alternative directory authorities
manual.summary.DirAuthorityFallbackRate Rate at which to use fallback directory
manual.summary.AlternateDirAuthority Alternative directory authorities (consensus only)
manual.summary.AlternateBridgeAuthority Alternative directory authorities (bridges only)
manual.summary.DisableAllSwap Locks all allocated memory so they can't be paged out
manual.summary.DisableDebuggerAttachment Limit information applications can retrieve about the process
manual.summary.FetchDirInfoEarly Keeps consensus information up to date, even if unnecessary
manual.summary.FetchDirInfoExtraEarly Updates consensus information when it's first available
manual.summary.FetchHidServDescriptors Toggles if hidden service descriptors are fetched automatically or not
manual.summary.FetchServerDescriptors Toggles if the consensus is fetched automatically or not
manual.summary.FetchUselessDescriptors Toggles if relay descriptors are fetched when they aren't strictly necessary
manual.summary.HTTPProxy HTTP proxy for connecting to tor
manual.summary.HTTPProxyAuthenticator Authentication credentials for HTTPProxy
manual.summary.HTTPSProxy SSL proxy for connecting to tor
manual.summary.HTTPSProxyAuthenticator Authentication credentials for HTTPSProxy
manual.summary.Sandbox Run within a syscall sandbox
manual.summary.Socks4Proxy SOCKS 4 proxy for connecting to tor
manual.summary.Socks5Proxy SOCKS 5 for connecting to tor
manual.summary.Socks5ProxyUsername Username for connecting to the Socks5Proxy
manual.summary.Socks5ProxyPassword Password for connecting to the Socks5Proxy
manual.summary.UnixSocksGroupWritable Group write permissions for the socks socket
manual.summary.KeepalivePeriod Rate at which to send keepalive packets
manual.summary.Log Runlevels and location for tor logging
manual.summary.LogMessageDomains Includes a domain when logging messages
manual.summary.MaxUnparseableDescSizeToLog Size of the dedicated log for unparseable descriptors
manual.summary.OutboundBindAddress Sets the IP used for connecting to tor
manual.summary.OutboundBindAddressOR Make outbound non-exit connections originate from this address
manual.summary.OutboundBindAddressExit Make outbound exit connections originate from this address
manual.summary.PidFile Path for a file tor writes containing its process id
manual.summary.ProtocolWarnings Toggles if protocol errors give warnings or not
manual.summary.RunAsDaemon Toggles if tor runs as a daemon process
manual.summary.LogTimeGranularity limits granularity of log message timestamps
manual.summary.TruncateLogFile Overwrites log file rather than appending when restarted
manual.summary.SyslogIdentityTag Tag logs appended to the syslog as being from tor
manual.summary.AndroidIdentityTag Tag when logging to android subsystem
manual.summary.SafeLogging Toggles if logs are scrubbed of sensitive information
manual.summary.User UID for the process when started
manual.summary.KeepBindCapabilities Retain permission for binding to low valued ports
manual.summary.HardwareAccel Toggles if tor attempts to use hardware acceleration
manual.summary.AccelName OpenSSL engine name for crypto acceleration
manual.summary.AccelDir Crypto acceleration library path
manual.summary.AvoidDiskWrites Toggles if tor avoids frequently writing to disk
manual.summary.CircuitPriorityHalflife Overwrite method for prioritizing traffic among relayed connections
manual.summary.CountPrivateBandwidth Applies rate limiting to private IP addresses
manual.summary.ExtendByEd25519ID Include Ed25519 identifier when extending circuits
manual.summary.NoExec Prevents any launch of other executables
manual.summary.Schedulers Scheduling algorithm by which to send outbound data
manual.summary.KISTSchedRunInterval Scheduling interval if using KIST
manual.summary.KISTSockBufSizeFactor Multiplier for per-socket limit if using KIST
# Client Config Options
manual.summary.Bridge Available bridges
manual.summary.LearnCircuitBuildTimeout Toggles adaptive timeouts for circuit creation
manual.summary.CircuitBuildTimeout Initial timeout for circuit creation
manual.summary.CircuitsAvailableTimeout Time to keep circuits open and unused for
manual.summary.CircuitStreamTimeout Timeout for shifting streams among circuits
manual.summary.ClientOnly Ensures that we aren't used as a relay or directory mirror
manual.summary.ConnectionPadding Pad traffic to help prevent correlation attacks
manual.summary.ReducedConnectionPadding Reduce padding and increase circuit cycling for low bandwidth connections
manual.summary.ExcludeNodes Relays or locales never to be used in circuits
manual.summary.ExcludeExitNodes Relays or locales never to be used for exits
manual.summary.GeoIPExcludeUnknown Don't use relays with an unknown locale in circuits
manual.summary.ExitNodes Preferred final hop for circuits
manual.summary.EntryNodes Preferred first hops for circuits
manual.summary.StrictNodes Never uses nodes outside of Entry/ExitNodes
manual.summary.FascistFirewall Only make outbound connections on FirewallPorts
manual.summary.FirewallPorts Ports used by FascistFirewall
manual.summary.HidServAuth Authentication credentials for connecting to a hidden service
manual.summary.ClientOnionAuthDir Path containing hidden service authorization files
manual.summary.ReachableAddresses Rules for bypassing the local firewall
manual.summary.ReachableDirAddresses Rules for bypassing the local firewall (directory fetches)
manual.summary.ReachableORAddresses Rules for bypassing the local firewall (OR connections)
manual.summary.LongLivedPorts Ports requiring highly reliable relays
manual.summary.MapAddress Alias mappings for address requests
manual.summary.NewCircuitPeriod Period for considering the creation of new circuits
manual.summary.MaxCircuitDirtiness Duration for reusing constructed circuits
manual.summary.MaxClientCircuitsPending Number of circuits that can be in construction at once
manual.summary.NodeFamily Define relays as belonging to a family
manual.summary.EnforceDistinctSubnets Prevent use of multiple relays from the same subnet on a circuit
manual.summary.SocksPort Port for using tor as a Socks proxy
manual.summary.SocksPolicy Access policy for the socks port
manual.summary.SocksTimeout Time until idle or unestablished socks connections are closed
manual.summary.TokenBucketRefillInterval Frequency at which exhausted connections are checked for new traffic
manual.summary.TrackHostExits Maintains use of the same exit whenever connecting to this destination
manual.summary.TrackHostExitsExpire Time until use of an exit for tracking expires
manual.summary.UpdateBridgesFromAuthority Toggles fetching bridge descriptors from the authorities
manual.summary.UseBridges Make use of configured bridges
manual.summary.UseEntryGuards Use guard relays for first hop
manual.summary.GuardfractionFile File containing information with duration of our guards
manual.summary.UseGuardFraction Take guardfraction into account for path selection
manual.summary.NumEntryGuards Pool size of guard relays we'll select from
manual.summary.NumPrimaryGuards Pool size of strongly preferred guard relays we'll select from
manual.summary.NumDirectoryGuards Pool size of directory guards we'll select from
manual.summary.GuardLifetime Minimum time to keep entry guards
manual.summary.SafeSocks Toggles rejecting unsafe variants of the socks protocol
manual.summary.TestSocks Provide notices for if socks connections are of the safe or unsafe variants
manual.summary.VirtualAddrNetworkIPv4 IPv4 address range to use when needing a virtual address
manual.summary.VirtualAddrNetworkIPv6 IPv6 address range to use when needing a virtual address
manual.summary.AllowNonRFC953Hostnames Toggles blocking invalid characters in hostname resolution
manual.summary.HTTPTunnelPort Port on which to allow 'HTTP CONNECT' connections
manual.summary.TransPort Port for transparent proxying if the OS supports it
manual.summary.TransProxyType Proxy type to be used
manual.summary.NATDPort Port for forwarding ipfw NATD connections
manual.summary.AutomapHostsOnResolve Map addresses ending with special suffixes to virtual addresses
manual.summary.AutomapHostsSuffixes Address suffixes recognized by AutomapHostsOnResolve
manual.summary.DNSPort Port from which DNS responses are fetched instead of tor
manual.summary.ClientDNSRejectInternalAddresses Disregards anonymous DNS responses for internal addresses
manual.summary.ClientRejectInternalAddresses Disables use of Tor for internal connections
manual.summary.DownloadExtraInfo Toggles fetching of extra information about relays
manual.summary.WarnPlaintextPorts Toggles warnings for using risky ports
manual.summary.RejectPlaintextPorts Prevents connections on risky ports
manual.summary.OptimisticData Use exits without confirmation that prior connections succeeded
manual.summary.HSLayer2Nodes permissible relays for the second hop of HS circuits
manual.summary.HSLayer3Nodes permissible relays for the third hop of HS circuits
manual.summary.UseMicrodescriptors Retrieve microdescriptors rather than server descriptors
manual.summary.PathBiasCircThreshold Number of circuits through a guard before applying bias checks
manual.summary.PathBiasNoticeRate Fraction of circuits that must succeed before logging a notice
manual.summary.PathBiasWarnRate Fraction of circuits that must succeed before logging a warning
manual.summary.PathBiasExtremeRate Fraction of circuits that must succeed before logging an error
manual.summary.PathBiasDropGuards Drop guards failing to establish circuits
manual.summary.PathBiasScaleThreshold Circuits through a guard before scaling past observations down
manual.summary.PathBiasUseThreshold Number of streams through a circuit before applying bias checks
manual.summary.PathBiasNoticeUseRate Fraction of streams that must succeed before logging a notice
manual.summary.PathBiasExtremeUseRate Fraction of streams that must succeed before logging an error
manual.summary.PathBiasScaleUseThreshold Streams through a circuit before scaling past observations down
manual.summary.ClientUseIPv4 Allow IPv4 connections to guards and fetching consensus
manual.summary.ClientUseIPv6 Allow IPv6 connections to guards and fetching consensus
manual.summary.ClientPreferIPv6DirPort Prefer relays with IPv6 when fetching consensus
manual.summary.ClientPreferIPv6ORPort Prefer a guard's IPv6 rather than IPv4 endpoint
manual.summary.PathsNeededToBuildCircuits Portion of relays to require information for before making circuits
manual.summary.ClientBootstrapConsensusAuthorityDownloadInitialDelay Delay when bootstrapping before downloading descriptors from authorities
manual.summary.ClientBootstrapConsensusFallbackDownloadInitialDelay Delay when bootstrapping before downloading descriptors from fallbacks
manual.summary.ClientBootstrapConsensusAuthorityOnlyDownloadInitialDelay Delay when bootstrapping before downloading descriptors from authorities if fallbacks disabled
manual.summary.ClientBootstrapConsensusMaxInProgressTries Descriptor documents that can be downloaded in parallel
manual.summary.ClientBootstrapConsensusMaxInProgressTries Number of consensus download requests to allow in-flight at once
# Server Config Options
manual.summary.Address Overwrites address others will use to reach this relay
manual.summary.AssumeReachable Skips reachability test at startup
manual.summary.BridgeRelay Act as a bridge
manual.summary.BridgeDistribution Distribution method BridgeDB should provide our address by
manual.summary.ContactInfo Contact information for this relay
manual.summary.ExitRelay Allow relaying of exit traffic
manual.summary.ExitPolicy Traffic destinations that can exit from this relay
manual.summary.ExitPolicyRejectPrivate Prevent exiting on the local network
manual.summary.ExitPolicyRejectLocalInterfaces More extensive prevention of exiting on the local network
manual.summary.ReducedExitPolicy Customized reduced exit policy
manual.summary.IPv6Exit Allow clients to use us for IPv6 traffic
manual.summary.MaxOnionQueueDelay Duration to reject new onionskins if we have more than we can process
manual.summary.MyFamily Other relays this operator administers
manual.summary.Nickname Identifier for this relay
manual.summary.NumCPUs Number of processes spawned for decryption
manual.summary.ORPort Port used to accept relay traffic
manual.summary.PublishServerDescriptor Types of descriptors published
manual.summary.ShutdownWaitLength Delay before quitting after receiving a SIGINT signal
manual.summary.SSLKeyLifetime Lifetime for our link certificate
manual.summary.HeartbeatPeriod Rate at which an INFO level heartbeat message is sent
manual.summary.MainloopStats Include development information from the main loop with heartbeats
manual.summary.AccountingMax Amount of traffic before hibernating
manual.summary.AccountingRule Method to determine when the accounting limit is reached
manual.summary.AccountingStart Duration of an accounting period
manual.summary.RefuseUnknownExits Prevents relays not in the consensus from using us as an exit
manual.summary.ServerDNSResolvConfFile Overriding resolver config for DNS queries we provide
manual.summary.ServerDNSAllowBrokenConfig Toggles if we persist despite configuration parsing errors or not
manual.summary.ServerDNSSearchDomains Toggles if our DNS queries search for addresses in the local domain
manual.summary.ServerDNSDetectHijacking Toggles testing for DNS hijacking
manual.summary.ServerDNSTestAddresses Addresses to test to see if valid DNS queries are being hijacked
manual.summary.ServerDNSAllowNonRFC953Hostnames Toggles if we reject DNS queries with invalid characters
manual.summary.BridgeRecordUsageByCountry Tracks geoip information on bridge usage
manual.summary.ServerDNSRandomizeCase Toggles DNS query case randomization
manual.summary.GeoIPFile Path to file containing IPv4 geoip information
manual.summary.GeoIPv6File Path to file containing IPv6 geoip information
manual.summary.CellStatistics Toggles storing circuit queue duration to disk
manual.summary.PaddingStatistics Toggles storing padding counts
manual.summary.DirReqStatistics Toggles storing network status counts and performance to disk
manual.summary.EntryStatistics Toggles storing client connection counts to disk
manual.summary.ExitPortStatistics Toggles storing traffic and port usage data to disk
manual.summary.ConnDirectionStatistics Toggles storing connection use to disk
manual.summary.HiddenServiceStatistics Toggles storing hidden service stats to disk
manual.summary.ExtraInfoStatistics Publishes statistic data in the extra-info documents
manual.summary.ExtendAllowPrivateAddresses Allow circuits to be extended to the local network
manual.summary.MaxMemInQueues Threshold at which tor will terminate circuits to avoid running out of memory
manual.summary.DisableOOSCheck Don't close connections when running out of sockets
manual.summary.SigningKeyLifetime Duration the Ed25519 signing key is valid for
manual.summary.OfflineMasterKey Don't generate the master secret key
manual.summary.KeyDirectory Directory where secret keys reside
manual.summary.KeyDirectoryGroupReadable Group read permissions for the secret key directory
# Directory Server Options
manual.summary.DirPortFrontPage Publish this html file on the DirPort
manual.summary.DirPort Port for directory connections
manual.summary.DirPolicy Access policy for the DirPort
manual.summary.DirCache Provide cached descriptor information to other tor users
manual.summary.MaxConsensusAgeForDiffs Time to generate consensus caches for
# Directory Authority Server Options
manual.summary.AuthoritativeDirectory Act as a directory authority
manual.summary.V3AuthoritativeDirectory Generates a version 3 consensus
manual.summary.VersioningAuthoritativeDirectory Provides opinions on recommended versions of tor
manual.summary.RecommendedVersions Suggested versions of tor
manual.summary.RecommendedPackages Suggested versions of applications other than tor
manual.summary.RecommendedClientVersions Tor versions believed to be safe for clients
manual.summary.BridgeAuthoritativeDir Acts as a bridge authority
manual.summary.MinUptimeHidServDirectoryV2 Required uptime before accepting hidden service directory
manual.summary.RecommendedServerVersions Tor versions believed to be safe for relays
manual.summary.ConsensusParams Params entry of the networkstatus vote
manual.summary.DirAllowPrivateAddresses Toggles allowing arbitrary input or non-public IPs in descriptors
manual.summary.AuthDirBadExit Relays to be flagged as bad exits
manual.summary.AuthDirInvalid Relays from which the valid flag is withheld
manual.summary.AuthDirReject Relays to be dropped from the consensus
manual.summary.AuthDirBadExitCCs Countries for which to flag all relays as bad exits
manual.summary.AuthDirInvalidCCs Countries for which the valid flag is withheld
manual.summary.AuthDirRejectCCs Countries for which relays aren't accepted into the consensus
manual.summary.AuthDirListBadExits Toggles if we provide an opinion on bad exits
manual.summary.AuthDirMaxServersPerAddr Limit on the number of relays accepted per ip
manual.summary.AuthDirFastGuarantee Advertised rate at which the Fast flag is granted
manual.summary.AuthDirGuardBWGuarantee Advertised rate necessary to be a guard
manual.summary.AuthDirPinKeys Don't accept descriptors with conflicting identity keypairs
manual.summary.AuthDirSharedRandomness Participates in shared randomness voting
manual.summary.AuthDirTestEd25519LinkKeys Require proper Ed25519 key for the Running flag
manual.summary.BridgePassword Password for requesting bridge information
manual.summary.V3AuthVotingInterval Consensus voting interval
manual.summary.V3AuthVoteDelay Wait time to collect votes of other authorities
manual.summary.V3AuthDistDelay Wait time to collect the signatures of other authorities
manual.summary.V3AuthNIntervalsValid Number of voting intervals a consensus is valid for
manual.summary.V3BandwidthsFile Path to a file containing measured relay bandwidths
manual.summary.V3AuthUseLegacyKey Signs consensus with both the current and legacy keys
manual.summary.RephistTrackTime Discards old, unchanged reliability information
manual.summary.AuthDirHasIPv6Connectivity Descriptors can be retrieved over the authority's IPv6 ORPort
manual.summary.MinMeasuredBWsForAuthToIgnoreAdvertised Total measured value before advertised bandwidths are treated as unreliable
# Hidden Service Options
manual.summary.HiddenServiceDir Directory contents for the hidden service
manual.summary.HiddenServicePort Port the hidden service is provided on
manual.summary.PublishHidServDescriptors Toggles automated publishing of the hidden service to the rendezvous directory
manual.summary.HiddenServiceVersion Version for published hidden service descriptors
manual.summary.HiddenServiceAuthorizeClient Restricts access to the hidden service
manual.summary.HiddenServiceAllowUnknownPorts Allow rendezvous circuits on unrecognized ports
manual.summary.HiddenServiceExportCircuitID Exposes incoming client circuits via the given protocol
manual.summary.HiddenServiceMaxStreams Maximum streams per rendezvous circuit
manual.summary.HiddenServiceMaxStreamsCloseCircuit Closes rendezvous circuits that exceed the maximum number of streams
manual.summary.RendPostPeriod Period at which the rendezvous service descriptors are refreshed
manual.summary.HiddenServiceDirGroupReadable Group read permissions for the hidden service directory
manual.summary.HiddenServiceNumIntroductionPoints Number of introduction points the hidden service will have
manual.summary.HiddenServiceSingleHopMode Allow non-anonymous single hop hidden services
manual.summary.HiddenServiceNonAnonymousMode Enables HiddenServiceSingleHopMode to be set
# DoS Mitigation Options
manual.summary.DoSCircuitCreationEnabled Enables circuit creation DoS mitigation
manual.summary.DoSCircuitCreationMinConnections Connection rate when clients are a suspected DoS
manual.summary.DoSCircuitCreationRate Acceptable rate for circuit creation
manual.summary.DoSCircuitCreationBurst Accept burst of circuit creation up to this rate
manual.summary.DoSCircuitCreationDefenseType Method for mitigating circuit creation DoS
manual.summary.DoSCircuitCreationDefenseTimePeriod Duration of DoS mitigation
manual.summary.DoSConnectionEnabled Enables connection DoS mitigation
manual.summary.DoSConnectionMaxConcurrentCount Acceptable number of connections
manual.summary.DoSConnectionDefenseType Method for mitigating connection DoS
manual.summary.DoSRefuseSingleHopClientRendezvous Prevent establishment of single hop rendezvous points
# Testing Network Options
manual.summary.TestingTorNetwork Overrides other options to be a testing network
manual.summary.TestingV3AuthInitialVotingInterval Overrides V3AuthVotingInterval for the first consensus
manual.summary.TestingV3AuthInitialVoteDelay Overrides V3AuthVoteDelay for the first consensus
manual.summary.TestingV3AuthInitialDistDelay Overrides V3AuthDistDelay for the first consensus
manual.summary.TestingV3AuthVotingStartOffset Offset for the point at which the authority votes
manual.summary.TestingAuthDirTimeToLearnReachability Delay until opinions are given about which relays are running or not
manual.summary.TestingEstimatedDescriptorPropagationTime Delay before clients attempt to fetch descriptors from directory caches
manual.summary.TestingMinFastFlagThreshold Minimum value for the Fast flag
manual.summary.TestingServerDownloadInitialDelay Delay before downloading resources for relaying
manual.summary.TestingClientDownloadInitialDelay Delay before downloading resources for client usage
manual.summary.TestingServerConsensusDownloadInitialDelay Delay before downloading descriptors for relaying
manual.summary.TestingClientConsensusDownloadInitialDelay Delay before downloading descriptors for client usage
manual.summary.TestingBridgeDownloadInitialDelay Delay before downloading bridge descriptors
manual.summary.TestingBridgeBootstrapDownloadInitialDelay Delay before downloading bridge descriptors when first started
manual.summary.TestingClientMaxIntervalWithoutRequest Maximum time to wait to batch requests for missing descriptors
manual.summary.TestingDirConnectionMaxStall Duration to let directory connections stall before timing out
manual.summary.TestingDirAuthVoteExit Relays to give the Exit flag to
manual.summary.TestingDirAuthVoteExitIsStrict Only grant the Exit flag to relays listed by TestingDirAuthVoteExit
manual.summary.TestingDirAuthVoteGuard Relays to give the Guard flag to
manual.summary.TestingDirAuthVoteGuardIsStrict Only grant the Guard flag to relays listed by TestingDirAuthVoteGuard
manual.summary.TestingDirAuthVoteHSDir Relays to give the HSDir flag to
manual.summary.TestingDirAuthVoteHSDirIsStrict Only grant the HSDir flag to relays listed by TestingDirAuthVoteHSDir
manual.summary.TestingEnableConnBwEvent Allow controllers to request CONN_BW events
manual.summary.TestingEnableCellStatsEvent Allow controllers to request CELL_STATS events
manual.summary.TestingMinExitFlagThreshold Lower bound for assigning the Exit flag
manual.summary.TestingLinkCertLifetime Duration of our ed25519 certificate
manual.summary.TestingAuthKeyLifetime Duration for our ed25519 signing key
manual.summary.TestingLinkKeySlop Time before expiration that we replace our ed25519 link key
manual.summary.TestingAuthKeySlop Time before expiration that we replace our ed25519 authentication key
manual.summary.TestingSigningKeySlop Time before expiration that we replace our ed25519 signing key
# Brief description of tor events
event.description.debug Logging at the debug runlevel. This is low level, high volume information about tor's internals that generally isn't useful to users.
event.description.info Logging at the info runlevel. This is low level information of important internal processes.
event.description.notice Logging at the notice runlevel. This runlevel and above are shown to users by default, and includes general information the user should be aware of.
event.description.warn Logging at the warning runlevel. These are problems the user should be aware of.
event.description.err Logging at the error runlevel. These are critical issues that may prevent tor from working properly.
event.description.addrmap New address mapping for our DNS cache.
event.description.authdir_newdescs Indicates we just received a new descriptor. This is only used by directory authorities.
event.description.buildtimeout_set Indicates the timeout value for a circuit has changed.
event.description.bw Event emitted every second with the bytes sent and received by tor.
event.description.cell_stats Event emitted every second with the count of the number of cell types per circuit.
event.description.circ Indicates that a circuit we've established through the tor network has been created, changed, or closed.
event.description.circ_bw Event emitted every second with the bytes sent and received on a per-circuit basis.
event.description.circ_minor Minor changes to our circuits, such as reuse of existing circuits for a different purpose.
event.description.clients_seen Periodic summary of the countries we've seen users connect from. This is only used by bridge relays.
event.description.conf_changed Indicates that our torrc configuration has changed. This could be in response to a SETCONF or RELOAD signal.
event.description.conn_bw Event emitted every second with the bytes sent and received on a per-connection basis.
event.description.descchanged Indicates that our descriptor has changed.
event.description.guard Indicates that the set of relays we use for our initial connection into the tor network (guards) have changed.
event.description.hs_desc Received a hidden service descriptor that wasn't yet cached.
event.description.hs_desc_content Content of a hidden service descriptor we've fetched.
event.description.network_liveness Emitted when the network becomes reachable or unreachable.
event.description.newconsensus Received a new hourly consensus of relays in the tor network.
event.description.newdesc Indicates that a new descriptor is available.
event.description.ns Consensus information for an individual relay has changed. This could be due to receiving a new consensus or tor locally decides a relay is up or down.
event.description.orconn Change in our connections as a relay.
event.description.signal Indicates that tor has received and acted upon a signal being sent to its process.
event.description.status_client Notification of a change in tor's state as a client (ie user).
event.description.status_general Notification of a change in tor's state.
event.description.status_server Notification of a change in tor's state as a relay.
event.description.stream Communication over a circuit we've established. For instance, Firefox making a connection through tor.
event.description.stream_bw Event emitted every second with the bytes sent and received for a specific stream.
event.description.tb_empty Statistics for when token buckets are refilled. This is only used when TestingTorNetwork is set.
event.description.transport_launched Emitted when a pluggable transport is launched.

View file

@ -1,8 +1,8 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Supports communication with sockets speaking the Tor control protocol. This
Supports communication with sockets speaking Tor protocols. This
allows us to send messages as basic strings, and receive responses as
:class:`~stem.response.ControlMessage` instances.
@ -46,18 +46,21 @@ Tor...
::
ControlSocket - Socket wrapper that speaks the tor control protocol.
|- ControlPort - Control connection via a port.
| |- get_address - provides the ip address of our socket
| +- get_port - provides the port of our socket
BaseSocket - Thread safe socket.
|- RelaySocket - Socket for a relay's ORPort.
| |- send - sends a message to the socket
| +- recv - receives a response from the socket
|
|- ControlSocketFile - Control connection via a local file socket.
| +- get_socket_path - provides the path of the socket we connect to
|- ControlSocket - Socket wrapper that speaks the tor control protocol.
| |- ControlPort - Control connection via a port.
| |- ControlSocketFile - Control connection via a local file socket.
| |
| |- send - sends a message to the socket
| +- recv - receives a ControlMessage from the socket
|
|- send - sends a message to the socket
|- recv - receives a ControlMessage from the socket
|- is_alive - reports if the socket is known to be closed
|- is_localhost - returns if the socket is for the local system or not
|- connection_time - timestamp when socket last connected or disconnected
|- connect - connects a new socket
|- close - shuts down the socket
+- __enter__ / __exit__ - manages socket connection
@ -71,6 +74,7 @@ from __future__ import absolute_import
import re
import socket
import ssl
import threading
import time
@ -80,15 +84,21 @@ import stem.util.str_tools
from stem.util import log
MESSAGE_PREFIX = re.compile(b'^[a-zA-Z0-9]{3}[-+ ]')
ERROR_MSG = 'Error while receiving a control message (%s): %s'
class ControlSocket(object):
# lines to limit our trace logging to, you can disable this by setting it to None
TRUNCATE_LOGS = 10
# maximum number of bytes to read at a time from a relay socket
MAX_READ_BUFFER_LEN = 10 * 1024 * 1024
class BaseSocket(object):
"""
Wrapper for a socket connection that speaks the Tor control protocol. To the
better part this transparently handles the formatting for sending and
receiving complete messages. All methods are thread safe.
Callers should not instantiate this class directly, but rather use subclasses
which are expected to implement the **_make_socket()** method.
Thread safe socket, providing common socket functionality.
"""
def __init__(self):
@ -103,95 +113,21 @@ class ControlSocket(object):
self._send_lock = threading.RLock()
self._recv_lock = threading.RLock()
def send(self, message, raw = False):
"""
Formats and sends a message to the control socket. For more information see
the :func:`~stem.socket.send_message` function.
:param str message: message to be formatted and sent to the socket
:param bool raw: leaves the message formatting untouched, passing it to the socket as-is
:raises:
* :class:`stem.SocketError` if a problem arises in using the socket
* :class:`stem.SocketClosed` if the socket is known to be shut down
"""
with self._send_lock:
try:
if not self.is_alive():
raise stem.SocketClosed()
send_message(self._socket_file, message, raw)
except stem.SocketClosed as exc:
# if send_message raises a SocketClosed then we should properly shut
# everything down
if self.is_alive():
self.close()
raise exc
def recv(self):
"""
Receives a message from the control socket, blocking until we've received
one. For more information see the :func:`~stem.socket.recv_message` function.
:returns: :class:`~stem.response.ControlMessage` for the message received
:raises:
* :class:`stem.ProtocolError` the content from the socket is malformed
* :class:`stem.SocketClosed` if the socket closes before we receive a complete message
"""
with self._recv_lock:
try:
# makes a temporary reference to the _socket_file because connect()
# and close() may set or unset it
socket_file = self._socket_file
if not socket_file:
raise stem.SocketClosed()
return recv_message(socket_file)
except stem.SocketClosed as exc:
# If recv_message raises a SocketClosed then we should properly shut
# everything down. However, there's a couple cases where this will
# cause deadlock...
#
# * this socketClosed was *caused by* a close() call, which is joining
# on our thread
#
# * a send() call that's currently in flight is about to call close(),
# also attempting to join on us
#
# To resolve this we make a non-blocking call to acquire the send lock.
# If we get it then great, we can close safely. If not then one of the
# above are in progress and we leave the close to them.
if self.is_alive():
if self._send_lock.acquire(False):
self.close()
self._send_lock.release()
raise exc
def is_alive(self):
"""
Checks if the socket is known to be closed. We won't be aware if it is
until we either use it or have explicitly shut it down.
In practice a socket derived from a port knows about its disconnection
after a failed :func:`~stem.socket.ControlSocket.recv` call. Socket file
derived connections know after either a
:func:`~stem.socket.ControlSocket.send` or
:func:`~stem.socket.ControlSocket.recv`.
after failing to receive data, whereas socket file derived connections
know after either sending or receiving data.
This means that to have reliable detection for when we're disconnected
you need to continually pull from the socket (which is part of what the
:class:`~stem.control.BaseController` does).
:returns: **bool** that's **True** if our socket is connected and **False** otherwise
:returns: **bool** that's **True** if our socket is connected and **False**
otherwise
"""
return self._is_alive
@ -200,7 +136,8 @@ class ControlSocket(object):
"""
Returns if the connection is for the local system or not.
:returns: **bool** that's **True** if the connection is for the local host and **False** otherwise
:returns: **bool** that's **True** if the connection is for the local host
and **False** otherwise
"""
return False
@ -295,12 +232,78 @@ class ControlSocket(object):
if is_change:
self._close()
def _send(self, message, handler):
"""
Send message in a thread safe manner. Handler is expected to be of the form...
::
my_handler(socket, socket_file, message)
"""
with self._send_lock:
try:
if not self.is_alive():
raise stem.SocketClosed()
handler(self._socket, self._socket_file, message)
except stem.SocketClosed:
# if send_message raises a SocketClosed then we should properly shut
# everything down
if self.is_alive():
self.close()
raise
def _recv(self, handler):
"""
Receives a message in a thread safe manner. Handler is expected to be of the form...
::
my_handler(socket, socket_file)
"""
with self._recv_lock:
try:
# makes a temporary reference to the _socket_file because connect()
# and close() may set or unset it
my_socket, my_socket_file = self._socket, self._socket_file
if not my_socket or not my_socket_file:
raise stem.SocketClosed()
return handler(my_socket, my_socket_file)
except stem.SocketClosed:
# If recv_message raises a SocketClosed then we should properly shut
# everything down. However, there's a couple cases where this will
# cause deadlock...
#
# * This SocketClosed was *caused by* a close() call, which is joining
# on our thread.
#
# * A send() call that's currently in flight is about to call close(),
# also attempting to join on us.
#
# To resolve this we make a non-blocking call to acquire the send lock.
# If we get it then great, we can close safely. If not then one of the
# above are in progress and we leave the close to them.
if self.is_alive():
if self._send_lock.acquire(False):
self.close()
self._send_lock.release()
raise
def _get_send_lock(self):
"""
The send lock is useful to classes that interact with us at a deep level
because it's used to lock :func:`stem.socket.ControlSocket.connect` /
:func:`stem.socket.ControlSocket.close`, and by extension our
:func:`stem.socket.ControlSocket.is_alive` state changes.
:func:`stem.socket.BaseSocket.close`, and by extension our
:func:`stem.socket.BaseSocket.is_alive` state changes.
:returns: **threading.RLock** that governs sending messages to our socket
and state changes
@ -339,13 +342,135 @@ class ControlSocket(object):
* **NotImplementedError** if not implemented by a subclass
"""
raise NotImplementedError('Unsupported Operation: this should be implemented by the ControlSocket subclass')
raise NotImplementedError('Unsupported Operation: this should be implemented by the BaseSocket subclass')
class RelaySocket(BaseSocket):
"""
`Link-level connection
<https://gitweb.torproject.org/torspec.git/tree/tor-spec.txt>`_ to a Tor
relay.
.. versionadded:: 1.7.0
:var str address: address our socket connects to
:var int port: ORPort our socket connects to
"""
def __init__(self, address = '127.0.0.1', port = 9050, connect = True):
"""
RelaySocket constructor.
:param str address: ip address of the relay
:param int port: orport of the relay
:param bool connect: connects to the socket if True, leaves it unconnected otherwise
:raises: :class:`stem.SocketError` if connect is **True** and we're
unable to establish a connection
"""
super(RelaySocket, self).__init__()
self.address = address
self.port = port
if connect:
self.connect()
def send(self, message):
"""
Sends a message to the relay's ORPort.
:param str message: message to be formatted and sent to the socket
:raises:
* :class:`stem.SocketError` if a problem arises in using the socket
* :class:`stem.SocketClosed` if the socket is known to be shut down
"""
self._send(message, lambda s, sf, msg: _write_to_socket(sf, msg))
def recv(self):
"""
Receives a message from the relay.
:returns: bytes for the message received
:raises:
* :class:`stem.ProtocolError` the content from the socket is malformed
* :class:`stem.SocketClosed` if the socket closes before we receive a complete message
"""
# TODO: Is MAX_READ_BUFFER_LEN defined in the spec? Not sure where it came
# from.
return self._recv(lambda s, sf: s.recv(MAX_READ_BUFFER_LEN))
def is_localhost(self):
return self.address == '127.0.0.1'
def _make_socket(self):
try:
relay_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
relay_socket.connect((self.address, self.port))
return ssl.wrap_socket(relay_socket)
except socket.error as exc:
raise stem.SocketError(exc)
class ControlSocket(BaseSocket):
"""
Wrapper for a socket connection that speaks the Tor control protocol. To the
better part this transparently handles the formatting for sending and
receiving complete messages.
Callers should not instantiate this class directly, but rather use subclasses
which are expected to implement the **_make_socket()** method.
"""
def __init__(self):
super(ControlSocket, self).__init__()
def send(self, message):
"""
Formats and sends a message to the control socket. For more information see
the :func:`~stem.socket.send_message` function.
.. deprecated:: 1.7.0
The **raw** argument was unhelpful and will be removed. Use
:func:`stem.socket.send_message` if you need this level of control
instead.
:param str message: message to be formatted and sent to the socket
:raises:
* :class:`stem.SocketError` if a problem arises in using the socket
* :class:`stem.SocketClosed` if the socket is known to be shut down
"""
self._send(message, lambda s, sf, msg: send_message(sf, msg))
def recv(self):
"""
Receives a message from the control socket, blocking until we've received
one. For more information see the :func:`~stem.socket.recv_message` function.
:returns: :class:`~stem.response.ControlMessage` for the message received
:raises:
* :class:`stem.ProtocolError` the content from the socket is malformed
* :class:`stem.SocketClosed` if the socket closes before we receive a complete message
"""
return self._recv(lambda s, sf: recv_message(sf))
class ControlPort(ControlSocket):
"""
Control connection to tor. For more information see tor's ControlPort torrc
option.
:var str address: address our socket connects to
:var int port: ControlPort our socket connects to
"""
def __init__(self, address = '127.0.0.1', port = 9051, connect = True):
@ -361,8 +486,8 @@ class ControlPort(ControlSocket):
"""
super(ControlPort, self).__init__()
self._control_addr = address
self._control_port = port
self.address = address
self.port = port
if connect:
self.connect()
@ -371,27 +496,33 @@ class ControlPort(ControlSocket):
"""
Provides the ip address our socket connects to.
.. deprecated:: 1.7.0
Use the **address** attribute instead.
:returns: str with the ip address of our socket
"""
return self._control_addr
return self.address
def get_port(self):
"""
Provides the port our socket connects to.
.. deprecated:: 1.7.0
Use the **port** attribute instead.
:returns: int with the port of our socket
"""
return self._control_port
return self.port
def is_localhost(self):
return self._control_addr == '127.0.0.1'
return self.address == '127.0.0.1'
def _make_socket(self):
try:
control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
control_socket.connect((self._control_addr, self._control_port))
control_socket.connect((self.address, self.port))
return control_socket
except socket.error as exc:
raise stem.SocketError(exc)
@ -401,6 +532,8 @@ class ControlSocketFile(ControlSocket):
"""
Control connection to tor. For more information see tor's ControlSocket torrc
option.
:var str path: filesystem path of the socket we connect to
"""
def __init__(self, path = '/var/run/tor/control', connect = True):
@ -415,7 +548,7 @@ class ControlSocketFile(ControlSocket):
"""
super(ControlSocketFile, self).__init__()
self._socket_path = path
self.path = path
if connect:
self.connect()
@ -424,10 +557,13 @@ class ControlSocketFile(ControlSocket):
"""
Provides the path our socket connects to.
.. deprecated:: 1.7.0
Use the **path** attribute instead.
:returns: str with the path for our control socket
"""
return self._socket_path
return self.path
def is_localhost(self):
return True
@ -435,7 +571,7 @@ class ControlSocketFile(ControlSocket):
def _make_socket(self):
try:
control_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
control_socket.connect(self._socket_path)
control_socket.connect(self.path)
return control_socket
except socket.error as exc:
raise stem.SocketError(exc)
@ -476,14 +612,20 @@ def send_message(control_file, message, raw = False):
if not raw:
message = send_formatting(message)
try:
control_file.write(stem.util.str_tools._to_bytes(message))
control_file.flush()
_write_to_socket(control_file, message)
if log.is_tracing():
log_message = message.replace('\r\n', '\n').rstrip()
log.trace('Sent to tor:\n' + log_message)
msg_div = '\n' if '\n' in log_message else ' '
log.trace('Sent to tor:%s%s' % (msg_div, log_message))
def _write_to_socket(socket_file, message):
try:
socket_file.write(stem.util.str_tools._to_bytes(message))
socket_file.flush()
except socket.error as exc:
log.info('Failed to send message: %s' % exc)
log.info('Failed to send: %s' % exc)
# When sending there doesn't seem to be a reliable method for
# distinguishing between failures from a disconnect verses other things.
@ -497,7 +639,7 @@ def send_message(control_file, message, raw = False):
# if the control_file has been closed then flush will receive:
# AttributeError: 'NoneType' object has no attribute 'sendall'
log.info('Failed to send message: file has been closed')
log.info('Failed to send: file has been closed')
raise stem.SocketClosed('file has been closed')
@ -517,22 +659,16 @@ def recv_message(control_file):
a complete message
"""
parsed_content, raw_content = [], b''
logging_prefix = 'Error while receiving a control message (%s): '
parsed_content, raw_content, first_line = None, None, True
while True:
try:
# From a real socket readline() would always provide bytes, but during
# tests we might be given a StringIO in which case it's unicode under
# python 3.x.
line = stem.util.str_tools._to_bytes(control_file.readline())
line = control_file.readline()
except AttributeError:
# if the control_file has been closed then we will receive:
# AttributeError: 'NoneType' object has no attribute 'recv'
prefix = logging_prefix % 'SocketClosed'
log.info(prefix + 'socket file has been closed')
log.info(ERROR_MSG % ('SocketClosed', 'socket file has been closed'))
raise stem.SocketClosed('socket file has been closed')
except (socket.error, ValueError) as exc:
# When disconnected we get...
@ -543,70 +679,67 @@ def recv_message(control_file):
# Python 3:
# ValueError: I/O operation on closed file.
prefix = logging_prefix % 'SocketClosed'
log.info(prefix + 'received exception "%s"' % exc)
log.info(ERROR_MSG % ('SocketClosed', 'received exception "%s"' % exc))
raise stem.SocketClosed(exc)
raw_content += line
# Parses the tor control lines. These are of the form...
# <status code><divider><content>\r\n
if len(line) == 0:
if not line:
# if the socket is disconnected then the readline() method will provide
# empty content
prefix = logging_prefix % 'SocketClosed'
log.info(prefix + 'empty socket content')
log.info(ERROR_MSG % ('SocketClosed', 'empty socket content'))
raise stem.SocketClosed('Received empty socket content.')
elif len(line) < 4:
prefix = logging_prefix % 'ProtocolError'
log.info(prefix + 'line too short, "%s"' % log.escape(line))
raise stem.ProtocolError('Badly formatted reply line: too short')
elif not re.match(b'^[a-zA-Z0-9]{3}[-+ ]', line):
prefix = logging_prefix % 'ProtocolError'
log.info(prefix + 'malformed status code/divider, "%s"' % log.escape(line))
elif not MESSAGE_PREFIX.match(line):
log.info(ERROR_MSG % ('ProtocolError', 'malformed status code/divider, "%s"' % log.escape(line)))
raise stem.ProtocolError('Badly formatted reply line: beginning is malformed')
elif not line.endswith(b'\r\n'):
prefix = logging_prefix % 'ProtocolError'
log.info(prefix + 'no CRLF linebreak, "%s"' % log.escape(line))
log.info(ERROR_MSG % ('ProtocolError', 'no CRLF linebreak, "%s"' % log.escape(line)))
raise stem.ProtocolError('All lines should end with CRLF')
line = line[:-2] # strips off the CRLF
status_code, divider, content = line[:3], line[3:4], line[4:]
status_code, divider, content = line[:3], line[3:4], line[4:-2] # strip CRLF off content
if stem.prereq.is_python_3():
status_code = stem.util.str_tools._to_unicode(status_code)
divider = stem.util.str_tools._to_unicode(divider)
# Most controller responses are single lines, in which case we don't need
# so much overhead.
if first_line:
if divider == ' ':
_log_trace(line)
return stem.response.ControlMessage([(status_code, divider, content)], line)
else:
parsed_content, raw_content, first_line = [], bytearray(), False
raw_content += line
if divider == '-':
# mid-reply line, keep pulling for more content
parsed_content.append((status_code, divider, content))
elif divider == ' ':
# end of the message, return the message
parsed_content.append((status_code, divider, content))
log_message = raw_content.replace(b'\r\n', b'\n').rstrip()
log.trace('Received from tor:\n' + stem.util.str_tools._to_unicode(log_message))
return stem.response.ControlMessage(parsed_content, raw_content)
_log_trace(bytes(raw_content))
return stem.response.ControlMessage(parsed_content, bytes(raw_content))
elif divider == '+':
# data entry, all of the following lines belong to the content until we
# get a line with just a period
content_block = bytearray(content)
while True:
try:
line = stem.util.str_tools._to_bytes(control_file.readline())
line = control_file.readline()
raw_content += line
except socket.error as exc:
prefix = logging_prefix % 'SocketClosed'
log.info(prefix + 'received an exception while mid-way through a data reply (exception: "%s", read content: "%s")' % (exc, log.escape(raw_content)))
log.info(ERROR_MSG % ('SocketClosed', 'received an exception while mid-way through a data reply (exception: "%s", read content: "%s")' % (exc, log.escape(bytes(raw_content)))))
raise stem.SocketClosed(exc)
raw_content += line
if not line.endswith(b'\r\n'):
prefix = logging_prefix % 'ProtocolError'
log.info(prefix + 'CRLF linebreaks missing from a data reply, "%s"' % log.escape(raw_content))
log.info(ERROR_MSG % ('ProtocolError', 'CRLF linebreaks missing from a data reply, "%s"' % log.escape(bytes(raw_content))))
raise stem.ProtocolError('All lines should end with CRLF')
elif line == b'.\r\n':
break # data block termination
@ -619,18 +752,17 @@ def recv_message(control_file):
if line.startswith(b'..'):
line = line[1:]
# appends to previous content, using a newline rather than CRLF
# separator (more conventional for multi-line string content outside
# the windows world)
content_block += b'\n' + line
content += b'\n' + line
# joins the content using a newline rather than CRLF separator (more
# conventional for multi-line string content outside the windows world)
parsed_content.append((status_code, divider, content))
parsed_content.append((status_code, divider, bytes(content_block)))
else:
# this should never be reached due to the prefix regex, but might as well
# be safe...
prefix = logging_prefix % 'ProtocolError'
log.warn(prefix + "\"%s\" isn't a recognized divider type" % divider)
log.warn(ERROR_MSG % ('ProtocolError', "\"%s\" isn't a recognized divider type" % divider))
raise stem.ProtocolError("Unrecognized divider type '%s': %s" % (divider, stem.util.str_tools._to_unicode(line)))
@ -661,3 +793,19 @@ def send_formatting(message):
return '+%s\r\n.\r\n' % message.replace('\n', '\r\n')
else:
return message + '\r\n'
def _log_trace(response):
  """
  Logs content received from tor at the TRACE runlevel, truncating overly
  long messages when TRUNCATE_LOGS is set.

  :param bytes response: raw reply content received from tor
  """

  if not log.is_tracing():
    return

  message = stem.util.str_tools._to_unicode(response.replace(b'\r\n', b'\n').rstrip())
  message_lines = message.split('\n')

  if TRUNCATE_LOGS and len(message_lines) > TRUNCATE_LOGS:
    dropped = len(message_lines) - TRUNCATE_LOGS
    message = '\n'.join(message_lines[:TRUNCATE_LOGS] + ['... %i more lines...' % dropped])

  if len(message_lines) <= 2:
    # short replies are logged inline, with linebreaks escaped
    log.trace('Received from tor: %s' % message.replace('\n', '\\n'))
  else:
    log.trace('Received from tor:\n%s' % message)

View file

@ -1,10 +1,14 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Utility functions used by the stem library.
"""
import datetime
import stem.prereq
__all__ = [
'conf',
'connection',
@ -17,4 +21,135 @@ __all__ = [
'term',
'test_tools',
'tor_tools',
'datetime_to_unix',
]
# Beginning with Stem 1.7 we take attribute types into account when hashing
# and checking equality. That is to say, if two Stem classes' attributes are
# the same but use different types we no longer consider them to be equal.
# For example...
#
# s1 = Schedule(classes = ['Math', 'Art', 'PE'])
# s2 = Schedule(classes = ('Math', 'Art', 'PE'))
#
# Prior to Stem 1.7 s1 and s2 would be equal, but afterward unless Stem's
# constructor normalizes the types they won't.
#
# This change in behavior is the right thing to do but carries some risk, so
# we provide the following constant to revert to legacy behavior. If you find
# yourself using it then please let me know (https://www.atagar.com/contact/)
# since this flag will go away in the future.
HASH_TYPES = True
def _hash_value(val):
  # Provides a hash for an attribute value, optionally seeded with the
  # value's type so equal content of differing types hash differently.

  if not HASH_TYPES:
    result = 0
  elif _is_str(val):
    # TODO: I hate doing this but until Python 2.x support is dropped we
    # can't readily be strict about bytes vs unicode for attributes. This
    # is because test assertions often use strings, and normalizing this
    # would require wrapping most with to_unicode() calls.
    #
    # This hack will go away when we drop Python 2.x support.
    result = hash('str')
  else:
    # Hashing common builtins (ints, bools, etc) provides consistent values
    # but many other types vary their value on interpreter invocation.
    result = hash(str(type(val)))

  if isinstance(val, (tuple, list)):
    for entry in val:
      result = result * 1024 + hash(entry)
  elif isinstance(val, dict):
    for key in sorted(val.keys()):
      result = result * 2048 + hash(key) * 1024 + hash(val[key])
  else:
    result += hash(val)

  return result
def _is_str(val):
  """
  Check if a value is a string. This will be removed when we no longer provide
  backward compatibility for the Python 2.x series.

  :param object val: value to be checked

  :returns: **True** if the value is some form of string (unicode or bytes),
    and **False** otherwise
  """

  # 'unicode' only exists under python 2.x, so the tuple is built per-branch
  string_types = (bytes, str) if stem.prereq.is_python_3() else (bytes, unicode)
  return isinstance(val, string_types)
def _is_int(val):
  """
  Check if a value is an integer. This will be removed when we no longer
  provide backward compatibility for the Python 2.x series.

  :param object val: value to be checked

  :returns: **True** if the value is some form of integer (int or long),
    and **False** otherwise
  """

  # 'long' only exists under python 2.x
  integer_types = int if stem.prereq.is_python_3() else (int, long)
  return isinstance(val, integer_types)
def datetime_to_unix(timestamp):
  """
  Converts a utc datetime object to a unix timestamp.

  .. versionadded:: 1.5.0

  :param datetime timestamp: timestamp to be converted

  :returns: **float** for the unix timestamp of the given datetime object
  """

  epoch = datetime.datetime(1970, 1, 1)

  if stem.prereq._is_python_26():
    # total_seconds() was added in python 2.7 so this mirrors its documented
    # equivalent, rather than dropping sub-second precision like the prior
    # 'days * 86400 + seconds' fallback did
    delta = timestamp - epoch
    return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6
  else:
    return (timestamp - epoch).total_seconds()
def _hash_attr(obj, *attributes, **kwargs):
  """
  Provide a hash value for the given set of attributes.

  :param Object obj: object to be hashed
  :param list attributes: attribute names to take into account
  :param bool cache: persists hash in a '_cached_hash' object attribute
  :param class parent: include parent's hash value

  :returns: **int** hash for the requested attributes
  """

  use_cache = kwargs.get('cache', False)
  parent_class = kwargs.get('parent', None)
  previously_cached = getattr(obj, '_cached_hash', None)

  if use_cache and previously_cached is not None:
    return previously_cached

  # seed with our parent's hash (if given) followed by our own type

  result = parent_class.__hash__(obj) if parent_class else 0
  result = result * 1024 + hash(str(type(obj)))

  for attr in attributes:
    result = result * 1024 + _hash_value(getattr(obj, attr))

  if use_cache:
    setattr(obj, '_cached_hash', result)

  return result

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -161,6 +161,8 @@ import inspect
import os
import threading
import stem.prereq
from stem.util import log
try:
@ -213,9 +215,8 @@ def config_dict(handle, conf_mappings, handler = None):
For more information about how we convert types see our
:func:`~stem.util.conf.Config.get` method.
**The dictionary you get from this is managed by the
:class:`~stem.util.conf.Config` class and should be treated as being
read-only.**
**The dictionary you get from this is managed by the Config class and should
be treated as being read-only.**
:param str handle: unique identifier for a config instance
:param dict conf_mappings: config key/value mappings used as our defaults
@ -274,15 +275,15 @@ def uses_settings(handle, path, lazy_load = True):
config = get_config(handle)
if not lazy_load and not config.get('settings_loaded', False):
if not lazy_load and not config._settings_loaded:
config.load(path)
config.set('settings_loaded', 'true')
config._settings_loaded = True
def decorator(func):
def wrapped(*args, **kwargs):
if lazy_load and not config.get('settings_loaded', False):
if lazy_load and not config._settings_loaded:
config.load(path)
config.set('settings_loaded', 'true')
config._settings_loaded = True
if 'config' in inspect.getargspec(func).args:
return func(*args, config = config, **kwargs)
@ -446,11 +447,14 @@ class Config(object):
#
# Information for what values fail to load and why are reported to
# 'stem.util.log'.
.. versionchanged:: 1.7.0
Class can now be used as a dictionary.
"""
def __init__(self):
self._path = None # location we last loaded from or saved to
self._contents = {} # configuration key/value pairs
self._contents = OrderedDict() # configuration key/value pairs
self._listeners = [] # functors to be notified of config changes
# used for accessing _contents
@ -459,7 +463,10 @@ class Config(object):
# keys that have been requested (used to provide unused config contents)
self._requested_keys = set()
def load(self, path = None):
# flag to support lazy loading in uses_settings()
self._settings_loaded = False
def load(self, path = None, commenting = True):
"""
Reads in the contents of the given path, adding its configuration values
to our current contents. If the path is a directory then this loads each
@ -468,8 +475,16 @@ class Config(object):
.. versionchanged:: 1.3.0
Added support for directories.
.. versionchanged:: 1.3.0
Added the **commenting** argument.
.. versionchanged:: 1.6.0
Avoid loading vim swap files.
:param str path: file or directory path to be loaded, this uses the last
loaded path if not provided
:param bool commenting: ignore line content after a '#' if **True**, read
otherwise
:raises:
* **IOError** if we fail to read the file (it doesn't exist, insufficient
@ -485,6 +500,9 @@ class Config(object):
if os.path.isdir(self._path):
for root, dirnames, filenames in os.walk(self._path):
for filename in filenames:
if filename.endswith('.swp'):
continue # vim swap file
self.load(os.path.join(root, filename))
return
@ -497,7 +515,7 @@ class Config(object):
line = read_contents.pop(0)
# strips any commenting or excess whitespace
comment_start = line.find('#')
comment_start = line.find('#') if commenting else -1
if comment_start != -1:
line = line[:comment_start]
@ -506,14 +524,10 @@ class Config(object):
# parse the key/value pair
if line:
try:
if ' ' in line:
key, value = line.split(' ', 1)
value = value.strip()
except ValueError:
log.debug("Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''" % (line, line))
key, value = line, ''
if not value:
self.set(key, value.strip(), False)
else:
# this might be a multi-line entry, try processing it as such
multiline_buffer = []
@ -523,10 +537,9 @@ class Config(object):
multiline_buffer.append(content)
if multiline_buffer:
self.set(key, '\n'.join(multiline_buffer), False)
continue
self.set(key, value, False)
self.set(line, '\n'.join(multiline_buffer), False)
else:
self.set(line, '', False) # default to a key => '' mapping
def save(self, path = None):
"""
@ -535,7 +548,9 @@ class Config(object):
:param str path: location to be saved to
:raises: **ValueError** if no path was provided and we've never been provided one
:raises:
* **IOError** if we fail to save the file (insufficient permissions, etc)
* **ValueError** if no path was provided and we've never been provided one
"""
if path:
@ -544,8 +559,11 @@ class Config(object):
raise ValueError('Unable to save configuration: no path provided')
with self._contents_lock:
if not os.path.exists(os.path.dirname(self._path)):
os.makedirs(os.path.dirname(self._path))
with open(self._path, 'w') as output_file:
for entry_key in sorted(self.keys()):
for entry_key in self.keys():
for entry_value in self.get_value(entry_key, multiple = True):
# check for multi line entries
if '\n' in entry_value:
@ -612,6 +630,9 @@ class Config(object):
Appends the given key/value configuration mapping, behaving the same as if
we'd loaded this from a configuration file.
.. versionchanged:: 1.5.0
Allow removal of values by overwriting with a **None** value.
:param str key: key for the configuration mapping
:param str,list value: value we're setting the mapping to
:param bool overwrite: replaces the previous value if **True**, otherwise
@ -619,7 +640,14 @@ class Config(object):
"""
with self._contents_lock:
if isinstance(value, str):
unicode_type = str if stem.prereq.is_python_3() else unicode
if value is None:
if overwrite and key in self._contents:
del self._contents[key]
else:
pass # no value so this is a no-op
elif isinstance(value, (bytes, unicode_type)):
if not overwrite and key in self._contents:
self._contents[key].append(value)
else:
@ -636,7 +664,7 @@ class Config(object):
for listener in self._listeners:
listener(self, key)
else:
raise ValueError("Config.set() only accepts str, list, or tuple. Provided value was a '%s'" % type(value))
raise ValueError("Config.set() only accepts str (bytes or unicode), list, or tuple. Provided value was a '%s'" % type(value))
def get(self, key, default = None):
"""
@ -743,3 +771,7 @@ class Config(object):
message_id = 'stem.util.conf.missing_config_key_%s' % key
log.log_once(message_id, log.TRACE, "config entry '%s' not found, defaulting to '%s'" % (key, default))
return default
  def __getitem__(self, key):
    """
    Provides dictionary style access for the value of the given configuration
    key.

    .. versionadded:: 1.7.0

    :param str key: configuration key to look up

    :returns: value mapped to this key in our contents

    :raises: **KeyError** if this key hasn't been set
    """

    with self._contents_lock:  # guard against concurrent modification of our contents
      return self._contents[key]

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -17,6 +17,8 @@ Connection and networking based utility functions.
is_valid_port - checks if something is a valid representation for a port
is_private_address - checks if an IPv4 address belongs to a private range or not
address_to_int - provides an integer representation of an IP address
expand_ipv6_address - provides an IPv6 address with its collapsed portions expanded
get_mask_ipv4 - provides the mask representation for a given number of bits
get_mask_ipv6 - provides the IPv6 mask representation for a given number of bits
@ -26,9 +28,17 @@ Connection and networking based utility functions.
Method for resolving a process' connections.
.. versionadded:: 1.1.0
.. versionchanged:: 1.4.0
Added **NETSTAT_WINDOWS**.
.. versionchanged:: 1.6.0
Added **BSD_FSTAT**.
.. deprecated:: 1.6.0
The SOCKSTAT connection resolver is proving to be unreliable
(:trac:`23057`), and will be dropped in the 2.0.0 release unless fixed.
==================== ===========
Resolver Description
==================== ===========
@ -37,9 +47,10 @@ Connection and networking based utility functions.
**NETSTAT_WINDOWS** netstat command under Windows
**SS** ss command
**LSOF** lsof command
**SOCKSTAT** sockstat command under *nix
**SOCKSTAT** sockstat command under \*nix
**BSD_SOCKSTAT** sockstat command under FreeBSD
**BSD_PROCSTAT** procstat command under FreeBSD
**BSD_FSTAT** fstat command under OpenBSD
==================== ===========
"""
@ -50,11 +61,11 @@ import os
import platform
import re
import stem.util
import stem.util.proc
import stem.util.system
from stem import str_type
from stem.util import conf, enum, log
from stem.util import conf, enum, log, str_tools
# Connection resolution is risky to log about since it's highly likely to
# contain sensitive information. That said, it's also difficult to get right in
@ -71,17 +82,10 @@ Resolver = enum.Enum(
('LSOF', 'lsof'),
('SOCKSTAT', 'sockstat'),
('BSD_SOCKSTAT', 'sockstat (bsd)'),
('BSD_PROCSTAT', 'procstat (bsd)')
('BSD_PROCSTAT', 'procstat (bsd)'),
('BSD_FSTAT', 'fstat (bsd)')
)
Connection = collections.namedtuple('Connection', [
'local_address',
'local_port',
'remote_address',
'remote_port',
'protocol',
])
FULL_IPv4_MASK = '255.255.255.255'
FULL_IPv6_MASK = 'FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF'
@ -92,8 +96,8 @@ PORT_USES = None # port number => description
RESOLVER_COMMAND = {
Resolver.PROC: '',
# -n = prevents dns lookups, -p = include process
Resolver.NETSTAT: 'netstat -np',
# -n = prevents dns lookups, -p = include process, -W = don't crop addresses (needed for ipv6)
Resolver.NETSTAT: 'netstat -npW',
# -a = show all TCP/UDP connections, -n = numeric addresses and ports, -o = include pid
Resolver.NETSTAT_WINDOWS: 'netstat -ano',
@ -112,62 +116,97 @@ RESOLVER_COMMAND = {
# -f <pid> = process pid
Resolver.BSD_PROCSTAT: 'procstat -f {pid}',
# -p <pid> = process pid
Resolver.BSD_FSTAT: 'fstat -p {pid}',
}
RESOLVER_FILTER = {
Resolver.PROC: '',
# tcp 0 586 192.168.0.1:44284 38.229.79.2:443 ESTABLISHED 15843/tor
Resolver.NETSTAT: '^{protocol}\s+.*\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED\s+{pid}/{name}\s*$',
Resolver.NETSTAT: '^{protocol}\s+.*\s+{local}\s+{remote}\s+ESTABLISHED\s+{pid}/{name}\s*$',
# tcp 586 192.168.0.1:44284 38.229.79.2:443 ESTABLISHED 15843
Resolver.NETSTAT_WINDOWS: '^\s*{protocol}\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED\s+{pid}\s*$',
Resolver.NETSTAT_WINDOWS: '^\s*{protocol}\s+{local}\s+{remote}\s+ESTABLISHED\s+{pid}\s*$',
# tcp ESTAB 0 0 192.168.0.20:44415 38.229.79.2:443 users:(("tor",15843,9))
Resolver.SS: '^{protocol}\s+ESTAB\s+.*\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+users:\(\("{name}",{pid},[0-9]+\)\)$',
Resolver.SS: '^{protocol}\s+ESTAB\s+.*\s+{local}\s+{remote}\s+users:\(\("{name}",(?:pid=)?{pid},(?:fd=)?[0-9]+\)\)$',
# tor 3873 atagar 45u IPv4 40994 0t0 TCP 10.243.55.20:45724->194.154.227.109:9001 (ESTABLISHED)
Resolver.LSOF: '^{name}\s+{pid}\s+.*\s+{protocol}\s+{local_address}:{local_port}->{remote_address}:{remote_port} \(ESTABLISHED\)$',
Resolver.LSOF: '^{name}\s+{pid}\s+.*\s+{protocol}\s+{local}->{remote} \(ESTABLISHED\)$',
# atagar tor 15843 tcp4 192.168.0.20:44092 68.169.35.102:443 ESTABLISHED
Resolver.SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+{protocol}4\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED$',
Resolver.SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+{protocol}4\s+{local}\s+{remote}\s+ESTABLISHED$',
# _tor tor 4397 12 tcp4 172.27.72.202:54011 127.0.0.1:9001
Resolver.BSD_SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+\S+\s+{protocol}4\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}$',
Resolver.BSD_SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+\S+\s+{protocol}4\s+{local}\s+{remote}$',
# 3561 tor 4 s - rw---n-- 2 0 TCP 10.0.0.2:9050 10.0.0.1:22370
Resolver.BSD_PROCSTAT: '^\s*{pid}\s+{name}\s+.*\s+{protocol}\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}$',
Resolver.BSD_PROCSTAT: '^\s*{pid}\s+{name}\s+.*\s+{protocol}\s+{local}\s+{remote}$',
# _tor tor 15843 20* internet stream tcp 0x0 192.168.1.100:36174 --> 4.3.2.1:443
Resolver.BSD_FSTAT: '^\S+\s+{name}\s+{pid}\s+.*\s+{protocol}\s+\S+\s+{local}\s+[-<]-[->]\s+{remote}$',
}
def get_connections(resolver, process_pid = None, process_name = None):
class Connection(collections.namedtuple('Connection', ['local_address', 'local_port', 'remote_address', 'remote_port', 'protocol', 'is_ipv6'])):
"""
Network connection information.
.. versionchanged:: 1.5.0
Added the **is_ipv6** attribute.
:var str local_address: ip address the connection originates from
:var int local_port: port the connection originates from
:var str remote_address: destination ip address
:var int remote_port: destination port
:var str protocol: protocol of the connection ('tcp', 'udp', etc)
:var bool is_ipv6: addresses are ipv6 if true, and ipv4 otherwise
"""
def get_connections(resolver = None, process_pid = None, process_name = None):
"""
Retrieves a list of the current connections for a given process. This
provides a list of Connection instances, which have five attributes...
* **local_address** (str)
* **local_port** (int)
* **remote_address** (str)
* **remote_port** (int)
* **protocol** (str, generally either 'tcp' or 'udp')
provides a list of :class:`~stem.util.connection.Connection`. Note that
addresses may be IPv4 *or* IPv6 depending on what the platform supports.
.. versionadded:: 1.1.0
:param Resolver resolver: method of connection resolution to use
.. versionchanged:: 1.5.0
Made our resolver argument optional.
.. versionchanged:: 1.5.0
IPv6 support when resolving via proc, netstat, lsof, or ss.
:param Resolver resolver: method of connection resolution to use, if not
provided then one is picked from among those that should likely be
available for the system
:param int process_pid: pid of the process to retrieve
:param str process_name: name of the process to retrieve
:returns: **list** of Connection instances
:returns: **list** of :class:`~stem.util.connection.Connection` instances
:raises:
* **ValueError** if using **Resolver.PROC** or **Resolver.BSD_PROCSTAT**
and the process_pid wasn't provided
* **ValueError** if neither a process_pid nor process_name is provided
* **IOError** if no connections are available or resolution fails
(generally they're indistinguishable). The common causes are the
command being unavailable or permissions.
"""
if not resolver:
available_resolvers = system_resolvers()
if available_resolvers:
resolver = available_resolvers[0]
else:
raise IOError('Unable to determine a connection resolver')
if not process_pid and not process_name:
raise ValueError('You must provide a pid or process name to provide connections for')
def _log(msg):
if LOG_CONNECTION_RESOLUTION:
log.debug(msg)
@ -181,14 +220,20 @@ def get_connections(resolver, process_pid = None, process_name = None):
except ValueError:
raise ValueError('Process pid was non-numeric: %s' % process_pid)
if process_pid is None and process_name and resolver == Resolver.NETSTAT_WINDOWS:
process_pid = stem.util.system.pid_by_name(process_name)
if process_pid is None:
all_pids = stem.util.system.pid_by_name(process_name, True)
if process_pid is None and resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT):
raise ValueError('%s resolution requires a pid' % resolver)
if len(all_pids) == 0:
if resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT):
raise IOError("Unable to determine the pid of '%s'. %s requires the pid to provide the connections." % (process_name, resolver))
elif len(all_pids) == 1:
process_pid = all_pids[0]
else:
if resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT):
raise IOError("There's multiple processes named '%s'. %s requires a single pid to provide the connections." % (process_name, resolver))
if resolver == Resolver.PROC:
return [Connection(*conn) for conn in stem.util.proc.connections(process_pid)]
return stem.util.proc.connections(pid = process_pid)
resolver_command = RESOLVER_COMMAND[resolver].format(pid = process_pid)
@ -199,10 +244,8 @@ def get_connections(resolver, process_pid = None, process_name = None):
resolver_regex_str = RESOLVER_FILTER[resolver].format(
protocol = '(?P<protocol>\S+)',
local_address = '(?P<local_address>[0-9.]+)',
local_port = '(?P<local_port>[0-9]+)',
remote_address = '(?P<remote_address>[0-9.]+)',
remote_port = '(?P<remote_port>[0-9]+)',
local = '(?P<local>[\[\]0-9a-f.:]+)',
remote = '(?P<remote>[\[\]0-9a-f.:]+)',
pid = process_pid if process_pid else '[0-9]*',
name = process_name if process_name else '\S*',
)
@ -213,28 +256,41 @@ def get_connections(resolver, process_pid = None, process_name = None):
connections = []
resolver_regex = re.compile(resolver_regex_str)
def _parse_address_str(addr_type, addr_str, line):
addr, port = addr_str.rsplit(':', 1)
if not is_valid_ipv4_address(addr) and not is_valid_ipv6_address(addr, allow_brackets = True):
_log('Invalid %s address (%s): %s' % (addr_type, addr, line))
return None, None
elif not is_valid_port(port):
_log('Invalid %s port (%s): %s' % (addr_type, port, line))
return None, None
else:
_log('Valid %s:%s: %s' % (addr, port, line))
return addr.lstrip('[').rstrip(']'), int(port)
for line in results:
match = resolver_regex.match(line)
if match:
attr = match.groupdict()
local_addr = attr['local_address']
local_port = int(attr['local_port'])
remote_addr = attr['remote_address']
remote_port = int(attr['remote_port'])
local_addr, local_port = _parse_address_str('local', attr['local'], line)
remote_addr, remote_port = _parse_address_str('remote', attr['remote'], line)
if not (local_addr and local_port and remote_addr and remote_port):
continue # missing or malformed field
protocol = attr['protocol'].lower()
if remote_addr == '0.0.0.0':
continue # procstat response for unestablished connections
if protocol == 'tcp6':
protocol = 'tcp'
if not (is_valid_ipv4_address(local_addr) and is_valid_ipv4_address(remote_addr)):
_log('Invalid address (%s or %s): %s' % (local_addr, remote_addr, line))
elif not (is_valid_port(local_port) and is_valid_port(remote_port)):
_log('Invalid port (%s or %s): %s' % (local_port, remote_port, line))
elif protocol not in ('tcp', 'udp'):
if protocol not in ('tcp', 'udp'):
_log('Unrecognized protocol (%s): %s' % (protocol, line))
continue
conn = Connection(local_addr, local_port, remote_addr, remote_port, protocol)
conn = Connection(local_addr, local_port, remote_addr, remote_port, protocol, is_valid_ipv6_address(local_addr))
connections.append(conn)
_log(str(conn))
@ -261,6 +317,7 @@ def system_resolvers(system = None):
:returns: **list** of :data:`~stem.util.connection.Resolver` instances available on this platform
"""
if system is None:
if stem.util.system.is_gentoo():
system = 'Gentoo'
@ -269,8 +326,10 @@ def system_resolvers(system = None):
if system == 'Windows':
resolvers = [Resolver.NETSTAT_WINDOWS]
elif system in ('Darwin', 'OpenBSD'):
elif system == 'Darwin':
resolvers = [Resolver.LSOF]
elif system == 'OpenBSD':
resolvers = [Resolver.BSD_FSTAT]
elif system == 'FreeBSD':
# Netstat is available, but lacks a '-p' equivalent so we can't associate
# the results to processes. The platform also has a ss command, but it
@ -349,7 +408,9 @@ def is_valid_ipv4_address(address):
:returns: **True** if input is a valid IPv4 address, **False** otherwise
"""
if not isinstance(address, (bytes, str_type)):
if isinstance(address, bytes):
address = str_tools._to_unicode(address)
elif not stem.util._is_str(address):
return False
# checks if theres four period separated values
@ -377,10 +438,31 @@ def is_valid_ipv6_address(address, allow_brackets = False):
:returns: **True** if input is a valid IPv6 address, **False** otherwise
"""
if isinstance(address, bytes):
address = str_tools._to_unicode(address)
elif not stem.util._is_str(address):
return False
if allow_brackets:
if address.startswith('[') and address.endswith(']'):
address = address[1:-1]
if address.count('.') == 3:
# Likely an ipv4-mapped portion. Check that it's valid, then replace with a
# filler.
ipv4_start = address.rfind(':', 0, address.find('.')) + 1
ipv4_end = address.find(':', ipv4_start + 1)
if ipv4_end == -1:
ipv4_end = None # don't crop the last character
if not is_valid_ipv4_address(address[ipv4_start:ipv4_end]):
return False
addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, 'ff:ff', address[ipv4_end + 1:] if ipv4_end else None]
address = ':'.join(filter(None, addr_comp))
# addresses are made up of eight colon separated groups of four hex digits
# with leading zeros being optional
# https://en.wikipedia.org/wiki/IPv6#Address_format
@ -469,6 +551,24 @@ def is_private_address(address):
return False
def address_to_int(address):
"""
Provides an integer representation of a IPv4 or IPv6 address that can be used
for sorting.
.. versionadded:: 1.5.0
:param str address: IPv4 or IPv6 address
:returns: **int** representation of the address
"""
# TODO: Could be neat to also use this for serialization if we also had an
# int_to_address() function.
return int(_address_to_binary(address), 2)
def expand_ipv6_address(address):
"""
Expands abbreviated IPv6 addresses to their full colon separated hex format.
@ -482,6 +582,9 @@ def expand_ipv6_address(address):
>>> expand_ipv6_address('::')
'0000:0000:0000:0000:0000:0000:0000:0000'
>>> expand_ipv6_address('::ffff:5.9.158.75')
'0000:0000:0000:0000:0000:ffff:0509:9e4b'
:param str address: IPv6 address to be expanded
:raises: **ValueError** if the address can't be expanded due to being malformed
@ -490,6 +593,25 @@ def expand_ipv6_address(address):
if not is_valid_ipv6_address(address):
raise ValueError("'%s' isn't a valid IPv6 address" % address)
# expand ipv4-mapped portions of addresses
if address.count('.') == 3:
ipv4_start = address.rfind(':', 0, address.find('.')) + 1
ipv4_end = address.find(':', ipv4_start + 1)
if ipv4_end == -1:
ipv4_end = None # don't crop the last character
# Converts ipv4 address to its hex ipv6 representation. For instance...
#
# '5.9.158.75' => '0509:9e4b'
ipv4_bin = _address_to_binary(address[ipv4_start:ipv4_end])
groupings = [ipv4_bin[16 * i:16 * (i + 1)] for i in range(2)]
ipv6_snippet = ':'.join(['%04x' % int(group, 2) for group in groupings])
addr_comp = [address[:ipv4_start - 1] if ipv4_start != 0 else None, ipv6_snippet, address[ipv4_end + 1:] if ipv4_end else None]
address = ':'.join(filter(None, addr_comp))
# expands collapsed groupings, there can only be a single '::' in a valid
# address
if '::' in address:
@ -577,7 +699,7 @@ def _get_masked_bits(mask):
raise ValueError("'%s' is an invalid subnet mask" % mask)
# converts octets to binary representation
mask_bin = _get_address_binary(mask)
mask_bin = _address_to_binary(mask)
mask_match = re.match('^(1*)(0*)$', mask_bin)
if mask_match:
@ -599,7 +721,7 @@ def _get_binary(value, bits):
return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)])
def _get_address_binary(address):
def _address_to_binary(address):
"""
Provides the binary value for an IPv4 or IPv6 address.
@ -644,6 +766,7 @@ def _cryptovariables_equal(x, y):
_hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) ==
_hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y))
# TODO: drop with stem 2.x
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
# names for backward compatibility.

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -40,7 +40,7 @@ constructed as simple type listings...
+- __iter__ - iterator over our enum keys
"""
from stem import str_type
import stem.util
def UppercaseEnum(*args):
@ -76,7 +76,7 @@ class Enum(object):
keys, values = [], []
for entry in args:
if isinstance(entry, (bytes, str_type)):
if stem.util._is_str(entry):
key, val = entry, _to_camel_case(entry)
elif isinstance(entry, tuple) and len(entry) == 2:
key, val = entry

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -88,9 +88,13 @@ DEDUPLICATION_MESSAGE_IDS = set()
class _NullHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self, level = logging.FATAL + 5) # disable logging
def emit(self, record):
pass
if not LOGGER.handlers:
LOGGER.addHandler(_NullHandler())
@ -99,7 +103,7 @@ def get_logger():
"""
Provides the stem logger.
:return: **logging.Logger** for stem
:returns: **logging.Logger** for stem
"""
return LOGGER
@ -118,6 +122,22 @@ def logging_level(runlevel):
return logging.FATAL + 5
def is_tracing():
  """
  Checks if we're logging at the trace runlevel.

  .. versionadded:: 1.6.0

  :returns: **True** if we're logging at the trace runlevel and **False** otherwise
  """

  # hoist the threshold lookup out of the handler scan
  trace_level = logging_level(TRACE)

  return any(handler.level <= trace_level for handler in get_logger().handlers)
def escape(message):
"""
Escapes specific sequences for logging (newlines, tabs, carriage returns). If
@ -199,8 +219,8 @@ class LogBuffer(logging.Handler):
Basic log handler that listens for stem events and stores them so they can be
read later. Log entries are cleared as they are read.
.. versionchanged:: 1.4.0
Added the yield_records argument.
.. versionchanged:: 1.4.0
Added the yield_records argument.
"""
def __init__(self, runlevel, yield_records = False):

View file

@ -310,4 +310,5 @@ port 19638 => Ensim
port 23399 => Skype
port 30301 => BitTorrent
port 33434 => traceroute
port 50002 => Electrum Bitcoin SSL

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -54,14 +54,23 @@ import socket
import sys
import time
import stem.prereq
import stem.util.connection
import stem.util.enum
import stem.util.str_tools
from stem.util import log
try:
# added in python 3.2
from functools import lru_cache
# unavailable on windows (#19823)
import pwd
IS_PWD_AVAILABLE = True
except ImportError:
IS_PWD_AVAILABLE = False
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
else:
from stem.util.lru_cache import lru_cache
# os.sysconf is only defined on unix
@ -70,6 +79,9 @@ try:
except AttributeError:
CLOCK_TICKS = None
IS_LITTLE_ENDIAN = sys.byteorder == 'little'
ENCODED_ADDR = {} # cache of encoded ips to their decoded version
Stat = stem.util.enum.Enum(
('COMMAND', 'command'), ('CPU_UTIME', 'utime'),
('CPU_STIME', 'stime'), ('START_TIME', 'start time')
@ -324,38 +336,110 @@ def file_descriptors_used(pid):
raise IOError('Unable to check number of file descriptors used: %s' % exc)
def connections(pid):
def connections(pid = None, user = None):
"""
Queries connection related information from the proc contents. This provides
similar results to netstat, lsof, sockstat, and other connection resolution
utilities (though the lookup is far quicker).
Queries connections from the proc contents. This matches netstat, lsof, and
friends but is much faster. If no **pid** or **user** are provided this
provides all present connections.
:param int pid: process id of the process to be queried
:param int pid: pid to provide connections for
:param str user: username to look up connections for
:returns: A listing of connection tuples of the form **[(local_ipAddr1,
local_port1, foreign_ipAddr1, foreign_port1, protocol), ...]** (addresses
and protocols are strings and ports are ints)
:returns: **list** of :class:`~stem.util.connection.Connection` instances
:raises: **IOError** if it can't be determined
"""
start_time, conn = time.time(), []
if pid:
parameter = 'connections for pid %s' % pid
try:
pid = int(pid)
if pid < 0:
raise IOError("Process pids can't be negative: %s" % pid)
except (ValueError, TypeError):
raise IOError('Process pid was non-numeric: %s' % pid)
elif user:
parameter = 'connections for user %s' % user
else:
parameter = 'all connections'
try:
pid = int(pid)
if not IS_PWD_AVAILABLE:
raise IOError("This requires python's pwd module, which is unavailable on Windows.")
if pid < 0:
raise IOError("Process pids can't be negative: %s" % pid)
except (ValueError, TypeError):
raise IOError('Process pid was non-numeric: %s' % pid)
inodes = _inodes_for_sockets(pid) if pid else set()
process_uid = stem.util.str_tools._to_bytes(str(pwd.getpwnam(user).pw_uid)) if user else None
if pid == 0:
return []
for proc_file_path in ('/proc/net/tcp', '/proc/net/tcp6', '/proc/net/udp', '/proc/net/udp6'):
if proc_file_path.endswith('6') and not os.path.exists(proc_file_path):
continue # ipv6 proc contents are optional
# fetches the inode numbers for socket file descriptors
protocol = proc_file_path[10:].rstrip('6') # 'tcp' or 'udp'
is_ipv6 = proc_file_path.endswith('6')
start_time, parameter = time.time(), 'process connections'
inodes = []
try:
with open(proc_file_path, 'rb') as proc_file:
proc_file.readline() # skip the first line
for fd in os.listdir('/proc/%s/fd' % pid):
for line in proc_file:
_, l_dst, r_dst, status, _, _, _, uid, _, inode = line.split()[:10]
if inodes and inode not in inodes:
continue
elif process_uid and uid != process_uid:
continue
elif protocol == 'tcp' and status != b'01':
continue # skip tcp connections that aren't yet established
div = l_dst.find(b':')
l_addr = _unpack_addr(l_dst[:div])
l_port = int(l_dst[div + 1:], 16)
div = r_dst.find(b':')
r_addr = _unpack_addr(r_dst[:div])
r_port = int(r_dst[div + 1:], 16)
if r_addr == '0.0.0.0' or r_addr == '0000:0000:0000:0000:0000:0000':
continue # no address
elif l_port == 0 or r_port == 0:
continue # no port
conn.append(stem.util.connection.Connection(l_addr, l_port, r_addr, r_port, protocol, is_ipv6))
except IOError as exc:
raise IOError("unable to read '%s': %s" % (proc_file_path, exc))
except Exception as exc:
raise IOError("unable to parse '%s': %s" % (proc_file_path, exc))
_log_runtime(parameter, '/proc/net/[tcp|udp]', start_time)
return conn
except IOError as exc:
_log_failure(parameter, exc)
raise
def _inodes_for_sockets(pid):
"""
Provides inodes in use by a process for its sockets.
:param int pid: process id of the process to be queried
:returns: **set** with inodes for its sockets
:raises: **IOError** if it can't be determined
"""
inodes = set()
try:
fd_contents = os.listdir('/proc/%s/fd' % pid)
except OSError as exc:
raise IOError('Unable to read our file descriptors: %s' % exc)
for fd in fd_contents:
fd_path = '/proc/%s/fd/%s' % (pid, fd)
try:
@ -364,57 +448,18 @@ def connections(pid):
fd_name = os.readlink(fd_path)
if fd_name.startswith('socket:['):
inodes.append(fd_name[8:-1])
inodes.add(stem.util.str_tools._to_bytes(fd_name[8:-1]))
except OSError as exc:
if not os.path.exists(fd_path):
continue # descriptors may shift while we're in the middle of iterating over them
# most likely couldn't be read due to permissions
exc = IOError('unable to determine file descriptor destination (%s): %s' % (exc, fd_path))
_log_failure(parameter, exc)
raise exc
raise IOError('unable to determine file descriptor destination (%s): %s' % (exc, fd_path))
if not inodes:
# unable to fetch any connections for this process
return []
# check for the connection information from the /proc/net contents
conn = []
for proc_file_path in ('/proc/net/tcp', '/proc/net/udp'):
try:
proc_file = open(proc_file_path)
proc_file.readline() # skip the first line
for line in proc_file:
_, l_addr, f_addr, status, _, _, _, _, _, inode = line.split()[:10]
if inode in inodes:
# if a tcp connection, skip if it isn't yet established
if proc_file_path.endswith('/tcp') and status != '01':
continue
local_ip, local_port = _decode_proc_address_encoding(l_addr)
foreign_ip, foreign_port = _decode_proc_address_encoding(f_addr)
protocol = proc_file_path[10:]
conn.append((local_ip, local_port, foreign_ip, foreign_port, protocol))
proc_file.close()
except IOError as exc:
exc = IOError("unable to read '%s': %s" % (proc_file_path, exc))
_log_failure(parameter, exc)
raise exc
except Exception as exc:
exc = IOError("unable to parse '%s': %s" % (proc_file_path, exc))
_log_failure(parameter, exc)
raise exc
_log_runtime(parameter, '/proc/net/[tcp|udp]', start_time)
return conn
return inodes
def _decode_proc_address_encoding(addr):
def _unpack_addr(addr):
"""
Translates an address entry in the /proc/net/* contents to a human readable
form (`reference <http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html>`_,
@ -422,35 +467,40 @@ def _decode_proc_address_encoding(addr):
::
"0500000A:0016" -> ("10.0.0.5", 22)
"0500000A" -> "10.0.0.5"
"F804012A4A5190010000000002000000" -> "2a01:4f8:190:514a::2"
:param str addr: proc address entry to be decoded
:returns: **tuple** of the form **(addr, port)**, with addr as a string and port an int
:returns: **str** of the decoded address
"""
ip, port = addr.split(':')
if addr not in ENCODED_ADDR:
if len(addr) == 8:
# IPv4 address
decoded = base64.b16decode(addr)[::-1] if IS_LITTLE_ENDIAN else base64.b16decode(addr)
ENCODED_ADDR[addr] = socket.inet_ntop(socket.AF_INET, decoded)
else:
# IPv6 address
# the port is represented as a two-byte hexadecimal number
port = int(port, 16)
if IS_LITTLE_ENDIAN:
# Group into eight characters, then invert in pairs...
#
# https://trac.torproject.org/projects/tor/ticket/18079#comment:24
if sys.version_info >= (3,):
ip = ip.encode('ascii')
inverted = []
# The IPv4 address portion is a little-endian four-byte hexadecimal number.
# That is, the least significant byte is listed first, so we need to reverse
# the order of the bytes to convert it to an IP address.
#
# This needs to account for the endian ordering as per...
# http://code.google.com/p/psutil/issues/detail?id=201
# https://trac.torproject.org/projects/tor/ticket/4777
for i in range(4):
grouping = addr[8 * i:8 * (i + 1)]
inverted += [grouping[2 * i:2 * (i + 1)] for i in range(4)][::-1]
if sys.byteorder == 'little':
ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip)[::-1])
else:
ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip))
encoded = b''.join(inverted)
else:
encoded = addr
return (ip, port)
ENCODED_ADDR[addr] = stem.util.connection.expand_ipv6_address(socket.inet_ntop(socket.AF_INET6, base64.b16decode(encoded)))
return ENCODED_ADDR[addr]
def _is_float(*value):
@ -508,7 +558,7 @@ def _get_lines(file_path, line_prefixes, parameter):
return results
except IOError as exc:
_log_failure(parameter, exc)
raise exc
raise
def _log_runtime(parameter, proc_location, start_time):
@ -534,6 +584,7 @@ def _log_failure(parameter, exc):
log.debug('proc call failed (%s): %s' % (parameter, exc))
# TODO: drop with stem 2.x
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
# names for backward compatibility.

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -27,10 +27,9 @@ import re
import sys
import stem.prereq
import stem.util
import stem.util.enum
from stem import str_type
# label conversion tuples of the form...
# (bits / bytes / seconds, short label, long label)
@ -75,13 +74,13 @@ if stem.prereq.is_python_3():
return msg
else:
def _to_bytes_impl(msg):
if msg is not None and isinstance(msg, str_type):
if msg is not None and isinstance(msg, unicode):
return codecs.latin_1_encode(msg, 'replace')[0]
else:
return msg
def _to_unicode_impl(msg):
if msg is not None and not isinstance(msg, str_type):
if msg is not None and not isinstance(msg, unicode):
return msg.decode('utf-8', 'replace')
else:
return msg
@ -117,6 +116,22 @@ def _to_unicode(msg):
return _to_unicode_impl(msg)
def _to_int(msg):
  """
  Serializes a string to a number, interpreting its characters as a
  big-endian sequence of byte values.

  :param str msg: string to be serialized

  :returns: **int** representation of the string
  """

  if stem.prereq.is_python_3() and isinstance(msg, bytes):
    # iterating over bytes in python3 provides ints rather than characters
    ordinals = list(msg)
  else:
    ordinals = [ord(char) for char in msg]

  result = 0

  for value in ordinals:
    result = result * 256 + value

  return result
def _to_camel_case(label, divider = '_', joiner = ' '):
"""
Converts the given string to camel case, ie:
@ -145,6 +160,24 @@ def _to_camel_case(label, divider = '_', joiner = ' '):
return joiner.join(words)
def _split_by_length(msg, size):
"""
Splits a string into a list of strings up to the given size.
::
>>> _split_by_length('hello', 2)
['he', 'll', 'o']
:param str msg: string to split
:param int size: number of characters to chunk into
:returns: **list** with chunked string components
"""
return [msg[i:i + size] for i in range(0, len(msg), size)]
# This needs to be defined after _to_camel_case() to avoid a circular
# dependency with the enum module.
@ -210,6 +243,9 @@ def crop(msg, size, min_word_length = 4, min_crop = 0, ending = Ending.ELLIPSE,
# ellipse, and cropping words requires an extra space for hyphens
if ending == Ending.ELLIPSE:
if size < 3:
return ('', msg) if get_remainder else ''
size -= 3
elif min_word_length and ending == Ending.HYPHEN:
min_word_length += 1
@ -262,7 +298,7 @@ def crop(msg, size, min_word_length = 4, min_crop = 0, ending = Ending.ELLIPSE,
return (return_msg, remainder) if get_remainder else return_msg
def size_label(byte_count, decimal = 0, is_long = False, is_bytes = True):
def size_label(byte_count, decimal = 0, is_long = False, is_bytes = True, round = False):
"""
Converts a number of bytes into a human readable label in its most
significant units. For instance, 7500 bytes would return "7 KB". If the
@ -281,18 +317,22 @@ def size_label(byte_count, decimal = 0, is_long = False, is_bytes = True):
>>> size_label(1050, 3, True)
'1.025 Kilobytes'
.. versionchanged:: 1.6.0
Added round argument.
:param int byte_count: number of bytes to be converted
:param int decimal: number of decimal digits to be included
:param bool is_long: expands units label
:param bool is_bytes: provides units in bytes if **True**, bits otherwise
:param bool round: rounds normally if **True**, otherwise rounds down
:returns: **str** with human readable representation of the size
"""
if is_bytes:
return _get_label(SIZE_UNITS_BYTES, byte_count, decimal, is_long)
return _get_label(SIZE_UNITS_BYTES, byte_count, decimal, is_long, round)
else:
return _get_label(SIZE_UNITS_BITS, byte_count, decimal, is_long)
return _get_label(SIZE_UNITS_BITS, byte_count, decimal, is_long, round)
def time_label(seconds, decimal = 0, is_long = False):
@ -456,7 +496,7 @@ def _parse_timestamp(entry):
:raises: **ValueError** if the timestamp is malformed
"""
if not isinstance(entry, (str, str_type)):
if not stem.util._is_str(entry):
raise ValueError('parse_timestamp() input must be a str, got a %s' % type(entry))
try:
@ -482,7 +522,7 @@ def _parse_iso_timestamp(entry):
:raises: **ValueError** if the timestamp is malformed
"""
if not isinstance(entry, (str, str_type)):
if not stem.util._is_str(entry):
raise ValueError('parse_iso_timestamp() input must be a str, got a %s' % type(entry))
# based after suggestions from...
@ -496,7 +536,7 @@ def _parse_iso_timestamp(entry):
if len(microseconds) != 6 or not microseconds.isdigit():
raise ValueError("timestamp's microseconds should be six digits")
if timestamp_str[10] == 'T':
if len(timestamp_str) > 10 and timestamp_str[10] == 'T':
timestamp_str = timestamp_str[:10] + ' ' + timestamp_str[11:]
else:
raise ValueError("timestamp didn't contain delimeter 'T' between date and time")
@ -505,7 +545,7 @@ def _parse_iso_timestamp(entry):
return timestamp + datetime.timedelta(microseconds = int(microseconds))
def _get_label(units, count, decimal, is_long):
def _get_label(units, count, decimal, is_long, round = False):
"""
Provides label corresponding to units of the highest significance in the
provided set. This rounds down (ie, integer truncation after visible units).
@ -515,6 +555,7 @@ def _get_label(units, count, decimal, is_long):
:param int count: number of base units being converted
:param int decimal: decimal precision of label
:param bool is_long: uses the long label if **True**, short label otherwise
:param bool round: rounds normally if **True**, otherwise rounds down
"""
# formatted string for the requested number of digits
@ -529,10 +570,12 @@ def _get_label(units, count, decimal, is_long):
for count_per_unit, short_label, long_label in units:
if count >= count_per_unit:
# Rounding down with a '%f' is a little clunky. Reducing the count so
# it'll divide evenly as the rounded down value.
if not round:
# Rounding down with a '%f' is a little clunky. Reducing the count so
# it'll divide evenly as the rounded down value.
count -= count % (count_per_unit / (10 ** decimal))
count -= count % (count_per_unit / (10 ** decimal))
count_label = label_format % (count / count_per_unit)
if is_long:
@ -548,6 +591,7 @@ def _get_label(units, count, decimal, is_long):
else:
return count_label + short_label
# TODO: drop with stem 2.x
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
# names for backward compatibility.

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -10,6 +10,10 @@ best-effort, providing **None** if the lookup fails.
Dropped the get_* prefix from several function names. The old names still
work, but are deprecated aliases.
.. versionchanged:: 1.5.0
Added the **SYSTEM_CALL_TIME** global, which tracks total time spent making
system commands.
**Module Overview:**
::
@ -17,16 +21,19 @@ best-effort, providing **None** if the lookup fails.
is_windows - checks if we're running on windows
is_mac - checks if we're running on a mac
is_gentoo - checks if we're running on gentoo
is_slackware - checks if we're running on slackware
is_bsd - checks if we're running on the bsd family of operating systems
is_available - determines if a command is available on this system
is_running - determines if a given process is running
size_of - provides the memory usage of an object
call - runs the given system command and provides back the results
name_by_pid - gets the name for a process by the given pid
pid_by_name - gets the pid for a process by the given name
pid_by_port - gets the pid for a process listening to a given port
pid_by_open_file - gets the pid for the process with an open file
pids_by_user - provides processes owned by a user
cwd - provides the current working directory for a given process
user - provides the user a process is running under
start_time - provides the unix timestamp when the process started
@ -40,25 +47,63 @@ best-effort, providing **None** if the lookup fails.
get_process_name - provides our process' name
set_process_name - changes our process' name
.. data:: Status (enum)
State of a subprocess.
.. versionadded:: 1.6.0
==================== ===========
Status Description
==================== ===========
PENDING not yet started
RUNNING currently being performed
DONE completed successfully
FAILED failed with an exception
==================== ===========
"""
import collections
import ctypes
import ctypes.util
import distutils.spawn
import itertools
import mimetypes
import multiprocessing
import os
import platform
import re
import subprocess
import sys
import tarfile
import threading
import time
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.proc
import stem.util.str_tools
from stem import UNDEFINED, str_type
from stem import UNDEFINED
from stem.util import log
State = stem.util.enum.UppercaseEnum(
'PENDING',
'RUNNING',
'DONE',
'FAILED',
)
SIZE_RECURSES = {
tuple: iter,
list: iter,
collections.deque: iter,
dict: lambda d: itertools.chain.from_iterable(d.items()),
set: iter,
frozenset: iter,
}
# Mapping of commands to if they're available or not.
CMD_AVAILABLE_CACHE = {}
@ -84,6 +129,8 @@ GET_PID_BY_PORT_NETSTAT = 'netstat -npltu'
GET_PID_BY_PORT_SOCKSTAT = 'sockstat -4l -P tcp -p %s'
GET_PID_BY_PORT_LSOF = 'lsof -wnP -iTCP -sTCP:LISTEN'
GET_PID_BY_FILE_LSOF = 'lsof -tw %s'
GET_PIDS_BY_USER_LINUX = 'ps -o pid -u %s'
GET_PIDS_BY_USER_BSD = 'ps -o pid -U %s'
GET_CWD_PWDX = 'pwdx %s'
GET_CWD_LSOF = 'lsof -a -p %s -d cwd -Fn'
GET_BSD_JAIL_ID_PS = 'ps -p %s -o jid'
@ -125,6 +172,143 @@ _PROCESS_NAME = None
_MAX_NAME_LENGTH = -1
# Tracks total time spent shelling out to other commands like 'ps' and
# 'netstat', so we can account for it as part of our cpu time along with
# os.times().
SYSTEM_CALL_TIME = 0.0
SYSTEM_CALL_TIME_LOCK = threading.RLock()
class CallError(OSError):
  """
  Error response when making a system call. This is an **OSError** subclass
  with additional information about the process. Depending on the nature of
  the error not all of these attributes will be available.

  :var str msg: exception string
  :var str command: command that was ran
  :var int exit_status: exit code of the process
  :var float runtime: time the command took to run
  :var str stdout: stdout of the process
  :var str stderr: stderr of the process
  """

  def __init__(self, msg, command, exit_status, runtime, stdout, stderr):
    # retain everything we know about the failed invocation for callers
    self.msg, self.command = msg, command
    self.exit_status, self.runtime = exit_status, runtime
    self.stdout, self.stderr = stdout, stderr

  def __str__(self):
    return self.msg
class CallTimeoutError(CallError):
  """
  Error response when making a system call that has timed out.

  .. versionadded:: 1.6.0

  :var float timeout: time we waited
  """

  def __init__(self, msg, command, exit_status, runtime, stdout, stderr, timeout):
    # delegate the common attributes to CallError, only adding the timeout
    super(CallTimeoutError, self).__init__(msg, command, exit_status, runtime, stdout, stderr)
    self.timeout = timeout
class DaemonTask(object):
  """
  Invokes the given function in a subprocess, returning the value.

  .. versionadded:: 1.6.0

  :var function runner: function to be invoked by the subprocess
  :var tuple args: arguments to provide to the subprocess
  :var int priority: subprocess nice priority

  :var stem.util.system.State status: state of the subprocess
  :var float runtime: seconds subprocess took to complete
  :var object result: return value of subprocess if successful
  :var exception error: exception raised by subprocess if it failed
  """

  def __init__(self, runner, args = None, priority = 15, start = False):
    self.runner = runner
    self.args = args
    self.priority = priority

    self.status = State.PENDING
    self.runtime = None
    self.result = None
    self.error = None

    self._process = None
    self._pipe = None

    if start:
      self.run()

  def run(self):
    """
    Invokes the task if it hasn't already been started. If it has this is a
    no-op.
    """

    if self.status == State.PENDING:
      # parent keeps one end of the pipe, the child reports through the other
      self._pipe, child_pipe = multiprocessing.Pipe()
      self._process = multiprocessing.Process(target = DaemonTask._run_wrapper, args = (child_pipe, self.priority, self.runner, self.args))
      self._process.start()
      self.status = State.RUNNING

  def join(self):
    """
    Provides the result of the daemon task. If still running this blocks until
    the task is completed.

    :returns: response of the function we ran

    :raises: exception raised by the function if it failed with one
    """

    if self.status == State.PENDING:
      self.run()

    if self.status == State.RUNNING:
      self._process.join()

      # child always sends a (status, runtime, payload) triple
      self.status, self.runtime, payload = self._pipe.recv()

      if self.status == State.DONE:
        self.result = payload
      elif self.status == State.FAILED:
        self.error = payload

    if self.status == State.DONE:
      return self.result
    elif self.status == State.FAILED:
      raise self.error
    else:
      raise RuntimeError('BUG: unexpected status from daemon task, %s' % self.status)

  @staticmethod
  def _run_wrapper(conn, priority, runner, args):
    start_time = time.time()
    os.nice(priority)  # deprioritize ourselves relative to the parent

    try:
      result = runner(*args) if args else runner()
      conn.send((State.DONE, time.time() - start_time, result))
    except Exception as exc:
      conn.send((State.FAILED, time.time() - start_time, exc))
    finally:
      conn.close()
def is_windows():
"""
@ -156,6 +340,16 @@ def is_gentoo():
return os.path.exists('/etc/gentoo-release')
def is_slackware():
  """
  Checks if we are running on a Slackware system.

  :returns: **bool** to indicate if we're on a Slackware system
  """

  # Slackware is identified by its version file rather than platform.system()
  return os.path.exists('/etc/slackware-version')
def is_bsd():
"""
Checks if we are within the BSD family of operating systems. This currently
@ -164,7 +358,7 @@ def is_bsd():
:returns: **bool** to indicate if we're on a BSD OS
"""
return platform.system() in ('Darwin', 'FreeBSD', 'OpenBSD')
return platform.system() in ('Darwin', 'FreeBSD', 'OpenBSD', 'NetBSD')
def is_available(command, cached=True):
@ -188,27 +382,49 @@ def is_available(command, cached=True):
command = command.split(' ')[0]
if command in SHELL_COMMANDS:
# we can't actually look it up, so hope the shell really provides it...
return True
return True # we can't actually look it up, so hope the shell really provides it...
elif cached and command in CMD_AVAILABLE_CACHE:
return CMD_AVAILABLE_CACHE[command]
else:
cmd_exists = distutils.spawn.find_executable(command) is not None
CMD_AVAILABLE_CACHE[command] = cmd_exists
return cmd_exists
elif 'PATH' not in os.environ:
return False # lacking a path will cause find_executable() to internally fail
cmd_exists = False
for path in os.environ['PATH'].split(os.pathsep):
cmd_path = os.path.join(path, command)
if is_windows():
cmd_path += '.exe'
if os.path.exists(cmd_path) and os.access(cmd_path, os.X_OK):
cmd_exists = True
break
CMD_AVAILABLE_CACHE[command] = cmd_exists
return cmd_exists
def is_running(command):
"""
Checks for if a process with a given name is running or not.
Checks for if a process with a given name or pid is running.
:param str command: process name to be checked
.. versionchanged:: 1.6.0
Added support for list and pid arguments.
:param str,list,int command: process name if a str, multiple process names if
a list, or pid if an int to be checked
:returns: **True** if the process is running, **False** if it's not among ps
results, and **None** if ps can't be queried
"""
if isinstance(command, int):
try:
os.kill(command, 0)
return True
except OSError:
return False
# Linux and the BSD families have different variants of ps. Guess based on
# the is_bsd() check which to try first, then fall back to the other.
#
@ -236,12 +452,63 @@ def is_running(command):
command_listing = call(secondary_resolver, None)
if command_listing:
command_listing = map(str_type.strip, command_listing)
return command in command_listing
command_listing = [c.strip() for c in command_listing]
if stem.util._is_str(command):
command = [command]
for cmd in command:
if cmd in command_listing:
return True
return False
return None
def size_of(obj, exclude = None):
  """
  Provides the `approximate memory usage of an object
  <https://code.activestate.com/recipes/577504/>`_. This can recurse tuples,
  lists, deques, dicts, and sets. To teach this function to inspect additional
  object types expand SIZE_RECURSES...

  ::

    stem.util.system.SIZE_RECURSES[SomeClass] = SomeClass.get_elements

  .. versionadded:: 1.6.0

  :param object obj: object to provide the size of
  :param set exclude: object ids to exclude from size estimation

  :returns: **int** with the size of the object in bytes

  :raises: **NotImplementedError** if using PyPy
  """

  if stem.prereq.is_pypy():
    raise NotImplementedError('PyPy does not implement sys.getsizeof()')

  if exclude is None:
    exclude = set()
  elif id(obj) in exclude:
    return 0  # already counted this object

  try:
    total = sys.getsizeof(obj)
  except TypeError:
    total = sys.getsizeof(0)  # estimate if object lacks a __sizeof__

  exclude.add(id(obj))

  # recurse into the members of containers we know how to walk

  recurse_func = SIZE_RECURSES.get(type(obj))

  if recurse_func is not None:
    for member in recurse_func(obj):
      total += size_of(member, exclude)

  return total
def name_by_pid(pid):
"""
Attempts to determine the name a given process is running under (not
@ -614,6 +881,38 @@ def pid_by_open_file(path):
return None # all queries failed
def pids_by_user(user):
  """
  Provides processes owned by a given user.

  .. versionadded:: 1.5.0

  :param str user: user to look up processes for

  :returns: **list** with the process ids, **None** if it can't be determined
  """

  # example output:
  # atagar@odin:~$ ps -o pid -u avahi
  #   PID
  #   914
  #   915

  if is_available('ps'):
    # BSD's ps filters by user with -U whereas linux uses -u
    ps_query = GET_PIDS_BY_USER_BSD if is_bsd() else GET_PIDS_BY_USER_LINUX
    results = call(ps_query % user, None)

    if results:
      try:
        # first line is the 'PID' header, the remainder are pids
        return [int(pid) for pid in results[1:]]
      except ValueError:
        pass  # non-numeric output, fall through to the failure response

  return None
def cwd(pid):
"""
Provides the working directory of the given process.
@ -668,8 +967,8 @@ def cwd(pid):
if is_available('lsof'):
results = call(GET_CWD_LSOF % pid, [])
if len(results) == 2 and results[1].startswith('n/'):
lsof_result = results[1][1:].strip()
if len(results) >= 2 and results[-1].startswith('n/'):
lsof_result = results[-1][1:].strip()
# If we lack read permissions for the cwd then it returns...
# p2683
@ -765,7 +1064,7 @@ def tail(target, lines = None):
"""
if isinstance(target, str):
with open(target) as target_file:
with open(target, 'rb') as target_file:
for line in tail(target_file, lines):
yield line
@ -777,13 +1076,13 @@ def tail(target, lines = None):
target.seek(0, 2) # go to the end of the file
block_end_byte = target.tell()
block_number = -1
content = ''
content = b''
while (lines is None or lines > 0) and block_end_byte > 0:
if (block_end_byte - BLOCK_SIZE > 0):
# read the last block we haven't yet read
target.seek(block_number * BLOCK_SIZE, 2)
content, completed_lines = (target.read(BLOCK_SIZE) + content).split('\n', 1)
content, completed_lines = (target.read(BLOCK_SIZE) + content).split(b'\n', 1)
else:
# reached the start of the file, just read what's left
target.seek(0, 0)
@ -794,7 +1093,7 @@ def tail(target, lines = None):
if lines is not None:
lines -= 1
yield line
yield stem.util.str_tools._to_unicode(line)
block_end_byte -= BLOCK_SIZE
block_number -= 1
@ -951,63 +1250,105 @@ def files_with_suffix(base_path, suffix):
yield os.path.join(root, filename)
def call(command, default = UNDEFINED, ignore_exit_status = False):
def call(command, default = UNDEFINED, ignore_exit_status = False, timeout = None, cwd = None, env = None):
"""
call(command, default = UNDEFINED, ignore_exit_status = False)
Issues a command in a subprocess, blocking until completion and returning the
results. This is not actually run in a shell so pipes and other shell syntax
are not permitted.
.. versionchanged:: 1.5.0
Providing additional information upon failure by raising a CallError. This
is a subclass of OSError, providing backward compatibility.
.. versionchanged:: 1.5.0
Added env argument.
.. versionchanged:: 1.6.0
Added timeout and cwd arguments.
:param str,list command: command to be issued
:param object default: response if the query fails
:param bool ignore_exit_status: reports failure if our command's exit status
was non-zero
:param float timeout: maximum seconds to wait, blocks indefinitely if
**None**
:param dict env: environment variables
:returns: **list** with the lines of output from the command
:raises: **OSError** if this fails and no default was provided
:raises:
* **CallError** if this fails and no default was provided
* **CallTimeoutError** if the timeout is reached without a default
"""
# TODO: in stem 2.x return a struct with stdout, stderr, and runtime instead
global SYSTEM_CALL_TIME
if isinstance(command, str):
command_list = command.split(' ')
else:
command_list = command
command_list = list(map(str, command))
exit_status, runtime, stdout, stderr = None, None, None, None
start_time = time.time()
try:
is_shell_command = command_list[0] in SHELL_COMMANDS
start_time = time.time()
process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command)
process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command, cwd = cwd, env = env)
if timeout:
while process.poll() is None:
if time.time() - start_time > timeout:
raise CallTimeoutError("Process didn't finish after %0.1f seconds" % timeout, ' '.join(command_list), None, timeout, '', '', timeout)
time.sleep(0.001)
stdout, stderr = process.communicate()
stdout, stderr = stdout.strip(), stderr.strip()
runtime = time.time() - start_time
log.debug('System call: %s (runtime: %0.2f)' % (command, runtime))
trace_prefix = 'Received from system (%s)' % command
if stdout and stderr:
log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr))
elif stdout:
log.trace(trace_prefix + ', stdout:\n%s' % stdout)
elif stderr:
log.trace(trace_prefix + ', stderr:\n%s' % stderr)
if log.is_tracing():
trace_prefix = 'Received from system (%s)' % command
exit_code = process.poll()
if stdout and stderr:
log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr))
elif stdout:
log.trace(trace_prefix + ', stdout:\n%s' % stdout)
elif stderr:
log.trace(trace_prefix + ', stderr:\n%s' % stderr)
if not ignore_exit_status and exit_code != 0:
raise OSError('%s returned exit status %i' % (command, exit_code))
exit_status = process.poll()
if not ignore_exit_status and exit_status != 0:
raise OSError('%s returned exit status %i' % (command, exit_status))
if stdout:
return stdout.decode('utf-8', 'replace').splitlines()
else:
return []
except CallTimeoutError:
log.debug('System call (timeout): %s (after %0.4fs)' % (command, timeout))
if default != UNDEFINED:
return default
else:
raise
except OSError as exc:
log.debug('System call (failed): %s (error: %s)' % (command, exc))
if default != UNDEFINED:
return default
else:
raise exc
raise CallError(str(exc), ' '.join(command_list), exit_status, runtime, stdout, stderr)
finally:
with SYSTEM_CALL_TIME_LOCK:
SYSTEM_CALL_TIME += time.time() - start_time
def get_process_name():
@ -1150,7 +1491,7 @@ def _set_proc_title(process_name):
libc = ctypes.CDLL(ctypes.util.find_library('c'))
name_buffer = ctypes.create_string_buffer(len(process_name) + 1)
name_buffer.value = process_name
name_buffer.value = process_name.encode()
try:
libc.setproctitle(ctypes.byref(name_buffer))

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -8,12 +8,13 @@ Utilities for working with the terminal.
::
encoding - provides the ANSI escape sequence for a terminal attribute
format - wrap text with ANSI for the given colors or attributes
.. data:: Color (enum)
.. data:: BgColor (enum)
Enumerations for foreground or background terminal color.
Foreground or background terminal colors.
=========== ===========
Color Description
@ -30,15 +31,19 @@ Utilities for working with the terminal.
.. data:: Attr (enum)
Enumerations of terminal text attributes.
Terminal text attributes.
.. versionchanged:: 1.5.0
Added the LINES attribute.
=================== ===========
Attr Description
=================== ===========
**BOLD** heavy typeface
**HILIGHT** inverted foreground and background
**HIGHLIGHT** inverted foreground and background
**UNDERLINE** underlined text
**READLINE_ESCAPE** wrap encodings in `RL_PROMPT_START_IGNORE and RL_PROMPT_END_IGNORE sequences <https://stackoverflow.com/questions/9468435/look-how-to-fix-column-calculation-in-python-readline-if-use-color-prompt>`_
**LINES** formats lines individually
=================== ===========
"""
@ -54,17 +59,52 @@ DISABLE_COLOR_SUPPORT = False
Color = stem.util.enum.Enum(*TERM_COLORS)
BgColor = stem.util.enum.Enum(*['BG_' + color for color in TERM_COLORS])
Attr = stem.util.enum.Enum('BOLD', 'UNDERLINE', 'HILIGHT', 'READLINE_ESCAPE')
Attr = stem.util.enum.Enum('BOLD', 'UNDERLINE', 'HIGHLIGHT', 'READLINE_ESCAPE', 'LINES')
# mappings of terminal attribute enums to their ANSI escape encoding
FG_ENCODING = dict([(list(Color)[i], str(30 + i)) for i in range(8)])
BG_ENCODING = dict([(list(BgColor)[i], str(40 + i)) for i in range(8)])
ATTR_ENCODING = {Attr.BOLD: '1', Attr.UNDERLINE: '4', Attr.HILIGHT: '7'}
ATTR_ENCODING = {Attr.BOLD: '1', Attr.UNDERLINE: '4', Attr.HIGHLIGHT: '7'}
CSI = '\x1B[%sm'
RESET = CSI % '0'
def encoding(*attrs):
"""
Provides the ANSI escape sequence for these terminal color or attributes.
.. versionadded:: 1.5.0
:param list attr: :data:`~stem.util.terminal.Color`,
:data:`~stem.util.terminal.BgColor`, or :data:`~stem.util.terminal.Attr` to
provide an encoding for
:returns: **str** of the ANSI escape sequence, **None** if no attributes are
recognized
"""
term_encodings = []
for attr in attrs:
# TODO: Account for an earlier misspelled attribute. This should be dropped
# in Stem 2.0.x.
if attr == 'HILIGHT':
attr = 'HIGHLIGHT'
attr = stem.util.str_tools._to_camel_case(attr)
term_encoding = FG_ENCODING.get(attr, None)
term_encoding = BG_ENCODING.get(attr, term_encoding)
term_encoding = ATTR_ENCODING.get(attr, term_encoding)
if term_encoding:
term_encodings.append(term_encoding)
if term_encodings:
return CSI % ';'.join(term_encodings)
def format(msg, *attr):
"""
Simple terminal text formatting using `ANSI escape sequences
@ -75,38 +115,39 @@ def format(msg, *attr):
* `termcolor <https://pypi.python.org/pypi/termcolor>`_
* `colorama <https://pypi.python.org/pypi/colorama>`_
.. versionchanged:: 1.6.0
Normalized return value to be unicode to better support python 2/3
compatibility.
:param str msg: string to be formatted
:param str attr: text attributes, this can be :data:`~stem.util.term.Color`,
:data:`~stem.util.term.BgColor`, or :data:`~stem.util.term.Attr` enums
and are case insensitive (so strings like 'red' are fine)
:returns: **str** wrapped with ANSI escape encodings, starting with the given
:returns: **unicode** wrapped with ANSI escape encodings, starting with the given
attributes and ending with a reset
"""
msg = stem.util.str_tools._to_unicode(msg)
if DISABLE_COLOR_SUPPORT:
return msg
if Attr.LINES in attr:
attr = list(attr)
attr.remove(Attr.LINES)
lines = [format(line, *attr) for line in msg.split('\n')]
return '\n'.join(lines)
# if we have reset sequences in the message then apply our attributes
# after each of them
if RESET in msg:
return ''.join([format(comp, *attr) for comp in msg.split(RESET)])
encodings = []
for text_attr in attr:
text_attr, encoding = stem.util.str_tools._to_camel_case(text_attr), None
encoding = FG_ENCODING.get(text_attr, encoding)
encoding = BG_ENCODING.get(text_attr, encoding)
encoding = ATTR_ENCODING.get(text_attr, encoding)
if encoding:
encodings.append(encoding)
if encodings:
prefix, suffix = CSI % ';'.join(encodings), RESET
prefix, suffix = encoding(*attr), RESET
if prefix:
if Attr.READLINE_ESCAPE in attr:
prefix = '\001%s\002' % prefix
suffix = '\001%s\002' % suffix

View file

@ -1,46 +1,329 @@
# Copyright 2015, Damian Johnson and The Tor Project
# Copyright 2015-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Helper functions for testing.
Our **stylistic_issues**, **pyflakes_issues**, and **type_check_issues**
respect an 'exclude_paths' in our test config, excluding any absolute paths
matching those regexes. Issue strings can start or end with an asterisk
to match just against the prefix or suffix. For instance...
::
exclude_paths .*/stem/test/data/.*
.. versionadded:: 1.2.0
::
TimedTestRunner - test runner that tracks test runtimes
test_runtimes - provides runtime of tests executed through TimedTestRunners
clean_orphaned_pyc - delete *.pyc files without corresponding *.py
is_pyflakes_available - checks if pyflakes is available
is_pep8_available - checks if pep8 is available
is_pycodestyle_available - checks if pycodestyle is available
stylistic_issues - checks for PEP8 and other stylistic issues
pyflakes_issues - static checks for problems via pyflakes
stylistic_issues - checks for PEP8 and other stylistic issues
"""
import collections
import linecache
import multiprocessing
import os
import re
import threading
import time
import traceback
import unittest
import stem.prereq
import stem.util.conf
import stem.util.enum
import stem.util.system
CONFIG = stem.util.conf.config_dict('test', {
'pep8.ignore': [],
'pep8.ignore': [], # TODO: drop with stem 2.x, legacy alias for pycodestyle.ignore
'pycodestyle.ignore': [],
'pyflakes.ignore': [],
'exclude_paths': [],
})
Issue = collections.namedtuple('Issue', [
'line_number',
'message',
'line',
])
TEST_RUNTIMES = {}
ASYNC_TESTS = {}
AsyncStatus = stem.util.enum.UppercaseEnum('PENDING', 'RUNNING', 'FINISHED')
AsyncResult = collections.namedtuple('AsyncResult', 'type msg')
# TODO: Providing a copy of SkipTest that works with python 2.6. This will be
# dropped when we remove python 2.6 support.
if stem.prereq._is_python_26():
class SkipTest(Exception):
'Notes that the test was skipped.'
else:
SkipTest = unittest.case.SkipTest
def assert_equal(expected, actual, msg = None):
"""
Function form of a TestCase's assertEqual.
.. versionadded:: 1.6.0
:param object expected: expected value
:param object actual: actual value
:param str msg: message if assertion fails
:raises: **AssertionError** if values aren't equal
"""
if expected != actual:
raise AssertionError("Expected '%s' but was '%s'" % (expected, actual) if msg is None else msg)
def assert_in(expected, actual, msg = None):
"""
Asserts that a given value is within this content.
.. versionadded:: 1.6.0
:param object expected: expected value
:param object actual: actual value
:param str msg: message if assertion fails
:raises: **AssertionError** if the expected value isn't in the actual
"""
if expected not in actual:
raise AssertionError("Expected '%s' to be within '%s'" % (expected, actual) if msg is None else msg)
def skip(msg):
"""
Function form of a TestCase's skipTest.
.. versionadded:: 1.6.0
:param str msg: reason test is being skipped
:raises: **unittest.case.SkipTest** for this reason
"""
raise SkipTest(msg)
def asynchronous(func):
test = stem.util.test_tools.AsyncTest(func)
ASYNC_TESTS[test.name] = test
return test.method
class AsyncTest(object):
"""
Test that's run asynchronously. These are functions (no self reference)
performed like the following...
::
class MyTest(unittest.TestCase):
@staticmethod
def run_tests():
MyTest.test_addition = stem.util.test_tools.AsyncTest(MyTest.test_addition).method
@staticmethod
def test_addition():
if 1 + 1 != 2:
raise AssertionError('tisk, tisk')
MyTest.run()
.. versionadded:: 1.6.0
"""
def __init__(self, runner, args = None, threaded = False):
self.name = '%s.%s' % (runner.__module__, runner.__name__)
self._runner = runner
self._runner_args = args
self._threaded = threaded
self.method = lambda test: self.result(test) # method that can be mixed into TestCases
self._process = None
self._process_pipe = None
self._process_lock = threading.RLock()
self._result = None
self._status = AsyncStatus.PENDING
def run(self, *runner_args, **kwargs):
if stem.prereq._is_python_26():
return # not supported under python 2.6
def _wrapper(conn, runner, args):
os.nice(12)
try:
runner(*args) if args else runner()
conn.send(AsyncResult('success', None))
except AssertionError as exc:
conn.send(AsyncResult('failure', str(exc)))
except SkipTest as exc:
conn.send(AsyncResult('skipped', str(exc)))
except:
conn.send(AsyncResult('error', traceback.format_exc()))
finally:
conn.close()
with self._process_lock:
if self._status == AsyncStatus.PENDING:
if runner_args:
self._runner_args = runner_args
if 'threaded' in kwargs:
self._threaded = kwargs['threaded']
self._process_pipe, child_pipe = multiprocessing.Pipe()
if self._threaded:
self._process = threading.Thread(
target = _wrapper,
args = (child_pipe, self._runner, self._runner_args),
name = 'Background test of %s' % self.name,
)
self._process.setDaemon(True)
else:
self._process = multiprocessing.Process(target = _wrapper, args = (child_pipe, self._runner, self._runner_args))
self._process.start()
self._status = AsyncStatus.RUNNING
def pid(self):
with self._process_lock:
return self._process.pid if (self._process and not self._threaded) else None
def join(self):
self.result(None)
def result(self, test):
if stem.prereq._is_python_26():
return # not supported under python 2.6
with self._process_lock:
if self._status == AsyncStatus.PENDING:
self.run()
if self._status == AsyncStatus.RUNNING:
self._result = self._process_pipe.recv()
self._process.join()
self._status = AsyncStatus.FINISHED
if test and self._result.type == 'failure':
test.fail(self._result.msg)
elif test and self._result.type == 'error':
test.fail(self._result.msg)
elif test and self._result.type == 'skipped':
test.skipTest(self._result.msg)
class Issue(collections.namedtuple('Issue', ['line_number', 'message', 'line'])):
"""
Issue encountered by pyflakes or pycodestyle.
:var int line_number: line number the issue occured on
:var str message: description of the issue
:var str line: content of the line the issue is about
"""
class TimedTestRunner(unittest.TextTestRunner):
"""
Test runner that tracks the runtime of individual tests. When tests are run
with this their runtimes are made available through
:func:`stem.util.test_tools.test_runtimes`.
.. versionadded:: 1.6.0
"""
def run(self, test):
for t in test._tests:
original_type = type(t)
class _TestWrapper(original_type):
def run(self, result = None):
start_time = time.time()
result = super(type(self), self).run(result)
TEST_RUNTIMES[self.id()] = time.time() - start_time
return result
# TODO: remove and drop unnecessary 'returns' when dropping python 2.6
# support
def skipTest(self, message):
if not stem.prereq._is_python_26():
return super(original_type, self).skipTest(message)
# TODO: remove when dropping python 2.6 support
def assertItemsEqual(self, expected, actual):
if stem.prereq._is_python_26():
self.assertEqual(set(expected), set(actual))
else:
return super(original_type, self).assertItemsEqual(expected, actual)
def assertRaisesWith(self, exc_type, exc_msg, func, *args, **kwargs):
"""
Asserts the given invocation raises the expected exception. This is
similar to unittest's assertRaises and assertRaisesRegexp, but checks
for an exact match.
This method is **not** being vended to external users and may be
changed without notice. If you want this method to be part of our
vended API then please let us know.
"""
return self.assertRaisesRegexp(exc_type, '^%s$' % re.escape(exc_msg), func, *args, **kwargs)
def assertRaisesRegexp(self, exc_type, exc_msg, func, *args, **kwargs):
if stem.prereq._is_python_26():
try:
func(*args, **kwargs)
self.fail('Expected a %s to be raised but nothing was' % exc_type)
except exc_type as exc:
self.assertTrue(re.search(exc_msg, str(exc), re.MULTILINE))
else:
return super(original_type, self).assertRaisesRegexp(exc_type, exc_msg, func, *args, **kwargs)
def id(self):
return '%s.%s.%s' % (original_type.__module__, original_type.__name__, self._testMethodName)
def __str__(self):
return '%s (%s.%s)' % (self._testMethodName, original_type.__module__, original_type.__name__)
t.__class__ = _TestWrapper
return super(TimedTestRunner, self).run(test)
def test_runtimes():
"""
Provides the runtimes of tests executed through TimedTestRunners.
:returns: **dict** of fully qualified test names to floats for the runtime in
seconds
.. versionadded:: 1.6.0
"""
return dict(TEST_RUNTIMES)
def clean_orphaned_pyc(paths):
"""
Deletes any file with a *.pyc extension without a corresponding *.py. This
Deletes any file with a \*.pyc extension without a corresponding \*.py. This
helps to address a common gotcha when deleting python files...
* You delete module 'foo.py' and run the tests to ensure that you haven't
@ -90,50 +373,46 @@ def is_pyflakes_available():
:returns: **True** if we can use pyflakes and **False** otherwise
"""
try:
import pyflakes.api
import pyflakes.reporter
return True
except ImportError:
return False
return _module_exists('pyflakes.api') and _module_exists('pyflakes.reporter')
def is_pep8_available():
def is_pycodestyle_available():
"""
Checks if pep8 is available.
Checks if pycodestyle is available.
:returns: **True** if we can use pep8 and **False** otherwise
:returns: **True** if we can use pycodestyle and **False** otherwise
"""
try:
import pep8
if not hasattr(pep8, 'BaseReport'):
raise ImportError()
return True
except ImportError:
if _module_exists('pycodestyle'):
import pycodestyle
elif _module_exists('pep8'):
import pep8 as pycodestyle
else:
return False
return hasattr(pycodestyle, 'BaseReport')
def stylistic_issues(paths, check_two_space_indents = False, check_newlines = False, check_trailing_whitespace = False, check_exception_keyword = False, prefer_single_quotes = False):
def stylistic_issues(paths, check_newlines = False, check_exception_keyword = False, prefer_single_quotes = False):
"""
Checks for stylistic issues that are an issue according to the parts of PEP8
we conform to. You can suppress PEP8 issues by making a 'test' configuration
that sets 'pep8.ignore'.
we conform to. You can suppress pycodestyle issues by making a 'test'
configuration that sets 'pycodestyle.ignore'.
For example, with a 'test/settings.cfg' of...
::
# PEP8 compliance issues that we're ignoring...
# pycodestyle compliance issues that we're ignoring...
#
# * E111 and E121 four space indentations
# * E501 line is over 79 characters
pep8.ignore E111
pep8.ignore E121
pep8.ignore E501
pycodestyle.ignore E111
pycodestyle.ignore E121
pycodestyle.ignore E501
pycodestyle.ignore run_tests.py => E402: import stem.util.enum
... you can then run tests with...
@ -146,9 +425,6 @@ def stylistic_issues(paths, check_two_space_indents = False, check_newlines = Fa
issues = stylistic_issues('my_project')
If an 'exclude_paths' was set in our test config then we exclude any absolute
paths matching those regexes.
.. versionchanged:: 1.3.0
Renamed from get_stylistic_issues() to stylistic_issues(). The old name
still works as an alias, but will be dropped in Stem version 2.0.0.
@ -160,89 +436,106 @@ def stylistic_issues(paths, check_two_space_indents = False, check_newlines = Fa
.. versionchanged:: 1.4.0
Added the prefer_single_quotes option.
.. versionchanged:: 1.6.0
Changed 'pycodestyle.ignore' code snippets to only need to match against
the prefix.
:param list paths: paths to search for stylistic issues
:param bool check_two_space_indents: check for two space indentations and
that no tabs snuck in
:param bool check_newlines: check that we have standard newlines (\\n), not
windows (\\r\\n) nor classic mac (\\r)
:param bool check_trailing_whitespace: check that our lines don't end with
trailing whitespace
:param bool check_exception_keyword: checks that we're using 'as' for
exceptions rather than a comma
:param bool prefer_single_quotes: standardize on using single rather than
double quotes for strings, when reasonable
:returns: **dict** of the form ``path => [(line_number, message)...]``
:returns: dict of paths list of :class:`stem.util.test_tools.Issue` instances
"""
issues = {}
if is_pep8_available():
import pep8
ignore_rules = []
ignore_for_file = []
class StyleReport(pep8.BaseReport):
def __init__(self, options):
super(StyleReport, self).__init__(options)
for rule in CONFIG['pycodestyle.ignore'] + CONFIG['pep8.ignore']:
if '=>' in rule:
path, rule_entry = rule.split('=>', 1)
if ':' in rule_entry:
rule, code = rule_entry.split(':', 1)
ignore_for_file.append((path.strip(), rule.strip(), code.strip()))
else:
ignore_rules.append(rule)
def is_ignored(path, rule, code):
for ignored_path, ignored_rule, ignored_code in ignore_for_file:
if path.endswith(ignored_path) and ignored_rule == rule and code.strip().startswith(ignored_code):
return True
return False
if is_pycodestyle_available():
if _module_exists('pep8'):
import pep8 as pycodestyle
else:
import pycodestyle
class StyleReport(pycodestyle.BaseReport):
def init_file(self, filename, lines, expected, line_offset):
super(StyleReport, self).init_file(filename, lines, expected, line_offset)
if not check_newlines and not check_exception_keyword and not prefer_single_quotes:
return
is_block_comment = False
for index, line in enumerate(lines):
content = line.split('#', 1)[0].strip()
if check_newlines and '\r' in line:
issues.setdefault(filename, []).append(Issue(index + 1, 'contains a windows newline', line))
if not content:
continue # blank line
if '"""' in content:
is_block_comment = not is_block_comment
if check_exception_keyword and content.startswith('except') and content.endswith(', exc:'):
# Python 2.6 - 2.7 supports two forms for exceptions...
#
# except ValueError, exc:
# except ValueError as exc:
#
# The former is the old method and no longer supported in python 3
# going forward.
# TODO: This check only works if the exception variable is called
# 'exc'. We should generalize this via a regex so other names work
# too.
issues.setdefault(filename, []).append(Issue(index + 1, "except clause should use 'as', not comma", line))
if prefer_single_quotes and not is_block_comment:
if '"' in content and "'" not in content and '"""' not in content and not content.endswith('\\'):
# Checking if the line already has any single quotes since that
# usually means double quotes are preferable for the content (for
# instance "I'm hungry"). Also checking for '\' at the end since
# that can indicate a multi-line string.
issues.setdefault(filename, []).append(Issue(index + 1, 'use single rather than double quotes', line))
def error(self, line_number, offset, text, check):
code = super(StyleReport, self).error(line_number, offset, text, check)
if code:
issues.setdefault(self.filename, []).append(Issue(line_number, '%s %s' % (code, text), text))
line = linecache.getline(self.filename, line_number)
style_checker = pep8.StyleGuide(ignore = CONFIG['pep8.ignore'], reporter = StyleReport)
if not is_ignored(self.filename, code, line):
issues.setdefault(self.filename, []).append(Issue(line_number, text, line))
style_checker = pycodestyle.StyleGuide(ignore = ignore_rules, reporter = StyleReport)
style_checker.check_files(list(_python_files(paths)))
if check_two_space_indents or check_newlines or check_trailing_whitespace or check_exception_keyword:
for path in _python_files(paths):
with open(path) as f:
file_contents = f.read()
lines = file_contents.split('\n')
is_block_comment = False
for index, line in enumerate(lines):
whitespace, content = re.match('^(\s*)(.*)$', line).groups()
# TODO: This does not check that block indentations are two spaces
# because differentiating source from string blocks ("""foo""") is more
# of a pita than I want to deal with right now.
if '"""' in content:
is_block_comment = not is_block_comment
if check_two_space_indents and '\t' in whitespace:
issues.setdefault(path, []).append(Issue(index + 1, 'indentation has a tab', line))
elif check_newlines and '\r' in content:
issues.setdefault(path, []).append(Issue(index + 1, 'contains a windows newline', line))
elif check_trailing_whitespace and content != content.rstrip():
issues.setdefault(path, []).append(Issue(index + 1, 'line has trailing whitespace', line))
elif check_exception_keyword and content.lstrip().startswith('except') and content.endswith(', exc:'):
# Python 2.6 - 2.7 supports two forms for exceptions...
#
# except ValueError, exc:
# except ValueError as exc:
#
# The former is the old method and no longer supported in python 3
# going forward.
# TODO: This check only works if the exception variable is called
# 'exc'. We should generalize this via a regex so other names work
# too.
issues.setdefault(path, []).append(Issue(index + 1, "except clause should use 'as', not comma", line))
if prefer_single_quotes and line and not is_block_comment:
content = line.strip().split('#', 1)[0]
if '"' in content and "'" not in content and '"""' not in content and not content.endswith('\\'):
# Checking if the line already has any single quotes since that
# usually means double quotes are preferable for the content (for
# instance "I'm hungry"). Also checking for '\' at the end since
# that can indicate a multi-line string.
issues.setdefault(path, []).append(Issue(index + 1, "use single rather than double quotes", line))
return issues
@ -254,10 +547,7 @@ def pyflakes_issues(paths):
::
pyflakes.ignore stem/util/test_tools.py => 'pyflakes' imported but unused
pyflakes.ignore stem/util/test_tools.py => 'pep8' imported but unused
If an 'exclude_paths' was set in our test config then we exclude any absolute
paths matching those regexes.
pyflakes.ignore stem/util/test_tools.py => 'pycodestyle' imported but unused
.. versionchanged:: 1.3.0
Renamed from get_pyflakes_issues() to pyflakes_issues(). The old name
@ -267,9 +557,12 @@ def pyflakes_issues(paths):
Changing tuples in return value to be namedtuple instances, and adding the
line that had the issue.
.. versionchanged:: 1.5.0
Support matching against prefix or suffix issue strings.
:param list paths: paths to search for problems
:returns: dict of the form ``path => [(line_number, message)...]``
:returns: dict of paths list of :class:`stem.util.test_tools.Issue` instances
"""
issues = {}
@ -300,15 +593,24 @@ def pyflakes_issues(paths):
# path ends with any of them.
for ignored_path, ignored_issues in self._ignored_issues.items():
if path.endswith(ignored_path) and issue in ignored_issues:
return True
if path.endswith(ignored_path):
if issue in ignored_issues:
return True
for prefix in [i[:1] for i in ignored_issues if i.endswith('*')]:
if issue.startswith(prefix):
return True
for suffix in [i[1:] for i in ignored_issues if i.startswith('*')]:
if issue.endswith(suffix):
return True
return False
def _register_issue(self, path, line_number, issue, line):
if not self._is_ignored(path, issue):
if path and line_number and not line:
line = linecache.getline(path, line_number)
line = linecache.getline(path, line_number).strip()
issues.setdefault(path, []).append(Issue(line_number, issue, line))
@ -320,6 +622,22 @@ def pyflakes_issues(paths):
return issues
def _module_exists(module_name):
"""
Checks if a module exists.
:param str module_name: module to check existence of
:returns: **True** if module exists and **False** otherwise
"""
try:
__import__(module_name)
return True
except ImportError:
return False
def _python_files(paths):
for path in paths:
for file_path in stem.util.system.files_with_suffix(path, '.py'):
@ -333,9 +651,12 @@ def _python_files(paths):
if not skip:
yield file_path
# TODO: drop with stem 2.x
# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old
# names for backward compatibility.
# names for backward compatibility, and account for pep8 being renamed to
# pycodestyle.
get_stylistic_issues = stylistic_issues
get_pyflakes_issues = pyflakes_issues
is_pep8_available = is_pycodestyle_available

View file

@ -1,4 +1,4 @@
# Copyright 2012-2015, Damian Johnson and The Tor Project
# Copyright 2012-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -21,6 +21,8 @@ Miscellaneous utility functions for working with tor.
import re
import stem.util.str_tools
# The control-spec defines the following as...
#
# Fingerprint = "$" 40*HEXDIG
@ -54,6 +56,9 @@ def is_valid_fingerprint(entry, check_prefix = False):
:returns: **True** if the string could be a relay fingerprint, **False** otherwise
"""
if isinstance(entry, bytes):
entry = stem.util.str_tools._to_unicode(entry)
try:
if check_prefix:
if not entry or entry[0] != '$':
@ -75,6 +80,9 @@ def is_valid_nickname(entry):
:returns: **True** if the string could be a nickname, **False** otherwise
"""
if isinstance(entry, bytes):
entry = stem.util.str_tools._to_unicode(entry)
try:
return bool(NICKNAME_PATTERN.match(entry))
except TypeError:
@ -88,6 +96,9 @@ def is_valid_circuit_id(entry):
:returns: **True** if the string could be a circuit id, **False** otherwise
"""
if isinstance(entry, bytes):
entry = stem.util.str_tools._to_unicode(entry)
try:
return bool(CIRC_ID_PATTERN.match(entry))
except TypeError:
@ -124,6 +135,9 @@ def is_valid_hidden_service_address(entry):
:returns: **True** if the string could be a hidden service address, **False** otherwise
"""
if isinstance(entry, bytes):
entry = stem.util.str_tools._to_unicode(entry)
try:
return bool(HS_ADDRESS_PATTERN.match(entry))
except TypeError:

View file

@ -1,4 +1,4 @@
# Copyright 2011-2015, Damian Johnson and The Tor Project
# Copyright 2011-2018, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
@ -26,10 +26,16 @@ easily parsed and compared, for instance...
Enumerations for the version requirements of features.
.. deprecated:: 1.6.0
Requirement entries belonging to tor versions which have been obsolete for
at least six months will be removed when we break backward compatibility
in the 2.x stem release.
===================================== ===========
Requirement Description
===================================== ===========
**AUTH_SAFECOOKIE** SAFECOOKIE authentication method
**DESCRIPTOR_COMPRESSION** `Expanded compression support for ZSTD and LZMA <https://gitweb.torproject.org/torspec.git/commit/?id=1cb56afdc1e55e303e3e6b69e90d983ee217d93f>`_
**DROPGUARDS** DROPGUARDS requests
**EVENT_AUTHDIR_NEWDESCS** AUTHDIR_NEWDESC events
**EVENT_BUILDTIMEOUT_SET** BUILDTIMEOUT_SET events
@ -39,6 +45,7 @@ easily parsed and compared, for instance...
**EVENT_DESCCHANGED** DESCCHANGED events
**EVENT_GUARD** GUARD events
**EVENT_HS_DESC_CONTENT** HS_DESC_CONTENT events
**EVENT_NETWORK_LIVENESS** NETWORK_LIVENESS events
**EVENT_NEWCONSENSUS** NEWCONSENSUS events
**EVENT_NS** NS events
**EVENT_SIGNAL** SIGNAL events
@ -54,11 +61,18 @@ easily parsed and compared, for instance...
**FEATURE_EXTENDED_EVENTS** 'EXTENDED_EVENTS' optional feature
**FEATURE_VERBOSE_NAMES** 'VERBOSE_NAMES' optional feature
**GETINFO_CONFIG_TEXT** 'GETINFO config-text' query
**GETINFO_GEOIP_AVAILABLE** 'GETINFO ip-to-country/ipv4-available' query and its ipv6 counterpart
**GETINFO_MICRODESCRIPTORS** 'GETINFO md/all' query
**HIDDEN_SERVICE_V3** Support for v3 hidden services
**HSFETCH** HSFETCH requests
**HSPOST** HSPOST requests
**ADD_ONION** ADD_ONION and DEL_ONION requests
**ADD_ONION_BASIC_AUTH** ADD_ONION supports basic authentication
**ADD_ONION_NON_ANONYMOUS** ADD_ONION supports non-anonymous mode
**ADD_ONION_MAX_STREAMS** ADD_ONION support for MaxStreamsCloseCircuit
**LOADCONF** LOADCONF requests
**MICRODESCRIPTOR_IS_DEFAULT** Tor gets microdescriptors by default rather than server descriptors
**SAVECONF_FORCE** Added the 'FORCE' flag to SAVECONF
**TAKEOWNERSHIP** TAKEOWNERSHIP requests
**TORRC_CONTROL_SOCKET** 'ControlSocket <path>' config option
**TORRC_PORT_FORWARDING** 'PortForwarding' config option
@ -70,18 +84,21 @@ easily parsed and compared, for instance...
import os
import re
import stem.prereq
import stem.util
import stem.util.enum
import stem.util.system
try:
# added in python 3.2
if stem.prereq._is_lru_cache_available():
from functools import lru_cache
except ImportError:
else:
from stem.util.lru_cache import lru_cache
# cache for the get_system_tor_version function
VERSION_CACHE = {}
VERSION_PATTERN = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?(( \(\S*\))*)$')
def get_system_tor_version(tor_cmd = 'tor'):
"""
@ -107,7 +124,7 @@ def get_system_tor_version(tor_cmd = 'tor'):
if os.path.isabs(tor_cmd):
exc = "Unable to check tor's version. '%s' doesn't exist." % tor_cmd
else:
exc = "Unable to run '%s'. Mabye tor isn't in your PATH?" % version_cmd
exc = "Unable to run '%s'. Maybe tor isn't in your PATH?" % version_cmd
raise IOError(exc)
@ -144,13 +161,17 @@ class Version(object):
<https://gitweb.torproject.org/torspec.git/tree/version-spec.txt>`_,
such as "0.1.4" or "0.2.2.23-alpha (git-7dcd105be34a4f44)".
.. versionchanged:: 1.6.0
Added all_extra parameter.
:var int major: major version
:var int minor: minor version
:var int micro: micro version
:var int patch: patch level (**None** if undefined)
:var str status: status tag such as 'alpha' or 'beta-dev' (**None** if undefined)
:var str extra: extra information without its parentheses such as
:var str extra: first extra information without its parentheses such as
'git-8be6058d8f31e578' (**None** if undefined)
:var list all_extra: all extra information entries, without their parentheses
:var str git_commit: git commit id (**None** if it wasn't provided)
:param str version_str: version to be parsed
@ -160,11 +181,10 @@ class Version(object):
def __init__(self, version_str):
self.version_str = version_str
version_parts = re.match(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?( \(\S*\))?$', version_str)
self._hash = None
version_parts = VERSION_PATTERN.match(version_str)
if version_parts:
major, minor, micro, patch, status, extra = version_parts.groups()
major, minor, micro, patch, status, extra_str, _ = version_parts.groups()
# The patch and status matches are optional (may be None) and have an extra
# proceeding period or dash if they exist. Stripping those off.
@ -175,20 +195,19 @@ class Version(object):
if status:
status = status[1:]
if extra:
extra = extra[2:-1]
self.major = int(major)
self.minor = int(minor)
self.micro = int(micro)
self.patch = patch
self.status = status
self.extra = extra
self.all_extra = [entry[1:-1] for entry in extra_str.strip().split()] if extra_str else []
self.extra = self.all_extra[0] if self.all_extra else None
self.git_commit = None
if extra and re.match('^git-[0-9a-f]{16}$', extra):
self.git_commit = extra[4:]
else:
self.git_commit = None
for extra in self.all_extra:
if extra and re.match('^git-[0-9a-f]{16}$', extra):
self.git_commit = extra[4:]
break
else:
raise ValueError("'%s' isn't a properly formatted tor version" % version_str)
@ -230,9 +249,15 @@ class Version(object):
return method(my_status, other_status)
def __hash__(self):
return stem.util._hash_attr(self, 'major', 'minor', 'micro', 'patch', 'status', cache = True)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
"""
Checks if this version meets the requirements for a given feature. We can
@ -259,22 +284,6 @@ class Version(object):
return self._compare(other, lambda s, o: s >= o)
def __hash__(self):
if self._hash is None:
my_hash = 0
for attr in ('major', 'minor', 'micro', 'patch', 'status'):
my_hash *= 1024
attr_value = getattr(self, attr)
if attr_value is not None:
my_hash += hash(attr_value)
self._hash = my_hash
return self._hash
class _VersionRequirements(object):
"""
@ -324,21 +333,24 @@ class _VersionRequirements(object):
:param bool to_inclusive: if comparison is inclusive with the ending version
"""
if from_inclusive and to_inclusive:
new_rule = lambda v: from_version <= v <= to_version
elif from_inclusive:
new_rule = lambda v: from_version <= v < to_version
else:
new_rule = lambda v: from_version < v < to_version
def new_rule(v):
if from_inclusive and to_inclusive:
return from_version <= v <= to_version
elif from_inclusive:
return from_version <= v < to_version
else:
return from_version < v < to_version
self.rules.append(new_rule)
safecookie_req = _VersionRequirements()
safecookie_req.in_range(Version('0.2.2.36'), Version('0.2.3.0'))
safecookie_req.greater_than(Version('0.2.3.13'))
Requirement = stem.util.enum.Enum(
('AUTH_SAFECOOKIE', safecookie_req),
('DESCRIPTOR_COMPRESSION', Version('0.3.1.1-alpha')),
('DROPGUARDS', Version('0.2.5.1-alpha')),
('EVENT_AUTHDIR_NEWDESCS', Version('0.1.1.10-alpha')),
('EVENT_BUILDTIMEOUT_SET', Version('0.2.2.7-alpha')),
@ -349,6 +361,7 @@ Requirement = stem.util.enum.Enum(
('EVENT_GUARD', Version('0.1.2.5-alpha')),
('EVENT_HS_DESC_CONTENT', Version('0.2.7.1-alpha')),
('EVENT_NS', Version('0.1.2.3-alpha')),
('EVENT_NETWORK_LIVENESS', Version('0.2.7.2-alpha')),
('EVENT_NEWCONSENSUS', Version('0.2.1.13-alpha')),
('EVENT_SIGNAL', Version('0.2.3.1-alpha')),
('EVENT_STATUS', Version('0.1.2.3-alpha')),
@ -363,11 +376,18 @@ Requirement = stem.util.enum.Enum(
('FEATURE_EXTENDED_EVENTS', Version('0.2.2.1-alpha')),
('FEATURE_VERBOSE_NAMES', Version('0.2.2.1-alpha')),
('GETINFO_CONFIG_TEXT', Version('0.2.2.7-alpha')),
('GETINFO_GEOIP_AVAILABLE', Version('0.3.2.1-alpha')),
('GETINFO_MICRODESCRIPTORS', Version('0.3.5.1-alpha')),
('HIDDEN_SERVICE_V3', Version('0.3.3.1-alpha')),
('HSFETCH', Version('0.2.7.1-alpha')),
('HSPOST', Version('0.2.7.1-alpha')),
('ADD_ONION', Version('0.2.7.1-alpha')),
('ADD_ONION_BASIC_AUTH', Version('0.2.9.1-alpha')),
('ADD_ONION_NON_ANONYMOUS', Version('0.2.9.3-alpha')),
('ADD_ONION_MAX_STREAMS', Version('0.2.7.2-alpha')),
('LOADCONF', Version('0.2.1.1')),
('MICRODESCRIPTOR_IS_DEFAULT', Version('0.2.3.3')),
('SAVECONF_FORCE', Version('0.3.1.1-alpha')),
('TAKEOWNERSHIP', Version('0.2.2.28-beta')),
('TORRC_CONTROL_SOCKET', Version('0.2.0.30')),
('TORRC_PORT_FORWARDING', Version('0.2.3.1-alpha')),