From 1f23120cc381b2a0a8ca5d1b8bff0c9ffbc23be6 Mon Sep 17 00:00:00 2001
From: j
Date: Mon, 23 Nov 2015 22:13:53 +0100
Subject: [PATCH] add tor deps

---
 .../ed25519-1.4.egg-info/installed-files.txt | 2 +-
 .../ed25519/_ed25519.cpython-34m.so | Bin 255440 -> 255560 bytes
 .../_ed25519.cpython-35m-x86_64-linux-gnu.so | Bin 264336 -> 255808 bytes
 .../setuptools-18.5.dist-info/RECORD | 86 +-
 .../sqlalchemy/cprocessors.cpython-34m.so | Bin 42720 -> 42824 bytes
 .../sqlalchemy/cresultproxy.cpython-34m.so | Bin 53872 -> 53984 bytes
 .../sqlalchemy/cutils.cpython-34m.so | Bin 25160 -> 25280 bytes
 Linux_x86_64/update.sh | 1 +
 .../PyPDF2-1.23.egg-info/installed-files.txt | 2 +-
 .../PySocks-1.5.6.egg-info/PKG-INFO | 11 +
 .../PySocks-1.5.6.egg-info/SOURCES.txt | 6 +
 .../dependency_links.txt | 0
 .../installed-files.txt | 9 +
 .../PySocks-1.5.6.egg-info/top_level.txt | 2 +
 .../DESCRIPTION.rst | 48 +
 .../METADATA | 22 +-
 .../certifi-2015.11.20.dist-info/RECORD | 15 +
 .../WHEEL | 2 +-
 .../metadata.json | 1 +
 .../top_level.txt | 0
 .../DESCRIPTION.rst | 30 -
 .../certifi-2015.9.6.2.dist-info/RECORD | 15 -
 .../metadata.json | 1 -
 .../site-packages/certifi/__init__.py | 2 +-
 .../site-packages/certifi/cacert.pem | 237 +-
 .../python3.4/site-packages/certifi/weak.pem | 237 +-
 .../installed-files.txt | 4 +-
 .../PKG-INFO | 6 +-
 .../SOURCES.txt | 1 +
 .../dependency_links.txt | 1 +
 .../installed-files.txt | 4 +-
 .../requires.txt | 0
 .../top_level.txt | 0
 .../python3.4/site-packages/ox/__version.py | 1 +
 .../lib/python3.4/site-packages/ox/jsonc.py | 22 +-
 .../requests-2.3.0.dist-info/RECORD | 134 +-
 .../setuptools-18.5.dist-info/RECORD | 96 +-
 Shared/lib/python3.4/site-packages/socks.py | 712 ++++
 .../python3.4/site-packages/sockshandler.py | 79 +
 .../stem-1.4.0.egg-info/PKG-INFO | 13 +
 .../stem-1.4.0.egg-info/SOURCES.txt | 52 +
 .../stem-1.4.0.egg-info/dependency_links.txt | 1 +
 .../stem-1.4.0.egg-info/installed-files.txt | 98 +
 .../stem-1.4.0.egg-info/top_level.txt | 1 +
 .../python3.4/site-packages/stem/__init__.py | 833 ++++
 .../site-packages/stem/connection.py | 1284 ++++++
 .../python3.4/site-packages/stem/control.py | 3631 +++++++++++++++++
 .../site-packages/stem/descriptor/__init__.py | 841 ++++
 .../site-packages/stem/descriptor/export.py | 110 +
 .../stem/descriptor/extrainfo_descriptor.py | 939 +++++
 .../descriptor/hidden_service_descriptor.py | 422 ++
 .../stem/descriptor/microdescriptor.py | 314 ++
 .../stem/descriptor/networkstatus.py | 1444 +++++++
 .../site-packages/stem/descriptor/reader.py | 574 +++
 .../site-packages/stem/descriptor/remote.py | 777 ++++
 .../stem/descriptor/router_status_entry.py | 625 +++
 .../stem/descriptor/server_descriptor.py | 822 ++++
 .../site-packages/stem/descriptor/tordnsel.py | 117 +
 .../site-packages/stem/exit_policy.py | 1094 +++++
 .../stem/interpreter/__init__.py | 141 +
 .../stem/interpreter/arguments.py | 94 +
 .../stem/interpreter/autocomplete.py | 115 +
 .../stem/interpreter/commands.py | 354 ++
 .../site-packages/stem/interpreter/help.py | 145 +
 .../stem/interpreter/settings.cfg | 326 ++
 .../python3.4/site-packages/stem/prereq.py | 132 +
 .../python3.4/site-packages/stem/process.py | 272 ++
 .../site-packages/stem/response/__init__.py | 588 +++
 .../site-packages/stem/response/add_onion.py | 43 +
 .../stem/response/authchallenge.py | 56 +
 .../site-packages/stem/response/events.py | 1331 ++++++
 .../site-packages/stem/response/getconf.py | 55 +
 .../site-packages/stem/response/getinfo.py | 78 +
 .../site-packages/stem/response/mapaddress.py | 42 +
 .../stem/response/protocolinfo.py | 122 +
 .../python3.4/site-packages/stem/socket.py | 663 +++
 .../site-packages/stem/util/__init__.py | 20 +
 .../python3.4/site-packages/stem/util/conf.py | 745 ++++
 .../site-packages/stem/util/connection.py | 651 +++
 .../python3.4/site-packages/stem/util/enum.py | 172 +
 .../python3.4/site-packages/stem/util/log.py | 253 ++
 .../site-packages/stem/util/lru_cache.py | 182 +
 .../site-packages/stem/util/ordereddict.py | 133 +
 .../site-packages/stem/util/ports.cfg | 313 ++
 .../python3.4/site-packages/stem/util/proc.py | 547 +++
 .../site-packages/stem/util/str_tools.py | 558 +++
 .../site-packages/stem/util/system.py | 1176 ++++++
 .../python3.4/site-packages/stem/util/term.py | 116 +
 .../site-packages/stem/util/test_tools.py | 341 ++
 .../site-packages/stem/util/tor_tools.py | 151 +
 .../python3.4/site-packages/stem/version.py | 376 ++
 91 files changed, 25537 insertions(+), 535 deletions(-)
 create mode 100644 Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/PKG-INFO
 create mode 100644 Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/SOURCES.txt
 rename Shared/lib/python3.4/site-packages/{ox-2.3.x.egg-info => PySocks-1.5.6.egg-info}/dependency_links.txt (100%)
 create mode 100644 Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/installed-files.txt
 create mode 100644 Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/top_level.txt
 create mode 100644 Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/DESCRIPTION.rst
 rename Shared/lib/python3.4/site-packages/{certifi-2015.9.6.2.dist-info => certifi-2015.11.20.dist-info}/METADATA (57%)
 create mode 100644 Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/RECORD
 rename Shared/lib/python3.4/site-packages/{certifi-2015.9.6.2.dist-info => certifi-2015.11.20.dist-info}/WHEEL (70%)
 create mode 100644 Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/metadata.json
 rename Shared/lib/python3.4/site-packages/{certifi-2015.9.6.2.dist-info => certifi-2015.11.20.dist-info}/top_level.txt (100%)
 delete mode 100644 Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/DESCRIPTION.rst
 delete mode 100644 Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/RECORD
 delete mode 100644 Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/metadata.json
 rename Shared/lib/python3.4/site-packages/{ox-2.3.x.egg-info => ox-2.3.b_769_.egg-info}/PKG-INFO (80%)
 rename Shared/lib/python3.4/site-packages/{ox-2.3.x.egg-info => ox-2.3.b_769_.egg-info}/SOURCES.txt (97%)
 create mode 100644 Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/dependency_links.txt
 rename Shared/lib/python3.4/site-packages/{ox-2.3.x.egg-info => ox-2.3.b_769_.egg-info}/installed-files.txt (98%)
 rename Shared/lib/python3.4/site-packages/{ox-2.3.x.egg-info => ox-2.3.b_769_.egg-info}/requires.txt (100%)
 rename Shared/lib/python3.4/site-packages/{ox-2.3.x.egg-info => ox-2.3.b_769_.egg-info}/top_level.txt (100%)
 create mode 100644 Shared/lib/python3.4/site-packages/ox/__version.py
 create mode 100644 Shared/lib/python3.4/site-packages/socks.py
 create mode 100644 Shared/lib/python3.4/site-packages/sockshandler.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/PKG-INFO
 create mode 100644 Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/SOURCES.txt
 create mode 100644 Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/dependency_links.txt
 create mode 100644 Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/installed-files.txt
 create mode 100644 Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/top_level.txt
 create mode 100644 Shared/lib/python3.4/site-packages/stem/__init__.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/connection.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/control.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/export.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/extrainfo_descriptor.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/hidden_service_descriptor.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/microdescriptor.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/reader.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/remote.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/router_status_entry.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/server_descriptor.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/exit_policy.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/interpreter/arguments.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/interpreter/autocomplete.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/interpreter/commands.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/interpreter/help.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg
 create mode 100644 Shared/lib/python3.4/site-packages/stem/prereq.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/process.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/response/__init__.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/response/add_onion.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/response/authchallenge.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/response/events.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/response/getconf.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/response/getinfo.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/response/mapaddress.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/socket.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/__init__.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/conf.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/connection.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/enum.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/log.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/lru_cache.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/ordereddict.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/ports.cfg
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/proc.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/str_tools.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/system.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/term.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/test_tools.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/util/tor_tools.py
 create mode 100644 Shared/lib/python3.4/site-packages/stem/version.py

diff --git a/Linux_x86_64/lib/python3.4/site-packages/ed25519-1.4.egg-info/installed-files.txt b/Linux_x86_64/lib/python3.4/site-packages/ed25519-1.4.egg-info/installed-files.txt
index 5152de6..dd4431c 100644
--- a/Linux_x86_64/lib/python3.4/site-packages/ed25519-1.4.egg-info/installed-files.txt
+++ b/Linux_x86_64/lib/python3.4/site-packages/ed25519-1.4.egg-info/installed-files.txt
@@ -8,8 +8,8 @@
 ../ed25519/__pycache__/test_ed25519.cpython-34.pyc
 ../ed25519/_ed25519.cpython-34m.so
 ./
-top_level.txt
 PKG-INFO
 dependency_links.txt
+top_level.txt
 SOURCES.txt
 ../../../../bin/edsig
diff --git a/Linux_x86_64/lib/python3.4/site-packages/ed25519/_ed25519.cpython-34m.so b/Linux_x86_64/lib/python3.4/site-packages/ed25519/_ed25519.cpython-34m.so
index 4f8163cebe2f4b758f5b955e6d1767c6da425ab9..c224b53b15ed070b1df8d426e322318c8320acfa 100755
GIT binary patch
delta 13773
 [13773 bytes of base85-encoded binary delta omitted]

delta 13727
 [13727 bytes of base85-encoded binary delta omitted]

diff --git a/Linux_x86_64/lib/python3.4/site-packages/ed25519/_ed25519.cpython-35m-x86_64-linux-gnu.so b/Linux_x86_64/lib/python3.4/site-packages/ed25519/_ed25519.cpython-35m-x86_64-linux-gnu.so
index 2c19c3b04920798b728a41f80e948b08429e4a92..52a9dd70751974f94a286593b77af9c1e1522a8e 100755
GIT binary patch
literal 255808
 [base85-encoded binary data omitted]
zgXbwT^254#?%c}{aBNRl=bJDO^I*;{!dNq57aqX6*&kP6-1eG#3`aTSgJ=AEq{eC( z>opw4R&0WG@O_dD#c&66V9pEUGJ-J_wP6orzyV~&1$2fno1ZiA`IcCNw{Zl`;C$}) zEN0;oe2w_9-u|8hBXI();Ir=IJGlQ_xQ`aFKIZ;&?7?|-M|ot0`|+$8-ypmT*Y}w< z_zmW_Cam#%m^*vIc#YvJn7_F{9G%LTAr;V|DFVUKRWPbh#+u#Pbhh5kqipR<<7VQGVw*sF#aQ7@s#zB~$ zZg>Np=|#we8)$;77>sFn2;U=VksPkE8avS*_OoN>z#O{Y{Ro5OV!-}hi{Ws8#^U}x zKRE-R?PcEs%V6J|51*S7U~CicHHhg^;`cwovF0!ge&^XXZ;m~LE4Yky zcmZ=&1yd1(^@xLdI0pA$8p-e+m#_*M5su6-9(ynW-1`!^kJ{*fFl>bTGFQgf5ETO& zaSuaJ7>D`#5-hUHW5#5(@M(1U}dv3QP7)jtcX$|veA6h$mBOj8YGCZ4i;CVZa zR%TW(`a2d}q6!v3Yj7CA6Kn8ebj42Gp z&>5boH?R=)eI#zfJ=q_{V4sdcxxn9vxfewOY=ZMNhH+%ZBcw$ecy`C20{Wo=R$v#* ziQ|f31MJDI@a#;$6nGxY>s^?ms(1$_;P=Mj-V)&nnxiKw!gJuB-JkP&_N#K7N|x%b5Z zIEQr|3HR6@v0yEYX#fVpcbYji_H+0gf1n}ypd;Ml0~qrKyaW5i^;|m=)9?}Mpd!w~ znwY!uc!s&SieZTED=3-+xX$|4+x*(UzhfP&x3!v$iVXM| z{a_uP-}?BxbH;>Y&cMD{4bN0r-nEZY!v5=moQMnO{{?A~ z5bnpGaSh}D0?u#Gdk%J?6T)CW2csIMU;+wbEPUUD;x2sV4&1+=W1|S#pddyd1pfXu z+;2(rhie;8e3Zp8bj4`6=c#y#q9_i}MYz1sUCR9RAxcB~e16AR*8{OdkvctV5hOw`O z^PEI1xZkZ{9hw~eo`;G|Qf8#y? z1z_!p!82gbnTL+}1f7u`OJROj!I-Dv5scX$wMK)m3zuMir^CHi|3k2z_O^fbfO(4U zD=3-+NXh!E(FPx47?#04`W@EIzMG8JuomWe2bREECxQLe7U_@{&0(zu!u~#mH*pRZ zkQ_Z>9sRuztXB*;uj|x;^VkDBaTo4i}3=taS?G*6i<;E?#Xov!LwQbi{N)3qd0y>G8n@**p7Vo6!u*s_+2VE zz7Bj}nO|e{Ech&x>n9YIlehg#xJXMD^sf;V|GA7q@Y)#l#-Qk!|kO$_) zxqU|@gnbeLf3sHeVI3TQ1bblIVQ3HMc0K#41lAxQhT{ghz%_PYG@Pdivf@XK!FQ;E zFc@D&WQTjZhj}oL6?lmpn2OSfiP>fUI8x0WMS5PzukeBuA;xf#$ zHEM>3n2ZkspC55Q3VX;tn1Vj&j5|1pKkylR-rn{;0#z{po{uN6r>t!&bc1IpJ${6J zR|r4DXRN=m4Z}j%cZYBa_JZ%M4cLoIaBkyChunw_`^{dDgtc#uxk!p7uqXe5>pJgG z@LhEg?)fl0Csz@MjQ9;#;QA{Of%oCM;}MKpFjp<`9G(ULUW0fr7V~c|e#JPL2XkU> zzJNLXC7=lRt?>8a=!X(0g-S3F?)^&)#d(ZD2u{NHjyd@ri{XCV-?#8gnX7X69FOn@ zJm15yAN%kUli+urclXjB#xoy-VZ1Y8Za#uJHUAZ1Oy&!zCDZSy(S~`~y=!q}}q4P(iLaQukAu#R<61N~uNIluFm$6dIB zJ#f!GV9l+Ked0c*!CKcwPV7bonBVvD4JINSc@f>0`-{%Yx+H{oJ&!Y3f#(ktR^ z;TbXSe)bGRp&hJ?{Z$D`P!CgJ5B0)#unzXm2RI7PKovYj9=Oh12*YxugFW^qJo6`D zZSCvr_!!pDnulW)DhEE>Ln#p(YvI|e3D?*Nd%+$Ufw=esuDJ^S9v|l9J=BJ4?#ESR z!L<(eio^Z1 zz-jb>@p>7*-+c)4HWt=#3*N*#@O*ZJvDQK;=3pTT!&)qc=g~TOc9)?UKEwAgU;b_` zo5FK!4R+ykl*KP_uGKIn4e&eaU=OUD`8S3>Ynk3}Z8H|DK7`*nkLx!hB9eC&Wa041swvX7{xkP4F91x0fJ_RVEXgta({toRuA^L|)6>*X2fiBAv(zsmya zX&>E#a}+>pJVhl`!F5=_FzkZs`JS4LQ?T~d&Dwj<1AEN;JV$2iKy4V?0j$C>*yFQN z2peHtea5q09-+twpE1_Wa9^%(PuSb@F&NokKbC^;66dv-6TmTZ;av8qG23^4!hGz; z2vop9#6TEAkQf>90^gu5%-I@TLqU9iK3Ige@D5^OICdgGCZIB^qXoLcdVGzZa7
mJ=x8ic``ok47Dh5Ip|o~54iQk zSdXiigB*B;(eORhAB8aq%h4N+VJ)WOF)qOQ>cL!{hc$Qq{yrA&vpVeGj}QU#X%4=I zbBCh~qWdzB(Ro<|>vRHpVBb`Q{ksZ&7Y{8lA;6sa9`M|3!co{S=GHot#V*(fPw^h! zMoJunwX^=&(GfmtkG?)Hu>zlqa`$}OOa8VD9k3tPJs(=&4H&QgZm&cfoWoA6!FO=n zcBIDdn2s2*x9lTh-Ujz+4^~2c7`OdqOraQyBrrE#&h1?LkOA(`ITpix`@X4*nJ|y` zU_~@SY_x*;^l#(x3@1lg+=0K1Lj`<>PH_E&_y+R=_nh1(Vhh?L5>fCyRRf-lBKR1_ zS`1C$TCV*Jr*Q!8eL2p;cf}?6yRlzGT-Ya}Xb8u?1M6e_=BP1x;VVprIkFb!)IKuy z&hYvYHL(uHW6!*e6&MOH_h3H#{(EGCxii*jfxBakIT>ESn9R9*a82WLesgJFt&RP# z0jtp$&2R+9?fR);{O#eq>tX$3;WQG#aU0PE=TIBw zlCU1e+#mMs2<*U4xG(!V5AGuc9PfI&u^i^We149(FmIV*oX#^9&ruE0eMR^C&%UUN zg}8|vIF8Hk`@!&Bj)S@N-Wv9rIc|d?NQXyQjCIJ3a4f(%6ha7&!5VLcV_xD1xTb5_ z4;kRNdDsAZ(LR^!rpGC~g1 z{cw0TK0tT){8PM%GH|^Jw1u_W1lRSfje_|zUmwBq=$tnK_ekz7;FC%++?Z!uxm!?a&CB5Q^ro-V-qi=Bo$JpcWFq-1NmlG(#`6 z4=_G!a~aQZ2|l|B=F)vwbNek7*1>n{dX$4PncGmrhie4Wt6(8Xstj9f!#!O_uHJGO^@YxQq?(;Dj*47-gg=3v>H}1lE#YBDNgugj0u% zPC_K|BOP4RXZ`&M+6G71qYMmg6?eM-3QbZrCgC!#+8W1h|0msDx~I3Txv2&DrPJ0oShybF>|^@g}mP z7204HDq{}ZdkLgObYDTy9Dw;c1kXh$Y{o!@qXq2WaTow=V7({8@2%lF^u}6P*PZAJ zzw>?69QK|yDT{^p4X=B%7@ybR8u}p)Hp2Q`fHn8`thfRD%J~w)bLDfp;X3p18SH6$ zBNjfv71V;y{D|*S2_bO*jbUGSKJArE_yG;!K0btdwkO^136w_#bc1>L1NPHbFn@*6 z5lPS)@8Ap6!*INZ>~Q}}Fc`le8GOckA4MNj!5L(R&mVyO90p@divB2$@%R!a;k@zS z+z$|hJTRwWC=KWJOj;LhLpTn?JU77NfVAA*M>ULre_N-fF!or;jS&2WN%#rIuo3o5 z1^f)>^~}1qx&9Nrb5`LTe11mYvo-k$)?hVU$91paQ#khxMBsO%LSDF+ohSw0Y1ZsG zoTo3EBNOHz3PoXGra%^$=LB&4IM_SuVNL9_j5v=^@C}@&5RBa%`CJI5A}*{!4%~<1 z%$su@gZpcOOGu6DupTR6?2Ql;qfrp~VC@>iIh^k;WJgu>g86Y@Md0`D&AzgST(dKx z`?4pa^D>Wd@EaDP6)xg2>}hjr{_V5f$OE6TN95Uxz$w^w)~*0P!}O*yf^+yCp80JU2J_enr*RGD#Qizu zCVZwf?C~{lZ|9K(z9X8$wT~eTrC>hI;ZQt)d9lAk;92yHxQ=5yH&@|2TW}VS5g(6W zy{+kR#Kw4p;UiRs@1zZwj*&3G<~0(Ba1ZX$vt|rkQ4}dq9v$#AO5+G-z_qQlapRa_5xD4~W4%VnRs=)bHV;1bQK5$RHFa&F1{_HE)blwP9U+d!j8o}Rp<4xoW z+>IqA>R>;XVkbUC4!Hh1cnNEJ6|dm_lc6izyL&T7wcvUE9PZJzqx%Yq<^b-n{!!Sw zo~eYezSj9Hp2GZJLUJ63wf_;_5g!#{t(L$X+usGS3H@PTybt?2EhfYB(+!TP0PAi) ztb%9A&(rWVPGT;st7pnSi4pkwTkgkTAODPlaQ-p4gJRf&1u&M4aE)=W7YD=MxQNCu zM*F4<>?!wdjP|kXo2Mc$zQS<61DJyxxE4^JyD_K34{*HmKZLoRiK1|?J@6Mg!5;T4 zn_GK(J-$arSSR!J6v^N_!a7)|Ff_viJb~}stGEp3co)uh8b0G0vUcX`0m9*P&gDKg zz?#Q}`5p?dBbbQPs0VZJ{=8>}@0*eET{Ilo;dk!aK5&eE^fBVVcpT@x6X7x3ukR0Q z;#%Lrz3qpwM!*>D!}zEKV;zNG&>nAK9$eeH*c;C8yD0)b(;FEu9<|{Z>reto;d9er zyf2V8aKFI)E*!HLu5IoHU@h#A2-p*e;j_kLo&DRK+Iz9l71pLIK0$tbgZeNp_U9Pb zv*}?CN24HOpf`N>4DwnRC*eGL z;tA{pYc&XA_#F3<86$8RE71YIGwo&j-n@3iKzPniA_?rh3Fr*_B^<%1j$E+M{=iPy ze^+3?*@JPA1{L9V%i+BCcv;wIUbT=93Gq9&qYlE59b@3{&F}#BSx(HxeArL7(HLvt zc>DfSq{Iby6~Zrg1Y>@PFib;QT*pfogLB!dzrlSN#}c?l<4A%1Fm~4-fR#9f@&W#q z0Da&!7rxhq!1{O(!5&OOKX?Xu!~Ghg?}3=GuIAcxJx|VSJXf&*cVT|>pgrQD6|%ti zy&GrL|AQnv`+vHO;VA6GmhgRHEXmOn*2!4^!eZFB&jNSr^#5`t<>7AeuXTR3Wna7> zIyKJ>ZS*hmV|SU7@7*4mGL89d<%{xL zySKP-C~b<7S%w6qk6HR)&_Dn8>X6?eqi*my>6yr=q%rvaC7g|n3h{m}GO7>v3*5uG z&p00$6~X;J_fy=D{2m#VnS!mk$T;pHm-(H0IQLWBGhc~}y6^Yg9vYWM{5KHGb*8##jIM_@xXx#rWy}Ilf+Scu;m)|}6uP^tI$X=$0ef}R6f|E{*Q6V_Z)R+~5Gf#K zC3cnILgV9<4=y<-Zr$Ju@nfzEN)TMK{NEF@)_+dud`p?9BxmP?|D3Pdzvd&ZzjLP^wzPFBL_`VR| zKl1y3*S&i1nxGiJ|F62(Pdym_56*ln_rdrvF{)a^Ckg&;kbmaSd1o{32IGze#s&JR zYVevEF*mWdKu1-6y`PAHnzjd-{*Rm)cJo{Bs@m^^)Jic*kV% z_rT@%DS7wI{V(6==leB$pY`=%?yggb@AvY(|Cau9or$dTcX)rFj}H7^jdxgIfB#>3 zYW=V6{Ez*+KI4o0=l*NC|2O}=|GytF-kpqJ!Ml-H&A%@D<}BZb^L=UV|FsY4xj^4k z4Ze^dX6l&#=RV9Ii+2a_)W7Ta-}?IBeONX4Nc@+3ICG`fhwShF{b~cxo#MGL^YiaG-utiT;X>?~@BR1l z@OS>qQ!L*7@$N=O<@?Y1IE#Vv`tN;J555p9W~~3qd<~iJ2J`*(ulaa&PV+qyn6Jja z^4=i$LQu>t|CjleGT&VOjml5I{*V3sd%l0i*DUzL|Hs~&fJarHZNTTuNy3?gBmdUCkc~xlP+Zv51cDG;LY7Gw$;QkiAuItwEo!S^wRJ%gTX#jNtyMu=YF+BmmR2iN 
zTT89B+S=AnYqb^SzwdWBb21a8-+z7ocU}K=eFt*Rv%SyrywCf*%X!b4nE}>s`qy_R z`^#+n{R8?o{MYBl?*uI4jxnci+z(ow?o=_pVJJt4o$+lnicGeXnkllXmZ#LC4X&=+EDHY<*P4D`0+pC5li3*$=pEUdMr8($npc$|az zHxcv_&C=(lr{c{!UrnB zn{p(OuJHNXxWbVyFUHYxM+!*KxtIdPq36tZ6`%7k56%Dhi{spKGCoj-v4j%G5!_ev z;8KUj2Cj8*D6;WqHSo~(zBn%ZFIV!%6|Up>fx@5WMih>JDg0#nSyK9c@BFE1pLmO}sdoRPy8csL zQ&iw`)b$i~ou;m5tLp{o+Muo->Kbonn`*Cky2h8bc1~&vb_AOOXVsTYomx605D7K5 z)I{6E!Qzf!IMfiTtBHnMTLKO3Ep-GU?o}D~HQRzMU&2K$PtYwKP_* z3wDi9OYuf106L@q54p?t0=H62%{|#A)^!qLJd-2Vjx@-iUcF6 zLFr^sFdRl)1(-cs(6Mna~*EWUf5TkMQ z6xZrVFj$X(onBWLY>P$$HI8aBbeKSnwzo9}>FPMdx&b9+^%(AnfzD8LEsds5nZ=NNAzcL ztXVF{lY|2QA>q$T?;$lHYJURDq~3pMz8X+9e_Ms*|4Z?AsewoHkDjF8X!p)puwZ6j z+?m1JP)$o7=qL<0TX*D=D2?T3S{rc2(p+C&xW)ZM`$BYJKuL5f_g!41HS4 z`_%A$hZ?Q>O3PhMkMToncuDz#0V(lhM!clNrT(iXYx!-(lHPHG;Zw?Itkm-2HCz(9mOpg0WQaSBeu_wWYps^|eN76v@1)$t3J^&d zI@{cVCLS0ZD7O{*YY5L`)XH;YIuuy;fO%*l%d}@>g0wsXZo<51B@N&Xnq{WU6~yF5 z1U)G!In)wGf#d+DRuA_stQ{e!@H`7s*5w4D?Wa(kYH5%$?3)GzsS!g=;Pl=*^i z!0v_KWUD~uGO7;k3zIPF7kenotgSDz8n%HZR3xjdu~XrJ8(@Y=X7V9`Bm6?#zF=U+gUFy8V zd;?lby!X*qwfQNK>E3Tr+ZuB`oS*M4$JwqiFT`b`H%zfwvojqVGTz~+qg`)a56&v@ zX>@L_xdogx-pg6zkhufMT5mSpxzxN0NSn8U9<-RZqs#{H7FMp+T!wgV^ZuJ<+Jv^9 z-iv8xy?H*wc6s-KZ--6h>rGxhC$!tm>!7XIJAn!|m>d!A^=44@Mp5m3-a)KFw;6`O z3-FnnOKM2zR3`d%RCdeI_=>3fnD`noxQMAK)})f-F?O8HFR zQhIcwnE@=%^gTq$Z^$+>eIK)i-!u!r8)y0s(2twU3ScFsuYu)mHctUI-SmBpI&U$5 z0OEYp*F-J1idrr-eMiXKBdnL3zPng~+XS}C^f6uS+Xc49^!klzC z?J4vs%w+T#p1)IF?C{N7eXIWh(^*-bLkF+lsiF&$=3 z{yl5+ArsNCuVtfuMA)_~==DDc`|Mq$d`vjc&R`(^NjPBNK=>0T={S2Q;ZM^6m)PSO z<9`vJZXY21--PGer&IrD%-4nX7(~@RMtZs3O&>m=1bCHwH_rAKgxA=M5kC7k;kDlP znQMkgq|KW`=S+cY@b;49G1I}>=3S4<+LoAhcY3cOCrNNH($ZjxD07qdY>EvKNUt}8 zZrK94*E^7^y#m?iy_``=6@&QW-qj3On&2Gt-o|PT6v$7!zauAIAcwr4vrL9Sj(FdN zQ+B36j(T5Xbp{FKUGLpQ1`Ff^@2fQG7s#i+PZ*6Xff%N*ow{>G9Wc`J>6@J=co=Cr z8RvX~<(WqAM2Z%eZ122sd#Gtck#{z0T4=KC8(xk__ApVv_Hx#ob$!9;_e=;%?lwa9&a-#BUHN_{H97lXcZVly%%q)~a;SzH@3qQhmm5QJk z-e1yknT(KkBsEPI2nsJ{xTcsDEbOzVnUAn=6;AeP=3(eJykSaAHwUnok^2-`Gt6so z$^9d%I@^2^FotlPv$>k^i|&(qA;UGt{F7k*l}xTK|s+!8!LFYCFrEDhkYE0rq(=5M%+4Tt;p-EZAq8oFa0IS>hbCQIu$A3F_l2 zB343q^J6gVg(k<|+zN(vk;z#o_Xfsrv3b8xv5zWVf>JIWRKW?fx5Ru&2>gNq^olD8 zX@i)bW{uo;K(v=iy`M2|=SrBlWuIqq!peP)E}t*`2+?-Agl}Ql3bQPo;h2uIFvB&0 zpUc0PEhG0$s#|7WEJW+HC>Lx}lp{5iFhx>qj&3P7`#_3#%>c_|zytgULBKnX%N_d) zw7PdZ*OG?49B1#TWmxNAD1e}MLM_NX`@bkNNvt=a=tdOF4+zai01kKzGUHOyxJ0sd zLNaxrXsfKoVAM!WKMSi}J1ass<5IxBtoqhEBQ=vUdG@E27dMdsX#NA#dFCQ^-ob2P z5B>BO(_V@_2kHl$0^Yc^vDeVlQ8=46PA)roKu;UL3uGS@0zQ>t$m5#KNSnY$3K#{v zq}VL$p`Nsf=TRjM`O>C8N0oO|TrJ+y(f`wCehn1c7{IJtn|Ag**5x-8I_DdZM-QT& zg$&F%y8sbMTSU0T&Vtgk#q?pi{SoDtEQI`g`w$#TTS_$x?R!wUv~xLom18skd>$u- zRrUsyN;{uxzBO?74wUoU4qsC3auiFkFTh!32ZJdSgH9Tr4e%w^+b)DQQK%Nw8yq2# zsX{Wvz6I3ud0?m5JP-K=44dmHp>NB@8(LtQdwK@p-IYHO6aoN@p-;nJ}isl)5|-l&kdY zE;jSEtIaV{2~i_3RSE5bP$nV>cLcpb(GP%LB2IfXGG>=sQ4VhbyI^wflqaLeUCkD+Ic0Yhi?acQONS?LOLF_%TYD zQ)H99+Z_ST~C0xD(rVyN$pI^@Qvl6yaMDGS%5zI?!126_+7AO>(`~ zE~k~)Ti7k_0Je~?hctE z7{9jRIoPmf!~7oErHE`GtYkibOHXK*j;d{I_-&WJFSHv-SPM5pu>J?It<7=d5AM63IX{@g7(-x%{ZB{~QWz_{1? 
zrXx^kmLM7K#Qb6gSa+}WuG;cX>qT_#f?^TRhmFE+Cft}CLGfC8|6T~asD<_zh1pD; zIs1i>-ozI|f7e2n8-+h(OVs~P2f8{tf)&xbK6)zp*!< z0RJ-Wv9IC6zs$6;6Jdvc=^Mv>2Ic%qFCP00F8oU`8E-(2e*?#x#%%~`k|DMs;=@E2JZ1~~R`o!AS zDeO651HbKv9w&MM95n51kmXzc86m?=C9)n!X0v)yGYm2Hp%Z^(F7ufcxU9lf6?P<1 zHTf`Mki7}WU_p_ayneQwnKcQXw<#^J0_I`vxf-DtUS-qcLu|wde9R$MjHVhW2Gm@l z*894n+10ZJ~Mx2A^gC>4FpnyHwzE~maqxAMLTdMBT&MISq)S~tCY+`z;YvX z2;LMKo_!raBQdm722XQ0;A!+yOW;6;_yxbxhP3IChIzRzV z0h3u=&rCT+CdZy2nHORfV-l^Um3xVZn-77eV5OQHEi=rJ%D(Qb+04F6Wttx9$U5DP zZpDc6?;}%sV?lG01XE9t`ajvvFjT9sMKE-AlTqxOOFb|lt zTtTtkTEq1h_R$SHQ?l4ZsGDtHJ(pAGWpoPzQ1@X*aYP z+4%t>W-xQ#l5%d3M=SP?VDqUVD@Gp6I{&7sO{2P#LJcor}l5s8ivwD1;j2o(dAejy-$Mti5PEY@{6SVhU zm@;!Yk()Q-lv<-O!+R6sk|H=iQXGM$)+%z^227LW8v`4IhM6Hm-&djnqo@EV>YGPV z?>-tFDr8g8320DYMtZFeoDsUt7f>39xn~u`GB7HSAYiV@#0~l=mYSU zh=hY}3f&77yEI%h47!7Q7Y9ZF#u1el=#65sqTa<)ZyHoBrK(JH?Q>}$2hB5>Q5mPo z=IOHMz5`eM4*{J48UI}XE&-4gFPP0;tINPqismat^p1ylei57yik{Gl-lU>TP-K+Gd@e@&bR7GXA zj7kPtZZ&CUC1ZWjB$R!bDtz|SpsLkQ?g7Aqe#54%J*ZTEL^C+dJ#&$^c?fi^0FeH9 zP+H5EEINTuZ9UcQ1Crl`!I7omWP=8wT5o4uK`neb2mQxTbbBw35}<7l_Yf%T&x zg9Ep#;xOAd0gI+vg#sc6HUr3t7ZgoLj>>^uN)hG)ahTyU_#_ndYDLdbQM{l~L=HZt ztBo)+i&31B!D4LaGfKILOreR50<(jaJEZ91Gi<=kJ^xmL=wKi|gVsERULaF^vQaj$ zU^9^XXsKa#$!HwflFw;rqr$oimX3iG4tnyc@}vVER=Vi#D>~cmPDK|U4mzJrwSAwW zl}c6ct|D3X&F!%)+)hW$$~=rI+u7o~;Taq6a=JNeGZkIIUTanug>QgoUQMYFlPMiO8E`fAy$C_J^;czmj&>LHo+2e#pF1I{Bm2CfOTRCpx!s2pZG@xz&s2^?+D*SL z2r;|AP7h}TF$Sg6v75xl@PWq4jl`VBayPNu8ddHVmb+P$yB!T=-cvFb1RN@>5vQwE zxqDddDTuOW_poOFB!VUtNHG$Dp4>-`Yk>@3rc`kZMip@fvjXnTrF##pQM_A(F;K*c zhweQ@Ghan{nt6z3{ze351j4s@X=WeIJOYHF{SI#yxeFBd*kzNmw6;6+--;n)`yT6h zyD?5$4?9lWhaz<1b~^DIw9|>(>BLlw12okuTk%jgow$Qew187^hEm1G>6CV{gfR-r z6`eu6)u9i;GN%K+RfwS`tYn01BMvb>Q_bH%I2E2ur6*}Hd>Rvv$vB-x)5JU^Kx5G3 zXb1C&ZAy*!3?)R&J@?qMHcwE;^>CB5d4jchi3ma}kW;bA!P*?4;;V>Us*v}Au+mRb z6NbwzgBhTO;~|G*k-Qk@^EZR-e-x}UvCEMa&(Hn=I9tHU=4%-*65t)w-vDr+RoX=D z#qIuM&?eT*bB#eyv!Px*sARwphq-64S61ysR&8ajtlEpL+Abi&f1(O7iYf&VbMB)f z=)52iry{M)pC3|^vrv==MlB9hi2To~`UR!>=TvQCa?amaW|+T_NwBXyf8z|p{H4M= z@;8=Z6njOXox_S~;Z=oh81@q%&|fLEYgm-6_L@RB4qH#|VTEoQHkIfRg>KG|mKf%* z6^7vvW?omANW?eTbZg*V!T3`}H4FwZ1yeW2md!Ye+mwPSF79Z>Wg>FbZ4}INanDfP zh~RdByFzgp(?-SJBDj>1Q7u^K68fqV>J~y2mRtdZ}ln5_W8`kouat0 zx=(O%m-Q*jel*R+ZA&P-SaBJ*8{*2cO=cZ#1X6Eb*o_#_f*Loa?eR+BsTGb~zf~Fhytw9%Jon@C@NT{D#NUPAN6DVcH%7dAPa&n zZYpNkSdZ>RS#i&S0I)1?<6>Oq#+9lpBfL~`_1vLz;|7;Zo04(&J6h>+`EYF?E6n`8 zE)9<;uAHTrhZSP!_>@b-p*|WokNm{N{aYU{Yw@m2|6r^_SjYI8=p&bg5+x&h0X?*e zM3#sso!5s;{i76@Q3@-r+ka{(a%tGrM+5zz<>EfnhfDqEx%B@sj?4H^!zC^aAH>NR z1?ybg41atpU*pnWrnvF`f5WALcO9`jq4ft`+?GDv2)Hk~^k1*I@$rAnrD0zm4UFDj zT-=}a;Zpw>F8v?IaTy6jI24VN#SJT)Wk%;h)N7dLDIry6oKt4JC}UVr3ZA+I{IqKjU-RZ~ z0yKFVhz*jPZwHVLUvIq&fZnA`k{@ZpV_JUbBx4{|>@>M0G!Qe*P61#8b?56!%TB7p zlV^I7X&=SeM=(w73|}h@a{H17ucg6(bqYvt2Wb}t(%V7XEg*Pvy#P@2uTOzuBVz(R zSYVz@CGcQ@0N}v_z9I;dKNaUHAU+0vP=NBYbuO)<4jkqlzDYb73T6{1DFgx~v#I2I zApW~`vW_$izWkGg0k;EZF8#C6A(7DjCtWhXlQLGvHq5Yo#KnoT3gsU ze^6`&?7MEZ=>K9V=l`3M`4}>MMb({#hTkqjeQ;32F~!xHuqnmfHWcZleDNz$)}#O- zai@aeAECHRg1K>A=n#FY*dMHFe#%gN$RAUPd|`E8Xj-FXj!`m9ojh#Tfz{E%$^iy)650pLdfIte@u z;5PvLXHKRZj#2nU2FNnvqV+Sj;yn$k_d3ZP$=YC$FSy7h?=*AjW@Gog-6Au=3SZC)6D+@c2JtFEj9Pt!7&jc`(OrevQ&`G|~*+_souLA&`Y{E54=Nh5&>Qty= z6Fve7(S&ajU=uDFjV8Rw)g(2Lbv2==EU_QK9pcG^KMqBxG}v5o=D;W6gP<)Wa}+Pw6Rvdh?#| zP!Gej9uguV;}`3Z3< zm-9Er$ctTO`Lc%Nubgi)cHh%|gK1A6ZBIYNo;SwG{fd$o!{rFR^kMGjo%}9EpC|$c z>Yk~CFcxAa-On}efUz7Wfsncx$VhxM37+h5x%>uDF6N$6hXz92i-L4TZ3Uub2pEg=69i6M!oLlcKd&!+)P8(yoP~X2E*_lSM(gvKUQ=x zM~kO5{?w@qCEjyI4E?Axi^(a83&x>}%LU_H#g(fcF&BBQUT4{LsuJZKS=&dEoVJh& 
z{`pFV6Y^Kw>a5Ljoz8Y!;_~P&B@}DbS{L^j$>m~GE~5;8mtxc5KdZv-WiVDZn+^Y0 zTv8dCI#$#nXHVW#+~|@i>mx&FONfRAEUTznjL+(v4EDi^xx4(hWbxe+|ywF3B{%68b#+gaM+>CMYG|Ly6hg4 zT%?;=oy#RW6a5jF&{%Y2wu6YDTz4bUXL@ASbDvbAVw}jdXF=M>&OVy&nVIh$RO~yY zIEIUIbW{!(?-B5ilf%V(yj$c0>YONUuK&^oX}k@*IVXxcY>jQn*@)d)jdlA!SE|oI z{SUgG!GVIWD?0NKRY3jq-9C0EF7U(RUh_@Jb7x{NM`>d;U)?j0?NbG>PLfWTdol}U z>l~xlZWtZhhKIn%=+PN?@HaR@j6&BEYb#T{=LL@#d8bUkY+XaUU+{J;Y4^VUTXWjN zc(d+!0ocHQuyqVp&)D_>${SzqyW^wT@S+-J$1)NxD&G#l5i`mB9P0941^r0~_^s*E z;X2g#QE=FBOx#RG=djQ1Oh#iXW>xPbL`4bc}IwP}Tdb5<|<0!}OZxWvl9~ z*A{P}#ecXgPJoE8*z2-5K;>|(db#~s%vE9tg0z@w{ytc^|QGneP7Shi_fU#G3} zUA9Ugg1W_4*ag7^Q|TtrD>XqBB{komN>_Jp?f8(O>CWvy3QLnoM`YqlrB z+-d!odr(P@(B=l4rTMCIo?;cqF#!ArT;?(%A{%bHHuuv6bF=!I6LI{llImp~{pM}j z)Dmr~g{IzjncC=>nypQx&XUf?+H9U=%h*$Wp9FXGmWt^Ql`#*II6! z>rs5LJulbvBNEsyqmL&;n$-8 zmE7M~K)bR96+3fgtYSZRi=n&tIKxs%_dDwP$StHCVuW;Op-jQhK2?{qxvaWQa*Cba zS4_J_ZKGyMw^~NwNAi@IxR*@ceUq3xE0TBLE9!ZJl4Oovr6lEY8#(H~Q*jxld)(&4 z()3|h*=G~V?st{_LqgdP6qgk-u@Z}=-ex57%-Pa^wS3KnfbUw62K*E!F5JY+lbOZ< zE^KFNyxYil&<}Ns%pZcp73U)HEDTS97V&9ZI(F8VaDAVS1)v1@nxTTQf|L+;*(HD- zb_rnhD1inIG-_b22G(PDK?oV+`H2^EoH<`rLj1A;teSf!j)eDcYn*skcQ(50so2#W zXL8|sKQJy#PcJDuL*PZV9wc_=*HNChU?x=Y*RjuwlZtB+xbC z0SRoJ@I47^@_&5}>WG8m&2KSQp+)hrqxE7TDmEUl!M`3o>)T3>+Y{Ds+43Ueur{Z( zeE9WUCB+Sj@k&YzO?{-Eb?K^*Qc1fH3Y)^3?tzD0+Pbvdp+vbqDLK}m$L*UKxABMq zx()v)E~#hTzR6W}(r#$R*L}>D@nAjwqHXSy@sO);WV%0BNwJ=T0xD{~@lxZJ6#beZ zrR40NwEJ<&@&jJ9Im;zi@0JtNeICx0y9{5e4d0|f@@bn?iYvY{M@h0G-`A2mm82*k z$EK)DzAnnqw--=3KIzzB3ri_x&Ksk5QF1@+f95b z0y1JmtTaZ=;s`jf0+iUEpsx+qMm#XoCQ5#NR5pEah4A0gqNHL2)#kmFy{IYk?B;a}2xd_4~? zMm3&?sh4nmiAeD!9AloSr1Q1DUqJAG;w2n@qF;U9f4()GvmL)AU<3h7d{c0`9e--# zv-aWk+ZweA6mJt|hBWfcS8+Bh7&JmKDq2%351T>zAyz0JpMn9&8q2UiNNg${gO&!GE2 zELOpq&nas7MsNoI2xN0IfX4xh2H+<%>qTH+1%?*@u26-N5LqKfeEd+PIa6KKA77Fn zzaLZ zqFqW!Cw;(S?zsXo4D>||G&i&u=!-~Q1B8LTgn_;p2$Q3F7dTjmY{~zVQpHEwogfxq zbc5RL0cQx!hVt_ou!geXg-5)%6CTYo3P1M4e$c1HhsaX!==YEr29JUS%oW4nPmunY zmGq{O-n;{EP*L`$LVo_(TvwvhR5@xcK~l0}>sYZ;+;OmC>!|2bAXFtj=J;(OtXPXv zv5->5ij_l&uGm%XiWTn2g4xI}MMEDGhq;GeUQV+Sn*BSJ)Bgym(*Jh)KaO`DIrcrE z4i%N)XQa)|hmy ztwlm7sC#B&6l65EQ`Jo<&uEAb5dMkEhRrW7m$iHEso zJmz)=_cjLi8i=wMw^7?OM4(R~p97&=;sxhvn6YK&7U*iAfuAVNuTpJcCaZh@Aw?@8 z4TrghZ_+UM_tTmT{{6!9N_sBA3J$FgPJ#RbP@I88+DqhPBJkiL1|pBEfkJ*b-h9kB z6EKcf=z08z;7YJ?r zj48*@D(7dOC3Ud(-2f#V);igRFo;zc`Gtzj&_1r%9ioSA#+hGrWn`bOWTc0j&COTJ z6-|Hjy}5^9Y|F-Zg@Cyt9rt0c(7zG$bu@l82AtJE8MfCLwp~EbNrr*0Ujmh1Nndyh zv-Z<%XM>M}UvkI8+|#YBd>XUzDXqLbUt9S%tt6i=t$ao+vw#>QxFEsD0S=x61jifl z0+p{oek2?(G-aP2$;TpvCbgQCqatM*>4cim@O-$qgWN}@K0M-1{v*nJt%I{YLgUaX zL2oxqo4+)PxA!!I6WbWQPBYFyz>|z=baVRP9 zpn}Lvmjc1=f%tgGVdQ-Vb`X}6lZWKZs6=B{68qU!Vrb13#AaTFeA$L3J{5l%oh61@ zLvqo>#89M`*tmVfP^6C7>jK02MSLs3rtgq91ydn;)+5BG8fF8rZwPFfnj5Enm%P)| z7pnYHVAJspDwJIEJ@RJY3si_bBd}69Bfce|{82Lb@z5r6e^=Y-0L$4N-8 zQ<6t1`NwW8xd{S7^8Eu^@_>>IVAL`H#GeQkI8hxwOs zEx7>#Lh`*IYsvDf0pVCf$@e#E$wAL3$)(SN>3dTRX3U1`A%jSBPd)qZF1k;DQk_DN zX(6QiphY0yApPf>j&vXCqW?+Lk?NCO^y8Y2G(Xct&xO0fi4tS*3Kw0_Fo5RBFTcxW zPhJ4oJe~#^>W5v7n@mn6{LWl8m89p$spOCf$7FOr9`E=tmB>$7SSD_k%z|+!8ZTw; zAvLMB9qWjKVt~4uRUsl|j=5wi ztW>;okoqqdy8vpm3VeawHe;amHCgz2aEx>r#teHZFps?7#V@V{$t1T`M8l1!rQ^=+d=SJ^XJIOX0q~wIbDh=)9ZJpdD>4ho*wA5%_7w{KINHS zK;1tGKJi^X)508P#Iwykq*|D$@f-_6D=aMeBv_d=ixE%03UvH{j`%^K6#H@@17scT zErjLQB^u@+b4-Rm&B_>(F~}S|z?TcNIfj)nGi_GdB0o%wHqz4Of)BqHXhUr#sAy8Aj|Kz5qAWD32> z(3?ytd}E{VjEdNI{ASGN;B0ejzEtK<7KO7QZOeMlY&Hd`B?rZ*Qn;F{oPkPoB&ARo z!`IYW8ME`TM9wy-8Lwy-p_VL2l+l`>_suhe%Av_lg*}L1;glGfhvg4YqogWqgs8qXC@JMdrNx6Sy))tHZh|W@uhMYl5fi z1X?m=S*&Pc1UvmQo7~y{_;IW`C@&`mGNR0&$L621d{Sv|=v`l#^oE|IR 
zr%~{=f=<{`^~{(WCcRI`dXPKFeH08XM6Q#vDy>Smg??P>XW^_gWa41lq-0M~lpMD# zw*6!S;`gZ>7f>wU_zn%4?!9Ge%ieDEl-ULYEtc zmGffG$cT?nrcM!`Y)xc}9^?ANYj8{@J zuFkuBop(;gVC*Lidv{n*z$ONGV#o-Ypu^qi$mU0z%Zx=HoN%Zf>2e85Imo;6tIr_c zCjd+dodc*!d`h^gBPOzOF6RVcyyH`RDxTUm6(@-L!Nf!{T4O|>6!URjKI4c_==Ysc zierTTt&BvT#Ens3KA@>NyiZI|GKa*fEe12y0LWv;V2~@~V&FoC%DnK)biw2yai*l2 z(sOjS=en9ll+M$+lkd*#0#{}akql?B87ezWpTVn7?S{F!3>+TrBtYU)h65Q~RJ|sa zzVWFt(v>Pu`6YuGQhUrvlfu20aIZOIV}Kk+lVde;uRmjAaf7xUlV?+TWu#KSBmikB zp}QemcMV`{dJbVOZv0wQl%7Z+%%t|8-)=BoNYnOPh-(_^*5 zhf?G{M5b&SpRVN~UCY6-O$D3R0+na(#*mpIINVL8_*if_<^);ehZNUgYY+!*LY6Xb>9MnyN zZC!1x;V8(MD59a}ARuf^;CK3hb&A>?groItN_{X=7j_e*bvLE0OZ?6@O+zvG@H%Fw zt*bfMELnBouC{1vHGjey1hJ>>_2?h*vHpUGqgGdR@xScX-?O~it7gqvYk%upd(Nym zRST@39ko|mowr+qwp$t7ci6>N%_<~v6td!_I_=BYS?d&u>DrnE-UTrCGXgC@Ne#1t7Y+#nY~tH z5nj9Sc#t-aAi^HwM&{0!B~rhwB-=+-xgEdyJGl&%hQfxR_yu)|MCx2fV;|1inyIC# zh|(f|ExlSRLeKW-UoF5ruqVk%*)Gm@J1(X@Mh)D=x3Ra^{~Xwu7hJ zzP^H&D|ou@j*nMdv36UP-TpDrDxNO8!ydWqp*ffB*n30w>LbG30U`S>&!qectE=HJ zR@dyitgij`)PHzJXW5tIU(aO+{`e4t%%zk}*(J`biWTHZRA=SomUlKV(^H%-_#cY1 z;yvX_W~+(rU-jQ>HQg$rSDmcY82N(%7{FnTFt=YaZ$|N~IdkcX{W8xY25p3}FO_6X zz{z86EV9y9S$X@jtf#hapDBX>7P%sz)^i5yWv#RptZKLB&R$(Y%)XxI;A;FN8-dGh zKO$B#C+N8bgEt5-^7O!3PjU8kyTo&q>7P|)-LiF+^(XuSA6eGTTlb6Wdxi+-$_qV{ zvUk}f^Ddj|xzfy^em>8GB35?&h=IyAf|DRU6J-cd$wSK$h zTVJX#$HOQEe*mv&bjKD_q`{r>2qBNtrW2YBHl!LUbYj=jVUQI@>Ddk=d~ zMb@5$VcTOwkWv4Qu||&=|54VSc&uSQ>#xJHW}C&4-pH&aRrcydJ;Ud$v_|~o-1UE| z>#VwLg>pp6ZU`x_uV-PYV7sCg!dK-5dU4bu)a&NpAve6(9cbKu=Snp?EqHUS6B~qdzP5iFU{?MW?R2Cqex6MvCQpxeg`W5!hzb!h_MWQ49H-~@SH%`=% zG+)f%>@NBhLVS34an&SI*Hrs8US|npy&QiB$xCZrOoYTrpWRH+UY_hlYdvH5LKa{v z%`?VKvE@M6F2_hSWffSS6r0nv=lM%*QRi#n9xcG74B^% zYAq5bdIbC>hj2y9}~Q7^>rm7^Gcfa)P+jBuaa6>$`%U@;jlQx=6w<2^_RSC ziYi@9VX>YNsHl>de5QY+R5pTi+cVBRzp&76d5Rf`g+&^uuJV*-cg*aaX_r(T_DmL2 z-6z5`k-x&EIN6P)?&Zlv6p96IwePpmcU^65C~7=#z#bt4aa#s)k<3DH7E!Sb*iFdV zxPQ$C0`Uf=t$xkeLQ%{ODQZRbTba9dV!~{MZL0i~r!TR5`D_luATY4*JbI~O| zW76-kciT%m!!bGS5dDh}x&IpzRYa_2C)GB!)~$=kr7?Sve!nHxN^7uY+-`FYUtG0l zN8p6%8(VRtdJeQk(E@Z@YgMU_|XOpCN`jO~! zJT1T?b3E&DnKj4LDlcIvveoa|DoY4aGNJG;Gt2XoH_NjfN+Hn=s%N8QT>$At_Fkb1 zIvPDNIA>8K^jBr=X`FRnQKQh%3~Gc{FNTZ|ZUY6*G~x2V98a@!jXuF1b$ITC(SLih zX0G;hN@|4aJ0#pLFVQ}jBC3l}qQsTnz5=CoHKE<$1z1u{3!Y4;!tv4|nY7 zK7Yp!>*}rBt-oXX-;L{|Ti>=1^zfL2o0_X|b0ZLqMgM29SFi-mN7wriI-tjx^Z%+F zuC0lzjdeo(LV;Ca$_dCbF7L%7PjM(nv5Qjd(uG#$Dy*+bu+sNq?d6?qb#CX<4#%8D zOD^jG%d{HA3}O|ovPW4po{8p#Lc^+*j}qmr!MBTBwF^WgrvLXUA#a0Y{_AJ&+14o2 z^PtD?!FL;&&DEin`rrnmxfW8roaxjOy~P)5DF`}yolwA5GQcV?@bf-Pl6OD&veYpSUWR=2fw;^*PYyQ&#_lxb)UH`kzOQ)^VH zf*aMrmU>8>6O1koM$g244}ONaCK?U53O`WkhIPR%Dn^}ZYa&7Ln;ML0TXlV~rU|Zq zEVpkWZo?Z|!v<Qv24apSu_~w3`U|xZ9{c92!)X#D${`4o^#%cz=G1! 
zz|^9$qS8QFN$J$m(z4RPq~>UQOK?tOuq7A{)d8&u*R7p{t+z?jrUWK6ivQ=A1tv9g zhHKh7aE(N3>efwa3%5qGPt_Wpgm|~MK(3*D(E?Q^Wf7i*T0&87Y2iOgp#7kJT`(-X zj)rRxMnn!l6hRdY1QHrT##&IJr88Kwt|^2{;b+0w27=z)Bm!)pt)t;~x?z8cYM)?0Q?%bFV!h%|^ut{P@aqVj6~jyiK+N0{!l1UEz(!nQCC)HGMu zbk?Bts7NFzZmJ z5ez%7UDfK=HVhB$V&{v^T~-pYLVP#`MIso%Fb2s^6KZK_rAD~K{6*nlbM?8Z0Z<}{ z7G_)3G>NLp?i8zaTT~2dssl3Z)reQu+ERxR4whSjjWr^?BGeoQ0cf#sWNoMcV<0!L z)ff$KO>LyL2_uVg=WKQ<=7&gX7*Wb&!;}~hi=)kL#ciRsNww{vrus>zMLH*!l&qVf zH~i5zS#wu{GwsT|&I*UC&uc}sTf<#6DCPnTz}3t3=q!4jFwqf0s)(F|pd3eC!y=z&2Imz3la958YRUW!=$56N}~%Y*#4J)0^Aa z{TrieWh$4-OBvgj9Z>6FE(?OYwr6loX_iBKdDnuoG5bYBtu6E%vl>;f|MJR_O+*q6 zhW|C;MiCK?J{(Ay*(#-xp(?GbJEAqUm_40gQVi74jiEHCJ2G^muAw@i9BRe*$RQhk zR7YzX=}VuU&#_j7p}gyYny?D09$Iwx#30K2W_ZwX@IWKfCdV5+0yQ+%z!=6-9s6!JOm@(u`*(PU&mikaLt>9@+9a8U% zn)>BviJEA8IEW-f`&Czq$*#I??YioQnvm>rXD?cM7IPQbC<4NCh^V|>j@1IDZqDXS zqDAYPS|d=W(>mCB)`q%Zn`lUOZ9d-N+5#Tp*QLUST2^qrLZ6CaZooQbJx1|}oWI}@ zLxRzQ;v%95qG(npkmx}h>Z&Wzfn_B`30GdoM#EU(8mZ(E!0FPNl*Mw2>#gcD*nKbH zn5`CDbb$keTSIkh;(`G#bE;dL>UDmVcP$UDZx6QA1)c7RHQ&b87N$760)xTM%3zdt zo!3rSi#qdO-C8V<%DWa}imR?xokw~uS9MKt@M4#UR5#X17FV6s5e$e7GDmjhh@opL zI!kQm;s^@K`VdwWat)y{L(GdJ=DOpu^9%P~i77fK2WYioMQN;0m=7|mk+@QCM6YJ# ziV|{i{U>I*<~H43;4;1o65&8Lq9B@{{_rs%M<_1y(3N5fu9jeD{K~hKfl#q=O_gG0 zB0G!|VUEDaH)l2owqXU1ke?xzkmXpxiEb$x-!Ur!2oqc=3E!IZ?CfqF_rT%`m$Sx2 z$p)Zz4v4z2jNudXSYN~B*MGr(0VBUtb8lUMeIs^T5KdR-^WIfZ!@gH|6vI)wl z%o#rrFcIqNy7n+ST~m8=iy^uU20X!NcbZZ(z_cl7g6j5`_6V{-OuPubTx}pC9PC7? zKF_RlM?v=vHFV*Lp(cotUuP4Bp=$Nq%ZSvi4c9ayL-4!8u70Ysodm1tD-$4&6pSvEBvVX$*d#KPADVO4e_jLstGe8t2=7K;->UOTH9L~GOVOdDyeFu zZHxO&x%;L`2|3{MLo78A;ZlwjC(atxzA_83ivN;$s02d-i$=#_h?V4ts;Pz994C{B zKocgqlZhc_CzV0Z-WbF}d}CR4X-P?S$;k`gmnp)*`gYve!&UZemYw>a6va+J+0?Sh zCpE#P4Mry65?ALoHLEQQ_ zU_Lo6>nGLdnn-Hl0&c}HZ*!D7X-SM=)h(?M zKB;6VB3z2qHTF_hzl#NQSf`S>xrqj4yXv?5H4>o`55rO&;w=aV&QSnya3Cqg06^iKtFTaVy zG*)P^4DWmKNHXvhBs~&NJmXVj;xLU$4Nh}ND>UfVRhgixMiscsDOj$-dmYm9w-xxl zLn_x`rgGIZmMf-nxl%zltElx?F!SHQR?rC^ohEH z)TaoL`O4L^9G6#Ua0=f2;?aM=b-|@h!AcEwIHVOCbn8kC_^qnI0jFTO29G+V)S(FVwZafFDu3-#Es~H5jv>5b*iRC|{A~fn3!fzbuEx2|}m1 zOPzw18fUfVl^D9WRDqA3f(a3juHffb6#zdqc(Ee&@24)f$|+c>L3hDKr*y#; zPQeNdx(ik$^gLZq`)3-^~^Ds=tD(PFPTd503#$dNe%6alc%H{7M=gmSNBD z(a17MLjojpuQiIb*%4Wx!Bd@NyGVol#4!)q92%Pv6O`=Od5r?Q)D@$i2J!lkI4}lj zP%EVR${vly)F))l8l^U(uBK6;L4K^6hiP1(L9LMLEBaBN(1Bi+Dva0Em9f|Gdy0mb zYVfa$WEpK{&!-B31L9!p6_}*1LQae6>wLxhs#9)-27P!Lg9jp}!2(5+4WhB(F+o`( zcGl|kxQ7PO;53z17Jl2PLnB(1Y?7EB32ANRjW#iT(FJRh_5Q zl{uv0SDdPrA61~P3eshXGIp-IvZRK!ZHT?9*sr<7G#ndr6FPoC#cHjSD=Rd(2A?F5 zEpUMbLy9ExOk@03BPA$-*jcNe?WnKR;4-Jwat&&QGRri^pXK9$eAl41*nc`|qfL&{ z3Jumex-Qb7HbY$%JsQ&%OlLZhPT-uVewkKZ@2J06gIXcgFY8CWo-e*c{VJ{g22~-C z(P?N{J4*eDX_u!Q8l$dE9u3cRoUPDKZgGiecvno!@lgT%ZWJD)9FcOZNw*ZoySQVB z!iT9VM(f!MoTskI#(HD=m>!Lu9+Om%b17BOWOZc)9cCPm?vl~)`HGfoOo#8e_E8C>FJ`pE}=)`o|610ngVh9w8* zcp;C`?(j6AW%einWi+f6_GAC35`W7nR??$keP@DV8vIC!^*e22kP^ojXGmY6!M{4B z3pDsQMPlDw(W5cv3q1{;ZptDqs=#B8u1XE+Qq+~W(&f*n@z}4f>?Rt1-ceSr!Iu=N z|2nD}<{ov$I$eWFYJidzk|ty4=T!mA8J82rhVLj5ev1{4{S*~GSuAe(&z2^F{Q;QWEd|Rb||JBz8uTfIyof>o(Ol-UxRDqk+wZEWzpm?7+ z$}2QDLhVc>VNe~e`+*ax62*N@UD28v9FQWVPEZ}i9j2}*sKHXFVEeYj(Jd#gpvNdp z@M^x2;aBkSfN~AiILh1lQ668gzgY3tV0rA0Dc9ijj`IFCZxRbu_E#{m1rrNa^j9!( zHLMGUm0$f;r&sZ&sBa8JtTZ@Pkz@;MY(`8_gERXG%J9U_i54pRvryTSXrZz{3yA?r zv{2EXg^GSGRP<*dF_K4AlwNlt*-zG~C~S!ZNP`bLqzg2-w@=J8_DD=H7C;HjSFFn% zkxC8fQf#Qi=v1o${9#`n{bj*H#rx2fGROiAexykK_hVh~^_W*0eBaTvT!Vje>8k9} zn6}W*aH-V~aCB8@@UIT(0u5?~bhV-%^$9cLUzOJ%xI&}#&s6#?W8<10jhqo9*7Rtk zB1T-3FcxWb`AUe<*YJ2nvyC=mqcL<;phr`t#&Hry-c3qEH`9OIVqaGUzoD*JKz~nx 
zkEyF#Kx_1gn54{(*!e!idO%(K%PsBcPM3^^pI5YGql_zMjlSrRCzGsET~{R?b3;M+ zGB+Lr)DAqXv&Hh$7Gp-JrPB@HpUU!vU)V- z`Z!KbkET2n;|OgzJwH&CS7RLIo2DFz;|%Q4l)uF|%C~{|@(3RMIb!#l#Owmh8|7%MvNo0HT2xAD9^<>LR(pnro0%(nbM;v z^HlbrKFYVV6-Bpvj3a9vJC`fTu)3lfXz+XZ&I=xB$OR;E-oh zTBBz>)9@TcZy-WZ{WMf9#Nj zG=CDA$6wSn#jwk0Q{#V&(I=Cxael`m4=tag@Ch+`J>@lC6r%@8*Z6`MUC5uO@C##f z;g7~IiP42W8h_yGZQVT@(|aH%m;qE7 ztrmAz8eHNSU9Q0j$HHdZL}*M~!k2!7sxi0S#0g}L()6%nWQ7KQ>D*Xfpusm3N!CGQ zM`MCYi*s3{pl#}kYH5&P63heBzXn%0ik54z&aoIpEH$Pr+L+rmrr6ttCaika2;GLU zMO|S$o>aY&UK)VdkEP5p*W-{@C6UaZhyyF*&0 zLH_)NbmRgJ9#^C!qZ?J#7(P8G4w*X|8=$D@K9U_fYd=nLj8>!f?SB{z2dmXg0mT~o18K7ULYe%dy~?2tGf2L zg%K~)&(5PRe!Eg*8Q#Rr*mxmy#F(!V(w=_kxKozUpW|ivxmAl7Nk)5=jZM(>zOtgN z^;0p;`Z!jxFB0^`3-#ksyhsW>@_v!%QoLM0G5VKs*(=mPzunPx3jIz7hZgeYp6`Hm~gHMmHTMAVKYMlHT5+k5iZp7`Q6 zgMMr*IPq~>pbTjL`pE>%I$wz*#XnEb6ED((!AK5ax7=%@KKOYh$@zUPlgsJ#}`+q)Q``VMNE(W89J<0etrXq-llO6udJ zmbfccF%3R-(!%krtNqd?`>CN%O+GW6Noa)zbDZs@i!?Y?k&=w9nAS8_=ukO>YixF` zc)VZQXt`swQiJOp(ghmqc1V>P)Vlkh{P>duJhU{DT7$YEX1a$rNU7La@9u4IJYS(f zy`P2%YVdb`YOS%iow5m&xmLg25vkPREe>h92Jdv#b0w=Wt-k+?>n*g;9d#=-I9BcE zV2IYB-sh65kysccq;+LDJ4U5F8Xl}@X;=!_fzc&JnDToxG2k#eVoXicHv1VLN>sV9 zy7HD;t8#90FG^FO-yyBgpuWMz1Ah&6#H2L%kVCpigO4gwsWC!t$9Rz>WB0zZ6C$8@EoVESOHlOxbay6ja#dCSe_3cEOGqM{O@O9DOauwp z0ok%b5=c^2U6rX`lA@EYBn_SJPH#zP=`;!wa3KNz8V9ukG7QYfm~jOWCdwiRjzMtT z#|R^X%%H(2#J?zWZoTt8@AZ3C&}Q`i@$;|GC%M0S?z!ild+)pNciw$VM^&7mMKMFm z)xZtq4MkO)q4=D9Oby&$-cVG<8Cn+e=m|COU*!!&Rh*%^n4v$Yfm3vEumsOURh*#} zF+=ZC16Pzc6jgDCR>lm~tAVZM4MkO)q17=%A5sIKEpI5Q;tX9KGxUfW_<4ClQ59#X zHD>6b8W^i9ti)hPRh*%Yn4x#8fyLzwMOB=ko|vKKYT%~whN3FY(59Fnc3kWA98jWb zag~PX(vP3~)13S{xs)&*ua$|{inX;@Jv~u(|Ae%Ft;hL}$v!X8{TX2Qdz2>>%_UUm= z-7M1tPh}7DHs=atvf8c)uPTsvUm(mC$Xu^NXA&1*G{x>*fy}Q0;Z+4PuLr_hY!%@l zPs_O`!1J`CyUD&;ZnB?IKyrDcq`H8Fegt)pnTcH5y0e8LHyO8O)1U$H~&&DZ9GWN9`Lj z_9Mfq3$s-@V|HWb#J9BJ4{7i}70y$e!l=b2uNpcwQ1TI~Q{$?Lup)yQt-V8Qy+WN8|sBCk!!xeFMs7{49$58Bebnz}Itf7!f903T3M$#Xon0mi7S&K8)r9i-F1 zajI6*)?_W@#wbEg1=5xMF-H7HNgWnmX^ne`CfHz^mVH@V~1}h8g&rib^_qKnvMimSF~t9_}|h+;h)O zk~__$r5<>Unk(ggm0FmnsWb_^LPf*8?s>6pN9zssmd11vYJe9+O6l~g&NqVgfsd=G z^ctfgZ&1jQ1q@e=A6Znq(^^ad!xiJtAX4$YTCvn{{)3i%PE&o7i|iLo+~H2>Wa)8< zh;g6R_^zg|Q83RP%i;A~Ep~s;!~IH}&Cq^HRZFOpYRtD6`d?f4Cu=DKd`3m3Ck)r* zFFO{0GGV|cO|%U7uOVCM0y$bJaSez0)6G^A_$?DP05RxXO_nepyV#qmHfCt*=7D`{ z_MD4PDw4Tb6=k6}##e?G)`hHrn^oqb4F91-B;Apc_(O@esK&=NRnjN>2@@9&KeEhU ziNkNd)_6@*IXwU`&_{7)E`S%Q$Ugwc%?=bphfxt?{v|V?23}~j8h{vd=F0n9{^Fm5?W!QZKkJzTWxQ?)U&>5EwhTLCWTZiG0HU8pv}623rlOYdWFO| z5~#)wrG72{plx=hfe)z2AFpU1(F&eJP2H&#s93^PYfhVWu43tKtzOR6y;}Z3+jC3; zZ&Oj}>7e4*)qra>+MQ7<;KEamRN!-3B=czfMLjaSmLqNekGRw~SkzminI26`jDFm;B(70QOx?H5`l4Wwrb{$E!b{vb z;@0!Y)~onPZ|8wNSAZa=1#)-96}U>1k+o>qF@D+3L$2mzr0rUIT&UDlpGd)0RTv#o z*xA3$sZeySsvI9xafYfWJywV7#HiNJ-OgGS%=dvlGRIrMnFfB+L`mQo76GJy zcbF&(l)qeA{K-NBe!)a5fK%<%x(xVP6V(9E(zC@7uO4`hiRyq8b$Ji*YJq!9lm%X5 zXO9f*e$iuMXIJ zDq6L`|29z;*m@dT4ZvTSC<9!7I$CMqK@+8b-BoBMfd@@g2kbcmty^Q4KJ8HWjnLZ=0wAn0z-{8Q^zJR0~{s z4q9p86DCRlub6~Z68HlXC4eh+BUZczK4+o^pnP(z_>*^=Hz2;K@ov5(GpkM&gVppA<>Jw-NjwC2=8-oR;1= z)CW@UP#+l>qiBo_oFPL7WYaGMB|DTzTH%Y5-nsT#*6(w~5lg)+&y|6!2veC4p_m z6?MRuO;ihPGp@)2UolYv*kxRi0lsFUG;p)gM+&&axFQLB(nNK@MaC61z$Z+U1uinK zXaGKGq6~0>aYZfgVH2f+3ydpLzZuC(HeBDH~z#gNIEbuob$^fr1`bYzZ8ds!%8%>l14mV<` z0k)f{4){wG)dDX!`p5z&8hvDd`%RPvmS7YvM~XDyFO=Z_7KQDa3Vi@!45`<@jSP%Y zG)4x_C~^uJSVK4k2x|zZ0AUT`6dT;oL%avV8sZla))4Q3u!gt_gf+x3Agm#N0bvdC3kYk-8Un%^;ujFs zkdXkw8sZla))2pdu!i^rgf+x3Agm#N0bvc9MregR<(@e2rRh+jZhL;M1A9U^`K zLj=;lEuan`ZB@aw0f7MF455YHr3j>dTL}U=)E6a?BC9|l+@2HnfKbWpZjY=&xT>PE 
z2oCjCR8Ej9A73r46@a(-kd;La+g@HX#VoB^6tKh>YaCQtEe7#$B*y0|3e2q@aUWZuD)U$aY-QT zci(WFFSh^J+@0_+%V0zY_fH%y8L6Em#;mG1Q>7;F&tuq<_txMi$Ut zv#Zg?{g*j{gOkKx?kEmUOC`^9f1afhKD}-As@=J?dfd03e6VoKEl=OJzQ7rb8`wf) z#P4HmwiE4@8udv;oEb;rdOw>U9zO;(z$i(CKH1T`F zc6(C9@BeC|B=P$y+wG|%et*J5wZ!jpY_}&HkKYr-?;C9cC`0^yk?r=RiQhkNq7?D_ zCAQm>jK}YF#P4Unn{L+-zkkL=S>pF;w%gMXkKZ%#_`R0+{VTQslqP;(a4s#Th~Mur zQIh!mg2`wlh~FPDQC&QKZyYdE_NU6bMN_#91BT-F?H0d(Xee`=0TRW_z6+2TUbYs1 zMDTKv0wf%kixeOvBJn*C5|L{lAS5Cb1B686>IMjj$f+C%iO8-I@Ghg6BoJPa4guj6 zx#|MKE7C_Gydr%Be%V+j3505-+d!yByaz%x;ynaG!Bg5(pv5)iW?*+>`{uQF2uegrj6sfwPRRlE8T;N&{gs z8G9g1CIbzeV?37xLT)nlK*&w}0zz&w9YDxUrUMAM$>0DXH<=C~XFV7cgbi zmjFV6;wlgd6jy;zptuTz0>xF}5@W*z5LT3h4E%PW1%xQ2L%=JIMiW4|Q~C&mJEf06 zxKoa3AlxZ^1j3!tM%z7YnDrW#7bSka_p;K`c2%UcDnki`Rb||Ou&RtM5LT6*17TH}DIgRoZn2z!b5 zK-fzL2MBwKUqIMP`~pH%;ur9xD*rGSzkpDc_yvTj#4jLJC4K>+D)9>lRf%6fs7m|- zLRI1y5ULWtfbf#|1%#KxFW_T#4VMDKOX3$05)!|FFpew*;9eu2EO4G(oizZV7x4=S zy~vRRgkHo|AoLG(65$}P}i;OA|dXZ5DLNDSf5PA{6fY6Kh1%yk) zFCbhZYX}IJh+jasMEnB6CE^zlE)ly?NPV0@>f!`a8z+!-oIsLs0!hUQ zBpWA?Oq@Vg#0g|soIq;g1X3R-kh(a5)W!)U8z+!VoIuiX0!hUQBpD}=6>$Q|#R()4 zCy-Q}K$gb|q#;fq^>G5JixWs~oItX10%?d7NG47o={SL;;slb66G&a0Kx*Rzl8qBc zL!3Y|aRN!l2_zLKkYt=d>f!`a8z+!#oIo1l1d@ppNIFg+sW^co;{;L{Cy<&rfn?(Z z(hw(*Oq@XK;{=k96G$pfAjvp^B;o{87blR~IDus21d@ppNIFg+sW^co;{>uiP9Sx0 z0;z>Sp0*voY@9$caRN!l2_zLKkYt=d5^(~lD?uQ;Vg&LvC5~?f1Of~Z$lav~WLGHy z*|9B3AVpSzLYCOQ?qxuzq~2Z8{Dtlt+r4()qm>8<2yY!Z^e@-`|C{^Pe>wmEzr1Vd zZbnzX$*s#}_Uw8cfede#N_a*fld)TuX#$y_m?%jgleT^36oJeSOq3;%$=ZF&41vt^ zCR#xtlebOeWdt%mGf@qJ%;Mwx6)i_Pfy}o}R7W7Q*e;H131p75`;=J%nZGenhCt@W zCQ1{?Tw|NaDFT@nO_U^%S#R6JD+pwMW1<{^Ot)NEkU7=vQ)UTdK4GE;0+~v?PnjW*`Gkqm1TtgC(PD}~=5`Y$ z31rT)TbFeNGB3P~inRnXTWtR}OCa+*6EzUXY_Kcg41vt=Oq3>&*9#B3Is%!4CaNKj*=QHXSpu0O&c*u%0+}03lp&BAVfQKP31mKKqBMcbXuD6DB9Pf_ zq9lRLHoJA1Adnei_bKZLWNtE1ErHDGcAqj!AalEkG6XX3wmsK0fy{FzN)gD6wfmGw z0-3)v(Q*QrF&4Gd5y;$YqFSPs+w9h5hN$HC|NM>dB$vUy-f@Lr*C}nlh?lgYlTH)3z(c>(O(8g6u3Y>d{HHR+AL}1 zgiRs)n?mhM!GJ*%@2WR=71g62dHnka|Na9v{@saV3s_vIjqUz390H#ES#;Q{U)IKYRt>T|39)0l1? z#{KZIN1u7>p|2j;T=-8N;&Uugs=r3z1173_UEvH1i)uR+e$GVcLkiEauqfH1kiXw2 z8-K5==xhs%lKic+&j+I4Dx7a&(TWWUA2iXjVMqBKFi%q%_7;thZr4<{)Ba0gl|@R4 z7KL}3sP^Xy&$h6rwprnqOqBVh!pkfyO21Fx!zM}{t>IA0!lLBO3V&px6{9p9%2-&G z`>4Winkad!hC_=iEJ|%xIMiaG<-i3dYPeD1H%(MO;v|2(7HKNmmS51I>3{`KiOCuv zwOFK-_`Jej1)>=mA+5JaDe)zRzYav_YlPHgky7F-3SSLG7ghOwZPZjI?IDG)2BM2K zLh7|hDe-{9*G$xKu|`O}no9KifWqGgqVvc3E%s?DS`R85Zjn;`9EH3rF5@*u!=bE& zMYT68{H}=-$7?v0wy-Giw+g=#h)&jUC~aX;`r`_}YogSN8V;o_EJ}Vv;Zr868>tb} zM=esS0q!?Z_FRpSS~QiN@=qxIxrs89C;Nl5Qd7BB`kcZS0uk_K6^)WlA60dDmFp#S zTvJ_`JjJ)OSyLJ0M-;vhh(0yb*Wz_@+4di+F%++p%eddAB3>sKpXO@}#p~qKnQy9y z*U5zxuh1BZ*U4qu{}B}(V}Vm*w!)1jst0aU(I|HU-^vNRUHbR1mQ1$LYWYP9-)o|} zXBG0oxp+QZ1FuaMfF-`H@HG?F6M*r;xp+pcG%ui^md@$`cTi;hkL$$V9&9^Ct_qx0MAsaA0yxDUGGhhmFm*U z37z69WxRtkGosTGYQ7`fe8<4%sn3pJ$B^P+{}sQ(fi`R;+uv(Lw;j^vRPAa52#$_ zUGIZ|?F*ak8)%ySLOz)!(I$|$A7+ZdK6k}f=~CCTz;sX8bk9K3>^t)5C1FY+Z&Az? 
zgFT+C)=HPUWgD3844dv8Xqx>?KH(%>3cOB3@tI<<(-X>;>qN0HATYf-Y|gId|JVTL8(6XsfnTxa-Q}QL zi2h}vYo0P*d6_9a2)M3+{|iVPPK-jGTf7Of-&3IA6;*Hq>npglvZzBDyY`M?!#acIck1)uGg-~R38Q?M z-RHc8)nv7)e-w16f;;{C3NH2iql_1!-03&0GuTrCw;*oUw&Y3<_>U@@CWie}g7`Ps zwN7Q;%5v=F8*!=IZVHavH#=u=KV;%)<~IwRsvjK& zJ|(nAP6=9{&(F!V4)8&1ev@24`KJVTA+<%Cr;PhF?vxPL8SE*6`h4n6o`D1M*4h{` zET;sy&T*+9$Z{`;&*2F>0Y~a~#xzmh>WQ-wRLcLsWNZnK#+HGjLFI93#GMa(BQAAa zADkU-Oz~|#Sroues_8LeP!2aaR9x!w5M{iIH#4F$*khiXV0;HqXb^as?&HjqMmgsH zl*5&qX?#ymC=ST0frraZ+?3nWga=eedZbL0^%ki z-$@iw1MzY)?amjnop1Q&ywzJJ7^0K zo7mwtEMKCOl>y|XQY9k^5CbyeRxMwvl%pPazivQ|5s&=9&ZYiLQ1+PWIIqJxgN@s` zEzFlOEFt@$mw z3SH_qPZ=*0yA!UzQhlAlu0n3(^CeRWErD~Y{KZge6;ifTbq+WDY}NYRnhKKvA5l@M z=#sLRta%n;aZO9CI+)d4U7-F!YrYmZS9jP;tyju;AzOn-ThoIL&Uh4puffW?2Y$_( zAGoI7p+^}ne;@9;=dlaEJS&U{oU3;ZO09d!c+vcD*FBGT@ReIx_ds4)ALC!I8Ed-U z^B4$U!j<(3f2ulOs)c9ij0$>Fjc_4zWdtTSM<-gGIo z&M4zW{KH*mJOXorwj~iQ@Fo@g>8wdjJ_&HRtjWVTe2-Z6nSgv+Ai4_O-4-6$;hV*> zdVtk>&q-NR_IP~Obk!*169u|@OlPqBOgtRK_mPD)fxE5wO|sSFU)uQlOw|R-c#z1g zp0LhfFKwvLx0q%108i6Kn{E^UOdTzdaeGl=ox!pu4?XeSXNh!x1#5mF zBf7z-jL$qAE^G3D72lYalK^n6-Xk32GooyT`HbkAr;JZZC~KO|U?V0T=Hh$Saxww_ ztu^26A|?r)UFr`$Wjy%hR&!Wqu%u0WzMU;66X3~u@6(+*{rw=9`pr|uLuM{w3hNAZ z!=3tkr(1XfxI~+uX4^dykPeoOsmymf4tLLpC+7HQxC9kIzWKR$FWr##>**1Y?JJZ{mu)W6$B z*#(n*7afU(bq2fWd7_Yys>?YCxK2$^6T=b)%jrfg{|9?6qcR^^apy$eh)XL6ZHLDj z`NF%L6@ZsaVRp8-^PPmzF7=0pG9HC=XN9oNU_*K8^EG)nD*)G9^Tk^+vLgLmVs|f+ zGCm&T&WU~~@9PZqtiWTNe7RoE3c%lL^V7tzzv6{*2D{v;%$IBqHwNJu(*MxsALK?E zkT2pKZUn)zs4MlM2{|JH`Ld4N>F`>Xvp~r7dbi2v{&Ob>G0n5F*V?nO2_Rqkai{3ObeNvx9#6>hR&hag`uwYZ zG0ii!74|@F0?4zrXUSm}m<}&hS#KfJ?q(%*ZV1c)`Pz^>DgtxiB_oS1WUhDtU%Z-k zm$KCbp7R}Q52B=iD^+y33xel``I8BFe!rngsAU6}gBlFcywho#P=BG?k)ssIBUFcLn)e$Pb9fjsLbZSL#yTMYgmq1`+t=s9qM zc2jP<0XLgy#a9)6!TMeEyuw4aPmp`(c)zKY=2q?#3V&}*-T)kap09nhx8--gfBlsY z{pZtPofMlZ{t)-cfyY36k`Ivpk3QNrS)8kPpX_HYSFhx#zFOh^=HRmD6<(*_$npd} zWe)Z{chBoPZhdtBEhoere1|%AhNeTk4Ib{)t2<7HH`~=Kaq;_o_;Z)JxE%Nm6&3X+ zPV>8RqS1eiF9`mBr?!8trc&?FRvh`~U|bGu#giYYC(b(@8Tn4%qSSu8Z?m>~o3&aG zyjw*_dz&BIeaEl9f71&$^u~r}y1H_4;8K4dE)6@zTI%n^rG@H}#CyQSCQ1O$d52X! 
zv=z_m0?+R=&%dEPknY?UTMTp5tt$few)ag@x29_<2Ny6Kc)Z;_zBcfszb~gg?A6qb zEQ8pmu1ME_Cr$VJ;nbqPggR9lIJd)`yDRWyuX*xp;K_FLOkN8!&X9?2jE9o79K*1-{&EzT6u4vder)sxPuz3Vb?n zZ^X!M#Dl+pXa@f`)g;8opB-yf|2(VIL>MZ)qp;4p6B*N`6PwrO_zOJc63P1)zXdL)-;MyK@ z?bNY08GCF@wgjH+iH%9x1sWA|?R)z!H!b+wzRS)WKVj$IzKSVycC7hS80mtgYS~4W}^);*iEt<-B0vT%2+h<+dA9QW6b&Wxmu5BOC zHOYm$CRx|DeFM5Cxp3DcTkM+jxLPQ#MAul_w_a*PFMsS+GauJf`V0J|ijFKkTrPjc z9KojpUEtpj(495DfZ*Oh7x>QubQg>-Aoy~i3w%IDBZ@wZs2X2D^0h!2_~=08iQ@}M z9uJg(PpU}&bT-)^sJQr-a4SYk9xwg&hb#D>fkd~|9~Y+Vd22ZZ{Dp}Uz?W5Yggiqh z4U#<=XaZjgY05B@{X?K>aF*|}vnhi-M6*X~DiaMnT16G2>4up6=s*>CLReJ>nEc6s zD)1B)=>U@*r{WQ=vEuL|nP4UDT7u*(Es;r)H#&y4sX2 zM&Nr)lmK3$BApnr^8!uaf{><64A~`tCNLG!l!+mm4K#rbDq>>DuMAXytHY`?G31*A zRbY#XbYjSMsW_MzBpa=Si9xbiOJri?g^kh^!+@jfPt2&;#87^{+LVa_e!xTt;LR%1 zi6Q%c0!`q@Lz*%%WD9{N@Y5kpnHaM71e(BmRm8-Q|9qed{Bl@TCWic10#)FHD$2Y8i9zy&l`t_#p3)MT749dYKcsYyw6>FVi<6A{fViHO$_A|)TT@f@Ej8*fKybY6GQgAKofXj zNK+<;?Cd}jI6tH*6GQfjKohuBMNAC&OrQ#^4Xet;kZ%Z7fjJfF#E@O1;$UKsTw^6n z43btYk%^Iy8I+zF1{_^~VkX8WhVqSSQzi!3XQBk~y(-d)A-g@$1b#52DHB8XmOvBu z(U7K04B1Zvn!w#EVq(bO6{rILA*?DBL;l`C6}U%5Ix%GLQ*kgcNWN+%Obn8L(Gr;$ z`ExU+Cx!t>*Poclv5BGl3AHH`1N^p$62NCvq!UB-2Z1K=Cm~Il7_!d?n!x`IY0AWq zeL2tszM>)~hWu-RD)9GVRhby_e+*O&&hbx#Ix#~uJA%}rGG8tDpP@t3AL8?4LrjfY z1#(*fxMhehlf6XXwJI9!9UPJ=kefEt6fqY~m#IK*Py<((XgTm|6;%|U)2}X&XP3pv zJ7g=I{G(tDpPY1D0o(`MsXg4RRKzAg)q2A;I zc&?TW^V)|L$XsR}P5>8$^*GwLsn8h&-w@OVejr?z5qvPv1Ag7~){8HCT+G+hwE4^u!CQ}Q@zQV+YPwLaQ;#m;ms$Iiu?$IfLDJ4?&h!w!Dcsou^q`m}@Jw1eL@T5nmI z`q79TV zH61e2*O;T5xl#RdRiKZ1c@ThZ%zGDW`K)L~&A+JW-(2d0oTh6ujaFkXk^RSKRFCbf zsD3%UQ)c;^<1{@>Q(qeVUrw(@^}96PtZ7iwN9FV$RQ*RaeM(cmR`7p0y^*th9j>6c z)Cy|)sGMF->#xz2D}reKa(dA#6vEwT{c?JQ#Zmn?%MbI0c#jW>YYdan<3LIIFmGdt z^TaT3xcB`M>us2Kg!lC^rR-ODiMN-MAL;d%l#lRsoKQ;tZQiaEOUaM&_Lh_%?e(8j zO8*#dM@jjy-mDLo(jVzvRZ@PO*MITQScPHUC@*noDfwt`|KjQr`eVEoOUmEw9VjV3 z-aAxMeuDS-(o*(ia1y2DCwcuP$o0Npq;!}!HX`w}!@N_= z$WJRHKfR2+DkgV__i*o(H=+gZG(N?=R}dkDtF2d8vMVQSIzdJ5#mcuNUo{?7gV-3ynXk`ulW> ze1=NQ#-pC_=-oW{?s!1v()}9JrB=P{kdiIom-{ay|QR$lvlM- zK2)kKC+wqUYh;it_mR{GUZWM#g{N9N&4) z$UhSKs8D}^2UUO1cwa*Nd9qwzIXipL@a1luyWhns->q`a$W0=zDryY;&t2C#JKh9e zuGPG|v0v@WhY#`7Z;QN?pFhOTL|?DNPrN^>eBWGOFj?gjC0=pu&br7Kd`bH0ehHQD zdzYWTQ0lwi29b|)=P^Am`BKm8Q~e!!0(-aGxx1*3R*-&K^{>+Y{;d}MNadsE_yQR5 zm?L~YtJMFqRsSND@15xj?o@egQ67&^az(i+g_6x`C!zKoKe^vsA}=-0Un(PiOzrG@ zr(d6Wd|BoD=lSwRKk?pCq4TctXHzeG zze*?kcHfMbEb5=^9f%xn%T)iscwg~*s^72jew{~c-n$ch^@I6+M(szB=bx(nsB?V7 zEjs>3j?m-q3}4=*?M_npWVK(b@;Z^1>hC&{SE)`oabGHP-b-w#(uBzy_frfAE|tz>=!uudv#rlTJg73nP{{j@iEc2Ca6n z$V>UNNaWQ6O&4>ki+1AIM=ffiYgb3Fx4NToT|U>;y>4BvR0-vBqS-Y-HQzLK#*8T!tZQrP zZpn+In$NYh=UcrwjZHPpYg!w-x;ygoJ387rthiIemTB)^+0wk~ntZQGd~=?kNXG)Z z>+)S|+nPE})9Mb;a^?pJoI7EGc)BLvm0K$v^;YMLT@X=6zNvdvKG)dPluSrjPxV}(%Tk)lubSHAHLYy%rK{GqwRPrmT~@rZQL^j0Te_NMB4kol%V_7V zG{36JaOe7tu60wq&Q-L!QsyMz)s^$z^44{GoogFsOqqJl%9gfO*K~@*Ep3fWxs}ap z@~utHjja@~ZR}hth0Q%)Prjphb+6!D?y}VE#q)FXm&}s`$UmCEO`UDIwT-P!EqO0D zuXf4o#S7<(#^p;gx%msU+Jbp05v3Q;#lYoPE}b*`%G}b+E~}ZJ&ZTG1xpIEaTh-Cq zE`wAYe2#;T{OX+cNCt4))RnzmayoqLGOJj#S^m6VXEUQO?>GE>YCiDwb$fUH#WD-nsxb=3+K$uO`SBoynou94C%n$*W|lu zx;mO$*SOKxxGI=1iM&^PrbY^v>>BlS?#qbmr6Da+=N6G;hkMrLl#MuH=9&o>Aqp7 zK7ib6nmIjw`dzy6>ijBM^^1CPDUB-TwlsEj%2~SH4AZ#{&8q%1$UbEGUocev%|bT_v&Eo*G) z=2#1j)qwFR#z94vA!ZvC$IwwXYlB|*PC3M|!ZwQ=TO?GFmAh(VqYLvITV!DbC%UPV zW_ZPrZh*@cQhDB_&faxhjVmSX>hROGpw_mo{G>Il-IG?jp*W|x$t&jEvh*f3^|ne) zKke%9OXMWb=}sd>HO=IttRvsj=qe~`Z|U+TxiHh4ByiH2HUVAvjq-o-tUalttw>Lk z@@q9PUE3r^$oWnpcSrZC%d1NF3Kql2z$Vl3012#J?iTuk2nU z^%`5($STlmbL;9haO29A9r+$|EiRx_Eg2o48+etfCF1)3W$G?hop9VFTmY$r9StcNUxIXbN6%YIvwh>T|P@> 
z&NlVAPeuBY>s;ukmvbgrAn1*d|J=_a-L9JWPdlU;DgR@rKKH#yCu@D@ziS)+RqQnh zpl?pb{V`IvpXz?JPks8|DrK(z9H%5d?xT^8&r+nppu1rszk^R8@l)y{on5B>o(KIS!tI}jf6@A* zrI2GMtoM>WD7Z9IA(Acj-S;$H+w*2U;#-~QPA>8rC;uE*a{R$R?(|fu{k7lmrSFyf K%dTUu*NYi()$!WS=KHXsBL2;c?qLIi;mK&~Qp_`g3hXOks>_W%36{xAQlA9iNW zoM)bS=9%X_&&=$}6M2z74T8Zy>XQ+;A)s8Bb8ShEf1&DVhMx)qasu4~P5AeMK$fZ7 zsP@Y7iA!&7UUBp2fhlXUp^+?!Ovy! z9ZI`$KIscU$F#Bw)6zd6eG%vcI)jUWK9_(?!R5-NeXj7+;k2@B(pUQVtNib){qJk| z?gqO1`D^*U&QD+Of8XGLr@nIhL~g3U|L*C3_ev|@zcCWuS@LnCyLMIf9C6Bh4UYcs z>p{!U{C&^E|LzvrvT}R#D|YYv`e4J^XZN|I^NSh#w>p12v?Fxm^uNCNW@2aaag*PE z?DVIq#=ZW<+?<mXI+4$P4-8ToXzc+aI!|znQH1FBKlGiW) z(XEr(ufPAiX#9z00~_~fAB}Hlyyfe+*T3-CU-o}m6Z0m#b9#r{i$6rrbZ7gW z8@hb;X4h!EN8`H#&98jE(T4FYF6f!};nq!jyO)3Ev|3vHm+(~`e(}E-uKiXBMdcNl>pxV^_RFVFu;nxDSKxO(o$BXpL85{E ze%Z+9v_Osw0H1UHa@n4ifo6fiUz_6Ee}3ZAAL8?wdK6apbXq)Mt3K}EG2Jg8d!a4Z zmutT-#qf&%{v&PvR=@su+E@A52W>%0hR^zRQbNrpuaBMsEl;5T7x?`h@`$Mxi1_{5 z=F^$v^XYdx@Oz)1>QT1-IQtbC$n*4!!0CakUOoZ;t1;!`bGko{A;11_`Br(=WLxlx z&u1II-Rc)?`DVZV>%Kh4mfC_d{Q7VBdQ$wXEkE6_-{0qRx-U2Br9M%=zr{X3veWwf z34X=TR9~Jc+x%#X{}NkI^GBaGe!J73vH4>A73kp8Dfi_z*e~zv*Y6Uy1-U-|NBr_3 zWwt!^EclXkTkuqCk$=8V!G2xi%VCn=-zfdmXNTXfsXm>LZE@gipMKR?Tkxe{UXqfh zKQE$w`6|ENK)rS^_vO%~*j7BxZ}&l;ehXg?Q++zieK}P5@=QI;F7wB?OUPE7@7G`C zkJpgVw%~GKKFbY1%(8m??DFeR8e{8U@7MpSU%ta1_Zpx6O+G(aK0mpBdC;%l!mqDq ztUjkf&*Os z#uerl42bp`Ic9YJfT8z{%(vH0d}Z*k@k2#m=*SU2rDDH_2jAvX=rwZaxN-U80)zWc z$St^UaQ~qNGQA$2M@dd!NKc-9NNz_?V)5kkW(WQk9IHe8vVBln zbRH@iT5xhjNL3r1f8TdDH*D0Hhfi9mX!Ka@2V}leK~hbQ%P+)$o|Is!nkD?5)y9r_ zB)e<&$py%6A%mY)9RHM`-?iW>XlEu$I>aU zSKiL2Q(3RPOG<9_$^$-~%E6W%?zLGnv)m*9wDMG+b!}Bz`KhVOd{?KHH%}|~(#kJP zE8me;o?7$M<$Y=8>G5~lA(l_6M{2!G^|R8-8>qnlbV@7FP8IUqC9PcRYpGASwDQ)e zLcVj-%72hn?xdBcVlrLspH@z&QTw?wtvnU0>Ee*I^3*z+%I{ArPoKtP)5@(xR67tD zpH@z2Rr@JUE5Ek3G!Q6DD{qrlJ}IqSI+6NJNh|lKsJS#Xt^CqdWxl7SmFJ|DPfsgP zU;CVuR-V3ATAo&p!KnRIrj@7bVO3hW)?`zkRcYl}sY1T1)5=dvEBDgMwPup~>_{t5 zUzgpNR^BXCS>L{0k<7ZLl{ZZ*_ot}2+9IvIZCd@dY31qbvRP^6UDE1zN-IyTG3aua zwDKNl^}D5&U!7K-lU9ClTDg-}o>~LY<^E~qnQ8U!Oe^n|Rz4)H{Gzn-`_sx(v9B(V zO)J-$XX-OPtvr1_vN)|g-JX=Cm3K-J;oB+M+SG|RdN1$%fKyst*vKm(kakMuHJzJQ ziR96)E2whx!Vma2vqKIk^MTfg|{g$Q(jYT z^8J+8P}EeJd{5;y1U2O*f35PGqBYY^K3jR2-kPZEx3{%qxC zx@yLnd`smuWom|){He;z#MJaR`3B0Lt$dEjALvb9kARvkCcjI0sZ>pt$!}I(Q@o~y z$*)shQ?({w@@tgWqo`)bVfBB7@>$APoBSf>L&{f~e1-Bd*)`=R|CaI>C_mlg|D$|I z<)@nb-<8)Rvu2XX|4Dg0f@+FQ{&Qrf^M; z$^TUOOO)?o@&(Gvgx6%5`~%8grhE&NzejnQo|*u8C(&bJU*<@nk-qM5I`4C08=R7T z%^yCK>GN<;JAFbcar$=pv`2$3@%#OR)`w&763TieBdjyvrCN z&*7gz^Bc9GVsQ8I{H(qarb})N6rEP{6KYz%X+N23J5wW)N2>;z`J;61fZLskTl=H) zPD!(l0cYa8ETv<6okXIiLR%!jPI2p$I|l5 zr>3e#{P9Ys53x+<_>T)XL9jocAKd zTV{Fh@OAH`y7^Go|EVX^r^&MMP9ia$uXGzh|aFPIT;pQ>u2Yag3^D~B7rS^G>` zu3wg=vOn9h>(lE^QW=kEC${SDL390io9I!^-beTw??3wu6Hctci8VSG4vL;q<#4&` z*4m@YnN2ch{xF!`SOXDD#;V9Q$nF!&9@yZN%2Slf$R3zs5)HElHZ+Mw*#jGyMC0s% zjZLCS_P{14(KLHtQLH%}nC-?1877MDy%{%}pXR zdtjzXw8$RV!X(bf9(aaHw9FpZ(j;1C4{T);t+NNVHihx2cBaR?Xw5AH;Hqz2cByZ=VcE(&m=lz59}~Vsqzvp znDjdEZ@TQT3r2qj=H6j#;#i*h={C`gedxvxxv_83b$3;`{H+HVQz!BABy8B0Oj!gc zab}ot%beM|L0Rhzr=&c{LnWuki^MiZV&6zpdNSH3S5va?38(XNv?=qct%lBwD(JY0 z>zjwmGiNqyQr4`oA<(^|XopjBV9fld&QyisM9*O6)0Nzj*&*NztD=n_d&&8i14UaR zG4Cx_M5LHJi+YshEue%{*Rs4yo4TwlZ=p?HSeCcQrp_(PTWnKjmgT)~Q>T~ZEg_Yg z+>pWjje3`jT2_{~)ce_9*#`BOj%MVRwQgA3({15$o&~Eg)YltBomu*(`ZjT1e;8se z<_^mJ$)LGT!t`~1X{tlEA0_)9sqMvAwEI@E?)7_7+85(t+3H$Ws)wP?Tvk@<)@Cj& zEA3aCIk&9zrrOMzWu=kY%;{yNQJbmj+UvJaQdW91|9UTfbM$D<*-EY4BU8`pZz2_O zGAr$q+vv*=b89uGX+4c`Vh5er40*0jZ1O%v&56z4!I$@jib@tR*-Pcedao#-c(et1 z`LW)MHs6+fC*@0QK8w8kSZ|!ocOox8*BfT@UC4J+{&t)1M!v1`y_7FG@C5m$Br~7> 
z#SIc^6$4i6lvEcxiKiM{DVNGGuJv!?FI(o<1aLFxF*5|}&d{C{@(c*+&e^o+!2 zjHe(He_4W$#9x=Zo!Dw8w#G>=Z0;l%wRFOZRFF)zAr*=3bTSeWVkAC8W7jWneJ~o^ z5smF~la=k=WJS9uRT9mv6$O-b?+WLF*%}n*g2@^SCww4+bCq0ho)Z)`H@q(rS{;e4 zjfShDp_*uHO(e8E5?dOLZGjt&wi8|&i6`v6MOM=Z`nii%on*y%k?^ueaDf}!p?=() zXy3sNS8KC z>|ptfNO*y~lt_kAN|4ATepeEl8Ds}FXXBl z!ktdS2Igv~6W-~BmPf)|u8IT~M(`xLr@0f{AE9H(Wi4rzOKxzrVe0L7K8a0y^^+Mj z;wHG*ja5bx%{ovu68i)uBB7O3I}e#V;k8a|v6EcfJ`!B!#&$-+bE(Y5b|wWYRc@%# z4K0s`D;Z)pxZe$~a)bLKF%0H1?n7CYF#>BTaFTNwfwj)t-DWtu@2(iw-N_hK?gl^O zk{dki>|Lq$L(3wePn__&uZ#w_sjt!C z=WYx^E^Z!;eP&ZFMS1q`ZfW!b>~?4~xH%eJ>Bd&Ldsjq5i`?*< zXmCvwjtmRS85on9i%R+_hJe&uH+&HKN`wwZ=}MTk5a?%W*m=9{#8GQL{7L)QqCuY1 zi;(yKat5Ditph#TA)i6OjCoX|%wNtZuzV{4#^K41f23HCo>igK}?6WmK4;YhZjPom+) z?wfPQjLi$)UDDD>^ne@O?Zl37JIc5eP53C15uGajX`Gzc2pxz77hyo0*Z~=*&?4qF zR!2=b!CjHqwrGt0uW>@3tNU*7BbYYVv203h)_8^1xMAvkgB+dY{5EcI1qMR4$Ym~@ z6pb%(-+cBzU*0^YG~5w3ra0kU@D>Su!>qNo&)K^?5=v@92f1qn_qTV0OPtt8CR1^q z1OF#L75X==06-FvF%3w7(t9GMsxu} zF@_+PG;Ja)%EC)E`6F~&mH?}<5JT+-S7HTZ3$Wl=0rfSAMS}x94v?Bs7^7XzoPYo7 ztm}^a`}1Nw#xjbEb;sW2Zje#f<;Ir6J4QGf+UACrGS*TGMj;wxs1P(6je?(I6c9N2 zLJ&d|iz1;4Cv+q=NHiRToCY!y{KhuVKrF8vh^{RcT=>_rAHtxR zaKkoL(T0@s)-QiBVp-wN-z6O$UYGYE7cu`d}csLr{%jKr- z@@3KRUX|(QD9XZO4CdK)aH6rp?(!v3&F;f)a(fduxuj`|M))wyPKpMh&75D_N)(lv zPkT0{_C&CpN|D&8s$;1cO5&Ur4d)q;ml@hd94Qv(XSm5_tqhN*-1b5@&9YQ^s;2Xp zMz!3hTAbq8?{ykU(`}#2>6TvkN$K%Kl!*4+lDO(3)r}X?Hd0Kdr=IxqrWty#k2Q8Y zoyA%)MiF8B@#)dTRZT2C+O9`WeX#Uqn+Jjs1`P4gu+txr{cLWnMdVQTj+nCo@S6ArKeM>1YDsvi76KG0r_JzSu##tRBn zMtF{=oGtE0HXkY<`aqr!i}%s6$z$(H;kdbYN_ZaOb$GBY4-l>L8#&k%&zgBARnD_! zo-=F4htJ06znsiOs)Qp+UD{g}4ZiR4fTshY9nnzI4JY9UGCXs?aAWZKo;VDB>L@A+ zE_B1nli|7d9uYf&GvX7wk)X$5@wlsWV+&n{V_`_VFDWSXPX0^(c;x**HpBVK=SeV6 z(Tr)I&`M!mXg~2xG)x-XWZI9$7_Iln;~^?4iiSRjhLx8$L<;W**T)T&M!M)>gpwo?H6AaS;|rOn76*Hsb;Wn$?U8XLS4HQ_kRlW!RS^wuHceJU zLz|2MS%o`D%{d zv_ZPYX*b@O2?}0Yd+)Z}ZoL6>ADxCXnbOOpcu~>V{HS8fZ}3Hp>rYT};&31mTW7-o z62t2%3y{eq-q1l*)@TrsJCPXqzoN?GRBUt|CZTr=`ukDjs7!-qI%B&dqA;JOY-7lMOpR?eJ zyWVwhJ>d%eA|Yy z_j4+qpVnjdY1Q)ygcJhcc%rB_W=n+%l)J$_^`e7lXamuK6Jynd0D-_{VSC&7F~JFO zfelWG3;f`O;K58WVi96FyQCLk*+NmGf;+}xk@Mz-e?9fZzQ6eEhl!YTpb=pqcYY@>n~ zB;kf_N^!{t&Rb)i%&5L~*rLX$o`O5-ur?C>l!v4hBuh1JZD;Hw9o_uY`#N`Sjgi7J zVwr(84W5t8uJv}IMr#pz>Y2xtv4nas@s#@54PzGlrxd9jPMAj$k1HNBA3A!@daPyY zNzAhw(&)9;Sz=7f%BDCzOGJ`XPaDn1!X4EB8STb1>Aqucicq#j7WSC zJ;q_(VK`g9#5{g?5ZlloG1F4+^a$PB>WQRvmiZYx-Eiz|Zd+q@A-wA5#8sVWxI7Xg zLNguZu|kBlG-@6*T*9Vr!BxZw`Xmx&#dUEj-NW9WA~A#tC&uA4f?ehJ7VcO?oR9P% zjH7JDw|uD^R+%`27IZjopjFo*Jjl5sEqWrPeRnH2D9tjI?Bcf^Gvd81O>D0%!Br5W z4XSDXIg&BM`jB!(@ef(6Bqe{7WomVYs20BPPU3^68`4F1->lU^cZuyfj_GHn;pA6z zow|ls_$>C71FEJUaI3&s3W45?nqJ5BG)sAwaV0-LhTFRC8_`FDN5mu?ik9UPFT`>g zw{Z|kkB_HDi>9ML@M><6zb;<=A%agdL@gr+q{d?1Tq9A-qYCR7oN5>}0h;CUL{jzD zV>(@zdK}9n{(PD!r&xOC>2yMAPNlk$rq!1^p`srrl;)H=W?2qHad_^S@jtmV{ixAK z^CzHMM-kI0ic=&#f3-o2v&?Y?Ev$2BJf~WTPEyycHfI#+!Id`ZK1EAy8k;lHJ#H;y zjNtaCW|2}RZP*blN`l0t-qye&F7C@l1|tu7$YVc zM28Nhn<0F$S~G;ba574bV=8sjP_o=Tm+50uTL`cGz$Owie7IT@s%(k$nlr1&)!OMp zSFGq$dg}kOa&a&kTn!IRU7m_Oq+^Wv7r6C2_j&pocX=W46*r^X)?;Rk9_JdDS-v!m zHS2AJ4lL_xQFFCWug_eEgnq!V%I(=ez5QdKkgPf=W_N9qCsq3`+%Yeye8s= z*gp1Za9AQ?Wf(7nIyCCisKP1}J-*KqngKGZVs`LKV*2aG#4pS=Y=Q`t^{hb}nj&R%>n<8FpN;<1bK$AUW>0agfXXYC=+ z;~ZxYmyy_0c`|UqGjDz*ypi=>6ACTHL$W+DpUohk1nNdg2-E)a$n|fZ`T(KCAfm<9 zFu>;CLParbu;Dy~7CNCS+yHzgxoTzoqAzHohluvZF0b_FJ$RY%MT{ z^Yns9#BYiZgrfZu&0cj>yWPqr?-w1BOxLDTVRm#wFcAH?pEGlr(UtZXyIEv^YyoQn23IqJg-P&i^Na?xM3}u?V=qFB zjTOZnF&$-(S{hSh*F3^#GAS*ytasn(GG{>JS8g4-fMu2`k>I9iqT_Ye9>|0ndw{}e z>r!4jHs^if1{93NHyI$<@Ke?ly&nzF!%`4>V2~Vb^^(?-38uyvf%l237qir}p5j^_ 
z+TOPPqjug4%i&=f%N+2)?t<(AW2o?x%|&z$H`PWCh6iRDEo;%t@}QLEfq4NB8)YHI z115nq%T1UBEjQsNp}#&4{&Ev?Xz{oFJ32Q0%|}s|K7wDs!fDnbVA2)Qh1g!_k%VU};!ks$IW!it1z<0wThtah%4fz-n> zJ&2mM{O#YHvO9>;;(k)Ho zd4=D*Dbc$J=WIt!!s)QS<96d{&dsoUFldt8_A2hc@i@rhRz}KaQb6`qZ9s;<_PX)C zaO@N9+hXe!N%>y7A}Pms>GFr1@Tz`Pabhn!)>*S2=oYcaHkX-K2dSj{Y1)xrwY#4>zDJ?*pJGbc&T~L#Ru^?dHGJLEBIK>6GKr5&xWRuwSF+9Usv;% z>N${{W+Fehps5sbF+ea-+klY`T*e0R=%9D2;5Wso30uW%iqZJ%;vf>-aU#M#QH;HU zHI6ZfaT7Px<5=w4CVW<@Uj@69bSwov&9)&aMvT)Jx+Zox31H$aR_|7^5`Ub8@nKDa zDJjoNXB@&Ar5el=(6_hMHDUOlP7|Etvu^8u!9U&*PEQ*9R_IbO>8fGf#uA46GNW>7 z`cW%#Jwr3&psr9wwIGH2YGKgD^vV=85 zh;I$UDZ+803d8YqDT-7X1vPw2RZ_B^n$9utHG^5D(=-}F@oR=r1Ckm>BjYJBJ5_;3 zeP*HSt8_}239MmO<9i(0>2+kae1Xd^_2QhIRx*||YjBQt_!C7M9v?N$S7$^p6;4^M zIeGsLqmv-VT>3=TMzg`Lx0v3uX|6eZCA6kMi$Nmj*EX`8Ix)9s5+Qp=od|IU>RW7?knQK;l8p9Y%l&hS#E2~z_IxG3-c2c$=Pj@pn z&pPLztpV1jW*CbaVvSdeYn(YUZ3PJ`cDk%+66~2&l2$9-H*bF}_tvH(ui?}L z58BF`6U_oAjL%#Rq~bGfX4iq}fINFXI`*a%PeM{vCNiwT4R=Fq{!n)z&ug`s9-rYI z`te!Pnf043&#jF-;#}mO^2&Or|`(nmB)30KOp^A+qPW*NI_7IJ`+1ueL-X@00L}@{m za+TNV8&VwQ)B!?;XzWw5ILwhpWWedMv?xv8sO4eUj~2f7Mzi+aBN^rg4~!*YI&1rW zoEBPYqTwX_Ijkrh)K7(sr*FIgCy2v{{;|1vb-+i;W46m! z=rB7gh7Et8MF(E1oVQ+?^`DP+kGcGQC*uy5qKL#;daoiXi-Z)Ix!S7I?v#lJ3AkCI zU;xPzuM>}%g=~BLM<;w%5=Pn+K4w#4pItvE4q&H;19gkoi4^~KU82I7b?86a2W@-u z`9k6a)~|JCFWbHx0%jnqIW&nNWmN3RN9`Wq11dK3&lviFAY}>!u?|Xv;Ee@wlz3G% z(Y~D{SDDy>IYRSh$Cr7-**$x-@U0|8 zWWj4Y{@8kaIf)l-$VA+5kToGNAFN3=Gi ze625zLa=bdzn`)&gFT<*Z)L*-Q^Oq5tt1?>sd7THq&sVC_u*$ZZLr|k2ri|6s977% z%h>QXXMQ!8qu3&2|2XW6Zp|gHGlgb-id|dfWd+T#SLPvx5Yb^?JDoS|f&)kAaK0qU zqVOtC0L!%K?6bnXkIm#<#y&XKT6ydTx@l6JHkQp6BPx?>Lu8avZXH(CdF#kV$U)ev0RRoSQZ~*)?0zpiWnetdQ1%;{U zzL551>vRTNLEwIw1IL7mvIV4MW&Ch(;Vp?~*e>`bDrGVd73%3h0H+fOCdFanw=cQ9 zVEwM~56x#I$u7N`h+WEtDs!$R(Y}@DIF`Yf0au;YY+P@mLg^>MJ*fRT*$6EfwbR2- zdH?L66JBHX3AbV#w4}RS4{WpP!J;>gh*o*uX9I@K9(I^mEEIEF(QWpP9-kgj5KoT7 zH3XD|J*r@ihp`nbuarYLW>J}rDb8D88yc~_^@&_Jo#wV@#qbKEI!B|m85~Z|@GxP_ z=9VAjk=vuoJUW*z;dc=C%aMdE{+K*9ye;GPGN`On$Q5ga`b>cld7Rdlv%W05T*l$T zF45H^rePMoms(;f(v}{}{kJ6AU#_i_TR7{e4ieUwJr+{(-Kn|JY{6++LLMBCPrN>V zx5%8zvxL};IqdQS33EV^1^Hs_yZMI+jjw4=PT8|uRUCa{=@}N5nO&#Y;i!IEA!xyuD~H~!bV9)m_M9l{Hh#YSBx128 zIOV&D=}4oteHi(AXY{7l^^%8>B=()0i7OrWW7r`k^)s4=&P+*llLr6~59k?9W659o ztp^C2yFiiCoR^0FVDr)&E?UGMtevzAc*-3%n`7nXMAlx$z8z5AqWgp5bq6dDrq|B8k+2zw@?KFZt;4H5|lV zug59RFivyJy&-)N(``=L@hdA<-FVhl@x1g`-4>b%b|sdLM~u7?Y#Zkz<(6$Kv3AG!SH2Gp z9JDwZz+Cp3NbRg34X$n>F5u@?HjL%LWa2CgiND;4H>q7RIM(k|{LPTJ#O*WtdortduSOy3cXc;pBm501o-dj#Csf0yyx_7->U0r4{5!zOi@ju;Tcrau_v9K;vm#&^CJp2b`-zK zkIG1PJoqS}-;C^4NChA8SCd{!Lvx-&~+wgOYeJ0rY02Zh6`z|f+D^lVNs(y-P z`it^n5zL7^lzFi`Cvtj=m*(AfZZA|?(?E-S#2}%?#wXcrw_uv{)MoSEHwFYAw2m>b z+rp%DxD^E!lj(ov?i z4IGh%YQY@)4*E%%$>{fF*fuKf=4V;--!!t=G^cabxL!<1Vk~VsDWm4iz4UJ-r)m)a zvp%ZhxTK+m&13brZq}P(wJ2`ip4sz;#dAJ*FT!+Zx5wXst}?$it*YC8R;)NzJTPvC z2QoebN{Im^VYa+v9M~MU@deDVjb*wVi zRep1NeIrjXd`tK3tp`53_O&sK2P1?@ob)6GvS!{g`Wh zqeu_v3c>C{v`8oLpc=X$WppF(%{j( zS?Df{oCML_v5j33?aA|#OKBBj14}H%ePkjuf)GY=y*Vt(?x%Xx#wmHdKfD$@!5N+S zHrH5~;HV~tRV&%xLSm+n0ZBa;cSVA)cA>Iag+du+VOf2c-9nuXnjf9}lb_=r!9+Jh zmnG3vEc)e`I`+rTLGx2-CvJZPf8C6Lu9?HF=Eu{SrN2qSQOT?Nar_fbJfUC5Q(Gj^ zyqvPJtYS08y{2Wkjcuw$S#BenYFn1u(5AA=ax-kIQ(0~Uo9a@Q8&oPjL;Zk6w=(5( z${xzJ^qA11487BB3VO|K3VNs66!cEDDd?S|RNM+t^m3b00Kqnv;90gJ1kbc72)4E< z2)42*2uky~1A=EL6`vuYQUJjg6hN?pC3v2#2*Go03W6Mm*3A$+$EF~7woO5>ol@}` zS+xY)QUJk=EWsbxiV(cerXbkSrXYBMO+oN{n}T3SsrU>tzJ|jr3LtoeC3v~52*Jy2 z3WAr~6a+7^DF|L{QxNQ|RD4F;S`Ir=0Ksc4!K-aW2zIq82wr7V5WLc+Aee1a5DY67 zpV73IU>6D?c%vnFgRKa`9ySHR>um~x*Vz;VueB)%cDJc+Wx3trrRJp#6X$unfx>@n zMebm|`@z1y=?=XN;`o;*z4;Z@!zp}FMqQvPt!+}7&*~Iu&KMyXC 
z?drU=^P=3cs{(p~Msjlp-WHNov?kHdyk3L)xx8Q_K-KWB(4z3JSnHqrFWSf|+Q8P) zK3=n7-_TUlVBuWvB3>?$*+Fk}sP{gT{=By={e32p#LIf|2F8YiLFvr$P7^orVpCq# zfvMK}Jr;cu31-1vY-f!vsRF-b$Ghqx`; z@~&m*E3VT^mGMe~*DYI~eeVw9f1)if@`+f!dd9D6ocXWp;Qu7lZJ)933&huZ!IA~A zbvUc&4L9*jF;vvg+;}inap6&N$<0l3W5L|utqt9hWWz}Jg+=SbE5Zx?@yU%hY~8ss z_mYM!W65MqhuG5O#-_m?v4*Xjl8T1T!~>#N!~*hK?*+a2DW0b{s`x|Q#+f)ze>aG$ zsTZo$jHH%#(Z7!wfAq6Uz1NbYk2Q+P8<}Kwkq2tlIM#Y%t#)m95);LPllX<+Br~br z`0@h_?>CmQdOG;ZbVKhBh{rahzn@6-Q`+02XuGCT+WXY}`SgFtZ%)y6an$659GwA2 zlj`#m7duUiUhDll{OqD{j#IL|I5of1`@64br~j6*y8awb@98o_Pw)H5D%xS+5|dWn zN&MW%Gqzz%GkVS|V3<)03Nzzvc$?JDaJk>YEQHO=<|f?%Lyu7jgSA0=6>m|xqUda= z#N#gb@vGI*7lC8m$5l`M@d1q=>pq~#V+ChDa;l+Hvah&kv$@O6-xR@A|Bvfmvj!Q> z8poPhk@Zklinch3M;^oDw*1AGPh4m8zNTrcMbOb3f#7vV(S|`a%Xm4KX{CYzJupJ{N7grsTf+c*DhP2K7#Is1(M z`-M34m#*ztCg!QP4fjQzo^qIr?T(}!Tu^f=ximbB&ou`~$wLo5{y zUGCZF-KAHC9W$|;GqKyE0A^MXn(O`W^<(C9>g`msj(caBc~_75UC18us5am$tF5>0JTc<6E8r I_`-hpuY2)Q2e%QcC^l2DL^kK|5WX^OBF~ZI0PScVv znw0hXChqJ=)?}FUUXyNY())U6&g}nnpG33C8n#@@dWRR5_1hPBzFuBqYV0SOtZ8V{ z2TZz&Ngw3SJNNZXG&`sEu7h!BU$UlwxqQf^8=3TBw|jY}IqsA>v(NZQqVIt4!jkGH zWqtd{`;1R|876(JNjEm>+cIYkd@Q-HLCLx%IwyLdtnY2{K94Q;%*ESDCOs4n0t5W< zk5x8cBq=K2pAqO6j3vwWH=ysq#8pewug*KldLGrVejK=EfMz1|_lLY6uD-llTX}UT z>xYcIWDV?-HZt0Zj6U>b)FCCKt){nBK}e(#4CwdwQu6CizQ2(#KTU<89(;{zrMEh8OSZL~_3aPuLu1%8dM6qt zBqOqPA)|LVS=Ouxrh|?&Y+z)Sky}#TOy!X#!FV&u5K8TB)btBXg$AaAA>`9F6;hNV zP4ogc*xdJ6X6d6w1U(-cR8pOhXjW>O=<^u4aAjHF$1-R3d(7|i7tQ>_$KyS%9%Uq2 zn;TMkl@YEg>-)2`H_%C6VjJq-CYDuihDQ=Ji?ItVUopueiPws09yiMS%io#Ei8o}? z+{8N_ZSiLBd5Z5=fiCDpC$Vk*i`Qxjxuo$h?WMiHY*jrE5j$z9C1&VttDJe&(iv~z zh|TV%hpb+YvNw{LZKJhBC(0s;*UR}zw>*`+gL2$7xr!7m zKmYJo-GaPL69=g3J)0^Ds8v(cj$4l%1h28_OkQQ3FV_;2KMP+jEyVDKK&$@vIQ7Pd zeoNZNd+^AdK1LF+8nGm1sC(XM!%D*H$0kP4i6!Jhdv}-%Gt_sDKp(QEn`S)jgbycS zgL<}tc_U4%be8VAW> zx}CA{Hi>THZ7Wu3t=H9-SpHC+Bi;on;a6$Lim+2X5uQt}#CuXp6YkQr2tR`>20ba2$oap-E^vwarYZeMfg#2&8VXq`QN=tZd&hR|c3$lr+qrkeP5k(;WM?pQ<_d&_=Tovd z7_qMcEU5{evRtvjqL^7_@y7pIGBd9UjGGb$`liJHG!lDA{w^*<653+4LCMYrCA%0k z5rw*&tZoWzh^oa?2;2;zboy<-o&J7aRai$qqhx1B$*#lyZ|Kj0z_;-K8$*AGn;6iM z#htY!I}Y)_PA9ppDR1i}e7uvXEluv*xq^;qnod(P zbB-BE9ca>fBsbsag;ji<22F!Z`Xu~>TJ_&=o31~+`8z$Lb`pQk5c69fyw7?zkcOj) z*_KaZPQBkloXEa+#}3w}!M|p6x!0tu^cvoB>%IOb^#*IT*ZU8ng8jN~sq7c?a+5b* zC35D>EN!XoFsTwRTSLe!+2p-!3fDs2D=_ybWCzULPzH z)7k-V1%=kCc>imXDUGXVW(CMyPRmjMoifb()#in#QV% zuEcat<2Au)T2tpQGh(sD8Zkri2u;@L&79s-5U+jy*1kP-dc6pSXQ>u8B8eIO&9LdQ z?@XL;&EWQQ^&+J4s;8Qv)$0z>mltRYpFrxyU}`-@?>s|j6Li7Oj2pN^bw7feWOb9= zE@)DR-OKbuQf6R?HDCczLW`9??`YjijVJJ>CtNMH@D$7BL6AsOkm@Dn41a znd!dC8)j~vqd$%<+OgSLs`nXhp{1VgaT9Zjo0yBi%nbNnm?vsxo)8*erlR9m;IL1eqvVfX-K1o;d6%NlPB{g@9nzk zwjO_!HQ;rvD?OIT>0I20NVP<57%)U0uB+}7;SV%;JHD=E^jISNwFd7E5wTjl>evn? 
[... base85-encoded GIT binary patch data omitted ...]
diff --git a/Linux_x86_64/lib/python3.4/site-packages/setuptools-18.5.dist-info/RECORD b/Linux_x86_64/lib/python3.4/site-packages/setuptools-18.5.dist-info/RECORD
index 1eb3b34..cbb0d41 100644
--- a/Linux_x86_64/lib/python3.4/site-packages/setuptools-18.5.dist-info/RECORD
+++ b/Linux_x86_64/lib/python3.4/site-packages/setuptools-18.5.dist-info/RECORD
@@ -71,54 +71,54 @@ setuptools-18.5.dist-info/top_level.txt,sha256=7780fzudMJkykiTcIrAQ8m8Lll6kot3EE
 setuptools-18.5.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 /srv/openmedialibrary/platform/Linux_x86_64/home/.local/bin/easy_install,sha256=7h7lc5DCAhnE08UwEm49wGymGDGdOcrkRdncTKYmXIQ,233
 /srv/openmedialibrary/platform/Linux_x86_64/home/.local/bin/easy_install-3.4,sha256=7h7lc5DCAhnE08UwEm49wGymGDGdOcrkRdncTKYmXIQ,233
+setuptools/__pycache__/package_index.cpython-34.pyc,,
+setuptools/__pycache__/msvc9_support.cpython-34.pyc,,
+setuptools/command/__pycache__/install.cpython-34.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,,
+setuptools/__pycache__/depends.cpython-34.pyc,,
+setuptools/__pycache__/py26compat.cpython-34.pyc,,
+setuptools/__pycache__/ssl_support.cpython-34.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,,
+setuptools/__pycache__/archive_util.cpython-34.pyc,,
+setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,,
+__pycache__/easy_install.cpython-34.pyc,,
+setuptools/__pycache__/compat.cpython-34.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,,
+setuptools/__pycache__/__init__.cpython-34.pyc,,
+setuptools/command/__pycache__/egg_info.cpython-34.pyc,,
+pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,,
+setuptools/__pycache__/windows_support.cpython-34.pyc,,
+setuptools/command/__pycache__/build_ext.cpython-34.pyc,,
 setuptools/command/__pycache__/install_scripts.cpython-34.pyc,,
 setuptools/__pycache__/unicode_utils.cpython-34.pyc,,
-setuptools/command/__pycache__/saveopts.cpython-34.pyc,,
-setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,,
-setuptools/command/__pycache__/rotate.cpython-34.pyc,,
-setuptools/__pycache__/dist.cpython-34.pyc,,
-pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,,
-setuptools/command/__pycache__/build_ext.cpython-34.pyc,,
-setuptools/__pycache__/sandbox.cpython-34.pyc,,
-__pycache__/easy_install.cpython-34.pyc,,
-setuptools/command/__pycache__/upload_docs.cpython-34.pyc,,
-setuptools/command/__pycache__/install_lib.cpython-34.pyc,,
-pkg_resources/__pycache__/__init__.cpython-34.pyc,,
-setuptools/__pycache__/msvc9_support.cpython-34.pyc,,
-setuptools/command/__pycache__/easy_install.cpython-34.pyc,,
-setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,,
 setuptools/command/__pycache__/register.cpython-34.pyc,,
-setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,,
-pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,,
-setuptools/__pycache__/package_index.cpython-34.pyc,,
-pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,,
-setuptools/command/__pycache__/build_py.cpython-34.pyc,,
-setuptools/__pycache__/version.cpython-34.pyc,,
-setuptools/command/__pycache__/install.cpython-34.pyc,,
-setuptools/command/__pycache__/egg_info.cpython-34.pyc,,
-setuptools/__pycache__/compat.cpython-34.pyc,,
-setuptools/command/__pycache__/test.cpython-34.pyc,,
-setuptools/command/__pycache__/setopt.cpython-34.pyc,,
-pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,,
-setuptools/command/__pycache__/__init__.cpython-34.pyc,,
-setuptools/__pycache__/site-patch.cpython-34.pyc,,
 setuptools/command/__pycache__/install_egg_info.cpython-34.pyc,,
-setuptools/__pycache__/extension.cpython-34.pyc,,
-setuptools/__pycache__/depends.cpython-34.pyc,,
-setuptools/__pycache__/py27compat.cpython-34.pyc,,
+setuptools/command/__pycache__/build_py.cpython-34.pyc,,
+setuptools/command/__pycache__/setopt.cpython-34.pyc,,
 setuptools/__pycache__/utils.cpython-34.pyc,,
-setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,,
-pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,,
-setuptools/__pycache__/windows_support.cpython-34.pyc,,
-setuptools/command/__pycache__/develop.cpython-34.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,,
+setuptools/command/__pycache__/saveopts.cpython-34.pyc,,
+setuptools/__pycache__/py27compat.cpython-34.pyc,,
+pkg_resources/__pycache__/__init__.cpython-34.pyc,,
+setuptools/command/__pycache__/rotate.cpython-34.pyc,,
+setuptools/__pycache__/extension.cpython-34.pyc,,
+setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,,
+setuptools/command/__pycache__/test.cpython-34.pyc,,
+setuptools/command/__pycache__/upload_docs.cpython-34.pyc,,
 _markerlib/__pycache__/__init__.cpython-34.pyc,,
 setuptools/command/__pycache__/sdist.cpython-34.pyc,,
-setuptools/__pycache__/py31compat.cpython-34.pyc,,
-setuptools/__pycache__/ssl_support.cpython-34.pyc,,
-pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,,
-setuptools/__pycache__/py26compat.cpython-34.pyc,,
-setuptools/__pycache__/__init__.cpython-34.pyc,,
-setuptools/__pycache__/archive_util.cpython-34.pyc,,
-pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,,
-setuptools/command/__pycache__/alias.cpython-34.pyc,,
 _markerlib/__pycache__/markers.cpython-34.pyc,,
+setuptools/__pycache__/site-patch.cpython-34.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,,
+setuptools/__pycache__/py31compat.cpython-34.pyc,,
+setuptools/__pycache__/sandbox.cpython-34.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,,
+setuptools/__pycache__/dist.cpython-34.pyc,,
+setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,,
+setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,,
+setuptools/__pycache__/version.cpython-34.pyc,,
+setuptools/command/__pycache__/install_lib.cpython-34.pyc,, +setuptools/command/__pycache__/develop.cpython-34.pyc,, +setuptools/command/__pycache__/__init__.cpython-34.pyc,, +setuptools/command/__pycache__/easy_install.cpython-34.pyc,, +setuptools/command/__pycache__/alias.cpython-34.pyc,, diff --git a/Linux_x86_64/lib/python3.4/site-packages/sqlalchemy/cprocessors.cpython-34m.so b/Linux_x86_64/lib/python3.4/site-packages/sqlalchemy/cprocessors.cpython-34m.so index 79e669c91a579266286bd0f583e762566203ddea..32a79b07b6749b6fe7d75c9a42acd7e8aa179044 100755 GIT binary patch delta 8515 zcmZ`;33O9c`oH(3Desjo>6WDndFc{TptLDXDaF#1HYH{20$s#~vdAt(Eehjki=cv~ zlKP2MaiF-cxPo=m<66`iQI87#T^MJSqNCF~FybJBBKH4%FZV&|Idji>_ucRJ?RU+) z>HR=D^q~~Aj%0C6E8ftH`FDs0zPzlE4Z}KvIXX%l%EHVLYhlOIH)0FZLd3;4OyD~c zviAMucMsvgU3VRv;LW_Td!CeNNRDLtQ$l*rhhEH#?>H947D9(H z(Pm*y1*+hLUd<9?CT4{hSsUcE! zU(UTCaQ_fIY(z-DdI)~b4sR-Bxe0z$EDB2w3B3dd6brZ_OqF(_c4&xNZ&FC|=n%Xq z6b2?m`24EL*tK&o5+;OVb(BtfcBg1pthpi#5ntwUB zMR;@#!K=X?X|Xj3?wTRomtbC`C5Z`-CeP(I`y6mzKxDaiDF)@)*O}U{6hGCK>$d0kV)rbZLp>c2xqMH$I z9fT%AJEE^58XSaL;Wb20AlfwuO@@<*rkEn5_76fwK&r_C(@l}Q2W~KpXu005GXI@D z&Bng6_dx`=3n z=xUA5BRYa;uSSm|dN|R!8l6UTB++V%1|$&>MSw}8-rM5dU*e}D7h%cUXFO1@fR2qe^|1n!0+*!`1P96wB%v* z8P;$4X^61hp4EJoq`k~P(X-BsGjPMB7#ixjduCN``t1#xc4)%Xw*n5>y7^JKE@>hk z1G|z|%{*}iBMuJAXQJoF1eG<%_&1&He*>8@_EywJ^>FUrbm}eiaD@1C(VmPleaSx$ z4B*mOZT7c$_Q18txz0t%_qTgi;pFwV751${BkrDmW4~Q8(&kwT&nJ(P3KF0n#!mXxb}amZg3oaJvzm64BKzRQBIO{pJSafmXJd1p_|#&(Lb zy?A$`#`+w)p)W#Xt+^TDoCL^89Z~YpX^gf#(NFsAo;{?p>6m|$vBZYJdSedayl*P1 zp->rFYWF-Sgw`9~iQSu1&xLV6+?lrB*mZJXAkdwe{%8d3&-5z#2^xeW+2dpN!y$2~xc0?Tgke!&EBxjSGnw?c2Os_3Wf1vs!J9s)cJRJSWaKU47x zt#!|AF^ve>&`(i)11?lUxPF%EXQ$3@5`3r7*H1~E9~6B3eALfFoiEP8B32>{=trL} zz#O3f^h2*p949)z5ZvQNVn$Y5Ku8LYAvLV4;Xqys??ky!_x#-J+g=!^+zk@~_2jMAKRZ8~GLp(C5$&l!vSjaCTl*l@TY zp=2rUz0%P_Bn|Sg@9`usaCsf>zXoX-$6eMCHtYut3}3H-#$~v7hlLrJu53mH<4#B) zpWd_xZ5WjX)yl0HrZHS2 z8kH;wq6)42b8r@pYiY#e$7tcRQRfcjO011melrHLUimu)ZcO2KA+b@3$B>Pg{8khQ zD(_%l8pm>4-JQyZMC52hmvW4p=4!-VK4%;aJm6=lVz*grCh8IBr3uFT{_D;rT!sp7-X7mDCmE7kmARI(}$V;3kjoGhq{ z^Ah1wGD)nCZ^k%s6)$F>)bo8v6e@XSt$}|roY|RIk?@FB8hIpkl3$5H56YEV+SQ6@ zIJ6e!MV}{fD` zgL{=vNi3j+y+-max`Sb3IWT*tQ~=ctlT?(X1wkmyzZ zNji7(+t5Rw@)dTIau?r-#3^MN1%EgH9Eo$LMj1LLCh@P~sfk&m9b~$lzmEo7+%%WW z-ODAE%jKqT@TF|wpChl3n}YXUkzDr)#eE--LwnWSGz&XT0gYxqv-bofzXq=Ru@BsD9>+Oj9aV3+i^dfATRK(wa_a>>*qzE`MDV@IsrjiUvjevrZ z%K5)ho~H{@(IhJr^-t8c%1$zMhT5upirPP4gSAWkKWd+~Vx22@QwN`;cA;#c5&DJN zUimcj$gi}3{Bnf^$4ktM?PU4_nQM~ml*#YZULv2NP9LClKzWNoVVsCH%6?*SZbhP1 zX{P>?wCTE0*+U$I#tABoq+`@{I+e-@xYlds@4_u!C;tU@c%80g$ z4uv*?pFlB}GKC!2HBPQFg*Yl7hv5_|2a&I&X{NnODs@&mS%ZD0wyfP0@JJEE1v23h zF{~vMS=@|qnV97R_;=~V5g(AiIDRX#!sT^XC(=F;Pcwj7_CxvP8T?_mZ?c=Og109Z zL9Yc%|$Ln-z_y z5iVMo@Fthm&4D)ZN9Ymk*FW- zrhHs-Ey_pdQ9D-#^c6jh7F419G{za7e+!!N$`P1R`#b5|N8XDbVzyFSmY<^b1JpLj#lvBJ zMOx;Is2=n1UHHo{SN;KmjM?!#whLu5x`^5NCAPit8hEebaBUSKkMBTbtIEr`9lxBk zbA^|4EnlS_1-MV!y-Z3YE@57w5muZr7(AcC|AXdGxsqS4DZ_~=<9G$AekY#cQ~bBv zj*!xyG$n^-ZKt-~I~w6D|0wE)Y^? 
z4=u&1Wly08E;mT<;K;T2bKT zL5gmHlfjQK;hs8yZ$>=-+#uz%mxS^o;o0dHTQ(w8wxK8+^CC_wzLT=bg=5nLqjPD% z_sl?+)Zuq;z{#ak)7>xNdeo_E_W}6BjC{Tlo}J;sp*}jpHR>MGmlxAtBwSInH(D;G z-wC5;CgFFfZ>A%OvZF$ozB5SXy+ml8X>RzBU~7r(6%I%?%zDY*s{$uKDVK21mjVx^ zT0`MiQmVaXI60F>a79&eDC3&TG8UA`SWuMlb5;4g9{yG3!Z~58cKL2ZXH+g`oQxSz zp-7&<4++7@Um$QA@IHZy0skZ1R-MK-L07fK`QRXRYtlt^&uaqLdZ81(thS7MO1LIZ zb%R7d5V)3e!5A7qqck5w!q1$MHJP#dXf+wNDPDowQpPKAO-(lE&{mUd=@fUZqj`{m2tQZ*Y>arryt;PA2V7`e_<8(&>^u==-D_BLNQ%^sknF=Rro%LFv_1fY-j$2E{ zD-sGcXmoZQW8<~W@_OXgPwZ^5&5qQ;%~^wK?9hIV%RG>hj~6pg1%X*)SCnmvqwtjl3?8IwB#R!&OcEH z<#Fek%~;V37&(K$>>@r5o}BG$nI`Tfbz3F>K!K8Pv6?VXuuf?Pa3?rq*TSPJKT$Bx zBXc=^HtHTcrGgQjj$%bWq9OfjvR!ENhdCOWC}F2WLv>w_bSxRx)upK6;;S?TH9b*+ z&}K~$-JqG^wK^Af!FP4`WQX9oP*nHKifYheE~xcWc^53NPs94&dM951f3D9MdCy?u z&kQzRdJvC6zD=7v`|oSW@V+m&`DlEzDmuF&hGPw8+n0lx7o9Nodl20?%2F+1 z&MN7p2)`XW4No*6Sp<2{1BR@C(&Uj--a2{ zqkPMK3$VB7iNDd8f$r$BjPo_4R&#gUoHvb}b$>e79A=&}vra8@yNBF-K>zhs&@tZ( z#~w0vhc7(ByMMiIfgvrY`T9UkU}+$G{_3SG7G^KLDeortvc=6gvudW7u2`_-y6e|= zU%m8}ueJRXleY2&*(t-6(F8;j0Z$q3jJblFhA`Vw}kB|>teYP;B3WW~A(CUKU6xPEtdyRdT_aSBM^xq?VD^v|MkO%g-jwKN9eSpru@21|sYe1UffyjS35 zy1dwdy#k*va3+SPN#NukuD(CPzD^S`v8GJH5w9&8juZO+tg7w82wp*7CbU<>6Ikz% z*>wPM{6b;+f7rUx+aWvf!~GU+hGARhatkcpnnR1V`@yXshhW-b9xVCb(P2tw= zTefj-y9Hy7!&?LW;#{{jA38dH-Tj?+N+#TU=KB0|D{4z;_~!b{YrB_k-zh~_ubsU* zu;RM8)$DVr~BIt{~toQZ?vXQiJh*VEn$XHJr{JP7@yTS2D3Emma z!}@*Lh4o=Lj`bbjc(jE74dy*+=ZBya>#t!KB7JZi>yzMktc3pz^RPC;F05nWIM#M> zJYK>_!#u1DU>DYDr&Un8dwilw*<;cETEurcK9$hEd$h7~8{v$tf`hy5 F{}0*SDF*-m delta 8549 zcmZ`<3s@B8-k*1tg&E|s+;(=k4j{N_$aMihWD!;uuYiJf70e9N68qG!8=6`Yq2S-! z>7uA;_ay1-^w~u%&q?j6sPA+R)v2(opsXyj>|%WX|GV$##(BPZo|$=nzyJN6_p&o^ z{4KfvO}X7R$`vm0!{xn6`cBcnn@cK?0`QtF=^Z7BYFCO^)YY-*a9C547hJl-IJPUY z=e<$;>L+VbOzgg2rj)(4<{!-=-@+NToNef-m!nKEBc!vjUc2YKH_3|ka4AUYf(~<3 zgjK4`(}l!)TMoO;8uXeKZG+`c0%^4@Nft@#`OaKwQY{f)VJ{@A8dR!|ak=qcM#<87 zL-9hXSn!@Rx4qnCzB`(YVMn}p{b!=&A?8>Q_LUeB31I&TK>IEb*`%!#jv2qwb9@u!4C2umD)Ksc1}**}m-d6%$- za6iW<35OBx<@i;?w89P^;rI|?8mxoe93LbcNqAN#C!QdJX69f!$9o9V8acR{;|B@T zd>?E?>}e~ukHp8;W~N(>C+&i#<#W&43n43`Jgw##kPtpneLtBLobM*XwD1I03QNLM zuWCU~S~1GAwpfe9Rt;J$5Aw7Y|A1&99Sa-q;N{xCo9j@KoqLTQ_rkHE3F|O0j*YhB zqtWPeZHcsE#Nc(PxiB=L9QjuB|G=5>Ub!y@UWsU7>EMaHh1~*wjl9)=?QcUvp$V`n zB93`rdK9oNa4ITW3Db3HTC4eXh_Sw6{oh|mM=G6NGt1L*&a?K6Yg%i{e)!IMME;uw z&uZD2{b}uU_8FYC-JQ1JXVP{_o+4+d6-VT{y_g!>W*wPT(enFY8iwe?GH@p>v-hwa zkP=5PA$$Bf&3>M3$Q8>vgnLp?1&4<0~aXu@i=Mst7WX*v5EM(7~!OlE2-%CVD|;$Zf)77WZoCs{|n#(ul(q}7=Q%VS2%KSe-i z%%Z5hKk{PH_nHVJ_ELzC&192cX6)5&8RZMxXL(xVj)#-);(85lEpeM|_=uuVxc*4! zhBnHwt$1M+7j4IG82I%ZiJDd-{apln7aL!47#UAnltT7x&PU0lg6%J))9{JheIhB9zvezmM#h7I(pq6@WYGA#?jTLqqL7pC|1N^x1IVNzV^6_GX$o zbaa7z>;dNLIWcx7gUrkwNk7q~z{6ks+^~v!s<`Kc!@HSd1C33)I3DVm74$4eps_C( z=un+Cqq#EP>&V!aja9r!_wvSuY;3@Wyi>>*3VUO#HRQcQZfvr~rfSH=W?CpU3kSyj zX(+f7y$KjQXTR8!40#h|<&46LZ1!%UDI$8!@Y)W~W;e4w)Voc#*r7gUWwfzpwcEW~ zhO_O|KdIc?h5yH!i+giG$HoX~Oi6(XXNg=FOFjmuuWQF=s)#;#|`Jzi%l z66*3IugFQ$ony1dX6f0PS;mZuG^gcWlDrniw!9#ao@VrQ$&y5?J&M_#xC#dy zLbMw90~hq>MV9y>CHEDe*rWtJhO~((YjANk$pH+PV3S|K)135Q%Sp2t7j{2C^P*dp zqk*{)lJk@682T_hwFrIqE1l$t`QMM)@*L95JcvxdENu3f&u3$zN-mC9^H1}UmK54& z%)dQIP2~daA?DwyZBY_%KQdpWwoS3X{``^532)@v(<*TvGW#(Sc+^y^#BAoEQng~5 zW`7Rms>c`v7ueWVNG-@|K8O8c*4S*cxktScuVx!tiHWRLS7PGkSavUh4Qd3YY))Z! 
zphCMkpKQi5n(8~$MFcW9=u{t}pqU&zrgjm?;-FjYA&|||(dGr!O@-w$tP=VPY{Aze zw3$0^!br+|xLBaEZ4foCBF2On_$}OoCfJ)!EDoQ-UaUkuCBv$5iOdZTjhozDfW46^55e_AnaR)O_q(wTh7kT|J8B z2;fjtNUerFfO%x9e~^P(_Bet9^-WY(=dce(NC}clm2v-7=dvK|B#(NWqFu?$u2g#i zVRd14$dg#Tx`53@g(fuyjuhI<{!Z?0WcZ$eJ$ej_QyW-5f*tAsin^E;;z;OJk5h8D zFdBo8sg0CeBTu_qeUm|Kk?q>E6pFBhJ%o}z^#=m?utyQ} zt9vNL_3SQ;FrcoY*!Qvx2+pbw>>l+#_Adk%Eaw8CqbQo)1y2>Fjd_W(YGZGp0|&Fj zQPho0M!igC`3!IBCiX9s6)?+~&EsQgmeNz%%)-%MC9`}>2soO{EMHNUTR5s?mZvG1 ztsFI=G0C=Z)W|GX(evESA3zhcR6yhSr1WPg-b-pkk zUFk{=xF%?5Fn2<79`ga)K_uufmxL$?Q6?l#G;LR3WdV zV*VlO^K%7gXi>T->mR9YQ%+OG=c%nL&yd@nXa+cx4r>2w!)vCpj5_!NwF{K@i2sG! zF6A8c$glTd+oPOfaH_<5;~{c=k=)fOhp3W2sNJBP$C0WIQM*ySj#7~rfhKh-2^g~> zSgnTAqmlV=-Jnh+iHS?v)pJ;*YUVaO)YB?l=d!T{aGNWY{S|h)QXOWB6~g8H>Rjq} z3kL(LgFqMuXVvL&#xAkC)ohO37-J$YBTN z=ny$XQr&6Hih1Gtdjou3S`<&e+SMF(2a5caHF%Apbs&O9fTT@@@`=+~K5U+t#RB2j z#JrFZ{%?Z7lzq9mmZEae*80FLrB++(ZDGp47a@dyhgqhop-(Lj5Vx@$ixNAF>d zpad%x&u2!E9)M9LB@&6C#DAhgo3a6~K}odG>ykVk-YU1o&?E}J%By+`4T7((LIay5 zpZ15La=Y^x?7hJ7GL*=aW?+U3gZe8=P$^&=${Znyhm4RRN#4mQ!*q8BuI6{Sljc8) zIw4t9k3;zevk%FpcBV3x+Br0#3Y4cY&yd{P(2YykhrJi#ybt4fl*QP&A$fP9hf1Xu zCum4M@pF~kXcbbh6s2{N+yHFSNclqrMor2~?L$fEmL2qBq?BQV(5=)~l*!cIMs17I z7YJ)7C8boOdFaFU;yY8Oaz6$Q-T5503zUCge}?Y*2-_|t4^B?%tq#VZq5F2Cu}x=X zECGK(@_mJsGhVMc6&0A9uU;0_ic^?P;=qO@29sx#*|+EpjVstS+!)SG&S8F_ds1-^ zpX|BYzKM(;;YNwv+b+J`CtBfS&uDfVs1>nZoqK$A))_hzDn=?QmcWiN7u-~lA%7GA z4^^bFS#Y!>GVCL)oxRTOKE{4Yy9U0fNQ%B){3;y7YE0)?jU*+%fE51=1<@dqp8PV)GxMw6r|LT+M>pnVxR#-j5I%ll#$qO40fC}@oUH138;3-b* zW!@Qw{^;UxK?Tlh_I9tkhE9_c6CcUov-I6g=;RKKQ6o z%Q+)LQ=}Rn)nM@&!)wm_3ae#ic|PWduREivQo_nW}rJ)K(m2`$yZnd;P9 zo@gzf+^2A98Cff%!W@p%@r^d0FD6%VjLD|*#mNg-k@z9xad)NqF5@o67dW zf7gy2b<)@Qx4zCxufe^RZRdmM?B+QmT_Z$aUaLG!eU#PWL9f&>0cuZbA=qM&^{aV+=aOhXTsG@lI#dbmp$WR~tGb;@zES zL}yTLc@g@gQypUNCZy;Lt9Gxz9|+gb#j3fj=zz|!TK78K9q0_2TmA^HGjxV;^(#M) zZt09lG?mw2S#+wOGxzn_TXaU;>OPEq>C|%E*=UvB6MogJX8e}*-aM-xbj-Ix$qnY7 z#S7kHNg2y;Zp>(0)R;bh#iAt((r?OJIxc%*=E96wRntnBT;FiR&8vEf7A`RLyt-(m zpLOz#8oewlYuGmvU<+pU6x{MD{uD>POp*=1~8fT~PQ7ZV}Vf-BAnRGc59OL|0$a$=U;*R%n{Mv?B3T*s2&=CCv8w}qz zjt@)p1qkcVn*S!OjPF$y!tAu*jX!7^yb1H5D8?tgHj!5--YMM%G5v7%@FPy>8=nlH z7W!#kz8Uv?ui`gCFisd4e=ECFII0o6@j39A;Fk-&PV5cO3BDb9Mt@=zTa4J2rALIm z_akYuBEV^zdshd@io=(^QGX8FN}8!KmIs5DIsHRqXea!@Jti} zV_o9{P!(raofXr9L>No zBVY+c=078%^k?dSQhky0ICq8(r(<4ycpEgFBVWQioJ*6pWSH>J+dZqcOlSCw?JS(w z>Sh{*Zkx=so;lkXv(LvhC>%c+7$4D%Pz|tbySr!a_8oH1j0YcN|+W@;e^TWC=Tr3&)b{Oo;P>of^ z1@*f}+1H^8Jr7*;_|YY{s4m!z()*DkX|`7=bqi@XoI>elAr)yzm;WswanGov&SBC^ z>yucgx@0I_9|c1@RrUn5>?vW-!-+j5(C_C5L3_uu*P&u>3G0OxyuJq~_9o!s@-y1br4#*O8kDnU{Kl?0iSfG`7unHgM@u)e!&KqhakoPyY{8AQE*~k ziC?xsCG1c0D>lHq{iSL=Ei8CUg~#{jM>THrBKjCgM-rZs;r#wFfsgIrgtWp8H|>i5 EKlYa^zyJUM diff --git a/Linux_x86_64/lib/python3.4/site-packages/sqlalchemy/cresultproxy.cpython-34m.so b/Linux_x86_64/lib/python3.4/site-packages/sqlalchemy/cresultproxy.cpython-34m.so index 30a7eb5c9963f88c47acc18d6f9dbae1ccd9f239..69e233f7810233e4a618ebbe40de03790a81f3b7 100755 GIT binary patch delta 10984 zcmZu%33wD$wys;9CRHR&I!mWJAxo#zorMs(I~WK{BnctuAb}7FAc%xLENZ|Yh+v0R z0i%I4&fqc{`~-166_}u)k0pRlWi>FP!;GT_#j&IK3?hn*n0L;tTRoxQtM99-`~PRT z=bpRN?W*p2+0e1m&}_?Wvr7CZ(r!}6Nm0OCkyIdsXo zqS~Gc$4zDamPF0d-jTO^>PXi3gCTs8rY3cS@Vfqu5WX>8^HWrMyZTpDBZRLP{v|Q8^Z`?(`G;_Gbj26K))u03 zQ}Ff>zESwh3gKH|rr9b7a#~lK`x)R+Y@}(o=}$y0=)q>f>&X5SS*6&{Q%N!8z^BMYS|Uxh z9;_Qo7Ap+5M6xWHX-VI-feErC`PRLwoZudUm+t&+sM78F9bThSi?Bqv zj^v#k-FUb*6J~_lIbKT`ok^W-9Iqs7AiS63rG&!>w{-iFaV;dmNJKMNm_@i3;YN<9 z5~e0~)^l7lee-NfMytAI;3xsWiYdJnem_}-6701U3(^}P8#PK_XY0h?fIQ}zXS`qx6 
zc24|>2wKEC%^bf(IEApp@w0?!;pyzUMH$#e*iN{E*mav0x8^>~%U7FY2 z?aJeKUsK_ZTpV{zMho3*eAzdAo4)d`yV2(%i*JkZ1xU3ym6=>4e=6K%v$MBhu`Od- zG*Z&COy9b~r!uirgGLRJzAc3p&}(26#uf14W%_%@K!3Z$uAQLqI&VN!>UszFx!;B>ygyD?$ZSiLXzP;YvjVnmh`l-kq zK^4ij>HWY4o!w8wFrzCK{%$XDwx-xo{6D@S;u?%h-@x|6$#kb|O@i;!$K*cK7o%y){}Zk~ zl;;aq_4FOV^sUl_bA|9tsB^m5EAXVVGWVB6Znsc~@@}*HrzT7mLJKIaT=oYTCo4yXL!f}C_ z$J{MDh7LYWz^JepqrjFiZ1g5X6L840BxE-k^OA9jxVV!Q#Yp$V)*ZQyt8$=)9C9xW zS-vLAd9B$cT0i>M8IQL9G2?)d)j>`F)5hjNcXv^%EB8S&v_%xD&9nyLQ3X2+2S@v8 z1;c|@QaT!-vv3b?3ZJ>t&tf(T;qS$Lm!&nPs279z%nU`vzTR`h0~mL7+lt2qcXZ9- z`Ag3iI?QmsBok_jZ3(^{jS>4*4dAs6iVwtMbWV7@Bn!87`$|&ridHgi;%=6Vm0J>l zdFS=g_F&*XXn}fWkvo!MCX1IF(_vpk*-)|JlJw0Mb_zdsi%RuvmcFghzb&)R?V9}t2e}Uu4z?4I%V35O?;}g>m7XCFSdmZ7Vk=DGhFyn&z8_>X5 zp+NsK2YVF8m02U-Ad%{hpv z#LK~6^)8a+v0St{sNR6Tm8WIgi3znCTPP|e68AdB9aNMQ6}KJ6mnc3(sWCu`&zIO3 ziFuj#6iP8;rPyf9^rzTD`5-j{8_MU6xU${bdTm99{yM8J^>o4TU5~^=ElBnQU661 z-Op&O>{hp8ew2D1!d|tEn%lsZqUk~PCT2ic&Q@Vk+SE-{-6L!+diy}VOlptvNZZv9 zsX=R4b0q$Jy{JAyA*^N3qh5!ai>As3wiIg(sIO5x8`)|EUFtcKJjTu-xM6+}nnw3! zufwaOv!fEobrWkt`z&VO1(#4#2e(G|9dZU+rEFv2Xj#R~3FH7cn!?OeDTd#1RLjis zN%k~HbHst~81BLfTeLHoB@;xPH33Y&3l6SzNyZr0` z3?Gv!PnBW%n31dR5?wC&dPMQ1x#Zfm{mdfyGDsKD@Q5XAE|6p zc2b+JQ`xROM-G3g#&VYOAeDbMVcDaEQG0Gsxj^|F@xM^HNO9wcRBlo~`jlBRd^R@T z^bxtfMeb^q6Trq<7yOM0d**WXb!s6 zla!Mf4sMvAhM&gWd5?|aPvn%MR#5x;vO^fHSxutKk~pxbcBGVK9=ctfMxm#$OUPxZ zu~b1Smw421B(bwBbXuU!gyt%D_QMofCVL*0CG{Or&JwllA?0iqfeAKC*3ThOH7w;Z zQW(hUF$KMphwy4ygO^z9bIJNwC?220j>5g;J?t5HYP^>%hGXLgnRbz*B4)!XtP3Hm zda|pX+>~m}uhha(Y&uCFrZ$zac&ySo6B?@r$)i;`SUo&}CbwP9q6wzdVvTAxEhUoj zCYUBz*$r?_$d+eEK-Gi+wehIh+e{JJF~)UhVO@s4m0fHIdXLVENR6QRPSYX6&Q~X8 zGj?J`+G#$xf5K~ogBD=BBp)NC?9b3gv^_FmFt>xxCS>_ju#(8McqF2;Ojf_{&}7pb zo)RC=0fu zPr)3Fq*jt+dclyIbUCs&+*>ouPkCS|n&agd0ZXM2&2dW~SS~R;m2FBl@o7}HD^XN- zP&rGI6R@LL`UOm1-@{ld%a&(h-dO{V)1+Uilr8h}9!*GANz^~cH=#H=MeT#(v%xY+ zl2hE_kTJA%FIEGkqwBaQlf1_*K&yx0NlsLPP*R(m;NZRux*Hy9kp&#XR+!Hi?j!fS|wTzoW z2m75Lnj0?vJFhg;roT;MHHK@fMv}7UQ~rPlkS_EH>`#`+w%Wgj29LqXz^FwyOO1Om%LjO z#e45cluLOI%H?R96D{!CR9EV3T=&>$YTapUUE1d~EjYB|%AdO&x~3+j@X0oYbmXnR zTt3|ds`7lF2>qrd((8h0c7J4Ft=5@Xm1m;>6xRj(!Nc2dDA>?lJ;Yaa@y9{&dp*Qv z9B|xvr!MxM6=L3~fxl7I@3CIgtoFX}%`{gUpRw=pF>?`HatH77_xO;h#T4ZK2%dYg z{Jd{J;#%{udG3YuJ_rx$+=uY)Xu~mk8LQ9dnxAvcKa%DnJv3L5W*2J4VXE`_%ycd3;^5o zc#;(V5u)gNTo7WBkXnOAO=J{qatVCd~49f6?LlVp`oR{MHbQO|igd*siA6Oe*krR6UU^TZENcM_btx9_Ml zVN6|7q3QPQis3>`U2(r~CFbNqL08jZz>LJaG#wYL6GqP?!@HK&2kpp9pr!vBJ%I5S z_sSVgxicABX2kougw1grOLVa3RcuAtJnRYfGBtH8HZ_e;M-JV%u%%TY&t9gXe->f- zEK=S@z87IvrvIDzlfx!llG6>O{0eOYJ|w6y(>M{o^RM-Bj63d}gM5aq?*J z4+M9T0hdP5Q>J$iPDJSXk=~Z#;nHr>&$u*cW8!0SxEFQY7V+tZ`ooWUj&}56$H|~6 zCY>bBzmVqM5KUJZbkxQT=1|ncsd$=Hr^07*610I^!yPc}B=-a9n)_fkF0*Td z0Y}jibc92S58N%_og1I|s*on2)N@<_CxX{|sD5pS`{%|p6*kOuWIQa|HH<8pdH@qz z>&|z=*|`q+hcvi4H+Jek94U0*0Q%3SB#Tw^Fd{^Q`=C3&os`6i%JL-7JwixJ=nI-q zH53bXl4m*GJ1@~+6~s>q^>Xy}iSR{p^i{!6K>nKGX?C6$Jk2y4wj@}$YRJl#w8Lu@ z38er%b^$0|g_=bwHN%@P1T#<|WxpJbBXT3}ltzf3UwX$ALc9>g`!ze-&PfH^h2SSX z!NlHmxC2+k7IZ^BD; z?mJJ1>UIQm2fq<2GwTCY8h!@-bT}buK{cy{Sqt1Lv&CW@45%om?D@qBdltl}m4&L* z1yx3*;7VK%|5`9ezU+dOg=usvUYIOjaY60E_{rfJT62Rd;P#-Yw3(hV~O8lgUf>g4N9%OIuO}rTli<~S6vKFPVA}C+vvdt4= zyn*K0bmF<+4(k_X%Cl5>eUa08&WH}GWf@gfLs6-n2R?E)1QzAU{bjH$cCwW)baBqK zE>sNLO|KtImm0A6Diuc+Vg1Tv1N|&5y%8o!QIQw$30hjw8-I2?d9f82J5vrXdKLV^ zYUjm`=ps6a7u7ImTO1deg4Qvw(2`o(1|5qn{(0z=&bS;_7Crq#z!`tmYni6*cdvMFuB}`CUnMK3@e|3A<`Ka%gX%7(HZL~ug37` zjC@ys6bVgarrl6co)1dOfDn26djB7KLPeUo4aXVT0TNpE)N;fN~*9>&VC-f>?foY~Q zW>VQl7$==kmh#K^Q=U%6B9~plmeUziUsge{_A2mhg)a*e+g?nx<12$-&j&%%vVt#E8B~VCBY&!jnH5G9TMP!t1~U#?;mLz9LCe8j%nqq< 
zyICxZd)w;&zcJwhOOi2GNq^J|?*jy16~dPy&rn(aFB22`6F46uwG`R~quo~Gjr{ZP zX3id>jaX(9HZ{ZmHyIdct1#2Qz-$th9|&Im%!C7uAD;_e|E#3**95PBM$-8{7&nEb ze>Kv1m&W_0X53uS!9)X#g@OKms9eEM5WN1q>!xNVE!25@e$Wvm^dA+x{(mUF;%$Q0 zeiZm6N&i*wx)A8!<#2NF`Xmq~Hb-jHz|%kz%YyZ>$KQU=$z1}Bg% z&EtGLj5z!XYz&Wq8;4U2xKy+z962j98+_eXj{gvJ9}8#v&xS*eXC_o)BKhA3ajV|* zxCBd%S=o26_4ugP&f{OpW?TnnjPlJWt1hk_HN#h0-Fo)F4;kR8lgaSa{zrS&FK!qx z{{g6da|#@O^C?{7GF!V&b}*BVHvqr1q3?M3^mMwaf9>{xdS-BRy{J~d7s2>5Zj-(i z@;jWDW1-`$Iq@;{LEVmPG(Yr}vCRsb&t#-EA=y)`|A|t+s}OxEH35WHp5{Yp%+Lyr zpq|Z4drnBPoA{yUUBB-qABnGqXtjynY3mQ0&t|Yz*mrgWenGg3*VEuVHv-x>he5-+ z;cPAJJ2!&;9j=~BWgdwBFqM4)-VaBx04%}lcd!qyKfzVJ8o}E>fiS delta 10934 zcmZu%33wD$w!XJIO{$V~(pkH+b-Fvw@E zFqprP^qWI*JJ#RvdF+KB51v@PXL|dUp)dI1JHCpqX`QY7Gr|mu%K14T zO1)%a%*s;W?nqm@jV&7Cyvb}ul)yB$2AU$1xvh0iWK%?HyDWNRsV-KfO9R{!CpMTz4$s^#yWcOhW>IdzAamiQ&xF%{NsB4;L+IlR3>9i z>!s<0H)3rgKbI}BsF&56sYhPZi~maM8+-AWAU@7EvLRLXYUyRQX6XFhUi=6d?Qk#t zI@}Xy3pL7UCwp0)UOiezFW%(S`R-nPisUc%;#*{VCP$YsVCp;~4n$hxN4ll2y_aT) zHhR-59>Z(VEcYf6>FLS)li<`nlxyr#on5^b9kQ2uusQH*k}XwsfhdDo?i6<_ix2*Z zKL0=;dk;1rEXg*ANscM>^nRp!nCoU!1>_tZl@ zG?=_mJO?E=yz}rHmsN%-!iz}M)o}w4?{vcG%(~hIo6o8Ch!r$ ziG*7OewT0(;ZUO>{!BzN5e)*rNjQaYoxraUwh>+-@b3uI^4e7^@Uw(zq<7T_yqz!& z>aH?@A19nnI3V!DglP>3bvXsGmIzv6x~u}PButB97ZZ31;Vi=4*Qf>N5Oxyo5O_La z7vXk+ClGcMZWDMEVOku!S_G~j>|xA{g8+$Oed+819EkO1x=jw|T@G$J8{Bv~_P*rc zR`X2w%H~mBLQtFr(RL?)0P^iQGd{_8GPXK5xUqCxE~X}<)2f)@*3#`hC+jT0a&`vs za_9TzW)$)ZhiLpR=pUE02@8UQy}tAl4`#Y?7+W3PH4~K!;<74{-)ip4X?@jR#QCex zk?P@(fjMokHP%Z7EIxqaB)GMtGX!JPhH*bUnYK20)s-7JHn+uX+CfpbR(h<#t;tVA zW_k`Uhst#SwA0zb^%pn>$#oRUzx_B|jIHK_y1hi&`;z_j9`+{P{xyzi@{c0>Q=J!n zf}`o>ya6m3nfy)Ym$9hepj%{g-U#&5jT?KhUN^pd2`jZzH@+&37HG@x@;z`aV*;NC zgEO6cI84vn%I}ABnQlG_G>3-|gZ_?8J{v|mp1=q8-yAP0A7w#Z)``kNS-2W(dRxH) z4K*tQM9})NHRS93@*;L9HOF*h_M?Oaw|vmK+F*APG1Pq40na!~Joh@BX#RI_aJ3q@ z$${d|!6+>JKDgyjaPya|rj8!!eGD!->yq#PUlDcWmiJbD5ZwBp6P-He!sD)a{CW7n zWmhg<0L@*(heCzBFe~o@iZ}M-sE*ro3^N?F`ApoVcW@{NxBSqV4%^*#EB`$YrtHmF ztu5Ie<%@K9J$rP)?de#W`r=n`F`|Zk_eZ1s_GA0qsvGA^lstsed%czV2Ly0|Q$g+ivwi4DegjQZ*kV4P zfpa8T1X)v#^f#E>{)@OwcQ#T$5oMJxSf9L2uXcsrkHL-RQ0tvJd(3qfI_@){`9+BTK2XNpyJO%v9YUj!fc+SI30LPQQET6wQ7jm#WCj8gJQ>s zqf_Q+?A?sLnqmJcmx6^Z$(k6OGGh~FY`SDLc3tX>?Uu2rzFcvc(pSPdkXkoG+b{YMQl(G(&80BOMCQWMOdQ_RXx(@{=CGviJjG7`M z9~NYv{em=qh6`Ingn9AuRp?+o6AAfMZk`I`s%$YMF;qn0O$-$|0#;UKr@e_<^VMgO zu&NK?yfI&+v|as6B)nc#I^2#$HAisbg4#0d0kc_v8m&KOY3?JyH0>mg8?(xnVD)OX zLHH;)NAVR17HQ`rz+dgn8;tpyQ+O4|snZT%Wz9A|3Bki!DPGNXejlngYA?pX`f7WH zg9QC~J(`}@yafCLG-_9<2mt|_wE<))6re@ZsPIL+0DbmqyWxv!SI$-3^w`afR4Vld znVTrdR1Nnc=HF5IRJB_HM=+R{j-3#(;3>3Xi+zQgBW&q4SVQGLs0-RG524wlMr2^k zBG*$MLI}X4>u8Bx()cfw|bVAF-tBc;1Gx@TFU*{_l?>H>`rw#r?Jwk z4b-MpQ+5yX`55i67NS;pNR+f)TS67uz#C%lm+?t$Fcslpz5{g~+PCDinXklLJGE*m z&m(+2f^O{z45~iLk0H2hErj|}={yL37?l^di%L+>+tA;~t@GeCT1;?lRQlkBSSxiq zk3`QJZvBEngFw@`^&%DMmjcyttCxcRN}xsD>VxsOQH*7`xeFFj8I~nsiR10+-P8xF z=#Lfcm}KhX)G+r@2UwYME9|>1#3#V5qqCHY9PS)FoG*o@(RutCIEvQ|aB;LhwSii1 zH+k6Ad@B0hIT*;PMnmZs8+XIxG1*CHs5txUkq@#yDruE$cw)>D<)a8VJSI!o$DwOX zHqQae*t_RMQu|#fKo_ezol5(^l(wsLDab`iJJs89K&h7|VA`h^Qu;p@Ob66=$@em) zOVnQx{}ZLlR6mYL^$PW4P`$?C)Yufu3JQIV!quuXfseB-UP;}^I8ke~P1yV@=j2|e z{gYZn5i{s9?Lo4bgrz~dj&)JZ!slu2d6M-Jpi#SuOO6^TK(qD~g;oV<(fYxLajCfB zd2U=z@efprSYhbU63E{wK&N(s+BIH)ZtYR3SAqbStpT_+?iY8oQuWdVrD`Loe(8K4 zme#86p)zF%VAoDj4`+&^JGDzRfE@fZ%6!^ultGrT1hj9+;^aOoPKlNU4K@C}B(mr7 z9VlknWGb3Z<~E92Adg3539QVv7CLK&I@}}}z*k_LKI(mVP2PZ)MCx;9yBo^KXYlnf zb9{i`0#A)EJtTbTNDxQK_+Hfy8 zVW4tWg}oDQP5l6KacVxAVCsL-S<9oP1eX-BOtf(T@5DUidKA=5>|gr^N~5e)A}5w{ zBYM~dU~KgzKA5J1FFGrl<~vP?Xs1}6)W@+Cqg_WaqFvpF*Jw8_z)q&zL881<7$e>p 
zlR8NF!KsP9(A$_vj4K6+c%Q`;+ueG zWdb(8mQX@0)lu^oV`hES=g<~81Z}=pR~M#5abw;3?EOfC&6E1`A+T$bJMA_U#THU) z_|yl`H@1k<0rg}gbWcj*b0BhZc3cZmu_f~`gIcDvn_=+eY~^JQW=sUB7C4+{~)hT)O#xHkjK}hJIredu`i6?-U zY~d!jW`vBnYw^;FKh3j+`WaH6Ldw&C7hNs5B#xN5+whWtKiqZov!ot{7Ed?wx>|5` zq+Yv&lztritBkcMdx$Z_;&Jd}=eiw%xM%e!*GR>VP6+S-Mbb(K1EKi}%JRn-4 z0R~U=q>BzYAleCcA$e19aPm%JZ-h05y)$g5)rXr!C$zvG!)_K`ME=i;0V3>eaA}$+ zO~iK__I{-}xp`lzLOa}YyH~lc!HnB$6zuy~Z?~0-Hm)G+KpdFBAOX6kXE?-U8%-KzTd+V(H;HNj{&eU&BaNSg5i^{j(dl}wvoWi{`w~#OKE+>0 zq7GNX6+PF({G(w`p)%4D=p$d*lD8EHobWztco$lvSyXDkGgRvLc^|5lH3Pnx;dO}_ z`@R@6C$T2Ch%SF$44GOSPQ_0_;0|9%^z8|uT8P#2zaqm@=iZ0G+-*2!&tUe&Liv?Y z{+5*Q^iZxNWj895u&Im1%d=lkh_l zru7nf56hyURvfKir+RXteTe^WQbW<#qsrScLo)Qrk&e!=*OTN`OkO|s@S>gzEq$%$8c}ijlguXIHH25o);%CFkVsaH!ux|5<58H1fq! za!5MuKt^=!Qp3cGc45=<3*rInfVHzcN}dB6XQhM!vc29FDBPlA7iN4&%FjsI-An0x1UhPyhKQMXgj5}>icL>DHvgheF_wn)K;ZXEzED-;43{6lcd_LQw#Nqpw*=~9b7uGb4{;50a+a4ZyW$MMU}0zZV<9l#pS z#fH|`$F*Y7%*V#|`?GJZqIZQ$CybTjnXp%?@R~zj4@;3g*YS0~_pD?F=-%w(u$x1; zZ6>!U_s^NF!=!hTB>GA(GxoDy#DMT>Ca(v2dAVz`GXAh~2r9Adj*|3QI5jU-AGni6 z0FIsHf4x^718+I?{OHZsJs1dwlo+^2p>TdmZkM#CqBWq%dU+7M+C!S_g5~p5_(<41 z-<|We6b&Vh<2`@{Lm9!(!twcTrN|9G&QF|vFOC#CZ~*_Mq8PWy3ddr&kds7m zMdd{-utOSY34LBSYNib`91HA$nG4cF4Pksj%BwNfmogU3(JskPME-*0X?DiTwxXG4 z$C`xm)=Z1!oMVL}q=*U_sFi@4t5o;MqG}YbmWEkqV0m*QaYQ~MI;9O#7FOPJOq!RV zd6({o+tf*{cs@MTfE}Y0G!@m~#OP6{uDjwm7?O~&MY|z-=w(hX) z*0mU?4#qB;$S=b07Wsb>E!WU+9H%iD*`R;-j#Cr1-Fgh;IK2?Jcml77Ig9;{-}eqw z7ZwKwd*SuPDXvF)OPa%yVGdl0Tj1Q{fr=*w97|kuD_@eSfm)yz0_-eNlITsDdi%c&L?5h(p=@53a>8p*q$(BfC-9%Qq5%iV`2Dk=v>-Q z`G7<6T^?QnL+Vb8PIW8a>#=*=}hX!t2SZ~ zIuj-YeR!j@stKdfndqv%guZkpJ!h(#i;rwNllxU~z!suYjZmr&L@`EZ(#eSGSy)6m zlM;DV8it}X(OvyL2BR}+r>QD2vJg20mg1^0D5cYKOLZ9*kItm)rs@M&e>#)K@Tymk zdj(!x<_oDR=7(oYLDeZ7backqs#4HSr)6OEr)Z=zsm)Y91Fdu>y~wL~W0~nxZ}93% zSZz8}Qu|aTVit5JOsS%`IdsM)SD(S3@^q@nO4SS4?sO)ssEW+NB1htxFtvIY2B1?t z%d4uf73fS{Syh9@sA*lf{5v!Lgl?@})gA$J57}D(^uSQX?O(NQg@4836$NuvFTQJD z!IFV@4=!2|Sm2*HesuX=a~I8D_CV{o2lty=KL}lpNU0t>**Uzhu-EqjsCr;r>w!%< z0eJH8Ly-L8WSIZqFCpTs_xKt({Z_ImiZ$Zh#u<#SNHm)m{>cc+cPE=HtWDfwSr9)+ zB&TED3b4Sv4imoNdK#K`C-ZoCVYkhcz?u=+@GA~-sY$GEn6sA$z5!>+a04|dFswY; zWMVe(@3CR-DNwm*=k~o0o&Z^UZ9E#r?X`uXS())Hm4V9fuCQp<(2JjnJZC@u3yMqq z0>Q^K;}1`gX7!k$IK~GugMUWov09~095n`u_r1WeK<{IU+>B2wTcqMk$s6C1aKee> zyyT7VMh4#p%SUmH??eXglDzR*$lyzKKE&E__e2Mm6`Uj;jDJxTNPdCjjSpH^bT_u% z;N{LgPU^Qw-uT#NWK5f9DyY8e4>4wJ``e^}IrYdmLd5a8B=2IMOx@AQ(AvB&irqc?1#sU=T6OviHM zIZI*A3a7vgSP(ks;|1}x0{IDl}xX#x|{!Mfsz46C3 zqh?1WZ!GV%Qva3C!?<_yT$e;Tj$IB&qr636t8x5WUo-HsMEKJ?NrS9{NnvH>&YPr} zf=^^EG?057Il3O=_Stwcfu%4^-p-1#NAS+(dsqbREo1 zrI+I3&xE+Y?sObw5-2AR? 
NG2zoDd|)n#{|EPc0wDkZ diff --git a/Linux_x86_64/lib/python3.4/site-packages/sqlalchemy/cutils.cpython-34m.so b/Linux_x86_64/lib/python3.4/site-packages/sqlalchemy/cutils.cpython-34m.so index d64be983cc4b9d34ebf61b0d5cd1aeca3596bcc1..34d293950ce5487e65cb082b546ab4f095d8ca18 100755 GIT binary patch delta 4459 zcmZWs3s{s@8a`*P|IaYY0KzaJ%moH!xDA7hfW`u;H0mlBLGyxKDlVpphCb=qf{`W} ziG5`~W@&BpXl0pHYd)1z+C^)3p=X~i?zUbY+lAfQFtyw=XTR^C|5u{(JpVc8eZTK~ z=R4oI{O5dlj0fN2{RtVPB9l^CxM0DBas`VrR>bs}r!mL&Qiqsk4vW&Zw;zdaGljvm z5{mim372;!6r1<&`)KX@!}IlX|MIQ>@QzgYns4B2pk1|}TP>{7VGlQy^A66KnI(W% zW4(#Vz9o^FOBF#@>V`LT zUh74<>7f0WQU+Y_^$p5Wx8s-6QQ2nEIKF70Nk5x&1MC+p@OL57*eILTGscYYC9;uX zWLKc)Yay^z`=@w23v3E@YKK2UaiHgza1={SA^%wvgkZI820=h<-`oAYmQhZV5k6n7kh8knq!l zDRd)^65dIerfH-~!VeLqsEm|J_`5heYv~WUB9L$C5A>C^TX3*_S}LOJ0-no(-md~Z zm!sCkVIbay%7`@4BWd`Z7T6GBpdFSS|$X3=-w;2z;vChf!+o4*c!jGXS-eua!5q3JVi zdJ*&C24v7`p!Yq~y|d9xNj5wa(u|Mr0XS@2;(zX^>l!zP-Mxw}(cR(2DB2WUGd0k& zf^pNm_ahx{?}XV=1uElDuraDW{_P(_pCS|ayegw7<&aAs)~dTu7PfQKveb_ zgCTpRRoPd{KB7=iDuOdrvDjt;G&&|^7kKRc-27ahJwMOq_xbXDEE{!bl`|IJ#Y+1j zH8k<~q~&pp(IQ7NFUG|f*?=oN8o$v97;@Ned>j|FnhUR@3AIYk4OnROdn8i#x}S$HkBzek*bd%5H?~y4PJAlC%Ld6p^axp z(ts*S_jc~ZMYcyQfbU)Dd+&0$-%hQ^ksq zn~}&pFf$`5?HjVPlg~jfy1AL-G7xr2&TQdKCluodR4&+(k)lf0!QPA;co}?^F_*VN zfxDFVL#KN#Ujj$n3yrtam|myxB?vRY<4NTs7)i#qUZE)^8U62H#{pmUIfED@1Ujg5tx} zr9lE8ieJH=tTKKC&SWL><8UQwR(&#An#kWo-30MAjHr+-&9PlvPyMCvGbnM3k)&pm zBzdBNBzEq`qDb_S#3|V?6EEOS5mF^<0cJ76AF>@2dr849DF|W+SFYgU2IS1latTgn zPjD|HgX8vMKfxIngeP$(#jIxR*etjp z*_+Id!Bj8J*8ScjR}Y5HZE$4a4_kN%H4V-|j4TtrMCS~tc}SauC*XuPHIFn6>HkHW z`u9+!&EP#ujoj$BA#XBPnNUnBzFjyVw{RYk^Ss7Z^vN)JH5#iD7Qn*165a%l=1o%J z!SZolnz;rI@dWa=7~3b$e#n<(>qBLpMBFCiQZ`?MS23r{7Q>3kFp`KIt*xZrcgtS65!k#H@R&SJ>PPvwZ_M!h!q* z+z-d|on4QO(IErOd6Uc}62i0#?@cKl95Y``?$U{qHeM&?MfCc*+`eSIRaX`UkkX|I zjD3z4(P8xkUaNQTT2Vb11*;~dbj_4?7#`HFWil`zxFR*E<$27tL8jx8cEl)Ak*xej z?v%Nh-WbEvqd?HJ0%+?hJl{*3AKknru_O69wMM&;Mo@E_eE(W@QiI zp+}#oAp2QdTJ)I;bC#iyKJznbJSwn`pE;<`eHF#84p<6x8r)n1wyE(tr?+!Ohqt4> zBd2*y`|?|JTJOxi)3>a((>t&Fx8=)kX}fL3x`Bf7w>0MFH7zZ-t!}@gWhJ|#xnbpM z=&dM@ziDoreVWfV?j`|KD=G)xt$0C?cO4s4=``?pWsoy?r7BJx!R%OC_-P46vQa80 z{rdGmkyf)Prd(zSD@n~vtPL+SacVQ`r;vIP4OQ;g`}p%sIZQl;@I6v-Mb^UX**G!f%%JquKB{ zubxF-!_J2!J$PcCp>RJ|B|l|P4&s`i^d^3sS=b)ConZK#gXIVKArGhKc^scgbu+We zPfvtWI)^!D7!!^ti2@zYZ7K@%bfS zU2xKjOS54{prN9se9nx9!1S7d(FHp=teE?IUB~jy+}4#aJojdpQMnD*XvV;Y3r};S zAUR9RjooT+A7qSzH)x$3vWwQU<}Z#A`e)vr-A$Rk2XC$eef|>XA9zxl&S= z&!`B^OVS;ENRAaNZ*Qd(VPv5#5oE3Wu#Ibb0nhuhOVX{6%VHe9l$0R1fuladx^JAc zU!H<~XsXYEph^P|)Zc(NpyTzW{38&TmhwTEiTORyi}@FDe5s9hz&^~+g4j^XhhZk> z-#~9espfmM4d*Lk;Z%c1qs4(pz}D!|L@S^HflUFs8$CP=jyINSCM%S!$)hP(Ktoe` t#EP54qp0N27>I)-O^&#ZxnZgbPeDql_!YvXrmToyokXzxs<84R@&88biC_Q# delta 4324 zcmZWs3v`sl6`t8g_W!dOHuF<^iaiP5Z|>5o>~!W1&cK~M_Ux1h;G08@4pb!Ip?35@4I*I z+`04kX9ix9uf8aEr5E^Pg_dd~h1fP%ub`+ERl} zZsWMap-Gg8f~X~F@dXlfsn_{asb9BKHG*H?-)fmEWh@;~?DVJ-7vG|rwZLo~eE{|! zO5CQ7VXPiJ$c0!xj8E2>NEYh;R0-`g|31wjLR$ycnFn`*4|Tt&9E-@q!6zXKyAL6^ z<<_Fccqt2qPKS})`x#5d$HU=GARc7Gz{G&2a35nM-fJb#R%*Is7J-GYu0;-2cylVP;GMs!+- z7DBV#mOByrPV;Li^g--G*-8_vMV_yJU=M>E5x>y(S3=v54Za8lGt~liuEu^i_ypZ) z%}wfJ)vW`zt%qSh5FTu$Bi150mOimg$u4Hu_G6))wMyul&HFSLM9jMDL)$+b^ih4B zSN?&15jU~0>mnO}o~P5c^-<)5)4{-MX!}dHEmPs9CL0__XXAFuhiGH`{NQiCzpQc9 zkEr(qe?&{TJ2(}htplCoL)|Ndv~Ae|I?_IwtO*r{A1+W`!h*De7sBEHi_V|uP{M%R zN1e9u=9cfm;nV$3*}jZd^xZK)pJ!JlM|W>L>~b>M=!%?!vZ{Y5$0y0}sCE5Y-``*B zWsH9Ir0Jz6mNq)-)#z5Or4x?n8PTm7-HIFyPV4E+^ik9yQ>ew6QZlB{5iBb&3pmRC zfndO2UL{JP!|Ps%$X1WGKVhU+nU=L86&^;!*vHtk6R_z23dj6bI^lF&wH|BSD3wDn zVKi7IMrt%!Zqdk?r!~@C0d14XylmAvVN}fhXm7Zm6}p$fu3;yQaW%_c`jyKU{1h^? 
zkoST2tJAQX%u)kY>RC8vHfW$$Eyvi*CJlttWctE2O=ePUUdEf$Ep z@+LUZt-cA$JX+okuuaY3Zrt*h;C87?xSKNB18|SJf`Ly1ed<~UehutXH!=ul;BmE! zLAfkNo5SkA7*xnMA?RdYe*9MMcJFd@B$VBx=C{QkM0ZBTvKC`D?4b4eImv$mW0`?{ zpjawZE*8?h{EpyvXsR^I0kn0flQ0$?c-2<6)+D5-o`vtq?eZ3gzJ zw867TK1T0(W*W{ZR928FW9VlES^2NCmHXs0c(GX~wqqG6do*XZNfD!r!w3up+E$Qb zxL~Gz1rua8eOyp4CsBpBMy{lF-g-HXj(O+Chj>g+^Z3#g4;2>X$t^Uk&>r8+Zanug z9IsX0ryC1N&L?xxYS~SlMdRh~=!qh``%50yg`1%nQXE|WJ|6W3EO z^;#Ztjywa2R~=%tTutIv2f25L^g^LZO<>^C?ANO2u~U>h&00v<*U&>H&e83x;MEiq zHH?+36iO>N{LJ5`((V7@1hW@mpk%F3wJK;4&x!UIE(10MQ>xq z73F@+q*~mDjuVv~lr>JP4JgR^Kz+$P2%%{;{YEC5=@AdoUoc@+M{beeo0FlXdJG7;8`} zXs*9nUP-_8k1^n2dBdNd=z}4dZh`;f&_3IHS0F2QIh17A??9kD<=kHmh;%%It(U%q)yJG@cOdc zv1g=J_YDXjr>|0kI0=hz*mz#fvN+`|)tG3b8^`2q+OF#e9MtapISUsH6tOQ!?E zJ&DFujBf7Kh3ulcR|i)E9J|z~a6dE5!l}y^-qGnQsDE=w9pm6$!}M51<|I$lRd!q{Acw2^P@{wp_N zx$~DWrQ`$-J^pMJCAVQ|@n@?ny#_-55+63=sKAuqOB^tk^+Md+Z?B9oVRQB8j!%o} zpHp|lG{v`WWrwe0Sx0GG=du-xOP7ybJ+8dn-|m~)`186I*DYPLa)W5=Y;Rw(X4$It z)nZlKqSb4}HT6x7s{?@%w+LE0c{;f#SM|Rz`4J1PnsY0CHK&Ot&Apo%VobEFen9%E zak^bLAWzM*is)-fTPAvDDsB z!nvtNqe!9E4R#}fM|;7h(E~ts(L!e&qaaPkO(vDzXn2_V945N6F>_P0_%_=yymOHO zWWaN1q-SM1-+=>gc&gL+ff4+*;H5y{P6ujSkEC|34)3|&2|CF|4?gZV`V@s&9+eFLorxv(x zG#;TrXh%`p%#=zayk~2hb!$-2QliFV^W->mCQ@WX^l9}>I|6>k%te@rPiFc|{4}M^ zS?i2=i&F2b9^654o9xIdn>Ju&JkeB5?>Bwc|Nd-}l$Utblg+1Pd=+eQ;(^7|XY=yZ z=*8Jj8S^WX8gRIBik7FTb-ph)I(`0FIyK+pOvk*N0DcdY^{+6RENBr vx{z1X(bj9#74@2jVk$Y?DpL requirements.txt echo setuptools >> requirements.txt diff --git a/Shared/lib/python3.4/site-packages/PyPDF2-1.23.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/PyPDF2-1.23.egg-info/installed-files.txt index d3945c1..fc2c42a 100644 --- a/Shared/lib/python3.4/site-packages/PyPDF2-1.23.egg-info/installed-files.txt +++ b/Shared/lib/python3.4/site-packages/PyPDF2-1.23.egg-info/installed-files.txt @@ -18,6 +18,6 @@ ../PyPDF2/__pycache__/__init__.cpython-34.pyc ./ top_level.txt -dependency_links.txt PKG-INFO +dependency_links.txt SOURCES.txt diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/PKG-INFO new file mode 100644 index 0000000..8c9058e --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/PKG-INFO @@ -0,0 +1,11 @@ +Metadata-Version: 1.0 +Name: PySocks +Version: 1.5.6 +Summary: A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information. 
+Home-page: https://github.com/Anorov/PySocks +Author: Anorov +Author-email: anorov.vorona@gmail.com +License: BSD +Description: UNKNOWN +Keywords: socks,proxy +Platform: UNKNOWN diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/SOURCES.txt new file mode 100644 index 0000000..db9da17 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/SOURCES.txt @@ -0,0 +1,6 @@ +socks.py +sockshandler.py +PySocks.egg-info/PKG-INFO +PySocks.egg-info/SOURCES.txt +PySocks.egg-info/dependency_links.txt +PySocks.egg-info/top_level.txt \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/dependency_links.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/dependency_links.txt rename to Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/dependency_links.txt diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/installed-files.txt new file mode 100644 index 0000000..9512838 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/installed-files.txt @@ -0,0 +1,9 @@ +../socks.py +../sockshandler.py +../__pycache__/socks.cpython-34.pyc +../__pycache__/sockshandler.cpython-34.pyc +./ +top_level.txt +dependency_links.txt +PKG-INFO +SOURCES.txt diff --git a/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/top_level.txt b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/top_level.txt new file mode 100644 index 0000000..9476163 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/PySocks-1.5.6.egg-info/top_level.txt @@ -0,0 +1,2 @@ +socks +sockshandler diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..e601d13 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/DESCRIPTION.rst @@ -0,0 +1,48 @@ +Certifi: Python SSL Certificates +================================ + +`Certifi`_ is a carefully curated collection of Root Certificates for +validating the trustworthiness of SSL certificates while verifying the identity +of TLS hosts. It has been extracted from the `Requests`_ project. + +Installation +------------ + +``certifi`` is available on PyPI. Simply install it with ``pip``:: + + $ pip install certifi + +Usage +----- + +To reference the installed CA Bundle, you can use the built-in function:: + + >>> import certifi + + >>> certifi.where() + '/usr/local/lib/python2.7/site-packages/certifi/cacert.pem' + +Enjoy! + +1024-bit Root Certificates +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Browsers and certificate authorities have concluded that 1024-bit keys are +unacceptably weak for certificates, particularly root certificates. For this +reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its +bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key) +certifiate from the same CA. Because Mozilla removed these certificates from +its bundle, ``certifi`` removed them as well. + +Unfortunately, old versions of OpenSSL (less than 1.0.2) sometimes fail to +validate certificate chains that use the strong roots. 
For this reason, if you +fail to validate a certificate using the ``certifi.where()`` mechanism, you can +intentionally re-add the 1024-bit roots back into your bundle by calling +``certifi.old_where()`` instead. This is not recommended in production: if at +all possible you should upgrade to a newer OpenSSL. However, if you have no +other option, this may work for you. + +.. _`Certifi`: http://certifi.io/en/latest/ +.. _`Requests`: http://docs.python-requests.org/en/latest/ + + diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/METADATA b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/METADATA similarity index 57% rename from Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/METADATA rename to Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/METADATA index 412b853..99d106b 100644 --- a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/METADATA +++ b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.0 Name: certifi -Version: 2015.9.6.2 +Version: 2015.11.20 Summary: Python package for providing Mozilla's CA Bundle. Home-page: http://certifi.io/ Author: Kenneth Reitz @@ -30,7 +30,7 @@ of TLS hosts. It has been extracted from the `Requests`_ project. Installation ------------ -`certifi` is available on PyPI. Simply install it with `pip`:: +``certifi`` is available on PyPI. Simply install it with ``pip``:: $ pip install certifi @@ -46,6 +46,24 @@ To reference the installed CA Bundle, you can use the built-in function:: Enjoy! +1024-bit Root Certificates +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Browsers and certificate authorities have concluded that 1024-bit keys are +unacceptably weak for certificates, particularly root certificates. For this +reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its +bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key) +certifiate from the same CA. Because Mozilla removed these certificates from +its bundle, ``certifi`` removed them as well. + +Unfortunately, old versions of OpenSSL (less than 1.0.2) sometimes fail to +validate certificate chains that use the strong roots. For this reason, if you +fail to validate a certificate using the ``certifi.where()`` mechanism, you can +intentionally re-add the 1024-bit roots back into your bundle by calling +``certifi.old_where()`` instead. This is not recommended in production: if at +all possible you should upgrade to a newer OpenSSL. However, if you have no +other option, this may work for you. + .. _`Certifi`: http://certifi.io/en/latest/ .. 
_`Requests`: http://docs.python-requests.org/en/latest/ diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/RECORD b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/RECORD new file mode 100644 index 0000000..80e48f3 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/RECORD @@ -0,0 +1,15 @@ +certifi/__init__.py,sha256=hG3J5tdsVc9gHdErxXJQnes5-EQI_102Y5UXk0G-qkk,63 +certifi/__main__.py,sha256=FiOYt1Fltst7wk9DRa6GCoBr8qBUxlNQu_MKJf04E6s,41 +certifi/cacert.pem,sha256=j_IWL25eEgetcl_WsBPVc3qYwpOleezC6wo_zWb98V0,315580 +certifi/core.py,sha256=DqvIINYNNXsp3Srlk_NRaiizaww8po3l8t8ksz-Xt6Q,716 +certifi/old_root.pem,sha256=Sm1SGy9Y3FjEDEy9ie0EX39fcJCv_r6gAPtj9yBrXEY,24014 +certifi/weak.pem,sha256=spA74ndnORVAEKwL68MswT1BBXwtOHd9ht2vIKRF0oE,339594 +certifi-2015.11.20.dist-info/DESCRIPTION.rst,sha256=u4KmW8nf84KSFVrJue_kb-ArB1h3uUQT4H6CV_oOeUI,1706 +certifi-2015.11.20.dist-info/METADATA,sha256=z-iWa7SyyBzqckDPWM7zx6oVo6C3EYMn3xKBp7Mgzgg,2522 +certifi-2015.11.20.dist-info/RECORD,, +certifi-2015.11.20.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110 +certifi-2015.11.20.dist-info/metadata.json,sha256=wO51GWDU74nDlRgLGR8kNuvtzKreMz0K4Fp3E4fhUys,911 +certifi-2015.11.20.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 +certifi/__pycache__/core.cpython-34.pyc,, +certifi/__pycache__/__init__.cpython-34.pyc,, +certifi/__pycache__/__main__.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/WHEEL b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/WHEEL similarity index 70% rename from Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/WHEEL rename to Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/WHEEL index 9dff69d..0de529b 100644 --- a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/WHEEL +++ b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/WHEEL @@ -1,5 +1,5 @@ Wheel-Version: 1.0 -Generator: bdist_wheel (0.24.0) +Generator: bdist_wheel (0.26.0) Root-Is-Purelib: true Tag: py2-none-any Tag: py3-none-any diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/metadata.json new file mode 100644 index 0000000..6ceba05 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/metadata.json @@ -0,0 +1 @@ +{"generator": "bdist_wheel (0.26.0)", "summary": "Python package for providing Mozilla's CA Bundle.", "classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4"], "extensions": {"python.details": {"project_urls": {"Home": "http://certifi.io/"}, "contacts": [{"email": "me@kennethreitz.com", "name": "Kenneth Reitz", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "ISC", "metadata_version": "2.0", "name": "certifi", "version": "2015.11.20"} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/top_level.txt 
b/Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/top_level.txt rename to Shared/lib/python3.4/site-packages/certifi-2015.11.20.dist-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/DESCRIPTION.rst b/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/DESCRIPTION.rst deleted file mode 100644 index 5cb5f6d..0000000 --- a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,30 +0,0 @@ -Certifi: Python SSL Certificates -================================ - -`Certifi`_ is a carefully curated collection of Root Certificates for -validating the trustworthiness of SSL certificates while verifying the identity -of TLS hosts. It has been extracted from the `Requests`_ project. - -Installation ------------- - -`certifi` is available on PyPI. Simply install it with `pip`:: - - $ pip install certifi - -Usage ------ - -To reference the installed CA Bundle, you can use the built-in function:: - - >>> import certifi - - >>> certifi.where() - '/usr/local/lib/python2.7/site-packages/certifi/cacert.pem' - -Enjoy! - -.. _`Certifi`: http://certifi.io/en/latest/ -.. _`Requests`: http://docs.python-requests.org/en/latest/ - - diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/RECORD b/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/RECORD deleted file mode 100644 index d5363bc..0000000 --- a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/RECORD +++ /dev/null @@ -1,15 +0,0 @@ -certifi/__init__.py,sha256=T8LOdkem2W_EqteuCirstbPu3iS11BmKnS_nKqQI_kQ,65 -certifi/__main__.py,sha256=FiOYt1Fltst7wk9DRa6GCoBr8qBUxlNQu_MKJf04E6s,41 -certifi/cacert.pem,sha256=wY10ezo0r5ZPcgfctoi3Q9KRZ79_tpb_MPDGsgWiOwE,320698 -certifi/core.py,sha256=DqvIINYNNXsp3Srlk_NRaiizaww8po3l8t8ksz-Xt6Q,716 -certifi/old_root.pem,sha256=Sm1SGy9Y3FjEDEy9ie0EX39fcJCv_r6gAPtj9yBrXEY,24014 -certifi/weak.pem,sha256=5xzWFRrSP0ZsXiW6emg8UQ_w497lT4qWCv32OO8R1ME,344712 -certifi-2015.9.6.2.dist-info/DESCRIPTION.rst,sha256=1HthO7cC8rfi_tZB3iPCnK7Npcd48svSApnFrl8J89Q,716 -certifi-2015.9.6.2.dist-info/METADATA,sha256=-IMJn5G46t_YY0VsjSgXQalm6mC4sChB8lsDanFlTME,1532 -certifi-2015.9.6.2.dist-info/metadata.json,sha256=LNvgTP4aFSgWMQ-8ySDRnRE7506kiisjTkPqBHna1YE,911 -certifi-2015.9.6.2.dist-info/RECORD,, -certifi-2015.9.6.2.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8 -certifi-2015.9.6.2.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 -certifi/__pycache__/__init__.cpython-34.pyc,, -certifi/__pycache__/__main__.cpython-34.pyc,, -certifi/__pycache__/core.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/metadata.json b/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/metadata.json deleted file mode 100644 index 0d756ff..0000000 --- a/Shared/lib/python3.4/site-packages/certifi-2015.9.6.2.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"license": "ISC", "name": "certifi", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "Python package for providing Mozilla's CA Bundle.", "version": "2015.9.6.2", "extensions": {"python.details": {"project_urls": {"Home": "http://certifi.io/"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "me@kennethreitz.com", "name": "Kenneth Reitz"}]}}, "classifiers": 
["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4"]} \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/certifi/__init__.py b/Shared/lib/python3.4/site-packages/certifi/__init__.py index 3bb2e55..2c3ceb0 100644 --- a/Shared/lib/python3.4/site-packages/certifi/__init__.py +++ b/Shared/lib/python3.4/site-packages/certifi/__init__.py @@ -1,3 +1,3 @@ from .core import where, old_where -__version__ = "2015.09.06.2" +__version__ = "2015.11.20" diff --git a/Shared/lib/python3.4/site-packages/certifi/cacert.pem b/Shared/lib/python3.4/site-packages/certifi/cacert.pem index dfa23ce..672fb1f 100644 --- a/Shared/lib/python3.4/site-packages/certifi/cacert.pem +++ b/Shared/lib/python3.4/site-packages/certifi/cacert.pem @@ -1,31 +1,4 @@ -# Issuer: O=Equifax OU=Equifax Secure Certificate Authority -# Subject: O=Equifax OU=Equifax Secure Certificate Authority -# Label: "Equifax Secure CA" -# Serial: 903804111 -# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4 -# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a -# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78 ------BEGIN CERTIFICATE----- -MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV -UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy -dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1 -MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx -dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B -AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f -BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A -cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC -AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ -MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm -aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw -ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj -IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF -MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA -A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y -7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh -1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4 ------END CERTIFICATE----- - # Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA # Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA # Label: "GlobalSign Root CA" @@ -117,38 +90,6 @@ F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== -----END CERTIFICATE----- -# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. 
- For authorized use only -# Label: "Verisign Class 4 Public Primary Certification Authority - G3" -# Serial: 314531972711909413743075096039378935511 -# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df -# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d -# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06 ------BEGIN CERTIFICATE----- -MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw -CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl -cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu -LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT -aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD -VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT -aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ -bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu -IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1 -GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ -+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd -U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm -NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY -ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ -ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1 -CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq -g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm -fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c -2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/ -bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== ------END CERTIFICATE----- - # Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited # Label: "Entrust.net Premium 2048 Secure Server CA" @@ -910,40 +851,6 @@ u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3ynGQI0DvDKcWy iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== -----END CERTIFICATE----- -# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com -# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com -# Label: "UTN DATACorp SGC Root CA" -# Serial: 91374294542884689855167577680241077609 -# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06 -# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4 -# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48 ------BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug -Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI ------END CERTIFICATE----- - # Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com # Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com # Label: "UTN USERFirst Hardware Root CA" @@ -1507,39 +1414,6 @@ rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= -----END CERTIFICATE----- -# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Kasım 2005 -# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. 
(c) Kasım 2005 -# Label: "TURKTRUST Certificate Services Provider Root 2" -# Serial: 1 -# MD5 Fingerprint: 37:a5:6e:d4:b1:25:84:97:b7:fd:56:15:7a:f9:a2:00 -# SHA1 Fingerprint: b4:35:d4:e1:11:9d:1c:66:90:a7:49:eb:b3:94:bd:63:7b:a7:82:b7 -# SHA256 Fingerprint: c4:70:cf:54:7e:23:02:b9:77:fb:29:dd:71:a8:9a:7b:6c:1f:60:77:7b:03:29:f5:60:17:f3:28:bf:4f:6b:e6 ------BEGIN CERTIFICATE----- -MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOc -UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx -c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xS -S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg -SGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcNMDUxMTA3MTAwNzU3 -WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVrdHJv -bmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJU -UjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSw -bGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWe -LiAoYykgS2FzxLFtIDIwMDUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqeLCDe2JAOCtFp0if7qnef -J1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKIx+XlZEdh -R3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJ -Qv2gQrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGX -JHpsmxcPbe9TmJEr5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1p -zpwACPI2/z7woQ8arBT9pmAPAgMBAAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58S -Fq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8GA1UdEwEB/wQFMAMBAf8wDQYJ -KoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/nttRbj2hWyfIvwq -ECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 -Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFz -gw2lGh1uEpJ+hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotH -uFEJjOp9zYhys2AzsfAKRO8P9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LS -y3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5UrbnBEI= ------END CERTIFICATE----- - # Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG # Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG # Label: "SwissSign Gold CA - G2" @@ -3362,37 +3236,6 @@ ducTZnV+ZfsBn5OHiJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmC IoaZM3Fa6hlXPZHNqcCjbgcTpsnt+GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= -----END CERTIFICATE----- -# Issuer: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH OU=A-Trust-nQual-03 -# Subject: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. 
Datenverkehr GmbH OU=A-Trust-nQual-03 -# Label: "A-Trust-nQual-03" -# Serial: 93214 -# MD5 Fingerprint: 49:63:ae:27:f4:d5:95:3d:d8:db:24:86:b8:9c:07:53 -# SHA1 Fingerprint: d3:c0:63:f2:19:ed:07:3e:34:ad:5d:75:0b:32:76:29:ff:d5:9a:f2 -# SHA256 Fingerprint: 79:3c:bf:45:59:b9:fd:e3:8a:b2:2d:f1:68:69:f6:98:81:ae:14:c4:b0:13:9a:c7:88:a7:8a:1a:fc:ca:02:fb ------BEGIN CERTIFICATE----- -MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJB -VDFIMEYGA1UECgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBp -bSBlbGVrdHIuIERhdGVudmVya2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5R -dWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5RdWFsLTAzMB4XDTA1MDgxNzIyMDAw -MFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgwRgYDVQQKDD9BLVRy -dXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0ZW52 -ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMM -EEEtVHJ1c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQCtPWFuA/OQO8BBC4SAzewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUj -lUC5B3ilJfYKvUWG6Nm9wASOhURh73+nyfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZ -znF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPESU7l0+m0iKsMrmKS1GWH -2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4iHQF63n1 -k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs -2e3Vcuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYD -VR0OBAoECERqlWdVeRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC -AQEAVdRU0VlIXLOThaq/Yy/kgM40ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fG -KOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmrsQd7TZjTXLDR8KdCoLXEjq/+ -8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZdJXDRZslo+S4R -FGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS -mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmE -DNuxUCAKGkq6ahq97BvIxYSazQ== ------END CERTIFICATE----- - # Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Label: "TWCA Root Certification Authority" @@ -5227,3 +5070,83 @@ Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ 8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= -----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r 
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited +# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited +# Label: "Certification Authority of WoSign G2" +# Serial: 142423943073812161787490648904721057092 +# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60 +# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1 +# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16 +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY +MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV +BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx +MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK +ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX +JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO +gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg +5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n +fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5 +2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ +KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8 +fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G +3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy +SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng +LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7 +XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg= +-----END CERTIFICATE----- + +# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited +# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited +# Label: "CA WoSign ECC Root" +# Serial: 138625735294506723296996289575837012112 +# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20 +# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b +# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02 +-----BEGIN CERTIFICATE----- +MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw +CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT +EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4 +NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb +MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID +YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8 +KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES +1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB +1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3 
+aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K +-----END CERTIFICATE----- diff --git a/Shared/lib/python3.4/site-packages/certifi/weak.pem b/Shared/lib/python3.4/site-packages/certifi/weak.pem index 6a66daa..dca94c9 100644 --- a/Shared/lib/python3.4/site-packages/certifi/weak.pem +++ b/Shared/lib/python3.4/site-packages/certifi/weak.pem @@ -1,31 +1,4 @@ -# Issuer: O=Equifax OU=Equifax Secure Certificate Authority -# Subject: O=Equifax OU=Equifax Secure Certificate Authority -# Label: "Equifax Secure CA" -# Serial: 903804111 -# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4 -# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a -# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78 ------BEGIN CERTIFICATE----- -MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV -UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy -dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1 -MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx -dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B -AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f -BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A -cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC -AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ -MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm -aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw -ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj -IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF -MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA -A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y -7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh -1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4 ------END CERTIFICATE----- - # Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA # Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA # Label: "GlobalSign Root CA" @@ -117,38 +90,6 @@ F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== -----END CERTIFICATE----- -# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only -# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. 
- For authorized use only -# Label: "Verisign Class 4 Public Primary Certification Authority - G3" -# Serial: 314531972711909413743075096039378935511 -# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df -# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d -# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06 ------BEGIN CERTIFICATE----- -MIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw -CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl -cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu -LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT -aWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD -VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT -aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ -bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu -IENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1 -GQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ -+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd -U6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm -NxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY -ufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/ -ky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1 -CtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq -g6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm -fjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c -2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/ -bLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg== ------END CERTIFICATE----- - # Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited # Label: "Entrust.net Premium 2048 Secure Server CA" @@ -910,40 +851,6 @@ u3pCMuiufi3IvKwUv9kP2Vv8wfl6leF9fpb8cbDCTMjfRTTJzg3ynGQI0DvDKcWy iJf2fL1LuCAWZwWN4jvBcj+UlTfHXbme2JOhF4//DGYVwSR8MnwDHTuhWEUykw== -----END CERTIFICATE----- -# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com -# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com -# Label: "UTN DATACorp SGC Root CA" -# Serial: 91374294542884689855167577680241077609 -# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06 -# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4 -# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48 ------BEGIN CERTIFICATE----- -MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB -kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug -Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho -dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw -IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG -EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD -VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu -dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 -E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ -D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK -4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq -lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW -bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB -o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT -MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js -LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr -BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB -AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft -Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj -j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH -KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv -2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 -mfnGV/TJVTl4uix5yaaIK/QI ------END CERTIFICATE----- - # Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com # Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com # Label: "UTN USERFirst Hardware Root CA" @@ -1507,39 +1414,6 @@ rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= -----END CERTIFICATE----- -# Issuer: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. (c) Kasım 2005 -# Subject: CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A.Ş. 
(c) Kasım 2005 -# Label: "TURKTRUST Certificate Services Provider Root 2" -# Serial: 1 -# MD5 Fingerprint: 37:a5:6e:d4:b1:25:84:97:b7:fd:56:15:7a:f9:a2:00 -# SHA1 Fingerprint: b4:35:d4:e1:11:9d:1c:66:90:a7:49:eb:b3:94:bd:63:7b:a7:82:b7 -# SHA256 Fingerprint: c4:70:cf:54:7e:23:02:b9:77:fb:29:dd:71:a8:9a:7b:6c:1f:60:77:7b:03:29:f5:60:17:f3:28:bf:4f:6b:e6 ------BEGIN CERTIFICATE----- -MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOc -UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx -c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xS -S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg -SGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcNMDUxMTA3MTAwNzU3 -WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVrdHJv -bmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJU -UjEPMA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSw -bGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWe -LiAoYykgS2FzxLFtIDIwMDUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqeLCDe2JAOCtFp0if7qnef -J1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKIx+XlZEdh -R3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJ -Qv2gQrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGX -JHpsmxcPbe9TmJEr5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1p -zpwACPI2/z7woQ8arBT9pmAPAgMBAAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58S -Fq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8GA1UdEwEB/wQFMAMBAf8wDQYJ -KoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/nttRbj2hWyfIvwq -ECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 -Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFz -gw2lGh1uEpJ+hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotH -uFEJjOp9zYhys2AzsfAKRO8P9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LS -y3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5UrbnBEI= ------END CERTIFICATE----- - # Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG # Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG # Label: "SwissSign Gold CA - G2" @@ -3362,37 +3236,6 @@ ducTZnV+ZfsBn5OHiJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmC IoaZM3Fa6hlXPZHNqcCjbgcTpsnt+GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= -----END CERTIFICATE----- -# Issuer: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. Datenverkehr GmbH OU=A-Trust-nQual-03 -# Subject: CN=A-Trust-nQual-03 O=A-Trust Ges. f. Sicherheitssysteme im elektr. 
Datenverkehr GmbH OU=A-Trust-nQual-03 -# Label: "A-Trust-nQual-03" -# Serial: 93214 -# MD5 Fingerprint: 49:63:ae:27:f4:d5:95:3d:d8:db:24:86:b8:9c:07:53 -# SHA1 Fingerprint: d3:c0:63:f2:19:ed:07:3e:34:ad:5d:75:0b:32:76:29:ff:d5:9a:f2 -# SHA256 Fingerprint: 79:3c:bf:45:59:b9:fd:e3:8a:b2:2d:f1:68:69:f6:98:81:ae:14:c4:b0:13:9a:c7:88:a7:8a:1a:fc:ca:02:fb ------BEGIN CERTIFICATE----- -MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJB -VDFIMEYGA1UECgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBp -bSBlbGVrdHIuIERhdGVudmVya2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5R -dWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5RdWFsLTAzMB4XDTA1MDgxNzIyMDAw -MFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgwRgYDVQQKDD9BLVRy -dXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0ZW52 -ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMM -EEEtVHJ1c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQCtPWFuA/OQO8BBC4SAzewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUj -lUC5B3ilJfYKvUWG6Nm9wASOhURh73+nyfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZ -znF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPESU7l0+m0iKsMrmKS1GWH -2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4iHQF63n1 -k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs -2e3Vcuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYD -VR0OBAoECERqlWdVeRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC -AQEAVdRU0VlIXLOThaq/Yy/kgM40ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fG -KOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmrsQd7TZjTXLDR8KdCoLXEjq/+ -8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZdJXDRZslo+S4R -FGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS -mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmE -DNuxUCAKGkq6ahq97BvIxYSazQ== ------END CERTIFICATE----- - # Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA # Label: "TWCA Root Certification Authority" @@ -5227,6 +5070,86 @@ Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ 8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= -----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GB CA" +# Serial: 157768595616588414422159278966750757568 +# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d +# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed +# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6 +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r 
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +# Issuer: CN=Certification Authority of WoSign G2 O=WoSign CA Limited +# Subject: CN=Certification Authority of WoSign G2 O=WoSign CA Limited +# Label: "Certification Authority of WoSign G2" +# Serial: 142423943073812161787490648904721057092 +# MD5 Fingerprint: c8:1c:7d:19:aa:cb:71:93:f2:50:f8:52:a8:1e:ba:60 +# SHA1 Fingerprint: fb:ed:dc:90:65:b7:27:20:37:bc:55:0c:9c:56:de:bb:f2:78:94:e1 +# SHA256 Fingerprint: d4:87:a5:6f:83:b0:74:82:e8:5e:96:33:94:c1:ec:c2:c9:e5:1d:09:03:ee:94:6b:02:c3:01:58:1e:d9:9e:16 +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQayXaioidfLwPBbOxemFFRDANBgkqhkiG9w0BAQsFADBY +MQswCQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxLTArBgNV +BAMTJENlcnRpZmljYXRpb24gQXV0aG9yaXR5IG9mIFdvU2lnbiBHMjAeFw0xNDEx +MDgwMDU4NThaFw00NDExMDgwMDU4NThaMFgxCzAJBgNVBAYTAkNOMRowGAYDVQQK +ExFXb1NpZ24gQ0EgTGltaXRlZDEtMCsGA1UEAxMkQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkgb2YgV29TaWduIEcyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvsXEoCKASU+/2YcRxlPhuw+9YH+v9oIOH9ywjj2X4FA8jzrvZjtFB5sg+OPX +JYY1kBaiXW8wGQiHC38Gsp1ij96vkqVg1CuAmlI/9ZqD6TRay9nVYlzmDuDfBpgO +gHzKtB0TiGsOqCR3A9DuW/PKaZE1OVbFbeP3PU9ekzgkyhjpJMuSA93MHD0JcOQg +5PGurLtzaaNjOg9FD6FKmsLRY6zLEPg95k4ot+vElbGs/V6r+kHLXZ1L3PR8du9n +fwB6jdKgGlxNIuG12t12s9R23164i5jIFFTMaxeSt+BKv0mUYQs4kI9dJGwlezt5 +2eJ+na2fmKEG/HgUYFf47oB3sQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+mCp62XF3RYUCE4MD42b4Pdkr2cwDQYJ +KoZIhvcNAQELBQADggEBAFfDejaCnI2Y4qtAqkePx6db7XznPWZaOzG73/MWM5H8 +fHulwqZm46qwtyeYP0nXYGdnPzZPSsvxFPpahygc7Y9BMsaV+X3avXtbwrAh449G +3CE4Q3RM+zD4F3LBMvzIkRfEzFg3TgvMWvchNSiDbGAtROtSjFA9tWwS1/oJu2yy +SrHFieT801LYYRf+epSEj3m2M1m6D8QL4nCgS3gu+sif/a+RZQp4OBXllxcU3fng +LDT4ONCEIgDAFFEYKwLcMFrw6AF8NTojrwjkr6qOKEJJLvD1mTS+7Q9LGOHSJDy7 +XUe3IfKN0QqZjuNuPq1w4I+5ysxugTH2e5x6eeRncRg= +-----END CERTIFICATE----- + +# Issuer: CN=CA WoSign ECC Root O=WoSign CA Limited +# Subject: CN=CA WoSign ECC Root O=WoSign CA Limited +# Label: "CA WoSign ECC Root" +# Serial: 138625735294506723296996289575837012112 +# MD5 Fingerprint: 80:c6:53:ee:61:82:28:72:f0:ff:21:b9:17:ca:b2:20 +# SHA1 Fingerprint: d2:7a:d2:be:ed:94:c0:a1:3c:c7:25:21:ea:5d:71:be:81:19:f3:2b +# SHA256 Fingerprint: 8b:45:da:1c:06:f7:91:eb:0c:ab:f2:6b:e5:88:f5:fb:23:16:5c:2e:61:4b:f8:85:56:2d:0d:ce:50:b2:9b:02 +-----BEGIN CERTIFICATE----- +MIICCTCCAY+gAwIBAgIQaEpYcIBr8I8C+vbe6LCQkDAKBggqhkjOPQQDAzBGMQsw +CQYDVQQGEwJDTjEaMBgGA1UEChMRV29TaWduIENBIExpbWl0ZWQxGzAZBgNVBAMT +EkNBIFdvU2lnbiBFQ0MgUm9vdDAeFw0xNDExMDgwMDU4NThaFw00NDExMDgwMDU4 +NThaMEYxCzAJBgNVBAYTAkNOMRowGAYDVQQKExFXb1NpZ24gQ0EgTGltaXRlZDEb +MBkGA1UEAxMSQ0EgV29TaWduIEVDQyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACID +YgAE4f2OuEMkq5Z7hcK6C62N4DrjJLnSsb6IOsq/Srj57ywvr1FQPEd1bPiUt5v8 +KB7FVMxjnRZLU8HnIKvNrCXSf4/CwVqCXjCLelTOA7WRf6qU0NGKSMyCBSah1VES +1ns2o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUqv3VWqP2h4syhf3RMluARZPzA7gwCgYIKoZIzj0EAwMDaAAwZQIxAOSkhLCB +1T2wdKyUpOgOPQB0TKGXa/kNUTyh2Tv0Daupn75OcsqF1NnstTJFGG+rrQIwfcf3 
+aWMvoeGY7xMQ0Xk/0f7qO3/eVvSQsRUR2LIiFdAvwyYua/GRspBl9JrmkO5K +-----END CERTIFICATE----- # Issuer: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Subject: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited # Label: "Entrust.net Secure Server CA" diff --git a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/html5lib-0.9999999.egg-info/installed-files.txt index 7118338..f9032dc 100644 --- a/Shared/lib/python3.4/site-packages/html5lib-0.9999999.egg-info/installed-files.txt +++ b/Shared/lib/python3.4/site-packages/html5lib-0.9999999.egg-info/installed-files.txt @@ -71,8 +71,8 @@ ../html5lib/trie/__pycache__/datrie.cpython-34.pyc ../html5lib/trie/__pycache__/py.cpython-34.pyc ./ +dependency_links.txt +requires.txt top_level.txt PKG-INFO -requires.txt -dependency_links.txt SOURCES.txt diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/PKG-INFO similarity index 80% rename from Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/PKG-INFO rename to Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/PKG-INFO index cad46d6..e0c8e95 100644 --- a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/PKG-INFO +++ b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/PKG-INFO @@ -1,12 +1,12 @@ Metadata-Version: 1.1 Name: ox -Version: 2.3.x +Version: 2.3.b-769- Summary: python-ox - the web in a dict -Home-page: http://code.0x2620.org/python-ox +Home-page: https://wiki.0x2620.org/wiki/python-ox Author: 0x2620 Author-email: 0x2620@0x2620.org License: GPLv3 -Download-URL: http://code.0x2620.org/python-ox/download +Download-URL: https://code.0x2620.org/python-ox/download Description: UNKNOWN Platform: UNKNOWN Classifier: Operating System :: OS Independent diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/SOURCES.txt similarity index 97% rename from Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/SOURCES.txt rename to Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/SOURCES.txt index 95ddf17..551bda3 100644 --- a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/SOURCES.txt +++ b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/SOURCES.txt @@ -1,5 +1,6 @@ README ox/__init__.py +ox/__version.py ox/api.py ox/cache.py ox/file.py diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/installed-files.txt similarity index 98% rename from Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/installed-files.txt rename to Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/installed-files.txt index 0204a65..8392691 100644 --- a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/installed-files.txt +++ b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/installed-files.txt @@ -19,6 +19,7 @@ ../ox/srt.py 
../ox/text.py ../ox/utils.py +../ox/__version.py ../ox/django/__init__.py ../ox/django/decorators.py ../ox/django/fields.py @@ -98,6 +99,7 @@ ../ox/__pycache__/srt.cpython-34.pyc ../ox/__pycache__/text.cpython-34.pyc ../ox/__pycache__/utils.cpython-34.pyc +../ox/__pycache__/__version.cpython-34.pyc ../ox/django/__pycache__/__init__.cpython-34.pyc ../ox/django/__pycache__/decorators.cpython-34.pyc ../ox/django/__pycache__/fields.cpython-34.pyc @@ -157,8 +159,8 @@ ../ox/web/__pycache__/wikipedia.cpython-34.pyc ../ox/web/__pycache__/youtube.cpython-34.pyc ./ +top_level.txt PKG-INFO requires.txt dependency_links.txt -top_level.txt SOURCES.txt diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/requires.txt b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/requires.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/requires.txt rename to Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/requires.txt diff --git a/Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/top_level.txt b/Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/top_level.txt similarity index 100% rename from Shared/lib/python3.4/site-packages/ox-2.3.x.egg-info/top_level.txt rename to Shared/lib/python3.4/site-packages/ox-2.3.b_769_.egg-info/top_level.txt diff --git a/Shared/lib/python3.4/site-packages/ox/__version.py b/Shared/lib/python3.4/site-packages/ox/__version.py new file mode 100644 index 0000000..0de6ee5 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/ox/__version.py @@ -0,0 +1 @@ +VERSION="2.3.b'769'" \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/ox/jsonc.py b/Shared/lib/python3.4/site-packages/ox/jsonc.py index 9006d5c..83751ea 100644 --- a/Shared/lib/python3.4/site-packages/ox/jsonc.py +++ b/Shared/lib/python3.4/site-packages/ox/jsonc.py @@ -3,6 +3,8 @@ # vi:si:et:sw=4:sts=4:ts=4 from __future__ import with_statement, print_function +import re + from .js import minify from .utils import json @@ -14,8 +16,18 @@ def loads(source): try: minified = minify(source) return json.loads(minified) - except json.JSONDecodeError as e: - s = minified.split('\n') - context = s[e.lineno-1][max(0, e.colno-1):e.colno+30] - msg = e.msg + ' at ' + context - raise json.JSONDecodeError(msg, minified, e.pos) + except ValueError as e: + msg = e.message if hasattr(e, 'message') else str(e) + lineno = None + colno = None + try: + m = re.search(r'line (\d+) column (\d+)', msg) + if m: + (lineno, colno) = map(int, m.groups()) + except: + pass + if lineno and colno: + s = minified.split('\n') + context = s[lineno-1][max(0, colno-30):colno+30] + msg += ' at:\n\n %s\n %s\033[1m^\033[0m' %(context, ' ' * (colno - max(0, colno-30) - 2)) + raise ValueError(msg) diff --git a/Shared/lib/python3.4/site-packages/requests-2.3.0.dist-info/RECORD b/Shared/lib/python3.4/site-packages/requests-2.3.0.dist-info/RECORD index da61403..f1b8623 100644 --- a/Shared/lib/python3.4/site-packages/requests-2.3.0.dist-info/RECORD +++ b/Shared/lib/python3.4/site-packages/requests-2.3.0.dist-info/RECORD @@ -83,81 +83,81 @@ requests-2.3.0.dist-info/pydist.json,sha256=7nySdPrVYYyJK2C3cPlHJr1oSZ_-lFiBlp9D requests-2.3.0.dist-info/RECORD,, requests-2.3.0.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9 requests-2.3.0.dist-info/WHEEL,sha256=SXYYsi-y-rEGIva8sB8iKF6bAFD6YDhmqHX5hI3fc0o,110 -requests/packages/chardet/__pycache__/sbcsgroupprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/compat.cpython-34.pyc,, 
-requests/packages/chardet/__pycache__/big5prober.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/connection.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/ssl_.cpython-34.pyc,, -requests/packages/chardet/__pycache__/chardistribution.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/connection.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/timeout.cpython-34.pyc,, +requests/packages/urllib3/util/__pycache__/url.cpython-34.pyc,, requests/packages/chardet/__pycache__/utf8prober.cpython-34.pyc,, +requests/__pycache__/sessions.cpython-34.pyc,, +requests/packages/urllib3/util/__pycache__/request.cpython-34.pyc,, +requests/packages/chardet/__pycache__/latin1prober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/eucjpprober.cpython-34.pyc,, +requests/__pycache__/certs.cpython-34.pyc,, +requests/packages/urllib3/util/__pycache__/response.cpython-34.pyc,, +requests/packages/urllib3/__pycache__/__init__.cpython-34.pyc,, +requests/packages/chardet/__pycache__/universaldetector.cpython-34.pyc,, +requests/packages/chardet/__pycache__/gb2312freq.cpython-34.pyc,, +requests/packages/chardet/__pycache__/cp949prober.cpython-34.pyc,, +requests/packages/urllib3/util/__pycache__/ssl_.cpython-34.pyc,, +requests/__pycache__/api.cpython-34.pyc,, +requests/__pycache__/adapters.cpython-34.pyc,, +requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-34.pyc,, +requests/packages/chardet/__pycache__/hebrewprober.cpython-34.pyc,, +requests/packages/urllib3/__pycache__/connectionpool.cpython-34.pyc,, requests/packages/urllib3/__pycache__/_collections.cpython-34.pyc,, +requests/packages/chardet/__pycache__/escprober.cpython-34.pyc,, +requests/packages/urllib3/__pycache__/fields.cpython-34.pyc,, +requests/packages/chardet/__pycache__/euctwfreq.cpython-34.pyc,, +requests/packages/chardet/__pycache__/gb2312prober.cpython-34.pyc,, +requests/packages/urllib3/__pycache__/response.cpython-34.pyc,, +requests/__pycache__/__init__.cpython-34.pyc,, +requests/packages/chardet/__pycache__/euckrfreq.cpython-34.pyc,, +requests/packages/urllib3/__pycache__/connection.cpython-34.pyc,, +requests/packages/chardet/__pycache__/langhebrewmodel.cpython-34.pyc,, +requests/packages/urllib3/__pycache__/poolmanager.cpython-34.pyc,, +requests/packages/urllib3/contrib/__pycache__/pyopenssl.cpython-34.pyc,, +requests/__pycache__/status_codes.cpython-34.pyc,, +requests/packages/urllib3/util/__pycache__/__init__.cpython-34.pyc,, +requests/__pycache__/compat.cpython-34.pyc,, +requests/packages/chardet/__pycache__/mbcharsetprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/sbcsgroupprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/jpcntx.cpython-34.pyc,, +requests/packages/chardet/__pycache__/langthaimodel.cpython-34.pyc,, +requests/packages/chardet/__pycache__/constants.cpython-34.pyc,, +requests/__pycache__/exceptions.cpython-34.pyc,, requests/packages/chardet/__pycache__/escsm.cpython-34.pyc,, requests/packages/chardet/__pycache__/__init__.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/__init__.cpython-34.pyc,, +requests/packages/chardet/__pycache__/langcyrillicmodel.cpython-34.pyc,, +requests/packages/urllib3/__pycache__/filepost.cpython-34.pyc,, requests/packages/chardet/__pycache__/jisfreq.cpython-34.pyc,, -requests/packages/urllib3/packages/__pycache__/six.cpython-34.pyc,, -requests/__pycache__/hooks.cpython-34.pyc,, -requests/packages/chardet/__pycache__/mbcssm.cpython-34.pyc,, 
-requests/packages/urllib3/util/__pycache__/__init__.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/poolmanager.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/url.cpython-34.pyc,, -requests/packages/chardet/__pycache__/constants.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langhebrewmodel.cpython-34.pyc,, -requests/packages/chardet/__pycache__/latin1prober.cpython-34.pyc,, -requests/__pycache__/sessions.cpython-34.pyc,, -requests/packages/chardet/__pycache__/euctwfreq.cpython-34.pyc,, -requests/__pycache__/certs.cpython-34.pyc,, -requests/packages/chardet/__pycache__/euckrprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/universaldetector.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langbulgarianmodel.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langhungarianmodel.cpython-34.pyc,, -requests/packages/chardet/__pycache__/codingstatemachine.cpython-34.pyc,, -requests/packages/chardet/__pycache__/jpcntx.cpython-34.pyc,, -requests/packages/chardet/__pycache__/chardetect.cpython-34.pyc,, -requests/__pycache__/exceptions.cpython-34.pyc,, -requests/__pycache__/compat.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/fields.cpython-34.pyc,, -requests/packages/chardet/__pycache__/sbcharsetprober.cpython-34.pyc,, -requests/__pycache__/api.cpython-34.pyc,, -requests/packages/chardet/__pycache__/hebrewprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/big5freq.cpython-34.pyc,, -requests/packages/urllib3/packages/__pycache__/ordered_dict.cpython-34.pyc,, -requests/packages/chardet/__pycache__/euckrfreq.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langthaimodel.cpython-34.pyc,, +requests/packages/chardet/__pycache__/euctwprober.cpython-34.pyc,, +requests/packages/urllib3/__pycache__/exceptions.cpython-34.pyc,, requests/packages/urllib3/packages/__pycache__/__init__.cpython-34.pyc,, -requests/packages/chardet/__pycache__/eucjpprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/gb2312freq.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/response.cpython-34.pyc,, -requests/packages/urllib3/contrib/__pycache__/ntlmpool.cpython-34.pyc,, -requests/__pycache__/status_codes.cpython-34.pyc,, -requests/__pycache__/models.cpython-34.pyc,, -requests/__pycache__/__init__.cpython-34.pyc,, -requests/__pycache__/adapters.cpython-34.pyc,, -requests/packages/chardet/__pycache__/escprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/sjisprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/mbcsgroupprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/charsetgroupprober.cpython-34.pyc,, requests/packages/urllib3/contrib/__pycache__/__init__.cpython-34.pyc,, requests/__pycache__/structures.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/response.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/exceptions.cpython-34.pyc,, -requests/packages/chardet/__pycache__/sjisprober.cpython-34.pyc,, -requests/packages/urllib3/util/__pycache__/request.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/filepost.cpython-34.pyc,, -requests/packages/chardet/__pycache__/charsetprober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langcyrillicmodel.cpython-34.pyc,, -requests/__pycache__/cookies.cpython-34.pyc,, -requests/packages/chardet/__pycache__/charsetgroupprober.cpython-34.pyc,, -requests/packages/__pycache__/__init__.cpython-34.pyc,, -requests/packages/urllib3/__pycache__/connectionpool.cpython-34.pyc,, 
-requests/packages/urllib3/contrib/__pycache__/pyopenssl.cpython-34.pyc,, -requests/packages/chardet/__pycache__/langgreekmodel.cpython-34.pyc,, -requests/packages/chardet/__pycache__/mbcsgroupprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/mbcssm.cpython-34.pyc,, +requests/packages/chardet/__pycache__/chardetect.cpython-34.pyc,, +requests/packages/chardet/__pycache__/sbcharsetprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/langbulgarianmodel.cpython-34.pyc,, +requests/packages/urllib3/util/__pycache__/connection.cpython-34.pyc,, requests/__pycache__/auth.cpython-34.pyc,, -requests/packages/chardet/__pycache__/cp949prober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/gb2312prober.cpython-34.pyc,, -requests/packages/chardet/__pycache__/mbcharsetprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/langhungarianmodel.cpython-34.pyc,, +requests/__pycache__/cookies.cpython-34.pyc,, +requests/packages/chardet/__pycache__/compat.cpython-34.pyc,, +requests/packages/chardet/__pycache__/langgreekmodel.cpython-34.pyc,, +requests/packages/chardet/__pycache__/charsetprober.cpython-34.pyc,, requests/packages/urllib3/__pycache__/request.cpython-34.pyc,, -requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-34.pyc,, -requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-34.pyc,, +requests/packages/urllib3/util/__pycache__/timeout.cpython-34.pyc,, +requests/packages/urllib3/packages/__pycache__/six.cpython-34.pyc,, +requests/packages/chardet/__pycache__/euckrprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/chardistribution.cpython-34.pyc,, +requests/packages/chardet/__pycache__/big5freq.cpython-34.pyc,, +requests/packages/__pycache__/__init__.cpython-34.pyc,, +requests/__pycache__/models.cpython-34.pyc,, +requests/packages/chardet/__pycache__/big5prober.cpython-34.pyc,, +requests/packages/urllib3/contrib/__pycache__/ntlmpool.cpython-34.pyc,, +requests/packages/urllib3/packages/__pycache__/ordered_dict.cpython-34.pyc,, requests/__pycache__/utils.cpython-34.pyc,, -requests/packages/chardet/__pycache__/euctwprober.cpython-34.pyc,, +requests/packages/chardet/__pycache__/codingstatemachine.cpython-34.pyc,, +requests/__pycache__/hooks.cpython-34.pyc,, +requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/setuptools-18.5.dist-info/RECORD b/Shared/lib/python3.4/site-packages/setuptools-18.5.dist-info/RECORD index 537e181..b7a25b5 100644 --- a/Shared/lib/python3.4/site-packages/setuptools-18.5.dist-info/RECORD +++ b/Shared/lib/python3.4/site-packages/setuptools-18.5.dist-info/RECORD @@ -71,54 +71,54 @@ setuptools-18.5.dist-info/top_level.txt,sha256=7780fzudMJkykiTcIrAQ8m8Lll6kot3EE setuptools-18.5.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 /srv/openmedialibrary/platform/Shared/home/.local/bin/easy_install,sha256=7h7lc5DCAhnE08UwEm49wGymGDGdOcrkRdncTKYmXIQ,233 /srv/openmedialibrary/platform/Shared/home/.local/bin/easy_install-3.4,sha256=7h7lc5DCAhnE08UwEm49wGymGDGdOcrkRdncTKYmXIQ,233 -setuptools/command/__pycache__/develop.cpython-34.pyc,, -setuptools/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/__pycache__/__init__.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,, -setuptools/command/__pycache__/egg_info.cpython-34.pyc,, -pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,, 
-setuptools/command/__pycache__/install_scripts.cpython-34.pyc,, -setuptools/command/__pycache__/build_py.cpython-34.pyc,, -setuptools/command/__pycache__/register.cpython-34.pyc,, -setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,, _markerlib/__pycache__/markers.cpython-34.pyc,, -setuptools/command/__pycache__/test.cpython-34.pyc,, -setuptools/command/__pycache__/install_egg_info.cpython-34.pyc,, -setuptools/__pycache__/msvc9_support.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,, -setuptools/__pycache__/dist.cpython-34.pyc,, -setuptools/command/__pycache__/easy_install.cpython-34.pyc,, -setuptools/__pycache__/ssl_support.cpython-34.pyc,, -setuptools/__pycache__/py27compat.cpython-34.pyc,, -setuptools/__pycache__/py26compat.cpython-34.pyc,, -setuptools/command/__pycache__/upload_docs.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,, -setuptools/__pycache__/archive_util.cpython-34.pyc,, -_markerlib/__pycache__/__init__.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,, -setuptools/__pycache__/package_index.cpython-34.pyc,, -setuptools/command/__pycache__/rotate.cpython-34.pyc,, -setuptools/__pycache__/version.cpython-34.pyc,, -setuptools/command/__pycache__/build_ext.cpython-34.pyc,, -setuptools/__pycache__/py31compat.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,, -setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,, -setuptools/__pycache__/site-patch.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,, -setuptools/__pycache__/unicode_utils.cpython-34.pyc,, -setuptools/__pycache__/depends.cpython-34.pyc,, -setuptools/command/__pycache__/sdist.cpython-34.pyc,, -setuptools/command/__pycache__/install_lib.cpython-34.pyc,, -setuptools/__pycache__/utils.cpython-34.pyc,, -setuptools/__pycache__/sandbox.cpython-34.pyc,, -setuptools/command/__pycache__/saveopts.cpython-34.pyc,, -setuptools/command/__pycache__/install.cpython-34.pyc,, -setuptools/__pycache__/compat.cpython-34.pyc,, -setuptools/__pycache__/windows_support.cpython-34.pyc,, -setuptools/command/__pycache__/__init__.cpython-34.pyc,, setuptools/command/__pycache__/setopt.cpython-34.pyc,, -pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,, -__pycache__/easy_install.cpython-34.pyc,, -setuptools/__pycache__/extension.cpython-34.pyc,, setuptools/command/__pycache__/alias.cpython-34.pyc,, +setuptools/__pycache__/sandbox.cpython-34.pyc,, +setuptools/__pycache__/compat.cpython-34.pyc,, +pkg_resources/__pycache__/__init__.cpython-34.pyc,, +setuptools/command/__pycache__/install.cpython-34.pyc,, +setuptools/command/__pycache__/egg_info.cpython-34.pyc,, +pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-34.pyc,, +setuptools/command/__pycache__/test.cpython-34.pyc,, +setuptools/command/__pycache__/register.cpython-34.pyc,, +setuptools/command/__pycache__/bdist_wininst.cpython-34.pyc,, +setuptools/__pycache__/utils.cpython-34.pyc,, +setuptools/command/__pycache__/easy_install.cpython-34.pyc,, +setuptools/command/__pycache__/bdist_rpm.cpython-34.pyc,, +setuptools/__pycache__/py31compat.cpython-34.pyc,, +setuptools/__pycache__/depends.cpython-34.pyc,, +setuptools/__pycache__/extension.cpython-34.pyc,, +setuptools/__pycache__/__init__.cpython-34.pyc,, +__pycache__/easy_install.cpython-34.pyc,, +setuptools/__pycache__/py27compat.cpython-34.pyc,, 
+setuptools/__pycache__/windows_support.cpython-34.pyc,, +setuptools/__pycache__/dist.cpython-34.pyc,, +setuptools/command/__pycache__/saveopts.cpython-34.pyc,, +pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-34.pyc,, +_markerlib/__pycache__/__init__.cpython-34.pyc,, +setuptools/command/__pycache__/install_scripts.cpython-34.pyc,, +setuptools/__pycache__/msvc9_support.cpython-34.pyc,, +pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-34.pyc,, +setuptools/__pycache__/ssl_support.cpython-34.pyc,, +setuptools/__pycache__/version.cpython-34.pyc,, +setuptools/__pycache__/site-patch.cpython-34.pyc,, +setuptools/command/__pycache__/rotate.cpython-34.pyc,, +pkg_resources/_vendor/__pycache__/__init__.cpython-34.pyc,, +setuptools/command/__pycache__/build_py.cpython-34.pyc,, +setuptools/__pycache__/archive_util.cpython-34.pyc,, +setuptools/command/__pycache__/install_lib.cpython-34.pyc,, +setuptools/command/__pycache__/__init__.cpython-34.pyc,, +pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-34.pyc,, +setuptools/command/__pycache__/build_ext.cpython-34.pyc,, +setuptools/__pycache__/py26compat.cpython-34.pyc,, +setuptools/__pycache__/lib2to3_ex.cpython-34.pyc,, +setuptools/__pycache__/unicode_utils.cpython-34.pyc,, +setuptools/command/__pycache__/bdist_egg.cpython-34.pyc,, +setuptools/command/__pycache__/develop.cpython-34.pyc,, +setuptools/command/__pycache__/upload_docs.cpython-34.pyc,, +setuptools/command/__pycache__/sdist.cpython-34.pyc,, +pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-34.pyc,, +pkg_resources/_vendor/packaging/__pycache__/version.cpython-34.pyc,, +setuptools/command/__pycache__/install_egg_info.cpython-34.pyc,, +setuptools/__pycache__/package_index.cpython-34.pyc,, diff --git a/Shared/lib/python3.4/site-packages/socks.py b/Shared/lib/python3.4/site-packages/socks.py new file mode 100644 index 0000000..56bfca8 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/socks.py @@ -0,0 +1,712 @@ +""" +SocksiPy - Python SOCKS module. +Version 1.5.6 + +Copyright 2006 Dan-Haim. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +3. Neither the name of Dan Haim nor the names of his contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA +OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE. 
+ + +This module provides a standard socket-like interface for Python +for tunneling connections through SOCKS proxies. + +=============================================================================== + +Minor modifications made by Christopher Gilbert (http://motomastyle.com/) +for use in PyLoris (http://pyloris.sourceforge.net/) + +Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/) +mainly to merge bug fixes found in Sourceforge + +Modifications made by Anorov (https://github.com/Anorov) +-Forked and renamed to PySocks +-Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method) +-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler, + courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py +-Re-styled code to make it readable + -Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc. + -Improved exception handling and output + -Removed irritating use of sequence indexes, replaced with tuple unpacked variables + -Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03" + -Other general fixes +-Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies +-Various small bug fixes +""" + +__version__ = "1.5.6" + +import socket +import struct +from errno import EOPNOTSUPP, EINVAL, EAGAIN +from io import BytesIO +from os import SEEK_CUR +from collections import Callable + +PROXY_TYPE_SOCKS4 = SOCKS4 = 1 +PROXY_TYPE_SOCKS5 = SOCKS5 = 2 +PROXY_TYPE_HTTP = HTTP = 3 + +PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP} +PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys())) + +_orgsocket = _orig_socket = socket.socket + +class ProxyError(IOError): + """ + socket_err contains original socket.error exception. + """ + def __init__(self, msg, socket_err=None): + self.msg = msg + self.socket_err = socket_err + + if socket_err: + self.msg += ": {0}".format(socket_err) + + def __str__(self): + return self.msg + +class GeneralProxyError(ProxyError): pass +class ProxyConnectionError(ProxyError): pass +class SOCKS5AuthError(ProxyError): pass +class SOCKS5Error(ProxyError): pass +class SOCKS4Error(ProxyError): pass +class HTTPError(ProxyError): pass + +SOCKS4_ERRORS = { 0x5B: "Request rejected or failed", + 0x5C: "Request rejected because SOCKS server cannot connect to identd on the client", + 0x5D: "Request rejected because the client program and identd report different user-ids" + } + +SOCKS5_ERRORS = { 0x01: "General SOCKS server failure", + 0x02: "Connection not allowed by ruleset", + 0x03: "Network unreachable", + 0x04: "Host unreachable", + 0x05: "Connection refused", + 0x06: "TTL expired", + 0x07: "Command not supported, or protocol error", + 0x08: "Address type not supported" + } + +DEFAULT_PORTS = { SOCKS4: 1080, + SOCKS5: 1080, + HTTP: 8080 + } + +def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None): + """ + set_default_proxy(proxy_type, addr[, port[, rdns[, username, password]]]) + + Sets a default proxy which all further socksocket objects will use, + unless explicitly changed. All parameters are as for socket.set_proxy(). + """ + socksocket.default_proxy = (proxy_type, addr, port, rdns, + username.encode() if username else None, + password.encode() if password else None) + +setdefaultproxy = set_default_proxy + +def get_default_proxy(): + """ + Returns the default proxy, set by set_default_proxy. 
+ """ + return socksocket.default_proxy + +getdefaultproxy = get_default_proxy + +def wrap_module(module): + """ + Attempts to replace a module's socket library with a SOCKS socket. Must set + a default proxy using set_default_proxy(...) first. + This will only work on modules that import socket directly into the namespace; + most of the Python Standard Library falls into this category. + """ + if socksocket.default_proxy: + module.socket.socket = socksocket + else: + raise GeneralProxyError("No default proxy specified") + +wrapmodule = wrap_module + +def create_connection(dest_pair, proxy_type=None, proxy_addr=None, + proxy_port=None, proxy_rdns=True, + proxy_username=None, proxy_password=None, + timeout=None, source_address=None, + socket_options=None): + """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object + + Like socket.create_connection(), but connects to proxy + before returning the socket object. + + dest_pair - 2-tuple of (IP/hostname, port). + **proxy_args - Same args passed to socksocket.set_proxy() if present. + timeout - Optional socket timeout value, in seconds. + source_address - tuple (host, port) for the socket to bind to as its source + address before connecting (only for compatibility) + """ + sock = socksocket() + if socket_options is not None: + for opt in socket_options: + sock.setsockopt(*opt) + if isinstance(timeout, (int, float)): + sock.settimeout(timeout) + if proxy_type is not None: + sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns, + proxy_username, proxy_password) + if source_address is not None: + sock.bind(source_address) + + sock.connect(dest_pair) + return sock + +class _BaseSocket(socket.socket): + """Allows Python 2's "delegated" methods such as send() to be overridden + """ + def __init__(self, *pos, **kw): + _orig_socket.__init__(self, *pos, **kw) + + self._savedmethods = dict() + for name in self._savenames: + self._savedmethods[name] = getattr(self, name) + delattr(self, name) # Allows normal overriding mechanism to work + + _savenames = list() + +def _makemethod(name): + return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw) +for name in ("sendto", "send", "recvfrom", "recv"): + method = getattr(_BaseSocket, name, None) + + # Determine if the method is not defined the usual way + # as a function in the class. + # Python 2 uses __slots__, so there are descriptors for each method, + # but they are not functions. + if not isinstance(method, Callable): + _BaseSocket._savenames.append(name) + setattr(_BaseSocket, name, _makemethod(name)) + +class socksocket(_BaseSocket): + """socksocket([family[, type[, proto]]]) -> socket object + + Open a SOCKS enabled socket. The parameters are the same as + those of the standard socket init. In order for SOCKS to work, + you must specify family=AF_INET and proto=0. + The "type" argument must be either SOCK_STREAM or SOCK_DGRAM. 
+ """ + + default_proxy = None + + def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, *args, **kwargs): + if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM): + msg = "Socket type must be stream or datagram, not {!r}" + raise ValueError(msg.format(type)) + + _BaseSocket.__init__(self, family, type, proto, *args, **kwargs) + self._proxyconn = None # TCP connection to keep UDP relay alive + + if self.default_proxy: + self.proxy = self.default_proxy + else: + self.proxy = (None, None, None, None, None, None) + self.proxy_sockname = None + self.proxy_peername = None + + def _readall(self, file, count): + """ + Receive EXACTLY the number of bytes requested from the file object. + Blocks until the required number of bytes have been received. + """ + data = b"" + while len(data) < count: + d = file.read(count - len(data)) + if not d: + raise GeneralProxyError("Connection closed unexpectedly") + data += d + return data + + def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None): + """set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]]) + Sets the proxy to be used. + + proxy_type - The type of the proxy to be used. Three types + are supported: PROXY_TYPE_SOCKS4 (including socks4a), + PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP + addr - The address of the server (IP or DNS). + port - The port of the server. Defaults to 1080 for SOCKS + servers and 8080 for HTTP proxy servers. + rdns - Should DNS queries be performed on the remote side + (rather than the local side). The default is True. + Note: This has no effect with SOCKS4 servers. + username - Username to authenticate with to the server. + The default is no authentication. + password - Password to authenticate with to the server. + Only relevant when username is also provided. + """ + self.proxy = (proxy_type, addr, port, rdns, + username.encode() if username else None, + password.encode() if password else None) + + setproxy = set_proxy + + def bind(self, *pos, **kw): + """ + Implements proxy connection for UDP sockets, + which happens during the bind() phase. + """ + proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy + if not proxy_type or self.type != socket.SOCK_DGRAM: + return _orig_socket.bind(self, *pos, **kw) + + if self._proxyconn: + raise socket.error(EINVAL, "Socket already bound to an address") + if proxy_type != SOCKS5: + msg = "UDP only supported by SOCKS5 proxy type" + raise socket.error(EOPNOTSUPP, msg) + _BaseSocket.bind(self, *pos, **kw) + + # Need to specify actual local port because + # some relays drop packets if a port of zero is specified. + # Avoid specifying host address in case of NAT though. 
+ _, port = self.getsockname() + dst = ("0", port) + + self._proxyconn = _orig_socket() + proxy = self._proxy_addr() + self._proxyconn.connect(proxy) + + UDP_ASSOCIATE = b"\x03" + _, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst) + + # The relay is most likely on the same host as the SOCKS proxy, + # but some proxies return a private IP address (10.x.y.z) + host, _ = proxy + _, port = relay + _BaseSocket.connect(self, (host, port)) + self.proxy_sockname = ("0.0.0.0", 0) # Unknown + + def sendto(self, bytes, *args, **kwargs): + if self.type != socket.SOCK_DGRAM: + return _BaseSocket.sendto(self, bytes, *args, **kwargs) + if not self._proxyconn: + self.bind(("", 0)) + + address = args[-1] + flags = args[:-1] + + header = BytesIO() + RSV = b"\x00\x00" + header.write(RSV) + STANDALONE = b"\x00" + header.write(STANDALONE) + self._write_SOCKS5_address(address, header) + + sent = _BaseSocket.send(self, header.getvalue() + bytes, *flags, **kwargs) + return sent - header.tell() + + def send(self, bytes, flags=0, **kwargs): + if self.type == socket.SOCK_DGRAM: + return self.sendto(bytes, flags, self.proxy_peername, **kwargs) + else: + return _BaseSocket.send(self, bytes, flags, **kwargs) + + def recvfrom(self, bufsize, flags=0): + if self.type != socket.SOCK_DGRAM: + return _BaseSocket.recvfrom(self, bufsize, flags) + if not self._proxyconn: + self.bind(("", 0)) + + buf = BytesIO(_BaseSocket.recv(self, bufsize, flags)) + buf.seek(+2, SEEK_CUR) + frag = buf.read(1) + if ord(frag): + raise NotImplementedError("Received UDP packet fragment") + fromhost, fromport = self._read_SOCKS5_address(buf) + + if self.proxy_peername: + peerhost, peerport = self.proxy_peername + if fromhost != peerhost or peerport not in (0, fromport): + raise socket.error(EAGAIN, "Packet filtered") + + return (buf.read(), (fromhost, fromport)) + + def recv(self, *pos, **kw): + bytes, _ = self.recvfrom(*pos, **kw) + return bytes + + def close(self): + if self._proxyconn: + self._proxyconn.close() + return _BaseSocket.close(self) + + def get_proxy_sockname(self): + """ + Returns the bound IP address and port number at the proxy. + """ + return self.proxy_sockname + + getproxysockname = get_proxy_sockname + + def get_proxy_peername(self): + """ + Returns the IP and port number of the proxy. + """ + return _BaseSocket.getpeername(self) + + getproxypeername = get_proxy_peername + + def get_peername(self): + """ + Returns the IP address and port number of the destination + machine (note: get_proxy_peername returns the proxy) + """ + return self.proxy_peername + + getpeername = get_peername + + def _negotiate_SOCKS5(self, *dest_addr): + """ + Negotiates a stream connection through a SOCKS5 server. + """ + CONNECT = b"\x01" + self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(self, + CONNECT, dest_addr) + + def _SOCKS5_request(self, conn, cmd, dst): + """ + Send SOCKS5 request with given command (CMD field) and + address (DST field). Returns resolved DST address that was used. + """ + proxy_type, addr, port, rdns, username, password = self.proxy + + writer = conn.makefile("wb") + reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3 + try: + # First we'll send the authentication packages we support. + if username and password: + # The username/password details were supplied to the + # set_proxy method so we support the USERNAME/PASSWORD + # authentication (in addition to the standard none). 
+ writer.write(b"\x05\x02\x00\x02") + else: + # No username/password were entered, therefore we + # only support connections with no authentication. + writer.write(b"\x05\x01\x00") + + # We'll receive the server's response to determine which + # method was selected + writer.flush() + chosen_auth = self._readall(reader, 2) + + if chosen_auth[0:1] != b"\x05": + # Note: string[i:i+1] is used because indexing of a bytestring + # via bytestring[i] yields an integer in Python 3 + raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + + # Check the chosen authentication method + + if chosen_auth[1:2] == b"\x02": + # Okay, we need to perform a basic username/password + # authentication. + writer.write(b"\x01" + chr(len(username)).encode() + + username + + chr(len(password)).encode() + + password) + writer.flush() + auth_status = self._readall(reader, 2) + if auth_status[0:1] != b"\x01": + # Bad response + raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + if auth_status[1:2] != b"\x00": + # Authentication failed + raise SOCKS5AuthError("SOCKS5 authentication failed") + + # Otherwise, authentication succeeded + + # No authentication is required if 0x00 + elif chosen_auth[1:2] != b"\x00": + # Reaching here is always bad + if chosen_auth[1:2] == b"\xFF": + raise SOCKS5AuthError("All offered SOCKS5 authentication methods were rejected") + else: + raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + + # Now we can request the actual connection + writer.write(b"\x05" + cmd + b"\x00") + resolved = self._write_SOCKS5_address(dst, writer) + writer.flush() + + # Get the response + resp = self._readall(reader, 3) + if resp[0:1] != b"\x05": + raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + + status = ord(resp[1:2]) + if status != 0x00: + # Connection failed: server returned an error + error = SOCKS5_ERRORS.get(status, "Unknown error") + raise SOCKS5Error("{0:#04x}: {1}".format(status, error)) + + # Get the bound address/port + bnd = self._read_SOCKS5_address(reader) + return (resolved, bnd) + finally: + reader.close() + writer.close() + + def _write_SOCKS5_address(self, addr, file): + """ + Return the host and port packed for the SOCKS5 protocol, + and the resolved address as a tuple object. + """ + host, port = addr + proxy_type, _, _, rdns, username, password = self.proxy + + # If the given destination address is an IP address, we'll + # use the IPv4 address request even if remote resolving was specified. + try: + addr_bytes = socket.inet_aton(host) + file.write(b"\x01" + addr_bytes) + host = socket.inet_ntoa(addr_bytes) + except socket.error: + # Well it's not an IP number, so it's probably a DNS name. 
+ if rdns: + # Resolve remotely + host_bytes = host.encode('idna') + file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes) + else: + # Resolve locally + addr_bytes = socket.inet_aton(socket.gethostbyname(host)) + file.write(b"\x01" + addr_bytes) + host = socket.inet_ntoa(addr_bytes) + + file.write(struct.pack(">H", port)) + return host, port + + def _read_SOCKS5_address(self, file): + atyp = self._readall(file, 1) + if atyp == b"\x01": + addr = socket.inet_ntoa(self._readall(file, 4)) + elif atyp == b"\x03": + length = self._readall(file, 1) + addr = self._readall(file, ord(length)) + else: + raise GeneralProxyError("SOCKS5 proxy server sent invalid data") + + port = struct.unpack(">H", self._readall(file, 2))[0] + return addr, port + + def _negotiate_SOCKS4(self, dest_addr, dest_port): + """ + Negotiates a connection through a SOCKS4 server. + """ + proxy_type, addr, port, rdns, username, password = self.proxy + + writer = self.makefile("wb") + reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3 + try: + # Check if the destination address provided is an IP address + remote_resolve = False + try: + addr_bytes = socket.inet_aton(dest_addr) + except socket.error: + # It's a DNS name. Check where it should be resolved. + if rdns: + addr_bytes = b"\x00\x00\x00\x01" + remote_resolve = True + else: + addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr)) + + # Construct the request packet + writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port)) + writer.write(addr_bytes) + + # The username parameter is considered userid for SOCKS4 + if username: + writer.write(username) + writer.write(b"\x00") + + # DNS name if remote resolving is required + # NOTE: This is actually an extension to the SOCKS4 protocol + # called SOCKS4A and may not be supported in all cases. + if remote_resolve: + writer.write(dest_addr.encode('idna') + b"\x00") + writer.flush() + + # Get the response from the server + resp = self._readall(reader, 8) + if resp[0:1] != b"\x00": + # Bad data + raise GeneralProxyError("SOCKS4 proxy server sent invalid data") + + status = ord(resp[1:2]) + if status != 0x5A: + # Connection failed: server returned an error + error = SOCKS4_ERRORS.get(status, "Unknown error") + raise SOCKS4Error("{0:#04x}: {1}".format(status, error)) + + # Get the bound address/port + self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0]) + if remote_resolve: + self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port + else: + self.proxy_peername = dest_addr, dest_port + finally: + reader.close() + writer.close() + + def _negotiate_HTTP(self, dest_addr, dest_port): + """ + Negotiates a connection through an HTTP server. + NOTE: This currently only supports HTTP CONNECT-style proxies. 
+ """ + proxy_type, addr, port, rdns, username, password = self.proxy + + # If we need to resolve locally, we do this now + addr = dest_addr if rdns else socket.gethostbyname(dest_addr) + + self.sendall(b"CONNECT " + addr.encode('idna') + b":" + str(dest_port).encode() + + b" HTTP/1.1\r\n" + b"Host: " + dest_addr.encode('idna') + b"\r\n\r\n") + + # We just need the first line to check if the connection was successful + fobj = self.makefile() + status_line = fobj.readline() + fobj.close() + + if not status_line: + raise GeneralProxyError("Connection closed unexpectedly") + + try: + proto, status_code, status_msg = status_line.split(" ", 2) + except ValueError: + raise GeneralProxyError("HTTP proxy server sent invalid response") + + if not proto.startswith("HTTP/"): + raise GeneralProxyError("Proxy server does not appear to be an HTTP proxy") + + try: + status_code = int(status_code) + except ValueError: + raise HTTPError("HTTP proxy server did not return a valid HTTP status") + + if status_code != 200: + error = "{0}: {1}".format(status_code, status_msg) + if status_code in (400, 403, 405): + # It's likely that the HTTP proxy server does not support the CONNECT tunneling method + error += ("\n[*] Note: The HTTP proxy server may not be supported by PySocks" + " (must be a CONNECT tunnel proxy)") + raise HTTPError(error) + + self.proxy_sockname = (b"0.0.0.0", 0) + self.proxy_peername = addr, dest_port + + _proxy_negotiators = { + SOCKS4: _negotiate_SOCKS4, + SOCKS5: _negotiate_SOCKS5, + HTTP: _negotiate_HTTP + } + + + def connect(self, dest_pair): + """ + Connects to the specified destination through a proxy. + Uses the same API as socket's connect(). + To select the proxy server, use set_proxy(). + + dest_pair - 2-tuple of (IP/hostname, port). + """ + if len(dest_pair) != 2 or dest_pair[0].startswith("["): + # Probably IPv6, not supported -- raise an error, and hope + # Happy Eyeballs (RFC6555) makes sure at least the IPv4 + # connection works... 
+ raise socket.error("PySocks doesn't support IPv6") + + dest_addr, dest_port = dest_pair + + if self.type == socket.SOCK_DGRAM: + if not self._proxyconn: + self.bind(("", 0)) + dest_addr = socket.gethostbyname(dest_addr) + + # If the host address is INADDR_ANY or similar, reset the peer + # address so that packets are received from any peer + if dest_addr == "0.0.0.0" and not dest_port: + self.proxy_peername = None + else: + self.proxy_peername = (dest_addr, dest_port) + return + + proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy + + # Do a minimal input check first + if (not isinstance(dest_pair, (list, tuple)) + or len(dest_pair) != 2 + or not dest_addr + or not isinstance(dest_port, int)): + raise GeneralProxyError("Invalid destination-connection (host, port) pair") + + + if proxy_type is None: + # Treat like regular socket object + self.proxy_peername = dest_pair + _BaseSocket.connect(self, (dest_addr, dest_port)) + return + + proxy_addr = self._proxy_addr() + + try: + # Initial connection to proxy server + _BaseSocket.connect(self, proxy_addr) + + except socket.error as error: + # Error while connecting to proxy + self.close() + proxy_addr, proxy_port = proxy_addr + proxy_server = "{0}:{1}".format(proxy_addr, proxy_port) + printable_type = PRINTABLE_PROXY_TYPES[proxy_type] + + msg = "Error connecting to {0} proxy {1}".format(printable_type, + proxy_server) + raise ProxyConnectionError(msg, error) + + else: + # Connected to proxy server, now negotiate + try: + # Calls negotiate_{SOCKS4, SOCKS5, HTTP} + negotiate = self._proxy_negotiators[proxy_type] + negotiate(self, dest_addr, dest_port) + except socket.error as error: + # Wrap socket errors + self.close() + raise GeneralProxyError("Socket error", error) + except ProxyError: + # Protocol error while negotiating with proxy + self.close() + raise + + def _proxy_addr(self): + """ + Return proxy address to connect to as tuple object + """ + proxy_type, proxy_addr, proxy_port, rdns, username, password = self.proxy + proxy_port = proxy_port or DEFAULT_PORTS.get(proxy_type) + if not proxy_port: + raise GeneralProxyError("Invalid proxy type") + return proxy_addr, proxy_port diff --git a/Shared/lib/python3.4/site-packages/sockshandler.py b/Shared/lib/python3.4/site-packages/sockshandler.py new file mode 100644 index 0000000..26c8343 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/sockshandler.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python +""" +SocksiPy + urllib2 handler + +version: 0.3 +author: e + +This module provides a Handler which you can use with urllib2 to allow it to tunnel your connection through a socks.sockssocket socket, with out monkey patching the original socket... 
+""" +import ssl + +try: + import urllib2 + import httplib +except ImportError: # Python 3 + import urllib.request as urllib2 + import http.client as httplib + +import socks # $ pip install PySocks + +def merge_dict(a, b): + d = a.copy() + d.update(b) + return d + +class SocksiPyConnection(httplib.HTTPConnection): + def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs): + self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password) + httplib.HTTPConnection.__init__(self, *args, **kwargs) + + def connect(self): + self.sock = socks.socksocket() + self.sock.setproxy(*self.proxyargs) + if type(self.timeout) in (int, float): + self.sock.settimeout(self.timeout) + self.sock.connect((self.host, self.port)) + +class SocksiPyConnectionS(httplib.HTTPSConnection): + def __init__(self, proxytype, proxyaddr, proxyport=None, rdns=True, username=None, password=None, *args, **kwargs): + self.proxyargs = (proxytype, proxyaddr, proxyport, rdns, username, password) + httplib.HTTPSConnection.__init__(self, *args, **kwargs) + + def connect(self): + sock = socks.socksocket() + sock.setproxy(*self.proxyargs) + if type(self.timeout) in (int, float): + sock.settimeout(self.timeout) + sock.connect((self.host, self.port)) + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file) + +class SocksiPyHandler(urllib2.HTTPHandler, urllib2.HTTPSHandler): + def __init__(self, *args, **kwargs): + self.args = args + self.kw = kwargs + urllib2.HTTPHandler.__init__(self) + + def http_open(self, req): + def build(host, port=None, timeout=0, **kwargs): + kw = merge_dict(self.kw, kwargs) + conn = SocksiPyConnection(*self.args, host=host, port=port, timeout=timeout, **kw) + return conn + return self.do_open(build, req) + + def https_open(self, req): + def build(host, port=None, timeout=0, **kwargs): + kw = merge_dict(self.kw, kwargs) + conn = SocksiPyConnectionS(*self.args, host=host, port=port, timeout=timeout, **kw) + return conn + return self.do_open(build, req) + +if __name__ == "__main__": + import sys + try: + port = int(sys.argv[1]) + except (ValueError, IndexError): + port = 9050 + opener = urllib2.build_opener(SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, "localhost", port)) + print("HTTP: " + opener.open("http://httpbin.org/ip").read().decode()) + print("HTTPS: " + opener.open("https://httpbin.org/ip").read().decode()) diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/PKG-INFO b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/PKG-INFO new file mode 100644 index 0000000..64dcaec --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/PKG-INFO @@ -0,0 +1,13 @@ +Metadata-Version: 1.1 +Name: stem +Version: 1.4.0 +Summary: Stem is a Python controller library that allows applications to interact with +Tor . 
+Home-page: https://stem.torproject.org/ +Author: Damian Johnson +Author-email: atagar@torproject.org +License: LGPLv3 +Description: UNKNOWN +Keywords: tor onion controller +Platform: UNKNOWN +Provides: stem diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/SOURCES.txt b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/SOURCES.txt new file mode 100644 index 0000000..64fe91f --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/SOURCES.txt @@ -0,0 +1,52 @@ +tor-prompt +stem/__init__.py +stem/connection.py +stem/control.py +stem/exit_policy.py +stem/prereq.py +stem/process.py +stem/socket.py +stem/version.py +stem.egg-info/PKG-INFO +stem.egg-info/SOURCES.txt +stem.egg-info/dependency_links.txt +stem.egg-info/top_level.txt +stem/descriptor/__init__.py +stem/descriptor/export.py +stem/descriptor/extrainfo_descriptor.py +stem/descriptor/hidden_service_descriptor.py +stem/descriptor/microdescriptor.py +stem/descriptor/networkstatus.py +stem/descriptor/reader.py +stem/descriptor/remote.py +stem/descriptor/router_status_entry.py +stem/descriptor/server_descriptor.py +stem/descriptor/tordnsel.py +stem/interpreter/__init__.py +stem/interpreter/arguments.py +stem/interpreter/autocomplete.py +stem/interpreter/commands.py +stem/interpreter/help.py +stem/interpreter/settings.cfg +stem/response/__init__.py +stem/response/add_onion.py +stem/response/authchallenge.py +stem/response/events.py +stem/response/getconf.py +stem/response/getinfo.py +stem/response/mapaddress.py +stem/response/protocolinfo.py +stem/util/__init__.py +stem/util/conf.py +stem/util/connection.py +stem/util/enum.py +stem/util/log.py +stem/util/lru_cache.py +stem/util/ordereddict.py +stem/util/ports.cfg +stem/util/proc.py +stem/util/str_tools.py +stem/util/system.py +stem/util/term.py +stem/util/test_tools.py +stem/util/tor_tools.py \ No newline at end of file diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/dependency_links.txt b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/installed-files.txt b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/installed-files.txt new file mode 100644 index 0000000..52ca914 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/installed-files.txt @@ -0,0 +1,98 @@ +../stem/__init__.py +../stem/connection.py +../stem/control.py +../stem/exit_policy.py +../stem/prereq.py +../stem/process.py +../stem/socket.py +../stem/version.py +../stem/descriptor/__init__.py +../stem/descriptor/export.py +../stem/descriptor/extrainfo_descriptor.py +../stem/descriptor/hidden_service_descriptor.py +../stem/descriptor/microdescriptor.py +../stem/descriptor/networkstatus.py +../stem/descriptor/reader.py +../stem/descriptor/remote.py +../stem/descriptor/router_status_entry.py +../stem/descriptor/server_descriptor.py +../stem/descriptor/tordnsel.py +../stem/interpreter/__init__.py +../stem/interpreter/arguments.py +../stem/interpreter/autocomplete.py +../stem/interpreter/commands.py +../stem/interpreter/help.py +../stem/response/__init__.py +../stem/response/add_onion.py +../stem/response/authchallenge.py +../stem/response/events.py +../stem/response/getconf.py +../stem/response/getinfo.py +../stem/response/mapaddress.py +../stem/response/protocolinfo.py +../stem/util/__init__.py 
+../stem/util/conf.py +../stem/util/connection.py +../stem/util/enum.py +../stem/util/log.py +../stem/util/lru_cache.py +../stem/util/ordereddict.py +../stem/util/proc.py +../stem/util/str_tools.py +../stem/util/system.py +../stem/util/term.py +../stem/util/test_tools.py +../stem/util/tor_tools.py +../stem/interpreter/settings.cfg +../stem/util/ports.cfg +../stem/__pycache__/__init__.cpython-34.pyc +../stem/__pycache__/connection.cpython-34.pyc +../stem/__pycache__/control.cpython-34.pyc +../stem/__pycache__/exit_policy.cpython-34.pyc +../stem/__pycache__/prereq.cpython-34.pyc +../stem/__pycache__/process.cpython-34.pyc +../stem/__pycache__/socket.cpython-34.pyc +../stem/__pycache__/version.cpython-34.pyc +../stem/descriptor/__pycache__/__init__.cpython-34.pyc +../stem/descriptor/__pycache__/export.cpython-34.pyc +../stem/descriptor/__pycache__/extrainfo_descriptor.cpython-34.pyc +../stem/descriptor/__pycache__/hidden_service_descriptor.cpython-34.pyc +../stem/descriptor/__pycache__/microdescriptor.cpython-34.pyc +../stem/descriptor/__pycache__/networkstatus.cpython-34.pyc +../stem/descriptor/__pycache__/reader.cpython-34.pyc +../stem/descriptor/__pycache__/remote.cpython-34.pyc +../stem/descriptor/__pycache__/router_status_entry.cpython-34.pyc +../stem/descriptor/__pycache__/server_descriptor.cpython-34.pyc +../stem/descriptor/__pycache__/tordnsel.cpython-34.pyc +../stem/interpreter/__pycache__/__init__.cpython-34.pyc +../stem/interpreter/__pycache__/arguments.cpython-34.pyc +../stem/interpreter/__pycache__/autocomplete.cpython-34.pyc +../stem/interpreter/__pycache__/commands.cpython-34.pyc +../stem/interpreter/__pycache__/help.cpython-34.pyc +../stem/response/__pycache__/__init__.cpython-34.pyc +../stem/response/__pycache__/add_onion.cpython-34.pyc +../stem/response/__pycache__/authchallenge.cpython-34.pyc +../stem/response/__pycache__/events.cpython-34.pyc +../stem/response/__pycache__/getconf.cpython-34.pyc +../stem/response/__pycache__/getinfo.cpython-34.pyc +../stem/response/__pycache__/mapaddress.cpython-34.pyc +../stem/response/__pycache__/protocolinfo.cpython-34.pyc +../stem/util/__pycache__/__init__.cpython-34.pyc +../stem/util/__pycache__/conf.cpython-34.pyc +../stem/util/__pycache__/connection.cpython-34.pyc +../stem/util/__pycache__/enum.cpython-34.pyc +../stem/util/__pycache__/log.cpython-34.pyc +../stem/util/__pycache__/lru_cache.cpython-34.pyc +../stem/util/__pycache__/ordereddict.cpython-34.pyc +../stem/util/__pycache__/proc.cpython-34.pyc +../stem/util/__pycache__/str_tools.cpython-34.pyc +../stem/util/__pycache__/system.cpython-34.pyc +../stem/util/__pycache__/term.cpython-34.pyc +../stem/util/__pycache__/test_tools.cpython-34.pyc +../stem/util/__pycache__/tor_tools.cpython-34.pyc +./ +dependency_links.txt +top_level.txt +PKG-INFO +SOURCES.txt +../../../../bin/tor-prompt diff --git a/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/top_level.txt b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/top_level.txt new file mode 100644 index 0000000..e63e524 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem-1.4.0.egg-info/top_level.txt @@ -0,0 +1 @@ +stem diff --git a/Shared/lib/python3.4/site-packages/stem/__init__.py b/Shared/lib/python3.4/site-packages/stem/__init__.py new file mode 100644 index 0000000..5134c2f --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/__init__.py @@ -0,0 +1,833 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Library for working with the tor process. 
+ +**Module Overview:** + +:: + + ControllerError - Base exception raised when using the controller. + |- ProtocolError - Malformed socket data. + |- OperationFailed - Tor was unable to successfully complete the operation. + | |- UnsatisfiableRequest - Tor was unable to satisfy a valid request. + | | +- CircuitExtensionFailed - Attempt to make or extend a circuit failed. + | |- DescriptorUnavailable - The given relay descriptor is unavailable. + | +- InvalidRequest - Invalid request. + | +- InvalidArguments - Invalid request parameters. + +- SocketError - Communication with the socket failed. + +- SocketClosed - Socket has been shut down. + +.. data:: Runlevel (enum) + + Rating of importance used for event logging. + + =========== =========== + Runlevel Description + =========== =========== + **ERR** critical issues that impair tor's ability to function + **WARN** non-critical issues the user should be aware of + **NOTICE** information that may be helpful to the user + **INFO** high level runtime information + **DEBUG** low level runtime information + =========== =========== + +.. data:: Signal (enum) + + Signals that the tor process will accept. + + ========================= =========== + Signal Description + ========================= =========== + **RELOAD** or **HUP** reloads our torrc + **SHUTDOWN** or **INT** shut down, waiting ShutdownWaitLength first if we're a relay + **DUMP** or **USR1** dumps information about open connections and circuits to our log + **DEBUG** or **USR2** switch our logging to the DEBUG runlevel + **HALT** or **TERM** exit tor immediately + **NEWNYM** switch to new circuits, so new application requests don't share any circuits with old ones (this also clears our DNS cache) + **CLEARDNSCACHE** clears cached DNS results + **HEARTBEAT** trigger a heartbeat log message + ========================= =========== + +.. data:: Flag (enum) + + Flag assigned to tor relays by the authorities to indicate various + characteristics. + + **Note:** The BADDIRECTORY flag was `removed from tor `_. + + ================= =========== + Flag Description + ================= =========== + **AUTHORITY** relay is a directory authority + **BADEXIT** relay shouldn't be used as an exit due to being either problematic or malicious (`wiki `_) + **BADDIRECTORY** relay shouldn't be used for directory information + **EXIT** relay's exit policy makes it more useful as an exit rather than middle hop + **FAST** relay's suitable for high-bandwidth circuits + **GUARD** relay's suitable for being an entry guard (first hop) + **HSDIR** relay is being used as a v2 hidden service directory + **NAMED** relay can be referred to by its nickname + **RUNNING** relay is currently usable + **STABLE** relay's suitable for long-lived circuits + **UNNAMED** relay isn't currently bound to a nickname + **V2DIR** relay supports the v2 directory protocol + **VALID** relay has been validated + ================= =========== + +.. data:: CircStatus (enum) + + Statuses that a circuit can be in. Tor may provide statuses not in this enum. + + ============ =========== + CircStatus Description + ============ =========== + **LAUNCHED** new circuit was created + **BUILT** circuit finished being created and can accept traffic + **EXTENDED** circuit has been extended by a hop + **FAILED** circuit construction failed + **CLOSED** circuit has been closed + ============ =========== + +.. data:: CircBuildFlag (enum) + + Attributes about how a circuit is built. These were introduced in tor version + 0.2.3.11. 
Tor may provide flags not in this enum. + + ================= =========== + CircBuildFlag Description + ================= =========== + **ONEHOP_TUNNEL** single hop circuit to fetch directory information + **IS_INTERNAL** circuit that won't be used for client traffic + **NEED_CAPACITY** circuit only includes high capacity relays + **NEED_UPTIME** circuit only includes relays with a high uptime + ================= =========== + +.. data:: CircPurpose (enum) + + Description of what a circuit is intended for. These were introduced in tor + version 0.2.1.6. Tor may provide purposes not in this enum. + + ==================== =========== + CircPurpose Description + ==================== =========== + **GENERAL** client traffic or fetching directory information + **HS_CLIENT_INTRO** client side introduction point for a hidden service circuit + **HS_CLIENT_REND** client side hidden service rendezvous circuit + **HS_SERVICE_INTRO** server side introduction point for a hidden service circuit + **HS_SERVICE_REND** server side hidden service rendezvous circuit + **TESTING** testing to see if we're reachable, so we can be used as a relay + **CONTROLLER** circuit that was built by a controller + **MEASURE_TIMEOUT** circuit being kept around to see how long it takes + ==================== =========== + +.. data:: CircClosureReason (enum) + + Reason that a circuit is being closed or failed to be established. Tor may + provide reasons not in this enum. + + ========================= =========== + CircClosureReason Description + ========================= =========== + **NONE** no reason given + **TORPROTOCOL** violation in the tor protocol + **INTERNAL** internal error + **REQUESTED** requested by the client via a TRUNCATE command + **HIBERNATING** relay is currently hibernating + **RESOURCELIMIT** relay is out of memory, sockets, or circuit IDs + **CONNECTFAILED** unable to contact the relay + **OR_IDENTITY** relay had the wrong OR identification + **OR_CONN_CLOSED** connection failed after being established + **FINISHED** circuit has expired (see tor's MaxCircuitDirtiness config option) + **TIMEOUT** circuit construction timed out + **DESTROYED** circuit unexpectedly closed + **NOPATH** not enough relays to make a circuit + **NOSUCHSERVICE** requested hidden service does not exist + **MEASUREMENT_EXPIRED** same as **TIMEOUT** except that it was left open for measurement purposes + ========================= =========== + +.. data:: CircEvent (enum) + + Type of change reflected in a circuit by a CIRC_MINOR event. Tor may provide + event types not in this enum. + + ===================== =========== + CircEvent Description + ===================== =========== + **PURPOSE_CHANGED** circuit purpose or hidden service state has changed + **CANNIBALIZED** circuit connections are being reused for a different circuit + ===================== =========== + +.. data:: HiddenServiceState (enum) + + State that a hidden service circuit can have. These were introduced in tor + version 0.2.3.11. Tor may provide states not in this enum. + + Enumerations fall into four groups based on their prefix... 
+ + ======= =========== + Prefix Description + ======= =========== + HSCI_* client-side introduction-point + HSCR_* client-side rendezvous-point + HSSI_* service-side introduction-point + HSSR_* service-side rendezvous-point + ======= =========== + + ============================= =========== + HiddenServiceState Description + ============================= =========== + **HSCI_CONNECTING** connecting to the introductory point + **HSCI_INTRO_SENT** sent INTRODUCE1 and awaiting a reply + **HSCI_DONE** received a reply, circuit is closing + **HSCR_CONNECTING** connecting to the introductory point + **HSCR_ESTABLISHED_IDLE** rendezvous-point established, awaiting an introduction + **HSCR_ESTABLISHED_WAITING** introduction received, awaiting a rend + **HSCR_JOINED** connected to the hidden service + **HSSI_CONNECTING** connecting to the introductory point + **HSSI_ESTABLISHED** established introductory point + **HSSR_CONNECTING** connecting to the introductory point + **HSSR_JOINED** connected to the rendezvous-point + ============================= =========== + +.. data:: RelayEndReason (enum) + + Reasons why the stream is to be closed. + + =================== =========== + RelayEndReason Description + =================== =========== + **MISC** none of the following reasons + **RESOLVEFAILED** unable to resolve the hostname + **CONNECTREFUSED** remote host refused the connection + **EXITPOLICY** OR refuses to connect to the destination + **DESTROY** circuit is being shut down + **DONE** connection has been closed + **TIMEOUT** connection timed out + **NOROUTE** routing error while contacting the destination + **HIBERNATING** relay is temporarily hibernating + **INTERNAL** internal error at the relay + **RESOURCELIMIT** relay has insufficient resources to service the request + **CONNRESET** connection was unexpectedly reset + **TORPROTOCOL** violation in the tor protocol + **NOTDIRECTORY** directory information requested from a relay that isn't mirroring it + =================== =========== + +.. data:: StreamStatus (enum) + + State that a stream going through tor can have. Tor may provide states not in + this enum. + + ================= =========== + StreamStatus Description + ================= =========== + **NEW** request for a new connection + **NEWRESOLVE** request to resolve an address + **REMAP** address is being re-mapped to another + **SENTCONNECT** sent a connect cell along a circuit + **SENTRESOLVE** sent a resolve cell along a circuit + **SUCCEEDED** stream has been established + **FAILED** stream is detached, and won't be re-established + **DETACHED** stream is detached, but might be re-established + **CLOSED** stream has closed + ================= =========== + +.. data:: StreamClosureReason (enum) + + Reason that a stream is being closed or failed to be established. This + includes all values in the :data:`~stem.RelayEndReason` enumeration as + well as the following. Tor may provide reasons not in this enum. + + ===================== =========== + StreamClosureReason Description + ===================== =========== + **END** endpoint has sent a RELAY_END cell + **PRIVATE_ADDR** endpoint was a private address (127.0.0.1, 10.0.0.1, etc) + ===================== =========== + +.. data:: StreamSource (enum) + + Cause of a stream being remapped to another address. Tor may provide sources + not in this enum. 
+
+ ============= ===========
+ StreamSource Description
+ ============= ===========
+ **CACHE** tor is remapping because of a cached answer
+ **EXIT** exit relay requested the remap
+ ============= ===========
+
+.. data:: StreamPurpose (enum)
+
+ Purpose of the stream. This is only provided with new streams and tor may
+ provide purposes not in this enum.
+
+ ================= ===========
+ StreamPurpose Description
+ ================= ===========
+ **DIR_FETCH** fetching directory information (descriptors, consensus, etc)
+ **DIR_UPLOAD** uploading our descriptor to an authority
+ **DNS_REQUEST** user initiated DNS request
+ **DIRPORT_TEST** checking that our directory port is reachable externally
+ **USER** either relaying user traffic or not one of the above categories
+ ================= ===========
+
+.. data:: ORStatus (enum)
+
+ State that an OR connection can have. Tor may provide states not in this
+ enum.
+
+ =============== ===========
+ ORStatus Description
+ =============== ===========
+ **NEW** received OR connection, starting server-side handshake
+ **LAUNCHED** launched outbound OR connection, starting client-side handshake
+ **CONNECTED** OR connection has been established
+ **FAILED** attempt to establish OR connection failed
+ **CLOSED** OR connection has been closed
+ =============== ===========
+
+.. data:: ORClosureReason (enum)
+
+ Reason that an OR connection is being closed or failed to be established. Tor
+ may provide reasons not in this enum.
+
+ =================== ===========
+ ORClosureReason Description
+ =================== ===========
+ **DONE** OR connection shut down cleanly
+ **CONNECTREFUSED** got a ECONNREFUSED when connecting to the relay
+ **IDENTITY** identity of the relay wasn't what we expected
+ **CONNECTRESET** got a ECONNRESET or similar error from relay
+ **TIMEOUT** got a ETIMEOUT or similar error from relay
+ **NOROUTE** got a ENOTCONN, ENETUNREACH, ENETDOWN, EHOSTUNREACH, or similar error from relay
+ **IOERROR** got a different kind of error from relay
+ **RESOURCELIMIT** relay has insufficient resources to service the request
+ **MISC** connection refused for another reason
+ **PT_MISSING** no pluggable transport was available
+ =================== ===========
+
+.. data:: AuthDescriptorAction (enum)
+
+ Actions that directory authorities might take with relay descriptors. Tor may
+ provide reasons not in this enum.
+
+ ===================== ===========
+ AuthDescriptorAction Description
+ ===================== ===========
+ **ACCEPTED** accepting the descriptor as the newest version
+ **DROPPED** descriptor rejected without notifying the relay
+ **REJECTED** relay notified that its descriptor has been rejected
+ ===================== ===========
+
+.. data:: StatusType (enum)
+
+ Sources for tor status events. Tor may provide types not in this enum.
+
+ ============= ===========
+ StatusType Description
+ ============= ===========
+ **GENERAL** general tor activity, not specifically as a client or relay
+ **CLIENT** related to our activity as a tor client
+ **SERVER** related to our activity as a tor relay
+ ============= ===========
+
+.. data:: GuardType (enum)
+
+ Use a guard relay can be for. Tor may provide types not in this enum.
+
+ =========== ===========
+ GuardType Description
+ =========== ===========
+ **ENTRY** used to connect to the tor network
+ =========== ===========
+
+.. data:: GuardStatus (enum)
+
+ Status a guard relay can have. Tor may provide types not in this enum.
+
+ ============= ===========
+ GuardStatus Description
+ ============= ===========
+ **NEW** new guard that we weren't previously using
+ **DROPPED** removed from use as one of our guards
+ **UP** guard is now reachable
+ **DOWN** guard is now unreachable
+ **BAD** consensus or relay considers this relay to be unusable as a guard
+ **GOOD** consensus or relay considers this relay to be usable as a guard
+ ============= ===========
+
+.. data:: TimeoutSetType (enum)
+
+ Way in which the timeout value of a circuit is changing. Tor may provide
+ types not in this enum.
+
+ =============== ===========
+ TimeoutSetType Description
+ =============== ===========
+ **COMPUTED** tor has computed a new timeout based on prior circuits
+ **RESET** timeout reverted to its default
+ **SUSPENDED** timeout reverted to its default until network connectivity has recovered
+ **DISCARD** throwing out timeout value from when the network was down
+ **RESUME** resumed calculations to determine the proper timeout
+ =============== ===========
+
+.. data:: ConnectionType (enum)
+
+ Purpose for a tor connection. Tor may provide types not in this enum.
+
+ The meaning behind these values is a bit unclear, pending :trac:`10086`.
+
+ =============== ===========
+ ConnectionType Description
+ =============== ===========
+ **OR** carrying traffic within the tor network
+ **DIR** fetching or sending tor descriptor data
+ **EXIT** carrying traffic between the tor network and an external destination
+ =============== ===========
+
+.. data:: TokenBucket (enum)
+
+ Bucket categories of TB_EMPTY events.
+
+ =============== ===========
+ TokenBucket Description
+ =============== ===========
+ **GLOBAL** global token bucket
+ **RELAY** relay token bucket
+ **ORCONN** bucket used for OR connections
+ =============== ===========
+
+.. data:: HSDescAction (enum)
+
+ Action being taken in a HS_DESC event.
+
+ =============== ===========
+ HSDescAction Description
+ =============== ===========
+ **REQUESTED** uncached hidden service descriptor is being requested
+ **UPLOAD** descriptor is being uploaded with HSPOST
+ **RECEIVED** hidden service descriptor has been retrieved
+ **UPLOADED** descriptor was uploaded with HSPOST
+ **IGNORE** fetched descriptor was ignored because we already have its v0 descriptor
+ **FAILED** we were unable to retrieve the descriptor
+ =============== ===========
+
+.. data:: HSDescReason (enum)
+
+ Reason for the hidden service descriptor to fail to be fetched.
+
+ =================== ===========
+ HSDescReason Description
+ =================== ===========
+ **BAD_DESC** descriptor was unparseable
+ **QUERY_REJECTED** hidden service directory refused to provide the descriptor
+ **UPLOAD_REJECTED** descriptor was rejected by the hidden service directory
+ **NOT_FOUND** descriptor with the given identifier wasn't found
+ **UNEXPECTED** failure type is unknown
+ =================== ===========
+
+.. data:: HSAuth (enum)
+
+ Type of authentication being used for a HS_DESC event.
+ + ================= =========== + HSAuth Description + ================= =========== + **NO_AUTH** no authentication + **BASIC_AUTH** general hidden service authentication + **STEALTH_AUTH** authentication method that hides service activity from unauthorized clients + **UNKNOWN** unrecognized method of authentication + ================= =========== +""" + +__version__ = '1.4.0' +__author__ = 'Damian Johnson' +__contact__ = 'atagar@torproject.org' +__url__ = 'https://stem.torproject.org/' +__license__ = 'LGPLv3' + +__all__ = [ + 'descriptor', + 'response', + 'util', + 'connection', + 'control', + 'exit_policy', + 'prereq', + 'process', + 'socket', + 'version', + 'ControllerError', + 'ProtocolError', + 'OperationFailed', + 'UnsatisfiableRequest', + 'CircuitExtensionFailed', + 'DescriptorUnavailable', + 'InvalidRequest', + 'InvalidArguments', + 'SocketError', + 'SocketClosed', + 'Runlevel', + 'Signal', + 'Flag', + 'CircStatus', + 'CircBuildFlag', + 'CircPurpose', + 'CircClosureReason', + 'CircEvent', + 'HiddenServiceState', + 'HSAuth', + 'HSDescAction', + 'HSDescReason', + 'RelayEndReason', + 'StreamStatus', + 'StreamClosureReason', + 'StreamSource', + 'StreamPurpose', + 'ORStatus', + 'ORClosureReason', + 'AuthDescriptorAction', + 'StatusType', + 'GuardType', + 'GuardStatus', + 'TimeoutSetType', +] + +import stem.prereq + +if stem.prereq.is_python_3(): + str_type = str + int_type = int +else: + str_type = unicode + int_type = long + +import stem.util.enum + +# Constant to indicate an undefined argument default. Usually we'd use None for +# this, but users will commonly provide None as the argument so need something +# else fairly unique... + +UNDEFINED = '' + + +class ControllerError(Exception): + 'Base error for controller communication issues.' + + +class ProtocolError(ControllerError): + 'Malformed content from the control socket.' + + +class OperationFailed(ControllerError): + """ + Base exception class for failed operations that return an error code + + :var str code: error code returned by Tor + :var str message: error message returned by Tor or a human readable error + message + """ + + def __init__(self, code = None, message = None): + super(ControllerError, self).__init__(message) + self.code = code + self.message = message + + +class UnsatisfiableRequest(OperationFailed): + """ + Exception raised if Tor was unable to process our request. + """ + + +class CircuitExtensionFailed(UnsatisfiableRequest): + """ + An attempt to create or extend a circuit failed. + + :var stem.response.CircuitEvent circ: response notifying us of the failure + """ + + def __init__(self, message, circ = None): + super(CircuitExtensionFailed, self).__init__(message = message) + self.circ = circ + + +class DescriptorUnavailable(OperationFailed): + """ + Tor was unable to provide a descriptor for the given relay. + """ + + def __init__(self, message): + super(DescriptorUnavailable, self).__init__(message = message) + + +class InvalidRequest(OperationFailed): + """ + Exception raised when the request was invalid or malformed. + """ + + +class InvalidArguments(InvalidRequest): + """ + Exception class for requests which had invalid arguments. 
+ + :var str code: error code returned by Tor + :var str message: error message returned by Tor or a human readable error + message + :var list arguments: a list of arguments which were invalid + """ + + def __init__(self, code = None, message = None, arguments = None): + super(InvalidArguments, self).__init__(code, message) + self.arguments = arguments + + +class SocketError(ControllerError): + 'Error arose while communicating with the control socket.' + + +class SocketClosed(SocketError): + 'Control socket was closed before completing the message.' + +Runlevel = stem.util.enum.UppercaseEnum( + 'DEBUG', + 'INFO', + 'NOTICE', + 'WARN', + 'ERR', +) + +Flag = stem.util.enum.Enum( + ('AUTHORITY', 'Authority'), + ('BADEXIT', 'BadExit'), + ('BADDIRECTORY', 'BadDirectory'), + ('EXIT', 'Exit'), + ('FAST', 'Fast'), + ('GUARD', 'Guard'), + ('HSDIR', 'HSDir'), + ('NAMED', 'Named'), + ('RUNNING', 'Running'), + ('STABLE', 'Stable'), + ('UNNAMED', 'Unnamed'), + ('V2DIR', 'V2Dir'), + ('V3DIR', 'V3Dir'), + ('VALID', 'Valid'), +) + +Signal = stem.util.enum.UppercaseEnum( + 'RELOAD', + 'HUP', + 'SHUTDOWN', + 'INT', + 'DUMP', + 'USR1', + 'DEBUG', + 'USR2', + 'HALT', + 'TERM', + 'NEWNYM', + 'CLEARDNSCACHE', + 'HEARTBEAT', +) + +CircStatus = stem.util.enum.UppercaseEnum( + 'LAUNCHED', + 'BUILT', + 'EXTENDED', + 'FAILED', + 'CLOSED', +) + +CircBuildFlag = stem.util.enum.UppercaseEnum( + 'ONEHOP_TUNNEL', + 'IS_INTERNAL', + 'NEED_CAPACITY', + 'NEED_UPTIME', +) + +CircPurpose = stem.util.enum.UppercaseEnum( + 'GENERAL', + 'HS_CLIENT_INTRO', + 'HS_CLIENT_REND', + 'HS_SERVICE_INTRO', + 'HS_SERVICE_REND', + 'TESTING', + 'CONTROLLER', + 'MEASURE_TIMEOUT', +) + +CircClosureReason = stem.util.enum.UppercaseEnum( + 'NONE', + 'TORPROTOCOL', + 'INTERNAL', + 'REQUESTED', + 'HIBERNATING', + 'RESOURCELIMIT', + 'CONNECTFAILED', + 'OR_IDENTITY', + 'OR_CONN_CLOSED', + 'FINISHED', + 'TIMEOUT', + 'DESTROYED', + 'NOPATH', + 'NOSUCHSERVICE', + 'MEASUREMENT_EXPIRED', +) + +CircEvent = stem.util.enum.UppercaseEnum( + 'PURPOSE_CHANGED', + 'CANNIBALIZED', +) + +HiddenServiceState = stem.util.enum.UppercaseEnum( + 'HSCI_CONNECTING', + 'HSCI_INTRO_SENT', + 'HSCI_DONE', + 'HSCR_CONNECTING', + 'HSCR_ESTABLISHED_IDLE', + 'HSCR_ESTABLISHED_WAITING', + 'HSCR_JOINED', + 'HSSI_CONNECTING', + 'HSSI_ESTABLISHED', + 'HSSR_CONNECTING', + 'HSSR_JOINED', +) + +RelayEndReason = stem.util.enum.UppercaseEnum( + 'MISC', + 'RESOLVEFAILED', + 'CONNECTREFUSED', + 'EXITPOLICY', + 'DESTROY', + 'DONE', + 'TIMEOUT', + 'NOROUTE', + 'HIBERNATING', + 'INTERNAL', + 'RESOURCELIMIT', + 'CONNRESET', + 'TORPROTOCOL', + 'NOTDIRECTORY', +) + +StreamStatus = stem.util.enum.UppercaseEnum( + 'NEW', + 'NEWRESOLVE', + 'REMAP', + 'SENTCONNECT', + 'SENTRESOLVE', + 'SUCCEEDED', + 'FAILED', + 'DETACHED', + 'CLOSED', +) + +# StreamClosureReason is a superset of RelayEndReason +StreamClosureReason = stem.util.enum.UppercaseEnum(*(RelayEndReason.keys() + [ + 'END', + 'PRIVATE_ADDR', +])) + +StreamSource = stem.util.enum.UppercaseEnum( + 'CACHE', + 'EXIT', +) + +StreamPurpose = stem.util.enum.UppercaseEnum( + 'DIR_FETCH', + 'DIR_UPLOAD', + 'DNS_REQUEST', + 'DIRPORT_TEST', + 'USER', +) + +ORStatus = stem.util.enum.UppercaseEnum( + 'NEW', + 'LAUNCHED', + 'CONNECTED', + 'FAILED', + 'CLOSED', +) + +ORClosureReason = stem.util.enum.UppercaseEnum( + 'DONE', + 'CONNECTREFUSED', + 'IDENTITY', + 'CONNECTRESET', + 'TIMEOUT', + 'NOROUTE', + 'IOERROR', + 'RESOURCELIMIT', + 'MISC', + 'PT_MISSING', +) + +AuthDescriptorAction = stem.util.enum.UppercaseEnum( + 'ACCEPTED', + 'DROPPED', + 'REJECTED', 
+) + +StatusType = stem.util.enum.UppercaseEnum( + 'GENERAL', + 'CLIENT', + 'SERVER', +) + +GuardType = stem.util.enum.UppercaseEnum( + 'ENTRY', +) + +GuardStatus = stem.util.enum.UppercaseEnum( + 'NEW', + 'UP', + 'DOWN', + 'BAD', + 'GOOD', + 'DROPPED', +) + +TimeoutSetType = stem.util.enum.UppercaseEnum( + 'COMPUTED', + 'RESET', + 'SUSPENDED', + 'DISCARD', + 'RESUME', +) + +ConnectionType = stem.util.enum.UppercaseEnum( + 'OR', + 'DIR', + 'EXIT', +) + +TokenBucket = stem.util.enum.UppercaseEnum( + 'GLOBAL', + 'RELAY', + 'ORCONN', +) + +HSDescAction = stem.util.enum.UppercaseEnum( + 'REQUESTED', + 'UPLOAD', + 'RECEIVED', + 'UPLOADED', + 'IGNORE', + 'FAILED', +) + +HSDescReason = stem.util.enum.UppercaseEnum( + 'BAD_DESC', + 'QUERY_REJECTED', + 'UPLOAD_REJECTED', + 'NOT_FOUND', + 'UNEXPECTED', +) + +HSAuth = stem.util.enum.UppercaseEnum( + 'NO_AUTH', + 'BASIC_AUTH', + 'STEALTH_AUTH', + 'UNKNOWN', +) diff --git a/Shared/lib/python3.4/site-packages/stem/connection.py b/Shared/lib/python3.4/site-packages/stem/connection.py new file mode 100644 index 0000000..fb85225 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/connection.py @@ -0,0 +1,1284 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Functions for connecting and authenticating to the tor process. + +The :func:`~stem.connection.connect` function give an easy, one line +method for getting an authenticated control connection. This is handy for CLI +applications and the python interactive interpreter, but does several things +that makes it undesirable for applications (uses stdin/stdout, suppresses +exceptions, etc). + +:: + + import sys + + from stem.connection import connect + + if __name__ == '__main__': + controller = connect() + + if not controller: + sys.exit(1) # unable to get a connection + + print 'Tor is running version %s' % controller.get_version() + controller.close() + +:: + + % python example.py + Tor is running version 0.2.4.10-alpha-dev (git-8be6058d8f31e578) + +... or if Tor isn't running... + +:: + + % python example.py + [Errno 111] Connection refused + +The :func:`~stem.connection.authenticate` function, however, gives easy but +fine-grained control over the authentication process. For instance... + +:: + + import sys + import getpass + import stem.connection + import stem.socket + + try: + control_socket = stem.socket.ControlPort(port = 9051) + except stem.SocketError as exc: + print 'Unable to connect to port 9051 (%s)' % exc + sys.exit(1) + + try: + stem.connection.authenticate(control_socket) + except stem.connection.IncorrectSocketType: + print 'Please check in your torrc that 9051 is the ControlPort.' + print 'Maybe you configured it to be the ORPort or SocksPort instead?' 
+ sys.exit(1) + except stem.connection.MissingPassword: + controller_password = getpass.getpass('Controller password: ') + + try: + stem.connection.authenticate_password(control_socket, controller_password) + except stem.connection.PasswordAuthFailed: + print 'Unable to authenticate, password is incorrect' + sys.exit(1) + except stem.connection.AuthenticationFailure as exc: + print 'Unable to authenticate: %s' % exc + sys.exit(1) + +**Module Overview:** + +:: + + connect - Simple method for getting authenticated control connection + + authenticate - Main method for authenticating to a control socket + authenticate_none - Authenticates to an open control socket + authenticate_password - Authenticates to a socket supporting password auth + authenticate_cookie - Authenticates to a socket supporting cookie auth + authenticate_safecookie - Authenticates to a socket supporting safecookie auth + + get_protocolinfo - Issues a PROTOCOLINFO query + + AuthenticationFailure - Base exception raised for authentication failures + |- UnrecognizedAuthMethods - Authentication methods are unsupported + |- IncorrectSocketType - Socket does not speak the tor control protocol + | + |- OpenAuthFailed - Failure when authenticating by an open socket + | +- OpenAuthRejected - Tor rejected this method of authentication + | + |- PasswordAuthFailed - Failure when authenticating by a password + | |- PasswordAuthRejected - Tor rejected this method of authentication + | |- IncorrectPassword - Password was rejected + | +- MissingPassword - Socket supports password auth but wasn't attempted + | + |- CookieAuthFailed - Failure when authenticating by a cookie + | |- CookieAuthRejected - Tor rejected this method of authentication + | |- IncorrectCookieValue - Authentication cookie was rejected + | |- IncorrectCookieSize - Size of the cookie file is incorrect + | |- UnreadableCookieFile - Unable to read the contents of the auth cookie + | +- AuthChallengeFailed - Failure completing the authchallenge request + | |- AuthChallengeUnsupported - Tor doesn't recognize the AUTHCHALLENGE command + | |- AuthSecurityFailure - Server provided the wrong nonce credentials + | |- InvalidClientNonce - The client nonce is invalid + | +- UnrecognizedAuthChallengeMethod - AUTHCHALLENGE does not support the given methods. + | + +- MissingAuthInfo - Unexpected PROTOCOLINFO response, missing auth info + |- NoAuthMethods - Missing any methods for authenticating + +- NoAuthCookie - Supports cookie auth but doesn't have its path + +.. data:: AuthMethod (enum) + + Enumeration of PROTOCOLINFO responses for supported authentication methods. + + ============== =========== + AuthMethod Description + ============== =========== + **NONE** No authentication required. + **PASSWORD** Password required, see tor's HashedControlPassword option. + **COOKIE** Contents of the cookie file required, see tor's CookieAuthentication option. + **SAFECOOKIE** Need to reply to a hmac challenge using the contents of the cookie file. + **UNKNOWN** Tor provided one or more authentication methods that we don't recognize, probably something new. 
+ ============== =========== +""" + +import binascii +import getpass +import os + +import stem.control +import stem.response +import stem.socket +import stem.util.connection +import stem.util.enum +import stem.util.str_tools +import stem.util.system +import stem.version + +from stem.util import log + +AuthMethod = stem.util.enum.Enum('NONE', 'PASSWORD', 'COOKIE', 'SAFECOOKIE', 'UNKNOWN') + +CLIENT_HASH_CONSTANT = b'Tor safe cookie authentication controller-to-server hash' +SERVER_HASH_CONSTANT = b'Tor safe cookie authentication server-to-controller hash' + +MISSING_PASSWORD_BUG_MSG = """ +BUG: You provided a password but despite this stem reported that it was +missing. This shouldn't happen - please let us know about it! + + http://bugs.torproject.org +""" + +UNRECOGNIZED_AUTH_TYPE_MSG = """ +Tor is using a type of authentication we do not recognize... + + {auth_methods} + +Please check that stem is up to date and if there is an existing issue on +'http://bugs.torproject.org'. If there isn't one then let us know! +""" + + +UNREADABLE_COOKIE_FILE_MSG = """ +We were unable to read tor's authentication cookie... + + Path: {path} + Issue: {issue} +""" + +WRONG_PORT_TYPE_MSG = """ +Please check in your torrc that {port} is the ControlPort. Maybe you +configured it to be the ORPort or SocksPort instead? +""" + +WRONG_SOCKET_TYPE_MSG = """ +Unable to connect to tor. Are you sure the interface you specified belongs to +tor? +""" + +CONNECT_MESSAGES = { + 'general_auth_failure': 'Unable to authenticate: {error}', + 'incorrect_password': 'Incorrect password', + 'no_control_port': "Unable to connect to tor. Maybe it's running without a ControlPort?", + 'password_prompt': 'Tor controller password:', + 'needs_password': 'Tor requires a password to authenticate', + 'socket_doesnt_exist': "The socket file you specified ({path}) doesn't exist", + 'tor_isnt_running': "Unable to connect to tor. Are you sure it's running?", + 'unable_to_use_port': 'Unable to connect to {address}:{port}: {error}', + 'unable_to_use_socket': "Unable to connect to '{path}': {error}", + 'missing_password_bug': MISSING_PASSWORD_BUG_MSG.strip(), + 'uncrcognized_auth_type': UNRECOGNIZED_AUTH_TYPE_MSG.strip(), + 'unreadable_cookie_file': UNREADABLE_COOKIE_FILE_MSG.strip(), + 'wrong_port_type': WRONG_PORT_TYPE_MSG.strip(), + 'wrong_socket_type': WRONG_SOCKET_TYPE_MSG.strip(), +} + + +def connect(control_port = ('127.0.0.1', 9051), control_socket = '/var/run/tor/control', password = None, password_prompt = False, chroot_path = None, controller = stem.control.Controller): + """ + Convenience function for quickly getting a control connection. This is very + handy for debugging or CLI setup, handling setup and prompting for a password + if necessary (and none is provided). If any issues arise this prints a + description of the problem and returns **None**. + + If both a **control_port** and **control_socket** are provided then the + **control_socket** is tried first, and this provides a generic error message + if they're both unavailable. + + In much the same vein as git porcelain commands, users should not rely on + details of how this works. Messages and details of this function's behavior + could change in the future. + + .. 
versionadded:: 1.2.0 + + :param tuple contol_port: address and port tuple, for instance **('127.0.0.1', 9051)** + :param str path: path where the control socket is located + :param str password: passphrase to authenticate to the socket + :param bool password_prompt: prompt for the controller password if it wasn't + supplied + :param str chroot_path: path prefix if in a chroot environment + :param Class controller: :class:`~stem.control.BaseController` subclass to be + returned, this provides a :class:`~stem.socket.ControlSocket` if **None** + + :returns: authenticated control connection, the type based on the controller argument + + :raises: **ValueError** if given an invalid control_port, or both + **control_port** and **control_socket** are **None** + """ + + if control_port is None and control_socket is None: + raise ValueError('Neither a control port nor control socket were provided. Nothing to connect to.') + elif control_port: + if len(control_port) != 2: + raise ValueError('The control_port argument for connect() should be an (address, port) tuple.') + elif not stem.util.connection.is_valid_ipv4_address(control_port[0]): + raise ValueError("'%s' isn't a vaid IPv4 address" % control_port[0]) + elif not stem.util.connection.is_valid_port(control_port[1]): + raise ValueError("'%s' isn't a valid port" % control_port[1]) + + control_connection, error_msg = None, '' + + if control_socket: + if os.path.exists(control_socket): + try: + control_connection = stem.socket.ControlSocketFile(control_socket) + except stem.SocketError as exc: + error_msg = CONNECT_MESSAGES['unable_to_use_socket'].format(path = control_socket, error = exc) + else: + error_msg = CONNECT_MESSAGES['socket_doesnt_exist'].format(path = control_socket) + + if control_port and not control_connection: + address, port = control_port + + try: + control_connection = stem.socket.ControlPort(address, port) + except stem.SocketError as exc: + error_msg = CONNECT_MESSAGES['unable_to_use_port'].format(address = address, port = port, error = exc) + + # If unable to connect to either a control socket or port then finally fail + # out. If we only attempted to connect to one of them then provide the error + # output from that. Otherwise we provide a more generic error message. + # + # We check for a 'tor.real' process name because that's what TBB uses. + + if not control_connection: + if control_socket and control_port: + is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real') + error_msg = CONNECT_MESSAGES['no_control_port'] if is_tor_running else CONNECT_MESSAGES['tor_isnt_running'] + + print(error_msg) + return None + + return _connect_auth(control_connection, password, password_prompt, chroot_path, controller) + + +def connect_port(address = '127.0.0.1', port = 9051, password = None, chroot_path = None, controller = stem.control.Controller): + """ + Convenience function for quickly getting a control connection. This is very + handy for debugging or CLI setup, handling setup and prompting for a password + if necessary (and none is provided). If any issues arise this prints a + description of the problem and returns **None**. + + .. deprecated:: 1.2.0 + Use :func:`~stem.connection.connect` instead. 
+ + :param str address: ip address of the controller + :param int port: port number of the controller + :param str password: passphrase to authenticate to the socket + :param str chroot_path: path prefix if in a chroot environment + :param Class controller: :class:`~stem.control.BaseController` subclass to be + returned, this provides a :class:`~stem.socket.ControlSocket` if **None** + + :returns: authenticated control connection, the type based on the controller argument + """ + + try: + control_port = stem.socket.ControlPort(address, port) + except stem.SocketError as exc: + print(exc) + return None + + return _connect_auth(control_port, password, True, chroot_path, controller) + + +def connect_socket_file(path = '/var/run/tor/control', password = None, chroot_path = None, controller = stem.control.Controller): + """ + Convenience function for quickly getting a control connection. For more + information see the :func:`~stem.connection.connect_port` function. + + In much the same vein as git porcelain commands, users should not rely on + details of how this works. Messages or details of this function's behavior + might change in the future. + + .. deprecated:: 1.2.0 + Use :func:`~stem.connection.connect` instead. + + :param str path: path where the control socket is located + :param str password: passphrase to authenticate to the socket + :param str chroot_path: path prefix if in a chroot environment + :param Class controller: :class:`~stem.control.BaseController` subclass to be + returned, this provides a :class:`~stem.socket.ControlSocket` if **None** + + :returns: authenticated control connection, the type based on the controller argument + """ + + try: + control_socket = stem.socket.ControlSocketFile(path) + except stem.SocketError as exc: + print(exc) + return None + + return _connect_auth(control_socket, password, True, chroot_path, controller) + + +def _connect_auth(control_socket, password, password_prompt, chroot_path, controller): + """ + Helper for the connect_* functions that authenticates the socket and + constructs the controller. 
+ + :param stem.socket.ControlSocket control_socket: socket being authenticated to + :param str password: passphrase to authenticate to the socket + :param bool password_prompt: prompt for the controller password if it wasn't + supplied + :param str chroot_path: path prefix if in a chroot environment + :param Class controller: :class:`~stem.control.BaseController` subclass to be + returned, this provides a :class:`~stem.socket.ControlSocket` if **None** + + :returns: authenticated control connection, the type based on the controller argument + """ + + try: + authenticate(control_socket, password, chroot_path) + + if controller is None: + return control_socket + else: + return controller(control_socket, is_authenticated = True) + except IncorrectSocketType: + if isinstance(control_socket, stem.socket.ControlPort): + print(CONNECT_MESSAGES['wrong_port_type'].format(port = control_socket.get_port())) + else: + print(CONNECT_MESSAGES['wrong_socket_type']) + + control_socket.close() + return None + except UnrecognizedAuthMethods as exc: + print(CONNECT_MESSAGES['uncrcognized_auth_type'].format(auth_methods = ', '.join(exc.unknown_auth_methods))) + control_socket.close() + return None + except IncorrectPassword: + print(CONNECT_MESSAGES['incorrect_password']) + control_socket.close() + return None + except MissingPassword: + if password is not None: + control_socket.close() + raise ValueError(CONNECT_MESSAGES['missing_password_bug']) + + if password_prompt: + try: + password = getpass.getpass(CONNECT_MESSAGES['password_prompt'] + ' ') + except KeyboardInterrupt: + control_socket.close() + return None + + return _connect_auth(control_socket, password, password_prompt, chroot_path, controller) + else: + print(CONNECT_MESSAGES['needs_password']) + control_socket.close() + return None + except UnreadableCookieFile as exc: + print(CONNECT_MESSAGES['unreadable_cookie_file'].format(path = exc.cookie_path, issue = str(exc))) + control_socket.close() + return None + except AuthenticationFailure as exc: + print(CONNECT_MESSAGES['general_auth_failure'].format(error = exc)) + control_socket.close() + return None + + +def authenticate(controller, password = None, chroot_path = None, protocolinfo_response = None): + """ + Authenticates to a control socket using the information provided by a + PROTOCOLINFO response. In practice this will often be all we need to + authenticate, raising an exception if all attempts to authenticate fail. + + All exceptions are subclasses of AuthenticationFailure so, in practice, + callers should catch the types of authentication failure that they care + about, then have a :class:`~stem.connection.AuthenticationFailure` catch-all + at the end. + + This can authenticate to either a :class:`~stem.control.BaseController` or + :class:`~stem.socket.ControlSocket`. + + :param controller: tor controller or socket to be authenticated + :param str password: passphrase to present to the socket if it uses password + authentication (skips password auth if **None**) + :param str chroot_path: path prefix if in a chroot environment + :param stem.response.protocolinfo.ProtocolInfoResponse protocolinfo_response: + tor protocolinfo response, this is retrieved on our own if **None** + + :raises: If all attempts to authenticate fails then this will raise a + :class:`~stem.connection.AuthenticationFailure` subclass. Since this may + try multiple authentication methods it may encounter multiple exceptions. + If so then the exception this raises is prioritized as follows... 
+
+ * :class:`stem.connection.IncorrectSocketType`
+
+ The controller does not speak the tor control protocol. Most often this
+ happens because the user confused the SocksPort or ORPort with the
+ ControlPort.
+
+ * :class:`stem.connection.UnrecognizedAuthMethods`
+
+ All of the authentication methods tor will accept are new and
+ unrecognized. Please upgrade stem and, if that doesn't work, file a
+ ticket on 'trac.torproject.org' and I'd be happy to add support.
+
+ * :class:`stem.connection.MissingPassword`
+
+ We were unable to authenticate but didn't attempt password authentication
+ because none was provided. You should prompt the user for a password and
+ try again via 'authenticate_password'.
+
+ * :class:`stem.connection.IncorrectPassword`
+
+ We were provided with a password but it was incorrect.
+
+ * :class:`stem.connection.IncorrectCookieSize`
+
+ Tor allows for authentication by reading a cookie file, but that file
+ is the wrong size to be an authentication cookie.
+
+ * :class:`stem.connection.UnreadableCookieFile`
+
+ Tor allows for authentication by reading a cookie file, but we can't
+ read that file (probably due to permissions).
+
+ * **\***:class:`stem.connection.IncorrectCookieValue`
+
+ Tor allows for authentication by reading a cookie file, but rejected
+ the contents of that file.
+
+ * **\***:class:`stem.connection.AuthChallengeUnsupported`
+
+ Tor doesn't recognize the AUTHCHALLENGE command. This is probably a Tor
+ version prior to SAFECOOKIE being implemented, but this exception shouldn't
+ arise because we won't attempt SAFECOOKIE auth unless Tor claims to
+ support it.
+
+ * **\***:class:`stem.connection.UnrecognizedAuthChallengeMethod`
+
+ Tor couldn't recognize the AUTHCHALLENGE method Stem sent to it. This
+ shouldn't happen at all.
+
+ * **\***:class:`stem.connection.InvalidClientNonce`
+
+ Tor says that the client nonce provided by Stem during the AUTHCHALLENGE
+ process is invalid.
+
+ * **\***:class:`stem.connection.AuthSecurityFailure`
+
+ Nonce value provided by the server was invalid.
+
+ * **\***:class:`stem.connection.OpenAuthRejected`
+
+ Tor says that it allows for authentication without any credentials, but
+ then rejected our authentication attempt.
+
+ * **\***:class:`stem.connection.MissingAuthInfo`
+
+ Tor provided us with a PROTOCOLINFO reply that is technically valid, but
+ missing the information we need to authenticate.
+
+ * **\***:class:`stem.connection.AuthenticationFailure`
+
+ There are numerous other ways that authentication could have failed
+ including socket failures, malformed controller responses, etc. These
+ mostly constitute transient failures or bugs.
+
+ **\*** In practice it is highly unusual for this to occur, being more of a
+ theoretical possibility rather than something you should expect. It's fine
+ to treat these as errors. If you have a use case where this commonly
+ happens, please file a ticket on 'trac.torproject.org'.
+
+ In the future new :class:`~stem.connection.AuthenticationFailure`
+ subclasses may be added to allow for better error handling.
+ """ + + if not protocolinfo_response: + try: + protocolinfo_response = get_protocolinfo(controller) + except stem.ProtocolError: + raise IncorrectSocketType('unable to use the control socket') + except stem.SocketError as exc: + raise AuthenticationFailure('socket connection failed (%s)' % exc) + + auth_methods = list(protocolinfo_response.auth_methods) + auth_exceptions = [] + + if len(auth_methods) == 0: + raise NoAuthMethods('our PROTOCOLINFO response did not have any methods for authenticating') + + # remove authentication methods that are either unknown or for which we don't + # have an input + if AuthMethod.UNKNOWN in auth_methods: + auth_methods.remove(AuthMethod.UNKNOWN) + + unknown_methods = protocolinfo_response.unknown_auth_methods + plural_label = 's' if len(unknown_methods) > 1 else '' + methods_label = ', '.join(unknown_methods) + + # we... er, can't do anything with only unrecognized auth types + if not auth_methods: + exc_msg = 'unrecognized authentication method%s (%s)' % (plural_label, methods_label) + auth_exceptions.append(UnrecognizedAuthMethods(exc_msg, unknown_methods)) + else: + log.debug('Authenticating to a socket with unrecognized auth method%s, ignoring them: %s' % (plural_label, methods_label)) + + if protocolinfo_response.cookie_path is None: + for cookie_auth_method in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE): + if cookie_auth_method in auth_methods: + auth_methods.remove(cookie_auth_method) + + exc_msg = 'our PROTOCOLINFO response did not have the location of our authentication cookie' + auth_exceptions.append(NoAuthCookie(exc_msg, cookie_auth_method == AuthMethod.SAFECOOKIE)) + + if AuthMethod.PASSWORD in auth_methods and password is None: + auth_methods.remove(AuthMethod.PASSWORD) + auth_exceptions.append(MissingPassword('no passphrase provided')) + + # iterating over AuthMethods so we can try them in this order + for auth_type in (AuthMethod.NONE, AuthMethod.PASSWORD, AuthMethod.SAFECOOKIE, AuthMethod.COOKIE): + if auth_type not in auth_methods: + continue + + try: + if auth_type == AuthMethod.NONE: + authenticate_none(controller, False) + elif auth_type == AuthMethod.PASSWORD: + authenticate_password(controller, password, False) + elif auth_type in (AuthMethod.COOKIE, AuthMethod.SAFECOOKIE): + cookie_path = protocolinfo_response.cookie_path + + if chroot_path: + cookie_path = os.path.join(chroot_path, cookie_path.lstrip(os.path.sep)) + + if auth_type == AuthMethod.SAFECOOKIE: + authenticate_safecookie(controller, cookie_path, False) + else: + authenticate_cookie(controller, cookie_path, False) + + return # success! + except OpenAuthRejected as exc: + auth_exceptions.append(exc) + except IncorrectPassword as exc: + auth_exceptions.append(exc) + except PasswordAuthRejected as exc: + # Since the PROTOCOLINFO says password auth is available we can assume + # that if PasswordAuthRejected is raised it's being raised in error. + log.debug('The authenticate_password method raised a PasswordAuthRejected when password auth should be available. Stem may need to be corrected to recognize this response: %s' % exc) + auth_exceptions.append(IncorrectPassword(str(exc))) + except AuthSecurityFailure as exc: + log.info('Tor failed to provide the nonce expected for safecookie authentication. 
(%s)' % exc) + auth_exceptions.append(exc) + except (InvalidClientNonce, UnrecognizedAuthChallengeMethod, AuthChallengeFailed) as exc: + auth_exceptions.append(exc) + except (IncorrectCookieSize, UnreadableCookieFile, IncorrectCookieValue) as exc: + auth_exceptions.append(exc) + except CookieAuthRejected as exc: + auth_func = 'authenticate_safecookie' if exc.is_safecookie else 'authenticate_cookie' + + log.debug('The %s method raised a CookieAuthRejected when cookie auth should be available. Stem may need to be corrected to recognize this response: %s' % (auth_func, exc)) + auth_exceptions.append(IncorrectCookieValue(str(exc), exc.cookie_path, exc.is_safecookie)) + except stem.ControllerError as exc: + auth_exceptions.append(AuthenticationFailure(str(exc))) + + # All authentication attempts failed. Raise the exception that takes priority + # according to our pydocs. + + for exc_type in AUTHENTICATE_EXCEPTIONS: + for auth_exc in auth_exceptions: + if isinstance(auth_exc, exc_type): + raise auth_exc + + # We really, really shouldn't get here. It means that auth_exceptions is + # either empty or contains something that isn't an AuthenticationFailure. + + raise AssertionError('BUG: Authentication failed without providing a recognized exception: %s' % str(auth_exceptions)) + + +def authenticate_none(controller, suppress_ctl_errors = True): + """ + Authenticates to an open control socket. All control connections need to + authenticate before they can be used, even if tor hasn't been configured to + use any authentication. + + If authentication fails tor will disconnect and we'll make a best effort + attempt to re-establish the connection. This may not succeed, so check + :func:`~stem.socket.ControlSocket.is_alive` before using the socket further. + + This can authenticate to either a :class:`~stem.control.BaseController` or + :class:`~stem.socket.ControlSocket`. + + For general usage use the :func:`~stem.connection.authenticate` function + instead. + + :param controller: tor controller or socket to be authenticated + :param bool suppress_ctl_errors: reports raised + :class:`~stem.ControllerError` as authentication rejection if + **True**, otherwise they're re-raised + + :raises: :class:`stem.connection.OpenAuthRejected` if the empty authentication credentials aren't accepted + """ + + try: + auth_response = _msg(controller, 'AUTHENTICATE') + + # if we got anything but an OK response then error + if str(auth_response) != 'OK': + try: + controller.connect() + except: + pass + + raise OpenAuthRejected(str(auth_response), auth_response) + except stem.ControllerError as exc: + try: + controller.connect() + except: + pass + + if not suppress_ctl_errors: + raise exc + else: + raise OpenAuthRejected('Socket failed (%s)' % exc) + + +def authenticate_password(controller, password, suppress_ctl_errors = True): + """ + Authenticates to a control socket that uses a password (via the + HashedControlPassword torrc option). Quotes in the password are escaped. + + If authentication fails tor will disconnect and we'll make a best effort + attempt to re-establish the connection. This may not succeed, so check + :func:`~stem.socket.ControlSocket.is_alive` before using the socket further. + + If you use this function directly, rather than + :func:`~stem.connection.authenticate`, we may mistakenly raise a + PasswordAuthRejected rather than IncorrectPassword. This is because we rely + on tor's error messaging which is liable to change in future versions + (:trac:`4817`). 
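As an illustration, a hedged sketch of calling authenticate_password() directly, assuming tor was configured with a HashedControlPassword (for example one generated with 'tor --hash-password'):

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort('127.0.0.1', 9051)

  try:
    stem.connection.authenticate_password(control_socket, 'my_password')
    print('Authenticated with our password')
  except stem.connection.PasswordAuthFailed as exc:
    # parent class covering both IncorrectPassword and PasswordAuthRejected
    print('Password authentication failed: %s' % exc)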
+ + This can authenticate to either a :class:`~stem.control.BaseController` or + :class:`~stem.socket.ControlSocket`. + + For general usage use the :func:`~stem.connection.authenticate` function + instead. + + :param controller: tor controller or socket to be authenticated + :param str password: passphrase to present to the socket + :param bool suppress_ctl_errors: reports raised + :class:`~stem.ControllerError` as authentication rejection if + **True**, otherwise they're re-raised + + :raises: + * :class:`stem.connection.PasswordAuthRejected` if the socket doesn't + accept password authentication + * :class:`stem.connection.IncorrectPassword` if the authentication + credentials aren't accepted + """ + + # Escapes quotes. Tor can include those in the password hash, in which case + # it expects escaped quotes from the controller. For more information see... + # https://trac.torproject.org/projects/tor/ticket/4600 + + password = password.replace('"', '\\"') + + try: + auth_response = _msg(controller, 'AUTHENTICATE "%s"' % password) + + # if we got anything but an OK response then error + if str(auth_response) != 'OK': + try: + controller.connect() + except: + pass + + # all we have to go on is the error message from tor... + # Password did not match HashedControlPassword value value from configuration... + # Password did not match HashedControlPassword *or*... + + if 'Password did not match HashedControlPassword' in str(auth_response): + raise IncorrectPassword(str(auth_response), auth_response) + else: + raise PasswordAuthRejected(str(auth_response), auth_response) + except stem.ControllerError as exc: + try: + controller.connect() + except: + pass + + if not suppress_ctl_errors: + raise exc + else: + raise PasswordAuthRejected('Socket failed (%s)' % exc) + + +def authenticate_cookie(controller, cookie_path, suppress_ctl_errors = True): + """ + Authenticates to a control socket that uses the contents of an authentication + cookie (generated via the CookieAuthentication torrc option). This does basic + validation that this is a cookie before presenting the contents to the + socket. + + The :class:`~stem.connection.IncorrectCookieSize` and + :class:`~stem.connection.UnreadableCookieFile` exceptions take precedence + over the other types. + + If authentication fails tor will disconnect and we'll make a best effort + attempt to re-establish the connection. This may not succeed, so check + :func:`~stem.socket.ControlSocket.is_alive` before using the socket further. + + If you use this function directly, rather than + :func:`~stem.connection.authenticate`, we may mistakenly raise a + :class:`~stem.connection.CookieAuthRejected` rather than + :class:`~stem.connection.IncorrectCookieValue`. This is because we rely on + tor's error messaging which is liable to change in future versions + (:trac:`4817`). + + This can authenticate to either a :class:`~stem.control.BaseController` or + :class:`~stem.socket.ControlSocket`. + + For general usage use the :func:`~stem.connection.authenticate` function + instead. 
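A brief, illustrative sketch of direct cookie authentication, with the cookie location taken from PROTOCOLINFO (this assumes tor was started with cookie authentication enabled, so cookie_path isn't **None**):

::

  import stem.connection
  import stem.socket

  control_socket = stem.socket.ControlPort('127.0.0.1', 9051)
  protocolinfo_response = stem.connection.get_protocolinfo(control_socket)

  try:
    stem.connection.authenticate_cookie(control_socket, protocolinfo_response.cookie_path)
    print('Authenticated with our cookie')
  except stem.connection.CookieAuthFailed as exc:
    # parent class covering IncorrectCookieSize, UnreadableCookieFile,
    # IncorrectCookieValue and CookieAuthRejected
    print('Cookie authentication failed: %s' % exc)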
+ + :param controller: tor controller or socket to be authenticated + :param str cookie_path: path of the authentication cookie to send to tor + :param bool suppress_ctl_errors: reports raised + :class:`~stem.ControllerError` as authentication rejection if + **True**, otherwise they're re-raised + + :raises: + * :class:`stem.connection.IncorrectCookieSize` if the cookie file's size + is wrong + * :class:`stem.connection.UnreadableCookieFile` if the cookie file doesn't + exist or we're unable to read it + * :class:`stem.connection.CookieAuthRejected` if cookie authentication is + attempted but the socket doesn't accept it + * :class:`stem.connection.IncorrectCookieValue` if the cookie file's value + is rejected + """ + + cookie_data = _read_cookie(cookie_path, False) + + try: + # binascii.b2a_hex() takes a byte string and returns one too. With python 3 + # this is a problem because string formatting for byte strings includes the + # b'' wrapper... + # + # >>> "AUTHENTICATE %s" % b'content' + # "AUTHENTICATE b'content'" + # + # This seems dumb but oh well. Converting the result to unicode so it won't + # misbehave. + + auth_token_hex = binascii.b2a_hex(stem.util.str_tools._to_bytes(cookie_data)) + msg = 'AUTHENTICATE %s' % stem.util.str_tools._to_unicode(auth_token_hex) + auth_response = _msg(controller, msg) + + # if we got anything but an OK response then error + if str(auth_response) != 'OK': + try: + controller.connect() + except: + pass + + # all we have to go on is the error message from tor... + # ... Authentication cookie did not match expected value. + # ... *or* authentication cookie. + + if '*or* authentication cookie.' in str(auth_response) or \ + 'Authentication cookie did not match expected value.' in str(auth_response): + raise IncorrectCookieValue(str(auth_response), cookie_path, False, auth_response) + else: + raise CookieAuthRejected(str(auth_response), cookie_path, False, auth_response) + except stem.ControllerError as exc: + try: + controller.connect() + except: + pass + + if not suppress_ctl_errors: + raise exc + else: + raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, False) + + +def authenticate_safecookie(controller, cookie_path, suppress_ctl_errors = True): + """ + Authenticates to a control socket using the safe cookie method, which is + enabled by setting the CookieAuthentication torrc option on Tor client's which + support it. + + Authentication with this is a two-step process... + + 1. send a nonce to the server and receives a challenge from the server for + the cookie's contents + 2. generate a hash digest using the challenge received in the first step, and + use it to authenticate the controller + + The :class:`~stem.connection.IncorrectCookieSize` and + :class:`~stem.connection.UnreadableCookieFile` exceptions take precedence + over the other exception types. + + The :class:`~stem.connection.AuthChallengeUnsupported`, + :class:`~stem.connection.UnrecognizedAuthChallengeMethod`, + :class:`~stem.connection.InvalidClientNonce` and + :class:`~stem.connection.CookieAuthRejected` exceptions are next in the order + of precedence. Depending on the reason, one of these is raised if the first + (AUTHCHALLENGE) step fails. + + In the second (AUTHENTICATE) step, + :class:`~stem.connection.IncorrectCookieValue` or + :class:`~stem.connection.CookieAuthRejected` maybe raised. + + If authentication fails tor will disconnect and we'll make a best effort + attempt to re-establish the connection. 
This may not succeed, so check + :func:`~stem.socket.ControlSocket.is_alive` before using the socket further. + + For general usage use the :func:`~stem.connection.authenticate` function + instead. + + :param controller: tor controller or socket to be authenticated + :param str cookie_path: path of the authentication cookie to send to tor + :param bool suppress_ctl_errors: reports raised + :class:`~stem.ControllerError` as authentication rejection if + **True**, otherwise they're re-raised + + :raises: + * :class:`stem.connection.IncorrectCookieSize` if the cookie file's size + is wrong + * :class:`stem.connection.UnreadableCookieFile` if the cookie file doesn't + exist or we're unable to read it + * :class:`stem.connection.CookieAuthRejected` if cookie authentication is + attempted but the socket doesn't accept it + * :class:`stem.connection.IncorrectCookieValue` if the cookie file's value + is rejected + * :class:`stem.connection.UnrecognizedAuthChallengeMethod` if the Tor + client fails to recognize the AuthChallenge method + * :class:`stem.connection.AuthChallengeUnsupported` if AUTHCHALLENGE is + unimplemented, or if unable to parse AUTHCHALLENGE response + * :class:`stem.connection.AuthSecurityFailure` if AUTHCHALLENGE's response + looks like a security attack + * :class:`stem.connection.InvalidClientNonce` if stem's AUTHCHALLENGE + client nonce is rejected for being invalid + """ + + cookie_data = _read_cookie(cookie_path, True) + client_nonce = os.urandom(32) + + try: + client_nonce_hex = stem.util.str_tools._to_unicode(binascii.b2a_hex(client_nonce)) + authchallenge_response = _msg(controller, 'AUTHCHALLENGE SAFECOOKIE %s' % client_nonce_hex) + + if not authchallenge_response.is_ok(): + try: + controller.connect() + except: + pass + + authchallenge_response_str = str(authchallenge_response) + + if 'Authentication required.' 
in authchallenge_response_str: + raise AuthChallengeUnsupported("SAFECOOKIE authentication isn't supported", cookie_path) + elif 'AUTHCHALLENGE only supports' in authchallenge_response_str: + raise UnrecognizedAuthChallengeMethod(authchallenge_response_str, cookie_path) + elif 'Invalid base16 client nonce' in authchallenge_response_str: + raise InvalidClientNonce(authchallenge_response_str, cookie_path) + elif 'Cookie authentication is disabled' in authchallenge_response_str: + raise CookieAuthRejected(authchallenge_response_str, cookie_path, True) + else: + raise AuthChallengeFailed(authchallenge_response, cookie_path) + except stem.ControllerError as exc: + try: + controller.connect() + except: + pass + + if not suppress_ctl_errors: + raise exc + else: + raise AuthChallengeFailed('Socket failed (%s)' % exc, cookie_path, True) + + try: + stem.response.convert('AUTHCHALLENGE', authchallenge_response) + except stem.ProtocolError as exc: + if not suppress_ctl_errors: + raise exc + else: + raise AuthChallengeFailed('Unable to parse AUTHCHALLENGE response: %s' % exc, cookie_path) + + expected_server_hash = stem.util.connection._hmac_sha256( + SERVER_HASH_CONSTANT, + cookie_data + client_nonce + authchallenge_response.server_nonce) + + if not stem.util.connection._cryptovariables_equal(authchallenge_response.server_hash, expected_server_hash): + raise AuthSecurityFailure('Tor provided the wrong server nonce', cookie_path) + + try: + client_hash = stem.util.connection._hmac_sha256( + CLIENT_HASH_CONSTANT, + cookie_data + client_nonce + authchallenge_response.server_nonce) + + auth_response = _msg(controller, 'AUTHENTICATE %s' % stem.util.str_tools._to_unicode(binascii.b2a_hex(client_hash))) + except stem.ControllerError as exc: + try: + controller.connect() + except: + pass + + if not suppress_ctl_errors: + raise exc + else: + raise CookieAuthRejected('Socket failed (%s)' % exc, cookie_path, True, auth_response) + + # if we got anything but an OK response then err + if not auth_response.is_ok(): + try: + controller.connect() + except: + pass + + # all we have to go on is the error message from tor... + # ... Safe cookie response did not match expected value + # ... *or* authentication cookie. + + if '*or* authentication cookie.' in str(auth_response) or \ + 'Safe cookie response did not match expected value' in str(auth_response): + raise IncorrectCookieValue(str(auth_response), cookie_path, True, auth_response) + else: + raise CookieAuthRejected(str(auth_response), cookie_path, True, auth_response) + + +def get_protocolinfo(controller): + """ + Issues a PROTOCOLINFO query to a control socket, getting information about + the tor process running on it. If the socket is already closed then it is + first reconnected. + + According to the control spec the cookie_file is an absolute path. However, + this often is not the case (especially for the Tor Browser Bundle). If the + path is relative then we'll make an attempt (which may not work) to correct + this (:trac:`1101`). + + This can authenticate to either a :class:`~stem.control.BaseController` or + :class:`~stem.socket.ControlSocket`. 
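For reference, the SAFECOOKIE digests computed in authenticate_safecookie() above reduce to two HMAC-SHA256 calls over the same message (cookie contents, then the client nonce, then the server nonce). A standalone sketch, assuming the module's hash constants are the control-spec strings shown here:

::

  import hashlib
  import hmac
  import os

  # assumed values of this module's SERVER/CLIENT_HASH_CONSTANT (per the control-spec)
  SERVER_HASH_CONSTANT = b'Tor safe cookie authentication server-to-controller hash'
  CLIENT_HASH_CONSTANT = b'Tor safe cookie authentication controller-to-server hash'

  cookie_data = os.urandom(32)   # stand-in for the cookie file's contents
  client_nonce = os.urandom(32)
  server_nonce = os.urandom(32)  # normally parsed from the AUTHCHALLENGE reply

  message = cookie_data + client_nonce + server_nonce

  # what tor should present to us, and what we send back with AUTHENTICATE
  expected_server_hash = hmac.new(SERVER_HASH_CONSTANT, message, hashlib.sha256).digest()
  client_hash = hmac.new(CLIENT_HASH_CONSTANT, message, hashlib.sha256).digest()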
+ + :param controller: tor controller or socket to be queried + + :returns: :class:`~stem.response.protocolinfo.ProtocolInfoResponse` provided by tor + + :raises: + * :class:`stem.ProtocolError` if the PROTOCOLINFO response is + malformed + * :class:`stem.SocketError` if problems arise in establishing or + using the socket + """ + + try: + protocolinfo_response = _msg(controller, 'PROTOCOLINFO 1') + except: + protocolinfo_response = None + + # Tor hangs up on sockets after receiving a PROTOCOLINFO query if it isn't + # next followed by authentication. Transparently reconnect if that happens. + + if not protocolinfo_response or str(protocolinfo_response) == 'Authentication required.': + controller.connect() + + try: + protocolinfo_response = _msg(controller, 'PROTOCOLINFO 1') + except stem.SocketClosed as exc: + raise stem.SocketError(exc) + + stem.response.convert('PROTOCOLINFO', protocolinfo_response) + + # attempt to expand relative cookie paths + + if protocolinfo_response.cookie_path: + _expand_cookie_path(protocolinfo_response, stem.util.system.pid_by_name, 'tor') + + # attempt to expand relative cookie paths via the control port or socket file + + if isinstance(controller, stem.socket.ControlSocket): + control_socket = controller + else: + control_socket = controller.get_socket() + + if isinstance(control_socket, stem.socket.ControlPort): + if control_socket.get_address() == '127.0.0.1': + pid_method = stem.util.system.pid_by_port + _expand_cookie_path(protocolinfo_response, pid_method, control_socket.get_port()) + elif isinstance(control_socket, stem.socket.ControlSocketFile): + pid_method = stem.util.system.pid_by_open_file + _expand_cookie_path(protocolinfo_response, pid_method, control_socket.get_socket_path()) + + return protocolinfo_response + + +def _msg(controller, message): + """ + Sends and receives a message with either a + :class:`~stem.socket.ControlSocket` or :class:`~stem.control.BaseController`. + """ + + if isinstance(controller, stem.socket.ControlSocket): + controller.send(message) + return controller.recv() + else: + return controller.msg(message) + + +def _read_cookie(cookie_path, is_safecookie): + """ + Provides the contents of a given cookie file. + + :param str cookie_path: absolute path of the cookie file + :param bool is_safecookie: **True** if this was for SAFECOOKIE + authentication, **False** if for COOKIE + + :raises: + * :class:`stem.connection.UnreadableCookieFile` if the cookie file is + unreadable + * :class:`stem.connection.IncorrectCookieSize` if the cookie size is + incorrect (not 32 bytes) + """ + + if not os.path.exists(cookie_path): + exc_msg = "Authentication failed: '%s' doesn't exist" % cookie_path + raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie) + + # Abort if the file isn't 32 bytes long. This is to avoid exposing arbitrary + # file content to the port. + # + # Without this a malicious socket could, for instance, claim that + # '~/.bash_history' or '~/.ssh/id_rsa' was its authentication cookie to trick + # us into reading it for them with our current permissions. 
+ # + # https://trac.torproject.org/projects/tor/ticket/4303 + + auth_cookie_size = os.path.getsize(cookie_path) + + if auth_cookie_size != 32: + exc_msg = "Authentication failed: authentication cookie '%s' is the wrong size (%i bytes instead of 32)" % (cookie_path, auth_cookie_size) + raise IncorrectCookieSize(exc_msg, cookie_path, is_safecookie) + + try: + with open(cookie_path, 'rb', 0) as f: + return f.read() + except IOError as exc: + exc_msg = "Authentication failed: unable to read '%s' (%s)" % (cookie_path, exc) + raise UnreadableCookieFile(exc_msg, cookie_path, is_safecookie) + + +def _expand_cookie_path(protocolinfo_response, pid_resolver, pid_resolution_arg): + """ + Attempts to expand a relative cookie path with the given pid resolver. This + leaves the cookie_path alone if it's already absolute, **None**, or the + system calls fail. + """ + + cookie_path = protocolinfo_response.cookie_path + if cookie_path and not os.path.isabs(cookie_path): + try: + tor_pid = pid_resolver(pid_resolution_arg) + + if not tor_pid: + raise IOError('pid lookup failed') + + tor_cwd = stem.util.system.cwd(tor_pid) + + if not tor_cwd: + raise IOError('cwd lookup failed') + + cookie_path = stem.util.system.expand_path(cookie_path, tor_cwd) + except IOError as exc: + resolver_labels = { + stem.util.system.pid_by_name: ' by name', + stem.util.system.pid_by_port: ' by port', + stem.util.system.pid_by_open_file: ' by socket file', + } + + pid_resolver_label = resolver_labels.get(pid_resolver, '') + log.debug('unable to expand relative tor cookie path%s: %s' % (pid_resolver_label, exc)) + + protocolinfo_response.cookie_path = cookie_path + + +class AuthenticationFailure(Exception): + """ + Base error for authentication failures. + + :var stem.socket.ControlMessage auth_response: AUTHENTICATE response from the + control socket, **None** if one wasn't received + """ + + def __init__(self, message, auth_response = None): + super(AuthenticationFailure, self).__init__(message) + self.auth_response = auth_response + + +class UnrecognizedAuthMethods(AuthenticationFailure): + """ + All methods for authenticating aren't recognized. + + :var list unknown_auth_methods: authentication methods that weren't recognized + """ + + def __init__(self, message, unknown_auth_methods): + super(UnrecognizedAuthMethods, self).__init__(message) + self.unknown_auth_methods = unknown_auth_methods + + +class IncorrectSocketType(AuthenticationFailure): + 'Socket does not speak the control protocol.' + + +class OpenAuthFailed(AuthenticationFailure): + 'Failure to authenticate to an open socket.' + + +class OpenAuthRejected(OpenAuthFailed): + 'Attempt to connect to an open control socket was rejected.' + + +class PasswordAuthFailed(AuthenticationFailure): + 'Failure to authenticate with a password.' + + +class PasswordAuthRejected(PasswordAuthFailed): + 'Socket does not support password authentication.' + + +class IncorrectPassword(PasswordAuthFailed): + 'Authentication password incorrect.' + + +class MissingPassword(PasswordAuthFailed): + "Password authentication is supported but we weren't provided with one." + + +class CookieAuthFailed(AuthenticationFailure): + """ + Failure to authenticate with an authentication cookie. 
+ + :param str cookie_path: location of the authentication cookie we attempted + :param bool is_safecookie: **True** if this was for SAFECOOKIE + authentication, **False** if for COOKIE + :param stem.response.ControlMessage auth_response: reply to our + authentication attempt + """ + + def __init__(self, message, cookie_path, is_safecookie, auth_response = None): + super(CookieAuthFailed, self).__init__(message, auth_response) + self.is_safecookie = is_safecookie + self.cookie_path = cookie_path + + +class CookieAuthRejected(CookieAuthFailed): + 'Socket does not support password authentication.' + + +class IncorrectCookieValue(CookieAuthFailed): + 'Authentication cookie value was rejected.' + + +class IncorrectCookieSize(CookieAuthFailed): + 'Aborted because the cookie file is the wrong size.' + + +class UnreadableCookieFile(CookieAuthFailed): + 'Error arose in reading the authentication cookie.' + + +class AuthChallengeFailed(CookieAuthFailed): + """ + AUTHCHALLENGE command has failed. + """ + + def __init__(self, message, cookie_path): + super(AuthChallengeFailed, self).__init__(message, cookie_path, True) + + +class AuthChallengeUnsupported(AuthChallengeFailed): + """ + AUTHCHALLENGE isn't implemented. + """ + + +class UnrecognizedAuthChallengeMethod(AuthChallengeFailed): + """ + Tor couldn't recognize our AUTHCHALLENGE method. + + :var str authchallenge_method: AUTHCHALLENGE method that Tor couldn't recognize + """ + + def __init__(self, message, cookie_path, authchallenge_method): + super(UnrecognizedAuthChallengeMethod, self).__init__(message, cookie_path) + self.authchallenge_method = authchallenge_method + + +class AuthSecurityFailure(AuthChallengeFailed): + 'AUTHCHALLENGE response is invalid.' + + +class InvalidClientNonce(AuthChallengeFailed): + 'AUTHCHALLENGE request contains an invalid client nonce.' + + +class MissingAuthInfo(AuthenticationFailure): + """ + The PROTOCOLINFO response didn't have enough information to authenticate. + These are valid control responses but really shouldn't happen in practice. + """ + + +class NoAuthMethods(MissingAuthInfo): + "PROTOCOLINFO response didn't have any methods for authenticating." + + +class NoAuthCookie(MissingAuthInfo): + """ + PROTOCOLINFO response supports cookie auth but doesn't have its path. + + :param bool is_safecookie: **True** if this was for SAFECOOKIE + authentication, **False** if for COOKIE + """ + + def __init__(self, message, is_safecookie): + super(NoAuthCookie, self).__init__(message) + self.is_safecookie = is_safecookie + +# authentication exceptions ordered as per the authenticate function's pydocs +AUTHENTICATE_EXCEPTIONS = ( + IncorrectSocketType, + UnrecognizedAuthMethods, + MissingPassword, + IncorrectPassword, + IncorrectCookieSize, + UnreadableCookieFile, + IncorrectCookieValue, + AuthChallengeUnsupported, + UnrecognizedAuthChallengeMethod, + InvalidClientNonce, + AuthSecurityFailure, + OpenAuthRejected, + MissingAuthInfo, + AuthenticationFailure +) diff --git a/Shared/lib/python3.4/site-packages/stem/control.py b/Shared/lib/python3.4/site-packages/stem/control.py new file mode 100644 index 0000000..657f559 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/control.py @@ -0,0 +1,3631 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Module for interacting with the Tor control socket. 
The +:class:`~stem.control.Controller` is a wrapper around a +:class:`~stem.socket.ControlSocket`, retaining many of its methods (connect, +close, is_alive, etc) in addition to providing its own for working with the +socket at a higher level. + +Stem has `several ways <../faq.html#how-do-i-connect-to-tor>`_ of getting a +:class:`~stem.control.Controller`, but the most flexible are +:func:`~stem.control.Controller.from_port` and +:func:`~stem.control.Controller.from_socket_file`. These static +:class:`~stem.control.Controller` methods give you an **unauthenticated** +Controller you can then authenticate yourself using its +:func:`~stem.control.Controller.authenticate` method. For example... + +:: + + import getpass + import sys + + import stem + import stem.connection + + from stem.control import Controller + + if __name__ == '__main__': + try: + controller = Controller.from_port() + except stem.SocketError as exc: + print("Unable to connect to tor on port 9051: %s" % exc) + sys.exit(1) + + try: + controller.authenticate() + except stem.connection.MissingPassword: + pw = getpass.getpass("Controller password: ") + + try: + controller.authenticate(password = pw) + except stem.connection.PasswordAuthFailed: + print("Unable to authenticate, password is incorrect") + sys.exit(1) + except stem.connection.AuthenticationFailure as exc: + print("Unable to authenticate: %s" % exc) + sys.exit(1) + + print("Tor is running version %s" % controller.get_version()) + controller.close() + +If you're fine with allowing your script to raise exceptions then this can be more nicely done as... + +:: + + from stem.control import Controller + + if __name__ == '__main__': + with Controller.from_port() as controller: + controller.authenticate() + + print("Tor is running version %s" % controller.get_version()) + +**Module Overview:** + +:: + + Controller - General controller class intended for direct use + | |- from_port - Provides a Controller based on a port connection. + | +- from_socket_file - Provides a Controller based on a socket file connection. 
+ | + |- authenticate - authenticates this controller with tor + | + |- get_info - issues a GETINFO query for a parameter + |- get_version - provides our tor version + |- get_exit_policy - provides our exit policy + |- get_ports - provides the local ports where tor is listening for connections + |- get_listeners - provides the addresses and ports where tor is listening for connections + |- get_accounting_stats - provides stats related to relaying limits + |- get_protocolinfo - information about the controller interface + |- get_user - provides the user tor is running as + |- get_pid - provides the pid of our tor process + | + |- get_microdescriptor - querying the microdescriptor for a relay + |- get_microdescriptors - provides all currently available microdescriptors + |- get_server_descriptor - querying the server descriptor for a relay + |- get_server_descriptors - provides all currently available server descriptors + |- get_network_status - querying the router status entry for a relay + |- get_network_statuses - provides all preently available router status entries + |- get_hidden_service_descriptor - queries the given hidden service descriptor + | + |- get_conf - gets the value of a configuration option + |- get_conf_map - gets the values of multiple configuration options + |- set_conf - sets the value of a configuration option + |- reset_conf - reverts configuration options to their default values + |- set_options - sets or resets the values of multiple configuration options + | + |- get_hidden_service_conf - provides our hidden service configuration + |- set_hidden_service_conf - sets our hidden service configuration + |- create_hidden_service - creates a new hidden service or adds a new port + |- remove_hidden_service - removes a hidden service or drops a port + | + |- list_ephemeral_hidden_services - list ephemeral hidden serivces + |- create_ephemeral_hidden_service - create a new ephemeral hidden service + |- remove_ephemeral_hidden_service - removes an ephemeral hidden service + | + |- add_event_listener - attaches an event listener to be notified of tor events + |- remove_event_listener - removes a listener so it isn't notified of further events + | + |- is_caching_enabled - true if the controller has enabled caching + |- set_caching - enables or disables caching + |- clear_cache - clears any cached results + | + |- load_conf - loads configuration information as if it was in the torrc + |- save_conf - saves configuration information to the torrc + | + |- is_feature_enabled - checks if a given controller feature is enabled + |- enable_feature - enables a controller feature that has been disabled by default + | + |- get_circuit - provides an active circuit + |- get_circuits - provides a list of active circuits + |- new_circuit - create new circuits + |- extend_circuit - create new circuits and extend existing ones + |- repurpose_circuit - change a circuit's purpose + |- close_circuit - close a circuit + | + |- get_streams - provides a list of active streams + |- attach_stream - attach a stream to a circuit + |- close_stream - close a stream + | + |- signal - sends a signal to the tor client + |- is_newnym_available - true if tor would currently accept a NEWNYM signal + |- get_newnym_wait - seconds until tor would accept a NEWNYM signal + |- get_effective_rate - provides our effective relaying rate limit + |- is_geoip_unavailable - true if we've discovered our geoip db to be unavailable + |- map_address - maps one address to another such that connections to the original are 
replaced with the other + +- drop_guards - drops our set of guard relays and picks a new set + + BaseController - Base controller class asynchronous message handling + |- msg - communicates with the tor process + |- is_alive - reports if our connection to tor is open or closed + |- is_localhost - returns if the connection is for the local system or not + |- connection_time - time when we last connected or disconnected + |- is_authenticated - checks if we're authenticated to tor + |- connect - connects or reconnects to tor + |- close - shuts down our connection to the tor process + |- get_socket - provides the socket used for control communication + |- get_latest_heartbeat - timestamp for when we last heard from tor + |- add_status_listener - notifies a callback of changes in our status + |- remove_status_listener - prevents further notification of status changes + +- __enter__ / __exit__ - manages socket connection + +.. data:: State (enum) + + Enumeration for states that a controller can have. + + ========== =========== + State Description + ========== =========== + **INIT** new control connection + **RESET** received a reset/sighup signal + **CLOSED** control connection closed + ========== =========== + +.. data:: EventType (enum) + + Known types of events that the + :func:`~stem.control.Controller.add_event_listener` method of the + :class:`~stem.control.Controller` can listen for. + + The most frequently listened for event types tend to be the logging events + (**DEBUG**, **INFO**, **NOTICE**, **WARN**, and **ERR**), bandwidth usage + (**BW**), and circuit or stream changes (**CIRC** and **STREAM**). + + Enums are mapped to :class:`~stem.response.events.Event` subclasses as + follows... + + ======================= =========== + EventType Event Class + ======================= =========== + **ADDRMAP** :class:`stem.response.events.AddrMapEvent` + **AUTHDIR_NEWDESCS** :class:`stem.response.events.AuthDirNewDescEvent` + **BUILDTIMEOUT_SET** :class:`stem.response.events.BuildTimeoutSetEvent` + **BW** :class:`stem.response.events.BandwidthEvent` + **CELL_STATS** :class:`stem.response.events.CellStatsEvent` + **CIRC** :class:`stem.response.events.CircuitEvent` + **CIRC_BW** :class:`stem.response.events.CircuitBandwidthEvent` + **CIRC_MINOR** :class:`stem.response.events.CircMinorEvent` + **CLIENTS_SEEN** :class:`stem.response.events.ClientsSeenEvent` + **CONF_CHANGED** :class:`stem.response.events.ConfChangedEvent` + **CONN_BW** :class:`stem.response.events.ConnectionBandwidthEvent` + **DEBUG** :class:`stem.response.events.LogEvent` + **DESCCHANGED** :class:`stem.response.events.DescChangedEvent` + **ERR** :class:`stem.response.events.LogEvent` + **GUARD** :class:`stem.response.events.GuardEvent` + **HS_DESC** :class:`stem.response.events.HSDescEvent` + **HS_DESC_CONTENT** :class:`stem.response.events.HSDescContentEvent` + **INFO** :class:`stem.response.events.LogEvent` + **NEWCONSENSUS** :class:`stem.response.events.NewConsensusEvent` + **NEWDESC** :class:`stem.response.events.NewDescEvent` + **NOTICE** :class:`stem.response.events.LogEvent` + **NS** :class:`stem.response.events.NetworkStatusEvent` + **ORCONN** :class:`stem.response.events.ORConnEvent` + **SIGNAL** :class:`stem.response.events.SignalEvent` + **STATUS_CLIENT** :class:`stem.response.events.StatusEvent` + **STATUS_GENERAL** :class:`stem.response.events.StatusEvent` + **STATUS_SERVER** :class:`stem.response.events.StatusEvent` + **STREAM** :class:`stem.response.events.StreamEvent` + **STREAM_BW** 
:class:`stem.response.events.StreamBwEvent` + **TB_EMPTY** :class:`stem.response.events.TokenBucketEmptyEvent` + **TRANSPORT_LAUNCHED** :class:`stem.response.events.TransportLaunchedEvent` + **WARN** :class:`stem.response.events.LogEvent` + ======================= =========== + +.. data:: Listener (enum) + + Purposes for inbound connections that Tor handles. + + ============= =========== + Listener Description + ============= =========== + **OR** traffic we're relaying as a member of the network (torrc's **ORPort** and **ORListenAddress**) + **DIR** mirroring for tor descriptor content (torrc's **DirPort** and **DirListenAddress**) + **SOCKS** client traffic we're sending over Tor (torrc's **SocksPort** and **SocksListenAddress**) + **TRANS** transparent proxy handling (torrc's **TransPort** and **TransListenAddress**) + **NATD** forwarding for ipfw NATD connections (torrc's **NatdPort** and **NatdListenAddress**) + **DNS** DNS lookups for our traffic (torrc's **DNSPort** and **DNSListenAddress**) + **CONTROL** controller applications (torrc's **ControlPort** and **ControlListenAddress**) + ============= =========== +""" + +import calendar +import collections +import functools +import inspect +import io +import os +import threading +import time + +try: + # Added in 2.7 + from collections import OrderedDict +except ImportError: + from stem.util.ordereddict import OrderedDict + +try: + import queue + from io import StringIO +except ImportError: + import Queue as queue + from StringIO import StringIO + +import stem.descriptor.microdescriptor +import stem.descriptor.reader +import stem.descriptor.router_status_entry +import stem.descriptor.server_descriptor +import stem.exit_policy +import stem.response +import stem.response.events +import stem.socket +import stem.util.connection +import stem.util.enum +import stem.util.str_tools +import stem.util.system +import stem.util.tor_tools +import stem.version + +from stem import UNDEFINED, CircStatus, Signal, str_type +from stem.util import log + +# state changes a control socket can have + +State = stem.util.enum.Enum('INIT', 'RESET', 'CLOSED') + +EventType = stem.util.enum.UppercaseEnum( + 'ADDRMAP', + 'AUTHDIR_NEWDESCS', + 'BUILDTIMEOUT_SET', + 'BW', + 'CELL_STATS', + 'CIRC', + 'CIRC_BW', + 'CIRC_MINOR', + 'CONF_CHANGED', + 'CONN_BW', + 'CLIENTS_SEEN', + 'DEBUG', + 'DESCCHANGED', + 'ERR', + 'GUARD', + 'HS_DESC', + 'HS_DESC_CONTENT', + 'INFO', + 'NEWCONSENSUS', + 'NEWDESC', + 'NOTICE', + 'NS', + 'ORCONN', + 'SIGNAL', + 'STATUS_CLIENT', + 'STATUS_GENERAL', + 'STATUS_SERVER', + 'STREAM', + 'STREAM_BW', + 'TB_EMPTY', + 'TRANSPORT_LAUNCHED', + 'WARN', +) + +Listener = stem.util.enum.UppercaseEnum( + 'OR', + 'DIR', + 'SOCKS', + 'TRANS', + 'NATD', + 'DNS', + 'CONTROL', +) + +# Configuration options that are fetched by a special key. The keys are +# lowercase to make case insensitive lookups easier. + +MAPPED_CONFIG_KEYS = { + 'hiddenservicedir': 'HiddenServiceOptions', + 'hiddenserviceport': 'HiddenServiceOptions', + 'hiddenserviceversion': 'HiddenServiceOptions', + 'hiddenserviceauthorizeclient': 'HiddenServiceOptions', + 'hiddenserviceoptions': 'HiddenServiceOptions', +} + +# unchangeable GETINFO parameters + +CACHEABLE_GETINFO_PARAMS = ( + 'version', + 'config-file', + 'exit-policy/default', + 'fingerprint', + 'config/names', + 'config/defaults', + 'info/names', + 'events/names', + 'features/names', + 'process/descriptor-limit', +) + +# GETCONF parameters we shouldn't cache. 
This includes hidden service +# perameters due to the funky way they're set and retrieved (for instance, +# 'SETCONF HiddenServiceDir' effects 'GETCONF HiddenServiceOptions'). + +UNCACHEABLE_GETCONF_PARAMS = ( + 'hiddenserviceoptions', + 'hiddenservicedir', + 'hiddenserviceport', + 'hiddenserviceversion', + 'hiddenserviceauthorizeclient', +) + +# number of sequential attempts before we decide that the Tor geoip database +# is unavailable +GEOIP_FAILURE_THRESHOLD = 5 + +SERVER_DESCRIPTORS_UNSUPPORTED = "Tor is currently not configured to retrieve \ +server descriptors. As of Tor version 0.2.3.25 it downloads microdescriptors \ +instead unless you set 'UseMicrodescriptors 0' in your torrc." + +AccountingStats = collections.namedtuple('AccountingStats', [ + 'retrieved', + 'status', + 'interval_end', + 'time_until_reset', + 'read_bytes', + 'read_bytes_left', + 'read_limit', + 'written_bytes', + 'write_bytes_left', + 'write_limit', +]) + +CreateHiddenServiceOutput = collections.namedtuple('CreateHiddenServiceOutput', [ + 'path', + 'hostname', + 'hostname_for_client', + 'config', +]) + + +def with_default(yields = False): + """ + Provides a decorator to support having a default value. This should be + treated as private. + """ + + def decorator(func): + def get_default(func, args, kwargs): + arg_names = inspect.getargspec(func).args[1:] # drop 'self' + default_position = arg_names.index('default') if 'default' in arg_names else None + + if default_position is not None and default_position < len(args): + return args[default_position] + else: + return kwargs.get('default', UNDEFINED) + + if not yields: + @functools.wraps(func) + def wrapped(self, *args, **kwargs): + try: + return func(self, *args, **kwargs) + except Exception as exc: + default = get_default(func, args, kwargs) + + if default == UNDEFINED: + raise exc + else: + return default + else: + @functools.wraps(func) + def wrapped(self, *args, **kwargs): + try: + for val in func(self, *args, **kwargs): + yield val + except Exception as exc: + default = get_default(func, args, kwargs) + + if default == UNDEFINED: + raise exc + else: + if default is not None: + for val in default: + yield val + + return wrapped + + return decorator + + +class BaseController(object): + """ + Controller for the tor process. This is a minimal base class for other + controllers, providing basic process communication and event listing. Don't + use this directly - subclasses like the :class:`~stem.control.Controller` + provide higher level functionality. + + It's highly suggested that you don't interact directly with the + :class:`~stem.socket.ControlSocket` that we're constructed from - use our + wrapper methods instead. + + If the **control_socket** is already authenticated to Tor then the caller + should provide the **is_authenticated** flag. Otherwise, we will treat the + socket as though it hasn't yet been authenticated. 
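For example, a minimal sketch of handing an already authenticated socket to a controller, using the **is_authenticated** flag described above so the wrapper doesn't treat the socket as unauthenticated:

::

  import stem.connection
  import stem.control
  import stem.socket

  control_socket = stem.socket.ControlPort('127.0.0.1', 9051)
  stem.connection.authenticate(control_socket)

  # the socket is already authenticated, so say so when wrapping it
  controller = stem.control.Controller(control_socket, is_authenticated = True)
  print(controller.get_version())
  controller.close()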
+ """ + + def __init__(self, control_socket, is_authenticated = False): + self._socket = control_socket + self._msg_lock = threading.RLock() + + self._status_listeners = [] # tuples of the form (callback, spawn_thread) + self._status_listeners_lock = threading.RLock() + + # queues where incoming messages are directed + self._reply_queue = queue.Queue() + self._event_queue = queue.Queue() + + # thread to continually pull from the control socket + self._reader_thread = None + + # thread to pull from the _event_queue and call handle_event + self._event_notice = threading.Event() + self._event_thread = None + + # saves our socket's prior _connect() and _close() methods so they can be + # called along with ours + + self._socket_connect = self._socket._connect + self._socket_close = self._socket._close + + self._socket._connect = self._connect + self._socket._close = self._close + + self._last_heartbeat = 0.0 # timestamp for when we last heard from tor + self._is_authenticated = False + + self._state_change_threads = [] # threads we've spawned to notify of state changes + + if self._socket.is_alive(): + self._launch_threads() + + if is_authenticated: + self._post_authentication() + + def msg(self, message): + """ + Sends a message to our control socket and provides back its reply. + + :param str message: message to be formatted and sent to tor + + :returns: :class:`~stem.response.ControlMessage` with the response + + :raises: + * :class:`stem.ProtocolError` the content from the socket is + malformed + * :class:`stem.SocketError` if a problem arises in using the + socket + * :class:`stem.SocketClosed` if the socket is shut down + """ + + with self._msg_lock: + # If our _reply_queue isn't empty then one of a few things happened... + # + # - Our connection was closed and probably re-restablished. This was + # in reply to pulling for an asynchronous event and getting this is + # expected - ignore it. + # + # - Pulling for asynchronous events produced an error. If this was a + # ProtocolError then it's a tor bug, and if a non-closure SocketError + # then it was probably a socket glitch. Deserves an INFO level log + # message. + # + # - This is a leftover response for a msg() call. We can't tell who an + # exception was earmarked for, so we only know that this was the case + # if it's a ControlMessage. + # + # This is the most concerning situation since it indicates that one of + # our callers didn't get their reply. However, this is still a + # perfectly viable use case. For instance... + # + # 1. We send a request. + # 2. The reader thread encounters an exception, for instance a socket + # error. We enqueue the exception. + # 3. The reader thread receives the reply. + # 4. We raise the socket error, and have an undelivered message. + # + # Thankfully this only seems to arise in edge cases around rapidly + # closing/reconnecting the socket. 
+ + while not self._reply_queue.empty(): + try: + response = self._reply_queue.get_nowait() + + if isinstance(response, stem.SocketClosed): + pass # this is fine + elif isinstance(response, stem.ProtocolError): + log.info('Tor provided a malformed message (%s)' % response) + elif isinstance(response, stem.ControllerError): + log.info('Socket experienced a problem (%s)' % response) + elif isinstance(response, stem.response.ControlMessage): + log.info('Failed to deliver a response: %s' % response) + except queue.Empty: + # the empty() method is documented to not be fully reliable so this + # isn't entirely surprising + + break + + try: + self._socket.send(message) + response = self._reply_queue.get() + + # If the message we received back had an exception then re-raise it to the + # caller. Otherwise return the response. + + if isinstance(response, stem.ControllerError): + raise response + else: + # I really, really don't like putting hooks into this method, but + # this is the most reliable method I can think of for taking actions + # immediately after successfully authenticating to a connection. + + if message.upper().startswith('AUTHENTICATE'): + self._post_authentication() + + return response + except stem.SocketClosed as exc: + # If the recv() thread caused the SocketClosed then we could still be + # in the process of closing. Calling close() here so that we can + # provide an assurance to the caller that when we raise a SocketClosed + # exception we are shut down afterward for realz. + + self.close() + raise exc + + def is_alive(self): + """ + Checks if our socket is currently connected. This is a pass-through for our + socket's :func:`~stem.socket.ControlSocket.is_alive` method. + + :returns: **bool** that's **True** if our socket is connected and **False** otherwise + """ + + return self._socket.is_alive() + + def is_localhost(self): + """ + Returns if the connection is for the local system or not. + + .. versionadded:: 1.3.0 + + :returns: **bool** that's **True** if the connection is for the local host and **False** otherwise + """ + + return self._socket.is_localhost() + + def connection_time(self): + """ + Provides the unix timestamp for when our socket was either connected or + disconnected. That is to say, the time we connected if we're currently + connected and the time we disconnected if we're not connected. + + .. versionadded:: 1.3.0 + + :returns: **float** for when we last connected or disconnected, zero if + we've never connected + """ + + return self._socket.connection_time() + + def is_authenticated(self): + """ + Checks if our socket is both connected and authenticated. + + :returns: **bool** that's **True** if our socket is authenticated to tor + and **False** otherwise + """ + + if self.is_alive(): + return self._is_authenticated + + return False + + def connect(self): + """ + Reconnects our control socket. This is a pass-through for our socket's + :func:`~stem.socket.ControlSocket.connect` method. + + :raises: :class:`stem.SocketError` if unable to make a socket + """ + + self._socket.connect() + + def close(self): + """ + Closes our socket connection. This is a pass-through for our socket's + :func:`~stem.socket.ControlSocket.close` method. + """ + + self._socket.close() + + # Join on any outstanding state change listeners. Closing is a state change + # of its own, so if we have any listeners it's quite likely there's some + # work in progress. + # + # It's important that we do this outside of our locks so those daemons have + # access to us. 
This is why we're doing this here rather than _close(). + + for t in self._state_change_threads: + if t.is_alive() and threading.current_thread() != t: + t.join() + + def get_socket(self): + """ + Provides the socket used to speak with the tor process. Communicating with + the socket directly isn't advised since it may confuse this controller. + + :returns: :class:`~stem.socket.ControlSocket` we're communicating with + """ + + return self._socket + + def get_latest_heartbeat(self): + """ + Provides the unix timestamp for when we last heard from tor. This is zero + if we've never received a message. + + :returns: float for the unix timestamp of when we last heard from tor + """ + + return self._last_heartbeat + + def add_status_listener(self, callback, spawn = True): + """ + Notifies a given function when the state of our socket changes. Functions + are expected to be of the form... + + :: + + my_function(controller, state, timestamp) + + The state is a value from the :data:`stem.control.State` enum. Functions + **must** allow for new values. The timestamp is a float for the unix time + when the change occurred. + + This class only provides **State.INIT** and **State.CLOSED** notifications. + Subclasses may provide others. + + If spawn is **True** then the callback is notified via a new daemon thread. + If **False** then the notice is under our locks, within the thread where + the change occurred. In general this isn't advised, especially if your + callback could block for a while. If still outstanding these threads are + joined on as part of closing this controller. + + :param function callback: function to be notified when our state changes + :param bool spawn: calls function via a new thread if **True**, otherwise + it's part of the connect/close method call + """ + + with self._status_listeners_lock: + self._status_listeners.append((callback, spawn)) + + def remove_status_listener(self, callback): + """ + Stops listener from being notified of further events. + + :param function callback: function to be removed from our listeners + + :returns: **bool** that's **True** if we removed one or more occurrences of + the callback, **False** otherwise + """ + + with self._status_listeners_lock: + new_listeners, is_changed = [], False + + for listener, spawn in self._status_listeners: + if listener != callback: + new_listeners.append((listener, spawn)) + else: + is_changed = True + + self._status_listeners = new_listeners + return is_changed + + def __enter__(self): + return self + + def __exit__(self, exit_type, value, traceback): + self.close() + + def _handle_event(self, event_message): + """ + Callback to be overwritten by subclasses for event listening. This is + notified whenever we receive an event from the control socket. + + :param stem.response.ControlMessage event_message: message received from + the control socket + """ + + pass + + def _connect(self): + self._launch_threads() + self._notify_status_listeners(State.INIT) + self._socket_connect() + self._is_authenticated = False + + def _close(self): + # Our is_alive() state is now false. Our reader thread should already be + # awake from recv() raising a closure exception. Wake up the event thread + # too so it can end. 
+ + self._event_notice.set() + self._is_authenticated = False + + # joins on our threads if it's safe to do so + + for t in (self._reader_thread, self._event_thread): + if t and t.is_alive() and threading.current_thread() != t: + t.join() + + self._notify_status_listeners(State.CLOSED) + + self._socket_close() + + def _post_authentication(self): + # actions to be taken after we have a newly authenticated connection + + self._is_authenticated = True + + def _notify_status_listeners(self, state): + """ + Informs our status listeners that a state change occurred. + + :param stem.control.State state: state change that has occurred + """ + + # Any changes to our is_alive() state happen under the send lock, so we + # need to have it to ensure it doesn't change beneath us. + + with self._socket._get_send_lock(): + with self._status_listeners_lock: + # States imply that our socket is either alive or not, which may not + # hold true when multiple events occur in quick succession. For + # instance, a sighup could cause two events (State.RESET for the sighup + # and State.CLOSE if it causes tor to crash). However, there's no + # guarantee of the order in which they occur, and it would be bad if + # listeners got the State.RESET last, implying that we were alive. + + expect_alive = None + + if state in (State.INIT, State.RESET): + expect_alive = True + elif state == State.CLOSED: + expect_alive = False + + change_timestamp = time.time() + + if expect_alive is not None and expect_alive != self.is_alive(): + return + + self._state_change_threads = list(filter(lambda t: t.is_alive(), self._state_change_threads)) + + for listener, spawn in self._status_listeners: + if spawn: + name = '%s notification' % state + args = (self, state, change_timestamp) + + notice_thread = threading.Thread(target = listener, args = args, name = name) + notice_thread.setDaemon(True) + notice_thread.start() + self._state_change_threads.append(notice_thread) + else: + listener(self, state, change_timestamp) + + def _launch_threads(self): + """ + Initializes daemon threads. Threads can't be reused so we need to recreate + them if we're restarted. + """ + + # In theory concurrent calls could result in multiple start() calls on a + # single thread, which would cause an unexpected exception. Best be safe. + + with self._socket._get_send_lock(): + if not self._reader_thread or not self._reader_thread.is_alive(): + self._reader_thread = threading.Thread(target = self._reader_loop, name = 'Tor Listener') + self._reader_thread.setDaemon(True) + self._reader_thread.start() + + if not self._event_thread or not self._event_thread.is_alive(): + self._event_thread = threading.Thread(target = self._event_loop, name = 'Event Notifier') + self._event_thread.setDaemon(True) + self._event_thread.start() + + def _reader_loop(self): + """ + Continually pulls from the control socket, directing the messages into + queues based on their type. Controller messages come in two varieties... + + * Responses to messages we've sent (GETINFO, SETCONF, etc). + * Asynchronous events, identified by a status code of 650. 
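To make the dispatch rule concrete: replies and asynchronous events are told apart purely by that 650 status code. A small illustrative sketch with hypothetical protocol lines:

::

  # '650' marks an asynchronous event; anything else answers a request we sent
  lines = [
    '250-version=0.2.7.6',  # reply to a GETINFO, routed to the reply queue
    '650 BW 1536 4096',     # bandwidth event, routed to the event queue
  ]

  for line in lines:
    if line.startswith('650'):
      print('event: %s' % line)
    else:
      print('reply: %s' % line)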
+ """ + + while self.is_alive(): + try: + control_message = self._socket.recv() + self._last_heartbeat = time.time() + + if control_message.content()[-1][0] == '650': + # asynchronous message, adds to the event queue and wakes up its handler + self._event_queue.put(control_message) + self._event_notice.set() + else: + # response to a msg() call + self._reply_queue.put(control_message) + except stem.ControllerError as exc: + # Assume that all exceptions belong to the reader. This isn't always + # true, but the msg() call can do a better job of sorting it out. + # + # Be aware that the msg() method relies on this to unblock callers. + + self._reply_queue.put(exc) + + def _event_loop(self): + """ + Continually pulls messages from the _event_queue and sends them to our + handle_event callback. This is done via its own thread so subclasses with a + lengthy handle_event implementation don't block further reading from the + socket. + """ + + while True: + try: + event_message = self._event_queue.get_nowait() + self._handle_event(event_message) + except queue.Empty: + if not self.is_alive(): + break + + self._event_notice.wait() + self._event_notice.clear() + + +class Controller(BaseController): + """ + Communicates with a control socket. This is built on top of the + BaseController and provides a more user friendly API for library users. + """ + + @staticmethod + def from_port(address = '127.0.0.1', port = 9051): + """ + Constructs a :class:`~stem.socket.ControlPort` based Controller. + + :param str address: ip address of the controller + :param int port: port number of the controller + + :returns: :class:`~stem.control.Controller` attached to the given port + + :raises: :class:`stem.SocketError` if we're unable to establish a connection + """ + + if not stem.util.connection.is_valid_ipv4_address(address): + raise ValueError('Invalid IP address: %s' % address) + elif not stem.util.connection.is_valid_port(port): + raise ValueError('Invalid port: %s' % port) + + control_port = stem.socket.ControlPort(address, port) + return Controller(control_port) + + @staticmethod + def from_socket_file(path = '/var/run/tor/control'): + """ + Constructs a :class:`~stem.socket.ControlSocketFile` based Controller. 
+ + :param str path: path where the control socket is located + + :returns: :class:`~stem.control.Controller` attached to the given socket file + + :raises: :class:`stem.SocketError` if we're unable to establish a connection + """ + + control_socket = stem.socket.ControlSocketFile(path) + return Controller(control_socket) + + def __init__(self, control_socket, is_authenticated = False): + self._is_caching_enabled = True + self._request_cache = {} + self._last_newnym = 0.0 + + self._cache_lock = threading.RLock() + + # mapping of event types to their listeners + + self._event_listeners = {} + self._event_listeners_lock = threading.RLock() + + # number of sequential 'GETINFO ip-to-country/*' lookups that have failed + + self._geoip_failure_count = 0 + self._enabled_features = [] + + super(Controller, self).__init__(control_socket, is_authenticated) + + def _sighup_listener(event): + if event.signal == Signal.RELOAD: + self.clear_cache() + self._notify_status_listeners(State.RESET) + + self.add_event_listener(_sighup_listener, EventType.SIGNAL) + + def _confchanged_listener(event): + if self.is_caching_enabled(): + self._set_cache(dict((k, None) for k in event.config), 'getconf') + + if 'exitpolicy' in event.config.keys(): + self._set_cache({'exitpolicy': None}) + + self.add_event_listener(_confchanged_listener, EventType.CONF_CHANGED) + + def connect(self): + super(Controller, self).connect() + self.clear_cache() + + def close(self): + # making a best-effort attempt to quit before detaching the socket + if self.is_alive(): + try: + self.msg('QUIT') + except: + pass + + self.clear_cache() + + super(Controller, self).close() + + def authenticate(self, *args, **kwargs): + """ + A convenience method to authenticate the controller. This is just a + pass-through to :func:`stem.connection.authenticate`. + """ + + import stem.connection + stem.connection.authenticate(self, *args, **kwargs) + + @with_default() + def get_info(self, params, default = UNDEFINED, get_bytes = False): + """ + get_info(params, default = UNDEFINED, get_bytes = False) + + Queries the control socket for the given GETINFO option. If provided a + default then that's returned if the GETINFO option is undefined or the + call fails for any reason (error response, control port closed, initiated, + etc). + + .. versionchanged:: 1.1.0 + Added the get_bytes argument. + + :param str,list params: GETINFO option or options to be queried + :param object default: response if the query fails + :param bool get_bytes: provides **bytes** values rather than a **str** under python 3.x + + :returns: + Response depends upon how we were called as follows... 
+ + * **str** with the response if our param was a **str** + * **dict** with the 'param => response' mapping if our param was a **list** + * default if one was provided and our call failed + + :raises: + * :class:`stem.ControllerError` if the call fails and we weren't + provided a default response + * :class:`stem.InvalidArguments` if the 'params' requested was + invalid + * :class:`stem.ProtocolError` if the geoip database is known to be + unavailable + """ + + start_time = time.time() + reply = {} + + if isinstance(params, (bytes, str_type)): + is_multiple = False + params = set([params]) + else: + if not params: + return {} + + is_multiple = True + params = set(params) + + # check for cached results + + from_cache = [param.lower() for param in params] + cached_results = self._get_cache_map(from_cache, 'getinfo') + + for key in cached_results: + user_expected_key = _case_insensitive_lookup(params, key) + reply[user_expected_key] = cached_results[key] + params.remove(user_expected_key) + + for param in params: + if param.startswith('ip-to-country/') and self.is_geoip_unavailable(): + # the geoip database already looks to be unavailable - abort the request + + raise stem.ProtocolError('Tor geoip database is unavailable') + + # if everything was cached then short circuit making the query + if not params: + log.trace('GETINFO %s (cache fetch)' % ' '.join(reply.keys())) + + if is_multiple: + return reply + else: + return list(reply.values())[0] + + try: + response = self.msg('GETINFO %s' % ' '.join(params)) + stem.response.convert('GETINFO', response) + response._assert_matches(params) + + # usually we want unicode values under python 3.x + + if stem.prereq.is_python_3() and not get_bytes: + response.entries = dict((k, stem.util.str_tools._to_unicode(v)) for (k, v) in response.entries.items()) + + reply.update(response.entries) + + if self.is_caching_enabled(): + to_cache = {} + + for key, value in response.entries.items(): + key = key.lower() # make case insensitive + + if key in CACHEABLE_GETINFO_PARAMS: + to_cache[key] = value + elif key.startswith('ip-to-country/'): + # both cache-able and means that we should reset the geoip failure count + to_cache[key] = value + self._geoip_failure_count = -1 + + self._set_cache(to_cache, 'getinfo') + + log.debug('GETINFO %s (runtime: %0.4f)' % (' '.join(params), time.time() - start_time)) + + if is_multiple: + return reply + else: + return list(reply.values())[0] + except stem.ControllerError as exc: + # bump geoip failure count if... + # * we're caching results + # * this was soley a geoip lookup + # * we've never had a successful geoip lookup (failure count isn't -1) + + is_geoip_request = len(params) == 1 and list(params)[0].startswith('ip-to-country/') + + if is_geoip_request and self.is_caching_enabled() and self._geoip_failure_count != -1: + self._geoip_failure_count += 1 + + if self.is_geoip_unavailable(): + log.warn("Tor's geoip database is unavailable.") + + log.debug('GETINFO %s (failed: %s)' % (' '.join(params), exc)) + + raise exc + + @with_default() + def get_version(self, default = UNDEFINED): + """ + get_version(default = UNDEFINED) + + A convenience method to get tor version that current controller is + connected to. 
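+
+    For example, a small sketch checking the connected tor against a version
+    requirement (assumes ``controller`` is an authenticated Controller)...
+
+    ::
+
+      import stem.version
+
+      if controller.get_version() >= stem.version.Requirement.ADD_ONION:
+        print('this tor supports ADD_ONION')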
+ + :param object default: response if the query fails + + :returns: :class:`~stem.version.Version` of the tor instance that we're + connected to + + :raises: + * :class:`stem.ControllerError` if unable to query the version + * **ValueError** if unable to parse the version + + An exception is only raised if we weren't provided a default response. + """ + + version = self._get_cache('version') + + if not version: + version = stem.version.Version(self.get_info('version')) + self._set_cache({'version': version}) + + return version + + @with_default() + def get_exit_policy(self, default = UNDEFINED): + """ + get_exit_policy(default = UNDEFINED) + + Effective ExitPolicy for our relay. This accounts for + ExitPolicyRejectPrivate and default policies. + + :param object default: response if the query fails + + :returns: :class:`~stem.exit_policy.ExitPolicy` of the tor instance that + we're connected to + + :raises: + * :class:`stem.ControllerError` if unable to query the policy + * **ValueError** if unable to parse the policy + + An exception is only raised if we weren't provided a default response. + """ + + with self._msg_lock: + config_policy = self._get_cache('exit_policy') + + if not config_policy: + policy = [] + + if self.get_conf('ExitPolicyRejectPrivate') == '1': + policy.append('reject private:*') + + for policy_line in self.get_conf('ExitPolicy', multiple = True): + policy += policy_line.split(',') + + policy += self.get_info('exit-policy/default').split(',') + + config_policy = stem.exit_policy.get_config_policy(policy, self.get_info('address', None)) + self._set_cache({'exit_policy': config_policy}) + + return config_policy + + @with_default() + def get_ports(self, listener_type, default = UNDEFINED): + """ + get_ports(listener_type, default = UNDEFINED) + + Provides the local ports where tor is listening for the given type of + connections. This is similar to + :func:`~stem.control.Controller.get_listeners`, but doesn't provide + addresses nor include non-local endpoints. + + .. versionadded:: 1.2.0 + + :param stem.control.Listener listener_type: connection type being handled + by the ports we return + :param object default: response if the query fails + + :returns: **list** of **ints** for the local ports where tor handles + connections of the given type + + :raises: :class:`stem.ControllerError` if unable to determine the ports + and no default was provided + """ + + return [port for (addr, port) in self.get_listeners(listener_type) if addr == '127.0.0.1'] + + @with_default() + def get_listeners(self, listener_type, default = UNDEFINED): + """ + get_listeners(listener_type, default = UNDEFINED) + + Provides the addresses and ports where tor is listening for connections of + the given type. This is similar to + :func:`~stem.control.Controller.get_ports` but includes listener addresses + and non-local endpoints. + + .. 
versionadded:: 1.2.0 + + :param stem.control.Listener listener_type: connection type being handled + by the listeners we return + :param object default: response if the query fails + + :returns: **list** of **(address, port)** tuples for the available + listeners + + :raises: :class:`stem.ControllerError` if unable to determine the listeners + and no default was provided + """ + + proxy_addrs = [] + query = 'net/listeners/%s' % listener_type.lower() + + try: + for listener in self.get_info(query).split(): + if not (listener.startswith('"') and listener.endswith('"')): + raise stem.ProtocolError("'GETINFO %s' responses are expected to be quoted: %s" % (query, listener)) + elif ':' not in listener: + raise stem.ProtocolError("'GETINFO %s' had a listener without a colon: %s" % (query, listener)) + + listener = listener[1:-1] # strip quotes + addr, port = listener.split(':') + + # Skip unix sockets, for instance... + # + # GETINFO net/listeners/control + # 250-net/listeners/control="unix:/tmp/tor/socket" + # 250 OK + + if addr == 'unix': + continue + + proxy_addrs.append((addr, port)) + except stem.InvalidArguments: + # Tor version is old (pre-tor-0.2.2.26-beta), use get_conf() instead. + # Some options (like the ORPort) can have optional attributes after the + # actual port number. + + port_option = { + Listener.OR: 'ORPort', + Listener.DIR: 'DirPort', + Listener.SOCKS: 'SocksPort', + Listener.TRANS: 'TransPort', + Listener.NATD: 'NatdPort', + Listener.DNS: 'DNSPort', + Listener.CONTROL: 'ControlPort', + }[listener_type] + + listener_option = { + Listener.OR: 'ORListenAddress', + Listener.DIR: 'DirListenAddress', + Listener.SOCKS: 'SocksListenAddress', + Listener.TRANS: 'TransListenAddress', + Listener.NATD: 'NatdListenAddress', + Listener.DNS: 'DNSListenAddress', + Listener.CONTROL: 'ControlListenAddress', + }[listener_type] + + port_value = self.get_conf(port_option).split()[0] + + for listener in self.get_conf(listener_option, multiple = True): + if ':' in listener: + addr, port = listener.split(':') + proxy_addrs.append((addr, port)) + else: + proxy_addrs.append((listener, port_value)) + + # validate that address/ports are valid, and convert ports to ints + + for addr, port in proxy_addrs: + if not stem.util.connection.is_valid_ipv4_address(addr): + raise stem.ProtocolError('Invalid address for a %s listener: %s' % (listener_type, addr)) + elif not stem.util.connection.is_valid_port(port): + raise stem.ProtocolError('Invalid port for a %s listener: %s' % (listener_type, port)) + + return [(addr, int(port)) for (addr, port) in proxy_addrs] + + @with_default() + def get_accounting_stats(self, default = UNDEFINED): + """ + get_accounting_stats(default = UNDEFINED) + + Provides stats related to our relaying limitations if AccountingMax was set + in our torrc. This provides a **namedtuple** with the following + attributes... + + * retrieved (float) - unix timestamp for when this was fetched + * status (str) - hibernation status of 'awake', 'soft', or 'hard' + * interval_end (datetime) + * time_until_reset (int) - seconds until our limits reset + * read_bytes (int) + * read_bytes_left (int) + * read_limit (int) + * written_bytes (int) + * write_bytes_left (int) + * write_limit (int) + + .. 
versionadded:: 1.3.0 + + :param object default: response if the query fails + + :returns: **namedtuple** with our accounting stats + + :raises: :class:`stem.ControllerError` if unable to determine the listeners + and no default was provided + """ + + if self.get_info('accounting/enabled') != '1': + raise stem.ControllerError("Accounting isn't enabled") + + retrieved = time.time() + status = self.get_info('accounting/hibernating') + interval_end = self.get_info('accounting/interval-end') + used = self.get_info('accounting/bytes') + left = self.get_info('accounting/bytes-left') + + interval_end = stem.util.str_tools._parse_timestamp(interval_end) + used_read, used_written = [int(val) for val in used.split(' ', 1)] + left_read, left_written = [int(val) for val in left.split(' ', 1)] + + return AccountingStats( + retrieved = retrieved, + status = status, + interval_end = interval_end, + time_until_reset = calendar.timegm(interval_end.timetuple()) - int(retrieved), + read_bytes = used_read, + read_bytes_left = left_read, + read_limit = used_read + left_read, + written_bytes = used_written, + write_bytes_left = left_written, + write_limit = used_written + left_written, + ) + + def get_socks_listeners(self, default = UNDEFINED): + """ + Provides the SOCKS **(address, port)** tuples that tor has open. + + .. deprecated:: 1.2.0 + Use :func:`~stem.control.Controller.get_listeners` with + **Listener.SOCKS** instead. + + :param object default: response if the query fails + + :returns: list of **(address, port)** tuples for the available SOCKS + listeners + + :raises: :class:`stem.ControllerError` if unable to determine the listeners + and no default was provided + """ + + return self.get_listeners(Listener.SOCKS, default) + + @with_default() + def get_protocolinfo(self, default = UNDEFINED): + """ + get_protocolinfo(default = UNDEFINED) + + A convenience method to get the protocol info of the controller. + + :param object default: response if the query fails + + :returns: :class:`~stem.response.protocolinfo.ProtocolInfoResponse` provided by tor + + :raises: + * :class:`stem.ProtocolError` if the PROTOCOLINFO response is + malformed + * :class:`stem.SocketError` if problems arise in establishing or + using the socket + + An exception is only raised if we weren't provided a default response. + """ + + import stem.connection + return stem.connection.get_protocolinfo(self) + + @with_default() + def get_user(self, default = UNDEFINED): + """ + get_user(default = UNDEFINED) + + Provides the user tor is running as. This often only works if tor is + running locally. Also, most of its checks are platform dependent, and hence + are not entirely reliable. + + .. versionadded:: 1.1.0 + + :param object default: response if the query fails + + :returns: str with the username tor is running as + """ + + user = self._get_cache('user') + + if not user: + user = self.get_info('process/user', None) + + if not user and self.is_localhost(): + pid = self.get_pid(None) + + if pid: + user = stem.util.system.user(pid) + + if user: + self._set_cache({'user': user}) + return user + else: + raise ValueError("Unable to resolve tor's user" if self.is_localhost() else "Tor isn't running locally") + + @with_default() + def get_pid(self, default = UNDEFINED): + """ + get_pid(default = UNDEFINED) + + Provides the process id of tor. This often only works if tor is running + locally. Also, most of its checks are platform dependent, and hence are not + entirely reliable. + + .. 
versionadded:: 1.1.0 + + :param object default: response if the query fails + + :returns: **int** for tor's pid + + :raises: **ValueError** if unable to determine the pid and no default was + provided + """ + + pid = self._get_cache('pid') + + if not pid: + getinfo_pid = self.get_info('process/pid', None) + + if getinfo_pid and getinfo_pid.isdigit(): + pid = int(getinfo_pid) + + if not pid and self.is_localhost(): + pid_file_path = self.get_conf('PidFile', None) + + if pid_file_path is not None: + with open(pid_file_path) as pid_file: + pid_file_contents = pid_file.read().strip() + + if pid_file_contents.isdigit(): + pid = int(pid_file_contents) + + if not pid: + pid = stem.util.system.pid_by_name('tor') + + if not pid: + control_socket = self.get_socket() + + if isinstance(control_socket, stem.socket.ControlPort): + pid = stem.util.system.pid_by_port(control_socket.get_port()) + elif isinstance(control_socket, stem.socket.ControlSocketFile): + pid = stem.util.system.pid_by_open_file(control_socket.get_socket_path()) + + if pid: + self._set_cache({'pid': pid}) + return pid + else: + raise ValueError("Unable to resolve tor's pid" if self.is_localhost() else "Tor isn't running locally") + + @with_default() + def get_microdescriptor(self, relay = None, default = UNDEFINED): + """ + get_microdescriptor(relay = None, default = UNDEFINED) + + Provides the microdescriptor for the relay with the given fingerprint or + nickname. If the relay identifier could be either a fingerprint *or* + nickname then it's queried as a fingerprint. + + If no **relay** is provided then this defaults to ourselves. Remember that + this requires that we've retrieved our own descriptor from remote + authorities so this both won't be available for newly started relays and + may be up to around an hour out of date. + + .. versionchanged:: 1.3.0 + Changed so we'd fetch our own descriptor if no 'relay' is provided. + + :param str relay: fingerprint or nickname of the relay to be queried + :param object default: response if the query fails + + :returns: :class:`~stem.descriptor.microdescriptor.Microdescriptor` for the given relay + + :raises: + * :class:`stem.DescriptorUnavailable` if unable to provide a descriptor + for the given relay + * :class:`stem.ControllerError` if unable to query the descriptor + * **ValueError** if **relay** doesn't conform with the pattern for being + a fingerprint or nickname + + An exception is only raised if we weren't provided a default response. 
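+
+    A short usage sketch (the 'caerSidi' nickname is only an example)...
+
+    ::
+
+      desc = controller.get_microdescriptor('caerSidi', None)
+
+      if desc:
+        print(desc.onion_key)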
+ """ + + if relay is None: + try: + relay = self.get_info('fingerprint') + except stem.ControllerError as exc: + raise stem.ControllerError('Unable to determine our own fingerprint: %s' % exc) + + if stem.util.tor_tools.is_valid_fingerprint(relay): + query = 'md/id/%s' % relay + elif stem.util.tor_tools.is_valid_nickname(relay): + query = 'md/name/%s' % relay + else: + raise ValueError("'%s' isn't a valid fingerprint or nickname" % relay) + + try: + desc_content = self.get_info(query, get_bytes = True) + except stem.InvalidArguments as exc: + if str(exc).startswith('GETINFO request contained unrecognized keywords:'): + raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay) + else: + raise exc + + if not desc_content: + raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') + + return stem.descriptor.microdescriptor.Microdescriptor(desc_content) + + @with_default(yields = True) + def get_microdescriptors(self, default = UNDEFINED): + """ + get_microdescriptors(default = UNDEFINED) + + Provides an iterator for all of the microdescriptors that tor currently + knows about. + + **Tor does not expose this information via the control protocol** + (:trac:`8323`). Until it does this reads the microdescriptors from disk, + and hence won't work remotely or if we lack read permissions. + + :param list default: items to provide if the query fails + + :returns: iterates over + :class:`~stem.descriptor.microdescriptor.Microdescriptor` for relays in + the tor network + + :raises: :class:`stem.ControllerError` if unable to query tor and no + default was provided + """ + + try: + data_directory = self.get_conf('DataDirectory') + except stem.ControllerError as exc: + raise stem.OperationFailed(message = 'Unable to determine the data directory (%s)' % exc) + + cached_descriptor_path = os.path.join(data_directory, 'cached-microdescs') + + if not os.path.exists(data_directory): + raise stem.OperationFailed(message = "Data directory reported by tor doesn't exist (%s)" % data_directory) + elif not os.path.exists(cached_descriptor_path): + raise stem.OperationFailed(message = "Data directory doens't contain cached microescriptors (%s)" % cached_descriptor_path) + + with stem.descriptor.reader.DescriptorReader([cached_descriptor_path]) as reader: + for desc in reader: + # It shouldn't be possible for these to be something other than + # microdescriptors but as the saying goes: trust but verify. + + if not isinstance(desc, stem.descriptor.microdescriptor.Microdescriptor): + raise stem.OperationFailed(message = 'BUG: Descriptor reader provided non-microdescriptor content (%s)' % type(desc)) + + yield desc + + @with_default() + def get_server_descriptor(self, relay = None, default = UNDEFINED): + """ + get_server_descriptor(relay = None, default = UNDEFINED) + + Provides the server descriptor for the relay with the given fingerprint or + nickname. If the relay identifier could be either a fingerprint *or* + nickname then it's queried as a fingerprint. + + If no **relay** is provided then this defaults to ourselves. Remember that + this requires that we've retrieved our own descriptor from remote + authorities so this both won't be available for newly started relays and + may be up to around an hour out of date. 
+ + **As of Tor version 0.2.3.25 relays no longer get server descriptors by + default.** It's advised that you use microdescriptors instead, but if you + really need server descriptors then you can get them by setting + 'UseMicrodescriptors 0'. + + .. versionchanged:: 1.3.0 + Changed so we'd fetch our own descriptor if no 'relay' is provided. + + :param str relay: fingerprint or nickname of the relay to be queried + :param object default: response if the query fails + + :returns: :class:`~stem.descriptor.server_descriptor.RelayDescriptor` for the given relay + + :raises: + * :class:`stem.DescriptorUnavailable` if unable to provide a descriptor + for the given relay + * :class:`stem.ControllerError` if unable to query the descriptor + * **ValueError** if **relay** doesn't conform with the pattern for being + a fingerprint or nickname + + An exception is only raised if we weren't provided a default response. + """ + + try: + if relay is None: + try: + relay = self.get_info('fingerprint') + except stem.ControllerError as exc: + raise stem.ControllerError('Unable to determine our own fingerprint: %s' % exc) + + if stem.util.tor_tools.is_valid_fingerprint(relay): + query = 'desc/id/%s' % relay + elif stem.util.tor_tools.is_valid_nickname(relay): + query = 'desc/name/%s' % relay + else: + raise ValueError("'%s' isn't a valid fingerprint or nickname" % relay) + + try: + desc_content = self.get_info(query, get_bytes = True) + except stem.InvalidArguments as exc: + if str(exc).startswith('GETINFO request contained unrecognized keywords:'): + raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay) + else: + raise exc + + if not desc_content: + raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') + + return stem.descriptor.server_descriptor.RelayDescriptor(desc_content) + except Exception as exc: + if not self._is_server_descriptors_available(): + raise ValueError(SERVER_DESCRIPTORS_UNSUPPORTED) + + raise exc + + @with_default(yields = True) + def get_server_descriptors(self, default = UNDEFINED): + """ + get_server_descriptors(default = UNDEFINED) + + Provides an iterator for all of the server descriptors that tor currently + knows about. + + **As of Tor version 0.2.3.25 relays no longer get server descriptors by + default.** It's advised that you use microdescriptors instead, but if you + really need server descriptors then you can get them by setting + 'UseMicrodescriptors 0'. + + :param list default: items to provide if the query fails + + :returns: iterates over + :class:`~stem.descriptor.server_descriptor.RelayDescriptor` for relays in + the tor network + + :raises: :class:`stem.ControllerError` if unable to query tor and no + default was provided + """ + + # TODO: We should iterate over the descriptors as they're read from the + # socket rather than reading the whole thing into memory. + # + # https://trac.torproject.org/8248 + + desc_content = self.get_info('desc/all-recent', get_bytes = True) + + if not desc_content: + if not self._is_server_descriptors_available(): + raise stem.ControllerError(SERVER_DESCRIPTORS_UNSUPPORTED) + else: + raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') + + for desc in stem.descriptor.server_descriptor._parse_file(io.BytesIO(desc_content)): + yield desc + + def _is_server_descriptors_available(self): + """ + Checks to see if tor server descriptors should be available or not. 
+ """ + + return self.get_version() < stem.version.Requirement.MICRODESCRIPTOR_IS_DEFAULT or \ + self.get_conf('UseMicrodescriptors', None) == '0' + + @with_default() + def get_network_status(self, relay = None, default = UNDEFINED): + """ + get_network_status(relay = None, default = UNDEFINED) + + Provides the router status entry for the relay with the given fingerprint + or nickname. If the relay identifier could be either a fingerprint *or* + nickname then it's queried as a fingerprint. + + This provides + :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` + instances if tor is using microdescriptors... + + :: + + controller.get_conf('UseMicrodescriptors', '0') == '1' + + ... and :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` + otherwise. + + If no **relay** is provided then this defaults to ourselves. Remember that + this requires that we've retrieved our own descriptor from remote + authorities so this both won't be available for newly started relays and + may be up to around an hour out of date. + + .. versionchanged:: 1.3.0 + Changed so we'd fetch our own descriptor if no 'relay' is provided. + + :param str relay: fingerprint or nickname of the relay to be queried + :param object default: response if the query fails + + :returns: :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` + for the given relay + + :raises: + * :class:`stem.DescriptorUnavailable` if unable to provide a descriptor + for the given relay + * :class:`stem.ControllerError` if unable to query the descriptor + * **ValueError** if **relay** doesn't conform with the pattern for being + a fingerprint or nickname + + An exception is only raised if we weren't provided a default response. + """ + + if relay is None: + try: + relay = self.get_info('fingerprint') + except stem.ControllerError as exc: + raise stem.ControllerError('Unable to determine our own fingerprint: %s' % exc) + + if stem.util.tor_tools.is_valid_fingerprint(relay): + query = 'ns/id/%s' % relay + elif stem.util.tor_tools.is_valid_nickname(relay): + query = 'ns/name/%s' % relay + else: + raise ValueError("'%s' isn't a valid fingerprint or nickname" % relay) + + try: + desc_content = self.get_info(query, get_bytes = True) + except stem.InvalidArguments as exc: + if str(exc).startswith('GETINFO request contained unrecognized keywords:'): + raise stem.DescriptorUnavailable("Tor was unable to provide the descriptor for '%s'" % relay) + else: + raise exc + + if not desc_content: + raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') + + if self.get_conf('UseMicrodescriptors', '0') == '1': + return stem.descriptor.router_status_entry.RouterStatusEntryMicroV3(desc_content) + else: + return stem.descriptor.router_status_entry.RouterStatusEntryV3(desc_content) + + @with_default(yields = True) + def get_network_statuses(self, default = UNDEFINED): + """ + get_network_statuses(default = UNDEFINED) + + Provides an iterator for all of the router status entries that tor + currently knows about. + + This provides + :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` + instances if tor is using microdescriptors... + + :: + + controller.get_conf('UseMicrodescriptors', '0') == '1' + + ... and :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` + otherwise. 
+ + :param list default: items to provide if the query fails + + :returns: iterates over + :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` for + relays in the tor network + + :raises: :class:`stem.ControllerError` if unable to query tor and no + default was provided + """ + + # TODO: We should iterate over the descriptors as they're read from the + # socket rather than reading the whole thing into memory. + # + # https://trac.torproject.org/8248 + + if self.get_conf('UseMicrodescriptors', '0') == '1': + desc_class = stem.descriptor.router_status_entry.RouterStatusEntryMicroV3 + else: + desc_class = stem.descriptor.router_status_entry.RouterStatusEntryV3 + + desc_content = self.get_info('ns/all', get_bytes = True) + + if not desc_content: + raise stem.DescriptorUnavailable('Descriptor information is unavailable, tor might still be downloading it') + + desc_iterator = stem.descriptor.router_status_entry._parse_file( + io.BytesIO(desc_content), + True, + entry_class = desc_class, + ) + + for desc in desc_iterator: + yield desc + + @with_default() + def get_hidden_service_descriptor(self, address, default = UNDEFINED, servers = None, await_result = True): + """ + get_hidden_service_descriptor(address, default = UNDEFINED, servers = None, await_result = True) + + Provides the descriptor for a hidden service. The **address** is the + '.onion' address of the hidden service (for instance 3g2upl4pq6kufc4m.onion + for DuckDuckGo). + + If **await_result** is **True** then this blocks until we either receive + the descriptor or the request fails. If **False** this returns right away. + + .. versionadded:: 1.4.0 + + :param str address: address of the hidden service descriptor, the '.onion' suffix is optional + :param object default: response if the query fails + :param list servers: requrest the descriptor from these specific servers + + :returns: :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor` + for the given service if **await_result** is **True**, or **None** otherwise + + :raises: + * :class:`stem.DescriptorUnavailable` if **await_result** is **True** and + unable to provide a descriptor for the given service + * :class:`stem.ControllerError` if unable to query the descriptor + * **ValueError** if **address** doesn't conform with the pattern of a + hidden service address + + An exception is only raised if we weren't provided a default response. 
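+
+    For example, fetching the DuckDuckGo descriptor mentioned above (a sketch,
+    this blocks until the descriptor is retrieved or the request fails)...
+
+    ::
+
+      desc = controller.get_hidden_service_descriptor('3g2upl4pq6kufc4m')
+      print(desc)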
+ """ + + if address.endswith('.onion'): + address = address[:-6] + + if not stem.util.tor_tools.is_valid_hidden_service_address(address): + raise ValueError("'%s.onion' isn't a valid hidden service address" % address) + + if self.get_version() < stem.version.Requirement.HSFETCH: + raise stem.UnsatisfiableRequest(message = 'HSFETCH was added in tor version %s' % stem.version.Requirement.HSFETCH) + + hs_desc_queue, hs_desc_listener = queue.Queue(), None + hs_desc_content_queue, hs_desc_content_listener = queue.Queue(), None + + if await_result: + def hs_desc_listener(event): + hs_desc_queue.put(event) + + def hs_desc_content_listener(event): + hs_desc_content_queue.put(event) + + self.add_event_listener(hs_desc_listener, EventType.HS_DESC) + self.add_event_listener(hs_desc_content_listener, EventType.HS_DESC_CONTENT) + + try: + request = 'HSFETCH %s' % address + + if servers: + request += ' '.join(['SERVER=%s' % s for s in servers]) + + response = self.msg(request) + stem.response.convert('SINGLELINE', response) + + if not response.is_ok(): + raise stem.ProtocolError('HSFETCH returned unexpected response code: %s' % response.code) + + if not await_result: + return None # not waiting, so nothing to provide back + else: + while True: + event = hs_desc_content_queue.get() + + if event.address == address: + if event.descriptor: + return event.descriptor + else: + # no descriptor, looking through HS_DESC to figure out why + + while True: + event = hs_desc_queue.get() + + if event.address == address and event.action == stem.HSDescAction.FAILED: + if event.reason == stem.HSDescReason.NOT_FOUND: + raise stem.DescriptorUnavailable('No running hidden service at %s.onion' % address) + else: + raise stem.DescriptorUnavailable('Unable to retrieve the descriptor for %s.onion (retrieved from %s): %s' % (address, event.directory_fingerprint, event.reason)) + finally: + if hs_desc_listener: + self.remove_event_listener(hs_desc_listener) + + if hs_desc_content_listener: + self.remove_event_listener(hs_desc_content_listener) + + def get_conf(self, param, default = UNDEFINED, multiple = False): + """ + Queries the current value for a configuration option. Some configuration + options (like the ExitPolicy) can have multiple values. This provides a + **list** with all of the values if **multiple** is **True**. Otherwise this + will be a **str** with the first value. + + If provided with a **default** then that is provided if the configuration + option was unset or the query fails (invalid configuration option, error + response, control port closed, initiated, etc). + + If the configuration value is unset and no **default** was given then this + provides **None** if **multiple** was **False** and an empty list if it was + **True**. + + :param str param: configuration option to be queried + :param object default: response if the option is unset or the query fails + :param bool multiple: if **True** then provides a list with all of the + present values (this is an empty list if the config option is unset) + + :returns: + Response depends upon how we were called as follows... 
+ + * **str** with the configuration value if **multiple** was **False**, + **None** if it was unset + * **list** with the response strings if multiple was **True** + * default if one was provided and the configuration option was either + unset or our call failed + + :raises: + * :class:`stem.ControllerError` if the call fails and we weren't + provided a default response + * :class:`stem.InvalidArguments` if the configuration option + requested was invalid + """ + + # Config options are case insensitive and don't contain whitespace. Using + # strip so the following check will catch whitespace-only params. + + param = param.lower().strip() + + if not param: + return default if default != UNDEFINED else None + + entries = self.get_conf_map(param, default, multiple) + return _case_insensitive_lookup(entries, param, default) + + def get_conf_map(self, params, default = UNDEFINED, multiple = True): + """ + Similar to :func:`~stem.control.Controller.get_conf` but queries multiple + configuration options, providing back a mapping of those options to their + values. + + There are three use cases for GETCONF: + + 1. a single value is provided (e.g. **ControlPort**) + 2. multiple values are provided for the option (e.g. **ExitPolicy**) + 3. a set of options that weren't necessarily requested are returned (for + instance querying **HiddenServiceOptions** gives **HiddenServiceDir**, + **HiddenServicePort**, etc) + + The vast majority of the options fall into the first two categories, in + which case calling :func:`~stem.control.Controller.get_conf` is sufficient. + However, for batch queries or the special options that give a set of values + this provides back the full response. As of tor version 0.2.1.25 + **HiddenServiceOptions** was the only option that falls into the third + category. + + :param str,list params: configuration option(s) to be queried + :param object default: value for the mappings if the configuration option + is either undefined or the query fails + :param bool multiple: if **True** then the values provided are lists with + all of the present values + + :returns: + **dict** of the 'config key => value' mappings. The value is a... 
+ + * **str** if **multiple** is **False**, **None** if the configuration + option is unset + * **list** if **multiple** is **True** + * the **default** if it was set and the value was either undefined or our + lookup failed + + :raises: + * :class:`stem.ControllerError` if the call fails and we weren't provided + a default response + * :class:`stem.InvalidArguments` if the configuration option requested + was invalid + """ + + start_time = time.time() + reply = {} + + if isinstance(params, (bytes, str_type)): + params = [params] + + # remove strings which contain only whitespace + params = [entry for entry in params if entry.strip()] + + if params == []: + return {} + + # translate context sensitive options + lookup_params = set([MAPPED_CONFIG_KEYS.get(entry, entry) for entry in params]) + + # check for cached results + + from_cache = [param.lower() for param in lookup_params] + cached_results = self._get_cache_map(from_cache, 'getconf') + + for key in cached_results: + user_expected_key = _case_insensitive_lookup(lookup_params, key) + reply[user_expected_key] = cached_results[key] + lookup_params.remove(user_expected_key) + + # if everything was cached then short circuit making the query + if not lookup_params: + log.trace('GETCONF %s (cache fetch)' % ' '.join(reply.keys())) + return self._get_conf_dict_to_response(reply, default, multiple) + + try: + response = self.msg('GETCONF %s' % ' '.join(lookup_params)) + stem.response.convert('GETCONF', response) + reply.update(response.entries) + + if self.is_caching_enabled(): + to_cache = dict((k.lower(), v) for k, v in response.entries.items()) + + for key in UNCACHEABLE_GETCONF_PARAMS: + if key in to_cache: + del to_cache[key] + + self._set_cache(to_cache, 'getconf') + + # Maps the entries back to the parameters that the user requested so the + # capitalization matches (ie, if they request "exitpolicy" then that + # should be the key rather than "ExitPolicy"). When the same + # configuration key is provided multiple times this determines the case + # based on the first and ignores the rest. + # + # This retains the tor provided camel casing of MAPPED_CONFIG_KEYS + # entries since the user didn't request those by their key, so we can't + # be sure what they wanted. + + for key in reply: + if not key.lower() in MAPPED_CONFIG_KEYS.values(): + user_expected_key = _case_insensitive_lookup(params, key, key) + + if key != user_expected_key: + reply[user_expected_key] = reply[key] + del reply[key] + + log.debug('GETCONF %s (runtime: %0.4f)' % (' '.join(lookup_params), time.time() - start_time)) + return self._get_conf_dict_to_response(reply, default, multiple) + except stem.ControllerError as exc: + log.debug('GETCONF %s (failed: %s)' % (' '.join(lookup_params), exc)) + + if default != UNDEFINED: + return dict((param, default) for param in params) + else: + raise exc + + def _get_conf_dict_to_response(self, config_dict, default, multiple): + """ + Translates a dictionary of 'config key => [value1, value2...]' into the + return value of :func:`~stem.control.Controller.get_conf_map`, taking into + account what the caller requested. + """ + + return_dict = {} + + for key, values in list(config_dict.items()): + if values == []: + # config option was unset + if default != UNDEFINED: + return_dict[key] = default + else: + return_dict[key] = [] if multiple else None + else: + return_dict[key] = values if multiple else values[0] + + return return_dict + + def set_conf(self, param, value): + """ + Changes the value of a tor configuration option. 
Our value can be any of + the following... + + * a string to set a single value + * a list of strings to set a series of values (for instance the ExitPolicy) + * None to either set the value to 0/NULL + + :param str param: configuration option to be set + :param str,list value: value to set the parameter to + + :raises: + * :class:`stem.ControllerError` if the call fails + * :class:`stem.InvalidArguments` if configuration options + requested was invalid + * :class:`stem.InvalidRequest` if the configuration setting is + impossible or if there's a syntax error in the configuration values + """ + + self.set_options({param: value}, False) + + def reset_conf(self, *params): + """ + Reverts one or more parameters to their default values. + + :param str params: configuration option to be reset + + :raises: + * :class:`stem.ControllerError` if the call fails + * :class:`stem.InvalidArguments` if configuration options requested was invalid + * :class:`stem.InvalidRequest` if the configuration setting is + impossible or if there's a syntax error in the configuration values + """ + + self.set_options(dict([(entry, None) for entry in params]), True) + + def set_options(self, params, reset = False): + """ + Changes multiple tor configuration options via either a SETCONF or + RESETCONF query. Both behave identically unless our value is None, in which + case SETCONF sets the value to 0 or NULL, and RESETCONF returns it to its + default value. This accepts str, list, or None values in a similar fashion + to :func:`~stem.control.Controller.set_conf`. For example... + + :: + + my_controller.set_options({ + 'Nickname': 'caerSidi', + 'ExitPolicy': ['accept *:80', 'accept *:443', 'reject *:*'], + 'ContactInfo': 'caerSidi-exit@someplace.com', + 'Log': None, + }) + + The params can optionally be a list of key/value tuples, though the only + reason this type of argument would be useful is for hidden service + configuration (those options are order dependent). 
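+
+    For the order dependent hidden service case the tuple form might look like
+    the following (an illustrative sketch, the directory and ports are just
+    examples)...
+
+    ::
+
+      my_controller.set_options([
+        ('HiddenServiceDir', '/var/lib/tor/hidden_service'),
+        ('HiddenServicePort', '80 127.0.0.1:8080'),
+      ])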
+ + :param dict,list params: mapping of configuration options to the values + we're setting it to + :param bool reset: issues a RESETCONF, returning **None** values to their + defaults if **True** + + :raises: + * :class:`stem.ControllerError` if the call fails + * :class:`stem.InvalidArguments` if configuration options + requested was invalid + * :class:`stem.InvalidRequest` if the configuration setting is + impossible or if there's a syntax error in the configuration values + """ + + start_time = time.time() + + # constructs the SETCONF or RESETCONF query + query_comp = ['RESETCONF' if reset else 'SETCONF'] + + if isinstance(params, dict): + params = list(params.items()) + + for param, value in params: + if isinstance(value, str): + query_comp.append('%s="%s"' % (param, value.strip())) + elif value: + query_comp.extend(['%s="%s"' % (param, val.strip()) for val in value]) + else: + query_comp.append(param) + + query = ' '.join(query_comp) + response = self.msg(query) + stem.response.convert('SINGLELINE', response) + + if response.is_ok(): + log.debug('%s (runtime: %0.4f)' % (query, time.time() - start_time)) + + if self.is_caching_enabled(): + to_cache = {} + + for param, value in params: + param = param.lower() + + if isinstance(value, (bytes, str_type)): + value = [value] + + to_cache[param] = value + + if param == 'exitpolicy': + self._set_cache({'exitpolicy': None}) + + self._set_cache(to_cache, 'getconf') + else: + log.debug('%s (failed, code: %s, message: %s)' % (query, response.code, response.message)) + + if response.code == '552': + if response.message.startswith("Unrecognized option: Unknown option '"): + key = response.message[37:response.message.find("'", 37)] + raise stem.InvalidArguments(response.code, response.message, [key]) + raise stem.InvalidRequest(response.code, response.message) + elif response.code in ('513', '553'): + raise stem.InvalidRequest(response.code, response.message) + else: + raise stem.ProtocolError('Returned unexpected status code: %s' % response.code) + + @with_default() + def get_hidden_service_conf(self, default = UNDEFINED): + """ + get_hidden_service_conf(default = UNDEFINED) + + This provides a mapping of hidden service directories to their + attribute's key/value pairs. All hidden services are assured to have a + 'HiddenServicePort', but other entries may or may not exist. + + :: + + { + "/var/lib/tor/hidden_service_empty/": { + "HiddenServicePort": [ + ] + }, + "/var/lib/tor/hidden_service_with_two_ports/": { + "HiddenServiceAuthorizeClient": "stealth a, b", + "HiddenServicePort": [ + (8020, "127.0.0.1", 8020), # the ports order is kept + (8021, "127.0.0.1", 8021) + ], + "HiddenServiceVersion": "2" + }, + } + + .. 
versionadded:: 1.3.0 + + :param object default: response if the query fails + + :returns: **dict** with the hidden service configuration + + :raises: :class:`stem.ControllerError` if the call fails and we weren't + provided a default response + """ + + start_time = time.time() + + try: + response = self.msg('GETCONF HiddenServiceOptions') + stem.response.convert('GETCONF', response) + log.debug('GETCONF HiddenServiceOptions (runtime: %0.4f)' % + (time.time() - start_time)) + except stem.ControllerError as exc: + log.debug('GETCONF HiddenServiceOptions (failed: %s)' % exc) + raise exc + + service_dir_map = OrderedDict() + directory = None + + for status_code, divider, content in response.content(): + if content == 'HiddenServiceOptions': + continue + + if '=' not in content: + continue + + k, v = content.split('=', 1) + + if k == 'HiddenServiceDir': + directory = v + service_dir_map[directory] = {'HiddenServicePort': []} + elif k == 'HiddenServicePort': + port = target_port = v + target_address = '127.0.0.1' + + if not v.isdigit(): + port, target = v.split() + + if target.isdigit(): + target_port = target + else: + target_address, target_port = target.split(':') + + if not stem.util.connection.is_valid_port(port): + raise stem.ProtocolError('GETCONF provided an invalid HiddenServicePort port (%s): %s' % (port, content)) + elif not stem.util.connection.is_valid_ipv4_address(target_address): + raise stem.ProtocolError('GETCONF provided an invalid HiddenServicePort target address (%s): %s' % (target_address, content)) + elif not stem.util.connection.is_valid_port(target_port): + raise stem.ProtocolError('GETCONF provided an invalid HiddenServicePort target port (%s): %s' % (target_port, content)) + + service_dir_map[directory]['HiddenServicePort'].append((int(port), target_address, int(target_port))) + else: + service_dir_map[directory][k] = v + + return service_dir_map + + def set_hidden_service_conf(self, conf): + """ + Update all the configured hidden services from a dictionary having + the same format as + :func:`~stem.control.Controller.get_hidden_service_conf`. + + For convenience the HiddenServicePort entries can be an integer, string, or + tuple. If an **int** then we treat it as just a port. If a **str** we pass + that directly as the HiddenServicePort. And finally, if a **tuple** then + it's expected to be the **(port, target_address, target_port)** as provided + by :func:`~stem.control.Controller.get_hidden_service_conf`. + + This is to say the following three are equivalent... + + :: + + "HiddenServicePort": [ + 80, + '80 127.0.0.1:80', + (80, '127.0.0.1', 80), + ] + + .. versionadded:: 1.3.0 + + :param dict conf: configuration dictionary + + :raises: + * :class:`stem.ControllerError` if the call fails + * :class:`stem.InvalidArguments` if configuration options + requested was invalid + * :class:`stem.InvalidRequest` if the configuration setting is + impossible or if there's a syntax error in the configuration values + """ + + # If we're not adding or updating any hidden services then call RESETCONF + # so we drop existing values. Otherwise calling SETCONF is a no-op. 
+ + if not conf: + self.reset_conf('HiddenServiceDir') + return + + # Convert conf dictionary into a list of ordered config tuples + + hidden_service_options = [] + + for directory in conf: + hidden_service_options.append(('HiddenServiceDir', directory)) + + for k, v in list(conf[directory].items()): + if k == 'HiddenServicePort': + for entry in v: + if isinstance(entry, int): + entry = '%s 127.0.0.1:%s' % (entry, entry) + elif isinstance(entry, str): + pass # just pass along what the user gave us + elif isinstance(entry, tuple): + port, target_address, target_port = entry + entry = '%s %s:%s' % (port, target_address, target_port) + + hidden_service_options.append(('HiddenServicePort', entry)) + else: + hidden_service_options.append((k, str(v))) + + self.set_options(hidden_service_options) + + def create_hidden_service(self, path, port, target_address = None, target_port = None, auth_type = None, client_names = None): + """ + Create a new hidden service. If the directory is already present, a + new port is added. This provides a **namedtuple** of the following... + + * path (str) - hidden service directory + + * hostname (str) - Content of the hostname file, if no **client_names** + are provided this is the onion address of the service. This is only + retrieved if we can read the hidden service directory. + + * hostname_for_client (dict) - mapping of client names to their onion + address, this is only set if the **client_names** was provided and we + can read the hidden service directory + + * config (dict) - tor's new hidden service configuration + + Our *.onion address is fetched by reading the hidden service directory. + However, this directory is only readable by the tor user, so if unavailable + the **hostname** will be **None**. + + **As of Tor 0.2.7.1 there's two ways for creating hidden services. This is + no longer the recommended method.** Rather, try using + :func:`~stem.control.Controller.create_ephemeral_hidden_service` instead. + + .. versionadded:: 1.3.0 + + .. versionchanged:: 1.4.0 + Added the auth_type and client_names arguments. 
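+
+    A brief usage sketch (the path and ports below are merely examples)...
+
+    ::
+
+      result = controller.create_hidden_service('/var/lib/tor/my_service', 80, target_port = 8080)
+
+      if result:
+        print('service available at %s' % result.hostname)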
+ + :param str path: path for the hidden service's data directory + :param int port: hidden service port + :param str target_address: address of the service, by default 127.0.0.1 + :param int target_port: port of the service, by default this is the same as + **port** + :param str auth_type: authentication type: basic, stealth or None to disable auth + :param list client_names: client names (1-16 characters "A-Za-z0-9+-_") + + :returns: **CreateHiddenServiceOutput** if we create or update a hidden service, **None** otherwise + + :raises: :class:`stem.ControllerError` if the call fails + """ + + if not stem.util.connection.is_valid_port(port): + raise ValueError("%s isn't a valid port number" % port) + elif target_address and not stem.util.connection.is_valid_ipv4_address(target_address): + raise ValueError("%s isn't a valid IPv4 address" % target_address) + elif target_port is not None and not stem.util.connection.is_valid_port(target_port): + raise ValueError("%s isn't a valid port number" % target_port) + elif auth_type not in (None, 'basic', 'stealth'): + raise ValueError("%s isn't a recognized type of authentication" % auth_type) + + port = int(port) + target_address = target_address if target_address else '127.0.0.1' + target_port = port if target_port is None else int(target_port) + + conf = self.get_hidden_service_conf() + + if path in conf and (port, target_address, target_port) in conf[path]['HiddenServicePort']: + return None + + conf.setdefault(path, OrderedDict()).setdefault('HiddenServicePort', []).append((port, target_address, target_port)) + + if auth_type and client_names: + hsac = "%s %s" % (auth_type, ','.join(client_names)) + conf[path]['HiddenServiceAuthorizeClient'] = hsac + + self.set_hidden_service_conf(conf) + + hostname, hostname_for_client = None, {} + + if self.is_localhost(): + hostname_path = os.path.join(path, 'hostname') + + if not os.path.isabs(hostname_path): + cwd = stem.util.system.cwd(self.get_pid(None)) + + if cwd: + hostname_path = stem.util.system.expand_path(hostname_path, cwd) + + if os.path.isabs(hostname_path): + start_time = time.time() + + while not os.path.exists(hostname_path): + wait_time = time.time() - start_time + + if wait_time >= 3: + break + else: + time.sleep(0.05) + + try: + with open(hostname_path) as hostname_file: + hostname = hostname_file.read().strip() + + if client_names and '\n' in hostname: + # When there's multiple clients this looks like... + # + # ndisjxzkgcdhrwqf.onion sjUwjTSPznqWLdOPuwRUzg # client: c1 + # ndisjxzkgcdhrwqf.onion sUu92axuL5bKnA76s2KRfw # client: c2 + + for line in hostname.splitlines(): + if ' # client: ' in line: + address = line.split()[0] + client = line.split(' # client: ', 1)[1] + + if len(address) == 22 and address.endswith('.onion'): + hostname_for_client[client] = address + except: + pass + + return CreateHiddenServiceOutput( + path = path, + hostname = hostname, + hostname_for_client = hostname_for_client, + config = conf, + ) + + def remove_hidden_service(self, path, port = None): + """ + Discontinues a given hidden service. + + .. 
versionadded:: 1.3.0 + + :param str path: path for the hidden service's data directory + :param int port: hidden service port + + :returns: **True** if the hidden service is discontinued, **False** if it + wasn't running in the first place + + :raises: :class:`stem.ControllerError` if the call fails + """ + + if port and not stem.util.connection.is_valid_port(port): + raise ValueError("%s isn't a valid port number" % port) + + port = int(port) if port else None + conf = self.get_hidden_service_conf() + + if path not in conf: + return False + + if not port: + del conf[path] + else: + to_remove = [entry for entry in conf[path]['HiddenServicePort'] if entry[0] == port] + + if not to_remove: + return False + + for entry in to_remove: + conf[path]['HiddenServicePort'].remove(entry) + + if not conf[path]['HiddenServicePort']: + del conf[path] # no ports left + + self.set_hidden_service_conf(conf) + return True + + @with_default() + def list_ephemeral_hidden_services(self, default = UNDEFINED, our_services = True, detached = False): + """ + list_ephemeral_hidden_services(default = UNDEFINED, our_services = True, detached = False) + + Lists hidden service addresses created by + :func:`~stem.control.Controller.create_ephemeral_hidden_service`. + + .. versionadded:: 1.4.0 + + :param object default: response if the query fails + :param bool our_services: include services created with this controller + that weren't flagged as 'detached' + :param bool detached: include services whos contiuation isn't tied to a + controller + + :returns: **list** of hidden service addresses without their '.onion' + suffix + + :raises: :class:`stem.ControllerError` if the call fails and we weren't + provided a default response + """ + + if self.get_version() < stem.version.Requirement.ADD_ONION: + raise stem.UnsatisfiableRequest(message = 'Ephemeral hidden services were added in tor version %s' % stem.version.Requirement.ADD_ONION) + + result = [] + + if our_services: + try: + result += self.get_info('onions/current').split('\n') + except stem.ProtocolError as exc: + if 'No onion services of the specified type.' not in str(exc): + raise exc + + if detached: + try: + result += self.get_info('onions/detached').split('\n') + except stem.ProtocolError as exc: + if 'No onion services of the specified type.' not in str(exc): + raise exc + + return result + + def create_ephemeral_hidden_service(self, ports, key_type = 'NEW', key_content = 'BEST', discard_key = False, detached = False, await_publication = False): + """ + Creates a new hidden service. Unlike + :func:`~stem.control.Controller.create_hidden_service` this style of + hidden service doesn't touch disk, carrying with it a lot of advantages. + This is the suggested method for making hidden services. + + Our **ports** argument can be a single port... + + :: + + create_ephemeral_hidden_service(80) + + ... list of ports the service is available on... + + :: + + create_ephemeral_hidden_service([80, 443]) + + ... or a mapping of hidden service ports to their targets... + + :: + + create_ephemeral_hidden_service({80: 80, 443: '173.194.33.133:443'}) + + .. 
versionadded:: 1.4.0 + + :param int,list,dict ports: hidden service port(s) or mapping of hidden + service ports to their targets + :param str key_type: type of key being provided, generates a new key if + 'NEW' (options are: **NEW** and **RSA1024**) + :param str key_content: key for the service to use or type of key to be + generated (options when **key_type** is **NEW** are **BEST** and + **RSA1024**) + :param bool discard_key: avoid providing the key back in our response + :param bool detached: continue this hidden service even after this control + connection is closed if **True** + :param bool await_publication: blocks until our descriptor is successfully + published if **True** + + :returns: :class:`~stem.response.add_onion.AddOnionResponse` with the response + + :raises: :class:`stem.ControllerError` if the call fails + """ + + if self.get_version() < stem.version.Requirement.ADD_ONION: + raise stem.UnsatisfiableRequest(message = 'Ephemeral hidden services were added in tor version %s' % stem.version.Requirement.ADD_ONION) + + hs_desc_queue, hs_desc_listener = queue.Queue(), None + + if await_publication: + def hs_desc_listener(event): + hs_desc_queue.put(event) + + self.add_event_listener(hs_desc_listener, EventType.HS_DESC) + + request = 'ADD_ONION %s:%s' % (key_type, key_content) + + flags = [] + + if discard_key: + flags.append('DiscardPK') + + if detached: + flags.append('Detach') + + if flags: + request += ' Flags=%s' % ','.join(flags) + + if isinstance(ports, int): + request += ' Port=%s' % ports + elif isinstance(ports, list): + for port in ports: + request += ' Port=%s' % port + elif isinstance(ports, dict): + for port, target in ports.items(): + request += ' Port=%s,%s' % (port, target) + else: + raise ValueError("The 'ports' argument of create_ephemeral_hidden_service() needs to be an int, list, or dict") + + response = self.msg(request) + stem.response.convert('ADD_ONION', response) + + if await_publication: + # We should receive five UPLOAD events, followed by up to another five + # UPLOADED to indicate they've finished. Presently tor seems to have an + # issue where the address is provided for UPLOAD but not UPLOADED so need + # to just guess that if it's for the same hidden service authority then + # it's what we're looking for. + + directories_uploaded_to, failures = [], [] + + try: + while True: + event = hs_desc_queue.get() + + if event.action == stem.HSDescAction.UPLOAD and event.address == response.service_id: + directories_uploaded_to.append(event.directory_fingerprint) + elif event.action == stem.HSDescAction.UPLOADED and event.directory_fingerprint in directories_uploaded_to: + break # successfully uploaded to a HS authority... maybe + elif event.action == stem.HSDescAction.FAILED and event.directory_fingerprint in directories_uploaded_to: + failures.append('%s (%s)' % (event.directory_fingerprint, event.reason)) + + if len(directories_uploaded_to) == len(failures): + raise stem.OperationFailed(message = 'Failed to upload our hidden service descriptor to %s' % ', '.join(failures)) + finally: + self.remove_event_listener(hs_desc_listener) + + return response + + def remove_ephemeral_hidden_service(self, service_id): + """ + Discontinues a given hidden service that was created with + :func:`~stem.control.Controller.create_ephemeral_hidden_service`. + + .. 
versionadded:: 1.4.0 + + :param str service_id: hidden service address without the '.onion' suffix + + :returns: **True** if the hidden service is discontinued, **False** if it + wasn't running in the first place + + :raises: :class:`stem.ControllerError` if the call fails + """ + + if self.get_version() < stem.version.Requirement.ADD_ONION: + raise stem.UnsatisfiableRequest(message = 'Ephemeral hidden services were added in tor version %s' % stem.version.Requirement.ADD_ONION) + + response = self.msg('DEL_ONION %s' % service_id) + stem.response.convert('SINGLELINE', response) + + if response.is_ok(): + return True + elif response.code == '552': + return False # no hidden service to discontinue + else: + raise stem.ProtocolError('DEL_ONION returned unexpected response code: %s' % response.code) + + def add_event_listener(self, listener, *events): + """ + Directs further tor controller events to a given function. The function is + expected to take a single argument, which is a + :class:`~stem.response.events.Event` subclass. For instance the following + would print the bytes sent and received by tor over five seconds... + + :: + + import time + from stem.control import Controller, EventType + + def print_bw(event): + print('sent: %i, received: %i' % (event.written, event.read)) + + with Controller.from_port(port = 9051) as controller: + controller.authenticate() + controller.add_event_listener(print_bw, EventType.BW) + time.sleep(5) + + If a new control connection is initialized then this listener will be + reattached. + + :param functor listener: function to be called when an event is received + :param stem.control.EventType events: event types to be listened for + + :raises: :class:`stem.ProtocolError` if unable to set the events + """ + + # first checking that tor supports these event types + + with self._event_listeners_lock: + if self.is_authenticated(): + for event_type in events: + event_type = stem.response.events.EVENT_TYPE_TO_CLASS.get(event_type) + + if event_type and (self.get_version() < event_type._VERSION_ADDED): + raise stem.InvalidRequest(552, '%s event requires Tor version %s or later' % (event_type, event_type._VERSION_ADDED)) + + for event_type in events: + self._event_listeners.setdefault(event_type, []).append(listener) + + failed_events = self._attach_listeners()[1] + + # restricted the failures to just things we requested + + failed_events = set(failed_events).intersection(set(events)) + + if failed_events: + raise stem.ProtocolError('SETEVENTS rejected %s' % ', '.join(failed_events)) + + def remove_event_listener(self, listener): + """ + Stops a listener from being notified of further tor events. + + :param stem.control.EventListener listener: listener to be removed + + :raises: :class:`stem.ProtocolError` if unable to set the events + """ + + with self._event_listeners_lock: + event_types_changed = False + + for event_type, event_listeners in list(self._event_listeners.items()): + if listener in event_listeners: + event_listeners.remove(listener) + + if len(event_listeners) == 0: + event_types_changed = True + del self._event_listeners[event_type] + + if event_types_changed: + response = self.msg('SETEVENTS %s' % ' '.join(self._event_listeners.keys())) + + if not response.is_ok(): + raise stem.ProtocolError('SETEVENTS received unexpected response\n%s' % response) + + def _get_cache(self, param, namespace = None): + """ + Queries our request cache for the given key. 
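A minimal sketch of the ephemeral hidden service calls above, assuming tor's control port is 9051 and a local web server is listening on port 8080::

  from stem.control import Controller

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()

    # forward the service's port 80 to a web server on our port 8080
    response = controller.create_ephemeral_hidden_service({80: 8080}, await_publication = True)
    print('Service is available at %s.onion' % response.service_id)

    print('Our ephemeral services: %s' % controller.list_ephemeral_hidden_services())

    # the service goes away with this controller, but it can also be dropped explicitly
    controller.remove_ephemeral_hidden_service(response.service_id)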
+ + :param str param: key to be queried + :param str namespace: namespace in which to check for the key + + :returns: cached value corresponding to key or **None** if the key wasn't found + """ + + return self._get_cache_map([param], namespace).get(param, None) + + def _get_cache_map(self, params, namespace = None): + """ + Queries our request cache for multiple entries. + + :param list params: keys to be queried + :param str namespace: namespace in which to check for the keys + + :returns: **dict** of 'param => cached value' pairs of keys present in cache + """ + + with self._cache_lock: + cached_values = {} + + if self.is_caching_enabled(): + for param in params: + if namespace: + cache_key = '%s.%s' % (namespace, param) + else: + cache_key = param + + if cache_key in self._request_cache: + cached_values[param] = self._request_cache[cache_key] + + return cached_values + + def _set_cache(self, params, namespace = None): + """ + Sets the given request cache entries. If the new cache value is **None** + then it is removed from our cache. + + :param dict params: **dict** of 'cache_key => value' pairs to be cached + :param str namespace: namespace for the keys + """ + + with self._cache_lock: + if not self.is_caching_enabled(): + return + + for key, value in list(params.items()): + if namespace: + cache_key = '%s.%s' % (namespace, key) + else: + cache_key = key + + if value is None: + if cache_key in self._request_cache: + del self._request_cache[cache_key] + else: + self._request_cache[cache_key] = value + + def is_caching_enabled(self): + """ + **True** if caching has been enabled, **False** otherwise. + + :returns: bool to indicate if caching is enabled + """ + + return self._is_caching_enabled + + def set_caching(self, enabled): + """ + Enables or disables caching of information retrieved from tor. + + :param bool enabled: **True** to enable caching, **False** to disable it + """ + + self._is_caching_enabled = enabled + + if not self._is_caching_enabled: + self.clear_cache() + + def clear_cache(self): + """ + Drops any cached results. + """ + + with self._cache_lock: + self._request_cache = {} + self._last_newnym = 0.0 + self._geoip_failure_count = 0 + + def load_conf(self, configtext): + """ + Sends the configuration text to Tor and loads it as if it has been read from + the torrc. + + :param str configtext: the configuration text + + :raises: :class:`stem.ControllerError` if the call fails + """ + + response = self.msg('LOADCONF\n%s' % configtext) + stem.response.convert('SINGLELINE', response) + + if response.code in ('552', '553'): + if response.code == '552' and response.message.startswith('Invalid config file: Failed to parse/validate config: Unknown option'): + raise stem.InvalidArguments(response.code, response.message, [response.message[70:response.message.find('.', 70) - 1]]) + raise stem.InvalidRequest(response.code, response.message) + elif not response.is_ok(): + raise stem.ProtocolError('+LOADCONF Received unexpected response\n%s' % str(response)) + + def save_conf(self): + """ + Saves the current configuration options into the active torrc file. 
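A rough sketch of the caching controls above (9051 is an assumed control port)::

  from stem.control import Controller

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()

    print('caching enabled: %s' % controller.is_caching_enabled())
    controller.set_caching(False)  # disabling also drops anything already cached
    print('caching enabled: %s' % controller.is_caching_enabled())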
+ + :raises: + * :class:`stem.ControllerError` if the call fails + * :class:`stem.OperationFailed` if the client is unable to save + the configuration file + """ + + response = self.msg('SAVECONF') + stem.response.convert('SINGLELINE', response) + + if response.is_ok(): + return True + elif response.code == '551': + raise stem.OperationFailed(response.code, response.message) + else: + raise stem.ProtocolError('SAVECONF returned unexpected response code') + + def is_feature_enabled(self, feature): + """ + Checks if a control connection feature is enabled. These features can be + enabled using :func:`~stem.control.Controller.enable_feature`. + + :param str feature: feature to be checked + + :returns: **True** if feature is enabled, **False** otherwise + """ + + feature = feature.upper() + + if feature in self._enabled_features: + return True + else: + # check if this feature is on by default + defaulted_version = None + + if feature == 'EXTENDED_EVENTS': + defaulted_version = stem.version.Requirement.FEATURE_EXTENDED_EVENTS + elif feature == 'VERBOSE_NAMES': + defaulted_version = stem.version.Requirement.FEATURE_VERBOSE_NAMES + + if defaulted_version: + our_version = self.get_version(None) + + if our_version and our_version >= defaulted_version: + self._enabled_features.append(feature) + + return feature in self._enabled_features + + def enable_feature(self, features): + """ + Enables features that are disabled by default to maintain backward + compatibility. Once enabled, a feature cannot be disabled and a new + control connection must be opened to get a connection with the feature + disabled. Feature names are case-insensitive. + + The following features are currently accepted: + + * EXTENDED_EVENTS - Requests the extended event syntax + * VERBOSE_NAMES - Replaces ServerID with LongName in events and GETINFO results + + :param str,list features: a single feature or a list of features to be enabled + + :raises: + * :class:`stem.ControllerError` if the call fails + * :class:`stem.InvalidArguments` if features passed were invalid + """ + + if isinstance(features, (bytes, str_type)): + features = [features] + + response = self.msg('USEFEATURE %s' % ' '.join(features)) + stem.response.convert('SINGLELINE', response) + + if not response.is_ok(): + if response.code == '552': + invalid_feature = [] + + if response.message.startswith('Unrecognized feature "'): + invalid_feature = [response.message[22:response.message.find('"', 22)]] + + raise stem.InvalidArguments(response.code, response.message, invalid_feature) + + raise stem.ProtocolError('USEFEATURE provided an invalid response code: %s' % response.code) + + self._enabled_features += [entry.upper() for entry in features] + + @with_default() + def get_circuit(self, circuit_id, default = UNDEFINED): + """ + get_circuit(circuit_id, default = UNDEFINED) + + Provides a circuit currently available from tor. + + :param int circuit_id: circuit to be fetched + :param object default: response if the query fails + + :returns: :class:`stem.response.events.CircuitEvent` for the given circuit + + :raises: + * :class:`stem.ControllerError` if the call fails + * **ValueError** if the circuit doesn't exist + + An exception is only raised if we weren't provided a default response. 
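A short sketch of the feature helpers above, again with an assumed control port of 9051::

  from stem.control import Controller

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()

    if not controller.is_feature_enabled('EXTENDED_EVENTS'):
      controller.enable_feature('EXTENDED_EVENTS')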
+ """ + + for circ in self.get_circuits(): + if circ.id == circuit_id: + return circ + + raise ValueError("Tor currently does not have a circuit with the id of '%s'" % circuit_id) + + @with_default() + def get_circuits(self, default = UNDEFINED): + """ + get_circuits(default = UNDEFINED) + + Provides tor's currently available circuits. + + :param object default: response if the query fails + + :returns: **list** of :class:`stem.response.events.CircuitEvent` for our circuits + + :raises: :class:`stem.ControllerError` if the call fails and no default was provided + """ + + circuits = [] + response = self.get_info('circuit-status') + + for circ in response.splitlines(): + circ_message = stem.socket.recv_message(StringIO('650 CIRC ' + circ + '\r\n')) + stem.response.convert('EVENT', circ_message, arrived_at = 0) + circuits.append(circ_message) + + return circuits + + def new_circuit(self, path = None, purpose = 'general', await_build = False): + """ + Requests a new circuit. If the path isn't provided, one is automatically + selected. + + :param list,str path: one or more relays to make a circuit through + :param str purpose: 'general' or 'controller' + :param bool await_build: blocks until the circuit is built if **True** + + :returns: str of the circuit id of the newly created circuit + + :raises: :class:`stem.ControllerError` if the call fails + """ + + return self.extend_circuit('0', path, purpose, await_build) + + def extend_circuit(self, circuit_id = '0', path = None, purpose = 'general', await_build = False): + """ + Either requests the creation of a new circuit or extends an existing one. + + When called with a circuit value of zero (the default) a new circuit is + created, and when non-zero the circuit with that id is extended. If the + path isn't provided, one is automatically selected. + + A python interpreter session used to create circuits could look like this... + + :: + + >>> controller.extend_circuit('0', ['718BCEA286B531757ACAFF93AE04910EA73DE617', '30BAB8EE7606CBD12F3CC269AE976E0153E7A58D', '2765D8A8C4BBA3F89585A9FFE0E8575615880BEB']) + 19 + >>> controller.extend_circuit('0') + 20 + >>> print(controller.get_info('circuit-status')) + 20 EXTENDED $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$649F2D0ACF418F7CFC6539AB2257EB2D5297BAFA=Eskimo BUILD_FLAGS=NEED_CAPACITY PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:51:11.433755 + 19 BUILT $718BCEA286B531757ACAFF93AE04910EA73DE617=KsmoinOK,$30BAB8EE7606CBD12F3CC269AE976E0153E7A58D=Pascal1,$2765D8A8C4BBA3F89585A9FFE0E8575615880BEB=Anthracite PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:50:56.969938 + + :param str circuit_id: id of a circuit to be extended + :param list,str path: one or more relays to make a circuit through, this is + required if the circuit id is non-zero + :param str purpose: 'general' or 'controller' + :param bool await_build: blocks until the circuit is built if **True** + + :returns: str of the circuit id of the created or extended circuit + + :raises: + * :class:`stem.InvalidRequest` if one of the parameters were invalid + * :class:`stem.CircuitExtensionFailed` if we were waiting for the circuit + to build but it failed + * :class:`stem.ControllerError` if the call fails + """ + + # Attaches a temporary listener for CIRC events if we'll be waiting for it + # to build. This is icky, but we can't reliably do this via polling since + # we then can't get the failure if it can't be created. 
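Putting the circuit accessors above together, a usage sketch (control port 9051 assumed, tor picks the relays itself) might look like::

  from stem.control import Controller

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()

    for circ in controller.get_circuits():
      print('circuit %s is %s' % (circ.id, circ.status))

    # request a fresh circuit and wait for tor to build it
    new_id = controller.new_circuit(await_build = True)
    print('circuit %s was built' % new_id)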
+ + circ_queue, circ_listener = queue.Queue(), None + + if await_build: + def circ_listener(event): + circ_queue.put(event) + + self.add_event_listener(circ_listener, EventType.CIRC) + + try: + # we might accidently get integer circuit ids + circuit_id = str(circuit_id) + + if path is None and circuit_id == '0': + path_opt_version = stem.version.Requirement.EXTENDCIRCUIT_PATH_OPTIONAL + + if not self.get_version() >= path_opt_version: + raise stem.InvalidRequest(512, 'EXTENDCIRCUIT requires the path prior to version %s' % path_opt_version) + + args = [circuit_id] + + if isinstance(path, (bytes, str_type)): + path = [path] + + if path: + args.append(','.join(path)) + + if purpose: + args.append('purpose=%s' % purpose) + + response = self.msg('EXTENDCIRCUIT %s' % ' '.join(args)) + stem.response.convert('SINGLELINE', response) + + if response.code in ('512', '552'): + raise stem.InvalidRequest(response.code, response.message) + elif not response.is_ok(): + raise stem.ProtocolError('EXTENDCIRCUIT returned unexpected response code: %s' % response.code) + + if not response.message.startswith('EXTENDED '): + raise stem.ProtocolError('EXTENDCIRCUIT response invalid:\n%s', response) + + new_circuit = response.message.split(' ', 1)[1] + + if await_build: + while True: + circ = circ_queue.get() + + if circ.id == new_circuit: + if circ.status == CircStatus.BUILT: + break + elif circ.status == CircStatus.FAILED: + raise stem.CircuitExtensionFailed('Circuit failed to be created: %s' % circ.reason, circ) + elif circ.status == CircStatus.CLOSED: + raise stem.CircuitExtensionFailed('Circuit was closed prior to build', circ) + + return new_circuit + finally: + if circ_listener: + self.remove_event_listener(circ_listener) + + def repurpose_circuit(self, circuit_id, purpose): + """ + Changes a circuit's purpose. Currently, two purposes are recognized... + * general + * controller + + :param str circuit_id: id of the circuit whose purpose is to be changed + :param str purpose: purpose (either 'general' or 'controller') + + :raises: :class:`stem.InvalidArguments` if the circuit doesn't exist or if the purpose was invalid + """ + + response = self.msg('SETCIRCUITPURPOSE %s purpose=%s' % (circuit_id, purpose)) + stem.response.convert('SINGLELINE', response) + + if not response.is_ok(): + if response.code == '552': + raise stem.InvalidRequest(response.code, response.message) + else: + raise stem.ProtocolError('SETCIRCUITPURPOSE returned unexpected response code: %s' % response.code) + + def close_circuit(self, circuit_id, flag = ''): + """ + Closes the specified circuit. 
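In the same vein, a circuit built this way can be repurposed or torn down with the calls above (the purpose and flag values shown are simply the documented options)::

  from stem.control import Controller

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()

    circuit_id = controller.new_circuit(await_build = True)
    controller.repurpose_circuit(circuit_id, 'controller')
    controller.close_circuit(circuit_id, flag = 'IfUnused')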
+ + :param str circuit_id: id of the circuit to be closed + :param str flag: optional value to modify closing, the only flag available + is 'IfUnused' which will not close the circuit unless it is unused + + :raises: :class:`stem.InvalidArguments` if the circuit is unknown + :raises: :class:`stem.InvalidRequest` if not enough information is provided + """ + + response = self.msg('CLOSECIRCUIT %s %s' % (circuit_id, flag)) + stem.response.convert('SINGLELINE', response) + + if not response.is_ok(): + if response.code in ('512', '552'): + if response.message.startswith('Unknown circuit '): + raise stem.InvalidArguments(response.code, response.message, [circuit_id]) + raise stem.InvalidRequest(response.code, response.message) + else: + raise stem.ProtocolError('CLOSECIRCUIT returned unexpected response code: %s' % response.code) + + @with_default() + def get_streams(self, default = UNDEFINED): + """ + get_streams(default = UNDEFINED) + + Provides the list of streams tor is currently handling. + + :param object default: response if the query fails + + :returns: list of :class:`stem.response.events.StreamEvent` objects + + :raises: :class:`stem.ControllerError` if the call fails and no default was + provided + """ + + streams = [] + response = self.get_info('stream-status') + + for stream in response.splitlines(): + message = stem.socket.recv_message(StringIO('650 STREAM ' + stream + '\r\n')) + stem.response.convert('EVENT', message, arrived_at = 0) + streams.append(message) + + return streams + + def attach_stream(self, stream_id, circuit_id, exiting_hop = None): + """ + Attaches a stream to a circuit. + + Note: Tor attaches streams to circuits automatically unless the + __LeaveStreamsUnattached configuration variable is set to '1' + + :param str stream_id: id of the stream that must be attached + :param str circuit_id: id of the circuit to which it must be attached + :param int exiting_hop: hop in the circuit where traffic should exit + + :raises: + * :class:`stem.InvalidRequest` if the stream or circuit id were unrecognized + * :class:`stem.UnsatisfiableRequest` if the stream isn't in a state where it can be attached + * :class:`stem.OperationFailed` if the stream couldn't be attached for any other reason + """ + + query = 'ATTACHSTREAM %s %s' % (stream_id, circuit_id) + + if exiting_hop: + query += ' HOP=%s' % exiting_hop + + response = self.msg(query) + stem.response.convert('SINGLELINE', response) + + if not response.is_ok(): + if response.code == '552': + raise stem.InvalidRequest(response.code, response.message) + elif response.code == '551': + raise stem.OperationFailed(response.code, response.message) + elif response.code == '555': + raise stem.UnsatisfiableRequest(response.code, response.message) + else: + raise stem.ProtocolError('ATTACHSTREAM returned unexpected response code: %s' % response.code) + + def close_stream(self, stream_id, reason = stem.RelayEndReason.MISC, flag = ''): + """ + Closes the specified stream. 
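The stream calls above are usually paired with tor's __LeaveStreamsUnattached option; a sketch of manual stream attachment (the ports and option value are assumptions) could be::

  from stem import StreamStatus
  from stem.control import Controller, EventType

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()
    controller.set_conf('__LeaveStreamsUnattached', '1')

    circuit_id = controller.new_circuit(await_build = True)

    def attach_to_circuit(stream):
      # route every new stream through the circuit we just built
      if stream.status == StreamStatus.NEW:
        controller.attach_stream(stream.id, circuit_id)

    controller.add_event_listener(attach_to_circuit, EventType.STREAM)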
+ + :param str stream_id: id of the stream to be closed + :param stem.RelayEndReason reason: reason the stream is closing + :param str flag: not currently used + + :raises: + * :class:`stem.InvalidArguments` if the stream or reason are not recognized + * :class:`stem.InvalidRequest` if the stream and/or reason are missing + """ + + # there's a single value offset between RelayEndReason.index_of() and the + # value that tor expects since tor's value starts with the index of one + + response = self.msg('CLOSESTREAM %s %s %s' % (stream_id, stem.RelayEndReason.index_of(reason) + 1, flag)) + stem.response.convert('SINGLELINE', response) + + if not response.is_ok(): + if response.code in ('512', '552'): + if response.message.startswith('Unknown stream '): + raise stem.InvalidArguments(response.code, response.message, [stream_id]) + elif response.message.startswith('Unrecognized reason '): + raise stem.InvalidArguments(response.code, response.message, [reason]) + raise stem.InvalidRequest(response.code, response.message) + else: + raise stem.ProtocolError('CLOSESTREAM returned unexpected response code: %s' % response.code) + + def signal(self, signal): + """ + Sends a signal to the Tor client. + + :param stem.Signal signal: type of signal to be sent + + :raises: :class:`stem.InvalidArguments` if signal provided wasn't recognized + """ + + response = self.msg('SIGNAL %s' % signal) + stem.response.convert('SINGLELINE', response) + + if response.is_ok(): + if signal == stem.Signal.NEWNYM: + self._last_newnym = time.time() + else: + if response.code == '552': + raise stem.InvalidArguments(response.code, response.message, [signal]) + + raise stem.ProtocolError('SIGNAL response contained unrecognized status code: %s' % response.code) + + def is_newnym_available(self): + """ + Indicates if tor would currently accept a NEWNYM signal. This can only + account for signals sent via this controller. + + .. versionadded:: 1.2.0 + + :returns: **True** if tor would currently accept a NEWNYM signal, **False** + otherwise + """ + + if self.is_alive(): + return self.get_newnym_wait() == 0.0 + else: + return False + + def get_newnym_wait(self): + """ + Provides the number of seconds until a NEWNYM signal would be respected. + This can only account for signals sent via this controller. + + .. versionadded:: 1.2.0 + + :returns: **float** for the number of seconds until tor would respect + another NEWNYM signal + """ + + return max(0.0, self._last_newnym + 10 - time.time()) + + @with_default() + def get_effective_rate(self, default = UNDEFINED, burst = False): + """ + get_effective_rate(default = UNDEFINED, burst = False) + + Provides the maximum rate this relay is configured to relay in bytes per + second. This is based on multiple torrc parameters if they're set... + + * Effective Rate = min(BandwidthRate, RelayBandwidthRate, MaxAdvertisedBandwidth) + * Effective Burst = min(BandwidthBurst, RelayBandwidthBurst) + + .. 
versionadded:: 1.3.0 + + :param object default: response if the query fails + :param bool burst: provides the burst bandwidth, otherwise this provides + the standard rate + + :returns: **int** with the effective bandwidth rate in bytes per second + + :raises: :class:`stem.ControllerError` if the call fails and no default was + provided + """ + + if not burst: + attributes = ('BandwidthRate', 'RelayBandwidthRate', 'MaxAdvertisedBandwidth') + else: + attributes = ('BandwidthBurst', 'RelayBandwidthBurst') + + value = None + + for attr in attributes: + attr_value = int(self.get_conf(attr)) + + if attr_value == 0 and attr.startswith('Relay'): + continue # RelayBandwidthRate and RelayBandwidthBurst default to zero + + value = min(value, attr_value) if value else attr_value + + return value + + def is_geoip_unavailable(self): + """ + Provides **True** if we've concluded hat our geoip database is unavailable, + **False** otherwise. This is determined by having our 'GETINFO + ip-to-country/\*' lookups fail so this will default to **False** if we + aren't making those queries. + + Geoip failures will be untracked if caching is disabled. + + :returns: **bool** to indicate if we've concluded our geoip database to be + unavailable or not + """ + + return self._geoip_failure_count >= GEOIP_FAILURE_THRESHOLD + + def map_address(self, mapping): + """ + Map addresses to replacement addresses. Tor replaces subseqent connections + to the original addresses with the replacement addresses. + + If the original address is a null address, i.e., one of '0.0.0.0', '::0', or + '.' Tor picks an original address itself and returns it in the reply. If the + original address is already mapped to a different address the mapping is + removed. + + :param dict mapping: mapping of original addresses to replacement addresses + + :raises: + * :class:`stem.InvalidRequest` if the addresses are malformed + * :class:`stem.OperationFailed` if Tor couldn't fulfill the request + + :returns: **dict** with 'original -> replacement' address mappings + """ + + mapaddress_arg = ' '.join(['%s=%s' % (k, v) for (k, v) in list(mapping.items())]) + response = self.msg('MAPADDRESS %s' % mapaddress_arg) + stem.response.convert('MAPADDRESS', response) + + return response.entries + + def drop_guards(self): + """ + Drops our present guard nodes and picks a new set. + + .. 
versionadded:: 1.2.0 + + :raises: :class:`stem.ControllerError` if Tor couldn't fulfill the request + """ + + if self.get_version() < stem.version.Requirement.DROPGUARDS: + raise stem.UnsatisfiableRequest(message = 'DROPGUARDS was added in tor version %s' % stem.version.Requirement.DROPGUARDS) + + self.msg('DROPGUARDS') + + def _post_authentication(self): + super(Controller, self)._post_authentication() + + # try to re-attach event listeners to the new instance + + with self._event_listeners_lock: + try: + failed_events = self._attach_listeners()[1] + + if failed_events: + # remove our listeners for these so we don't keep failing + for event_type in failed_events: + del self._event_listeners[event_type] + + logging_id = 'stem.controller.event_reattach-%s' % '-'.join(failed_events) + log.log_once(logging_id, log.WARN, 'We were unable to re-attach our event listeners to the new tor instance for: %s' % ', '.join(failed_events)) + except stem.ProtocolError as exc: + log.warn('Unable to issue the SETEVENTS request to re-attach our listeners (%s)' % exc) + + # issue TAKEOWNERSHIP if we're the owning process for this tor instance + + owning_pid = self.get_conf('__OwningControllerProcess', None) + + if owning_pid == str(os.getpid()) and self.is_localhost(): + response = self.msg('TAKEOWNERSHIP') + stem.response.convert('SINGLELINE', response) + + if response.is_ok(): + # Now that tor is tracking our ownership of the process via the control + # connection, we can stop having it check for us via our pid. + + try: + self.reset_conf('__OwningControllerProcess') + except stem.ControllerError as exc: + log.warn("We were unable to reset tor's __OwningControllerProcess configuration. It will continue to periodically check if our pid exists. (%s)" % exc) + else: + log.warn('We were unable assert ownership of tor through TAKEOWNERSHIP, despite being configured to be the owning process through __OwningControllerProcess. (%s)' % response) + + def _handle_event(self, event_message): + stem.response.convert('EVENT', event_message, arrived_at = time.time()) + + with self._event_listeners_lock: + for event_type, event_listeners in list(self._event_listeners.items()): + if event_type == event_message.type: + for listener in event_listeners: + listener(event_message) + + def _attach_listeners(self): + """ + Attempts to subscribe to the self._event_listeners events from tor. This is + a no-op if we're not currently authenticated. + + :returns: tuple of the form (set_events, failed_events) + + :raises: :class:`stem.ControllerError` if unable to make our request to tor + """ + + set_events, failed_events = [], [] + + with self._event_listeners_lock: + if self.is_authenticated(): + # try to set them all + response = self.msg('SETEVENTS %s' % ' '.join(self._event_listeners.keys())) + + if response.is_ok(): + set_events = list(self._event_listeners.keys()) + else: + # One of the following likely happened... + # + # * Our user attached listeners before having an authenticated + # connection, so we couldn't check if we met the version + # requirement. + # + # * User attached listeners to one tor instance, then connected us to + # an older tor instancce. + # + # * Some other controller hiccup (far less likely). + # + # See if we can set some subset of our events. 
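Relating back to the signal helpers documented earlier in this class, a common pattern for requesting a fresh identity (a sketch, with 9051 as an assumed control port) is::

  import time

  from stem import Signal
  from stem.control import Controller

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()

    # honor tor's rate limiting before sending NEWNYM
    if not controller.is_newnym_available():
      time.sleep(controller.get_newnym_wait())

    controller.signal(Signal.NEWNYM)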
+ + for event in list(self._event_listeners.keys()): + response = self.msg('SETEVENTS %s' % ' '.join(set_events + [event])) + + if response.is_ok(): + set_events.append(event) + else: + failed_events.append(event) + + return (set_events, failed_events) + + +def _parse_circ_path(path): + """ + Parses a circuit path as a list of **(fingerprint, nickname)** tuples. Tor + circuit paths are defined as being of the form... + + :: + + Path = LongName *("," LongName) + LongName = Fingerprint [ ( "=" / "~" ) Nickname ] + + example: + $999A226EBED397F331B612FE1E4CFAE5C1F201BA=piyaz + + ... *unless* this is prior to tor version 0.2.2.1 with the VERBOSE_NAMES + feature turned off (or before version 0.1.2.2 where the feature was + introduced). In that case either the fingerprint or nickname in the tuple + will be **None**, depending on which is missing. + + :: + + Path = ServerID *("," ServerID) + ServerID = Nickname / Fingerprint + + example: + $E57A476CD4DFBD99B4EE52A100A58610AD6E80B9,hamburgerphone,PrivacyRepublic14 + + :param str path: circuit path to be parsed + + :returns: list of **(fingerprint, nickname)** tuples, fingerprints do not have a proceeding '$' + + :raises: :class:`stem.ProtocolError` if the path is malformed + """ + + if path: + try: + return [_parse_circ_entry(entry) for entry in path.split(',')] + except stem.ProtocolError as exc: + # include the path with the exception + raise stem.ProtocolError('%s: %s' % (exc, path)) + else: + return [] + + +def _parse_circ_entry(entry): + """ + Parses a single relay's 'LongName' or 'ServerID'. See the + :func:`~stem.control._parse_circ_path` function for more information. + + :param str entry: relay information to be parsed + + :returns: **(fingerprint, nickname)** tuple + + :raises: :class:`stem.ProtocolError` if the entry is malformed + """ + + if '=' in entry: + # common case + fingerprint, nickname = entry.split('=') + elif '~' in entry: + # this is allowed for by the spec, but I've never seen it used + fingerprint, nickname = entry.split('~') + elif entry[0] == '$': + # old style, fingerprint only + fingerprint, nickname = entry, None + else: + # old style, nickname only + fingerprint, nickname = None, entry + + if fingerprint is not None: + if not stem.util.tor_tools.is_valid_fingerprint(fingerprint, True): + raise stem.ProtocolError('Fingerprint in the circuit path is malformed (%s)' % fingerprint) + + fingerprint = fingerprint[1:] # strip off the leading '$' + + if nickname is not None and not stem.util.tor_tools.is_valid_nickname(nickname): + raise stem.ProtocolError('Nickname in the circuit path is malformed (%s)' % nickname) + + return (fingerprint, nickname) + + +@with_default() +def _case_insensitive_lookup(entries, key, default = UNDEFINED): + """ + Makes a case insensitive lookup within a list or dictionary, providing the + first matching entry that we come across. 
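For illustration, the path parsing helpers above behave roughly as follows (interpreter output shown, with values taken from the examples in their docstrings)::

  >>> _parse_circ_entry('$999A226EBED397F331B612FE1E4CFAE5C1F201BA=piyaz')
  ('999A226EBED397F331B612FE1E4CFAE5C1F201BA', 'piyaz')

  >>> _parse_circ_entry('hamburgerphone')
  (None, 'hamburgerphone')

  >>> _parse_circ_path('$E57A476CD4DFBD99B4EE52A100A58610AD6E80B9,hamburgerphone')
  [('E57A476CD4DFBD99B4EE52A100A58610AD6E80B9', None), (None, 'hamburgerphone')]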
+ + :param list,dict entries: list or dictionary to be searched + :param str key: entry or key value to look up + :param object default: value to be returned if the key doesn't exist + + :returns: case insensitive match or default if one was provided and key wasn't found + + :raises: **ValueError** if no such value exists + """ + + if entries is not None: + if isinstance(entries, dict): + for k, v in list(entries.items()): + if k.lower() == key.lower(): + return v + else: + for entry in entries: + if entry.lower() == key.lower(): + return entry + + raise ValueError("key '%s' doesn't exist in dict: %s" % (key, entries)) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py b/Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py new file mode 100644 index 0000000..1ebe578 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/__init__.py @@ -0,0 +1,841 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Package for parsing and processing descriptor data. + +**Module Overview:** + +:: + + parse_file - Parses the descriptors in a file. + + Descriptor - Common parent for all descriptor file types. + |- get_path - location of the descriptor on disk if it came from a file + |- get_archive_path - location of the descriptor within the archive it came from + |- get_bytes - similar to str(), but provides our original bytes content + |- get_unrecognized_lines - unparsed descriptor content + +- __str__ - string that the descriptor was made from + +.. data:: DocumentHandler (enum) + + Ways in which we can parse a + :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`. + + Both **ENTRIES** and **BARE_DOCUMENT** have a 'thin' document, which doesn't + have a populated **routers** attribute. This allows for lower memory usage + and upfront runtime. However, if read time and memory aren't a concern then + **DOCUMENT** can provide you with a fully populated document. + + =================== =========== + DocumentHandler Description + =================== =========== + **ENTRIES** Iterates over the contained :class:`~stem.descriptor.router_status_entry.RouterStatusEntry`. Each has a reference to the bare document it came from (through its **document** attribute). + **DOCUMENT** :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` with the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` it contains (through its **routers** attribute). + **BARE_DOCUMENT** :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` **without** a reference to its contents (the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` are unread). 
+ =================== =========== +""" + +__all__ = [ + 'export', + 'reader', + 'remote', + 'extrainfo_descriptor', + 'server_descriptor', + 'microdescriptor', + 'networkstatus', + 'router_status_entry', + 'tordnsel', + 'parse_file', + 'Descriptor', +] + +import base64 +import codecs +import copy +import hashlib +import os +import re +import tarfile + +import stem.prereq +import stem.util.enum +import stem.util.str_tools +import stem.util.system + +from stem import str_type + +try: + # added in python 2.7 + from collections import OrderedDict +except ImportError: + from stem.util.ordereddict import OrderedDict + +KEYWORD_CHAR = 'a-zA-Z0-9-' +WHITESPACE = ' \t' +KEYWORD_LINE = re.compile('^([%s]+)(?:[%s]+(.*))?$' % (KEYWORD_CHAR, WHITESPACE)) +SPECIFIC_KEYWORD_LINE = '^(%%s)(?:[%s]+(.*))?$' % WHITESPACE +PGP_BLOCK_START = re.compile('^-----BEGIN ([%s%s]+)-----$' % (KEYWORD_CHAR, WHITESPACE)) +PGP_BLOCK_END = '-----END %s-----' + +DocumentHandler = stem.util.enum.UppercaseEnum( + 'ENTRIES', + 'DOCUMENT', + 'BARE_DOCUMENT', +) + + +def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, **kwargs): + """ + Simple function to read the descriptor contents from a file, providing an + iterator for its :class:`~stem.descriptor.__init__.Descriptor` contents. + + If you don't provide a **descriptor_type** argument then this automatically + tries to determine the descriptor type based on the following... + + * The @type annotation on the first line. These are generally only found in + the `CollecTor archives `_. + + * The filename if it matches something from tor's data directory. For + instance, tor's 'cached-descriptors' contains server descriptors. + + This is a handy function for simple usage, but if you're reading multiple + descriptor files you might want to consider the + :class:`~stem.descriptor.reader.DescriptorReader`. + + Descriptor types include the following, including further minor versions (ie. + if we support 1.1 then we also support everything from 1.0 and most things + from 1.2, but not 2.0)... 
+ + ========================================= ===== + Descriptor Type Class + ========================================= ===== + server-descriptor 1.0 :class:`~stem.descriptor.server_descriptor.RelayDescriptor` + extra-info 1.0 :class:`~stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor` + microdescriptor 1.0 :class:`~stem.descriptor.microdescriptor.Microdescriptor` + directory 1.0 **unsupported** + network-status-2 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV2`) + dir-key-certificate-3 1.0 :class:`~stem.descriptor.networkstatus.KeyCertificate` + network-status-consensus-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`) + network-status-vote-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`) + network-status-microdesc-consensus-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`) + bridge-network-status 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.BridgeNetworkStatusDocument`) + bridge-server-descriptor 1.0 :class:`~stem.descriptor.server_descriptor.BridgeDescriptor` + bridge-extra-info 1.1 or 1.2 :class:`~stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor` + torperf 1.0 **unsupported** + bridge-pool-assignment 1.0 **unsupported** + tordnsel 1.0 :class:`~stem.descriptor.tordnsel.TorDNSEL` + hidden-service-descriptor 1.0 :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor` + ========================================= ===== + + If you're using **python 3** then beware that the open() function defaults to + using text mode. **Binary mode** is strongly suggested because it's both + faster (by my testing by about 33x) and doesn't do universal newline + translation which can make us misparse the document. + + :: + + my_descriptor_file = open(descriptor_path, 'rb') + + :param str,file,tarfile descriptor_file: path or opened file with the descriptor contents + :param str descriptor_type: `descriptor type `_, this is guessed if not provided + :param bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :param stem.descriptor.__init__.DocumentHandler document_handler: method in + which to parse the :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` + :param dict kwargs: additional arguments for the descriptor constructor + + :returns: iterator for :class:`~stem.descriptor.__init__.Descriptor` instances in the file + + :raises: + * **ValueError** if the contents is malformed and validate is True + * **TypeError** if we can't match the contents of the file to a descriptor type + * **IOError** if unable to read from the descriptor_file + """ + + # Delegate to a helper if this is a path or tarfile. 
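As a brief usage sketch for parse_file (the path below is only the conventional location of tor's data directory on Debian-style systems)::

  from stem.descriptor import parse_file

  for router in parse_file('/var/lib/tor/cached-consensus'):
    print('%s (%s)' % (router.nickname, router.fingerprint))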
+ + handler = None + + if isinstance(descriptor_file, (bytes, str_type)): + if stem.util.system.is_tarfile(descriptor_file): + handler = _parse_file_for_tar_path + else: + handler = _parse_file_for_path + elif isinstance(descriptor_file, tarfile.TarFile): + handler = _parse_file_for_tarfile + + if handler: + for desc in handler(descriptor_file, descriptor_type, validate, document_handler, **kwargs): + yield desc + + return + + # The tor descriptor specifications do not provide a reliable method for + # identifying a descriptor file's type and version so we need to guess + # based on its filename. Metrics descriptors, however, can be identified + # by an annotation on their first line... + # https://trac.torproject.org/5651 + + initial_position = descriptor_file.tell() + first_line = stem.util.str_tools._to_unicode(descriptor_file.readline().strip()) + metrics_header_match = re.match('^@type (\S+) (\d+).(\d+)$', first_line) + + if not metrics_header_match: + descriptor_file.seek(initial_position) + + descriptor_path = getattr(descriptor_file, 'name', None) + filename = '' if descriptor_path is None else os.path.basename(descriptor_file.name) + file_parser = None + + if descriptor_type is not None: + descriptor_type_match = re.match('^(\S+) (\d+).(\d+)$', descriptor_type) + + if descriptor_type_match: + desc_type, major_version, minor_version = descriptor_type_match.groups() + file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs) + else: + raise ValueError("The descriptor_type must be of the form ' .'") + elif metrics_header_match: + # Metrics descriptor handling + + desc_type, major_version, minor_version = metrics_header_match.groups() + file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs) + else: + # Cached descriptor handling. These contain multiple descriptors per file. + + if filename == 'cached-descriptors' or filename == 'cached-descriptors.new': + file_parser = lambda f: stem.descriptor.server_descriptor._parse_file(f, validate = validate, **kwargs) + elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new': + file_parser = lambda f: stem.descriptor.extrainfo_descriptor._parse_file(f, validate = validate, **kwargs) + elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new': + file_parser = lambda f: stem.descriptor.microdescriptor._parse_file(f, validate = validate, **kwargs) + elif filename == 'cached-consensus': + file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, validate = validate, document_handler = document_handler, **kwargs) + elif filename == 'cached-microdesc-consensus': + file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs) + + if file_parser: + for desc in file_parser(descriptor_file): + if descriptor_path is not None: + desc._set_path(os.path.abspath(descriptor_path)) + + yield desc + + return + + # Not recognized as a descriptor file. + + raise TypeError("Unable to determine the descriptor's type. 
filename: '%s', first line: '%s'" % (filename, first_line)) + + +def _parse_file_for_path(descriptor_file, *args, **kwargs): + with open(descriptor_file, 'rb') as desc_file: + for desc in parse_file(desc_file, *args, **kwargs): + yield desc + + +def _parse_file_for_tar_path(descriptor_file, *args, **kwargs): + # TODO: use 'with' for tarfile after dropping python 2.6 support + tar_file = tarfile.open(descriptor_file) + + try: + for desc in parse_file(tar_file, *args, **kwargs): + desc._set_path(os.path.abspath(descriptor_file)) + yield desc + finally: + if tar_file: + tar_file.close() + + +def _parse_file_for_tarfile(descriptor_file, *args, **kwargs): + for tar_entry in descriptor_file: + if tar_entry.isfile(): + entry = descriptor_file.extractfile(tar_entry) + + try: + for desc in parse_file(entry, *args, **kwargs): + desc._set_archive_path(entry.name) + yield desc + finally: + entry.close() + + +def _parse_metrics_file(descriptor_type, major_version, minor_version, descriptor_file, validate, document_handler, **kwargs): + # Parses descriptor files from metrics, yielding individual descriptors. This + # throws a TypeError if the descriptor_type or version isn't recognized. + + if descriptor_type == 'server-descriptor' and major_version == 1: + for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs): + yield desc + elif descriptor_type == 'bridge-server-descriptor' and major_version == 1: + for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs): + yield desc + elif descriptor_type == 'extra-info' and major_version == 1: + for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs): + yield desc + elif descriptor_type == 'microdescriptor' and major_version == 1: + for desc in stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs): + yield desc + elif descriptor_type == 'bridge-extra-info' and major_version == 1: + # version 1.1 introduced a 'transport' field... 
+ # https://trac.torproject.org/6257 + + for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs): + yield desc + elif descriptor_type == 'network-status-2' and major_version == 1: + document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV2 + + for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs): + yield desc + elif descriptor_type == 'dir-key-certificate-3' and major_version == 1: + for desc in stem.descriptor.networkstatus._parse_file_key_certs(descriptor_file, validate = validate, **kwargs): + yield desc + elif descriptor_type in ('network-status-consensus-3', 'network-status-vote-3') and major_version == 1: + document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3 + + for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs): + yield desc + elif descriptor_type == 'network-status-microdesc-consensus-3' and major_version == 1: + document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3 + + for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs): + yield desc + elif descriptor_type == 'bridge-network-status' and major_version == 1: + document_type = stem.descriptor.networkstatus.BridgeNetworkStatusDocument + + for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs): + yield desc + elif descriptor_type == 'tordnsel' and major_version == 1: + document_type = stem.descriptor.tordnsel.TorDNSEL + + for desc in stem.descriptor.tordnsel._parse_file(descriptor_file, validate = validate, **kwargs): + yield desc + elif descriptor_type == 'hidden-service-descriptor' and major_version == 1: + document_type = stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor + + for desc in stem.descriptor.hidden_service_descriptor._parse_file(descriptor_file, validate = validate, **kwargs): + yield desc + else: + raise TypeError("Unrecognized metrics descriptor format. 
type: '%s', version: '%i.%i'" % (descriptor_type, major_version, minor_version)) + + +def _value(line, entries): + return entries[line][0][0] + + +def _values(line, entries): + return [entry[0] for entry in entries[line]] + + +def _parse_simple_line(keyword, attribute): + def _parse(descriptor, entries): + setattr(descriptor, attribute, _value(keyword, entries)) + + return _parse + + +def _parse_bytes_line(keyword, attribute): + def _parse(descriptor, entries): + line_match = re.search(stem.util.str_tools._to_bytes('^(opt )?%s(?:[%s]+(.*))?$' % (keyword, WHITESPACE)), descriptor.get_bytes(), re.MULTILINE) + result = None + + if line_match: + value = line_match.groups()[1] + result = b'' if value is None else value + + setattr(descriptor, attribute, result) + + return _parse + + +def _parse_timestamp_line(keyword, attribute): + # "" YYYY-MM-DD HH:MM:SS + + def _parse(descriptor, entries): + value = _value(keyword, entries) + + try: + setattr(descriptor, attribute, stem.util.str_tools._parse_timestamp(value)) + except ValueError: + raise ValueError("Timestamp on %s line wasn't parsable: %s %s" % (keyword, keyword, value)) + + return _parse + + +def _parse_forty_character_hex(keyword, attribute): + # format of fingerprints, sha1 digests, etc + + def _parse(descriptor, entries): + value = _value(keyword, entries) + + if not stem.util.tor_tools.is_hex_digits(value, 40): + raise ValueError('%s line had an invalid value (should be 40 hex characters): %s %s' % (keyword, keyword, value)) + + setattr(descriptor, attribute, value) + + return _parse + + +def _parse_key_block(keyword, attribute, expected_block_type, value_attribute = None): + def _parse(descriptor, entries): + value, block_type, block_contents = entries[keyword][0] + + if not block_contents or block_type != expected_block_type: + raise ValueError("'%s' should be followed by a %s block, but was a %s" % (keyword, expected_block_type, block_type)) + + setattr(descriptor, attribute, block_contents) + + if value_attribute: + setattr(descriptor, value_attribute, value) + + return _parse + + +class Descriptor(object): + """ + Common parent for all types of descriptors. + """ + + ATTRIBUTES = {} # mapping of 'attribute' => (default_value, parsing_function) + PARSER_FOR_LINE = {} # line keyword to its associated parsing function + + def __init__(self, contents, lazy_load = False): + self._path = None + self._archive_path = None + self._raw_contents = contents + self._lazy_loading = lazy_load + self._entries = {} + self._unrecognized_lines = [] + + def get_path(self): + """ + Provides the absolute path that we loaded this descriptor from. + + :returns: **str** with the absolute path of the descriptor source + """ + + return self._path + + def get_archive_path(self): + """ + If this descriptor came from an archive then provides its path within the + archive. This is only set if the descriptor came from a + :class:`~stem.descriptor.reader.DescriptorReader`, and is **None** if this + descriptor didn't come from an archive. + + :returns: **str** with the descriptor's path within the archive + """ + + return self._archive_path + + def get_bytes(self): + """ + Provides the ASCII **bytes** of the descriptor. This only differs from + **str()** if you're running python 3.x, in which case **str()** provides a + **unicode** string. 
+ + :returns: **bytes** for the descriptor's contents + """ + + return self._raw_contents + + def get_unrecognized_lines(self): + """ + Provides a list of lines that were either ignored or had data that we did + not know how to process. This is most common due to new descriptor fields + that this library does not yet know how to process. Patches welcome! + + :returns: **list** of lines of unrecognized content + """ + + if self._lazy_loading: + # we need to go ahead and parse the whole document to figure this out + self._parse(self._entries, False) + self._lazy_loading = False + + return list(self._unrecognized_lines) + + def _parse(self, entries, validate, parser_for_line = None): + """ + Parses a series of 'keyword => (value, pgp block)' mappings and applies + them as attributes. + + :param dict entries: descriptor contents to be applied + :param bool validate: checks the validity of descriptor content if True + :param dict parsers: mapping of lines to the function for parsing it + + :raises: **ValueError** if an error occurs in validation + """ + + if parser_for_line is None: + parser_for_line = self.PARSER_FOR_LINE + + # set defaults + + for attr in self.ATTRIBUTES: + if not hasattr(self, attr): + setattr(self, attr, copy.copy(self.ATTRIBUTES[attr][0])) + + for keyword, values in list(entries.items()): + try: + if keyword in parser_for_line: + parser_for_line[keyword](self, entries) + else: + for value, block_type, block_contents in values: + line = '%s %s' % (keyword, value) + + if block_contents: + line += '\n%s' % block_contents + + self._unrecognized_lines.append(line) + except ValueError as exc: + if validate: + raise exc + + def _set_path(self, path): + self._path = path + + def _set_archive_path(self, path): + self._archive_path = path + + def _name(self, is_plural = False): + return str(type(self)) + + def _digest_for_signature(self, signing_key, signature): + """ + Provides the signed digest we should have given this key and signature. + + :param str signing_key: key block used to make this signature + :param str signature: signed digest for this descriptor content + + :returns: the digest string encoded in uppercase hex + + :raises: ValueError if unable to provide a validly signed digest + """ + + if not stem.prereq.is_crypto_available(): + raise ValueError('Generating the signed digest requires pycrypto') + + from Crypto.Util import asn1 + from Crypto.Util.number import bytes_to_long, long_to_bytes + + # get the ASN.1 sequence + + seq = asn1.DerSequence() + seq.decode(_bytes_for_block(signing_key)) + modulus, public_exponent = seq[0], seq[1] + + sig_as_bytes = _bytes_for_block(signature) + sig_as_long = bytes_to_long(sig_as_bytes) # convert signature to an int + blocksize = 128 # block size will always be 128 for a 1024 bit key + + # use the public exponent[e] & the modulus[n] to decrypt the int + + decrypted_int = pow(sig_as_long, public_exponent, modulus) + + # convert the int to a byte array + + decrypted_bytes = long_to_bytes(decrypted_int, blocksize) + + ############################################################################ + # The decrypted bytes should have a structure exactly along these lines. 
+ # 1 byte - [null '\x00'] + # 1 byte - [block type identifier '\x01'] - Should always be 1 + # N bytes - [padding '\xFF' ] + # 1 byte - [separator '\x00' ] + # M bytes - [message] + # Total - 128 bytes + # More info here http://www.ietf.org/rfc/rfc2313.txt + # esp the Notes in section 8.1 + ############################################################################ + + try: + if decrypted_bytes.index(b'\x00\x01') != 0: + raise ValueError('Verification failed, identifier missing') + except ValueError: + raise ValueError('Verification failed, malformed data') + + try: + identifier_offset = 2 + + # find the separator + seperator_index = decrypted_bytes.index(b'\x00', identifier_offset) + except ValueError: + raise ValueError('Verification failed, seperator not found') + + digest_hex = codecs.encode(decrypted_bytes[seperator_index + 1:], 'hex_codec') + return stem.util.str_tools._to_unicode(digest_hex.upper()) + + def _digest_for_content(self, start, end): + """ + Provides the digest of our descriptor's content in a given range. + + :param bytes start: start of the range to generate a digest for + :param bytes end: end of the range to generate a digest for + + :returns: the digest string encoded in uppercase hex + + :raises: ValueError if the digest canot be calculated + """ + + raw_descriptor = self.get_bytes() + + start_index = raw_descriptor.find(start) + end_index = raw_descriptor.find(end, start_index) + + if start_index == -1: + raise ValueError("Digest is for the range starting with '%s' but that isn't in our descriptor" % start) + elif end_index == -1: + raise ValueError("Digest is for the range ending with '%s' but that isn't in our descriptor" % end) + + digest_content = raw_descriptor[start_index:end_index + len(end)] + digest_hash = hashlib.sha1(stem.util.str_tools._to_bytes(digest_content)) + return stem.util.str_tools._to_unicode(digest_hash.hexdigest().upper()) + + def __getattr__(self, name): + # If attribute isn't already present we might be lazy loading it... + + if self._lazy_loading and name in self.ATTRIBUTES: + default, parsing_function = self.ATTRIBUTES[name] + + try: + parsing_function(self, self._entries) + except (ValueError, KeyError): + try: + # despite having a validation failure check to see if we set something + return super(Descriptor, self).__getattribute__(name) + except AttributeError: + setattr(self, name, copy.copy(default)) + + return super(Descriptor, self).__getattribute__(name) + + def __str__(self): + if stem.prereq.is_python_3(): + return stem.util.str_tools._to_unicode(self._raw_contents) + else: + return self._raw_contents + + +def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_first = False, skip = False, end_position = None, include_ending_keyword = False): + """ + Reads from the descriptor file until we get to one of the given keywords or reach the + end of the file. 
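Tying the Descriptor accessors above together, a small sketch (again assuming a readable tor data directory) could be::

  from stem.descriptor import parse_file

  for desc in parse_file('/var/lib/tor/cached-descriptors'):
    print(str(desc).splitlines()[0])  # the descriptor's first line

    for line in desc.get_unrecognized_lines():
      print('  unrecognized: %s' % line)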
+ + :param str,list keywords: keyword(s) we want to read until + :param file descriptor_file: file with the descriptor content + :param bool inclusive: includes the line with the keyword if True + :param bool ignore_first: doesn't check if the first line read has one of the + given keywords + :param bool skip: skips buffering content, returning None + :param int end_position: end if we reach this point in the file + :param bool include_ending_keyword: provides the keyword we broke on if **True** + + :returns: **list** with the lines until we find one of the keywords, this is + a two value tuple with the ending keyword if include_ending_keyword is + **True** + """ + + if skip: + content = None + content_append = lambda x: None + else: + content = [] + content_append = content.append + + ending_keyword = None + + if isinstance(keywords, (bytes, str_type)): + keywords = (keywords,) + + if ignore_first: + first_line = descriptor_file.readline() + + if first_line: + content_append(first_line) + + keyword_match = re.compile(SPECIFIC_KEYWORD_LINE % '|'.join(keywords)) + + while True: + last_position = descriptor_file.tell() + + if end_position and last_position >= end_position: + break + + line = descriptor_file.readline() + + if not line: + break # EOF + + line_match = keyword_match.match(stem.util.str_tools._to_unicode(line)) + + if line_match: + ending_keyword = line_match.groups()[0] + + if not inclusive: + descriptor_file.seek(last_position) + else: + content_append(line) + + break + else: + content_append(line) + + if include_ending_keyword: + return (content, ending_keyword) + else: + return content + + +def _bytes_for_block(content): + """ + Provides the base64 decoded content of a pgp-style block. + + :param str content: block to be decoded + + :returns: decoded block content + + :raises: **TypeError** if this isn't base64 encoded content + """ + + # strip the '-----BEGIN RSA PUBLIC KEY-----' header and footer + + content = ''.join(content.split('\n')[1:-1]) + + return base64.b64decode(stem.util.str_tools._to_bytes(content)) + + +def _get_pseudo_pgp_block(remaining_contents): + """ + Checks if given contents begins with a pseudo-Open-PGP-style block and, if + so, pops it off and provides it back to the caller. + + :param list remaining_contents: lines to be checked for a public key block + + :returns: **tuple** of the (block_type, content) or None if it doesn't exist + + :raises: **ValueError** if the contents starts with a key block but it's + malformed (for instance, if it lacks an ending line) + """ + + if not remaining_contents: + return None # nothing left + + block_match = PGP_BLOCK_START.match(remaining_contents[0]) + + if block_match: + block_type = block_match.groups()[0] + block_lines = [] + end_line = PGP_BLOCK_END % block_type + + while True: + if not remaining_contents: + raise ValueError("Unterminated pgp style block (looking for '%s'):\n%s" % (end_line, '\n'.join(block_lines))) + + line = remaining_contents.pop(0) + block_lines.append(line) + + if line == end_line: + return (block_type, '\n'.join(block_lines)) + else: + return None + + +def _get_descriptor_components(raw_contents, validate, extra_keywords = ()): + """ + Initial breakup of the server descriptor contents to make parsing easier. + + A descriptor contains a series of 'keyword lines' which are simply a keyword + followed by an optional value. Lines can also be followed by a signature + block. + + To get a sub-listing with just certain keywords use extra_keywords. 
This can + be useful if we care about their relative ordering with respect to each + other. For instance, we care about the ordering of 'accept' and 'reject' + entries because this influences the resulting exit policy, but for everything + else in server descriptors the order does not matter. + + :param str raw_contents: descriptor content provided by the relay + :param bool validate: checks the validity of the descriptor's content if + True, skips these checks otherwise + :param list extra_keywords: entity keywords to put into a separate listing + with ordering intact + + :returns: + **collections.OrderedDict** with the 'keyword => (value, pgp key) entries' + mappings. If a extra_keywords was provided then this instead provides a two + value tuple, the second being a list of those entries. + """ + + if isinstance(raw_contents, bytes): + raw_contents = stem.util.str_tools._to_unicode(raw_contents) + + entries = OrderedDict() + extra_entries = [] # entries with a keyword in extra_keywords + remaining_lines = raw_contents.split('\n') + + while remaining_lines: + line = remaining_lines.pop(0) + + # V2 network status documents explicitly can contain blank lines... + # + # "Implementations MAY insert blank lines for clarity between sections; + # these blank lines are ignored." + # + # ... and server descriptors end with an extra newline. But other documents + # don't say how blank lines should be handled so globally ignoring them. + + if not line: + continue + + # Some lines have an 'opt ' for backward compatibility. They should be + # ignored. This prefix is being removed in... + # https://trac.torproject.org/projects/tor/ticket/5124 + + if line.startswith('opt '): + line = line[4:] + + line_match = KEYWORD_LINE.match(line) + + if not line_match: + if not validate: + continue + + raise ValueError('Line contains invalid characters: %s' % line) + + keyword, value = line_match.groups() + + if value is None: + value = '' + + try: + block_attr = _get_pseudo_pgp_block(remaining_lines) + + if block_attr: + block_type, block_contents = block_attr + else: + block_type, block_contents = None, None + except ValueError as exc: + if not validate: + continue + + raise exc + + if keyword in extra_keywords: + extra_entries.append('%s %s' % (keyword, value)) + else: + entries.setdefault(keyword, []).append((value, block_type, block_contents)) + + if extra_keywords: + return entries, extra_entries + else: + return entries + +# importing at the end to avoid circular dependencies on our Descriptor class + +import stem.descriptor.server_descriptor +import stem.descriptor.extrainfo_descriptor +import stem.descriptor.networkstatus +import stem.descriptor.microdescriptor +import stem.descriptor.tordnsel +import stem.descriptor.hidden_service_descriptor diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/export.py b/Shared/lib/python3.4/site-packages/stem/descriptor/export.py new file mode 100644 index 0000000..f90a607 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/export.py @@ -0,0 +1,110 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Toolkit for exporting descriptors to other formats. 
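For example, a rough sketch of exporting a few cached server descriptors to
CSV (the path below and the use of parse_file are only illustrative
assumptions, any parsed descriptors of a single type will do)::

  import stem.descriptor
  import stem.descriptor.export

  descriptors = list(stem.descriptor.parse_file('/var/lib/tor/cached-descriptors', validate = True))
  print(stem.descriptor.export.export_csv(descriptors[:3], included_fields = ('nickname', 'fingerprint', 'published')))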
+ +**Module Overview:** + +:: + + export_csv - Exports descriptors to a CSV + export_csv_file - Writes exported CSV output to a file +""" + +import csv + +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO + +import stem.descriptor +import stem.prereq + + +class _ExportDialect(csv.excel): + lineterminator = '\n' + + +def export_csv(descriptors, included_fields = (), excluded_fields = (), header = True): + """ + Provides a newline separated CSV for one or more descriptors. If simply + provided with descriptors then the CSV contains all of its attributes, + labeled with a header row. Either 'included_fields' or 'excluded_fields' can + be used for more granular control over its attributes and the order. + + :param Descriptor,list descriptors: either a + :class:`~stem.descriptor.Descriptor` or list of descriptors to be exported + :param list included_fields: attributes to include in the csv + :param list excluded_fields: attributes to exclude from the csv + :param bool header: if **True** then the first line will be a comma separated + list of the attribute names (**only supported in python 2.7 and higher**) + + :returns: **str** of the CSV for the descriptors, one per line + :raises: **ValueError** if descriptors contain more than one descriptor type + """ + + output_buffer = StringIO() + export_csv_file(output_buffer, descriptors, included_fields, excluded_fields, header) + return output_buffer.getvalue() + + +def export_csv_file(output_file, descriptors, included_fields = (), excluded_fields = (), header = True): + """ + Similar to :func:`stem.descriptor.export.export_csv`, except that the CSV is + written directly to a file. + + :param file output_file: file to be written to + :param Descriptor,list descriptors: either a + :class:`~stem.descriptor.Descriptor` or list of descriptors to be exported + :param list included_fields: attributes to include in the csv + :param list excluded_fields: attributes to exclude from the csv + :param bool header: if **True** then the first line will be a comma separated + list of the attribute names (**only supported in python 2.7 and higher**) + + :returns: **str** of the CSV for the descriptors, one per line + :raises: **ValueError** if descriptors contain more than one descriptor type + """ + + if isinstance(descriptors, stem.descriptor.Descriptor): + descriptors = (descriptors,) + + if not descriptors: + return + + descriptor_type = type(descriptors[0]) + descriptor_type_label = descriptor_type.__name__ + included_fields = list(included_fields) + + # If the user didn't specify the fields to include then export everything, + # ordered alphabetically. If they did specify fields then make sure that + # they exist. + + desc_attr = sorted(vars(descriptors[0]).keys()) + + if included_fields: + for field in included_fields: + if field not in desc_attr: + raise ValueError("%s does not have a '%s' attribute, valid fields are: %s" % (descriptor_type_label, field, ', '.join(desc_attr))) + else: + included_fields = [attr for attr in desc_attr if not attr.startswith('_')] + + for field in excluded_fields: + try: + included_fields.remove(field) + except ValueError: + pass + + writer = csv.DictWriter(output_file, included_fields, dialect = _ExportDialect(), extrasaction='ignore') + + if header and stem.prereq.is_python_27(): + writer.writeheader() + + for desc in descriptors: + if not isinstance(desc, stem.descriptor.Descriptor): + raise ValueError('Unable to export a descriptor CSV since %s is not a descriptor.' 
% type(desc).__name__) + elif descriptor_type != type(desc): + raise ValueError('To export a descriptor CSV all of the descriptors must be of the same type. First descriptor was a %s but we later got a %s.' % (descriptor_type_label, type(desc))) + + writer.writerow(vars(desc)) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/extrainfo_descriptor.py b/Shared/lib/python3.4/site-packages/stem/descriptor/extrainfo_descriptor.py new file mode 100644 index 0000000..607bbbe --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/extrainfo_descriptor.py @@ -0,0 +1,939 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parsing for Tor extra-info descriptors. These are published by relays whenever +their server descriptor is published and have a similar format. However, unlike +server descriptors these don't contain information that Tor clients require to +function and as such aren't fetched by default. + +Defined in section 2.2 of the `dir-spec +`_, +extra-info descriptors contain interesting but non-vital information such as +usage statistics. Tor clients cannot request these documents for bridges. + +Extra-info descriptors are available from a few sources... + +* If you have 'DownloadExtraInfo 1' in your torrc... + + * control port via 'GETINFO extra-info/digest/\*' queries + * the 'cached-extrainfo' file in tor's data directory + +* Archived descriptors provided by CollecTor + (https://collector.torproject.org/). + +* Directory authorities and mirrors via their DirPort. + +**Module Overview:** + +:: + + ExtraInfoDescriptor - Tor extra-info descriptor. + |- RelayExtraInfoDescriptor - Extra-info descriptor for a relay. + |- BridgeExtraInfoDescriptor - Extra-info descriptor for a bridge. + | + +- digest - calculates the upper-case hex digest value for our content + +.. data:: DirResponse (enum) + + Enumeration for known statuses for ExtraInfoDescriptor's dir_*_responses. + + =================== =========== + DirResponse Description + =================== =========== + **OK** network status requests that were answered + **NOT_ENOUGH_SIGS** network status wasn't signed by enough authorities + **UNAVAILABLE** requested network status was unavailable + **NOT_FOUND** requested network status was not found + **NOT_MODIFIED** network status unmodified since If-Modified-Since time + **BUSY** directory was busy + =================== =========== + +.. data:: DirStat (enum) + + Enumeration for known stats for ExtraInfoDescriptor's dir_*_direct_dl and + dir_*_tunneled_dl. 
+ + ===================== =========== + DirStat Description + ===================== =========== + **COMPLETE** requests that completed successfully + **TIMEOUT** requests that didn't complete within a ten minute timeout + **RUNNING** requests still in process when measurement's taken + **MIN** smallest rate at which a descriptor was downloaded in B/s + **MAX** largest rate at which a descriptor was downloaded in B/s + **D1-4** and **D6-9** rate of the slowest x/10 download rates in B/s + **Q1** and **Q3** rate of the slowest and fastest quarter download rates in B/s + **MD** median download rate in B/s + ===================== =========== +""" + +import functools +import hashlib +import re + +import stem.util.connection +import stem.util.enum +import stem.util.str_tools + +from stem.descriptor import ( + PGP_BLOCK_END, + Descriptor, + _read_until_keywords, + _get_descriptor_components, + _value, + _values, + _parse_timestamp_line, + _parse_forty_character_hex, + _parse_key_block, +) + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + +# known statuses for dirreq-v2-resp and dirreq-v3-resp... +DirResponse = stem.util.enum.Enum( + ('OK', 'ok'), + ('NOT_ENOUGH_SIGS', 'not-enough-sigs'), + ('UNAVAILABLE', 'unavailable'), + ('NOT_FOUND', 'not-found'), + ('NOT_MODIFIED', 'not-modified'), + ('BUSY', 'busy'), +) + +# known stats for dirreq-v2/3-direct-dl and dirreq-v2/3-tunneled-dl... +dir_stats = ['complete', 'timeout', 'running', 'min', 'max', 'q1', 'q3', 'md'] +dir_stats += ['d%i' % i for i in range(1, 5)] +dir_stats += ['d%i' % i for i in range(6, 10)] +DirStat = stem.util.enum.Enum(*[(stat.upper(), stat) for stat in dir_stats]) + +# relay descriptors must have exactly one of the following +REQUIRED_FIELDS = ( + 'extra-info', + 'published', + 'router-signature', +) + +# optional entries that can appear at most once +SINGLE_FIELDS = ( + 'read-history', + 'write-history', + 'geoip-db-digest', + 'geoip6-db-digest', + 'bridge-stats-end', + 'bridge-ips', + 'dirreq-stats-end', + 'dirreq-v2-ips', + 'dirreq-v3-ips', + 'dirreq-v2-reqs', + 'dirreq-v3-reqs', + 'dirreq-v2-share', + 'dirreq-v3-share', + 'dirreq-v2-resp', + 'dirreq-v3-resp', + 'dirreq-v2-direct-dl', + 'dirreq-v3-direct-dl', + 'dirreq-v2-tunneled-dl', + 'dirreq-v3-tunneled-dl', + 'dirreq-read-history', + 'dirreq-write-history', + 'entry-stats-end', + 'entry-ips', + 'cell-stats-end', + 'cell-processed-cells', + 'cell-queued-cells', + 'cell-time-in-queue', + 'cell-circuits-per-decile', + 'conn-bi-direct', + 'exit-stats-end', + 'exit-kibibytes-written', + 'exit-kibibytes-read', + 'exit-streams-opened', +) + + +_timestamp_re = re.compile('^(.*) \(([0-9]+) s\)( .*)?$') +_locale_re = re.compile('^[a-zA-Z0-9\?]{2}$') + + +def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs): + """ + Iterates over the extra-info descriptors in a file. 
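  A short sketch of driving this directly (tor's 'cached-extrainfo' file is
  mentioned above, its location depends on your DataDirectory)::

    with open('cached-extrainfo', 'rb') as extrainfo_file:
      for desc in _parse_file(extrainfo_file):
        print('%s published %s' % (desc.nickname, desc.published))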
+ + :param file descriptor_file: file with descriptor content + :param bool is_bridge: parses the file as being a bridge descriptor + :param bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :param dict kwargs: additional arguments for the descriptor constructor + + :returns: iterator for :class:`~stem.descriptor.extrainfo_descriptor.ExtraInfoDescriptor` + instances in the file + + :raises: + * **ValueError** if the contents is malformed and validate is **True** + * **IOError** if the file can't be read + """ + + while True: + if not is_bridge: + extrainfo_content = _read_until_keywords('router-signature', descriptor_file) + + # we've reached the 'router-signature', now include the pgp style block + + block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0] + extrainfo_content += _read_until_keywords(block_end_prefix, descriptor_file, True) + else: + extrainfo_content = _read_until_keywords('router-digest', descriptor_file, True) + + if extrainfo_content: + if extrainfo_content[0].startswith(b'@type'): + extrainfo_content = extrainfo_content[1:] + + if is_bridge: + yield BridgeExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs) + else: + yield RelayExtraInfoDescriptor(bytes.join(b'', extrainfo_content), validate, **kwargs) + else: + break # done parsing file + + +def _parse_timestamp_and_interval(keyword, content): + """ + Parses a 'YYYY-MM-DD HH:MM:SS (NSEC s) *' entry. + + :param str keyword: line's keyword + :param str content: line content to be parsed + + :returns: **tuple** of the form (timestamp (**datetime**), interval + (**int**), remaining content (**str**)) + + :raises: **ValueError** if the content is malformed + """ + + line = '%s %s' % (keyword, content) + content_match = _timestamp_re.match(content) + + if not content_match: + raise ValueError('Malformed %s line: %s' % (keyword, line)) + + timestamp_str, interval, remainder = content_match.groups() + + if remainder: + remainder = remainder[1:] # remove leading space + + if not interval.isdigit(): + raise ValueError("%s line's interval wasn't a number: %s" % (keyword, line)) + + try: + timestamp = stem.util.str_tools._parse_timestamp(timestamp_str) + return timestamp, int(interval), remainder + except ValueError: + raise ValueError("%s line's timestamp wasn't parsable: %s" % (keyword, line)) + + +def _parse_extra_info_line(descriptor, entries): + # "extra-info" Nickname Fingerprint + + value = _value('extra-info', entries) + extra_info_comp = value.split() + + if len(extra_info_comp) < 2: + raise ValueError('Extra-info line must have two values: extra-info %s' % value) + elif not stem.util.tor_tools.is_valid_nickname(extra_info_comp[0]): + raise ValueError("Extra-info line entry isn't a valid nickname: %s" % extra_info_comp[0]) + elif not stem.util.tor_tools.is_valid_fingerprint(extra_info_comp[1]): + raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % extra_info_comp[1]) + + descriptor.nickname = extra_info_comp[0] + descriptor.fingerprint = extra_info_comp[1] + + +def _parse_transport_line(descriptor, entries): + # "transport" transportname address:port [arglist] + # Everything after the transportname is scrubbed in published bridge + # descriptors, so we'll never see it in practice. + # + # These entries really only make sense for bridges, but have been seen + # on non-bridges in the wild when the relay operator configured it this + # way. 
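  #
  # As an illustrative sketch (the address and arguments below are made up,
  # not real bridge data)...
  #
  #   unscrubbed: transport obfs4 198.51.100.5:443 cert=abc,iat-mode=0
  #   scrubbed:   transport obfs4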
+ + transports = {} + + for value in _values('transport', entries): + name, address, port, args = None, None, None, None + + if ' ' not in value: + # scrubbed + name = value + else: + # not scrubbed + value_comp = value.split() + + if len(value_comp) < 1: + raise ValueError('Transport line is missing its transport name: transport %s' % value) + elif len(value_comp) < 2: + raise ValueError('Transport line is missing its address:port value: transport %s' % value) + elif ':' not in value_comp[1]: + raise ValueError("Transport line's address:port entry is missing a colon: transport %s" % value) + + name = value_comp[0] + address, port_str = value_comp[1].split(':', 1) + + if not stem.util.connection.is_valid_ipv4_address(address) or \ + stem.util.connection.is_valid_ipv6_address(address): + raise ValueError('Transport line has a malformed address: transport %s' % value) + elif not stem.util.connection.is_valid_port(port_str): + raise ValueError('Transport line has a malformed port: transport %s' % value) + + port = int(port_str) + args = value_comp[2:] if len(value_comp) >= 3 else [] + + transports[name] = (address, port, args) + + descriptor.transport = transports + + +def _parse_cell_circuits_per_decline_line(descriptor, entries): + # "cell-circuits-per-decile" num + + value = _value('cell-circuits-per-decile', entries) + + if not value.isdigit(): + raise ValueError('Non-numeric cell-circuits-per-decile value: %s' % value) + elif int(value) < 0: + raise ValueError('Negative cell-circuits-per-decile value: %s' % value) + + descriptor.cell_circuits_per_decile = int(value) + + +def _parse_dirreq_line(keyword, recognized_counts_attr, unrecognized_counts_attr, descriptor, entries): + value = _value(keyword, entries) + + recognized_counts = {} + unrecognized_counts = {} + + is_response_stats = keyword in ('dirreq-v2-resp', 'dirreq-v3-resp') + key_set = DirResponse if is_response_stats else DirStat + + key_type = 'STATUS' if is_response_stats else 'STAT' + error_msg = '%s lines should contain %s=COUNT mappings: %s %s' % (keyword, key_type, keyword, value) + + if value: + for entry in value.split(','): + if '=' not in entry: + raise ValueError(error_msg) + + status, count = entry.split('=', 1) + + if count.isdigit(): + if status in key_set: + recognized_counts[status] = int(count) + else: + unrecognized_counts[status] = int(count) + else: + raise ValueError(error_msg) + + setattr(descriptor, recognized_counts_attr, recognized_counts) + setattr(descriptor, unrecognized_counts_attr, unrecognized_counts) + + +def _parse_dirreq_share_line(keyword, attribute, descriptor, entries): + value = _value(keyword, entries) + + if not value.endswith('%'): + raise ValueError('%s lines should be a percentage: %s %s' % (keyword, keyword, value)) + elif float(value[:-1]) < 0: + raise ValueError('Negative percentage value: %s %s' % (keyword, value)) + + # bug means it might be above 100%: https://lists.torproject.org/pipermail/tor-dev/2012-June/003679.html + + setattr(descriptor, attribute, float(value[:-1]) / 100) + + +def _parse_cell_line(keyword, attribute, descriptor, entries): + # "" num,...,num + + value = _value(keyword, entries) + entries, exc = [], None + + if value: + for entry in value.split(','): + try: + # Values should be positive but as discussed in ticket #5849 + # there was a bug around this. It was fixed in tor 0.2.2.1. 
+ + entries.append(float(entry)) + except ValueError: + exc = ValueError('Non-numeric entry in %s listing: %s %s' % (keyword, keyword, value)) + + setattr(descriptor, attribute, entries) + + if exc: + raise exc + + +def _parse_timestamp_and_interval_line(keyword, end_attribute, interval_attribute, descriptor, entries): + # "" YYYY-MM-DD HH:MM:SS (NSEC s) + + timestamp, interval, _ = _parse_timestamp_and_interval(keyword, _value(keyword, entries)) + setattr(descriptor, end_attribute, timestamp) + setattr(descriptor, interval_attribute, interval) + + +def _parse_conn_bi_direct_line(descriptor, entries): + # "conn-bi-direct" YYYY-MM-DD HH:MM:SS (NSEC s) BELOW,READ,WRITE,BOTH + + value = _value('conn-bi-direct', entries) + timestamp, interval, remainder = _parse_timestamp_and_interval('conn-bi-direct', value) + stats = remainder.split(',') + + if len(stats) != 4 or not (stats[0].isdigit() and stats[1].isdigit() and stats[2].isdigit() and stats[3].isdigit()): + raise ValueError('conn-bi-direct line should end with four numeric values: conn-bi-direct %s' % value) + + descriptor.conn_bi_direct_end = timestamp + descriptor.conn_bi_direct_interval = interval + descriptor.conn_bi_direct_below = int(stats[0]) + descriptor.conn_bi_direct_read = int(stats[1]) + descriptor.conn_bi_direct_write = int(stats[2]) + descriptor.conn_bi_direct_both = int(stats[3]) + + +def _parse_history_line(keyword, end_attribute, interval_attribute, values_attribute, descriptor, entries): + # "" YYYY-MM-DD HH:MM:SS (NSEC s) NUM,NUM,NUM,NUM,NUM... + + value = _value(keyword, entries) + timestamp, interval, remainder = _parse_timestamp_and_interval(keyword, value) + history_values = [] + + if remainder: + try: + history_values = [int(entry) for entry in remainder.split(',')] + except ValueError: + raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value)) + + setattr(descriptor, end_attribute, timestamp) + setattr(descriptor, interval_attribute, interval) + setattr(descriptor, values_attribute, history_values) + + +def _parse_port_count_line(keyword, attribute, descriptor, entries): + # "" port=N,port=N,... + + value, port_mappings = _value(keyword, entries), {} + error_msg = 'Entries in %s line should only be PORT=N entries: %s %s' % (keyword, keyword, value) + + if value: + for entry in value.split(','): + if '=' not in entry: + raise ValueError(error_msg) + + port, stat = entry.split('=', 1) + + if (port == 'other' or stem.util.connection.is_valid_port(port)) and stat.isdigit(): + if port != 'other': + port = int(port) + + port_mappings[port] = int(stat) + else: + raise ValueError(error_msg) + + setattr(descriptor, attribute, port_mappings) + + +def _parse_geoip_to_count_line(keyword, attribute, descriptor, entries): + # "" CC=N,CC=N,... + # + # The maxmind geoip (https://www.maxmind.com/app/iso3166) has numeric + # locale codes for some special values, for instance... 
+ # A1,"Anonymous Proxy" + # A2,"Satellite Provider" + # ??,"Unknown" + + value, locale_usage = _value(keyword, entries), {} + error_msg = 'Entries in %s line should only be CC=N entries: %s %s' % (keyword, keyword, value) + + if value: + for entry in value.split(','): + if '=' not in entry: + raise ValueError(error_msg) + + locale, count = entry.split('=', 1) + + if _locale_re.match(locale) and count.isdigit(): + locale_usage[locale] = int(count) + else: + raise ValueError(error_msg) + + setattr(descriptor, attribute, locale_usage) + + +def _parse_bridge_ip_versions_line(descriptor, entries): + value, ip_versions = _value('bridge-ip-versions', entries), {} + + if value: + for entry in value.split(','): + if '=' not in entry: + raise stem.ProtocolError("The bridge-ip-versions should be a comma separated listing of '=' mappings: bridge-ip-versions %s" % value) + + protocol, count = entry.split('=', 1) + + if not count.isdigit(): + raise stem.ProtocolError('IP protocol count was non-numeric (%s): bridge-ip-versions %s' % (count, value)) + + ip_versions[protocol] = int(count) + + descriptor.ip_versions = ip_versions + + +def _parse_bridge_ip_transports_line(descriptor, entries): + value, ip_transports = _value('bridge-ip-transports', entries), {} + + if value: + for entry in value.split(','): + if '=' not in entry: + raise stem.ProtocolError("The bridge-ip-transports should be a comma separated listing of '=' mappings: bridge-ip-transports %s" % value) + + protocol, count = entry.split('=', 1) + + if not count.isdigit(): + raise stem.ProtocolError('Transport count was non-numeric (%s): bridge-ip-transports %s' % (count, value)) + + ip_transports[protocol] = int(count) + + descriptor.ip_transports = ip_transports + + +def _parse_hs_stats(keyword, stat_attribute, extra_attribute, descriptor, entries): + # "" num key=val key=val... 
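  #
  # For instance (the figures below are just an illustrative sketch)...
  #
  #   hidserv-rend-relayed-cells 19456 delta_f=2048 epsilon=0.30 bin_size=1024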
+ + value, stat, extra = _value(keyword, entries), None, {} + + if value is not None: + value_comp = value.split() + + if not value_comp: + raise ValueError("'%s' line was blank" % keyword) + + try: + stat = int(value_comp[0]) + except ValueError: + raise ValueError("'%s' stat was non-numeric (%s): %s %s" % (keyword, value_comp[0], keyword, value)) + + for entry in value_comp[1:]: + if '=' not in entry: + raise ValueError('Entries after the stat in %s lines should only be key=val entries: %s %s' % (keyword, keyword, value)) + + key, val = entry.split('=', 1) + extra[key] = val + + setattr(descriptor, stat_attribute, stat) + setattr(descriptor, extra_attribute, extra) + + +_parse_geoip_db_digest_line = _parse_forty_character_hex('geoip-db-digest', 'geoip_db_digest') +_parse_geoip6_db_digest_line = _parse_forty_character_hex('geoip6-db-digest', 'geoip6_db_digest') +_parse_dirreq_v2_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-resp', 'dir_v2_responses', 'dir_v2_responses_unknown') +_parse_dirreq_v3_resp_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-resp', 'dir_v3_responses', 'dir_v3_responses_unknown') +_parse_dirreq_v2_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-direct-dl', 'dir_v2_direct_dl', 'dir_v2_direct_dl_unknown') +_parse_dirreq_v3_direct_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-direct-dl', 'dir_v3_direct_dl', 'dir_v3_direct_dl_unknown') +_parse_dirreq_v2_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v2-tunneled-dl', 'dir_v2_tunneled_dl', 'dir_v2_tunneled_dl_unknown') +_parse_dirreq_v3_tunneled_dl_line = functools.partial(_parse_dirreq_line, 'dirreq-v3-tunneled-dl', 'dir_v3_tunneled_dl', 'dir_v3_tunneled_dl_unknown') +_parse_dirreq_v2_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v2-share', 'dir_v2_share') +_parse_dirreq_v3_share_line = functools.partial(_parse_dirreq_share_line, 'dirreq-v3-share', 'dir_v3_share') +_parse_cell_processed_cells_line = functools.partial(_parse_cell_line, 'cell-processed-cells', 'cell_processed_cells') +_parse_cell_queued_cells_line = functools.partial(_parse_cell_line, 'cell-queued-cells', 'cell_queued_cells') +_parse_cell_time_in_queue_line = functools.partial(_parse_cell_line, 'cell-time-in-queue', 'cell_time_in_queue') +_parse_published_line = _parse_timestamp_line('published', 'published') +_parse_geoip_start_time_line = _parse_timestamp_line('geoip-start-time', 'geoip_start_time') +_parse_cell_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'cell-stats-end', 'cell_stats_end', 'cell_stats_interval') +_parse_entry_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'entry-stats-end', 'entry_stats_end', 'entry_stats_interval') +_parse_exit_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'exit-stats-end', 'exit_stats_end', 'exit_stats_interval') +_parse_bridge_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'bridge-stats-end', 'bridge_stats_end', 'bridge_stats_interval') +_parse_dirreq_stats_end_line = functools.partial(_parse_timestamp_and_interval_line, 'dirreq-stats-end', 'dir_stats_end', 'dir_stats_interval') +_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values') +_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values') +_parse_dirreq_read_history_line = 
functools.partial(_parse_history_line, 'dirreq-read-history', 'dir_read_history_end', 'dir_read_history_interval', 'dir_read_history_values') +_parse_dirreq_write_history_line = functools.partial(_parse_history_line, 'dirreq-write-history', 'dir_write_history_end', 'dir_write_history_interval', 'dir_write_history_values') +_parse_exit_kibibytes_written_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-written', 'exit_kibibytes_written') +_parse_exit_kibibytes_read_line = functools.partial(_parse_port_count_line, 'exit-kibibytes-read', 'exit_kibibytes_read') +_parse_exit_streams_opened_line = functools.partial(_parse_port_count_line, 'exit-streams-opened', 'exit_streams_opened') +_parse_hidden_service_stats_end_line = _parse_timestamp_line('hidserv-stats-end', 'hs_stats_end') +_parse_hidden_service_rend_relayed_cells_line = functools.partial(_parse_hs_stats, 'hidserv-rend-relayed-cells', 'hs_rend_cells', 'hs_rend_cells_attr') +_parse_hidden_service_dir_onions_seen_line = functools.partial(_parse_hs_stats, 'hidserv-dir-onions-seen', 'hs_dir_onions_seen', 'hs_dir_onions_seen_attr') +_parse_dirreq_v2_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-ips', 'dir_v2_ips') +_parse_dirreq_v3_ips_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-ips', 'dir_v3_ips') +_parse_dirreq_v2_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v2-reqs', 'dir_v2_requests') +_parse_dirreq_v3_reqs_line = functools.partial(_parse_geoip_to_count_line, 'dirreq-v3-reqs', 'dir_v3_requests') +_parse_geoip_client_origins_line = functools.partial(_parse_geoip_to_count_line, 'geoip-client-origins', 'geoip_client_origins') +_parse_entry_ips_line = functools.partial(_parse_geoip_to_count_line, 'entry-ips', 'entry_ips') +_parse_bridge_ips_line = functools.partial(_parse_geoip_to_count_line, 'bridge-ips', 'bridge_ips') +_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest') +_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE') + + +class ExtraInfoDescriptor(Descriptor): + """ + Extra-info descriptor document. 
+ + :var str nickname: **\*** relay's nickname + :var str fingerprint: **\*** identity key fingerprint + :var datetime published: **\*** time in UTC when this descriptor was made + :var str geoip_db_digest: sha1 of the geoIP database file for IPv4 addresses + :var str geoip6_db_digest: sha1 of the geoIP database file for IPv6 addresses + :var dict transport: **\*** mapping of transport methods to their (address, + port, args) tuple, these usually appear on bridges in which case all of + those are **None** + + **Bi-directional connection usage:** + + :var datetime conn_bi_direct_end: end of the sampling interval + :var int conn_bi_direct_interval: seconds per interval + :var int conn_bi_direct_below: connections that read/wrote less than 20 KiB + :var int conn_bi_direct_read: connections that read at least 10x more than wrote + :var int conn_bi_direct_write: connections that wrote at least 10x more than read + :var int conn_bi_direct_both: remaining connections + + **Bytes read/written for relayed traffic:** + + :var datetime read_history_end: end of the sampling interval + :var int read_history_interval: seconds per interval + :var list read_history_values: bytes read during each interval + + :var datetime write_history_end: end of the sampling interval + :var int write_history_interval: seconds per interval + :var list write_history_values: bytes written during each interval + + **Cell relaying statistics:** + + :var datetime cell_stats_end: end of the period when stats were gathered + :var int cell_stats_interval: length in seconds of the interval + :var list cell_processed_cells: measurement of processed cells per circuit + :var list cell_queued_cells: measurement of queued cells per circuit + :var list cell_time_in_queue: mean enqueued time in milliseconds for cells + :var int cell_circuits_per_decile: mean number of circuits in a decile + + **Directory Mirror Attributes:** + + :var datetime dir_stats_end: end of the period when stats were gathered + :var int dir_stats_interval: length in seconds of the interval + :var dict dir_v2_ips: mapping of locales to rounded count of requester ips + :var dict dir_v3_ips: mapping of locales to rounded count of requester ips + :var float dir_v2_share: percent of total directory traffic it expects to serve + :var float dir_v3_share: percent of total directory traffic it expects to serve + :var dict dir_v2_requests: mapping of locales to rounded count of requests + :var dict dir_v3_requests: mapping of locales to rounded count of requests + + :var dict dir_v2_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count + :var dict dir_v3_responses: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirResponse` to their rounded count + :var dict dir_v2_responses_unknown: mapping of unrecognized statuses to their count + :var dict dir_v3_responses_unknown: mapping of unrecognized statuses to their count + + :var dict dir_v2_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort + :var dict dir_v3_direct_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over DirPort + :var dict dir_v2_direct_dl_unknown: mapping of unrecognized stats to their measurement + :var dict dir_v3_direct_dl_unknown: mapping of unrecognized stats to their measurement + + :var dict dir_v2_tunneled_dl: mapping of :data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort + :var dict dir_v3_tunneled_dl: mapping of 
:data:`~stem.descriptor.extrainfo_descriptor.DirStat` to measurement over ORPort + :var dict dir_v2_tunneled_dl_unknown: mapping of unrecognized stats to their measurement + :var dict dir_v3_tunneled_dl_unknown: mapping of unrecognized stats to their measurement + + **Bytes read/written for directory mirroring:** + + :var datetime dir_read_history_end: end of the sampling interval + :var int dir_read_history_interval: seconds per interval + :var list dir_read_history_values: bytes read during each interval + + :var datetime dir_write_history_end: end of the sampling interval + :var int dir_write_history_interval: seconds per interval + :var list dir_write_history_values: bytes read during each interval + + **Guard Attributes:** + + :var datetime entry_stats_end: end of the period when stats were gathered + :var int entry_stats_interval: length in seconds of the interval + :var dict entry_ips: mapping of locales to rounded count of unique user ips + + **Exit Attributes:** + + :var datetime exit_stats_end: end of the period when stats were gathered + :var int exit_stats_interval: length in seconds of the interval + :var dict exit_kibibytes_written: traffic per port (keys are ints or 'other') + :var dict exit_kibibytes_read: traffic per port (keys are ints or 'other') + :var dict exit_streams_opened: streams per port (keys are ints or 'other') + + **Hidden Service Attributes:** + + :var datetime hs_stats_end: end of the sampling interval + :var int hs_rend_cells: rounded count of the RENDEZVOUS1 cells seen + :var int hs_rend_cells_attr: **\*** attributes provided for the hs_rend_cells + :var int hs_dir_onions_seen: rounded count of the identities seen + :var int hs_dir_onions_seen_attr: **\*** attributes provided for the hs_dir_onions_seen + + **Bridge Attributes:** + + :var datetime bridge_stats_end: end of the period when stats were gathered + :var int bridge_stats_interval: length in seconds of the interval + :var dict bridge_ips: mapping of locales to rounded count of unique user ips + :var datetime geoip_start_time: replaced by bridge_stats_end (deprecated) + :var dict geoip_client_origins: replaced by bridge_ips (deprecated) + :var dict ip_versions: mapping of ip protocols to a rounded count for the number of users + :var dict ip_versions: mapping of ip transports to a count for the number of users + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as **None** if undefined + + .. versionchanged:: 1.4.0 + Added the hs_stats_end, hs_rend_cells, hs_rend_cells_attr, + hs_dir_onions_seen, and hs_dir_onions_seen_attr attributes. 
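  A rough sketch of reading a couple of these attributes (raw_content is
  assumed to hold a single relay extra-info descriptor)::

    desc = RelayExtraInfoDescriptor(raw_content, validate = True)
    print('%s (%s)' % (desc.nickname, desc.fingerprint))

    if desc.read_history_values:
      print('read %i bytes across %i second intervals' % (sum(desc.read_history_values), desc.read_history_interval))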
+ """ + + ATTRIBUTES = { + 'nickname': (None, _parse_extra_info_line), + 'fingerprint': (None, _parse_extra_info_line), + 'published': (None, _parse_published_line), + 'geoip_db_digest': (None, _parse_geoip_db_digest_line), + 'geoip6_db_digest': (None, _parse_geoip6_db_digest_line), + 'transport': ({}, _parse_transport_line), + + 'conn_bi_direct_end': (None, _parse_conn_bi_direct_line), + 'conn_bi_direct_interval': (None, _parse_conn_bi_direct_line), + 'conn_bi_direct_below': (None, _parse_conn_bi_direct_line), + 'conn_bi_direct_read': (None, _parse_conn_bi_direct_line), + 'conn_bi_direct_write': (None, _parse_conn_bi_direct_line), + 'conn_bi_direct_both': (None, _parse_conn_bi_direct_line), + + 'read_history_end': (None, _parse_read_history_line), + 'read_history_interval': (None, _parse_read_history_line), + 'read_history_values': (None, _parse_read_history_line), + + 'write_history_end': (None, _parse_write_history_line), + 'write_history_interval': (None, _parse_write_history_line), + 'write_history_values': (None, _parse_write_history_line), + + 'cell_stats_end': (None, _parse_cell_stats_end_line), + 'cell_stats_interval': (None, _parse_cell_stats_end_line), + 'cell_processed_cells': (None, _parse_cell_processed_cells_line), + 'cell_queued_cells': (None, _parse_cell_queued_cells_line), + 'cell_time_in_queue': (None, _parse_cell_time_in_queue_line), + 'cell_circuits_per_decile': (None, _parse_cell_circuits_per_decline_line), + + 'dir_stats_end': (None, _parse_dirreq_stats_end_line), + 'dir_stats_interval': (None, _parse_dirreq_stats_end_line), + 'dir_v2_ips': (None, _parse_dirreq_v2_ips_line), + 'dir_v3_ips': (None, _parse_dirreq_v3_ips_line), + 'dir_v2_share': (None, _parse_dirreq_v2_share_line), + 'dir_v3_share': (None, _parse_dirreq_v3_share_line), + 'dir_v2_requests': (None, _parse_dirreq_v2_reqs_line), + 'dir_v3_requests': (None, _parse_dirreq_v3_reqs_line), + 'dir_v2_responses': (None, _parse_dirreq_v2_resp_line), + 'dir_v3_responses': (None, _parse_dirreq_v3_resp_line), + 'dir_v2_responses_unknown': (None, _parse_dirreq_v2_resp_line), + 'dir_v3_responses_unknown': (None, _parse_dirreq_v3_resp_line), + 'dir_v2_direct_dl': (None, _parse_dirreq_v2_direct_dl_line), + 'dir_v3_direct_dl': (None, _parse_dirreq_v3_direct_dl_line), + 'dir_v2_direct_dl_unknown': (None, _parse_dirreq_v2_direct_dl_line), + 'dir_v3_direct_dl_unknown': (None, _parse_dirreq_v3_direct_dl_line), + 'dir_v2_tunneled_dl': (None, _parse_dirreq_v2_tunneled_dl_line), + 'dir_v3_tunneled_dl': (None, _parse_dirreq_v3_tunneled_dl_line), + 'dir_v2_tunneled_dl_unknown': (None, _parse_dirreq_v2_tunneled_dl_line), + 'dir_v3_tunneled_dl_unknown': (None, _parse_dirreq_v3_tunneled_dl_line), + + 'dir_read_history_end': (None, _parse_dirreq_read_history_line), + 'dir_read_history_interval': (None, _parse_dirreq_read_history_line), + 'dir_read_history_values': (None, _parse_dirreq_read_history_line), + + 'dir_write_history_end': (None, _parse_dirreq_write_history_line), + 'dir_write_history_interval': (None, _parse_dirreq_write_history_line), + 'dir_write_history_values': (None, _parse_dirreq_write_history_line), + + 'entry_stats_end': (None, _parse_entry_stats_end_line), + 'entry_stats_interval': (None, _parse_entry_stats_end_line), + 'entry_ips': (None, _parse_entry_ips_line), + + 'exit_stats_end': (None, _parse_exit_stats_end_line), + 'exit_stats_interval': (None, _parse_exit_stats_end_line), + 'exit_kibibytes_written': (None, _parse_exit_kibibytes_written_line), + 'exit_kibibytes_read': (None, _parse_exit_kibibytes_read_line), 
+ 'exit_streams_opened': (None, _parse_exit_streams_opened_line), + + 'hs_stats_end': (None, _parse_hidden_service_stats_end_line), + 'hs_rend_cells': (None, _parse_hidden_service_rend_relayed_cells_line), + 'hs_rend_cells_attr': ({}, _parse_hidden_service_rend_relayed_cells_line), + 'hs_dir_onions_seen': (None, _parse_hidden_service_dir_onions_seen_line), + 'hs_dir_onions_seen_attr': ({}, _parse_hidden_service_dir_onions_seen_line), + + 'bridge_stats_end': (None, _parse_bridge_stats_end_line), + 'bridge_stats_interval': (None, _parse_bridge_stats_end_line), + 'bridge_ips': (None, _parse_bridge_ips_line), + 'geoip_start_time': (None, _parse_geoip_start_time_line), + 'geoip_client_origins': (None, _parse_geoip_client_origins_line), + + 'ip_versions': (None, _parse_bridge_ip_versions_line), + 'ip_transports': (None, _parse_bridge_ip_transports_line), + } + + PARSER_FOR_LINE = { + 'extra-info': _parse_extra_info_line, + 'geoip-db-digest': _parse_geoip_db_digest_line, + 'geoip6-db-digest': _parse_geoip6_db_digest_line, + 'transport': _parse_transport_line, + 'cell-circuits-per-decile': _parse_cell_circuits_per_decline_line, + 'dirreq-v2-resp': _parse_dirreq_v2_resp_line, + 'dirreq-v3-resp': _parse_dirreq_v3_resp_line, + 'dirreq-v2-direct-dl': _parse_dirreq_v2_direct_dl_line, + 'dirreq-v3-direct-dl': _parse_dirreq_v3_direct_dl_line, + 'dirreq-v2-tunneled-dl': _parse_dirreq_v2_tunneled_dl_line, + 'dirreq-v3-tunneled-dl': _parse_dirreq_v3_tunneled_dl_line, + 'dirreq-v2-share': _parse_dirreq_v2_share_line, + 'dirreq-v3-share': _parse_dirreq_v3_share_line, + 'cell-processed-cells': _parse_cell_processed_cells_line, + 'cell-queued-cells': _parse_cell_queued_cells_line, + 'cell-time-in-queue': _parse_cell_time_in_queue_line, + 'published': _parse_published_line, + 'geoip-start-time': _parse_geoip_start_time_line, + 'cell-stats-end': _parse_cell_stats_end_line, + 'entry-stats-end': _parse_entry_stats_end_line, + 'exit-stats-end': _parse_exit_stats_end_line, + 'bridge-stats-end': _parse_bridge_stats_end_line, + 'dirreq-stats-end': _parse_dirreq_stats_end_line, + 'conn-bi-direct': _parse_conn_bi_direct_line, + 'read-history': _parse_read_history_line, + 'write-history': _parse_write_history_line, + 'dirreq-read-history': _parse_dirreq_read_history_line, + 'dirreq-write-history': _parse_dirreq_write_history_line, + 'exit-kibibytes-written': _parse_exit_kibibytes_written_line, + 'exit-kibibytes-read': _parse_exit_kibibytes_read_line, + 'exit-streams-opened': _parse_exit_streams_opened_line, + 'hidserv-stats-end': _parse_hidden_service_stats_end_line, + 'hidserv-rend-relayed-cells': _parse_hidden_service_rend_relayed_cells_line, + 'hidserv-dir-onions-seen': _parse_hidden_service_dir_onions_seen_line, + 'dirreq-v2-ips': _parse_dirreq_v2_ips_line, + 'dirreq-v3-ips': _parse_dirreq_v3_ips_line, + 'dirreq-v2-reqs': _parse_dirreq_v2_reqs_line, + 'dirreq-v3-reqs': _parse_dirreq_v3_reqs_line, + 'geoip-client-origins': _parse_geoip_client_origins_line, + 'entry-ips': _parse_entry_ips_line, + 'bridge-ips': _parse_bridge_ips_line, + 'bridge-ip-versions': _parse_bridge_ip_versions_line, + 'bridge-ip-transports': _parse_bridge_ip_transports_line, + } + + def __init__(self, raw_contents, validate = False): + """ + Extra-info descriptor constructor. By default this validates the + descriptor's content as it's parsed. This validation can be disabled to + either improve performance or be accepting of malformed data. 
+ + :param str raw_contents: extra-info content provided by the relay + :param bool validate: checks the validity of the extra-info descriptor if + **True**, skips these checks otherwise + + :raises: **ValueError** if the contents is malformed and validate is True + """ + + super(ExtraInfoDescriptor, self).__init__(raw_contents, lazy_load = not validate) + entries = _get_descriptor_components(raw_contents, validate) + + if validate: + for keyword in self._required_fields(): + if keyword not in entries: + raise ValueError("Extra-info descriptor must have a '%s' entry" % keyword) + + for keyword in self._required_fields() + SINGLE_FIELDS: + if keyword in entries and len(entries[keyword]) > 1: + raise ValueError("The '%s' entry can only appear once in an extra-info descriptor" % keyword) + + expected_first_keyword = self._first_keyword() + if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]: + raise ValueError("Extra-info descriptor must start with a '%s' entry" % expected_first_keyword) + + expected_last_keyword = self._last_keyword() + if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]: + raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword) + + self._parse(entries, validate) + else: + self._entries = entries + + def digest(self): + """ + Provides the upper-case hex encoded sha1 of our content. This value is part + of the server descriptor entry for this relay. + + :returns: **str** with the upper-case hex digest value for this server + descriptor + """ + + raise NotImplementedError('Unsupported Operation: this should be implemented by the ExtraInfoDescriptor subclass') + + def _required_fields(self): + return REQUIRED_FIELDS + + def _first_keyword(self): + return 'extra-info' + + def _last_keyword(self): + return 'router-signature' + + +class RelayExtraInfoDescriptor(ExtraInfoDescriptor): + """ + Relay extra-info descriptor, constructed from data such as that provided by + 'GETINFO extra-info/digest/\*', cached descriptors, and metrics + (`specification `_). 
+ + :var str signature: **\*** signature for this extrainfo descriptor + + **\*** attribute is required when we're parsed with validation + """ + + ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{ + 'signature': (None, _parse_router_signature_line), + }) + + PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{ + 'router-signature': _parse_router_signature_line, + }) + + @lru_cache() + def digest(self): + # our digest is calculated from everything except our signature + raw_content, ending = str(self), '\nrouter-signature\n' + raw_content = raw_content[:raw_content.find(ending) + len(ending)] + return hashlib.sha1(stem.util.str_tools._to_bytes(raw_content)).hexdigest().upper() + + +class BridgeExtraInfoDescriptor(ExtraInfoDescriptor): + """ + Bridge extra-info descriptor (`bridge descriptor specification + `_) + """ + + ATTRIBUTES = dict(ExtraInfoDescriptor.ATTRIBUTES, **{ + '_digest': (None, _parse_router_digest_line), + }) + + PARSER_FOR_LINE = dict(ExtraInfoDescriptor.PARSER_FOR_LINE, **{ + 'router-digest': _parse_router_digest_line, + }) + + def digest(self): + return self._digest + + def _required_fields(self): + excluded_fields = [ + 'router-signature', + ] + + included_fields = [ + 'router-digest', + ] + + return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields]) + + def _last_keyword(self): + return None diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/hidden_service_descriptor.py b/Shared/lib/python3.4/site-packages/stem/descriptor/hidden_service_descriptor.py new file mode 100644 index 0000000..31a99cc --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/hidden_service_descriptor.py @@ -0,0 +1,422 @@ +# Copyright 2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parsing for Tor hidden service descriptors as described in Tor's `rend-spec +`_. + +Unlike other descriptor types these describe a hidden service rather than a +relay. They're created by the service, and can only be fetched via relays with +the HSDir flag. + +**Module Overview:** + +:: + + HiddenServiceDescriptor - Tor hidden service descriptor. + +.. versionadded:: 1.4.0 +""" + +# TODO: Add a description for how to retrieve them when tor supports that +# (#14847) and then update #15009. 
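#
# In the meantime descriptor content that is already on disk can be parsed
# directly. A rough sketch (the 'my_hs_descriptor' path is purely
# hypothetical)...
#
#   with open('my_hs_descriptor', 'rb') as descriptor_file:
#     desc = HiddenServiceDescriptor(descriptor_file.read(), validate = True)
#     print('descriptor id: %s' % desc.descriptor_id)
#
#     for intro_point in desc.introduction_points():
#       print('introduction point at %s:%i' % (intro_point.address, intro_point.port))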
+ +import base64 +import binascii +import collections +import hashlib +import io + +import stem.util.connection +import stem.util.str_tools + +from stem.descriptor import ( + PGP_BLOCK_END, + Descriptor, + _get_descriptor_components, + _read_until_keywords, + _bytes_for_block, + _value, + _parse_simple_line, + _parse_timestamp_line, + _parse_key_block, +) + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + +REQUIRED_FIELDS = ( + 'rendezvous-service-descriptor', + 'version', + 'permanent-key', + 'secret-id-part', + 'publication-time', + 'protocol-versions', + 'signature', +) + +INTRODUCTION_POINTS_ATTR = { + 'identifier': None, + 'address': None, + 'port': None, + 'onion_key': None, + 'service_key': None, + 'intro_authentication': [], +} + +# introduction-point fields that can only appear once + +SINGLE_INTRODUCTION_POINT_FIELDS = [ + 'introduction-point', + 'ip-address', + 'onion-port', + 'onion-key', + 'service-key', +] + +BASIC_AUTH = 1 +STEALTH_AUTH = 2 + +IntroductionPoint = collections.namedtuple('IntroductionPoints', INTRODUCTION_POINTS_ATTR.keys()) + + +class DecryptionFailure(Exception): + """ + Failure to decrypt the hidden service descriptor's introduction-points. + """ + + +def _parse_file(descriptor_file, validate = False, **kwargs): + """ + Iterates over the hidden service descriptors in a file. + + :param file descriptor_file: file with descriptor content + :param bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :param dict kwargs: additional arguments for the descriptor constructor + + :returns: iterator for :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor` + instances in the file + + :raises: + * **ValueError** if the contents is malformed and validate is **True** + * **IOError** if the file can't be read + """ + + while True: + descriptor_content = _read_until_keywords('signature', descriptor_file) + + # we've reached the 'signature', now include the pgp style block + block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0] + descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True) + + if descriptor_content: + if descriptor_content[0].startswith(b'@type'): + descriptor_content = descriptor_content[1:] + + yield HiddenServiceDescriptor(bytes.join(b'', descriptor_content), validate, **kwargs) + else: + break # done parsing file + + +def _parse_version_line(descriptor, entries): + value = _value('version', entries) + + if value.isdigit(): + descriptor.version = int(value) + else: + raise ValueError('version line must have a positive integer value: %s' % value) + + +def _parse_protocol_versions_line(descriptor, entries): + value = _value('protocol-versions', entries) + + try: + versions = [int(entry) for entry in value.split(',')] + except ValueError: + raise ValueError('protocol-versions line has non-numeric versoins: protocol-versions %s' % value) + + for v in versions: + if v <= 0: + raise ValueError('protocol-versions must be positive integers: %s' % value) + + descriptor.protocol_versions = versions + + +def _parse_introduction_points_line(descriptor, entries): + _, block_type, block_contents = entries['introduction-points'][0] + + if not block_contents or block_type != 'MESSAGE': + raise ValueError("'introduction-points' should be followed by a MESSAGE block, but was a %s" % block_type) + + descriptor.introduction_points_encoded = block_contents + + try: + decoded_field = 
_bytes_for_block(block_contents) + except TypeError: + raise ValueError("'introduction-points' isn't base64 encoded content:\n%s" % block_contents) + + auth_types = [] + + while decoded_field.startswith(b'service-authentication ') and b'\n' in decoded_field: + auth_line, decoded_field = decoded_field.split(b'\n', 1) + auth_line_comp = auth_line.split(b' ') + + if len(auth_line_comp) < 3: + raise ValueError("Within introduction-points we expected 'service-authentication [auth_type] [auth_data]', but had '%s'" % auth_line) + + auth_types.append((auth_line_comp[1], auth_line_comp[2])) + + descriptor.introduction_points_auth = auth_types + descriptor.introduction_points_content = decoded_field + +_parse_rendezvous_service_descriptor_line = _parse_simple_line('rendezvous-service-descriptor', 'descriptor_id') +_parse_permanent_key_line = _parse_key_block('permanent-key', 'permanent_key', 'RSA PUBLIC KEY') +_parse_secret_id_part_line = _parse_simple_line('secret-id-part', 'secret_id_part') +_parse_publication_time_line = _parse_timestamp_line('publication-time', 'published') +_parse_signature_line = _parse_key_block('signature', 'signature', 'SIGNATURE') + + +class HiddenServiceDescriptor(Descriptor): + """ + Hidden service descriptor. + + :var str descriptor_id: **\*** identifier for this descriptor, this is a base32 hash of several fields + :var int version: **\*** hidden service descriptor version + :var str permanent_key: **\*** long term key of the hidden service + :var str secret_id_part: **\*** hash of the time period, cookie, and replica + values so our descriptor_id can be validated + :var datetime published: **\*** time in UTC when this descriptor was made + :var list protocol_versions: **\*** list of **int** versions that are supported when establishing a connection + :var str introduction_points_encoded: raw introduction points blob + :var list introduction_points_auth: **\*** tuples of the form + (auth_method, auth_data) for our introduction_points_content + :var bytes introduction_points_content: decoded introduction-points content + without authentication data, if using cookie authentication this is + encrypted + :var str signature: signature of the descriptor content + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as **None** if undefined + """ + + ATTRIBUTES = { + 'descriptor_id': (None, _parse_rendezvous_service_descriptor_line), + 'version': (None, _parse_version_line), + 'permanent_key': (None, _parse_permanent_key_line), + 'secret_id_part': (None, _parse_secret_id_part_line), + 'published': (None, _parse_publication_time_line), + 'protocol_versions': ([], _parse_protocol_versions_line), + 'introduction_points_encoded': (None, _parse_introduction_points_line), + 'introduction_points_auth': ([], _parse_introduction_points_line), + 'introduction_points_content': (None, _parse_introduction_points_line), + 'signature': (None, _parse_signature_line), + } + + PARSER_FOR_LINE = { + 'rendezvous-service-descriptor': _parse_rendezvous_service_descriptor_line, + 'version': _parse_version_line, + 'permanent-key': _parse_permanent_key_line, + 'secret-id-part': _parse_secret_id_part_line, + 'publication-time': _parse_publication_time_line, + 'protocol-versions': _parse_protocol_versions_line, + 'introduction-points': _parse_introduction_points_line, + 'signature': _parse_signature_line, + } + + def __init__(self, raw_contents, validate = False): + super(HiddenServiceDescriptor, self).__init__(raw_contents, lazy_load = not 
validate) + entries = _get_descriptor_components(raw_contents, validate) + + if validate: + for keyword in REQUIRED_FIELDS: + if keyword not in entries: + raise ValueError("Hidden service descriptor must have a '%s' entry" % keyword) + elif keyword in entries and len(entries[keyword]) > 1: + raise ValueError("The '%s' entry can only appear once in a hidden service descriptor" % keyword) + + if 'rendezvous-service-descriptor' != list(entries.keys())[0]: + raise ValueError("Hidden service descriptor must start with a 'rendezvous-service-descriptor' entry") + elif 'signature' != list(entries.keys())[-1]: + raise ValueError("Hidden service descriptor must end with a 'signature' entry") + + self._parse(entries, validate) + + if stem.prereq.is_crypto_available(): + signed_digest = self._digest_for_signature(self.permanent_key, self.signature) + content_digest = self._digest_for_content(b'rendezvous-service-descriptor ', b'\nsignature\n') + + if signed_digest != content_digest: + raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, content_digest)) + else: + self._entries = entries + + @lru_cache() + def introduction_points(self, authentication_cookie = None): + """ + Provided this service's introduction points. This provides a list of + IntroductionPoint instances, which have the following attributes... + + * **identifier** (str): hash of this introduction point's identity key + * **address** (str): address of this introduction point + * **port** (int): port where this introduction point is listening + * **onion_key** (str): public key for communicating with this introduction point + * **service_key** (str): public key for communicating with this hidden service + * **intro_authentication** (list): tuples of the form (auth_type, auth_data) + for establishing a connection + + :param str authentication_cookie: cookie to decrypt the introduction-points + if it's encrypted + + :returns: **list** of IntroductionPoints instances + + :raises: + * **ValueError** if the our introduction-points is malformed + * **DecryptionFailure** if unable to decrypt this field + """ + + content = self.introduction_points_content + + if not content: + return [] + elif authentication_cookie: + if not stem.prereq.is_crypto_available(): + raise DecryptionFailure('Decrypting introduction-points requires pycrypto') + + try: + missing_padding = len(authentication_cookie) % 4 + authentication_cookie = base64.b64decode(stem.util.str_tools._to_bytes(authentication_cookie) + b'=' * missing_padding) + except TypeError as exc: + raise DecryptionFailure('authentication_cookie must be a base64 encoded string (%s)' % exc) + + authentication_type = int(binascii.hexlify(content[0:1]), 16) + + if authentication_type == BASIC_AUTH: + content = HiddenServiceDescriptor._decrypt_basic_auth(content, authentication_cookie) + elif authentication_type == STEALTH_AUTH: + content = HiddenServiceDescriptor._decrypt_stealth_auth(content, authentication_cookie) + else: + raise DecryptionFailure("Unrecognized authentication type '%s', currently we only support basic auth (%s) and stealth auth (%s)" % (authentication_type, BASIC_AUTH, STEALTH_AUTH)) + + if not content.startswith(b'introduction-point '): + raise DecryptionFailure('Unable to decrypt the introduction-points, maybe this is the wrong key?') + elif not content.startswith(b'introduction-point '): + raise DecryptionFailure('introduction-points content is encrypted, you need to provide its authentication_cookie') + + return 
HiddenServiceDescriptor._parse_introduction_points(content) + + @staticmethod + def _decrypt_basic_auth(content, authentication_cookie): + from Crypto.Cipher import AES + from Crypto.Util import Counter + from Crypto.Util.number import bytes_to_long + + try: + client_blocks = int(binascii.hexlify(content[1:2]), 16) + except ValueError: + raise DecryptionFailure("When using basic auth the content should start with a number of blocks but wasn't a hex digit: %s" % binascii.hexlify(content[1:2])) + + # parse the client id and encrypted session keys + + client_entries_length = client_blocks * 16 * 20 + client_entries = content[2:2 + client_entries_length] + client_keys = [(client_entries[i:i + 4], client_entries[i + 4:i + 20]) for i in range(0, client_entries_length, 4 + 16)] + + iv = content[2 + client_entries_length:2 + client_entries_length + 16] + encrypted = content[2 + client_entries_length + 16:] + + client_id = hashlib.sha1(authentication_cookie + iv).digest()[:4] + + for entry_id, encrypted_session_key in client_keys: + if entry_id != client_id: + continue # not the session key for this client + + # try decrypting the session key + + counter = Counter.new(128, initial_value = 0) + cipher = AES.new(authentication_cookie, AES.MODE_CTR, counter = counter) + session_key = cipher.decrypt(encrypted_session_key) + + # attempt to decrypt the intro points with the session key + + counter = Counter.new(128, initial_value = bytes_to_long(iv)) + cipher = AES.new(session_key, AES.MODE_CTR, counter = counter) + decrypted = cipher.decrypt(encrypted) + + # check if the decryption looks correct + + if decrypted.startswith(b'introduction-point '): + return decrypted + + return content # nope, unable to decrypt the content + + @staticmethod + def _decrypt_stealth_auth(content, authentication_cookie): + from Crypto.Cipher import AES + from Crypto.Util import Counter + from Crypto.Util.number import bytes_to_long + + # byte 1 = authentication type, 2-17 = input vector, 18 on = encrypted content + + iv, encrypted = content[1:17], content[17:] + counter = Counter.new(128, initial_value = bytes_to_long(iv)) + cipher = AES.new(authentication_cookie, AES.MODE_CTR, counter = counter) + + return cipher.decrypt(encrypted) + + @staticmethod + def _parse_introduction_points(content): + """ + Provides the parsed list of IntroductionPoint for the unencrypted content. 
+ """ + + introduction_points = [] + content_io = io.BytesIO(content) + + while True: + content = b''.join(_read_until_keywords('introduction-point', content_io, ignore_first = True)) + + if not content: + break # reached the end + + attr = dict(INTRODUCTION_POINTS_ATTR) + entries = _get_descriptor_components(content, False) + + for keyword, values in list(entries.items()): + value, block_type, block_contents = values[0] + + if keyword in SINGLE_INTRODUCTION_POINT_FIELDS and len(values) > 1: + raise ValueError("'%s' can only appear once in an introduction-point block, but appeared %i times" % (keyword, len(values))) + + if keyword == 'introduction-point': + attr['identifier'] = value + elif keyword == 'ip-address': + if not stem.util.connection.is_valid_ipv4_address(value): + raise ValueError("'%s' is an invalid IPv4 address" % value) + + attr['address'] = value + elif keyword == 'onion-port': + if not stem.util.connection.is_valid_port(value): + raise ValueError("'%s' is an invalid port" % value) + + attr['port'] = int(value) + elif keyword == 'onion-key': + attr['onion_key'] = block_contents + elif keyword == 'service-key': + attr['service_key'] = block_contents + elif keyword == 'intro-authentication': + auth_entries = [] + + for auth_value, _, _ in values: + if ' ' not in auth_value: + raise ValueError("We expected 'intro-authentication [auth_type] [auth_data]', but had '%s'" % auth_value) + + auth_type, auth_data = auth_value.split(' ')[:2] + auth_entries.append((auth_type, auth_data)) + + introduction_points.append(IntroductionPoint(**attr)) + + return introduction_points diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/microdescriptor.py b/Shared/lib/python3.4/site-packages/stem/descriptor/microdescriptor.py new file mode 100644 index 0000000..ffbec43 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/microdescriptor.py @@ -0,0 +1,314 @@ +# Copyright 2013-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parsing for Tor microdescriptors, which contain a distilled version of a +relay's server descriptor. As of Tor version 0.2.3.3-alpha Tor no longer +downloads server descriptors by default, opting for microdescriptors instead. + +Unlike most descriptor documents these aren't available on the metrics site +(since they don't contain any information that the server descriptors don't). + +The limited information in microdescriptors make them rather clunky to use +compared with server descriptors. For instance microdescriptors lack the +relay's fingerprint, making it difficut to use them to look up the relay's +other descriptors. + +To do so you need to match the microdescriptor's digest against its +corresponding router status entry. For added fun as of this writing the +controller doesn't even surface those router status entries +(:trac:`7953`). + +For instance, here's an example that prints the nickname and fignerprints of +the exit relays. 
+ +:: + + import os + + from stem.control import Controller + from stem.descriptor import parse_file + + with Controller.from_port(port = 9051) as controller: + controller.authenticate() + + exit_digests = set() + data_dir = controller.get_conf('DataDirectory') + + for desc in controller.get_microdescriptors(): + if desc.exit_policy.is_exiting_allowed(): + exit_digests.add(desc.digest) + + print 'Exit Relays:' + + for desc in parse_file(os.path.join(data_dir, 'cached-microdesc-consensus')): + if desc.digest in exit_digests: + print ' %s (%s)' % (desc.nickname, desc.fingerprint) + +Doing the same is trivial with server descriptors... + +:: + + from stem.descriptor import parse_file + + print 'Exit Relays:' + + for desc in parse_file('/home/atagar/.tor/cached-descriptors'): + if desc.exit_policy.is_exiting_allowed(): + print ' %s (%s)' % (desc.nickname, desc.fingerprint) + +**Module Overview:** + +:: + + Microdescriptor - Tor microdescriptor. +""" + +import hashlib + +import stem.exit_policy + +from stem.descriptor import ( + Descriptor, + _get_descriptor_components, + _read_until_keywords, + _value, + _parse_simple_line, + _parse_key_block, +) + +from stem.descriptor.router_status_entry import ( + _parse_a_line, + _parse_p_line, +) + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + +REQUIRED_FIELDS = ( + 'onion-key', +) + +SINGLE_FIELDS = ( + 'onion-key', + 'ntor-onion-key', + 'family', + 'p', + 'p6', +) + + +def _parse_file(descriptor_file, validate = False, **kwargs): + """ + Iterates over the microdescriptors in a file. + + :param file descriptor_file: file with descriptor content + :param bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :param dict kwargs: additional arguments for the descriptor constructor + + :returns: iterator for Microdescriptor instances in the file + + :raises: + * **ValueError** if the contents is malformed and validate is True + * **IOError** if the file can't be read + """ + + while True: + annotations = _read_until_keywords('onion-key', descriptor_file) + + # read until we reach an annotation or onion-key line + descriptor_lines = [] + + # read the onion-key line, done if we're at the end of the document + + onion_key_line = descriptor_file.readline() + + if onion_key_line: + descriptor_lines.append(onion_key_line) + else: + break + + while True: + last_position = descriptor_file.tell() + line = descriptor_file.readline() + + if not line: + break # EOF + elif line.startswith(b'@') or line.startswith(b'onion-key'): + descriptor_file.seek(last_position) + break + else: + descriptor_lines.append(line) + + if descriptor_lines: + if descriptor_lines[0].startswith(b'@type'): + descriptor_lines = descriptor_lines[1:] + + # strip newlines from annotations + annotations = list(map(bytes.strip, annotations)) + + descriptor_text = bytes.join(b'', descriptor_lines) + + yield Microdescriptor(descriptor_text, validate, annotations, **kwargs) + else: + break # done parsing descriptors + + +def _parse_id_line(descriptor, entries): + value = _value('id', entries) + value_comp = value.split() + + if len(value_comp) >= 2: + descriptor.identifier_type = value_comp[0] + descriptor.identifier = value_comp[1] + else: + raise ValueError("'id' lines should contain both the key type and digest: id %s" % value) + + +_parse_digest = lambda descriptor, entries: setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper()) 
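# The digest computed above is what lets a microdescriptor be matched against its
# consensus entry, as outlined in the module docstring. A minimal Python 3 sketch of
# that lookup (the control port and a readable DataDirectory are assumptions for the
# illustration, not something this module provides)...
#
#   import os
#
#   from stem.control import Controller
#   from stem.descriptor import parse_file
#
#   with Controller.from_port(port = 9051) as controller:
#     controller.authenticate()
#     data_dir = controller.get_conf('DataDirectory')
#
#     exit_digests = set()
#
#     for desc in controller.get_microdescriptors():
#       if desc.exit_policy.is_exiting_allowed():
#         exit_digests.add(desc.digest)
#
#     print('Exit Relays:')
#
#     for desc in parse_file(os.path.join(data_dir, 'cached-microdesc-consensus')):
#       if desc.digest in exit_digests:
#         print('  %s (%s)' % (desc.nickname, desc.fingerprint))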
+_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY') +_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key') +_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', _value('family', entries).split(' ')) +_parse_p6_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('p6', entries))) + + +class Microdescriptor(Descriptor): + """ + Microdescriptor (`descriptor specification + `_) + + :var str digest: **\*** hex digest for this microdescriptor, this can be used + to match against the corresponding digest attribute of a + :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` + :var str onion_key: **\*** key used to encrypt EXTEND cells + :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol + :var list or_addresses: **\*** alternative for our address/or_port attributes, each + entry is a tuple of the form (address (**str**), port (**int**), is_ipv6 + (**bool**)) + :var list family: **\*** nicknames or fingerprints of declared family + :var stem.exit_policy.MicroExitPolicy exit_policy: **\*** relay's exit policy + :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6 + :var str identifier_type: identity digest key type + :var str identifier: base64 encoded identity digest, this is only used for collision prevention (:trac:`11743`) + + **\*** attribute is required when we're parsed with validation + + .. versionchanged:: 1.1.0 + Added the identifier and identifier_type attributes. + """ + + ATTRIBUTES = { + 'onion_key': (None, _parse_onion_key_line), + 'ntor_onion_key': (None, _parse_ntor_onion_key_line), + 'or_addresses': ([], _parse_a_line), + 'family': ([], _parse_family_line), + 'exit_policy': (stem.exit_policy.MicroExitPolicy('reject 1-65535'), _parse_p_line), + 'exit_policy_v6': (None, _parse_p6_line), + 'identifier_type': (None, _parse_id_line), + 'identifier': (None, _parse_id_line), + 'digest': (None, _parse_digest), + } + + PARSER_FOR_LINE = { + 'onion-key': _parse_onion_key_line, + 'ntor-onion-key': _parse_ntor_onion_key_line, + 'a': _parse_a_line, + 'family': _parse_family_line, + 'p': _parse_p_line, + 'p6': _parse_p6_line, + 'id': _parse_id_line, + } + + def __init__(self, raw_contents, validate = False, annotations = None): + super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate) + self._annotation_lines = annotations if annotations else [] + entries = _get_descriptor_components(raw_contents, validate) + + if validate: + self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper() + self._parse(entries, validate) + self._check_constraints(entries) + else: + self._entries = entries + + @lru_cache() + def get_annotations(self): + """ + Provides content that appeared prior to the descriptor. If this comes from + the cached-microdescs then this commonly contains content like... + + :: + + @last-listed 2013-02-24 00:18:30 + + :returns: **dict** with the key/value pairs in our annotations + """ + + annotation_dict = {} + + for line in self._annotation_lines: + if b' ' in line: + key, value = line.split(b' ', 1) + annotation_dict[key] = value + else: + annotation_dict[line] = None + + return annotation_dict + + def get_annotation_lines(self): + """ + Provides the lines of content that appeared prior to the descriptor. 
This + is the same as the + :func:`~stem.descriptor.microdescriptor.Microdescriptor.get_annotations` + results, but with the unparsed lines and ordering retained. + + :returns: **list** with the lines of annotation that came before this descriptor + """ + + return self._annotation_lines + + def _check_constraints(self, entries): + """ + Does a basic check that the entries conform to this descriptor type's + constraints. + + :param dict entries: keyword => (value, pgp key) entries + + :raises: **ValueError** if an issue arises in validation + """ + + for keyword in REQUIRED_FIELDS: + if keyword not in entries: + raise ValueError("Microdescriptor must have a '%s' entry" % keyword) + + for keyword in SINGLE_FIELDS: + if keyword in entries and len(entries[keyword]) > 1: + raise ValueError("The '%s' entry can only appear once in a microdescriptor" % keyword) + + if 'onion-key' != list(entries.keys())[0]: + raise ValueError("Microdescriptor must start with a 'onion-key' entry") + + def _name(self, is_plural = False): + return 'microdescriptors' if is_plural else 'microdescriptor' + + def _compare(self, other, method): + if not isinstance(other, Microdescriptor): + return False + + return method(str(self).strip(), str(other).strip()) + + def __hash__(self): + return hash(str(self).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py b/Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py new file mode 100644 index 0000000..a162e2e --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/networkstatus.py @@ -0,0 +1,1444 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parsing for Tor network status documents. This supports both the v2 and v3 +dir-spec. Documents can be obtained from a few sources... + +* The 'cached-consensus' file in Tor's data directory. + +* Archived descriptors provided by CollecTor + (https://collector.torproject.org/). + +* Directory authorities and mirrors via their DirPort. + +... and contain the following sections... + +* document header +* list of :class:`stem.descriptor.networkstatus.DirectoryAuthority` +* list of :class:`stem.descriptor.router_status_entry.RouterStatusEntry` +* document footer + +Of these, the router status entry section can be quite large (on the order of +hundreds of kilobytes). As such we provide a couple of methods for reading +network status documents through :func:`~stem.descriptor.__init__.parse_file`. +For more information see :func:`~stem.descriptor.__init__.DocumentHandler`... + +:: + + from stem.descriptor import parse_file, DocumentHandler + + with open('.tor/cached-consensus', 'rb') as consensus_file: + # Processes the routers as we read them in. The routers refer to a document + # with an unset 'routers' attribute. 
+ + for router in parse_file(consensus_file, 'network-status-consensus-3 1.0', document_handler = DocumentHandler.ENTRIES): + print router.nickname + +**Module Overview:** + +:: + + NetworkStatusDocument - Network status document + |- NetworkStatusDocumentV2 - Version 2 network status document + |- NetworkStatusDocumentV3 - Version 3 network status document + +- BridgeNetworkStatusDocument - Version 3 network status document for bridges + + KeyCertificate - Certificate used to authenticate an authority + DocumentSignature - Signature of a document by a directory authority + DirectoryAuthority - Directory authority as defined in a v3 network status document + + +.. data:: PackageVersion + + Latest recommended version of a package that's available. + + :var str name: name of the package + :var str version: latest recommended version + :var str url: package's url + :var dict digests: mapping of digest types to their value +""" + +import collections +import io + +import stem.descriptor.router_status_entry +import stem.util.str_tools +import stem.util.tor_tools +import stem.version + +from stem.descriptor import ( + PGP_BLOCK_END, + Descriptor, + DocumentHandler, + _get_descriptor_components, + _read_until_keywords, + _value, + _parse_simple_line, + _parse_timestamp_line, + _parse_forty_character_hex, + _parse_key_block, +) + +from stem.descriptor.router_status_entry import ( + RouterStatusEntryV2, + RouterStatusEntryV3, + RouterStatusEntryMicroV3, +) + +PackageVersion = collections.namedtuple('PackageVersion', [ + 'name', + 'version', + 'url', + 'digests', +]) + +# Version 2 network status document fields, tuples of the form... +# (keyword, is_mandatory) + +NETWORK_STATUS_V2_FIELDS = ( + ('network-status-version', True), + ('dir-source', True), + ('fingerprint', True), + ('contact', True), + ('dir-signing-key', True), + ('client-versions', False), + ('server-versions', False), + ('published', True), + ('dir-options', False), + ('directory-signature', True), +) + +# Network status document are either a 'vote' or 'consensus', with different +# mandatory fields for each. Both though require that their fields appear in a +# specific order. This is an ordered listing of the following... 
+# +# (field, in_votes, in_consensus, is_mandatory) + +HEADER_STATUS_DOCUMENT_FIELDS = ( + ('network-status-version', True, True, True), + ('vote-status', True, True, True), + ('consensus-methods', True, False, False), + ('consensus-method', False, True, False), + ('published', True, False, True), + ('valid-after', True, True, True), + ('fresh-until', True, True, True), + ('valid-until', True, True, True), + ('voting-delay', True, True, True), + ('client-versions', True, True, False), + ('server-versions', True, True, False), + ('package', True, True, False), + ('known-flags', True, True, True), + ('flag-thresholds', True, False, False), + ('params', True, True, False), +) + +FOOTER_STATUS_DOCUMENT_FIELDS = ( + ('directory-footer', True, True, False), + ('bandwidth-weights', False, True, False), + ('directory-signature', True, True, True), +) + +HEADER_FIELDS = [attr[0] for attr in HEADER_STATUS_DOCUMENT_FIELDS] +FOOTER_FIELDS = [attr[0] for attr in FOOTER_STATUS_DOCUMENT_FIELDS] + +AUTH_START = 'dir-source' +ROUTERS_START = 'r' +FOOTER_START = 'directory-footer' +V2_FOOTER_START = 'directory-signature' + +DEFAULT_PARAMS = { + 'bwweightscale': 10000, + 'cbtdisabled': 0, + 'cbtnummodes': 3, + 'cbtrecentcount': 20, + 'cbtmaxtimeouts': 18, + 'cbtmincircs': 100, + 'cbtquantile': 80, + 'cbtclosequantile': 95, + 'cbttestfreq': 60, + 'cbtmintimeout': 2000, + 'cbtinitialtimeout': 60000, + 'Support022HiddenServices': 1, + 'usecreatefast': 1, +} + +# KeyCertificate fields, tuple is of the form... +# (keyword, is_mandatory) + +KEY_CERTIFICATE_PARAMS = ( + ('dir-key-certificate-version', True), + ('dir-address', False), + ('fingerprint', True), + ('dir-identity-key', True), + ('dir-key-published', True), + ('dir-key-expires', True), + ('dir-signing-key', True), + ('dir-key-crosscert', False), + ('dir-key-certification', True), +) + +# all parameters are constrained to int32 range +MIN_PARAM, MAX_PARAM = -2147483648, 2147483647 + +PARAM_RANGE = { + 'circwindow': (100, 1000), + 'CircuitPriorityHalflifeMsec': (-1, MAX_PARAM), + 'perconnbwrate': (-1, MAX_PARAM), + 'perconnbwburst': (-1, MAX_PARAM), + 'refuseunknownexits': (0, 1), + 'bwweightscale': (1, MAX_PARAM), + 'cbtdisabled': (0, 1), + 'cbtnummodes': (1, 20), + 'cbtrecentcount': (3, 1000), + 'cbtmaxtimeouts': (3, 10000), + 'cbtmincircs': (1, 10000), + 'cbtquantile': (10, 99), + 'cbtclosequantile': (MIN_PARAM, 99), + 'cbttestfreq': (1, MAX_PARAM), + 'cbtmintimeout': (500, MAX_PARAM), + 'UseOptimisticData': (0, 1), + 'Support022HiddenServices': (0, 1), + 'usecreatefast': (0, 1), + 'UseNTorHandshake': (0, 1), + 'FastFlagMinThreshold': (4, MAX_PARAM), + 'NumDirectoryGuards': (0, 10), + 'NumEntryGuards': (1, 10), + 'GuardLifetime': (2592000, 157766400), # min: 30 days, max: 1826 days + 'NumNTorsPerTAP': (1, 100000), + 'AllowNonearlyExtend': (0, 1), +} + + +def _parse_file(document_file, document_type = None, validate = False, is_microdescriptor = False, document_handler = DocumentHandler.ENTRIES, **kwargs): + """ + Parses a network status and iterates over the RouterStatusEntry in it. The + document that these instances reference have an empty 'routers' attribute to + allow for limited memory usage. 
+ + :param file document_file: file with network status document content + :param class document_type: NetworkStatusDocument subclass + :param bool validate: checks the validity of the document's contents if + **True**, skips these checks otherwise + :param bool is_microdescriptor: **True** if this is for a microdescriptor + consensus, **False** otherwise + :param stem.descriptor.__init__.DocumentHandler document_handler: method in + which to parse :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` + :param dict kwargs: additional arguments for the descriptor constructor + + :returns: :class:`stem.descriptor.networkstatus.NetworkStatusDocument` object + + :raises: + * **ValueError** if the document_version is unrecognized or the contents is + malformed and validate is **True** + * **IOError** if the file can't be read + """ + + # we can't properly default this since NetworkStatusDocumentV3 isn't defined yet + + if document_type is None: + document_type = NetworkStatusDocumentV3 + + if document_type == NetworkStatusDocumentV2: + document_type, router_type = NetworkStatusDocumentV2, RouterStatusEntryV2 + elif document_type == NetworkStatusDocumentV3: + router_type = RouterStatusEntryMicroV3 if is_microdescriptor else RouterStatusEntryV3 + elif document_type == BridgeNetworkStatusDocument: + document_type, router_type = BridgeNetworkStatusDocument, RouterStatusEntryV2 + else: + raise ValueError("Document type %i isn't recognized (only able to parse v2, v3, and bridge)" % document_type) + + if document_handler == DocumentHandler.DOCUMENT: + yield document_type(document_file.read(), validate, **kwargs) + return + + # getting the document without the routers section + + header = _read_until_keywords((ROUTERS_START, FOOTER_START, V2_FOOTER_START), document_file) + + if header and header[0].startswith(b'@type'): + header = header[1:] + + routers_start = document_file.tell() + _read_until_keywords((FOOTER_START, V2_FOOTER_START), document_file, skip = True) + routers_end = document_file.tell() + + footer = document_file.readlines() + document_content = bytes.join(b'', header + footer) + + if document_handler == DocumentHandler.BARE_DOCUMENT: + yield document_type(document_content, validate, **kwargs) + elif document_handler == DocumentHandler.ENTRIES: + desc_iterator = stem.descriptor.router_status_entry._parse_file( + document_file, + validate, + entry_class = router_type, + entry_keyword = ROUTERS_START, + start_position = routers_start, + end_position = routers_end, + extra_args = (document_type(document_content, validate),), + **kwargs + ) + + for desc in desc_iterator: + yield desc + else: + raise ValueError('Unrecognized document_handler: %s' % document_handler) + + +def _parse_file_key_certs(certificate_file, validate = False): + """ + Parses a file containing one or more authority key certificates. 
+ + :param file certificate_file: file with key certificates + :param bool validate: checks the validity of the certificate's contents if + **True**, skips these checks otherwise + + :returns: iterator for :class:`stem.descriptor.networkstatus.KeyCertificate` + instance in the file + + :raises: + * **ValueError** if the key certificate content is invalid and validate is + **True** + * **IOError** if the file can't be read + """ + + while True: + keycert_content = _read_until_keywords('dir-key-certification', certificate_file) + + # we've reached the 'router-signature', now include the pgp style block + block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0] + keycert_content += _read_until_keywords(block_end_prefix, certificate_file, True) + + if keycert_content: + yield stem.descriptor.networkstatus.KeyCertificate(bytes.join(b'', keycert_content), validate = validate) + else: + break # done parsing file + + +class NetworkStatusDocument(Descriptor): + """ + Common parent for network status documents. + """ + + +def _parse_version_line(keyword, attribute, expected_version): + def _parse(descriptor, entries): + value = _value(keyword, entries) + + if not value.isdigit(): + raise ValueError('Document has a non-numeric version: %s %s' % (keyword, value)) + + setattr(descriptor, attribute, int(value)) + + if int(value) != expected_version: + raise ValueError("Expected a version %i document, but got version '%s' instead" % (expected_version, value)) + + return _parse + + +def _parse_dir_source_line(descriptor, entries): + value = _value('dir-source', entries) + dir_source_comp = value.split() + + if len(dir_source_comp) < 3: + raise ValueError("The 'dir-source' line of a v2 network status document must have three values: dir-source %s" % value) + + if not dir_source_comp[0]: + # https://trac.torproject.org/7055 + raise ValueError("Authority's hostname can't be blank: dir-source %s" % value) + elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[1]): + raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[1]) + elif not stem.util.connection.is_valid_port(dir_source_comp[2], allow_zero = True): + raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[2]) + + descriptor.hostname = dir_source_comp[0] + descriptor.address = dir_source_comp[1] + descriptor.dir_port = None if dir_source_comp[2] == '0' else int(dir_source_comp[2]) + + +_parse_network_status_version_line = _parse_version_line('network-status-version', 'version', 2) +_parse_fingerprint_line = _parse_forty_character_hex('fingerprint', 'fingerprint') +_parse_contact_line = _parse_simple_line('contact', 'contact') +_parse_dir_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY') +_parse_client_versions_line = lambda descriptor, entries: setattr(descriptor, 'client_versions', _value('client-versions', entries).split(',')) +_parse_server_versions_line = lambda descriptor, entries: setattr(descriptor, 'server_versions', _value('server-versions', entries).split(',')) +_parse_published_line = _parse_timestamp_line('published', 'published') +_parse_dir_options_line = lambda descriptor, entries: setattr(descriptor, 'options', _value('dir-options', entries).split()) +_parse_directory_signature_line = _parse_key_block('directory-signature', 'signature', 'SIGNATURE', value_attribute = 'signing_authority') + + +class NetworkStatusDocumentV2(NetworkStatusDocument): + """ + Version 2 network status document. 
These have been deprecated and are no + longer generated by Tor. + + :var dict routers: fingerprints to :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` + contained in the document + + :var int version: **\*** document version + + :var str hostname: **\*** hostname of the authority + :var str address: **\*** authority's IP address + :var int dir_port: **\*** authority's DirPort + :var str fingerprint: **\*** authority's fingerprint + :var str contact: **\*** authority's contact information + :var str signing_key: **\*** authority's public signing key + + :var list client_versions: list of recommended client tor version strings + :var list server_versions: list of recommended server tor version strings + :var datetime published: **\*** time when the document was published + :var list options: **\*** list of things that this authority decides + + :var str signing_authority: **\*** name of the authority signing the document + :var str signature: **\*** authority's signature for the document + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as **None** if undefined + """ + + ATTRIBUTES = { + 'version': (None, _parse_network_status_version_line), + 'hostname': (None, _parse_dir_source_line), + 'address': (None, _parse_dir_source_line), + 'dir_port': (None, _parse_dir_source_line), + 'fingerprint': (None, _parse_fingerprint_line), + 'contact': (None, _parse_contact_line), + 'signing_key': (None, _parse_dir_signing_key_line), + + 'client_versions': ([], _parse_client_versions_line), + 'server_versions': ([], _parse_server_versions_line), + 'published': (None, _parse_published_line), + 'options': ([], _parse_dir_options_line), + + 'signing_authority': (None, _parse_directory_signature_line), + 'signatures': (None, _parse_directory_signature_line), + } + + PARSER_FOR_LINE = { + 'network-status-version': _parse_network_status_version_line, + 'dir-source': _parse_dir_source_line, + 'fingerprint': _parse_fingerprint_line, + 'contact': _parse_contact_line, + 'dir-signing-key': _parse_dir_signing_key_line, + 'client-versions': _parse_client_versions_line, + 'server-versions': _parse_server_versions_line, + 'published': _parse_published_line, + 'dir-options': _parse_dir_options_line, + 'directory-signature': _parse_directory_signature_line, + } + + def __init__(self, raw_content, validate = False): + super(NetworkStatusDocumentV2, self).__init__(raw_content, lazy_load = not validate) + + # Splitting the document from the routers. Unlike v3 documents we're not + # bending over backwards on the validation by checking the field order or + # that header/footer attributes aren't in the wrong section. This is a + # deprecated descriptor type - patches welcome if you want those checks. 
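    # Concretely: everything before the first router ('r') line is buffered here,
    # the router entries themselves are consumed by
    # stem.descriptor.router_status_entry._parse_file(), and whatever remains
    # (the footer) is appended back onto the buffered header before
    # _get_descriptor_components() parses the keyword/value entries.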
+ + document_file = io.BytesIO(raw_content) + document_content = bytes.join(b'', _read_until_keywords((ROUTERS_START, V2_FOOTER_START), document_file)) + + router_iter = stem.descriptor.router_status_entry._parse_file( + document_file, + validate, + entry_class = RouterStatusEntryV2, + entry_keyword = ROUTERS_START, + section_end_keywords = (V2_FOOTER_START,), + extra_args = (self,), + ) + + self.routers = dict((desc.fingerprint, desc) for desc in router_iter) + + entries = _get_descriptor_components(document_content + b'\n' + document_file.read(), validate) + + if validate: + self._check_constraints(entries) + self._parse(entries, validate) + + # 'client-versions' and 'server-versions' are only required if 'Versions' + # is among the options + + if 'Versions' in self.options and not ('client-versions' in entries and 'server-versions' in entries): + raise ValueError("Version 2 network status documents must have a 'client-versions' and 'server-versions' when 'Versions' is listed among its dir-options:\n%s" % str(self)) + else: + self._entries = entries + + def _check_constraints(self, entries): + required_fields = [field for (field, is_mandatory) in NETWORK_STATUS_V2_FIELDS if is_mandatory] + for keyword in required_fields: + if keyword not in entries: + raise ValueError("Network status document (v2) must have a '%s' line:\n%s" % (keyword, str(self))) + + # all recognized fields can only appear once + single_fields = [field for (field, _) in NETWORK_STATUS_V2_FIELDS] + for keyword in single_fields: + if keyword in entries and len(entries[keyword]) > 1: + raise ValueError("Network status document (v2) can only have a single '%s' line, got %i:\n%s" % (keyword, len(entries[keyword]), str(self))) + + if 'network-status-version' != list(entries.keys())[0]: + raise ValueError("Network status document (v2) are expected to start with a 'network-status-version' line:\n%s" % str(self)) + + +def _parse_header_network_status_version_line(descriptor, entries): + # "network-status-version" version + + value = _value('network-status-version', entries) + + if ' ' in value: + version, flavor = value.split(' ', 1) + else: + version, flavor = value, None + + if not version.isdigit(): + raise ValueError('Network status document has a non-numeric version: network-status-version %s' % value) + + descriptor.version = int(version) + descriptor.version_flavor = flavor + descriptor.is_microdescriptor = flavor == 'microdesc' + + if descriptor.version != 3: + raise ValueError("Expected a version 3 network status document, got version '%s' instead" % descriptor.version) + + +def _parse_header_vote_status_line(descriptor, entries): + # "vote-status" type + # + # The consensus-method and consensus-methods fields are optional since + # they weren't included in version 1. Setting a default now that we + # know if we're a vote or not. 
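  # In practice that means a parsed vote without a 'consensus-methods' line ends up
  # with consensus_methods == [1], and a consensus without a 'consensus-method' line
  # ends up with consensus_method == 1 (the defaults are applied when the header is
  # parsed).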
+ + value = _value('vote-status', entries) + + if value == 'consensus': + descriptor.is_consensus, descriptor.is_vote = True, False + elif value == 'vote': + descriptor.is_consensus, descriptor.is_vote = False, True + else: + raise ValueError("A network status document's vote-status line can only be 'consensus' or 'vote', got '%s' instead" % value) + + +def _parse_header_consensus_methods_line(descriptor, entries): + # "consensus-methods" IntegerList + + if descriptor._lazy_loading and descriptor.is_vote: + descriptor.consensus_methods = [1] + + value, consensus_methods = _value('consensus-methods', entries), [] + + for entry in value.split(' '): + if not entry.isdigit(): + raise ValueError("A network status document's consensus-methods must be a list of integer values, but was '%s'" % value) + + consensus_methods.append(int(entry)) + + descriptor.consensus_methods = consensus_methods + + +def _parse_header_consensus_method_line(descriptor, entries): + # "consensus-method" Integer + + if descriptor._lazy_loading and descriptor.is_consensus: + descriptor.consensus_method = 1 + + value = _value('consensus-method', entries) + + if not value.isdigit(): + raise ValueError("A network status document's consensus-method must be an integer, but was '%s'" % value) + + descriptor.consensus_method = int(value) + + +def _parse_header_voting_delay_line(descriptor, entries): + # "voting-delay" VoteSeconds DistSeconds + + value = _value('voting-delay', entries) + value_comp = value.split(' ') + + if len(value_comp) == 2 and value_comp[0].isdigit() and value_comp[1].isdigit(): + descriptor.vote_delay = int(value_comp[0]) + descriptor.dist_delay = int(value_comp[1]) + else: + raise ValueError("A network status document's 'voting-delay' line must be a pair of integer values, but was '%s'" % value) + + +def _parse_versions_line(keyword, attribute): + def _parse(descriptor, entries): + value, entries = _value(keyword, entries), [] + + for entry in value.split(','): + try: + entries.append(stem.version._get_version(entry)) + except ValueError: + raise ValueError("Network status document's '%s' line had '%s', which isn't a parsable tor version: %s %s" % (keyword, entry, keyword, value)) + + setattr(descriptor, attribute, entries) + + return _parse + + +def _parse_header_flag_thresholds_line(descriptor, entries): + # "flag-thresholds" SP THRESHOLDS + + value, thresholds = _value('flag-thresholds', entries).strip(), {} + + if value: + for entry in value.split(' '): + if '=' not in entry: + raise ValueError("Network status document's 'flag-thresholds' line is expected to be space separated key=value mappings, got: flag-thresholds %s" % value) + + entry_key, entry_value = entry.split('=', 1) + + try: + if entry_value.endswith('%'): + # opting for string manipulation rather than just + # 'float(entry_value) / 100' because floating point arithmetic + # will lose precision + + thresholds[entry_key] = float('0.' + entry_value[:-1].replace('.', '', 1)) + elif '.' in entry_value: + thresholds[entry_key] = float(entry_value) + else: + thresholds[entry_key] = int(entry_value) + except ValueError: + raise ValueError("Network status document's 'flag-thresholds' line is expected to have float values, got: flag-thresholds %s" % value) + + descriptor.flag_thresholds = thresholds + + +def _parse_header_parameters_line(descriptor, entries): + # "params" [Parameters] + # Parameter ::= Keyword '=' Int32 + # Int32 ::= A decimal integer between -2147483648 and 2147483647. 
+ # Parameters ::= Parameter | Parameters SP Parameter + + if descriptor._lazy_loading: + descriptor.params = dict(DEFAULT_PARAMS) if descriptor._default_params else {} + + value = _value('params', entries) + + # should only appear in consensus-method 7 or later + + if not descriptor.meets_consensus_method(7): + raise ValueError("A network status document's 'params' line should only appear in consensus-method 7 or later") + + if value != '': + descriptor.params = _parse_int_mappings('params', value, True) + descriptor._check_params_constraints() + + +def _parse_directory_footer_line(descriptor, entries): + # nothing to parse, simply checking that we don't have a value + + value = _value('directory-footer', entries) + + if value: + raise ValueError("A network status document's 'directory-footer' line shouldn't have any content, got 'directory-footer %s'" % value) + + +def _parse_footer_directory_signature_line(descriptor, entries): + signatures = [] + + for sig_value, block_type, block_contents in entries['directory-signature']: + if sig_value.count(' ') not in (1, 2): + raise ValueError("Authority signatures in a network status document are expected to be of the form 'directory-signature [METHOD] FINGERPRINT KEY_DIGEST', received: %s" % sig_value) + + if not block_contents or block_type != 'SIGNATURE': + raise ValueError("'directory-signature' should be followed by a SIGNATURE block, but was a %s" % block_type) + + if sig_value.count(' ') == 1: + method = 'sha1' # default if none was provided + fingerprint, key_digest = sig_value.split(' ', 1) + else: + method, fingerprint, key_digest = sig_value.split(' ', 2) + + signatures.append(DocumentSignature(method, fingerprint, key_digest, block_contents, True)) + + descriptor.signatures = signatures + + +def _parse_package_line(descriptor, entries): + package_versions = [] + + for value, _, _ in entries['package']: + value_comp = value.split() + + if len(value_comp) < 3: + raise ValueError("'package' must at least have a 'PackageName Version URL': %s" % value) + + name, version, url = value_comp[:3] + digests = {} + + for digest_entry in value_comp[3:]: + if '=' not in digest_entry: + raise ValueError("'package' digest entries should be 'key=value' pairs: %s" % value) + + key, value = digest_entry.split('=', 1) + digests[key] = value + + package_versions.append(PackageVersion(name, version, url, digests)) + + descriptor.packages = package_versions + + +_parse_header_valid_after_line = _parse_timestamp_line('valid-after', 'valid_after') +_parse_header_fresh_until_line = _parse_timestamp_line('fresh-until', 'fresh_until') +_parse_header_valid_until_line = _parse_timestamp_line('valid-until', 'valid_until') +_parse_header_client_versions_line = _parse_versions_line('client-versions', 'client_versions') +_parse_header_server_versions_line = _parse_versions_line('server-versions', 'server_versions') +_parse_header_known_flags_line = lambda descriptor, entries: setattr(descriptor, 'known_flags', [entry for entry in _value('known-flags', entries).split(' ') if entry]) +_parse_footer_bandwidth_weights_line = lambda descriptor, entries: setattr(descriptor, 'bandwidth_weights', _parse_int_mappings('bandwidth-weights', _value('bandwidth-weights', entries), True)) + + +class NetworkStatusDocumentV3(NetworkStatusDocument): + """ + Version 3 network status document. This could be either a vote or consensus. 
+ + :var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` + contained in the document + + :var int version: **\*** document version + :var str version_flavor: **\*** flavor associated with the document (such as 'microdesc') + :var bool is_consensus: **\*** **True** if the document is a consensus + :var bool is_vote: **\*** **True** if the document is a vote + :var bool is_microdescriptor: **\*** **True** if this is a microdescriptor + flavored document, **False** otherwise + :var datetime valid_after: **\*** time when the consensus became valid + :var datetime fresh_until: **\*** time when the next consensus should be produced + :var datetime valid_until: **\*** time when this consensus becomes obsolete + :var int vote_delay: **\*** number of seconds allowed for collecting votes + from all authorities + :var int dist_delay: **\*** number of seconds allowed for collecting + signatures from all authorities + :var list client_versions: list of recommended client tor versions + :var list server_versions: list of recommended server tor versions + :var list packages: **\*** list of :data:`~stem.descriptor.networkstatus.PackageVersion` entries + :var list known_flags: **\*** list of :data:`~stem.Flag` for the router's flags + :var dict params: **\*** dict of parameter(**str**) => value(**int**) mappings + :var list directory_authorities: **\*** list of :class:`~stem.descriptor.networkstatus.DirectoryAuthority` + objects that have generated this document + :var list signatures: **\*** :class:`~stem.descriptor.networkstatus.DocumentSignature` + of the authorities that have signed the document + + **Consensus Attributes:** + + :var int consensus_method: method version used to generate this consensus + :var dict bandwidth_weights: dict of weight(str) => value(int) mappings + + **Vote Attributes:** + + :var list consensus_methods: list of ints for the supported method versions + :var datetime published: time when the document was published + :var dict flag_thresholds: **\*** mapping of internal performance thresholds used while making the vote, values are **ints** or **floats** + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as None if undefined + + .. versionchanged:: 1.4.0 + Added the packages attribute. 
+ """ + + ATTRIBUTES = { + 'version': (None, _parse_header_network_status_version_line), + 'version_flavor': (None, _parse_header_network_status_version_line), + 'is_consensus': (True, _parse_header_vote_status_line), + 'is_vote': (False, _parse_header_vote_status_line), + 'is_microdescriptor': (False, _parse_header_network_status_version_line), + 'consensus_methods': ([], _parse_header_consensus_methods_line), + 'published': (None, _parse_published_line), + 'consensus_method': (None, _parse_header_consensus_method_line), + 'valid_after': (None, _parse_header_valid_after_line), + 'fresh_until': (None, _parse_header_fresh_until_line), + 'valid_until': (None, _parse_header_valid_until_line), + 'vote_delay': (None, _parse_header_voting_delay_line), + 'dist_delay': (None, _parse_header_voting_delay_line), + 'client_versions': ([], _parse_header_client_versions_line), + 'server_versions': ([], _parse_header_server_versions_line), + 'packages': ([], _parse_package_line), + 'known_flags': ([], _parse_header_known_flags_line), + 'flag_thresholds': ({}, _parse_header_flag_thresholds_line), + 'params': ({}, _parse_header_parameters_line), + + 'signatures': ([], _parse_footer_directory_signature_line), + 'bandwidth_weights': ({}, _parse_footer_bandwidth_weights_line), + } + + HEADER_PARSER_FOR_LINE = { + 'network-status-version': _parse_header_network_status_version_line, + 'vote-status': _parse_header_vote_status_line, + 'consensus-methods': _parse_header_consensus_methods_line, + 'consensus-method': _parse_header_consensus_method_line, + 'published': _parse_published_line, + 'valid-after': _parse_header_valid_after_line, + 'fresh-until': _parse_header_fresh_until_line, + 'valid-until': _parse_header_valid_until_line, + 'voting-delay': _parse_header_voting_delay_line, + 'client-versions': _parse_header_client_versions_line, + 'server-versions': _parse_header_server_versions_line, + 'package': _parse_package_line, + 'known-flags': _parse_header_known_flags_line, + 'flag-thresholds': _parse_header_flag_thresholds_line, + 'params': _parse_header_parameters_line, + } + + FOOTER_PARSER_FOR_LINE = { + 'directory-footer': _parse_directory_footer_line, + 'bandwidth-weights': _parse_footer_bandwidth_weights_line, + 'directory-signature': _parse_footer_directory_signature_line, + } + + def __init__(self, raw_content, validate = False, default_params = True): + """ + Parse a v3 network status document. 
+ + :param str raw_content: raw network status document data + :param bool validate: **True** if the document is to be validated, **False** otherwise + :param bool default_params: includes defaults in our params dict, otherwise + it just contains values from the document + + :raises: **ValueError** if the document is invalid + """ + + super(NetworkStatusDocumentV3, self).__init__(raw_content, lazy_load = not validate) + document_file = io.BytesIO(raw_content) + + self._default_params = default_params + self._header(document_file, validate) + + self.directory_authorities = tuple(stem.descriptor.router_status_entry._parse_file( + document_file, + validate, + entry_class = DirectoryAuthority, + entry_keyword = AUTH_START, + section_end_keywords = (ROUTERS_START, FOOTER_START, V2_FOOTER_START), + extra_args = (self.is_vote,), + )) + + if validate and self.is_vote and len(self.directory_authorities) != 1: + raise ValueError('Votes should only have an authority entry for the one that issued it, got %i: %s' % (len(self.directory_authorities), self.directory_authorities)) + + router_iter = stem.descriptor.router_status_entry._parse_file( + document_file, + validate, + entry_class = RouterStatusEntryMicroV3 if self.is_microdescriptor else RouterStatusEntryV3, + entry_keyword = ROUTERS_START, + section_end_keywords = (FOOTER_START, V2_FOOTER_START), + extra_args = (self,), + ) + + self.routers = dict((desc.fingerprint, desc) for desc in router_iter) + self._footer(document_file, validate) + + def get_unrecognized_lines(self): + if self._lazy_loading: + self._parse(self._header_entries, False, parser_for_line = self.HEADER_PARSER_FOR_LINE) + self._parse(self._footer_entries, False, parser_for_line = self.FOOTER_PARSER_FOR_LINE) + self._lazy_loading = False + + return super(NetworkStatusDocumentV3, self).get_unrecognized_lines() + + def meets_consensus_method(self, method): + """ + Checks if we meet the given consensus-method. This works for both votes and + consensuses, checking our 'consensus-method' and 'consensus-methods' + entries. 
+ + :param int method: consensus-method to check for + + :returns: **True** if we meet the given consensus-method, and **False** otherwise + """ + + if self.consensus_method is not None: + return self.consensus_method >= method + elif self.consensus_methods is not None: + return bool([x for x in self.consensus_methods if x >= method]) + else: + return False # malformed document + + def _compare(self, other, method): + if not isinstance(other, NetworkStatusDocumentV3): + return False + + return method(str(self).strip(), str(other).strip()) + + def _header(self, document_file, validate): + content = bytes.join(b'', _read_until_keywords((AUTH_START, ROUTERS_START, FOOTER_START), document_file)) + entries = _get_descriptor_components(content, validate) + + if validate: + # all known header fields can only appear once except + + for keyword, values in list(entries.items()): + if len(values) > 1 and keyword in HEADER_FIELDS and keyword != 'package': + raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values))) + + if self._default_params: + self.params = dict(DEFAULT_PARAMS) + + self._parse(entries, validate, parser_for_line = self.HEADER_PARSER_FOR_LINE) + + _check_for_missing_and_disallowed_fields(self, entries, HEADER_STATUS_DOCUMENT_FIELDS) + _check_for_misordered_fields(entries, HEADER_FIELDS) + + # default consensus_method and consensus_methods based on if we're a consensus or vote + + if self.is_consensus and not self.consensus_method: + self.consensus_method = 1 + elif self.is_vote and not self.consensus_methods: + self.consensus_methods = [1] + else: + self._header_entries = entries + self._entries.update(entries) + + def _footer(self, document_file, validate): + entries = _get_descriptor_components(document_file.read(), validate) + + if validate: + for keyword, values in list(entries.items()): + # all known footer fields can only appear once except... + # * 'directory-signature' in a consensus + + if len(values) > 1 and keyword in FOOTER_FIELDS: + if not (keyword == 'directory-signature' and self.is_consensus): + raise ValueError("Network status documents can only have a single '%s' line, got %i" % (keyword, len(values))) + + self._parse(entries, validate, parser_for_line = self.FOOTER_PARSER_FOR_LINE) + + # Check that the footer has the right initial line. Prior to consensus + # method 9 it's a 'directory-signature' and after that footers start with + # 'directory-footer'. + + if entries: + if self.meets_consensus_method(9): + if list(entries.keys())[0] != 'directory-footer': + raise ValueError("Network status document's footer should start with a 'directory-footer' line in consensus-method 9 or later") + else: + if list(entries.keys())[0] != 'directory-signature': + raise ValueError("Network status document's footer should start with a 'directory-signature' line prior to consensus-method 9") + + _check_for_missing_and_disallowed_fields(self, entries, FOOTER_STATUS_DOCUMENT_FIELDS) + _check_for_misordered_fields(entries, FOOTER_FIELDS) + else: + self._footer_entries = entries + self._entries.update(entries) + + def _check_params_constraints(self): + """ + Checks that the params we know about are within their documented ranges. 
+ """ + + for key, value in self.params.items(): + minimum, maximum = PARAM_RANGE.get(key, (MIN_PARAM, MAX_PARAM)) + + # there's a few dynamic parameter ranges + + if key == 'cbtclosequantile': + minimum = self.params.get('cbtquantile', minimum) + elif key == 'cbtinitialtimeout': + minimum = self.params.get('cbtmintimeout', minimum) + + if value < minimum or value > maximum: + raise ValueError("'%s' value on the params line must be in the range of %i - %i, was %i" % (key, minimum, maximum, value)) + + def __hash__(self): + return hash(str(self).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + +def _check_for_missing_and_disallowed_fields(document, entries, fields): + """ + Checks that we have mandatory fields for our type, and that we don't have + any fields exclusive to the other (ie, no vote-only fields appear in a + consensus or vice versa). + + :param NetworkStatusDocumentV3 document: network status document + :param dict entries: ordered keyword/value mappings of the header or footer + :param list fields: expected field attributes (either + **HEADER_STATUS_DOCUMENT_FIELDS** or **FOOTER_STATUS_DOCUMENT_FIELDS**) + + :raises: **ValueError** if we're missing mandatory fields or have fields we shouldn't + """ + + missing_fields, disallowed_fields = [], [] + + for field, in_votes, in_consensus, mandatory in fields: + if mandatory and ((document.is_consensus and in_consensus) or (document.is_vote and in_votes)): + # mandatory field, check that we have it + if field not in entries.keys(): + missing_fields.append(field) + elif (document.is_consensus and not in_consensus) or (document.is_vote and not in_votes): + # field we shouldn't have, check that we don't + if field in entries.keys(): + disallowed_fields.append(field) + + if missing_fields: + raise ValueError('Network status document is missing mandatory field: %s' % ', '.join(missing_fields)) + + if disallowed_fields: + raise ValueError("Network status document has fields that shouldn't appear in this document type or version: %s" % ', '.join(disallowed_fields)) + + +def _check_for_misordered_fields(entries, expected): + """ + To be valid a network status document's fiends need to appear in a specific + order. Checks that known fields appear in that order (unrecognized fields + are ignored). + + :param dict entries: ordered keyword/value mappings of the header or footer + :param list expected: ordered list of expected fields (either + **HEADER_FIELDS** or **FOOTER_FIELDS**) + + :raises: **ValueError** if entries aren't properly ordered + """ + + # Earlier validation has ensured that our fields either belong to our + # document type or are unknown. Remove the unknown fields since they + # reflect a spec change and can appear anywhere in the document. + + actual = [field for field in entries.keys() if field in expected] + + # Narrow the expected to just what we have. If the lists then match then the + # order's valid. + + expected = [field for field in expected if field in actual] + + if actual != expected: + actual_label = ', '.join(actual) + expected_label = ', '.join(expected) + raise ValueError("The fields in a section of the document are misordered. 
It should be '%s' but was '%s'" % (actual_label, expected_label)) + + +def _parse_int_mappings(keyword, value, validate): + # Parse a series of 'key=value' entries, checking the following: + # - values are integers + # - keys are sorted in lexical order + + results, seen_keys = {}, [] + for entry in value.split(' '): + try: + if '=' not in entry: + raise ValueError("must only have 'key=value' entries") + + entry_key, entry_value = entry.split('=', 1) + + try: + # the int() function accepts things like '+123', but we don't want to + if entry_value.startswith('+'): + raise ValueError() + + entry_value = int(entry_value) + except ValueError: + raise ValueError("'%s' is a non-numeric value" % entry_value) + + if validate: + # parameters should be in ascending order by their key + for prior_key in seen_keys: + if prior_key > entry_key: + raise ValueError('parameters must be sorted by their key') + + results[entry_key] = entry_value + seen_keys.append(entry_key) + except ValueError as exc: + if not validate: + continue + + raise ValueError("Unable to parse network status document's '%s' line (%s): %s'" % (keyword, exc, value)) + + return results + + +def _parse_dirauth_source_line(descriptor, entries): + # "dir-source" nickname identity address IP dirport orport + + value = _value('dir-source', entries) + dir_source_comp = value.split(' ') + + if len(dir_source_comp) < 6: + raise ValueError("Authority entry's 'dir-source' line must have six values: dir-source %s" % value) + + if not stem.util.tor_tools.is_valid_nickname(dir_source_comp[0].rstrip('-legacy')): + raise ValueError("Authority's nickname is invalid: %s" % dir_source_comp[0]) + elif not stem.util.tor_tools.is_valid_fingerprint(dir_source_comp[1]): + raise ValueError("Authority's v3ident is invalid: %s" % dir_source_comp[1]) + elif not dir_source_comp[2]: + # https://trac.torproject.org/7055 + raise ValueError("Authority's hostname can't be blank: dir-source %s" % value) + elif not stem.util.connection.is_valid_ipv4_address(dir_source_comp[3]): + raise ValueError("Authority's address isn't a valid IPv4 address: %s" % dir_source_comp[3]) + elif not stem.util.connection.is_valid_port(dir_source_comp[4], allow_zero = True): + raise ValueError("Authority's DirPort is invalid: %s" % dir_source_comp[4]) + elif not stem.util.connection.is_valid_port(dir_source_comp[5]): + raise ValueError("Authority's ORPort is invalid: %s" % dir_source_comp[5]) + + descriptor.nickname = dir_source_comp[0] + descriptor.v3ident = dir_source_comp[1] + descriptor.hostname = dir_source_comp[2] + descriptor.address = dir_source_comp[3] + descriptor.dir_port = None if dir_source_comp[4] == '0' else int(dir_source_comp[4]) + descriptor.or_port = int(dir_source_comp[5]) + descriptor.is_legacy = descriptor.nickname.endswith('-legacy') + + +_parse_legacy_dir_key_line = _parse_forty_character_hex('legacy-dir-key', 'legacy_dir_key') +_parse_vote_digest_line = _parse_forty_character_hex('vote-digest', 'vote_digest') + + +class DirectoryAuthority(Descriptor): + """ + Directory authority information obtained from a v3 network status document. + + Authorities can optionally use a legacy format. These are no longer found in + practice, but have the following differences... + + * The authority's nickname ends with '-legacy'. + * There's no **contact** or **vote_digest** attribute. 
+ + :var str nickname: **\*** authority's nickname + :var str v3ident: **\*** identity key fingerprint used to sign votes and consensus + :var str hostname: **\*** hostname of the authority + :var str address: **\*** authority's IP address + :var int dir_port: **\*** authority's DirPort + :var int or_port: **\*** authority's ORPort + :var bool is_legacy: **\*** if the authority's using the legacy format + :var str contact: contact information, this is included if is_legacy is **False** + + **Consensus Attributes:** + + :var str vote_digest: digest of the authority that contributed to the consensus, this is included if is_legacy is **False** + + **Vote Attributes:** + + :var str legacy_dir_key: fingerprint of and obsolete identity key + :var stem.descriptor.networkstatus.KeyCertificate key_certificate: **\*** + authority's key certificate + + **\*** mandatory attribute + + .. versionchanged:: 1.4.0 + Renamed our 'fingerprint' attribute to 'v3ident' (prior attribute exists + for backward compatability, but is deprecated). + """ + + ATTRIBUTES = { + 'nickname': (None, _parse_dirauth_source_line), + 'v3ident': (None, _parse_dirauth_source_line), + 'hostname': (None, _parse_dirauth_source_line), + 'address': (None, _parse_dirauth_source_line), + 'dir_port': (None, _parse_dirauth_source_line), + 'or_port': (None, _parse_dirauth_source_line), + 'is_legacy': (False, _parse_dirauth_source_line), + 'contact': (None, _parse_contact_line), + 'vote_digest': (None, _parse_vote_digest_line), + 'legacy_dir_key': (None, _parse_legacy_dir_key_line), + } + + PARSER_FOR_LINE = { + 'dir-source': _parse_dirauth_source_line, + 'contact': _parse_contact_line, + 'legacy-dir-key': _parse_legacy_dir_key_line, + 'vote-digest': _parse_vote_digest_line, + } + + def __init__(self, raw_content, validate = False, is_vote = False): + """ + Parse a directory authority entry in a v3 network status document. 
+ + :param str raw_content: raw directory authority entry information + :param bool validate: checks the validity of the content if True, skips + these checks otherwise + :param bool is_vote: True if this is for a vote, False if it's for a consensus + + :raises: ValueError if the descriptor data is invalid + """ + + super(DirectoryAuthority, self).__init__(raw_content, lazy_load = not validate) + content = stem.util.str_tools._to_unicode(raw_content) + + # separate the directory authority entry from its key certificate + key_div = content.find('\ndir-key-certificate-version') + + if key_div != -1: + self.key_certificate = KeyCertificate(content[key_div + 1:], validate) + content = content[:key_div + 1] + else: + self.key_certificate = None + + entries = _get_descriptor_components(content, validate) + + if validate and 'dir-source' != list(entries.keys())[0]: + raise ValueError("Authority entries are expected to start with a 'dir-source' line:\n%s" % (content)) + + # check that we have mandatory fields + + if validate: + is_legacy, dir_source_entry = False, entries.get('dir-source') + + if dir_source_entry: + is_legacy = dir_source_entry[0][0].split()[0].endswith('-legacy') + + required_fields, excluded_fields = ['dir-source'], [] + + if not is_legacy: + required_fields += ['contact'] + + if is_vote: + if not self.key_certificate: + raise ValueError('Authority votes must have a key certificate:\n%s' % content) + + excluded_fields += ['vote-digest'] + elif not is_vote: + if self.key_certificate: + raise ValueError("Authority consensus entries shouldn't have a key certificate:\n%s" % content) + + if not is_legacy: + required_fields += ['vote-digest'] + + excluded_fields += ['legacy-dir-key'] + + for keyword in required_fields: + if keyword not in entries: + raise ValueError("Authority entries must have a '%s' line:\n%s" % (keyword, content)) + + for keyword in entries: + if keyword in excluded_fields: + type_label = 'votes' if is_vote else 'consensus entries' + raise ValueError("Authority %s shouldn't have a '%s' line:\n%s" % (type_label, keyword, content)) + + # all known attributes can only appear at most once + for keyword, values in list(entries.items()): + if len(values) > 1 and keyword in ('dir-source', 'contact', 'legacy-dir-key', 'vote-digest'): + raise ValueError("Authority entries can only have a single '%s' line, got %i:\n%s" % (keyword, len(values), content)) + + self._parse(entries, validate) + else: + self._entries = entries + + # TODO: Due to a bug we had a 'fingerprint' rather than 'v3ident' attribute + # for a long while. Keeping this around for backward compatability, but + # this will be dropped in stem's 2.0 release. 
+ + self.fingerprint = self.v3ident + + def _compare(self, other, method): + if not isinstance(other, DirectoryAuthority): + return False + + return method(str(self).strip(), str(other).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + +def _parse_dir_address_line(descriptor, entries): + # "dir-address" IPPort + + value = _value('dir-address', entries) + + if ':' not in value: + raise ValueError("Key certificate's 'dir-address' is expected to be of the form ADDRESS:PORT: dir-address %s" % value) + + address, dirport = value.split(':', 1) + + if not stem.util.connection.is_valid_ipv4_address(address): + raise ValueError("Key certificate's address isn't a valid IPv4 address: dir-address %s" % value) + elif not stem.util.connection.is_valid_port(dirport): + raise ValueError("Key certificate's dirport is invalid: dir-address %s" % value) + + descriptor.address = address + descriptor.dir_port = int(dirport) + + +_parse_dir_key_certificate_version_line = _parse_version_line('dir-key-certificate-version', 'version', 3) +_parse_dir_key_published_line = _parse_timestamp_line('dir-key-published', 'published') +_parse_dir_key_expires_line = _parse_timestamp_line('dir-key-expires', 'expires') +_parse_identity_key_line = _parse_key_block('dir-identity-key', 'identity_key', 'RSA PUBLIC KEY') +_parse_signing_key_line = _parse_key_block('dir-signing-key', 'signing_key', 'RSA PUBLIC KEY') +_parse_dir_key_crosscert_line = _parse_key_block('dir-key-crosscert', 'crosscert', 'ID SIGNATURE') +_parse_dir_key_certification_line = _parse_key_block('dir-key-certification', 'certification', 'SIGNATURE') + + +class KeyCertificate(Descriptor): + """ + Directory key certificate for a v3 network status document. 
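+
+  For example, a minimal sketch that downloads the certificates currently
+  served by the directory authorities (network access is assumed)...
+
+  ::
+
+    from stem.descriptor.remote import DescriptorDownloader
+
+    downloader = DescriptorDownloader()
+
+    for cert in downloader.get_key_certificates().run():
+      print('%s expires %s' % (cert.fingerprint, cert.expires))
+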
+ + :var int version: **\*** version of the key certificate + :var str address: authority's IP address + :var int dir_port: authority's DirPort + :var str fingerprint: **\*** authority's fingerprint + :var str identity_key: **\*** long term authority identity key + :var datetime published: **\*** time when this key was generated + :var datetime expires: **\*** time after which this key becomes invalid + :var str signing_key: **\*** directory server's public signing key + :var str crosscert: signature made using certificate's signing key + :var str certification: **\*** signature of this key certificate signed with + the identity key + + **\*** mandatory attribute + """ + + ATTRIBUTES = { + 'version': (None, _parse_dir_key_certificate_version_line), + 'address': (None, _parse_dir_address_line), + 'dir_port': (None, _parse_dir_address_line), + 'fingerprint': (None, _parse_fingerprint_line), + 'identity_key': (None, _parse_identity_key_line), + 'published': (None, _parse_dir_key_published_line), + 'expires': (None, _parse_dir_key_expires_line), + 'signing_key': (None, _parse_signing_key_line), + 'crosscert': (None, _parse_dir_key_crosscert_line), + 'certification': (None, _parse_dir_key_certification_line), + } + + PARSER_FOR_LINE = { + 'dir-key-certificate-version': _parse_dir_key_certificate_version_line, + 'dir-address': _parse_dir_address_line, + 'fingerprint': _parse_fingerprint_line, + 'dir-key-published': _parse_dir_key_published_line, + 'dir-key-expires': _parse_dir_key_expires_line, + 'dir-identity-key': _parse_identity_key_line, + 'dir-signing-key': _parse_signing_key_line, + 'dir-key-crosscert': _parse_dir_key_crosscert_line, + 'dir-key-certification': _parse_dir_key_certification_line, + } + + def __init__(self, raw_content, validate = False): + super(KeyCertificate, self).__init__(raw_content, lazy_load = not validate) + entries = _get_descriptor_components(raw_content, validate) + + if validate: + if 'dir-key-certificate-version' != list(entries.keys())[0]: + raise ValueError("Key certificates must start with a 'dir-key-certificate-version' line:\n%s" % (raw_content)) + elif 'dir-key-certification' != list(entries.keys())[-1]: + raise ValueError("Key certificates must end with a 'dir-key-certification' line:\n%s" % (raw_content)) + + # check that we have mandatory fields and that our known fields only + # appear once + + for keyword, is_mandatory in KEY_CERTIFICATE_PARAMS: + if is_mandatory and keyword not in entries: + raise ValueError("Key certificates must have a '%s' line:\n%s" % (keyword, raw_content)) + + entry_count = len(entries.get(keyword, [])) + if entry_count > 1: + raise ValueError("Key certificates can only have a single '%s' line, got %i:\n%s" % (keyword, entry_count, raw_content)) + + self._parse(entries, validate) + else: + self._entries = entries + + def _compare(self, other, method): + if not isinstance(other, KeyCertificate): + return False + + return method(str(self).strip(), str(other).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + +class DocumentSignature(object): + """ + Directory signature of a v3 network status document. 
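+
+  For example, a rough sketch that lists the signatures of a previously parsed
+  consensus, assuming a NetworkStatusDocumentV3 in a 'consensus' variable with
+  its signatures attribute populated...
+
+  ::
+
+    # 'consensus' is assumed to be a parsed NetworkStatusDocumentV3
+    for signature in consensus.signatures:
+      print('%s (method %s)' % (signature.identity, signature.method))
+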
+ + :var str method: algorithm used to make the signature + :var str identity: fingerprint of the authority that made the signature + :var str key_digest: digest of the signing key + :var str signature: document signature + :param bool validate: checks validity if **True** + + :raises: **ValueError** if a validity check fails + """ + + def __init__(self, method, identity, key_digest, signature, validate = False): + # Checking that these attributes are valid. Technically the key + # digest isn't a fingerprint, but it has the same characteristics. + + if validate: + if not stem.util.tor_tools.is_valid_fingerprint(identity): + raise ValueError('Malformed fingerprint (%s) in the document signature' % identity) + + if not stem.util.tor_tools.is_valid_fingerprint(key_digest): + raise ValueError('Malformed key digest (%s) in the document signature' % key_digest) + + self.method = method + self.identity = identity + self.key_digest = key_digest + self.signature = signature + + def _compare(self, other, method): + if not isinstance(other, DocumentSignature): + return False + + for attr in ('method', 'identity', 'key_digest', 'signature'): + if getattr(self, attr) != getattr(other, attr): + return method(getattr(self, attr), getattr(other, attr)) + + return method(True, True) # we're equal + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + +class BridgeNetworkStatusDocument(NetworkStatusDocument): + """ + Network status document containing bridges. This is only available through + the metrics site. + + :var tuple routers: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` + contained in the document + :var datetime published: time when the document was published + """ + + def __init__(self, raw_content, validate = False): + super(BridgeNetworkStatusDocument, self).__init__(raw_content) + + self.published = None + + document_file = io.BytesIO(raw_content) + published_line = stem.util.str_tools._to_unicode(document_file.readline()) + + if published_line.startswith('published '): + published_line = published_line.split(' ', 1)[1].strip() + + try: + self.published = stem.util.str_tools._parse_timestamp(published_line) + except ValueError: + if validate: + raise ValueError("Bridge network status document's 'published' time wasn't parsable: %s" % published_line) + elif validate: + raise ValueError("Bridge network status documents must start with a 'published' line:\n%s" % stem.util.str_tools._to_unicode(raw_content)) + + router_iter = stem.descriptor.router_status_entry._parse_file( + document_file, + validate, + entry_class = RouterStatusEntryV2, + extra_args = (self,), + ) + + self.routers = dict((desc.fingerprint, desc) for desc in router_iter) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/reader.py b/Shared/lib/python3.4/site-packages/stem/descriptor/reader.py new file mode 100644 index 0000000..a96406d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/reader.py @@ -0,0 +1,574 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Utilities for reading descriptors from local directories and archives. This is +mostly done through the :class:`~stem.descriptor.reader.DescriptorReader` +class, which is an iterator for the descriptor data in a series of +destinations. For example... 
+ +:: + + my_descriptors = [ + '/tmp/server-descriptors-2012-03.tar.bz2', + '/tmp/archived_descriptors/', + ] + + # prints the contents of all the descriptor files + with DescriptorReader(my_descriptors) as reader: + for descriptor in reader: + print descriptor + +This ignores files that cannot be processed due to read errors or unparsable +content. To be notified of skipped files you can register a listener with +:func:`~stem.descriptor.reader.DescriptorReader.register_skip_listener`. + +The :class:`~stem.descriptor.reader.DescriptorReader` keeps track of the last +modified timestamps for descriptor files that it has read so it can skip +unchanged files if run again. This listing of processed files can also be +persisted and applied to other +:class:`~stem.descriptor.reader.DescriptorReader` instances. For example, the +following prints descriptors as they're changed over the course of a minute, +and picks up where it left off if run again... + +:: + + reader = DescriptorReader(['/tmp/descriptor_data']) + + try: + processed_files = load_processed_files('/tmp/used_descriptors') + reader.set_processed_files(processed_files) + except: pass # could not load, maybe this is the first run + + start_time = time.time() + + while (time.time() - start_time) < 60: + # prints any descriptors that have changed since last checked + with reader: + for descriptor in reader: + print descriptor + + time.sleep(1) + + save_processed_files('/tmp/used_descriptors', reader.get_processed_files()) + +**Module Overview:** + +:: + + load_processed_files - Loads a listing of processed files + save_processed_files - Saves a listing of processed files + + DescriptorReader - Iterator for descriptor data on the local file system + |- get_processed_files - provides the listing of files that we've processed + |- set_processed_files - sets our tracking of the files we have processed + |- register_read_listener - adds a listener for when files are read + |- register_skip_listener - adds a listener that's notified of skipped files + |- start - begins reading descriptor data + |- stop - stops reading descriptor data + |- __enter__ / __exit__ - manages the descriptor reader thread in the context + +- __iter__ - iterates over descriptor data in unread files + + FileSkipped - Base exception for a file that was skipped + |- AlreadyRead - We've already read a file with this last modified timestamp + |- ParsingFailure - Contents can't be parsed as descriptor data + |- UnrecognizedType - File extension indicates non-descriptor data + +- ReadFailed - Wraps an error that was raised while reading the file + +- FileMissing - File does not exist +""" + +import mimetypes +import os +import tarfile +import threading + +try: + import queue +except ImportError: + import Queue as queue + +import stem.descriptor +import stem.prereq +import stem.util.system + +from stem import str_type + +# flag to indicate when the reader thread is out of descriptor files to read +FINISHED = 'DONE' + + +class FileSkipped(Exception): + "Base error when we can't provide descriptor data from a file." + + +class AlreadyRead(FileSkipped): + """ + Already read a file with this 'last modified' timestamp or later. + + :param int last_modified: unix timestamp for when the file was last modified + :param int last_modified_when_read: unix timestamp for the modification time + when we last read this file + """ + + def __init__(self, last_modified, last_modified_when_read): + super(AlreadyRead, self).__init__('File has already been read since it was last modified. 
modification time: %s, last read: %s' % (last_modified, last_modified_when_read)) + self.last_modified = last_modified + self.last_modified_when_read = last_modified_when_read + + +class ParsingFailure(FileSkipped): + """ + File contents could not be parsed as descriptor data. + + :param ValueError exception: issue that arose when parsing + """ + + def __init__(self, parsing_exception): + super(ParsingFailure, self).__init__(parsing_exception) + self.exception = parsing_exception + + +class UnrecognizedType(FileSkipped): + """ + File doesn't contain descriptor data. This could either be due to its file + type or because it doesn't conform to a recognizable descriptor type. + + :param tuple mime_type: the (type, encoding) tuple provided by mimetypes.guess_type() + """ + + def __init__(self, mime_type): + super(UnrecognizedType, self).__init__('Unrecognized mime type: %s (%s)' % mime_type) + self.mime_type = mime_type + + +class ReadFailed(FileSkipped): + """ + An IOError occurred while trying to read the file. + + :param IOError exception: issue that arose when reading the file, **None** if + this arose due to the file not being present + """ + + def __init__(self, read_exception): + super(ReadFailed, self).__init__(read_exception) + self.exception = read_exception + + +class FileMissing(ReadFailed): + 'File does not exist.' + + def __init__(self): + super(FileMissing, self).__init__('File does not exist') + + +def load_processed_files(path): + """ + Loads a dictionary of 'path => last modified timestamp' mappings, as + persisted by :func:`~stem.descriptor.reader.save_processed_files`, from a + file. + + :param str path: location to load the processed files dictionary from + + :returns: **dict** of 'path (**str**) => last modified unix timestamp + (**int**)' mappings + + :raises: + * **IOError** if unable to read the file + * **TypeError** if unable to parse the file's contents + """ + + processed_files = {} + + with open(path) as input_file: + for line in input_file.readlines(): + line = line.strip() + + if not line: + continue # skip blank lines + + if ' ' not in line: + raise TypeError('Malformed line: %s' % line) + + path, timestamp = line.rsplit(' ', 1) + + if not os.path.isabs(path): + raise TypeError("'%s' is not an absolute path" % path) + elif not timestamp.isdigit(): + raise TypeError("'%s' is not an integer timestamp" % timestamp) + + processed_files[path] = int(timestamp) + + return processed_files + + +def save_processed_files(path, processed_files): + """ + Persists a dictionary of 'path => last modified timestamp' mappings (as + provided by the DescriptorReader's + :func:`~stem.descriptor.reader.DescriptorReader.get_processed_files` method) + so that they can be loaded later and applied to another + :class:`~stem.descriptor.reader.DescriptorReader`. 
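+
+  For example, a minimal sketch of the round trip, using an illustrative path
+  and mapping...
+
+  ::
+
+    from stem.descriptor.reader import load_processed_files, save_processed_files
+
+    # both the path and the mapping below are illustrative only
+    save_processed_files('/tmp/used_descriptors', {
+      '/tmp/descriptor_data/cached-consensus': 1448320000,
+    })
+
+    processed_files = load_processed_files('/tmp/used_descriptors')
+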
+ + :param str path: location to save the processed files dictionary to + :param dict processed_files: 'path => last modified' mappings + + :raises: + * **IOError** if unable to write to the file + * **TypeError** if processed_files is of the wrong type + """ + + # makes the parent directory if it doesn't already exist + try: + path_dir = os.path.dirname(path) + + if not os.path.exists(path_dir): + os.makedirs(path_dir) + except OSError as exc: + raise IOError(exc) + + with open(path, 'w') as output_file: + for path, timestamp in list(processed_files.items()): + if not os.path.isabs(path): + raise TypeError('Only absolute paths are acceptable: %s' % path) + + output_file.write('%s %i\n' % (path, timestamp)) + + +class DescriptorReader(object): + """ + Iterator for the descriptor data on the local file system. This can process + text files, tarball archives (gzip or bzip2), or recurse directories. + + By default this limits the number of descriptors that we'll read ahead before + waiting for our caller to fetch some of them. This is included to avoid + unbounded memory usage. + + Our persistence_path argument is a convenient method to persist the listing + of files we have processed between runs, however it doesn't allow for error + handling. If you want that then use the + :func:`~stem.descriptor.reader.load_processed_files` and + :func:`~stem.descriptor.reader.save_processed_files` functions instead. + + :param str,list target: path or list of paths for files or directories to be read from + :param bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :param bool follow_links: determines if we'll follow symlinks when traversing + directories (requires python 2.6) + :param int buffer_size: descriptors we'll buffer before waiting for some to + be read, this is unbounded if zero + :param str persistence_path: if set we will load and save processed file + listings from this path, errors are ignored + :param stem.descriptor.__init__.DocumentHandler document_handler: method in + which to parse :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` + :param dict kwargs: additional arguments for the descriptor constructor + """ + + def __init__(self, target, validate = False, follow_links = False, buffer_size = 100, persistence_path = None, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs): + if isinstance(target, (bytes, str_type)): + self._targets = [target] + else: + self._targets = target + + # expand any relative paths we got + + self._targets = list(map(os.path.abspath, self._targets)) + + self._validate = validate + self._follow_links = follow_links + self._persistence_path = persistence_path + self._document_handler = document_handler + self._kwargs = kwargs + self._read_listeners = [] + self._skip_listeners = [] + self._processed_files = {} + + self._reader_thread = None + self._reader_thread_lock = threading.RLock() + + self._iter_lock = threading.RLock() + self._iter_notice = threading.Event() + + self._is_stopped = threading.Event() + self._is_stopped.set() + + # Descriptors that we have read but not yet provided to the caller. A + # FINISHED entry is used by the reading thread to indicate the end. 
+ + self._unreturned_descriptors = queue.Queue(buffer_size) + + if self._persistence_path: + try: + processed_files = load_processed_files(self._persistence_path) + self.set_processed_files(processed_files) + except: + pass + + def get_processed_files(self): + """ + For each file that we have read descriptor data from this provides a + mapping of the form... + + :: + + absolute path (str) => last modified unix timestamp (int) + + This includes entries set through the + :func:`~stem.descriptor.reader.DescriptorReader.set_processed_files` + method. Each run resets this to only the files that were present during + that run. + + :returns: **dict** with the absolute paths and unix timestamp for the last + modified times of the files we have processed + """ + + # make sure that we only provide back absolute paths + return dict((os.path.abspath(k), v) for (k, v) in list(self._processed_files.items())) + + def set_processed_files(self, processed_files): + """ + Sets the listing of the files we have processed. Most often this is used + with a newly created :class:`~stem.descriptor.reader.DescriptorReader` to + pre-populate the listing of descriptor files that we have seen. + + :param dict processed_files: mapping of absolute paths (**str**) to unix + timestamps for the last modified time (**int**) + """ + + self._processed_files = dict(processed_files) + + def register_read_listener(self, listener): + """ + Registers a listener for when files are read. This is executed prior to + processing files. Listeners are expected to be of the form... + + :: + + my_listener(path) + + :param functor listener: functor to be notified when files are read + """ + + self._read_listeners.append(listener) + + def register_skip_listener(self, listener): + """ + Registers a listener for files that are skipped. This listener is expected + to be a functor of the form... + + :: + + my_listener(path, exception) + + :param functor listener: functor to be notified of files that are skipped + to read errors or because they couldn't be parsed as valid descriptor data + """ + + self._skip_listeners.append(listener) + + def get_buffered_descriptor_count(self): + """ + Provides the number of descriptors that are waiting to be iterated over. + This is limited to the buffer_size that we were constructed with. + + :returns: **int** for the estimated number of currently enqueued + descriptors, this is not entirely reliable + """ + + return self._unreturned_descriptors.qsize() + + def start(self): + """ + Starts reading our descriptor files. + + :raises: **ValueError** if we're already reading the descriptor files + """ + + with self._reader_thread_lock: + if self._reader_thread: + raise ValueError('Already running, you need to call stop() first') + else: + self._is_stopped.clear() + self._reader_thread = threading.Thread(target = self._read_descriptor_files, name='Descriptor Reader') + self._reader_thread.setDaemon(True) + self._reader_thread.start() + + def stop(self): + """ + Stops further reading of descriptor files. 
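+
+  For example, a sketch of driving the reader without the context manager,
+  using an illustrative '/tmp/descriptor_data' path...
+
+  ::
+
+    reader = DescriptorReader(['/tmp/descriptor_data'])
+    reader.start()
+
+    try:
+      for descriptor in reader:
+        print(descriptor)
+    finally:
+      reader.stop()
+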
+ """ + + with self._reader_thread_lock: + self._is_stopped.set() + self._iter_notice.set() + + # clears our queue to unblock enqueue calls + + try: + while True: + self._unreturned_descriptors.get_nowait() + except queue.Empty: + pass + + self._reader_thread.join() + self._reader_thread = None + + if self._persistence_path: + try: + processed_files = self.get_processed_files() + save_processed_files(self._persistence_path, processed_files) + except: + pass + + def _read_descriptor_files(self): + new_processed_files = {} + remaining_files = list(self._targets) + + while remaining_files and not self._is_stopped.is_set(): + target = remaining_files.pop(0) + + if not os.path.exists(target): + self._notify_skip_listeners(target, FileMissing()) + continue + + if os.path.isdir(target): + walker = os.walk(target, followlinks = self._follow_links) + self._handle_walker(walker, new_processed_files) + else: + self._handle_file(target, new_processed_files) + + self._processed_files = new_processed_files + + if not self._is_stopped.is_set(): + self._unreturned_descriptors.put(FINISHED) + + self._iter_notice.set() + + def __iter__(self): + with self._iter_lock: + while not self._is_stopped.is_set(): + try: + descriptor = self._unreturned_descriptors.get_nowait() + + if descriptor == FINISHED: + break + else: + yield descriptor + except queue.Empty: + self._iter_notice.wait() + self._iter_notice.clear() + + def _handle_walker(self, walker, new_processed_files): + for root, _, files in walker: + for filename in files: + self._handle_file(os.path.join(root, filename), new_processed_files) + + # this can take a while if, say, we're including the root directory + if self._is_stopped.is_set(): + return + + def _handle_file(self, target, new_processed_files): + # This is a file. Register its last modified timestamp and check if + # it's a file that we should skip. + + try: + last_modified = int(os.stat(target).st_mtime) + last_used = self._processed_files.get(target) + new_processed_files[target] = last_modified + except OSError as exc: + self._notify_skip_listeners(target, ReadFailed(exc)) + return + + if last_used and last_used >= last_modified: + self._notify_skip_listeners(target, AlreadyRead(last_modified, last_used)) + return + + # Block devices and such are never descriptors, and can cause us to block + # for quite a while so skipping anything that isn't a regular file. + + if not os.path.isfile(target): + return + + # The mimetypes module only checks the file extension. To actually + # check the content (like the 'file' command) we'd need something like + # pymagic (https://github.com/cloudburst/pymagic). 
+ + target_type = mimetypes.guess_type(target) + + if target_type[0] in (None, 'text/plain'): + # either '.txt' or an unknown type + self._handle_descriptor_file(target, target_type) + elif stem.util.system.is_tarfile(target): + # handles gzip, bz2, and decompressed tarballs among others + self._handle_archive(target) + else: + self._notify_skip_listeners(target, UnrecognizedType(target_type)) + + def _handle_descriptor_file(self, target, mime_type): + try: + self._notify_read_listeners(target) + + with open(target, 'rb') as target_file: + for desc in stem.descriptor.parse_file(target_file, validate = self._validate, document_handler = self._document_handler, **self._kwargs): + if self._is_stopped.is_set(): + return + + self._unreturned_descriptors.put(desc) + self._iter_notice.set() + except TypeError as exc: + self._notify_skip_listeners(target, UnrecognizedType(mime_type)) + except ValueError as exc: + self._notify_skip_listeners(target, ParsingFailure(exc)) + except IOError as exc: + self._notify_skip_listeners(target, ReadFailed(exc)) + + def _handle_archive(self, target): + # TODO: When dropping python 2.6 support go back to using 'with' for + # tarfiles... + # + # http://bugs.python.org/issue7232 + + tar_file = None + + try: + self._notify_read_listeners(target) + tar_file = tarfile.open(target) + + for tar_entry in tar_file: + if tar_entry.isfile(): + entry = tar_file.extractfile(tar_entry) + + try: + for desc in stem.descriptor.parse_file(entry, validate = self._validate, document_handler = self._document_handler, **self._kwargs): + if self._is_stopped.is_set(): + return + + desc._set_path(os.path.abspath(target)) + desc._set_archive_path(tar_entry.name) + self._unreturned_descriptors.put(desc) + self._iter_notice.set() + except TypeError as exc: + self._notify_skip_listeners(target, ParsingFailure(exc)) + except ValueError as exc: + self._notify_skip_listeners(target, ParsingFailure(exc)) + finally: + entry.close() + except IOError as exc: + self._notify_skip_listeners(target, ReadFailed(exc)) + finally: + if tar_file: + tar_file.close() + + def _notify_read_listeners(self, path): + for listener in self._read_listeners: + listener(path) + + def _notify_skip_listeners(self, path, exception): + for listener in self._skip_listeners: + listener(path, exception) + + def __enter__(self): + self.start() + return self + + def __exit__(self, exit_type, value, traceback): + self.stop() diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/remote.py b/Shared/lib/python3.4/site-packages/stem/descriptor/remote.py new file mode 100644 index 0000000..4d3423d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/remote.py @@ -0,0 +1,777 @@ +# Copyright 2013-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Module for remotely retrieving descriptors from directory authorities and +mirrors. This is most easily done through the +:class:`~stem.descriptor.remote.DescriptorDownloader` class, which issues +:class:`~stem.descriptor.remote.Query` instances to get you the descriptor +content. For example... 
+ +:: + + from stem.descriptor.remote import DescriptorDownloader + + downloader = DescriptorDownloader( + use_mirrors = True, + timeout = 10, + ) + + query = downloader.get_server_descriptors() + + print 'Exit Relays:' + + try: + for desc in query.run(): + if desc.exit_policy.is_exiting_allowed(): + print ' %s (%s)' % (desc.nickname, desc.fingerprint) + + print + print 'Query took %0.2f seconds' % query.runtime + except Exception as exc: + print 'Unable to retrieve the server descriptors: %s' % exc + +If you don't care about errors then you can also simply iterate over the query +itself... + +:: + + for desc in downloader.get_server_descriptors(): + if desc.exit_policy.is_exiting_allowed(): + print ' %s (%s)' % (desc.nickname, desc.fingerprint) + +:: + + get_authorities - Provides tor directory information. + + DirectoryAuthority - Information about a tor directory authority. + + Query - Asynchronous request to download tor descriptors + |- start - issues the query if it isn't already running + +- run - blocks until the request is finished and provides the results + + DescriptorDownloader - Configurable class for issuing queries + |- use_directory_mirrors - use directory mirrors to download future descriptors + |- get_server_descriptors - provides present server descriptors + |- get_extrainfo_descriptors - provides present extrainfo descriptors + |- get_microdescriptors - provides present microdescriptors + |- get_consensus - provides the present consensus or router status entries + |- get_key_certificates - provides present authority key certificates + +- query - request an arbitrary descriptor resource + +.. versionadded:: 1.1.0 + +.. data:: MAX_FINGERPRINTS + + Maximum number of descriptors that can requested at a time by their + fingerprints. + +.. data:: MAX_MICRODESCRIPTOR_HASHES + + Maximum number of microdescriptors that can requested at a time by their + hashes. +""" + +import io +import random +import sys +import threading +import time +import zlib + +try: + import urllib.request as urllib +except ImportError: + import urllib2 as urllib + +import stem.descriptor + +from stem import Flag +from stem.util import log + +# Tor has a limited number of descriptors we can fetch explicitly by their +# fingerprint or hashes due to a limit on the url length by squid proxies. + +MAX_FINGERPRINTS = 96 +MAX_MICRODESCRIPTOR_HASHES = 92 + +# We commonly only want authorities that vote in the consensus, and hence have +# a v3ident. + +HAS_V3IDENT = lambda auth: auth.v3ident is not None + + +def _guess_descriptor_type(resource): + # Attempts to determine the descriptor type based on the resource url. This + # raises a ValueError if the resource isn't recognized. + + if resource.startswith('/tor/server/'): + return 'server-descriptor 1.0' + elif resource.startswith('/tor/extra/'): + return 'extra-info 1.0' + elif resource.startswith('/tor/micro/'): + return 'microdescriptor 1.0' + elif resource.startswith('/tor/status-vote/'): + return 'network-status-consensus-3 1.0' + elif resource.startswith('/tor/keys/'): + return 'dir-key-certificate-3 1.0' + else: + raise ValueError("Unable to determine the descriptor type for '%s'" % resource) + + +class Query(object): + """ + Asynchronous request for descriptor content from a directory authority or + mirror. These can either be made through the + :class:`~stem.descriptor.remote.DescriptorDownloader` or directly for more + advanced usage. 
+ + To block on the response and get results either call + :func:`~stem.descriptor.remote.Query.run` or iterate over the Query. The + :func:`~stem.descriptor.remote.Query.run` method pass along any errors that + arise... + + :: + + from stem.descriptor.remote import Query + + query = Query( + '/tor/server/all.z', + block = True, + timeout = 30, + ) + + print 'Current relays:' + + if not query.error: + for desc in query: + print desc.fingerprint + else: + print 'Unable to retrieve the server descriptors: %s' % query.error + + ... while iterating fails silently... + + :: + + print 'Current relays:' + + for desc in Query('/tor/server/all.z', 'server-descriptor 1.0'): + print desc.fingerprint + + In either case exceptions are available via our 'error' attribute. + + Tor provides quite a few different descriptor resources via its directory + protocol (see section 4.2 and later of the `dir-spec + `_). + Commonly useful ones include... + + ===================================== =========== + Resource Description + ===================================== =========== + /tor/server/all.z all present server descriptors + /tor/server/fp/++.z server descriptors with the given fingerprints + /tor/extra/all.z all present extrainfo descriptors + /tor/extra/fp/++.z extrainfo descriptors with the given fingerprints + /tor/micro/d/-.z microdescriptors with the given hashes + /tor/status-vote/current/consensus.z present consensus + /tor/keys/all.z key certificates for the authorities + /tor/keys/fp/+.z key certificates for specific authorities + ===================================== =========== + + The '.z' suffix can be excluded to get a plaintext rather than compressed + response. Compression is handled transparently, so this shouldn't matter to + the caller. + + :var str resource: resource being fetched, such as '/tor/server/all.z' + :var str descriptor_type: type of descriptors being fetched (for options see + :func:`~stem.descriptor.__init__.parse_file`), this is guessed from the + resource if **None** + + :var list endpoints: (address, dirport) tuples of the authority or mirror + we're querying, this uses authorities if undefined + :var int retries: number of times to attempt the request if downloading it + fails + :var bool fall_back_to_authority: when retrying request issues the last + request to a directory authority if **True** + + :var str content: downloaded descriptor content + :var Exception error: exception if a problem occured + :var bool is_done: flag that indicates if our request has finished + :var str download_url: last url used to download the descriptor, this is + unset until we've actually made a download attempt + + :var float start_time: unix timestamp when we first started running + :var float timeout: duration before we'll time out our request + :var float runtime: time our query took, this is **None** if it's not yet + finished + + :var bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :var stem.descriptor.__init__.DocumentHandler document_handler: method in + which to parse a :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` + :var dict kwargs: additional arguments for the descriptor constructor + + :param bool start: start making the request when constructed (default is **True**) + :param bool block: only return after the request has been completed, this is + the same as running **query.run(True)** (default is **False**) + """ + + def __init__(self, resource, descriptor_type = None, endpoints = None, retries = 2, 
fall_back_to_authority = False, timeout = None, start = True, block = False, validate = False, document_handler = stem.descriptor.DocumentHandler.ENTRIES, **kwargs): + if not resource.startswith('/'): + raise ValueError("Resources should start with a '/': %s" % resource) + + self.resource = resource + + if descriptor_type: + self.descriptor_type = descriptor_type + else: + self.descriptor_type = _guess_descriptor_type(resource) + + self.endpoints = endpoints if endpoints else [] + self.retries = retries + self.fall_back_to_authority = fall_back_to_authority + + self.content = None + self.error = None + self.is_done = False + self.download_url = None + + self.start_time = None + self.timeout = timeout + self.runtime = None + + self.validate = validate + self.document_handler = document_handler + self.kwargs = kwargs + + self._downloader_thread = None + self._downloader_thread_lock = threading.RLock() + + if start: + self.start() + + if block: + self.run(True) + + def start(self): + """ + Starts downloading the scriptors if we haven't started already. + """ + + with self._downloader_thread_lock: + if self._downloader_thread is None: + self._downloader_thread = threading.Thread( + name = 'Descriptor Query', + target = self._download_descriptors, + args = (self.retries,) + ) + + self._downloader_thread.setDaemon(True) + self._downloader_thread.start() + + def run(self, suppress = False): + """ + Blocks until our request is complete then provides the descriptors. If we + haven't yet started our request then this does so. + + :param bool suppress: avoids raising exceptions if **True** + + :returns: list for the requested :class:`~stem.descriptor.__init__.Descriptor` instances + + :raises: + Using the iterator can fail with the following if **suppress** is + **False**... + + * **ValueError** if the descriptor contents is malformed + * **socket.timeout** if our request timed out + * **urllib2.URLError** for most request failures + + Note that the urllib2 module may fail with other exception types, in + which case we'll pass it along. + """ + + return list(self._run(suppress)) + + def _run(self, suppress): + with self._downloader_thread_lock: + self.start() + self._downloader_thread.join() + + if self.error: + if suppress: + return + + raise self.error + else: + if self.content is None: + if suppress: + return + + raise ValueError('BUG: _download_descriptors() finished without either results or an error') + + try: + results = stem.descriptor.parse_file( + io.BytesIO(self.content), + self.descriptor_type, + validate = self.validate, + document_handler = self.document_handler, + **self.kwargs + ) + + for desc in results: + yield desc + except ValueError as exc: + self.error = exc # encountered a parsing error + + if suppress: + return + + raise self.error + + def __iter__(self): + for desc in self._run(True): + yield desc + + def _pick_url(self, use_authority = False): + """ + Provides a url that can be queried. If we have multiple endpoints then one + will be picked randomly. 
+ + :param bool use_authority: ignores our endpoints and uses a directory + authority instead + + :returns: **str** for the url being queried by this request + """ + + if use_authority or not self.endpoints: + authority = random.choice(filter(HAS_V3IDENT, get_authorities().values())) + address, dirport = authority.address, authority.dir_port + else: + address, dirport = random.choice(self.endpoints) + + return 'http://%s:%i/%s' % (address, dirport, self.resource.lstrip('/')) + + def _download_descriptors(self, retries): + try: + use_authority = retries == 0 and self.fall_back_to_authority + self.download_url = self._pick_url(use_authority) + + self.start_time = time.time() + response = urllib.urlopen(self.download_url, timeout = self.timeout).read() + + if self.download_url.endswith('.z'): + response = zlib.decompress(response) + + self.content = response.strip() + + self.runtime = time.time() - self.start_time + log.trace("Descriptors retrieved from '%s' in %0.2fs" % (self.download_url, self.runtime)) + except: + exc = sys.exc_info()[1] + + if retries > 0: + log.debug("Unable to download descriptors from '%s' (%i retries remaining): %s" % (self.download_url, retries, exc)) + return self._download_descriptors(retries - 1) + else: + log.debug("Unable to download descriptors from '%s': %s" % (self.download_url, exc)) + self.error = exc + finally: + self.is_done = True + + +class DescriptorDownloader(object): + """ + Configurable class that issues :class:`~stem.descriptor.remote.Query` + instances on your behalf. + + :param bool use_mirrors: downloads the present consensus and uses the directory + mirrors to fetch future requests, this fails silently if the consensus + cannot be downloaded + :param default_args: default arguments for the + :class:`~stem.descriptor.remote.Query` constructor + """ + + def __init__(self, use_mirrors = False, **default_args): + self._default_args = default_args + + authorities = filter(HAS_V3IDENT, get_authorities().values()) + self._endpoints = [(auth.address, auth.dir_port) for auth in authorities] + + if use_mirrors: + try: + start_time = time.time() + self.use_directory_mirrors() + log.debug('Retrieved directory mirrors (took %0.2fs)' % (time.time() - start_time)) + except Exception as exc: + log.debug('Unable to retrieve directory mirrors: %s' % exc) + + def use_directory_mirrors(self): + """ + Downloads the present consensus and configures ourselves to use directory + mirrors, in addition to authorities. + + :returns: :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3` + from which we got the directory mirrors + + :raises: **Exception** if unable to determine the directory mirrors + """ + + authorities = filter(HAS_V3IDENT, get_authorities().values()) + new_endpoints = set([(auth.address, auth.dir_port) for auth in authorities]) + + consensus = list(self.get_consensus(document_handler = stem.descriptor.DocumentHandler.DOCUMENT).run())[0] + + for desc in consensus.routers.values(): + if Flag.V2DIR in desc.flags: + new_endpoints.add((desc.address, desc.dir_port)) + + # we need our endpoints to be a list rather than set for random.choice() + + self._endpoints = list(new_endpoints) + + return consensus + + def get_server_descriptors(self, fingerprints = None, **query_args): + """ + Provides the server descriptors with the given fingerprints. If no + fingerprints are provided then this returns all descriptors in the present + consensus. 
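+
+  For example, a minimal sketch that fetches a single relay's server
+  descriptor by fingerprint, using moria1's relay fingerprint from the
+  hardcoded authority listing purely as an illustration...
+
+  ::
+
+    from stem.descriptor.remote import DescriptorDownloader
+
+    downloader = DescriptorDownloader()
+
+    # moria1's relay fingerprint, used only as an example
+    query = downloader.get_server_descriptors('9695DFC35FFEB861329B9F1AB04C46397020CE31')
+
+    for desc in query.run():
+      print('%s (%s)' % (desc.nickname, desc.fingerprint))
+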
+ + :param str,list fingerprints: fingerprint or list of fingerprints to be + retrieved, gets all descriptors if **None** + :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the server descriptors + + :raises: **ValueError** if we request more than 96 descriptors by their + fingerprints (this is due to a limit on the url length by squid proxies). + """ + + resource = '/tor/server/all.z' + + if isinstance(fingerprints, str): + fingerprints = [fingerprints] + + if fingerprints: + if len(fingerprints) > MAX_FINGERPRINTS: + raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS) + + resource = '/tor/server/fp/%s.z' % '+'.join(fingerprints) + + return self.query(resource, **query_args) + + def get_extrainfo_descriptors(self, fingerprints = None, **query_args): + """ + Provides the extrainfo descriptors with the given fingerprints. If no + fingerprints are provided then this returns all descriptors in the present + consensus. + + :param str,list fingerprints: fingerprint or list of fingerprints to be + retrieved, gets all descriptors if **None** + :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the extrainfo descriptors + + :raises: **ValueError** if we request more than 96 descriptors by their + fingerprints (this is due to a limit on the url length by squid proxies). + """ + + resource = '/tor/extra/all.z' + + if isinstance(fingerprints, str): + fingerprints = [fingerprints] + + if fingerprints: + if len(fingerprints) > MAX_FINGERPRINTS: + raise ValueError('Unable to request more than %i descriptors at a time by their fingerprints' % MAX_FINGERPRINTS) + + resource = '/tor/extra/fp/%s.z' % '+'.join(fingerprints) + + return self.query(resource, **query_args) + + def get_microdescriptors(self, hashes, **query_args): + """ + Provides the microdescriptors with the given hashes. To get these see the + 'microdescriptor_hashes' attribute of + :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3`. Note + that these are only provided via a microdescriptor consensus (such as + 'cached-microdesc-consensus' in your data directory). + + :param str,list hashes: microdescriptor hash or list of hashes to be + retrieved + :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the microdescriptors + + :raises: **ValueError** if we request more than 92 microdescriptors by their + hashes (this is due to a limit on the url length by squid proxies). + """ + + if isinstance(hashes, str): + hashes = [hashes] + + if len(hashes) > MAX_MICRODESCRIPTOR_HASHES: + raise ValueError('Unable to request more than %i microdescriptors at a time by their hashes' % MAX_MICRODESCRIPTOR_HASHES) + + return self.query('/tor/micro/d/%s.z' % '-'.join(hashes), **query_args) + + def get_consensus(self, authority_v3ident = None, **query_args): + """ + Provides the present router status entries. + + :param str authority_v3ident: fingerprint of the authority key for which + to get the consensus, see `'v3ident' in tor's config.c + `_ + for the values. 
+ :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the router status + entries + """ + + resource = '/tor/status-vote/current/consensus' + + if authority_v3ident: + resource += '/%s' % authority_v3ident + + return self.query(resource + '.z', **query_args) + + def get_vote(self, authority, **query_args): + """ + Provides the present vote for a given directory authority. + + :param stem.descriptor.remote.DirectoryAuthority authority: authority for which to retrieve a vote for + :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the router status + entries + """ + + resource = '/tor/status-vote/current/authority' + + if 'endpoint' not in query_args: + query_args['endpoints'] = [(authority.address, authority.dir_port)] + + return self.query(resource + '.z', **query_args) + + def get_key_certificates(self, authority_v3idents = None, **query_args): + """ + Provides the key certificates for authorities with the given fingerprints. + If no fingerprints are provided then this returns all present key + certificates. + + :param str authority_v3idents: fingerprint or list of fingerprints of the + authority keys, see `'v3ident' in tor's config.c + `_ + for the values. + :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the key certificates + + :raises: **ValueError** if we request more than 96 key certificates by + their identity fingerprints (this is due to a limit on the url length by + squid proxies). + """ + + resource = '/tor/keys/all.z' + + if isinstance(authority_v3idents, str): + authority_v3idents = [authority_v3idents] + + if authority_v3idents: + if len(authority_v3idents) > MAX_FINGERPRINTS: + raise ValueError('Unable to request more than %i key certificates at a time by their identity fingerprints' % MAX_FINGERPRINTS) + + resource = '/tor/keys/fp/%s.z' % '+'.join(authority_v3idents) + + return self.query(resource, **query_args) + + def query(self, resource, **query_args): + """ + Issues a request for the given resource. + + :param str resource: resource being fetched, such as '/tor/server/all.z' + :param query_args: additional arguments for the + :class:`~stem.descriptor.remote.Query` constructor + + :returns: :class:`~stem.descriptor.remote.Query` for the descriptors + + :raises: **ValueError** if resource is clearly invalid or the descriptor + type can't be determined when 'descriptor_type' is **None** + """ + + args = dict(self._default_args) + args.update(query_args) + + if 'endpoints' not in args: + args['endpoints'] = self._endpoints + + if 'fall_back_to_authority' not in args: + args['fall_back_to_authority'] = True + + return Query( + resource, + **args + ) + + +class DirectoryAuthority(object): + """ + Tor directory authority, a special type of relay `hardcoded into tor + `_ + that enumerates the other relays within the network. + + At a very high level tor works as follows... + + 1. A volunteer starts up a new tor relay, during which it sends a `server + descriptor `_ to each of the directory + authorities. + + 2. Each hour the directory authorities make a `vote `_ + that says who they think the active relays are in the network and some + attributes about them. + + 3. 
The directory authorities send each other their votes, and compile that + into the `consensus `_. This document is very similar + to the votes, the only difference being that the majority of the + authorities agree upon and sign this document. The idividual relay entries + in the vote or consensus is called `router status entries + `_. + + 4. Tor clients (people using the service) download the consensus from one of + the authorities or a mirror to determine the active relays within the + network. They in turn use this to construct their circuits and use the + network. + + .. versionchanged:: 1.3.0 + Added the is_bandwidth_authority attribute. + + :var str nickname: nickname of the authority + :var str address: IP address of the authority, currently they're all IPv4 but + this may not always be the case + :var int or_port: port on which the relay services relay traffic + :var int dir_port: port on which directory information is available + :var str fingerprint: relay fingerprint + :var str v3ident: identity key fingerprint used to sign votes and consensus + """ + + def __init__(self, nickname = None, address = None, or_port = None, dir_port = None, is_bandwidth_authority = False, fingerprint = None, v3ident = None): + self.nickname = nickname + self.address = address + self.or_port = or_port + self.dir_port = dir_port + self.is_bandwidth_authority = is_bandwidth_authority + self.fingerprint = fingerprint + self.v3ident = v3ident + + +DIRECTORY_AUTHORITIES = { + 'moria1': DirectoryAuthority( + nickname = 'moria1', + address = '128.31.0.39', + or_port = 9101, + dir_port = 9131, + is_bandwidth_authority = True, + fingerprint = '9695DFC35FFEB861329B9F1AB04C46397020CE31', + v3ident = 'D586D18309DED4CD6D57C18FDB97EFA96D330566', + ), + 'tor26': DirectoryAuthority( + nickname = 'tor26', + address = '86.59.21.38', + or_port = 443, + dir_port = 80, + is_bandwidth_authority = True, + fingerprint = '847B1F850344D7876491A54892F904934E4EB85D', + v3ident = '14C131DFC5C6F93646BE72FA1401C02A8DF2E8B4', + ), + 'dizum': DirectoryAuthority( + nickname = 'dizum', + address = '194.109.206.212', + or_port = 443, + dir_port = 80, + is_bandwidth_authority = False, + fingerprint = '7EA6EAD6FD83083C538F44038BBFA077587DD755', + v3ident = 'E8A9C45EDE6D711294FADF8E7951F4DE6CA56B58', + ), + 'Tonga': DirectoryAuthority( + nickname = 'Tonga', + address = '82.94.251.203', + or_port = 443, + dir_port = 80, + is_bandwidth_authority = False, + fingerprint = '4A0CCD2DDC7995083D73F5D667100C8A5831F16D', + v3ident = None, # does not vote in the consensus + ), + 'gabelmoo': DirectoryAuthority( + nickname = 'gabelmoo', + address = '131.188.40.189', + or_port = 443, + dir_port = 80, + is_bandwidth_authority = True, + fingerprint = 'F2044413DAC2E02E3D6BCF4735A19BCA1DE97281', + v3ident = 'ED03BB616EB2F60BEC80151114BB25CEF515B226', + ), + 'dannenberg': DirectoryAuthority( + nickname = 'dannenberg', + address = '193.23.244.244', + or_port = 443, + dir_port = 80, + is_bandwidth_authority = False, + fingerprint = '7BE683E65D48141321C5ED92F075C55364AC7123', + v3ident = '585769C78764D58426B8B52B6651A5A71137189A', + ), + 'urras': DirectoryAuthority( + nickname = 'urras', + address = '208.83.223.34', + or_port = 80, + dir_port = 443, + is_bandwidth_authority = False, + fingerprint = '0AD3FA884D18F89EEA2D89C019379E0E7FD94417', + v3ident = '80550987E1D626E3EBA5E5E75A458DE0626D088C', + ), + 'maatuska': DirectoryAuthority( + nickname = 'maatuska', + address = '171.25.193.9', + or_port = 80, + dir_port = 443, + is_bandwidth_authority = True, + 
fingerprint = 'BD6A829255CB08E66FBE7D3748363586E46B3810', + v3ident = '49015F787433103580E3B66A1707A00E60F2D15B', + ), + 'Faravahar': DirectoryAuthority( + nickname = 'Faravahar', + address = '154.35.175.225', + or_port = 443, + dir_port = 80, + is_bandwidth_authority = False, + fingerprint = 'CF6D0AAFB385BE71B8E111FC5CFF4B47923733BC', + v3ident = 'EFCBE720AB3A82B99F9E953CD5BF50F7EEFC7B97', + ), + 'longclaw': DirectoryAuthority( + nickname = 'longclaw', + address = '199.254.238.52', + or_port = 443, + dir_port = 80, + is_bandwidth_authority = True, + fingerprint = '74A910646BCEEFBCD2E874FC1DC997430F968145', + v3ident = '23D15D965BC35114467363C165C4F724B64B4F66', + ), +} + + +def get_authorities(): + """ + Provides the Tor directory authority information as of **Tor on 11/21/14**. + The directory information hardcoded into Tor and occasionally changes, so the + information this provides might not necessarily match your version of tor. + + :returns: dict of str nicknames to :class:`~stem.descriptor.remote.DirectoryAuthority` instances + """ + + return dict(DIRECTORY_AUTHORITIES) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/router_status_entry.py b/Shared/lib/python3.4/site-packages/stem/descriptor/router_status_entry.py new file mode 100644 index 0000000..561f855 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/router_status_entry.py @@ -0,0 +1,625 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parsing for router status entries, the information for individual routers +within a network status document. This information is provided from a few +sources... + +* control port via 'GETINFO ns/\*' and 'GETINFO md/\*' queries +* router entries in a network status document, like the cached-consensus + +**Module Overview:** + +:: + + RouterStatusEntry - Common parent for router status entries + |- RouterStatusEntryV2 - Entry for a network status v2 document + |- RouterStatusEntryV3 - Entry for a network status v3 document + +- RouterStatusEntryMicroV3 - Entry for a microdescriptor flavored v3 document +""" + +import base64 +import binascii + +import stem.exit_policy +import stem.prereq +import stem.util.str_tools + +from stem.descriptor import ( + KEYWORD_LINE, + Descriptor, + _value, + _values, + _get_descriptor_components, + _read_until_keywords, +) + + +def _parse_file(document_file, validate, entry_class, entry_keyword = 'r', start_position = None, end_position = None, section_end_keywords = (), extra_args = ()): + """ + Reads a range of the document_file containing some number of entry_class + instances. We deliminate the entry_class entries by the keyword on their + first line (entry_keyword). When finished the document is left at the + end_position. + + Either an end_position or section_end_keywords must be provided. 
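+
+  Callers normally reach this through :func:`~stem.descriptor.__init__.parse_file`
+  rather than calling it directly. For example, a rough sketch that lists the
+  router status entries of a cached consensus (the path is only an illustrative
+  assumption)...
+
+  ::
+
+    import stem.descriptor
+
+    # illustrative path, point this at a real consensus file
+    with open('/var/lib/tor/cached-consensus', 'rb') as consensus_file:
+      for entry in stem.descriptor.parse_file(consensus_file, 'network-status-consensus-3 1.0'):
+        print('%s: %s' % (entry.fingerprint, entry.address))
+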
+ + :param file document_file: file with network status document content + :param bool validate: checks the validity of the document's contents if + **True**, skips these checks otherwise + :param class entry_class: class to construct instance for + :param str entry_keyword: first keyword for the entry instances + :param int start_position: start of the section, default is the current position + :param int end_position: end of the section + :param tuple section_end_keywords: keyword(s) that deliminate the end of the + section if no end_position was provided + :param tuple extra_args: extra arguments for the entry_class (after the + content and validate flag) + + :returns: iterator over entry_class instances + + :raises: + * **ValueError** if the contents is malformed and validate is **True** + * **IOError** if the file can't be read + """ + + if start_position: + document_file.seek(start_position) + else: + start_position = document_file.tell() + + # check if we're starting at the end of the section (ie, there's no entries to read) + if section_end_keywords: + first_keyword = None + line_match = KEYWORD_LINE.match(stem.util.str_tools._to_unicode(document_file.readline())) + + if line_match: + first_keyword = line_match.groups()[0] + + document_file.seek(start_position) + + if first_keyword in section_end_keywords: + return + + while end_position is None or document_file.tell() < end_position: + desc_lines, ending_keyword = _read_until_keywords( + (entry_keyword,) + section_end_keywords, + document_file, + ignore_first = True, + end_position = end_position, + include_ending_keyword = True + ) + + desc_content = bytes.join(b'', desc_lines) + + if desc_content: + yield entry_class(desc_content, validate, *extra_args) + + # check if we stopped at the end of the section + if ending_keyword in section_end_keywords: + break + else: + break + + +def _parse_r_line(descriptor, entries): + # Parses a RouterStatusEntry's 'r' line. They're very nearly identical for + # all current entry types (v2, v3, and microdescriptor v3) with one little + # wrinkle: only the microdescriptor flavor excludes a 'digest' field. 
+ # + # For v2 and v3 router status entries: + # "r" nickname identity digest publication IP ORPort DirPort + # example: r mauer BD7xbfsCFku3+tgybEZsg8Yjhvw itcuKQ6PuPLJ7m/Oi928WjO2j8g 2012-06-22 13:19:32 80.101.105.103 9001 0 + # + # For v3 microdescriptor router status entries: + # "r" nickname identity publication IP ORPort DirPort + # example: r Konata ARIJF2zbqirB9IwsW0mQznccWww 2012-09-24 13:40:40 69.64.48.168 9001 9030 + + value = _value('r', entries) + include_digest = not isinstance(descriptor, RouterStatusEntryMicroV3) + + r_comp = value.split(' ') + + # inject a None for the digest to normalize the field positioning + if not include_digest: + r_comp.insert(2, None) + + if len(r_comp) < 8: + expected_field_count = 'eight' if include_digest else 'seven' + raise ValueError("%s 'r' line must have %s values: r %s" % (descriptor._name(), expected_field_count, value)) + + if not stem.util.tor_tools.is_valid_nickname(r_comp[0]): + raise ValueError("%s nickname isn't valid: %s" % (descriptor._name(), r_comp[0])) + elif not stem.util.connection.is_valid_ipv4_address(r_comp[5]): + raise ValueError("%s address isn't a valid IPv4 address: %s" % (descriptor._name(), r_comp[5])) + elif not stem.util.connection.is_valid_port(r_comp[6]): + raise ValueError('%s ORPort is invalid: %s' % (descriptor._name(), r_comp[6])) + elif not stem.util.connection.is_valid_port(r_comp[7], allow_zero = True): + raise ValueError('%s DirPort is invalid: %s' % (descriptor._name(), r_comp[7])) + + descriptor.nickname = r_comp[0] + descriptor.fingerprint = _base64_to_hex(r_comp[1]) + + if include_digest: + descriptor.digest = _base64_to_hex(r_comp[2]) + + descriptor.address = r_comp[5] + descriptor.or_port = int(r_comp[6]) + descriptor.dir_port = None if r_comp[7] == '0' else int(r_comp[7]) + + try: + published = '%s %s' % (r_comp[3], r_comp[4]) + descriptor.published = stem.util.str_tools._parse_timestamp(published) + except ValueError: + raise ValueError("Publication time time wasn't parsable: r %s" % value) + + +def _parse_a_line(descriptor, entries): + # "a" SP address ":" portlist + # example: a [2001:888:2133:0:82:94:251:204]:9001 + + or_addresses = [] + + for value in _values('a', entries): + if ':' not in value: + raise ValueError("%s 'a' line must be of the form '[address]:[ports]': a %s" % (descriptor._name(), value)) + + address, port = value.rsplit(':', 1) + is_ipv6 = address.startswith('[') and address.endswith(']') + + if is_ipv6: + address = address[1:-1] # remove brackets + + if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or + (is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))): + raise ValueError("%s 'a' line must start with an IPv6 address: a %s" % (descriptor._name(), value)) + + if stem.util.connection.is_valid_port(port): + or_addresses.append((address, int(port), is_ipv6)) + else: + raise ValueError("%s 'a' line had an invalid port (%s): a %s" % (descriptor._name(), port, value)) + + descriptor.or_addresses = or_addresses + + +def _parse_s_line(descriptor, entries): + # "s" Flags + # example: s Named Running Stable Valid + + value = _value('s', entries) + flags = [] if value == '' else value.split(' ') + descriptor.flags = flags + + for flag in flags: + if flags.count(flag) > 1: + raise ValueError('%s had duplicate flags: s %s' % (descriptor._name(), value)) + elif flag == '': + raise ValueError("%s had extra whitespace on its 's' line: s %s" % (descriptor._name(), value)) + + +def _parse_v_line(descriptor, entries): + # "v" version + # example: v 
Tor 0.2.2.35 + # + # The spec says that if this starts with "Tor " then what follows is a + # tor version. If not then it has "upgraded to a more sophisticated + # protocol versioning system". + + value = _value('v', entries) + descriptor.version_line = value + + if value.startswith('Tor '): + try: + descriptor.version = stem.version._get_version(value[4:]) + except ValueError as exc: + raise ValueError('%s has a malformed tor version (%s): v %s' % (descriptor._name(), exc, value)) + + +def _parse_w_line(descriptor, entries): + # "w" "Bandwidth=" INT ["Measured=" INT] ["Unmeasured=1"] + # example: w Bandwidth=7980 + + value = _value('w', entries) + w_comp = value.split(' ') + + if len(w_comp) < 1: + raise ValueError("%s 'w' line is blank: w %s" % (descriptor._name(), value)) + elif not w_comp[0].startswith('Bandwidth='): + raise ValueError("%s 'w' line needs to start with a 'Bandwidth=' entry: w %s" % (descriptor._name(), value)) + + for w_entry in w_comp: + if '=' in w_entry: + w_key, w_value = w_entry.split('=', 1) + else: + w_key, w_value = w_entry, None + + if w_key == 'Bandwidth': + if not (w_value and w_value.isdigit()): + raise ValueError("%s 'Bandwidth=' entry needs to have a numeric value: w %s" % (descriptor._name(), value)) + + descriptor.bandwidth = int(w_value) + elif w_key == 'Measured': + if not (w_value and w_value.isdigit()): + raise ValueError("%s 'Measured=' entry needs to have a numeric value: w %s" % (descriptor._name(), value)) + + descriptor.measured = int(w_value) + elif w_key == 'Unmeasured': + if w_value != '1': + raise ValueError("%s 'Unmeasured=' should only have the value of '1': w %s" % (descriptor._name(), value)) + + descriptor.is_unmeasured = True + else: + descriptor.unrecognized_bandwidth_entries.append(w_entry) + + +def _parse_p_line(descriptor, entries): + # "p" ("accept" / "reject") PortList + # p reject 1-65535 + # example: p accept 80,110,143,443,993,995,6660-6669,6697,7000-7001 + + value = _value('p', entries) + + try: + descriptor.exit_policy = stem.exit_policy.MicroExitPolicy(value) + except ValueError as exc: + raise ValueError('%s exit policy is malformed (%s): p %s' % (descriptor._name(), exc, value)) + + +def _parse_m_line(descriptor, entries): + # "m" methods 1*(algorithm "=" digest) + # example: m 8,9,10,11,12 sha256=g1vx9si329muxV3tquWIXXySNOIwRGMeAESKs/v4DWs + + all_hashes = [] + + for value in _values('m', entries): + m_comp = value.split(' ') + + if not (descriptor.document and descriptor.document.is_vote): + vote_status = 'vote' if descriptor.document else '' + raise ValueError("%s 'm' line should only appear in votes (appeared in a %s): m %s" % (descriptor._name(), vote_status, value)) + elif len(m_comp) < 1: + raise ValueError("%s 'm' line needs to start with a series of methods: m %s" % (descriptor._name(), value)) + + try: + methods = [int(entry) for entry in m_comp[0].split(',')] + except ValueError: + raise ValueError('%s microdescriptor methods should be a series of comma separated integers: m %s' % (descriptor._name(), value)) + + hashes = {} + + for entry in m_comp[1:]: + if '=' not in entry: + raise ValueError("%s can only have a series of 'algorithm=digest' mappings after the methods: m %s" % (descriptor._name(), value)) + + hash_name, digest = entry.split('=', 1) + hashes[hash_name] = digest + + all_hashes.append((methods, hashes)) + + descriptor.microdescriptor_hashes = all_hashes + + +def _parse_microdescriptor_m_line(descriptor, entries): + # "m" digest + # example: m aiUklwBrua82obG5AsTX+iEpkjQA2+AQHxZ7GwMfY70 + + 
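+  # Editor's note (illustrative, not part of upstream stem): the digest above is
+  # unpadded base64, which the _base64_to_hex() helper below re-pads and converts
+  # to upper-case hex. Its own docstring example shows the mapping...
+  #
+  #   _base64_to_hex('p1aag7VwarGxqctS7/fS0y5FU+s')
+  #   => 'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB'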
descriptor.digest = _base64_to_hex(_value('m', entries), check_if_fingerprint = False) + + +def _base64_to_hex(identity, check_if_fingerprint = True): + """ + Decodes a base64 value to hex. For example... + + :: + + >>> _base64_to_hex('p1aag7VwarGxqctS7/fS0y5FU+s') + 'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB' + + :param str identity: encoded fingerprint from the consensus + :param bool check_if_fingerprint: asserts that the result is a fingerprint if **True** + + :returns: **str** with the uppercase hex encoding of the relay's fingerprint + + :raises: **ValueError** if the result isn't a valid fingerprint + """ + + # trailing equal signs were stripped from the identity + missing_padding = len(identity) % 4 + identity += '=' * missing_padding + + try: + identity_decoded = base64.b64decode(stem.util.str_tools._to_bytes(identity)) + except (TypeError, binascii.Error): + raise ValueError("Unable to decode identity string '%s'" % identity) + + fingerprint = binascii.b2a_hex(identity_decoded).upper() + + if stem.prereq.is_python_3(): + fingerprint = stem.util.str_tools._to_unicode(fingerprint) + + if check_if_fingerprint: + if not stem.util.tor_tools.is_valid_fingerprint(fingerprint): + raise ValueError("Decoded '%s' to be '%s', which isn't a valid fingerprint" % (identity, fingerprint)) + + return fingerprint + + +class RouterStatusEntry(Descriptor): + """ + Information about an individual router stored within a network status + document. This is the common parent for concrete status entry types. + + :var stem.descriptor.networkstatus.NetworkStatusDocument document: **\*** document that this descriptor came from + + :var str nickname: **\*** router's nickname + :var str fingerprint: **\*** router's fingerprint + :var datetime published: **\*** router's publication + :var str address: **\*** router's IP address + :var int or_port: **\*** router's ORPort + :var int dir_port: **\*** router's DirPort + + :var list flags: **\*** list of :data:`~stem.Flag` associated with the relay + + :var stem.version.Version version: parsed version of tor, this is **None** if + the relay's using a new versioning scheme + :var str version_line: versioning information reported by the relay + """ + + ATTRIBUTES = { + 'nickname': (None, _parse_r_line), + 'fingerprint': (None, _parse_r_line), + 'published': (None, _parse_r_line), + 'address': (None, _parse_r_line), + 'or_port': (None, _parse_r_line), + 'dir_port': (None, _parse_r_line), + + 'flags': (None, _parse_s_line), + + 'version_line': (None, _parse_v_line), + 'version': (None, _parse_v_line), + } + + PARSER_FOR_LINE = { + 'r': _parse_r_line, + 's': _parse_s_line, + 'v': _parse_v_line, + } + + def __init__(self, content, validate = False, document = None): + """ + Parse a router descriptor in a network status document. 
+ + :param str content: router descriptor content to be parsed + :param NetworkStatusDocument document: document this descriptor came from + :param bool validate: checks the validity of the content if **True**, skips + these checks otherwise + + :raises: **ValueError** if the descriptor data is invalid + """ + + super(RouterStatusEntry, self).__init__(content, lazy_load = not validate) + self.document = document + entries = _get_descriptor_components(content, validate) + + if validate: + for keyword in self._required_fields(): + if keyword not in entries: + raise ValueError("%s must have a '%s' line:\n%s" % (self._name(True), keyword, str(self))) + + for keyword in self._single_fields(): + if keyword in entries and len(entries[keyword]) > 1: + raise ValueError("%s can only have a single '%s' line, got %i:\n%s" % (self._name(True), keyword, len(entries[keyword]), str(self))) + + if 'r' != list(entries.keys())[0]: + raise ValueError("%s are expected to start with a 'r' line:\n%s" % (self._name(True), str(self))) + + self._parse(entries, validate) + else: + self._entries = entries + + def _name(self, is_plural = False): + """ + Name for this descriptor type. + """ + + return 'Router status entries' if is_plural else 'Router status entry' + + def _required_fields(self): + """ + Provides lines that must appear in the descriptor. + """ + + return () + + def _single_fields(self): + """ + Provides lines that can only appear in the descriptor once. + """ + + return () + + def _compare(self, other, method): + if not isinstance(other, RouterStatusEntry): + return False + + return method(str(self).strip(), str(other).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + +class RouterStatusEntryV2(RouterStatusEntry): + """ + Information about an individual router stored within a version 2 network + status document. + + :var str digest: **\*** router's upper-case hex digest + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as **None** if undefined + """ + + ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{ + 'digest': (None, _parse_r_line), + }) + + def _name(self, is_plural = False): + return 'Router status entries (v2)' if is_plural else 'Router status entry (v2)' + + def _required_fields(self): + return ('r') + + def _single_fields(self): + return ('r', 's', 'v') + + def _compare(self, other, method): + if not isinstance(other, RouterStatusEntryV2): + return False + + return method(str(self).strip(), str(other).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + +class RouterStatusEntryV3(RouterStatusEntry): + """ + Information about an individual router stored within a version 3 network + status document. 
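+
+  For example (an editor's illustration reusing the sample 'r' line documented
+  in _parse_r_line above, not an upstream stem doctest)...
+
+  ::
+
+    >>> entry = RouterStatusEntryV3('r mauer BD7xbfsCFku3+tgybEZsg8Yjhvw itcuKQ6PuPLJ7m/Oi928WjO2j8g 2012-06-22 13:19:32 80.101.105.103 9001 0')
+    >>> entry.nickname
+    'mauer'
+    >>> entry.or_port
+    9001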
+ + :var list or_addresses: **\*** relay's OR addresses, this is a tuple listing + of the form (address (**str**), port (**int**), is_ipv6 (**bool**)) + :var str digest: **\*** router's upper-case hex digest + + :var int bandwidth: bandwidth claimed by the relay (in kb/s) + :var int measured: bandwidth measured to be available by the relay, this is a + unit-less heuristic generated by the Bandwidth authoritites to weight relay + selection + :var bool is_unmeasured: bandwidth measurement isn't based on three or more + measurements + :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting + information that isn't yet recognized + + :var stem.exit_policy.MicroExitPolicy exit_policy: router's exit policy + + :var list microdescriptor_hashes: **\*** tuples of two values, the list of + consensus methods for generating a set of digests and the 'algorithm => + digest' mappings + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as **None** if undefined + """ + + ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{ + 'digest': (None, _parse_r_line), + 'or_addresses': ([], _parse_a_line), + + 'bandwidth': (None, _parse_w_line), + 'measured': (None, _parse_w_line), + 'is_unmeasured': (False, _parse_w_line), + 'unrecognized_bandwidth_entries': ([], _parse_w_line), + + 'exit_policy': (None, _parse_p_line), + 'microdescriptor_hashes': ([], _parse_m_line), + }) + + PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{ + 'a': _parse_a_line, + 'w': _parse_w_line, + 'p': _parse_p_line, + 'm': _parse_m_line, + }) + + def _name(self, is_plural = False): + return 'Router status entries (v3)' if is_plural else 'Router status entry (v3)' + + def _required_fields(self): + return ('r', 's') + + def _single_fields(self): + return ('r', 's', 'v', 'w', 'p') + + def _compare(self, other, method): + if not isinstance(other, RouterStatusEntryV3): + return False + + return method(str(self).strip(), str(other).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + +class RouterStatusEntryMicroV3(RouterStatusEntry): + """ + Information about an individual router stored within a microdescriptor + flavored network status document. 
+ + :var int bandwidth: bandwidth claimed by the relay (in kb/s) + :var int measured: bandwidth measured to be available by the relay + :var bool is_unmeasured: bandwidth measurement isn't based on three or more + measurements + :var list unrecognized_bandwidth_entries: **\*** bandwidth weighting + information that isn't yet recognized + + :var str digest: **\*** router's hex encoded digest of our corresponding microdescriptor + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as **None** if undefined + """ + + ATTRIBUTES = dict(RouterStatusEntry.ATTRIBUTES, **{ + 'bandwidth': (None, _parse_w_line), + 'measured': (None, _parse_w_line), + 'is_unmeasured': (False, _parse_w_line), + 'unrecognized_bandwidth_entries': ([], _parse_w_line), + + 'digest': (None, _parse_microdescriptor_m_line), + }) + + PARSER_FOR_LINE = dict(RouterStatusEntry.PARSER_FOR_LINE, **{ + 'w': _parse_w_line, + 'm': _parse_microdescriptor_m_line, + }) + + def _name(self, is_plural = False): + return 'Router status entries (micro v3)' if is_plural else 'Router status entry (micro v3)' + + def _required_fields(self): + return ('r', 's', 'm') + + def _single_fields(self): + return ('r', 's', 'v', 'w', 'm') + + def _compare(self, other, method): + if not isinstance(other, RouterStatusEntryMicroV3): + return False + + return method(str(self).strip(), str(other).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/server_descriptor.py b/Shared/lib/python3.4/site-packages/stem/descriptor/server_descriptor.py new file mode 100644 index 0000000..b375066 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/server_descriptor.py @@ -0,0 +1,822 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parsing for Tor server descriptors, which contains the infrequently changing +information about a Tor relay (contact information, exit policy, public keys, +etc). This information is provided from a few sources... + +* The control port via 'GETINFO desc/\*' queries. + +* The 'cached-descriptors' file in Tor's data directory. + +* Archived descriptors provided by CollecTor + (https://collector.torproject.org/). + +* Directory authorities and mirrors via their DirPort. + +**Module Overview:** + +:: + + ServerDescriptor - Tor server descriptor. + |- RelayDescriptor - Server descriptor for a relay. + | + |- BridgeDescriptor - Scrubbed server descriptor for a bridge. 
+ | |- is_scrubbed - checks if our content has been properly scrubbed + | +- get_scrubbing_issues - description of issues with our scrubbing + | + |- digest - calculates the upper-case hex digest value for our content + |- get_annotations - dictionary of content prior to the descriptor entry + +- get_annotation_lines - lines that provided the annotations +""" + +import functools +import hashlib +import re + +import stem.descriptor.extrainfo_descriptor +import stem.exit_policy +import stem.prereq +import stem.util.connection +import stem.util.str_tools +import stem.util.tor_tools +import stem.version + +from stem import str_type + +from stem.descriptor import ( + PGP_BLOCK_END, + Descriptor, + _get_descriptor_components, + _read_until_keywords, + _bytes_for_block, + _value, + _values, + _parse_simple_line, + _parse_bytes_line, + _parse_timestamp_line, + _parse_forty_character_hex, + _parse_key_block, +) + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + +# relay descriptors must have exactly one of the following +REQUIRED_FIELDS = ( + 'router', + 'bandwidth', + 'published', + 'onion-key', + 'signing-key', + 'router-signature', +) + +# optional entries that can appear at most once +SINGLE_FIELDS = ( + 'platform', + 'fingerprint', + 'hibernating', + 'uptime', + 'contact', + 'read-history', + 'write-history', + 'eventdns', + 'family', + 'caches-extra-info', + 'extra-info-digest', + 'hidden-service-dir', + 'protocols', + 'allow-single-hop-exits', + 'ntor-onion-key', +) + +DEFAULT_IPV6_EXIT_POLICY = stem.exit_policy.MicroExitPolicy('reject 1-65535') +REJECT_ALL_POLICY = stem.exit_policy.ExitPolicy('reject *:*') + + +def _parse_file(descriptor_file, is_bridge = False, validate = False, **kwargs): + """ + Iterates over the server descriptors in a file. + + :param file descriptor_file: file with descriptor content + :param bool is_bridge: parses the file as being a bridge descriptor + :param bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :param dict kwargs: additional arguments for the descriptor constructor + + :returns: iterator for ServerDescriptor instances in the file + + :raises: + * **ValueError** if the contents is malformed and validate is True + * **IOError** if the file can't be read + """ + + # Handler for relay descriptors + # + # Cached descriptors consist of annotations followed by the descriptor + # itself. For instance... + # + # @downloaded-at 2012-03-14 16:31:05 + # @source "145.53.65.130" + # router caerSidi 71.35.143.157 9001 0 0 + # platform Tor 0.2.1.30 on Linux x86_64 + # + # router-signature + # -----BEGIN SIGNATURE----- + # + # -----END SIGNATURE----- + # + # Metrics descriptor files are the same, but lack any annotations. The + # following simply does the following... + # + # - parse as annotations until we get to 'router' + # - parse as descriptor content until we get to 'router-signature' followed + # by the end of the signature block + # - construct a descriptor and provide it back to the caller + # + # Any annotations after the last server descriptor is ignored (never provided + # to the caller). 
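+  #
+  # Editor's sketch (illustrative, not upstream stem code): callers normally
+  # reach this through the public stem.descriptor.parse_file entry point rather
+  # than invoking this helper directly, along the lines of...
+  #
+  #   import stem.descriptor
+  #
+  #   with open('cached-descriptors', 'rb') as descriptor_file:
+  #     for desc in stem.descriptor.parse_file(descriptor_file, 'server-descriptor 1.0'):
+  #       print(desc.nickname, desc.address, desc.or_port)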
+ + while True: + annotations = _read_until_keywords('router', descriptor_file) + + if not is_bridge: + descriptor_content = _read_until_keywords('router-signature', descriptor_file) + + # we've reached the 'router-signature', now include the pgp style block + + block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0] + descriptor_content += _read_until_keywords(block_end_prefix, descriptor_file, True) + else: + descriptor_content = _read_until_keywords('router-digest', descriptor_file, True) + + if descriptor_content: + if descriptor_content[0].startswith(b'@type'): + descriptor_content = descriptor_content[1:] + + # strip newlines from annotations + annotations = list(map(bytes.strip, annotations)) + + descriptor_text = bytes.join(b'', descriptor_content) + + if is_bridge: + yield BridgeDescriptor(descriptor_text, validate, annotations, **kwargs) + else: + yield RelayDescriptor(descriptor_text, validate, annotations, **kwargs) + else: + if validate and annotations: + orphaned_annotations = stem.util.str_tools._to_unicode(b'\n'.join(annotations)) + raise ValueError('Content conform to being a server descriptor:\n%s' % orphaned_annotations) + + break # done parsing descriptors + + +def _parse_router_line(descriptor, entries): + # "router" nickname address ORPort SocksPort DirPort + + value = _value('router', entries) + router_comp = value.split() + + if len(router_comp) < 5: + raise ValueError('Router line must have five values: router %s' % value) + elif not stem.util.tor_tools.is_valid_nickname(router_comp[0]): + raise ValueError("Router line entry isn't a valid nickname: %s" % router_comp[0]) + elif not stem.util.connection.is_valid_ipv4_address(router_comp[1]): + raise ValueError("Router line entry isn't a valid IPv4 address: %s" % router_comp[1]) + elif not stem.util.connection.is_valid_port(router_comp[2], allow_zero = True): + raise ValueError("Router line's ORPort is invalid: %s" % router_comp[2]) + elif not stem.util.connection.is_valid_port(router_comp[3], allow_zero = True): + raise ValueError("Router line's SocksPort is invalid: %s" % router_comp[3]) + elif not stem.util.connection.is_valid_port(router_comp[4], allow_zero = True): + raise ValueError("Router line's DirPort is invalid: %s" % router_comp[4]) + + descriptor.nickname = router_comp[0] + descriptor.address = router_comp[1] + descriptor.or_port = int(router_comp[2]) + descriptor.socks_port = None if router_comp[3] == '0' else int(router_comp[3]) + descriptor.dir_port = None if router_comp[4] == '0' else int(router_comp[4]) + + +def _parse_bandwidth_line(descriptor, entries): + # "bandwidth" bandwidth-avg bandwidth-burst bandwidth-observed + + value = _value('bandwidth', entries) + bandwidth_comp = value.split() + + if len(bandwidth_comp) < 3: + raise ValueError('Bandwidth line must have three values: bandwidth %s' % value) + elif not bandwidth_comp[0].isdigit(): + raise ValueError("Bandwidth line's average rate isn't numeric: %s" % bandwidth_comp[0]) + elif not bandwidth_comp[1].isdigit(): + raise ValueError("Bandwidth line's burst rate isn't numeric: %s" % bandwidth_comp[1]) + elif not bandwidth_comp[2].isdigit(): + raise ValueError("Bandwidth line's observed rate isn't numeric: %s" % bandwidth_comp[2]) + + descriptor.average_bandwidth = int(bandwidth_comp[0]) + descriptor.burst_bandwidth = int(bandwidth_comp[1]) + descriptor.observed_bandwidth = int(bandwidth_comp[2]) + + +def _parse_platform_line(descriptor, entries): + # "platform" string + + _parse_bytes_line('platform', 'platform')(descriptor, entries) + + # The 
platform attribute was set earlier. This line can contain any + # arbitrary data, but tor seems to report its version followed by the + # os like the following... + # + # platform Tor 0.2.2.35 (git-73ff13ab3cc9570d) on Linux x86_64 + # + # There's no guarantee that we'll be able to pick these out the + # version, but might as well try to save our caller the effort. + + value = _value('platform', entries) + platform_match = re.match('^(?:node-)?Tor (\S*).* on (.*)$', value) + + if platform_match: + version_str, descriptor.operating_system = platform_match.groups() + + try: + descriptor.tor_version = stem.version._get_version(version_str) + except ValueError: + pass + + +def _parse_fingerprint_line(descriptor, entries): + # This is forty hex digits split into space separated groups of four. + # Checking that we match this pattern. + + value = _value('fingerprint', entries) + fingerprint = value.replace(' ', '') + + for grouping in value.split(' '): + if len(grouping) != 4: + raise ValueError('Fingerprint line should have groupings of four hex digits: %s' % value) + + if not stem.util.tor_tools.is_valid_fingerprint(fingerprint): + raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value) + + descriptor.fingerprint = fingerprint + + +def _parse_hibernating_line(descriptor, entries): + # "hibernating" 0|1 (in practice only set if one) + + value = _value('hibernating', entries) + + if value not in ('0', '1'): + raise ValueError('Hibernating line had an invalid value, must be zero or one: %s' % value) + + descriptor.hibernating = value == '1' + + +def _parse_hidden_service_dir_line(descriptor, entries): + value = _value('hidden-service-dir', entries) + + if value: + descriptor.hidden_service_dir = value.split(' ') + else: + descriptor.hidden_service_dir = ['2'] + + +def _parse_uptime_line(descriptor, entries): + # We need to be tolerant of negative uptimes to accommodate a past tor + # bug... + # + # Changes in version 0.1.2.7-alpha - 2007-02-06 + # - If our system clock jumps back in time, don't publish a negative + # uptime in the descriptor. Also, don't let the global rate limiting + # buckets go absurdly negative. + # + # After parsing all of the attributes we'll double check that negative + # uptimes only occurred prior to this fix. 
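+  #
+  # Editor's note (illustrative; the actual check lives in ServerDescriptor.__init__
+  # below): the constraint enforced there is roughly...
+  #
+  #   if uptime < 0 and tor_version >= stem.version.Version('0.1.2.7'):
+  #     raise ValueError('negative uptime is only expected from pre-0.1.2.7 tor')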
+ + value = _value('uptime', entries) + + try: + descriptor.uptime = int(value) + except ValueError: + raise ValueError('Uptime line must have an integer value: %s' % value) + + +def _parse_protocols_line(descriptor, entries): + value = _value('protocols', entries) + protocols_match = re.match('^Link (.*) Circuit (.*)$', value) + + if not protocols_match: + raise ValueError('Protocols line did not match the expected pattern: protocols %s' % value) + + link_versions, circuit_versions = protocols_match.groups() + descriptor.link_protocols = link_versions.split(' ') + descriptor.circuit_protocols = circuit_versions.split(' ') + + +def _parse_or_address_line(descriptor, entries): + all_values = _values('or-address', entries) + or_addresses = [] + + for entry in all_values: + line = 'or-address %s' % entry + + if ':' not in entry: + raise ValueError('or-address line missing a colon: %s' % line) + + address, port = entry.rsplit(':', 1) + is_ipv6 = address.startswith('[') and address.endswith(']') + + if is_ipv6: + address = address[1:-1] # remove brackets + + if not ((not is_ipv6 and stem.util.connection.is_valid_ipv4_address(address)) or + (is_ipv6 and stem.util.connection.is_valid_ipv6_address(address))): + raise ValueError('or-address line has a malformed address: %s' % line) + + if not stem.util.connection.is_valid_port(port): + raise ValueError('or-address line has a malformed port: %s' % line) + + or_addresses.append((address, int(port), is_ipv6)) + + descriptor.or_addresses = or_addresses + + +def _parse_history_line(keyword, history_end_attribute, history_interval_attribute, history_values_attribute, descriptor, entries): + value = _value(keyword, entries) + timestamp, interval, remainder = stem.descriptor.extrainfo_descriptor._parse_timestamp_and_interval(keyword, value) + + try: + if remainder: + history_values = [int(entry) for entry in remainder.split(',')] + else: + history_values = [] + except ValueError: + raise ValueError('%s line has non-numeric values: %s %s' % (keyword, keyword, value)) + + setattr(descriptor, history_end_attribute, timestamp) + setattr(descriptor, history_interval_attribute, interval) + setattr(descriptor, history_values_attribute, history_values) + + +def _parse_exit_policy(descriptor, entries): + if hasattr(descriptor, '_unparsed_exit_policy'): + if descriptor._unparsed_exit_policy == [str_type('reject *:*')]: + descriptor.exit_policy = REJECT_ALL_POLICY + else: + descriptor.exit_policy = stem.exit_policy.ExitPolicy(*descriptor._unparsed_exit_policy) + + del descriptor._unparsed_exit_policy + + +_parse_contact_line = _parse_bytes_line('contact', 'contact') +_parse_published_line = _parse_timestamp_line('published', 'published') +_parse_extrainfo_digest_line = _parse_forty_character_hex('extra-info-digest', 'extra_info_digest') +_parse_read_history_line = functools.partial(_parse_history_line, 'read-history', 'read_history_end', 'read_history_interval', 'read_history_values') +_parse_write_history_line = functools.partial(_parse_history_line, 'write-history', 'write_history_end', 'write_history_interval', 'write_history_values') +_parse_ipv6_policy_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('ipv6-policy', entries))) +_parse_allow_single_hop_exits_line = lambda descriptor, entries: setattr(descriptor, 'allow_single_hop_exits', 'allow_single_hop_exits' in entries) +_parse_caches_extra_info_line = lambda descriptor, entries: setattr(descriptor, 'extra_info_cache', 'extra_info_cache' in 
entries) +_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', set(_value('family', entries).split(' '))) +_parse_eventdns_line = lambda descriptor, entries: setattr(descriptor, 'eventdns', _value('eventdns', entries) == '1') +_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY') +_parse_signing_key_line = _parse_key_block('signing-key', 'signing_key', 'RSA PUBLIC KEY') +_parse_router_signature_line = _parse_key_block('router-signature', 'signature', 'SIGNATURE') +_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key') +_parse_router_digest_line = _parse_forty_character_hex('router-digest', '_digest') + + +class ServerDescriptor(Descriptor): + """ + Common parent for server descriptors. + + :var str nickname: **\*** relay's nickname + :var str fingerprint: identity key fingerprint + :var datetime published: **\*** time in UTC when this descriptor was made + + :var str address: **\*** IPv4 address of the relay + :var int or_port: **\*** port used for relaying + :var int socks_port: **\*** port used as client (deprecated, always **None**) + :var int dir_port: **\*** port used for descriptor mirroring + + :var bytes platform: line with operating system and tor version + :var stem.version.Version tor_version: version of tor + :var str operating_system: operating system + :var int uptime: uptime when published in seconds + :var bytes contact: contact information + :var stem.exit_policy.ExitPolicy exit_policy: **\*** stated exit policy + :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\*** exit policy for IPv6 + :var set family: **\*** nicknames or fingerprints of declared family + + :var int average_bandwidth: **\*** average rate it's willing to relay in bytes/s + :var int burst_bandwidth: **\*** burst rate it's willing to relay in bytes/s + :var int observed_bandwidth: **\*** estimated capacity based on usage in bytes/s + + :var list link_protocols: link protocols supported by the relay + :var list circuit_protocols: circuit protocols supported by the relay + :var bool hibernating: **\*** hibernating when published + :var bool allow_single_hop_exits: **\*** flag if single hop exiting is allowed + :var bool extra_info_cache: **\*** flag if a mirror for extra-info documents + :var str extra_info_digest: upper-case hex encoded digest of our extra-info document + :var bool eventdns: flag for evdns backend (deprecated, always unset) + :var list or_addresses: **\*** alternative for our address/or_port + attributes, each entry is a tuple of the form (address (**str**), port + (**int**), is_ipv6 (**bool**)) + + Deprecated, moved to extra-info descriptor... 
+ + :var datetime read_history_end: end of the sampling interval + :var int read_history_interval: seconds per interval + :var list read_history_values: bytes read during each interval + + :var datetime write_history_end: end of the sampling interval + :var int write_history_interval: seconds per interval + :var list write_history_values: bytes written during each interval + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as **None** if undefined + """ + + ATTRIBUTES = { + 'nickname': (None, _parse_router_line), + 'fingerprint': (None, _parse_fingerprint_line), + 'contact': (None, _parse_contact_line), + 'published': (None, _parse_published_line), + 'exit_policy': (None, _parse_exit_policy), + + 'address': (None, _parse_router_line), + 'or_port': (None, _parse_router_line), + 'socks_port': (None, _parse_router_line), + 'dir_port': (None, _parse_router_line), + + 'platform': (None, _parse_platform_line), + 'tor_version': (None, _parse_platform_line), + 'operating_system': (None, _parse_platform_line), + 'uptime': (None, _parse_uptime_line), + 'exit_policy_v6': (DEFAULT_IPV6_EXIT_POLICY, _parse_ipv6_policy_line), + 'family': (set(), _parse_family_line), + + 'average_bandwidth': (None, _parse_bandwidth_line), + 'burst_bandwidth': (None, _parse_bandwidth_line), + 'observed_bandwidth': (None, _parse_bandwidth_line), + + 'link_protocols': (None, _parse_protocols_line), + 'circuit_protocols': (None, _parse_protocols_line), + 'hibernating': (False, _parse_hibernating_line), + 'allow_single_hop_exits': (False, _parse_allow_single_hop_exits_line), + 'extra_info_cache': (False, _parse_caches_extra_info_line), + 'extra_info_digest': (None, _parse_extrainfo_digest_line), + 'hidden_service_dir': (None, _parse_hidden_service_dir_line), + 'eventdns': (None, _parse_eventdns_line), + 'or_addresses': ([], _parse_or_address_line), + + 'read_history_end': (None, _parse_read_history_line), + 'read_history_interval': (None, _parse_read_history_line), + 'read_history_values': (None, _parse_read_history_line), + + 'write_history_end': (None, _parse_write_history_line), + 'write_history_interval': (None, _parse_write_history_line), + 'write_history_values': (None, _parse_write_history_line), + } + + PARSER_FOR_LINE = { + 'router': _parse_router_line, + 'bandwidth': _parse_bandwidth_line, + 'platform': _parse_platform_line, + 'published': _parse_published_line, + 'fingerprint': _parse_fingerprint_line, + 'contact': _parse_contact_line, + 'hibernating': _parse_hibernating_line, + 'extra-info-digest': _parse_extrainfo_digest_line, + 'hidden-service-dir': _parse_hidden_service_dir_line, + 'uptime': _parse_uptime_line, + 'protocols': _parse_protocols_line, + 'or-address': _parse_or_address_line, + 'read-history': _parse_read_history_line, + 'write-history': _parse_write_history_line, + 'ipv6-policy': _parse_ipv6_policy_line, + 'allow-single-hop-exits': _parse_allow_single_hop_exits_line, + 'caches-extra-info': _parse_caches_extra_info_line, + 'family': _parse_family_line, + 'eventdns': _parse_eventdns_line, + } + + def __init__(self, raw_contents, validate = False, annotations = None): + """ + Server descriptor constructor, created from an individual relay's + descriptor content (as provided by 'GETINFO desc/*', cached descriptors, + and metrics). + + By default this validates the descriptor's content as it's parsed. This + validation can be disables to either improve performance or be accepting of + malformed data. 
+ + :param str raw_contents: descriptor content provided by the relay + :param bool validate: checks the validity of the descriptor's content if + **True**, skips these checks otherwise + :param list annotations: lines that appeared prior to the descriptor + + :raises: **ValueError** if the contents is malformed and validate is True + """ + + super(ServerDescriptor, self).__init__(raw_contents, lazy_load = not validate) + self._annotation_lines = annotations if annotations else [] + + # A descriptor contains a series of 'keyword lines' which are simply a + # keyword followed by an optional value. Lines can also be followed by a + # signature block. + # + # We care about the ordering of 'accept' and 'reject' entries because this + # influences the resulting exit policy, but for everything else the order + # does not matter so breaking it into key / value pairs. + + entries, self._unparsed_exit_policy = _get_descriptor_components(stem.util.str_tools._to_unicode(raw_contents), validate, ('accept', 'reject')) + + if validate: + self._parse(entries, validate) + + _parse_exit_policy(self, entries) + + # if we have a negative uptime and a tor version that shouldn't exhibit + # this bug then fail validation + + if validate and self.uptime and self.tor_version: + if self.uptime < 0 and self.tor_version >= stem.version.Version('0.1.2.7'): + raise ValueError("Descriptor for version '%s' had a negative uptime value: %i" % (self.tor_version, self.uptime)) + + self._check_constraints(entries) + else: + self._entries = entries + + def digest(self): + """ + Provides the hex encoded sha1 of our content. This value is part of the + network status entry for this relay. + + :returns: **unicode** with the upper-case hex digest value for this server descriptor + """ + + raise NotImplementedError('Unsupported Operation: this should be implemented by the ServerDescriptor subclass') + + @lru_cache() + def get_annotations(self): + """ + Provides content that appeared prior to the descriptor. If this comes from + the cached-descriptors file then this commonly contains content like... + + :: + + @downloaded-at 2012-03-18 21:18:29 + @source "173.254.216.66" + + :returns: **dict** with the key/value pairs in our annotations + """ + + annotation_dict = {} + + for line in self._annotation_lines: + if b' ' in line: + key, value = line.split(b' ', 1) + annotation_dict[key] = value + else: + annotation_dict[line] = None + + return annotation_dict + + def get_annotation_lines(self): + """ + Provides the lines of content that appeared prior to the descriptor. This + is the same as the + :func:`~stem.descriptor.server_descriptor.ServerDescriptor.get_annotations` + results, but with the unparsed lines and ordering retained. + + :returns: **list** with the lines of annotation that came before this descriptor + """ + + return self._annotation_lines + + def _check_constraints(self, entries): + """ + Does a basic check that the entries conform to this descriptor type's + constraints. 
+ + :param dict entries: keyword => (value, pgp key) entries + + :raises: **ValueError** if an issue arises in validation + """ + + for keyword in self._required_fields(): + if keyword not in entries: + raise ValueError("Descriptor must have a '%s' entry" % keyword) + + for keyword in self._single_fields(): + if keyword in entries and len(entries[keyword]) > 1: + raise ValueError("The '%s' entry can only appear once in a descriptor" % keyword) + + expected_first_keyword = self._first_keyword() + if expected_first_keyword and expected_first_keyword != list(entries.keys())[0]: + raise ValueError("Descriptor must start with a '%s' entry" % expected_first_keyword) + + expected_last_keyword = self._last_keyword() + if expected_last_keyword and expected_last_keyword != list(entries.keys())[-1]: + raise ValueError("Descriptor must end with a '%s' entry" % expected_last_keyword) + + if not self.exit_policy: + raise ValueError("Descriptor must have at least one 'accept' or 'reject' entry") + + # Constraints that the descriptor must meet to be valid. These can be None if + # not applicable. + + def _required_fields(self): + return REQUIRED_FIELDS + + def _single_fields(self): + return REQUIRED_FIELDS + SINGLE_FIELDS + + def _first_keyword(self): + return 'router' + + def _last_keyword(self): + return 'router-signature' + + +class RelayDescriptor(ServerDescriptor): + """ + Server descriptor (`descriptor specification + `_) + + :var str onion_key: **\*** key used to encrypt EXTEND cells + :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol + :var str signing_key: **\*** relay's long-term identity key + :var str signature: **\*** signature for this descriptor + + **\*** attribute is required when we're parsed with validation + """ + + ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{ + 'onion_key': (None, _parse_onion_key_line), + 'ntor_onion_key': (None, _parse_ntor_onion_key_line), + 'signing_key': (None, _parse_signing_key_line), + 'signature': (None, _parse_router_signature_line), + }) + + PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{ + 'onion-key': _parse_onion_key_line, + 'ntor-onion-key': _parse_ntor_onion_key_line, + 'signing-key': _parse_signing_key_line, + 'router-signature': _parse_router_signature_line, + }) + + def __init__(self, raw_contents, validate = False, annotations = None): + super(RelayDescriptor, self).__init__(raw_contents, validate, annotations) + + if validate: + if self.fingerprint: + key_hash = hashlib.sha1(_bytes_for_block(self.signing_key)).hexdigest() + + if key_hash != self.fingerprint.lower(): + raise ValueError('Fingerprint does not match the hash of our signing key (fingerprint: %s, signing key hash: %s)' % (self.fingerprint.lower(), key_hash)) + + if stem.prereq.is_crypto_available(): + signed_digest = self._digest_for_signature(self.signing_key, self.signature) + + if signed_digest != self.digest(): + raise ValueError('Decrypted digest does not match local digest (calculated: %s, local: %s)' % (signed_digest, self.digest())) + + @lru_cache() + def digest(self): + """ + Provides the digest of our descriptor's content. 
+ + :returns: the digest string encoded in uppercase hex + + :raises: ValueError if the digest canot be calculated + """ + + return self._digest_for_content(b'router ', b'\nrouter-signature\n') + + def _compare(self, other, method): + if not isinstance(other, RelayDescriptor): + return False + + return method(str(self).strip(), str(other).strip()) + + def __hash__(self): + return hash(str(self).strip()) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) + + +class BridgeDescriptor(ServerDescriptor): + """ + Bridge descriptor (`bridge descriptor specification + `_) + """ + + ATTRIBUTES = dict(ServerDescriptor.ATTRIBUTES, **{ + '_digest': (None, _parse_router_digest_line), + }) + + PARSER_FOR_LINE = dict(ServerDescriptor.PARSER_FOR_LINE, **{ + 'router-digest': _parse_router_digest_line, + }) + + def digest(self): + return self._digest + + def is_scrubbed(self): + """ + Checks if we've been properly scrubbed in accordance with the `bridge + descriptor specification + `_. + Validation is a moving target so this may not be fully up to date. + + :returns: **True** if we're scrubbed, **False** otherwise + """ + + return self.get_scrubbing_issues() == [] + + @lru_cache() + def get_scrubbing_issues(self): + """ + Provides issues with our scrubbing. + + :returns: **list** of strings which describe issues we have with our + scrubbing, this list is empty if we're properly scrubbed + """ + + issues = [] + + if not self.address.startswith('10.'): + issues.append("Router line's address should be scrubbed to be '10.x.x.x': %s" % self.address) + + if self.contact and self.contact != 'somebody': + issues.append("Contact line should be scrubbed to be 'somebody', but instead had '%s'" % self.contact) + + for address, _, is_ipv6 in self.or_addresses: + if not is_ipv6 and not address.startswith('10.'): + issues.append("or-address line's address should be scrubbed to be '10.x.x.x': %s" % address) + elif is_ipv6 and not address.startswith('fd9f:2e19:3bcf::'): + # TODO: this check isn't quite right because we aren't checking that + # the next grouping of hex digits contains 1-2 digits + issues.append("or-address line's address should be scrubbed to be 'fd9f:2e19:3bcf::xx:xxxx': %s" % address) + + for line in self.get_unrecognized_lines(): + if line.startswith('onion-key '): + issues.append('Bridge descriptors should have their onion-key scrubbed: %s' % line) + elif line.startswith('signing-key '): + issues.append('Bridge descriptors should have their signing-key scrubbed: %s' % line) + elif line.startswith('router-signature '): + issues.append('Bridge descriptors should have their signature scrubbed: %s' % line) + + return issues + + def _required_fields(self): + # bridge required fields are the same as a relay descriptor, minus items + # excluded according to the format page + + excluded_fields = [ + 'onion-key', + 'signing-key', + 'router-signature', + ] + + included_fields = [ + 'router-digest', + ] + + return tuple(included_fields + [f for f in REQUIRED_FIELDS if f not in excluded_fields]) + + def _single_fields(self): + return self._required_fields() + SINGLE_FIELDS + + def _last_keyword(self): + return None + + def _compare(self, other, method): + if not isinstance(other, BridgeDescriptor): + return False + + return method(str(self).strip(), str(other).strip()) + + def __hash__(self): + return hash(str(self).strip()) + + 
def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __lt__(self, other): + return self._compare(other, lambda s, o: s < o) + + def __le__(self, other): + return self._compare(other, lambda s, o: s <= o) diff --git a/Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py b/Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py new file mode 100644 index 0000000..75a252b --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/descriptor/tordnsel.py @@ -0,0 +1,117 @@ +# Copyright 2013-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parsing for `TorDNSEL `_ +exit list files. + +:: + + TorDNSEL - Exit list provided by TorDNSEL +""" + +import stem.util.connection +import stem.util.str_tools +import stem.util.tor_tools + +from stem.descriptor import ( + Descriptor, + _read_until_keywords, + _get_descriptor_components, +) + + +def _parse_file(tordnsel_file, validate = False, **kwargs): + """ + Iterates over a tordnsel file. + + :returns: iterator for :class:`~stem.descriptor.tordnsel.TorDNSEL` + instances in the file + + :raises: + * **ValueError** if the contents is malformed and validate is **True** + * **IOError** if the file can't be read + """ + + # skip content prior to the first ExitNode + _read_until_keywords('ExitNode', tordnsel_file, skip = True) + + while True: + contents = _read_until_keywords('ExitAddress', tordnsel_file) + contents += _read_until_keywords('ExitNode', tordnsel_file) + + if contents: + yield TorDNSEL(bytes.join(b'', contents), validate, **kwargs) + else: + break # done parsing file + + +class TorDNSEL(Descriptor): + """ + TorDNSEL descriptor (`exitlist specification + `_) + + :var str fingerprint: **\*** authority's fingerprint + :var datetime published: **\*** time in UTC when this descriptor was made + :var datetime last_status: **\*** time in UTC when the relay was seen in a v2 network status + :var list exit_addresses: **\*** list of (str address, datetime date) tuples consisting of the found IPv4 exit address and the time + + **\*** attribute is either required when we're parsed with validation or has + a default value, others are left as **None** if undefined + """ + + def __init__(self, raw_contents, validate): + super(TorDNSEL, self).__init__(raw_contents) + raw_contents = stem.util.str_tools._to_unicode(raw_contents) + entries = _get_descriptor_components(raw_contents, validate) + + self.fingerprint = None + self.published = None + self.last_status = None + self.exit_addresses = [] + + self._parse(entries, validate) + + def _parse(self, entries, validate): + + for keyword, values in list(entries.items()): + value, block_type, block_content = values[0] + + if validate and block_content: + raise ValueError('Unexpected block content: %s' % block_content) + + if keyword == 'ExitNode': + if validate and not stem.util.tor_tools.is_valid_fingerprint(value): + raise ValueError('Tor relay fingerprints consist of forty hex digits: %s' % value) + + self.fingerprint = value + elif keyword == 'Published': + try: + self.published = stem.util.str_tools._parse_timestamp(value) + except ValueError: + if validate: + raise ValueError("Published time wasn't parsable: %s" % value) + elif keyword == 'LastStatus': + try: + self.last_status = stem.util.str_tools._parse_timestamp(value) + except ValueError: + if validate: + raise ValueError("LastStatus time wasn't parsable: %s" % value) + elif keyword == 'ExitAddress': + for value, block_type, block_content in values: + address, date = 
value.split(' ', 1) + + if validate: + if not stem.util.connection.is_valid_ipv4_address(address): + raise ValueError("ExitAddress isn't a valid IPv4 address: %s" % address) + elif block_content: + raise ValueError('Unexpected block content: %s' % block_content) + + try: + date = stem.util.str_tools._parse_timestamp(date) + self.exit_addresses.append((address, date)) + except ValueError: + if validate: + raise ValueError("ExitAddress found time wasn't parsable: %s" % value) + elif validate: + raise ValueError('Unrecognized keyword: %s' % keyword) diff --git a/Shared/lib/python3.4/site-packages/stem/exit_policy.py b/Shared/lib/python3.4/site-packages/stem/exit_policy.py new file mode 100644 index 0000000..62b9a12 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/exit_policy.py @@ -0,0 +1,1094 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Representation of tor exit policies. These can be easily used to check if +exiting to a destination is permissible or not. For instance... + +:: + + >>> from stem.exit_policy import ExitPolicy, MicroExitPolicy + >>> policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*') + >>> print(policy) + accept *:80, accept *:443, reject *:* + >>> print(policy.summary()) + accept 80, 443 + >>> policy.can_exit_to('75.119.206.243', 80) + True + + >>> policy = MicroExitPolicy('accept 80,443') + >>> print(policy) + accept 80,443 + >>> policy.can_exit_to('75.119.206.243', 80) + True + +:: + + ExitPolicy - Exit policy for a Tor relay + |- MicroExitPolicy - Microdescriptor exit policy + | + |- can_exit_to - check if exiting to this destination is allowed or not + |- is_exiting_allowed - check if any exiting is allowed + |- summary - provides a short label, similar to a microdescriptor + |- has_private - checks if policy has anything expanded from the 'private' keyword + |- strip_private - provides a copy of the policy without 'private' entries + |- has_default - checks if policy ends with the defaultly appended suffix + |- strip_default - provides a copy of the policy without the default suffix + |- __str__ - string representation + +- __iter__ - ExitPolicyRule entries that this contains + + ExitPolicyRule - Single rule of an exit policy chain + |- MicroExitPolicyRule - Single rule for a microdescriptor policy + | + |- is_address_wildcard - checks if we'll accept any address + |- is_port_wildcard - checks if we'll accept any port + |- get_address_type - provides the protocol our ip address belongs to + |- is_match - checks if we match a given destination + |- get_mask - provides the address representation of our mask + |- get_masked_bits - provides the bit representation of our mask + |- is_default - flag indicating if this was part of the default end of a policy + |- is_private - flag indicating if this was expanded from a 'private' keyword + +- __str__ - string representation for this rule + + get_config_policy - provides the ExitPolicy based on torrc rules + +.. data:: AddressType (enum) + + Enumerations for IP address types that can be in an exit policy. 
+ + ============ =========== + AddressType Description + ============ =========== + **WILDCARD** any address of either IPv4 or IPv6 + **IPv4** IPv4 address + **IPv6** IPv6 address + ============ =========== +""" + +from __future__ import absolute_import + +import socket +import zlib + +import stem.prereq +import stem.util.connection +import stem.util.enum +import stem.util.str_tools + +from stem import str_type + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + +AddressType = stem.util.enum.Enum(('WILDCARD', 'Wildcard'), ('IPv4', 'IPv4'), ('IPv6', 'IPv6')) + +# Addresses aliased by the 'private' policy. From the tor man page... +# +# To specify all internal and link-local networks (including 0.0.0.0/8, +# 169.254.0.0/16, 127.0.0.0/8, 192.168.0.0/16, 10.0.0.0/8, and 172.16.0.0/12), +# you can use the 'private' alias instead of an address. + +PRIVATE_ADDRESSES = ( + '0.0.0.0/8', + '169.254.0.0/16', + '127.0.0.0/8', + '192.168.0.0/16', + '10.0.0.0/8', + '172.16.0.0/12', +) + + +def get_config_policy(rules, ip_address = None): + """ + Converts an ExitPolicy found in a torrc to a proper exit pattern. This + accounts for... + + * ports being optional + * the 'private' keyword + + :param str,list rules: comma separated rules or list to be converted + :param str ip_address: this relay's IP address for the 'private' policy if + it's present, this defaults to the local address + + :returns: :class:`~stem.exit_policy.ExitPolicy` reflected by the rules + + :raises: **ValueError** if input isn't a valid tor exit policy + """ + + if ip_address and not (stem.util.connection.is_valid_ipv4_address(ip_address) or stem.util.connection.is_valid_ipv6_address(ip_address)): + raise ValueError("%s isn't a valid IP address" % ip_address) + + if isinstance(rules, (bytes, str_type)): + rules = rules.split(',') + + result = [] + + for rule in rules: + rule = rule.strip() + + if not rule: + continue + + if ':' not in rule: + rule = '%s:*' % rule + + if 'private' in rule: + acceptance = rule.split(' ', 1)[0] + port = rule.split(':', 1)[1] + addresses = list(PRIVATE_ADDRESSES) + + if ip_address: + addresses.append(ip_address) + else: + try: + addresses.append(socket.gethostbyname(socket.gethostname())) + except: + pass # we might not have a network connection + + for private_addr in addresses: + result.append(ExitPolicyRule('%s %s:%s' % (acceptance, private_addr, port))) + else: + result.append(ExitPolicyRule(rule)) + + # torrc policies can apply to IPv4 or IPv6, so we need to make sure /0 + # addresses aren't treated as being a full wildcard + + for rule in result: + rule._submask_wildcard = False + + return ExitPolicy(*result) + + +def _flag_private_rules(rules): + """ + Determine if part of our policy was expanded from the 'private' keyword. This + doesn't differentiate if this actually came from the 'private' keyword or a + series of rules exactly matching it. + """ + + matches = [] + + for i, rule in enumerate(rules): + if i + len(PRIVATE_ADDRESSES) + 1 > len(rules): + break + + rule_str = '%s/%s' % (rule.address, rule.get_masked_bits()) + + if rule_str == PRIVATE_ADDRESSES[0]: + matches.append(i) + + for start_index in matches: + # To match the private policy the following must all be true... 
+ # + # * series of addresses and bit masks match PRIVATE_ADDRESSES + # * all rules have the same port range and acceptance + # * all rules have the same acceptance (all accept or reject entries) + + rule_set = rules[start_index:start_index + len(PRIVATE_ADDRESSES) + 1] + is_match = True + + min_port, max_port = rule_set[0].min_port, rule_set[0].max_port + is_accept = rule_set[0].is_accept + + for i, rule in enumerate(rule_set[:-1]): + rule_str = '%s/%s' % (rule.address, rule.get_masked_bits()) + + if rule_str != PRIVATE_ADDRESSES[i] or rule.min_port != min_port or rule.max_port != max_port or rule.is_accept != is_accept: + is_match = False + break + + # The last rule is for the relay's public address, so it's dynamic. + + last_rule = rule_set[-1] + + if last_rule.is_address_wildcard() or last_rule.min_port != min_port or last_rule.max_port != max_port or last_rule.is_accept != is_accept: + is_match = False + if is_match: + for rule in rule_set: + rule._is_private = True + + +def _flag_default_rules(rules): + """ + Determine if part of our policy ends with the defaultly appended suffix. + """ + + if len(rules) >= len(DEFAULT_POLICY_RULES): + rules_suffix = tuple(rules[-len(DEFAULT_POLICY_RULES):]) + + if rules_suffix == DEFAULT_POLICY_RULES: + for rule in rules_suffix: + rule._is_default_suffix = True + + +class ExitPolicy(object): + """ + Policy for the destinations that a relay allows or denies exiting to. This + is, in effect, just a list of :class:`~stem.exit_policy.ExitPolicyRule` + entries. + + :param list rules: **str** or :class:`~stem.exit_policy.ExitPolicyRule` + entries that make up this policy + """ + + def __init__(self, *rules): + # sanity check the types + + for rule in rules: + if not isinstance(rule, (bytes, str_type, ExitPolicyRule)): + raise TypeError('Exit policy rules can only contain strings or ExitPolicyRules, got a %s (%s)' % (type(rule), rules)) + + # Unparsed representation of the rules we were constructed with. Our + # _get_rules() method consumes this to provide ExitPolicyRule instances. + # This is lazily evaluated so we don't need to actually parse the exit + # policy if it's never used. + + is_all_str = True + + for rule in rules: + if not isinstance(rule, (bytes, str_type)): + is_all_str = False + + if rules and is_all_str: + byte_rules = [stem.util.str_tools._to_bytes(r) for r in rules] + self._input_rules = zlib.compress(b','.join(byte_rules)) + else: + self._input_rules = rules + + self._rules = None + self._hash = None + + # Result when no rules apply. According to the spec policies default to 'is + # allowed', but our microdescriptor policy subclass might want to change + # this. + + self._is_allowed_default = True + + @lru_cache() + def can_exit_to(self, address = None, port = None, strict = False): + """ + Checks if this policy allows exiting to a given destination or not. If the + address or port is omitted then this will check if we're allowed to exit to + any instances of the defined address or port. 
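+
+    For example (an editor's illustration mirroring the module example above,
+    not an upstream stem doctest)...
+
+    ::
+
+      >>> policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*')
+      >>> policy.can_exit_to('75.119.206.243', 80)
+      True
+      >>> policy.can_exit_to('75.119.206.243', 22)
+      False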
+ + :param str address: IPv4 or IPv6 address (with or without brackets) + :param int port: port number + :param bool strict: if the address or port is excluded then check if we can + exit to **all** instances of the defined address or port + + :returns: **True** if exiting to this destination is allowed, **False** otherwise + """ + + for rule in self._get_rules(): + if rule.is_match(address, port, strict): + return rule.is_accept + + return self._is_allowed_default + + @lru_cache() + def is_exiting_allowed(self): + """ + Provides **True** if the policy allows exiting whatsoever, **False** + otherwise. + """ + + rejected_ports = set() + + for rule in self._get_rules(): + if rule.is_accept: + for port in range(rule.min_port, rule.max_port + 1): + if port not in rejected_ports: + return True + elif rule.is_address_wildcard(): + if rule.is_port_wildcard(): + return False + else: + rejected_ports.update(range(rule.min_port, rule.max_port + 1)) + + return self._is_allowed_default + + @lru_cache() + def summary(self): + """ + Provides a short description of our policy chain, similar to a + microdescriptor. This excludes entries that don't cover all IP + addresses, and is either white-list or blacklist policy based on + the final entry. For instance... + + :: + + >>> policy = ExitPolicy('accept *:80', 'accept *:443', 'reject *:*') + >>> policy.summary() + 'accept 80, 443' + + >>> policy = ExitPolicy('accept *:443', 'reject *:1-1024', 'accept *:*') + >>> policy.summary() + 'reject 1-442, 444-1024' + + :returns: **str** with a concise summary for our policy + """ + + # determines if we're a white-list or blacklist + is_whitelist = not self._is_allowed_default + + for rule in self._get_rules(): + if rule.is_address_wildcard() and rule.is_port_wildcard(): + is_whitelist = not rule.is_accept + break + + # Iterates over the policies and adds the the ports we'll return (ie, + # allows if a white-list and rejects if a blacklist). Regardless of a + # port's allow/reject policy, all further entries with that port are + # ignored since policies respect the first matching policy. + + display_ports, skip_ports = [], set() + + for rule in self._get_rules(): + if not rule.is_address_wildcard(): + continue + elif rule.is_port_wildcard(): + break + + for port in range(rule.min_port, rule.max_port + 1): + if port in skip_ports: + continue + + # if accept + white-list or reject + blacklist then add + if rule.is_accept == is_whitelist: + display_ports.append(port) + + # all further entries with this port should be ignored + skip_ports.add(port) + + # convert port list to a list of ranges (ie, ['1-3'] rather than [1, 2, 3]) + if display_ports: + display_ranges, temp_range = [], [] + display_ports.sort() + display_ports.append(None) # ending item to include last range in loop + + for port in display_ports: + if not temp_range or temp_range[-1] + 1 == port: + temp_range.append(port) + else: + if len(temp_range) > 1: + display_ranges.append('%i-%i' % (temp_range[0], temp_range[-1])) + else: + display_ranges.append(str(temp_range[0])) + + temp_range = [port] + else: + # everything for the inverse + is_whitelist = not is_whitelist + display_ranges = ['1-65535'] + + # constructs the summary string + label_prefix = 'accept ' if is_whitelist else 'reject ' + + return (label_prefix + ', '.join(display_ranges)).strip() + + def has_private(self): + """ + Checks if we have any rules expanded from the 'private' keyword. 
Tor + appends these by default to the start of the policy and includes a dynamic + address (the relay's public IP). + + .. versionadded:: 1.3.0 + + :returns: **True** if we have any private rules expanded from the 'private' + keyword, **False** otherwise + """ + + for rule in self._get_rules(): + if rule.is_private(): + return True + + return False + + def strip_private(self): + """ + Provides a copy of this policy without 'private' policy entries. + + .. versionadded:: 1.3.0 + + :returns: **ExitPolicy** without private rules + """ + + return ExitPolicy(*[rule for rule in self._get_rules() if not rule.is_private()]) + + def has_default(self): + """ + Checks if we have the default policy suffix. + + .. versionadded:: 1.3.0 + + :returns: **True** if we have the default policy suffix, **False** otherwise + """ + + for rule in self._get_rules(): + if rule.is_default(): + return True + + return False + + def strip_default(self): + """ + Provides a copy of this policy without the default policy suffix. + + .. versionadded:: 1.3.0 + + :returns: **ExitPolicy** without default rules + """ + + return ExitPolicy(*[rule for rule in self._get_rules() if not rule.is_default()]) + + def _get_rules(self): + if self._rules is None: + rules = [] + is_all_accept, is_all_reject = True, True + + if isinstance(self._input_rules, bytes): + decompressed_rules = zlib.decompress(self._input_rules).split(b',') + else: + decompressed_rules = self._input_rules + + for rule in decompressed_rules: + if isinstance(rule, bytes): + rule = stem.util.str_tools._to_unicode(rule) + + if isinstance(rule, str_type): + rule = ExitPolicyRule(rule.strip()) + + if rule.is_accept: + is_all_reject = False + else: + is_all_accept = False + + rules.append(rule) + + if rule.is_address_wildcard() and rule.is_port_wildcard(): + break # this is a catch-all, no reason to include more + + # If we only have one kind of entry *and* end with a wildcard then + # we might as well use the simpler version. For instance... + # + # reject *:80, reject *:443, reject *:* + # + # ... could also be represented as simply... + # + # reject *:* + # + # This mostly comes up with reject-all policies because the + # 'reject private:*' appends an extra seven rules that have no + # effect. + + if rules and (rules[-1].is_address_wildcard() and rules[-1].is_port_wildcard()): + if is_all_accept: + rules = [ExitPolicyRule('accept *:*')] + elif is_all_reject: + rules = [ExitPolicyRule('reject *:*')] + + _flag_private_rules(rules) + _flag_default_rules(rules) + + self._rules = rules + self._input_rules = None + + return self._rules + + def __len__(self): + return len(self._get_rules()) + + def __iter__(self): + for rule in self._get_rules(): + yield rule + + @lru_cache() + def __str__(self): + return ', '.join([str(rule) for rule in self._get_rules()]) + + def __hash__(self): + if self._hash is None: + my_hash = 0 + + for rule in self._get_rules(): + my_hash *= 1024 + my_hash += hash(rule) + + self._hash = my_hash + + return self._hash + + def __eq__(self, other): + if isinstance(other, ExitPolicy): + return self._get_rules() == list(other) + else: + return False + + +class MicroExitPolicy(ExitPolicy): + """ + Exit policy provided by the microdescriptors. This is a distilled version of + a normal :class:`~stem.exit_policy.ExitPolicy` contains, just consisting of a + list of ports that are either accepted or rejected. For instance... 
+ + :: + + accept 80,443 # only accepts common http ports + reject 1-1024 # only accepts non-privileged ports + + Since these policies are a subset of the exit policy information (lacking IP + ranges) clients can only use them to guess if a relay will accept traffic or + not. To quote the `dir-spec `_ (section 3.2.1)... + + :: + + With microdescriptors, clients don't learn exact exit policies: + clients can only guess whether a relay accepts their request, try the + BEGIN request, and might get end-reason-exit-policy if they guessed + wrong, in which case they'll have to try elsewhere. + + :var bool is_accept: **True** if these are ports that we accept, **False** if + they're ports that we reject + + :param str policy: policy string that describes this policy + """ + + def __init__(self, policy): + # Microdescriptor policies are of the form... + # + # MicrodescriptrPolicy ::= ("accept" / "reject") SP PortList NL + # PortList ::= PortOrRange + # PortList ::= PortList "," PortOrRange + # PortOrRange ::= INT "-" INT / INT + + self._policy = policy + + if policy.startswith('accept'): + self.is_accept = True + elif policy.startswith('reject'): + self.is_accept = False + else: + raise ValueError("A microdescriptor exit policy must start with either 'accept' or 'reject': %s" % policy) + + policy = policy[6:] + + if not policy.startswith(' ') or (len(policy) - 1 != len(policy.lstrip())): + raise ValueError('A microdescriptor exit policy should have a space separating accept/reject from its port list: %s' % self._policy) + + policy = policy[1:] + + # convert our port list into MicroExitPolicyRule + rules = [] + + for port_entry in policy.split(','): + if '-' in port_entry: + min_port, max_port = port_entry.split('-', 1) + else: + min_port = max_port = port_entry + + if not stem.util.connection.is_valid_port(min_port) or \ + not stem.util.connection.is_valid_port(max_port): + raise ValueError("'%s' is an invalid port range" % port_entry) + + rules.append(MicroExitPolicyRule(self.is_accept, int(min_port), int(max_port))) + + super(MicroExitPolicy, self).__init__(*rules) + self._is_allowed_default = not self.is_accept + + def __str__(self): + return self._policy + + def __hash__(self): + return hash(str(self)) + + def __eq__(self, other): + if isinstance(other, MicroExitPolicy): + return str(self) == str(other) + else: + return False + + +class ExitPolicyRule(object): + """ + Single rule from the user's exit policy. These rules are chained together to + form complete policies that describe where a relay will and will not allow + traffic to exit. + + The format of these rules are formally described in the `dir-spec + `_ as an + 'exitpattern'. Note that while these are similar to tor's man page entry for + ExitPolicies, it's not the exact same. An exitpattern is better defined and + stricter in what it'll accept. For instance, ports are not optional and it + does not contain the 'private' alias. + + This should be treated as an immutable object. 
+ + :var bool is_accept: indicates if exiting is allowed or disallowed + + :var str address: address that this rule is for + + :var int min_port: lower end of the port range that we include (inclusive) + :var int max_port: upper end of the port range that we include (inclusive) + + :param str rule: exit policy rule to be parsed + + :raises: **ValueError** if input isn't a valid tor exit policy rule + """ + + def __init__(self, rule): + # policy ::= "accept" exitpattern | "reject" exitpattern + # exitpattern ::= addrspec ":" portspec + + if rule.startswith('accept'): + self.is_accept = True + elif rule.startswith('reject'): + self.is_accept = False + else: + raise ValueError("An exit policy must start with either 'accept' or 'reject': %s" % rule) + + exitpattern = rule[6:] + + if not exitpattern.startswith(' ') or (len(exitpattern) - 1 != len(exitpattern.lstrip())): + raise ValueError('An exit policy should have a space separating its accept/reject from the exit pattern: %s' % rule) + + exitpattern = exitpattern[1:] + + if ':' not in exitpattern: + raise ValueError("An exitpattern must be of the form 'addrspec:portspec': %s" % rule) + + self.address = None + self._address_type = None + self._masked_bits = None + self.min_port = self.max_port = None + self._hash = None + + # Our mask in ip notation (ex. '255.255.255.0'). This is only set if we + # either have a custom mask that can't be represented by a number of bits, + # or the user has called mask(), lazily loading this. + + self._mask = None + + addrspec, portspec = exitpattern.rsplit(':', 1) + self._apply_addrspec(rule, addrspec) + self._apply_portspec(rule, portspec) + + # If true then a submask of /0 is treated by is_address_wildcard() as being + # a wildcard. + + self._submask_wildcard = True + + # Flags to indicate if this rule seems to be expanded from the 'private' + # keyword or tor's default policy suffix. + + self._is_private = False + self._is_default_suffix = False + + def is_address_wildcard(self): + """ + **True** if we'll match against any address, **False** otherwise. + + Note that if this policy can apply to both IPv4 and IPv6 then this is + different from being for a /0 (since, for instance, 0.0.0.0/0 wouldn't + match against an IPv6 address). That said, /0 addresses are highly unusual + and most things citing exit policies are IPv4 specific anyway, making this + moot. + + :returns: **bool** for if our address matching is a wildcard + """ + + if self._submask_wildcard and self.get_masked_bits() == 0: + return True + + return self._address_type == _address_type_to_int(AddressType.WILDCARD) + + def is_port_wildcard(self): + """ + **True** if we'll match against any port, **False** otherwise. + + :returns: **bool** for if our port matching is a wildcard + """ + + return self.min_port in (0, 1) and self.max_port == 65535 + + def is_match(self, address = None, port = None, strict = False): + """ + **True** if we match against the given destination, **False** otherwise. If + the address or port is omitted then this will check if we're allowed to + exit to any instances of the defined address or port. 
+
+    :param str address: IPv4 or IPv6 address (with or without brackets)
+    :param int port: port number
+    :param bool strict: if the address or port is excluded then check if we can
+      exit to **all** instances of the defined address or port
+
+    :returns: **bool** indicating if we match against this destination
+
+    :raises: **ValueError** if provided with a malformed address or port
+    """
+
+    # validate our input and check if the argument doesn't match our address type
+
+    if address is not None:
+      address_type = self.get_address_type()
+
+      if stem.util.connection.is_valid_ipv4_address(address):
+        if address_type == AddressType.IPv6:
+          return False
+      elif stem.util.connection.is_valid_ipv6_address(address, allow_brackets = True):
+        if address_type == AddressType.IPv4:
+          return False
+
+        address = address.lstrip('[').rstrip(']')
+      else:
+        raise ValueError("'%s' isn't a valid IPv4 or IPv6 address" % address)
+
+    if port is not None and not stem.util.connection.is_valid_port(port):
+      raise ValueError("'%s' isn't a valid port" % port)
+
+    # If we're not matching against an address or port but the rule has one
+    # then we're a fuzzy match. When that happens...
+    #
+    # * If strict and a reject rule then we're a match ('can exit to *all* instances').
+    # * If not strict and an accept rule then match ('an exit to *any* instance').
+
+    fuzzy_match = False
+
+    if not self.is_address_wildcard():
+      # Already got the integer representation of our mask and our address
+      # with the mask applied. Just need to check if this address with the
+      # mask applied matches.
+
+      if address is None:
+        fuzzy_match = True
+      else:
+        comparison_addr_bin = int(stem.util.connection._get_address_binary(address), 2)
+        comparison_addr_bin &= self._get_mask_bin()
+
+        if self._get_address_bin() != comparison_addr_bin:
+          return False
+
+    if not self.is_port_wildcard():
+      if port is None:
+        fuzzy_match = True
+      elif port < self.min_port or port > self.max_port:
+        return False
+
+    if fuzzy_match:
+      return strict != self.is_accept
+    else:
+      return True
+
+  def get_address_type(self):
+    """
+    Provides the :data:`~stem.exit_policy.AddressType` for our policy.
+
+    :returns: :data:`~stem.exit_policy.AddressType` for the type of address that we have
+    """
+
+    return _int_to_address_type(self._address_type)
+
+  def get_mask(self, cache = True):
+    """
+    Provides the address represented by our mask. This is **None** if our
+    address type is a wildcard.
+
+    :param bool cache: caches the result if **True**
+
+    :returns: str of our subnet mask for the address (ex. '255.255.255.0')
+    """
+
+    # Lazy loading our mask because it is very infrequently requested. There's
+    # usually no reason to use memory for it.
+
+    if not self._mask:
+      address_type = self.get_address_type()
+
+      if address_type == AddressType.WILDCARD:
+        mask = None
+      elif address_type == AddressType.IPv4:
+        mask = stem.util.connection.get_mask_ipv4(self._masked_bits)
+      elif address_type == AddressType.IPv6:
+        mask = stem.util.connection.get_mask_ipv6(self._masked_bits)
+
+      if not cache:
+        return mask
+
+      self._mask = mask
+
+    return self._mask
+
+  def get_masked_bits(self):
+    """
+    Provides the number of bits our subnet mask represents. This is **None** if
+    our mask can't have a bit representation.
+
+    :returns: int with the bit representation of our mask
+    """
+
+    return self._masked_bits
+
+  def is_private(self):
+    """
+    Checks if this rule was expanded from the 'private' policy keyword.
+
+    ..
versionadded:: 1.3.0 + + :returns: **True** if this rule was expanded from the 'private' keyword, **False** otherwise. + """ + + return self._is_private + + def is_default(self): + """ + Checks if this rule belongs to the default exit policy suffix. + + .. versionadded:: 1.3.0 + + :returns: **True** if this rule was part of the default end of a policy, **False** otherwise. + """ + + return self._is_default_suffix + + @lru_cache() + def __str__(self): + """ + Provides the string representation of our policy. This does not + necessarily match the rule that we were constructed from (due to things + like IPv6 address collapsing or the multiple representations that our mask + can have). However, it is a valid that would be accepted by our constructor + to re-create this rule. + """ + + label = 'accept ' if self.is_accept else 'reject ' + + if self.is_address_wildcard(): + label += '*:' + else: + address_type = self.get_address_type() + + if address_type == AddressType.IPv4: + label += self.address + else: + label += '[%s]' % self.address + + # Including our mask label as follows... + # - exclude our mask if it doesn't do anything + # - use our masked bit count if we can + # - use the mask itself otherwise + + if (address_type == AddressType.IPv4 and self._masked_bits == 32) or \ + (address_type == AddressType.IPv6 and self._masked_bits == 128): + label += ':' + elif self._masked_bits is not None: + label += '/%i:' % self._masked_bits + else: + label += '/%s:' % self.get_mask() + + if self.is_port_wildcard(): + label += '*' + elif self.min_port == self.max_port: + label += str(self.min_port) + else: + label += '%i-%i' % (self.min_port, self.max_port) + + return label + + def __hash__(self): + if self._hash is None: + my_hash = 0 + + for attr in ('is_accept', 'address', 'min_port', 'max_port'): + my_hash *= 1024 + + attr_value = getattr(self, attr) + + if attr_value is not None: + my_hash += hash(attr_value) + + my_hash *= 1024 + my_hash += hash(self.get_mask(False)) + + self._hash = my_hash + + return self._hash + + @lru_cache() + def _get_mask_bin(self): + # provides an integer representation of our mask + + return int(stem.util.connection._get_address_binary(self.get_mask(False)), 2) + + @lru_cache() + def _get_address_bin(self): + # provides an integer representation of our address + + return int(stem.util.connection._get_address_binary(self.address), 2) & self._get_mask_bin() + + def _apply_addrspec(self, rule, addrspec): + # Parses the addrspec... + # addrspec ::= "*" | ip4spec | ip6spec + + if '/' in addrspec: + self.address, addr_extra = addrspec.split('/', 1) + else: + self.address, addr_extra = addrspec, None + + if addrspec == '*': + self._address_type = _address_type_to_int(AddressType.WILDCARD) + self.address = self._masked_bits = None + elif stem.util.connection.is_valid_ipv4_address(self.address): + # ipv4spec ::= ip4 | ip4 "/" num_ip4_bits | ip4 "/" ip4mask + # ip4 ::= an IPv4 address in dotted-quad format + # ip4mask ::= an IPv4 mask in dotted-quad format + # num_ip4_bits ::= an integer between 0 and 32 + + self._address_type = _address_type_to_int(AddressType.IPv4) + + if addr_extra is None: + self._masked_bits = 32 + elif stem.util.connection.is_valid_ipv4_address(addr_extra): + # provided with an ip4mask + try: + self._masked_bits = stem.util.connection._get_masked_bits(addr_extra) + except ValueError: + # mask can't be represented as a number of bits (ex. 
'255.255.0.255') + self._mask = addr_extra + self._masked_bits = None + elif addr_extra.isdigit(): + # provided with a num_ip4_bits + self._masked_bits = int(addr_extra) + + if self._masked_bits < 0 or self._masked_bits > 32: + raise ValueError('IPv4 masks must be in the range of 0-32 bits') + else: + raise ValueError("The '%s' isn't a mask nor number of bits: %s" % (addr_extra, rule)) + elif self.address.startswith('[') and self.address.endswith(']') and \ + stem.util.connection.is_valid_ipv6_address(self.address[1:-1]): + # ip6spec ::= ip6 | ip6 "/" num_ip6_bits + # ip6 ::= an IPv6 address, surrounded by square brackets. + # num_ip6_bits ::= an integer between 0 and 128 + + self.address = stem.util.connection.expand_ipv6_address(self.address[1:-1].upper()) + self._address_type = _address_type_to_int(AddressType.IPv6) + + if addr_extra is None: + self._masked_bits = 128 + elif addr_extra.isdigit(): + # provided with a num_ip6_bits + self._masked_bits = int(addr_extra) + + if self._masked_bits < 0 or self._masked_bits > 128: + raise ValueError('IPv6 masks must be in the range of 0-128 bits') + else: + raise ValueError("The '%s' isn't a number of bits: %s" % (addr_extra, rule)) + else: + raise ValueError("Address isn't a wildcard, IPv4, or IPv6 address: %s" % rule) + + def _apply_portspec(self, rule, portspec): + # Parses the portspec... + # portspec ::= "*" | port | port "-" port + # port ::= an integer between 1 and 65535, inclusive. + # + # Due to a tor bug the spec says that we should accept port of zero, but + # connections to port zero are never permitted. + + if portspec == '*': + self.min_port, self.max_port = 1, 65535 + elif portspec.isdigit(): + # provided with a single port + if stem.util.connection.is_valid_port(portspec, allow_zero = True): + self.min_port = self.max_port = int(portspec) + else: + raise ValueError("'%s' isn't within a valid port range: %s" % (portspec, rule)) + elif '-' in portspec: + # provided with a port range + port_comp = portspec.split('-', 1) + + if stem.util.connection.is_valid_port(port_comp, allow_zero = True): + self.min_port = int(port_comp[0]) + self.max_port = int(port_comp[1]) + + if self.min_port > self.max_port: + raise ValueError("Port range has a lower bound that's greater than its upper bound: %s" % rule) + else: + raise ValueError('Malformed port range: %s' % rule) + else: + raise ValueError("Port value isn't a wildcard, integer, or range: %s" % rule) + + def __eq__(self, other): + if isinstance(other, ExitPolicyRule): + # Our string representation encompasses our effective policy. Technically + # this isn't quite right since our rule attribute may differ (ie, 'accept + # 0.0.0.0/0' == 'accept 0.0.0.0/0.0.0.0' will be True), but these + # policies are effectively equivalent. + + return hash(self) == hash(other) + else: + return False + + +def _address_type_to_int(address_type): + return AddressType.index_of(address_type) + + +def _int_to_address_type(address_type_int): + return list(AddressType)[address_type_int] + + +class MicroExitPolicyRule(ExitPolicyRule): + """ + Lighter weight ExitPolicyRule derivative for microdescriptors. 
+ """ + + def __init__(self, is_accept, min_port, max_port): + self.is_accept = is_accept + self.address = None # wildcard address + self.min_port = min_port + self.max_port = max_port + self._hash = None + + def is_address_wildcard(self): + return True + + def get_address_type(self): + return AddressType.WILDCARD + + def get_mask(self, cache = True): + return None + + def get_masked_bits(self): + return None + + def __hash__(self): + if self._hash is None: + my_hash = 0 + + for attr in ('is_accept', 'min_port', 'max_port'): + my_hash *= 1024 + + attr_value = getattr(self, attr) + + if attr_value is not None: + my_hash += hash(attr_value) + + self._hash = my_hash + + return self._hash + + +DEFAULT_POLICY_RULES = tuple([ExitPolicyRule(rule) for rule in ( + 'reject *:25', + 'reject *:119', + 'reject *:135-139', + 'reject *:445', + 'reject *:563', + 'reject *:1214', + 'reject *:4661-4666', + 'reject *:6346-6429', + 'reject *:6699', + 'reject *:6881-6999', + 'accept *:*', +)]) diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py b/Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py new file mode 100644 index 0000000..cf69d63 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/__init__.py @@ -0,0 +1,141 @@ +# Copyright 2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Interactive interpreter for interacting with Tor directly. This adds usability +features such as tab completion, history, and IRC-style functions (like /help). +""" + +__all__ = [ + 'arguments', + 'autocomplete', + 'commands', + 'help', +] + +import os +import sys + +import stem +import stem.connection +import stem.prereq +import stem.process +import stem.util.conf +import stem.util.system +import stem.util.term + +from stem.util.term import Attr, Color, format + +PROMPT = format('>>> ', Color.GREEN, Attr.BOLD, Attr.READLINE_ESCAPE) + +STANDARD_OUTPUT = (Color.BLUE, ) +BOLD_OUTPUT = (Color.BLUE, Attr.BOLD) +HEADER_OUTPUT = (Color.GREEN, ) +HEADER_BOLD_OUTPUT = (Color.GREEN, Attr.BOLD) +ERROR_OUTPUT = (Attr.BOLD, Color.RED) + +settings_path = os.path.join(os.path.dirname(__file__), 'settings.cfg') +uses_settings = stem.util.conf.uses_settings('stem_interpreter', settings_path) + + +@uses_settings +def msg(message, config, **attr): + return config.get(message).format(**attr) + + +def main(): + import readline + + import stem.interpreter.arguments + import stem.interpreter.autocomplete + import stem.interpreter.commands + + try: + args = stem.interpreter.arguments.parse(sys.argv[1:]) + except ValueError as exc: + print(exc) + sys.exit(1) + + if args.print_help: + print(stem.interpreter.arguments.get_help()) + sys.exit() + + if args.disable_color: + global PROMPT + stem.util.term.DISABLE_COLOR_SUPPORT = True + PROMPT = '>>> ' + + # If the user isn't connecting to something in particular then offer to start + # tor if it isn't running. 
+ + if not (args.user_provided_port or args.user_provided_socket): + is_tor_running = stem.util.system.is_running('tor') or stem.util.system.is_running('tor.real') + + if not is_tor_running: + if not stem.util.system.is_available('tor'): + print(format(msg('msg.tor_unavailable'), *ERROR_OUTPUT)) + sys.exit(1) + else: + print(format(msg('msg.starting_tor'), *HEADER_OUTPUT)) + + stem.process.launch_tor_with_config( + config = { + 'SocksPort': '0', + 'ControlPort': str(args.control_port), + 'CookieAuthentication': '1', + 'ExitPolicy': 'reject *:*', + }, + completion_percent = 5, + take_ownership = True, + ) + + control_port = (args.control_address, args.control_port) + control_socket = args.control_socket + + # If the user explicitely specified an endpoint then just try to connect to + # that. + + if args.user_provided_socket and not args.user_provided_port: + control_port = None + elif args.user_provided_port and not args.user_provided_socket: + control_socket = None + + controller = stem.connection.connect( + control_port = control_port, + control_socket = control_socket, + password_prompt = True, + ) + + if controller is None: + sys.exit(1) + + with controller: + autocompleter = stem.interpreter.autocomplete.Autocompleter(controller) + readline.parse_and_bind('tab: complete') + readline.set_completer(autocompleter.complete) + readline.set_completer_delims('\n') + + interpreter = stem.interpreter.commands.ControlInterpretor(controller) + + for line in msg('msg.startup_banner').splitlines(): + line_format = HEADER_BOLD_OUTPUT if line.startswith(' ') else HEADER_OUTPUT + print(format(line, *line_format)) + + print('') + + while True: + try: + prompt = '... ' if interpreter.is_multiline_context else PROMPT + + if stem.prereq.is_python_3(): + user_input = input(prompt) + else: + user_input = raw_input(prompt) + + response = interpreter.run_command(user_input) + + if response is not None: + print(response) + except (KeyboardInterrupt, EOFError, stem.SocketClosed) as exc: + print('') # move cursor to the following line + break diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/arguments.py b/Shared/lib/python3.4/site-packages/stem/interpreter/arguments.py new file mode 100644 index 0000000..eadd043 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/arguments.py @@ -0,0 +1,94 @@ +# Copyright 2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Commandline argument parsing for our interpreter prompt. +""" + +import collections +import getopt + +import stem.interpreter +import stem.util.connection + +DEFAULT_ARGS = { + 'control_address': '127.0.0.1', + 'control_port': 9051, + 'user_provided_port': False, + 'control_socket': '/var/run/tor/control', + 'user_provided_socket': False, + 'disable_color': False, + 'print_help': False, +} + +OPT = 'i:s:h' +OPT_EXPANDED = ['interface=', 'socket=', 'no-color', 'help'] + + +def parse(argv): + """ + Parses our arguments, providing a named tuple with their values. 
+ + :param list argv: input arguments to be parsed + + :returns: a **named tuple** with our parsed arguments + + :raises: **ValueError** if we got an invalid argument + """ + + args = dict(DEFAULT_ARGS) + + try: + recognized_args, unrecognized_args = getopt.getopt(argv, OPT, OPT_EXPANDED) + + if unrecognized_args: + error_msg = "aren't recognized arguments" if len(unrecognized_args) > 1 else "isn't a recognized argument" + raise getopt.GetoptError("'%s' %s" % ("', '".join(unrecognized_args), error_msg)) + except Exception as exc: + raise ValueError('%s (for usage provide --help)' % exc) + + for opt, arg in recognized_args: + if opt in ('-i', '--interface'): + if ':' in arg: + address, port = arg.split(':', 1) + else: + address, port = None, arg + + if address is not None: + if not stem.util.connection.is_valid_ipv4_address(address): + raise ValueError("'%s' isn't a valid IPv4 address" % address) + + args['control_address'] = address + + if not stem.util.connection.is_valid_port(port): + raise ValueError("'%s' isn't a valid port number" % port) + + args['control_port'] = int(port) + args['user_provided_port'] = True + elif opt in ('-s', '--socket'): + args['control_socket'] = arg + args['user_provided_socket'] = True + elif opt == '--no-color': + args['disable_color'] = True + elif opt in ('-h', '--help'): + args['print_help'] = True + + # translates our args dict into a named tuple + + Args = collections.namedtuple('Args', args.keys()) + return Args(**args) + + +def get_help(): + """ + Provides our --help usage information. + + :returns: **str** with our usage information + """ + + return stem.interpreter.msg( + 'msg.help', + address = DEFAULT_ARGS['control_address'], + port = DEFAULT_ARGS['control_port'], + socket = DEFAULT_ARGS['control_socket'], + ) diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/autocomplete.py b/Shared/lib/python3.4/site-packages/stem/interpreter/autocomplete.py new file mode 100644 index 0000000..a6d940d --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/autocomplete.py @@ -0,0 +1,115 @@ +# Copyright 2014-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Tab completion for our interpreter prompt. +""" + +from stem.interpreter import uses_settings + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + + +@uses_settings +def _get_commands(controller, config): + """ + Provides commands recognized by tor. + """ + + commands = config.get('autocomplete', []) + + if controller is None: + return commands + + # GETINFO commands. Lines are of the form '[option] -- [description]'. This + # strips '*' from options that accept values. + + results = controller.get_info('info/names', None) + + if results: + for line in results.splitlines(): + option = line.split(' ', 1)[0].rstrip('*') + commands.append('GETINFO %s' % option) + else: + commands.append('GETINFO ') + + # GETCONF, SETCONF, and RESETCONF commands. Lines are of the form + # '[option] [type]'. + + results = controller.get_info('config/names', None) + + if results: + for line in results.splitlines(): + option = line.split(' ', 1)[0] + + commands.append('GETCONF %s' % option) + commands.append('SETCONF %s' % option) + commands.append('RESETCONF %s' % option) + else: + commands += ['GETCONF ', 'SETCONF ', 'RESETCONF '] + + # SETEVENT, USEFEATURE, and SIGNAL commands. For each of these the GETINFO + # results are simply a space separated lists of the values they can have. 
+
+  options = (
+    ('SETEVENTS ', 'events/names'),
+    ('USEFEATURE ', 'features/names'),
+    ('SIGNAL ', 'signal/names'),
+  )
+
+  for prefix, getinfo_cmd in options:
+    results = controller.get_info(getinfo_cmd, None)
+
+    if results:
+      commands += [prefix + value for value in results.split()]
+    else:
+      commands.append(prefix)
+
+  # Adds /help commands.
+
+  usage_info = config.get('help.usage', {})
+
+  for cmd in usage_info.keys():
+    commands.append('/help ' + cmd)
+
+  return commands
+
+
+class Autocompleter(object):
+  def __init__(self, controller):
+    self._commands = _get_commands(controller)
+
+  @lru_cache()
+  def matches(self, text):
+    """
+    Provides autocompletion matches for the given text.
+
+    :param str text: text to check for autocompletion matches with
+
+    :returns: **list** with possible matches
+    """
+
+    lowercase_text = text.lower()
+    return [cmd for cmd in self._commands if cmd.lower().startswith(lowercase_text)]
+
+  def complete(self, text, state):
+    """
+    Provides case insensitive autocompletion options, acting as a functor for
+    the readline set_completer function.
+
+    :param str text: text to check for autocompletion matches with
+    :param int state: index of result to be provided, readline fetches matches
+      until this function provides None
+
+    :returns: **str** with the autocompletion match, **None** if either none
+      exists or state is higher than our number of matches
+    """
+
+    try:
+      return self.matches(text)[state]
+    except IndexError:
+      return None
diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/commands.py b/Shared/lib/python3.4/site-packages/stem/interpreter/commands.py
new file mode 100644
index 0000000..4047517
--- /dev/null
+++ b/Shared/lib/python3.4/site-packages/stem/interpreter/commands.py
@@ -0,0 +1,354 @@
+# Copyright 2014-2015, Damian Johnson and The Tor Project
+# See LICENSE for licensing information
+
+"""
+Handles making requests and formatting the responses.
+"""
+
+import code
+import socket
+
+import stem
+import stem.control
+import stem.descriptor.remote
+import stem.interpreter.help
+import stem.util.connection
+import stem.util.str_tools
+import stem.util.tor_tools
+
+from stem.interpreter import STANDARD_OUTPUT, BOLD_OUTPUT, ERROR_OUTPUT, uses_settings, msg
+from stem.util.term import format
+
+
+def _get_fingerprint(arg, controller):
+  """
+  Resolves user input into a relay fingerprint. This accepts...
+ + * Fingerprints + * Nicknames + * IPv4 addresses, either with or without an ORPort + * Empty input, which is resolved to ourselves if we're a relay + + :param str arg: input to be resolved to a relay fingerprint + :param stem.control.Controller controller: tor control connection + + :returns: **str** for the relay fingerprint + + :raises: **ValueError** if we're unable to resolve the input to a relay + """ + + if not arg: + try: + return controller.get_info('fingerprint') + except: + raise ValueError("We aren't a relay, no information to provide") + elif stem.util.tor_tools.is_valid_fingerprint(arg): + return arg + elif stem.util.tor_tools.is_valid_nickname(arg): + try: + return controller.get_network_status(arg).fingerprint + except: + raise ValueError("Unable to find a relay with the nickname of '%s'" % arg) + elif ':' in arg or stem.util.connection.is_valid_ipv4_address(arg): + if ':' in arg: + address, port = arg.split(':', 1) + + if not stem.util.connection.is_valid_ipv4_address(address): + raise ValueError("'%s' isn't a valid IPv4 address" % address) + elif port and not stem.util.connection.is_valid_port(port): + raise ValueError("'%s' isn't a valid port" % port) + + port = int(port) + else: + address, port = arg, None + + matches = {} + + for desc in controller.get_network_statuses(): + if desc.address == address: + if not port or desc.or_port == port: + matches[desc.or_port] = desc.fingerprint + + if len(matches) == 0: + raise ValueError('No relays found at %s' % arg) + elif len(matches) == 1: + return list(matches.values())[0] + else: + response = "There's multiple relays at %s, include a port to specify which.\n\n" % arg + + for i, or_port in enumerate(matches): + response += ' %i. %s:%s, fingerprint: %s\n' % (i + 1, address, or_port, matches[or_port]) + + raise ValueError(response) + else: + raise ValueError("'%s' isn't a fingerprint, nickname, or IP address" % arg) + + +class ControlInterpretor(code.InteractiveConsole): + """ + Handles issuing requests and providing nicely formed responses, with support + for special irc style subcommands. + """ + + def __init__(self, controller): + self._received_events = [] + + code.InteractiveConsole.__init__(self, { + 'stem': stem, + 'stem.control': stem.control, + 'controller': controller, + 'events': self.get_events, + }) + + self._controller = controller + self._run_python_commands = True + + # Indicates if we're processing a multiline command, such as conditional + # block or loop. + + self.is_multiline_context = False + + # Intercept events our controller hears about at a pretty low level since + # the user will likely be requesting them by direct 'SETEVENTS' calls. + + handle_event_real = self._controller._handle_event + + def handle_event_wrapper(event_message): + handle_event_real(event_message) + self._received_events.append(event_message) + + self._controller._handle_event = handle_event_wrapper + + def get_events(self, *event_types): + events = list(self._received_events) + event_types = list(map(str.upper, event_types)) # make filtering case insensitive + + if event_types: + events = [e for e in events if e.type in event_types] + + return events + + def do_help(self, arg): + """ + Performs the '/help' operation, giving usage information for the given + argument or a general summary if there wasn't one. + """ + + return stem.interpreter.help.response(self._controller, arg) + + def do_events(self, arg): + """ + Performs the '/events' operation, dumping the events that we've received + belonging to the given types. 
If no types are specified then this provides + all buffered events. + + If the user runs '/events clear' then this clears the list of events we've + received. + """ + + event_types = arg.upper().split() + + if 'CLEAR' in event_types: + del self._received_events[:] + return format('cleared event backlog', *STANDARD_OUTPUT) + + return '\n'.join([format(str(e), *STANDARD_OUTPUT) for e in self.get_events(*event_types)]) + + def do_info(self, arg): + """ + Performs the '/info' operation, looking up a relay by fingerprint, IP + address, or nickname and printing its descriptor and consensus entries in a + pretty fashion. + """ + + try: + fingerprint = _get_fingerprint(arg, self._controller) + except ValueError as exc: + return format(str(exc), *ERROR_OUTPUT) + + ns_desc = self._controller.get_network_status(fingerprint, None) + server_desc = self._controller.get_server_descriptor(fingerprint, None) + extrainfo_desc = None + micro_desc = self._controller.get_microdescriptor(fingerprint, None) + + # We'll mostly rely on the router status entry. Either the server + # descriptor or microdescriptor will be missing, so we'll treat them as + # being optional. + + if not ns_desc: + return format('Unable to find consensus information for %s' % fingerprint, *ERROR_OUTPUT) + + # More likely than not we'll have the microdescriptor but not server and + # extrainfo descriptors. If so then fetching them. + + downloader = stem.descriptor.remote.DescriptorDownloader(timeout = 5) + server_desc_query = downloader.get_server_descriptors(fingerprint) + extrainfo_desc_query = downloader.get_extrainfo_descriptors(fingerprint) + + for desc in server_desc_query: + server_desc = desc + + for desc in extrainfo_desc_query: + extrainfo_desc = desc + + address_extrainfo = [] + + try: + address_extrainfo.append(socket.gethostbyaddr(ns_desc.address)[0]) + except: + pass + + try: + address_extrainfo.append(self._controller.get_info('ip-to-country/%s' % ns_desc.address)) + except: + pass + + address_extrainfo_label = ' (%s)' % ', '.join(address_extrainfo) if address_extrainfo else '' + + if server_desc: + exit_policy_label = str(server_desc.exit_policy) + elif micro_desc: + exit_policy_label = str(micro_desc.exit_policy) + else: + exit_policy_label = 'Unknown' + + lines = [ + '%s (%s)' % (ns_desc.nickname, fingerprint), + format('address: ', *BOLD_OUTPUT) + '%s:%s%s' % (ns_desc.address, ns_desc.or_port, address_extrainfo_label), + ] + + if server_desc: + lines.append(format('tor version: ', *BOLD_OUTPUT) + str(server_desc.tor_version)) + + lines.append(format('flags: ', *BOLD_OUTPUT) + ', '.join(ns_desc.flags)) + lines.append(format('exit policy: ', *BOLD_OUTPUT) + exit_policy_label) + + if server_desc and server_desc.contact: + contact = stem.util.str_tools._to_unicode(server_desc.contact) + + # clears up some highly common obscuring + + for alias in (' at ', ' AT '): + contact = contact.replace(alias, '@') + + for alias in (' dot ', ' DOT '): + contact = contact.replace(alias, '.') + + lines.append(format('contact: ', *BOLD_OUTPUT) + contact) + + descriptor_section = [ + ('Server Descriptor:', server_desc), + ('Extrainfo Descriptor:', extrainfo_desc), + ('Microdescriptor:', micro_desc), + ('Router Status Entry:', ns_desc), + ] + + div = format('-' * 80, *STANDARD_OUTPUT) + + for label, desc in descriptor_section: + if desc: + lines += ['', div, format(label, *BOLD_OUTPUT), div, ''] + lines += [format(l, *STANDARD_OUTPUT) for l in str(desc).splitlines()] + + return '\n'.join(lines) + + def do_python(self, arg): + """ + Performs 
the '/python' operation, toggling if we accept python commands or + not. + """ + + if not arg: + status = 'enabled' if self._run_python_commands else 'disabled' + return format('Python support is currently %s.' % status, *STANDARD_OUTPUT) + elif arg.lower() == 'enable': + self._run_python_commands = True + elif arg.lower() == 'disable': + self._run_python_commands = False + else: + return format("'%s' is not recognized. Please run either '/python enable' or '/python disable'." % arg, *ERROR_OUTPUT) + + if self._run_python_commands: + response = "Python support enabled, we'll now run non-interpreter commands as python." + else: + response = "Python support disabled, we'll now pass along all commands to tor." + + return format(response, *STANDARD_OUTPUT) + + @uses_settings + def run_command(self, command, config): + """ + Runs the given command. Requests starting with a '/' are special commands + to the interpreter, and anything else is sent to the control port. + + :param stem.control.Controller controller: tor control connection + :param str command: command to be processed + + :returns: **list** out output lines, each line being a list of + (msg, format) tuples + + :raises: **stem.SocketClosed** if the control connection has been severed + """ + + if not self._controller.is_alive(): + raise stem.SocketClosed() + + # Commands fall into three categories: + # + # * Interpretor commands. These start with a '/'. + # + # * Controller commands stem knows how to handle. We use our Controller's + # methods for these to take advantage of caching and present nicer + # output. + # + # * Other tor commands. We pass these directly on to the control port. + + cmd, arg = command.strip(), '' + + if ' ' in cmd: + cmd, arg = cmd.split(' ', 1) + + output = '' + + if cmd.startswith('/'): + cmd = cmd.lower() + + if cmd == '/quit': + raise stem.SocketClosed() + elif cmd == '/events': + output = self.do_events(arg) + elif cmd == '/info': + output = self.do_info(arg) + elif cmd == '/python': + output = self.do_python(arg) + elif cmd == '/help': + output = self.do_help(arg) + else: + output = format("'%s' isn't a recognized command" % command, *ERROR_OUTPUT) + else: + cmd = cmd.upper() # makes commands uppercase to match the spec + + if cmd.replace('+', '') in ('LOADCONF', 'POSTDESCRIPTOR'): + # provides a notice that multi-line controller input isn't yet implemented + output = format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT) + elif cmd == 'QUIT': + self._controller.msg(command) + raise stem.SocketClosed() + else: + is_tor_command = cmd in config.get('help.usage', {}) and cmd.lower() != 'events' + + if self._run_python_commands and not is_tor_command: + self.is_multiline_context = code.InteractiveConsole.push(self, command) + return + else: + try: + output = format(self._controller.msg(command).raw_content().strip(), *STANDARD_OUTPUT) + except stem.ControllerError as exc: + if isinstance(exc, stem.SocketClosed): + raise exc + else: + output = format(str(exc), *ERROR_OUTPUT) + + output += '\n' # give ourselves an extra line before the next prompt + + return output diff --git a/Shared/lib/python3.4/site-packages/stem/interpreter/help.py b/Shared/lib/python3.4/site-packages/stem/interpreter/help.py new file mode 100644 index 0000000..83db0b1 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/help.py @@ -0,0 +1,145 @@ +# Copyright 2014-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Provides our /help responses. 
+""" + +from stem.interpreter import ( + STANDARD_OUTPUT, + BOLD_OUTPUT, + ERROR_OUTPUT, + msg, + uses_settings, +) + +from stem.util.term import format + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + + +def response(controller, arg): + """ + Provides our /help response. + + :param stem.control.Controller controller: tor control connection + :param str arg: controller or interpreter command to provide help output for + + :returns: **str** with our help response + """ + + # Normalizing inputs first so we can better cache responses. + + return _response(controller, _normalize(arg)) + + +def _normalize(arg): + arg = arg.upper() + + # If there's multiple arguments then just take the first. This is + # particularly likely if they're trying to query a full command (for + # instance "/help GETINFO version") + + arg = arg.split(' ')[0] + + # strip slash if someone enters an interpreter command (ex. "/help /help") + + if arg.startswith('/'): + arg = arg[1:] + + return arg + + +@lru_cache() +@uses_settings +def _response(controller, arg, config): + if not arg: + return _general_help() + + usage_info = config.get('help.usage', {}) + + if arg not in usage_info: + return format("No help information available for '%s'..." % arg, *ERROR_OUTPUT) + + output = format(usage_info[arg] + '\n', *BOLD_OUTPUT) + + description = config.get('help.description.%s' % arg.lower(), '') + + for line in description.splitlines(): + output += format(' ' + line, *STANDARD_OUTPUT) + '\n' + + output += '\n' + + if arg == 'GETINFO': + results = controller.get_info('info/names', None) + + if results: + for line in results.splitlines(): + if ' -- ' in line: + opt, summary = line.split(' -- ', 1) + + output += format('%-33s' % opt, *BOLD_OUTPUT) + output += format(' - %s' % summary, *STANDARD_OUTPUT) + '\n' + elif arg == 'GETCONF': + results = controller.get_info('config/names', None) + + if results: + options = [opt.split(' ', 1)[0] for opt in results.splitlines()] + + for i in range(0, len(options), 2): + line = '' + + for entry in options[i:i + 2]: + line += '%-42s' % entry + + output += format(line.rstrip(), *STANDARD_OUTPUT) + '\n' + elif arg == 'SIGNAL': + signal_options = config.get('help.signal.options', {}) + + for signal, summary in signal_options.items(): + output += format('%-15s' % signal, *BOLD_OUTPUT) + output += format(' - %s' % summary, *STANDARD_OUTPUT) + '\n' + elif arg == 'SETEVENTS': + results = controller.get_info('events/names', None) + + if results: + entries = results.split() + + # displays four columns of 20 characters + + for i in range(0, len(entries), 4): + line = '' + + for entry in entries[i:i + 4]: + line += '%-20s' % entry + + output += format(line.rstrip(), *STANDARD_OUTPUT) + '\n' + elif arg == 'USEFEATURE': + results = controller.get_info('features/names', None) + + if results: + output += format(results, *STANDARD_OUTPUT) + '\n' + elif arg in ('LOADCONF', 'POSTDESCRIPTOR'): + # gives a warning that this option isn't yet implemented + output += format(msg('msg.multiline_unimplemented_notice'), *ERROR_OUTPUT) + '\n' + + return output.rstrip() + + +def _general_help(): + lines = [] + + for line in msg('help.general').splitlines(): + div = line.find(' - ') + + if div != -1: + cmd, description = line[:div], line[div:] + lines.append(format(cmd, *BOLD_OUTPUT) + format(description, *STANDARD_OUTPUT)) + else: + lines.append(format(line, *BOLD_OUTPUT)) + + return '\n'.join(lines) diff --git 
a/Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg b/Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg new file mode 100644 index 0000000..1bacf1c --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/interpreter/settings.cfg @@ -0,0 +1,326 @@ +################################################################################ +# +# Configuration data used by Stem's interpreter prompt. +# +################################################################################ + + ################## +# GENERAL MESSAGES # + ################## + +msg.multiline_unimplemented_notice Multi-line control options like this are not yet implemented. + +msg.help +|Interactive interpreter for Tor. This provides you with direct access +|to Tor's control interface via either python or direct requests. +| +| -i, --interface [ADDRESS:]PORT change control interface from {address}:{port} +| -s, --socket SOCKET_PATH attach using unix domain socket if present, +| SOCKET_PATH defaults to: {socket} +| --no-color disables colorized output +| -h, --help presents this help +| + +msg.startup_banner +|Welcome to Stem's interpreter prompt. This provides you with direct access to +|Tor's control interface. +| +|This acts like a standard python interpreter with a Tor connection available +|via your 'controller' variable... +| +| >>> controller.get_info('version') +| '0.2.5.1-alpha-dev (git-245ecfff36c0cecc)' +| +|You can also issue requests directly to Tor... +| +| >>> GETINFO version +| 250-version=0.2.5.1-alpha-dev (git-245ecfff36c0cecc) +| 250 OK +| +|For more information run '/help'. +| + +msg.tor_unavailable Tor isn't running and the command currently isn't in your PATH. + +msg.starting_tor +|Tor isn't running. Starting a temporary Tor instance for our interpreter to +|interact with. This will have a minimal non-relaying configuration, and be +|shut down when you're done. +| +|-------------------------------------------------------------------------------- +| + + ################# +# OUTPUT OF /HELP # + ################# + +# Response for the '/help' command without any arguments. 
+ +help.general +|Interpretor commands include: +| /help - provides information for interpreter and tor commands +| /events - prints events that we've received +| /info - general information for a relay +| /python - enable or disable support for running python commands +| /quit - shuts down the interpreter +| +|Tor commands include: +| GETINFO - queries information from tor +| GETCONF, SETCONF, RESETCONF - show or edit a configuration option +| SIGNAL - issues control signal to the process (for resetting, stopping, etc) +| SETEVENTS - configures the events tor will notify us of +| +| USEFEATURE - enables custom behavior for the controller +| SAVECONF - writes tor's current configuration to our torrc +| LOADCONF - loads the given input like it was part of our torrc +| MAPADDRESS - replaces requests for one address with another +| POSTDESCRIPTOR - adds a relay descriptor to our cache +| EXTENDCIRCUIT - create or extend a tor circuit +| SETCIRCUITPURPOSE - configures the purpose associated with a circuit +| CLOSECIRCUIT - closes the given circuit +| ATTACHSTREAM - associates an application's stream with a tor circuit +| REDIRECTSTREAM - sets a stream's destination +| CLOSESTREAM - closes the given stream +| ADD_ONION - create a new hidden service +| DEL_ONION - delete a hidden service that was created with ADD_ONION +| HSFETCH - retrieve a hidden service descriptor, providing it in a HS_DESC_CONTENT event +| HSPOST - uploads a hidden service descriptor +| RESOLVE - issues an asynchronous dns or rdns request over tor +| TAKEOWNERSHIP - instructs tor to quit when this control connection is closed +| PROTOCOLINFO - queries version and controller authentication information +| QUIT - disconnect the control connection +| +|For more information use '/help [OPTION]'. + +# Usage of tor and interpreter commands. + +help.usage HELP => /help [OPTION] +help.usage EVENTS => /events [types] +help.usage INFO => /info [relay fingerprint, nickname, or IP address] +help.usage PYTHON => /python [enable,disable] +help.usage QUIT => /quit +help.usage GETINFO => GETINFO OPTION +help.usage GETCONF => GETCONF OPTION +help.usage SETCONF => SETCONF PARAM[=VALUE] +help.usage RESETCONF => RESETCONF PARAM[=VALUE] +help.usage SIGNAL => SIGNAL SIG +help.usage SETEVENTS => SETEVENTS [EXTENDED] [EVENTS] +help.usage USEFEATURE => USEFEATURE OPTION +help.usage SAVECONF => SAVECONF +help.usage LOADCONF => LOADCONF... +help.usage MAPADDRESS => MAPADDRESS SOURCE_ADDR=DESTINATION_ADDR +help.usage POSTDESCRIPTOR => POSTDESCRIPTOR [purpose=general/controller/bridge] [cache=yes/no]... +help.usage EXTENDCIRCUIT => EXTENDCIRCUIT CircuitID [PATH] [purpose=general/controller] +help.usage SETCIRCUITPURPOSE => SETCIRCUITPURPOSE CircuitID purpose=general/controller +help.usage CLOSECIRCUIT => CLOSECIRCUIT CircuitID [IfUnused] +help.usage ATTACHSTREAM => ATTACHSTREAM StreamID CircuitID [HOP=HopNum] +help.usage REDIRECTSTREAM => REDIRECTSTREAM StreamID Address [Port] +help.usage CLOSESTREAM => CLOSESTREAM StreamID Reason [Flag] +help.usage ADD_ONION => KeyType:KeyBlob [Flags=Flag] (Port=Port [,Target])... +help.usage DEL_ONION => ServiceID +help.usage HSFETCH => HSFETCH (HSAddress/v2-DescId) [SERVER=Server]... +help.usage HSPOST => [SERVER=Server] DESCRIPTOR +help.usage RESOLVE => RESOLVE [mode=reverse] address +help.usage TAKEOWNERSHIP => TAKEOWNERSHIP +help.usage PROTOCOLINFO => PROTOCOLINFO [ProtocolVersion] + +# Longer description of what tor and interpreter commands do. 
+ +help.description.help +|Provides usage information for the given interpreter, tor command, or tor +|configuration option. +| +|Example: +| /help info # provides a description of the '/info' option +| /help GETINFO # usage information for tor's GETINFO controller option + +help.description.events +|Provides events that we've received belonging to the given event types. If +|no types are specified then this provides all the messages that we've +|received. +| +|You can also run '/events clear' to clear the backlog of events we've +|received. + +help.description.info +|Provides information for a relay that's currently in the consensus. If no +|relay is specified then this provides information on ourselves. + +help.description.python +|Enables or disables support for running python commands. This determines how +|we treat commands this interpreter doesn't recognize... +| +|* If enabled then unrecognized commands are executed as python. +|* If disabled then unrecognized commands are passed along to tor. + +help.description.quit +|Terminates the interpreter. + +help.description.getinfo +|Queries the tor process for information. Options are... +| + +help.description.getconf +|Provides the current value for a given configuration value. Options include... +| + +help.description.setconf +|Sets the given configuration parameters. Values can be quoted or non-quoted +|strings, and reverts the option to 0 or NULL if not provided. +| +|Examples: +| * Sets a contact address and resets our family to NULL +| SETCONF MyFamily ContactInfo=foo@bar.com +| +| * Sets an exit policy that only includes port 80/443 +| SETCONF ExitPolicy=\"accept *:80, accept *:443, reject *:*\"\ + +help.description.resetconf +|Reverts the given configuration options to their default values. If a value +|is provided then this behaves in the same way as SETCONF. +| +|Examples: +| * Returns both of our accounting parameters to their defaults +| RESETCONF AccountingMax AccountingStart +| +| * Uses the default exit policy and sets our nickname to be 'Goomba' +| RESETCONF ExitPolicy Nickname=Goomba + +help.description.signal +|Issues a signal that tells the tor process to reload its torrc, dump its +|stats, halt, etc. + +help.description.setevents +|Sets the events that we will receive. This turns off any events that aren't +|listed so sending 'SETEVENTS' without any values will turn off all event reporting. +| +|For Tor versions between 0.1.1.9 and 0.2.2.1 adding 'EXTENDED' causes some +|events to give us additional information. After version 0.2.2.1 this is +|always on. +| +|Events include... +| + +help.description.usefeature +|Customizes the behavior of the control port. Options include... +| + +help.description.saveconf +|Writes Tor's current configuration to its torrc. + +help.description.loadconf +|Reads the given text like it belonged to our torrc. +| +|Example: +| +LOADCONF +| # sets our exit policy to just accept ports 80 and 443 +| ExitPolicy accept *:80 +| ExitPolicy accept *:443 +| ExitPolicy reject *:* +| . + +help.description.mapaddress +|Replaces future requests for one address with another. +| +|Example: +| MAPADDRESS 0.0.0.0=torproject.org 1.2.3.4=tor.freehaven.net + +help.description.postdescriptor +|Simulates getting a new relay descriptor. + +help.description.extendcircuit +|Extends the given circuit or create a new one if the CircuitID is zero. The +|PATH is a comma separated list of fingerprints. If it isn't set then this +|uses Tor's normal path selection. 
+ +help.description.setcircuitpurpose +|Sets the purpose attribute for a circuit. + +help.description.closecircuit +|Closes the given circuit. If "IfUnused" is included then this only closes +|the circuit if it isn't currently being used. + +help.description.attachstream +|Attaches a stream with the given built circuit (tor picks one on its own if +|CircuitID is zero). If HopNum is given then this hop is used to exit the +|circuit, otherwise the last relay is used. + +help.description.redirectstream +|Sets the destination for a given stream. This can only be done after a +|stream is created but before it's attached to a circuit. + +help.description.closestream +|Closes the given stream, the reason being an integer matching a reason as +|per section 6.3 of the tor-spec. + +help.description.add_onion +|Creates a new hidden service. Unlike 'SETCONF HiddenServiceDir...' this +|doesn't persist the service to disk. + +help.description.del_onion +|Delete a hidden service that was created with ADD_ONION. + +help.description.hsfetch +|Retrieves the descriptor for a hidden service. This is an asynchronous +|request, with the descriptor provided by a HS_DESC_CONTENT event. + +help.description.hspost +|Uploads a descriptor to a hidden service directory. + +help.description.resolve +|Performs IPv4 DNS resolution over tor, doing a reverse lookup instead if +|"mode=reverse" is included. This request is processed in the background and +|results in a ADDRMAP event with the response. + +help.description.takeownership +|Instructs Tor to gracefully shut down when this control connection is closed. + +help.description.protocolinfo +|Provides bootstrapping information that a controller might need when first +|starting, like Tor's version and controller authentication. This can be done +|before authenticating to the control port. + +help.signal.options RELOAD / HUP => reload our torrc +help.signal.options SHUTDOWN / INT => gracefully shut down, waiting 30 seconds if we're a relay +help.signal.options DUMP / USR1 => logs information about open connections and circuits +help.signal.options DEBUG / USR2 => makes us log at the DEBUG runlevel +help.signal.options HALT / TERM => immediately shut down +help.signal.options CLEARDNSCACHE => clears any cached DNS results +help.signal.options NEWNYM => clears the DNS cache and uses new circuits for future connections + + ################ +# TAB COMPLETION # + ################ + +# Commands we'll autocomplete when the user hits tab. This is just the start of +# our autocompletion list - more are determined dynamically by checking what +# tor supports. 
+ +autocomplete /help +autocomplete /events +autocomplete /info +autocomplete /quit +autocomplete SAVECONF +autocomplete MAPADDRESS +autocomplete EXTENDCIRCUIT +autocomplete SETCIRCUITPURPOSE +autocomplete SETROUTERPURPOSE +autocomplete ATTACHSTREAM +#autocomplete +POSTDESCRIPTOR # TODO: needs multi-line support +autocomplete REDIRECTSTREAM +autocomplete CLOSESTREAM +autocomplete CLOSECIRCUIT +autocomplete QUIT +autocomplete RESOLVE +autocomplete PROTOCOLINFO +#autocomplete +LOADCONF # TODO: needs multi-line support +autocomplete TAKEOWNERSHIP +autocomplete AUTHCHALLENGE +autocomplete DROPGUARDS +autocomplete ADD_ONION NEW:BEST +autocomplete ADD_ONION NEW:RSA1024 +autocomplete ADD_ONION RSA1024: +autocomplete DEL_ONION +autocomplete HSFETCH +autocomplete HSPOST + diff --git a/Shared/lib/python3.4/site-packages/stem/prereq.py b/Shared/lib/python3.4/site-packages/stem/prereq.py new file mode 100644 index 0000000..5ed87ef --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/prereq.py @@ -0,0 +1,132 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Checks for stem dependencies. We require python 2.6 or greater (including the +3.x series). Other requirements for complete functionality are... + +* pycrypto module + + * validating descriptor signature integrity + +:: + + check_requirements - checks for minimum requirements for running stem + + is_python_27 - checks if python 2.7 or later is available + is_python_3 - checks if python 3.0 or later is available + + is_crypto_available - checks if the pycrypto module is available +""" + +import inspect +import sys + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + +CRYPTO_UNAVAILABLE = "Unable to import the pycrypto module. Because of this we'll be unable to verify descriptor signature integrity. You can get pycrypto from: https://www.dlitz.net/software/pycrypto/" + + +def check_requirements(): + """ + Checks that we meet the minimum requirements to run stem. If we don't then + this raises an ImportError with the issue. + + :raises: **ImportError** with the problem if we don't meet stem's + requirements + """ + + major_version, minor_version = sys.version_info[0:2] + + if major_version < 2 or (major_version == 2 and minor_version < 6): + raise ImportError('stem requires python version 2.6 or greater') + + +def is_python_27(): + """ + Checks if we're running python 2.7 or above (including the 3.x series). + + :returns: **True** if we meet this requirement and **False** otherwise + """ + + major_version, minor_version = sys.version_info[0:2] + + return major_version > 2 or (major_version == 2 and minor_version >= 7) + + +def is_python_3(): + """ + Checks if we're in the 3.0 - 3.x range. + + :returns: **True** if we meet this requirement and **False** otherwise + """ + + return sys.version_info[0] == 3 + + +@lru_cache() +def is_crypto_available(): + """ + Checks if the pycrypto functions we use are available. This is used for + verifying relay descriptor signatures. 
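+
+  For instance, an illustrative check before relying on signature validation...
+
+  ::
+
+    import stem.prereq
+
+    if not stem.prereq.is_crypto_available():
+      print('pycrypto is unavailable, descriptor signatures will not be verified')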
+ + :returns: **True** if we can use pycrypto and **False** otherwise + """ + + from stem.util import log + + try: + from Crypto.PublicKey import RSA + from Crypto.Util import asn1 + from Crypto.Util.number import long_to_bytes + return True + except ImportError: + log.log_once('stem.prereq.is_crypto_available', log.INFO, CRYPTO_UNAVAILABLE) + return False + + +@lru_cache() +def is_mock_available(): + """ + Checks if the mock module is available. In python 3.3 and up it is a builtin + unittest module, but before this it needed to be `installed separately + `_. Imports should be as follows.... + + :: + + try: + # added in python 3.3 + from unittest.mock import Mock + except ImportError: + from mock import Mock + + :returns: **True** if the mock module is available and **False** otherwise + """ + + try: + # checks for python 3.3 version + import unittest.mock + return True + except ImportError: + pass + + try: + import mock + + # check for mock's patch.dict() which was introduced in version 0.7.0 + + if not hasattr(mock.patch, 'dict'): + raise ImportError() + + # check for mock's new_callable argument for patch() which was introduced in version 0.8.0 + + if 'new_callable' not in inspect.getargspec(mock.patch).args: + raise ImportError() + + return True + except ImportError: + return False diff --git a/Shared/lib/python3.4/site-packages/stem/process.py b/Shared/lib/python3.4/site-packages/stem/process.py new file mode 100644 index 0000000..b8b1f92 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/process.py @@ -0,0 +1,272 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Helper functions for working with tor as a process. + +:NO_TORRC: + when provided as a torrc_path tor is ran with a blank configuration + +:DEFAULT_INIT_TIMEOUT: + number of seconds before we time out our attempt to start a tor instance + +**Module Overview:** + +:: + + launch_tor - starts up a tor process + launch_tor_with_config - starts a tor process with a custom torrc +""" + +import os +import re +import signal +import subprocess +import tempfile + +import stem.prereq +import stem.util.str_tools +import stem.util.system +import stem.version + +NO_TORRC = '' +DEFAULT_INIT_TIMEOUT = 90 + + +def launch_tor(tor_cmd = 'tor', args = None, torrc_path = None, completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False, stdin = None): + """ + Initializes a tor process. This blocks until initialization completes or we + error out. + + If tor's data directory is missing or stale then bootstrapping will include + making several requests to the directory authorities which can take a little + while. Usually this is done in 50 seconds or so, but occasionally calls seem + to get stuck, taking well over the default timeout. + + **To work to must log at NOTICE runlevel to stdout.** It does this by + default, but if you have a 'Log' entry in your torrc then you'll also need + 'Log NOTICE stdout'. + + Note: The timeout argument does not work on Windows, and relies on the global + state of the signal module. 
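+
+  For example, a minimal invocation (an illustrative sketch using only the
+  arguments documented below) might look like...
+
+  ::
+
+    import stem.process
+
+    def print_bootstrap_lines(line):
+      if 'Bootstrapped ' in line:
+        print(line)
+
+    tor_process = stem.process.launch_tor(
+      tor_cmd = 'tor',
+      init_msg_handler = print_bootstrap_lines,
+    )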
+ + :param str tor_cmd: command for starting tor + :param list args: additional arguments for tor + :param str torrc_path: location of the torrc for us to use + :param int completion_percent: percent of bootstrap completion at which + this'll return + :param functor init_msg_handler: optional functor that will be provided with + tor's initialization stdout as we get it + :param int timeout: time after which the attempt to start tor is aborted, no + timeouts are applied if **None** + :param bool take_ownership: asserts ownership over the tor process so it + aborts if this python process terminates or a :class:`~stem.control.Controller` + we establish to it disconnects + :param str stdin: content to provide on stdin + + :returns: **subprocess.Popen** instance for the tor subprocess + + :raises: **OSError** if we either fail to create the tor process or reached a + timeout without success + """ + + if stem.util.system.is_windows(): + timeout = None + + # sanity check that we got a tor binary + + if os.path.sep in tor_cmd: + # got a path (either relative or absolute), check what it leads to + + if os.path.isdir(tor_cmd): + raise OSError("'%s' is a directory, not the tor executable" % tor_cmd) + elif not os.path.isfile(tor_cmd): + raise OSError("'%s' doesn't exist" % tor_cmd) + elif not stem.util.system.is_available(tor_cmd): + raise OSError("'%s' isn't available on your system. Maybe it's not in your PATH?" % tor_cmd) + + # double check that we have a torrc to work with + if torrc_path not in (None, NO_TORRC) and not os.path.exists(torrc_path): + raise OSError("torrc doesn't exist (%s)" % torrc_path) + + # starts a tor subprocess, raising an OSError if it fails + runtime_args, temp_file = [tor_cmd], None + + if args: + runtime_args += args + + if torrc_path: + if torrc_path == NO_TORRC: + temp_file = tempfile.mkstemp(prefix = 'empty-torrc-', text = True)[1] + runtime_args += ['-f', temp_file] + else: + runtime_args += ['-f', torrc_path] + + if take_ownership: + runtime_args += ['__OwningControllerProcess', str(os.getpid())] + + tor_process = subprocess.Popen(runtime_args, stdout = subprocess.PIPE, stdin = subprocess.PIPE, stderr = subprocess.PIPE) + + if stdin: + tor_process.stdin.write(stem.util.str_tools._to_bytes(stdin)) + tor_process.stdin.close() + + if timeout: + def timeout_handler(signum, frame): + # terminates the uninitialized tor process and raise on timeout + + tor_process.kill() + raise OSError('reached a %i second timeout without success' % timeout) + + signal.signal(signal.SIGALRM, timeout_handler) + signal.alarm(timeout) + + bootstrap_line = re.compile('Bootstrapped ([0-9]+)%: ') + problem_line = re.compile('\[(warn|err)\] (.*)$') + last_problem = 'Timed out' + + try: + while True: + # Tor's stdout will be read as ASCII bytes. This is fine for python 2, but + # in python 3 that means it'll mismatch with other operations (for instance + # the bootstrap_line.search() call later will fail). + # + # It seems like python 2.x is perfectly happy for this to be unicode, so + # normalizing to that. + + init_line = tor_process.stdout.readline().decode('utf-8', 'replace').strip() + + # this will provide empty results if the process is terminated + + if not init_line: + tor_process.kill() # ... 
but best make sure + raise OSError('Process terminated: %s' % last_problem) + + # provide the caller with the initialization message if they want it + + if init_msg_handler: + init_msg_handler(init_line) + + # return the process if we're done with bootstrapping + + bootstrap_match = bootstrap_line.search(init_line) + problem_match = problem_line.search(init_line) + + if bootstrap_match and int(bootstrap_match.group(1)) >= completion_percent: + return tor_process + elif problem_match: + runlevel, msg = problem_match.groups() + + if 'see warnings above' not in msg: + if ': ' in msg: + msg = msg.split(': ')[-1].strip() + + last_problem = msg + finally: + if timeout: + signal.alarm(0) # stop alarm + + tor_process.stdout.close() + tor_process.stderr.close() + + if temp_file: + try: + os.remove(temp_file) + except: + pass + + +def launch_tor_with_config(config, tor_cmd = 'tor', completion_percent = 100, init_msg_handler = None, timeout = DEFAULT_INIT_TIMEOUT, take_ownership = False): + """ + Initializes a tor process, like :func:`~stem.process.launch_tor`, but with a + customized configuration. This writes a temporary torrc to disk, launches + tor, then deletes the torrc. + + For example... + + :: + + tor_process = stem.process.launch_tor_with_config( + config = { + 'ControlPort': '2778', + 'Log': [ + 'NOTICE stdout', + 'ERR file /tmp/tor_error_log', + ], + }, + ) + + :param dict config: configuration options, such as "{'ControlPort': '9051'}", + values can either be a **str** or **list of str** if for multiple values + :param str tor_cmd: command for starting tor + :param int completion_percent: percent of bootstrap completion at which + this'll return + :param functor init_msg_handler: optional functor that will be provided with + tor's initialization stdout as we get it + :param int timeout: time after which the attempt to start tor is aborted, no + timeouts are applied if **None** + :param bool take_ownership: asserts ownership over the tor process so it + aborts if this python process terminates or a :class:`~stem.control.Controller` + we establish to it disconnects + + :returns: **subprocess.Popen** instance for the tor subprocess + + :raises: **OSError** if we either fail to create the tor process or reached a + timeout without success + """ + + # TODO: Drop this version check when tor 0.2.6.3 or higher is the only game + # in town. 
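+
+  # Tor 0.2.6.3 and later accept the torrc on stdin ('-f -'), which is what
+  # the TORRC_VIA_STDIN requirement check below detects. Older tors instead
+  # get the generated configuration written to a temporary torrc file.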
+ + try: + use_stdin = stem.version.get_system_tor_version(tor_cmd) >= stem.version.Requirement.TORRC_VIA_STDIN + except IOError: + use_stdin = False + + # we need to be sure that we're logging to stdout to figure out when we're + # done bootstrapping + + if 'Log' in config: + stdout_options = ['DEBUG stdout', 'INFO stdout', 'NOTICE stdout'] + + if isinstance(config['Log'], str): + config['Log'] = [config['Log']] + + has_stdout = False + + for log_config in config['Log']: + if log_config in stdout_options: + has_stdout = True + break + + if not has_stdout: + config['Log'].append('NOTICE stdout') + + config_str = '' + + for key, values in list(config.items()): + if isinstance(values, str): + config_str += '%s %s\n' % (key, values) + else: + for value in values: + config_str += '%s %s\n' % (key, value) + + if use_stdin: + return launch_tor(tor_cmd, ['-f', '-'], None, completion_percent, init_msg_handler, timeout, take_ownership, stdin = config_str) + else: + torrc_descriptor, torrc_path = tempfile.mkstemp(prefix = 'torrc-', text = True) + + try: + with open(torrc_path, 'w') as torrc_file: + torrc_file.write(config_str) + + # prevents tor from erroring out due to a missing torrc if it gets a sighup + args = ['__ReloadTorrcOnSIGHUP', '0'] + + return launch_tor(tor_cmd, args, torrc_path, completion_percent, init_msg_handler, timeout, take_ownership) + finally: + try: + os.close(torrc_descriptor) + os.remove(torrc_path) + except: + pass diff --git a/Shared/lib/python3.4/site-packages/stem/response/__init__.py b/Shared/lib/python3.4/site-packages/stem/response/__init__.py new file mode 100644 index 0000000..df534a9 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/response/__init__.py @@ -0,0 +1,588 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Parses replies from the control socket. + +**Module Overview:** + +:: + + convert - translates a ControlMessage into a particular response subclass + + ControlMessage - Message that's read from the control socket. + |- SingleLineResponse - Simple tor response only including a single line of information. + | + |- from_str - provides a ControlMessage for the given string + |- is_ok - response had a 250 status + |- content - provides the parsed message content + |- raw_content - unparsed socket data + |- __str__ - content stripped of protocol formatting + +- __iter__ - ControlLine entries for the content of the message + + ControlLine - String subclass with methods for parsing controller responses. + |- remainder - provides the unparsed content + |- is_empty - checks if the remaining content is empty + |- is_next_quoted - checks if the next entry is a quoted value + |- is_next_mapping - checks if the next entry is a KEY=VALUE mapping + |- peek_key - provides the key of the next entry + |- pop - removes and returns the next entry + +- pop_mapping - removes and returns the next entry as a KEY=VALUE mapping +""" + +__all__ = [ + 'add_onion', + 'events', + 'getinfo', + 'getconf', + 'protocolinfo', + 'authchallenge', + 'convert', + 'ControlMessage', + 'ControlLine', + 'SingleLineResponse', +] + +import re +import threading + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +import stem.socket + +KEY_ARG = re.compile('^(\S+)=') + +# Escape sequences from the 'esc_for_log' function of tor's 'common/util.c'. +# It's hard to tell what controller functions use this in practice, but direct +# users are... 
+# - 'COOKIEFILE' field of PROTOCOLINFO responses +# - logged messages about bugs +# - the 'getinfo_helper_listeners' function of control.c + +CONTROL_ESCAPES = {r'\\': '\\', r'\"': '\"', r'\'': '\'', + r'\r': '\r', r'\n': '\n', r'\t': '\t'} + + +def convert(response_type, message, **kwargs): + """ + Converts a :class:`~stem.response.ControlMessage` into a particular kind of + tor response. This does an in-place conversion of the message from being a + :class:`~stem.response.ControlMessage` to a subclass for its response type. + Recognized types include... + + =================== ===== + response_type Class + =================== ===== + **GETINFO** :class:`stem.response.getinfo.GetInfoResponse` + **GETCONF** :class:`stem.response.getconf.GetConfResponse` + **MAPADDRESS** :class:`stem.response.mapaddress.MapAddressResponse` + **EVENT** :class:`stem.response.events.Event` subclass + **PROTOCOLINFO** :class:`stem.response.protocolinfo.ProtocolInfoResponse` + **AUTHCHALLENGE** :class:`stem.response.authchallenge.AuthChallengeResponse` + **SINGLELINE** :class:`stem.response.SingleLineResponse` + =================== ===== + + :param str response_type: type of tor response to convert to + :param stem.response.ControlMessage message: message to be converted + :param kwargs: optional keyword arguments to be passed to the parser method + + :raises: + * :class:`stem.ProtocolError` the message isn't a proper response of + that type + * :class:`stem.InvalidArguments` the arguments given as input are + invalid, this is can only be raised if the response_type is: **GETINFO**, + **GETCONF** + * :class:`stem.InvalidRequest` the arguments given as input are + invalid, this is can only be raised if the response_type is: + **MAPADDRESS** + * :class:`stem.OperationFailed` if the action the event represents failed, + this is can only be raised if the response_type is: **MAPADDRESS** + * **TypeError** if argument isn't a :class:`~stem.response.ControlMessage` + or response_type isn't supported + """ + + import stem.response.add_onion + import stem.response.authchallenge + import stem.response.events + import stem.response.getinfo + import stem.response.getconf + import stem.response.mapaddress + import stem.response.protocolinfo + + if not isinstance(message, ControlMessage): + raise TypeError('Only able to convert stem.response.ControlMessage instances') + + response_types = { + 'ADD_ONION': stem.response.add_onion.AddOnionResponse, + 'AUTHCHALLENGE': stem.response.authchallenge.AuthChallengeResponse, + 'EVENT': stem.response.events.Event, + 'GETINFO': stem.response.getinfo.GetInfoResponse, + 'GETCONF': stem.response.getconf.GetConfResponse, + 'MAPADDRESS': stem.response.mapaddress.MapAddressResponse, + 'SINGLELINE': SingleLineResponse, + 'PROTOCOLINFO': stem.response.protocolinfo.ProtocolInfoResponse, + } + + try: + response_class = response_types[response_type] + except TypeError: + raise TypeError('Unsupported response type: %s' % response_type) + + message.__class__ = response_class + message._parse_message(**kwargs) + + +class ControlMessage(object): + """ + Message from the control socket. This is iterable and can be stringified for + individual message components stripped of protocol formatting. Messages are + never empty. + """ + + @staticmethod + def from_str(content, msg_type = None, **kwargs): + """ + Provides a ControlMessage for the given content. + + .. 
versionadded:: 1.1.0 + + :param str content: message to construct the message from + :param str msg_type: type of tor reply to parse the content as + :param kwargs: optional keyword arguments to be passed to the parser method + + :returns: stem.response.ControlMessage instance + """ + + msg = stem.socket.recv_message(StringIO(content)) + + if msg_type is not None: + convert(msg_type, msg, **kwargs) + + return msg + + def __init__(self, parsed_content, raw_content): + if not parsed_content: + raise ValueError("ControlMessages can't be empty") + + self._parsed_content = parsed_content + self._raw_content = raw_content + + def is_ok(self): + """ + Checks if any of our lines have a 250 response. + + :returns: **True** if any lines have a 250 response code, **False** otherwise + """ + + for code, _, _ in self._parsed_content: + if code == '250': + return True + + return False + + def content(self, get_bytes = False): + """ + Provides the parsed message content. These are entries of the form... + + :: + + (status_code, divider, content) + + **status_code** + Three character code for the type of response (defined in section 4 of + the control-spec). + + **divider** + Single character to indicate if this is mid-reply, data, or an end to the + message (defined in section 2.3 of the control-spec). + + **content** + The following content is the actual payload of the line. + + For data entries the content is the full multi-line payload with newline + linebreaks and leading periods unescaped. + + The **status_code** and **divider** are both strings (**bytes** in python + 2.x and **unicode** in python 3.x). The **content** however is **bytes** if + **get_bytes** is **True**. + + .. versionchanged:: 1.1.0 + Added the get_bytes argument. + + :param bool get_bytes: provides **bytes** for the **content** rather than a **str** + + :returns: **list** of (str, str, str) tuples for the components of this message + """ + + if stem.prereq.is_python_3() and not get_bytes: + return [(code, div, stem.util.str_tools._to_unicode(content)) for (code, div, content) in self._parsed_content] + else: + return list(self._parsed_content) + + def raw_content(self, get_bytes = False): + """ + Provides the unparsed content read from the control socket. + + .. versionchanged:: 1.1.0 + Added the get_bytes argument. + + :param bool get_bytes: if **True** then this provides **bytes** rather than a **str** + + :returns: **str** of the socket data used to generate this message + """ + + if stem.prereq.is_python_3() and not get_bytes: + return stem.util.str_tools._to_unicode(self._raw_content) + else: + return self._raw_content + + def __str__(self): + """ + Content of the message, stripped of status code and divider protocol + formatting. + """ + + return '\n'.join(list(self)) + + def __iter__(self): + """ + Provides :class:`~stem.response.ControlLine` instances for the content of + the message. This is stripped of status codes and dividers, for instance... + + :: + + 250+info/names= + desc/id/* -- Router descriptors by ID. + desc/name/* -- Router descriptors by nickname. + . + 250 OK + + Would provide two entries... + + :: + + 1st - "info/names= + desc/id/* -- Router descriptors by ID. + desc/name/* -- Router descriptors by nickname." 
+ 2nd - "OK" + """ + + for _, _, content in self._parsed_content: + if stem.prereq.is_python_3(): + content = stem.util.str_tools._to_unicode(content) + + yield ControlLine(content) + + def __len__(self): + """ + :returns: number of ControlLines + """ + + return len(self._parsed_content) + + def __getitem__(self, index): + """ + :returns: :class:`~stem.response.ControlLine` at the index + """ + + content = self._parsed_content[index][2] + + if stem.prereq.is_python_3(): + content = stem.util.str_tools._to_unicode(content) + + return ControlLine(content) + + +class ControlLine(str): + """ + String subclass that represents a line of controller output. This behaves as + a normal string with additional methods for parsing and popping entries from + a space delimited series of elements like a stack. + + None of these additional methods effect ourselves as a string (which is still + immutable). All methods are thread safe. + """ + + def __new__(self, value): + return str.__new__(self, value) + + def __init__(self, value): + self._remainder = value + self._remainder_lock = threading.RLock() + + def remainder(self): + """ + Provides our unparsed content. This is an empty string after we've popped + all entries. + + :returns: **str** of the unparsed content + """ + + return self._remainder + + def is_empty(self): + """ + Checks if we have further content to pop or not. + + :returns: **True** if we have additional content, **False** otherwise + """ + + return self._remainder == '' + + def is_next_quoted(self, escaped = False): + """ + Checks if our next entry is a quoted value or not. + + :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + + :returns: **True** if the next entry can be parsed as a quoted value, **False** otherwise + """ + + start_quote, end_quote = _get_quote_indices(self._remainder, escaped) + return start_quote == 0 and end_quote != -1 + + def is_next_mapping(self, key = None, quoted = False, escaped = False): + """ + Checks if our next entry is a KEY=VALUE mapping or not. + + :param str key: checks that the key matches this value, skipping the check if **None** + :param bool quoted: checks that the mapping is to a quoted value + :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + + :returns: **True** if the next entry can be parsed as a key=value mapping, + **False** otherwise + """ + + remainder = self._remainder # temp copy to avoid locking + key_match = KEY_ARG.match(remainder) + + if key_match: + if key and key != key_match.groups()[0]: + return False + + if quoted: + # checks that we have a quoted value and that it comes after the 'key=' + start_quote, end_quote = _get_quote_indices(remainder, escaped) + return start_quote == key_match.end() and end_quote != -1 + else: + return True # we just needed to check for the key + else: + return False # doesn't start with a key + + def peek_key(self): + """ + Provides the key of the next entry, providing **None** if it isn't a + key/value mapping. + + :returns: **str** with the next entry's key + """ + + remainder = self._remainder + key_match = KEY_ARG.match(remainder) + + if key_match: + return key_match.groups()[0] + else: + return None + + def pop(self, quoted = False, escaped = False): + """ + Parses the next space separated entry, removing it and the space from our + remaining content. Examples... + + :: + + >>> line = ControlLine("\\"We're all mad here.\\" says the grinning cat.") + >>> print line.pop(True) + "We're all mad here." 
+ >>> print line.pop() + "says" + >>> print line.remainder() + "the grinning cat." + + >>> line = ControlLine("\\"this has a \\\\\\" and \\\\\\\\ in it\\" foo=bar more_data") + >>> print line.pop(True, True) + "this has a \\" and \\\\ in it" + + :param bool quoted: parses the next entry as a quoted value, removing the quotes + :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + + :returns: **str** of the next space separated entry + + :raises: + * **ValueError** if quoted is True without the value being quoted + * **IndexError** if we don't have any remaining content left to parse + """ + + with self._remainder_lock: + next_entry, remainder = _parse_entry(self._remainder, quoted, escaped) + self._remainder = remainder + return next_entry + + def pop_mapping(self, quoted = False, escaped = False): + """ + Parses the next space separated entry as a KEY=VALUE mapping, removing it + and the space from our remaining content. + + :param bool quoted: parses the value as being quoted, removing the quotes + :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + + :returns: **tuple** of the form (key, value) + + :raises: **ValueError** if this isn't a KEY=VALUE mapping or if quoted is + **True** without the value being quoted + :raises: **IndexError** if there's nothing to parse from the line + """ + + with self._remainder_lock: + if self.is_empty(): + raise IndexError('no remaining content to parse') + + key_match = KEY_ARG.match(self._remainder) + + if not key_match: + raise ValueError("the next entry isn't a KEY=VALUE mapping: " + self._remainder) + + # parse off the key + key = key_match.groups()[0] + remainder = self._remainder[key_match.end():] + + next_entry, remainder = _parse_entry(remainder, quoted, escaped) + self._remainder = remainder + return (key, next_entry) + + +def _parse_entry(line, quoted, escaped): + """ + Parses the next entry from the given space separated content. + + :param str line: content to be parsed + :param bool quoted: parses the next entry as a quoted value, removing the quotes + :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + + :returns: **tuple** of the form (entry, remainder) + + :raises: + * **ValueError** if quoted is True without the next value being quoted + * **IndexError** if there's nothing to parse from the line + """ + + if line == '': + raise IndexError('no remaining content to parse') + + next_entry, remainder = '', line + + if quoted: + # validate and parse the quoted value + start_quote, end_quote = _get_quote_indices(remainder, escaped) + + if start_quote != 0 or end_quote == -1: + raise ValueError("the next entry isn't a quoted value: " + line) + + next_entry, remainder = remainder[1:end_quote], remainder[end_quote + 1:] + else: + # non-quoted value, just need to check if there's more data afterward + if ' ' in remainder: + next_entry, remainder = remainder.split(' ', 1) + else: + next_entry, remainder = remainder, '' + + if escaped: + next_entry = _unescape(next_entry) + + return (next_entry, remainder.lstrip()) + + +def _get_quote_indices(line, escaped): + """ + Provides the indices of the next two quotes in the given content. 
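+
+  For example (illustrative)...
+
+  ::
+
+    >>> _get_quote_indices('"foo" bar', False)
+    (0, 4)
+
+    >>> _get_quote_indices('hello "world"', False)
+    (6, 12)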
+ + :param str line: content to be parsed + :param bool escaped: unescapes the CONTROL_ESCAPES escape sequences + + :returns: **tuple** of two ints, indices being -1 if a quote doesn't exist + """ + + indices, quote_index = [], -1 + + for _ in range(2): + quote_index = line.find('"', quote_index + 1) + + # if we have escapes then we need to skip any r'\"' entries + if escaped: + # skip check if index is -1 (no match) or 0 (first character) + while quote_index >= 1 and line[quote_index - 1] == '\\': + quote_index = line.find('"', quote_index + 1) + + indices.append(quote_index) + + return tuple(indices) + + +def _unescape(entry): + # Unescapes the given string with the mappings in CONTROL_ESCAPES. + # + # This can't be a simple series of str.replace() calls because replacements + # need to be excluded from consideration for further unescaping. For + # instance, '\\t' should be converted to '\t' rather than a tab. + + def _pop_with_unescape(entry): + # Pop either the first character or the escape sequence conversion the + # entry starts with. This provides a tuple of... + # + # (unescaped prefix, remaining entry) + + for esc_sequence, replacement in CONTROL_ESCAPES.items(): + if entry.startswith(esc_sequence): + return (replacement, entry[len(esc_sequence):]) + + return (entry[0], entry[1:]) + + result = [] + + while entry: + prefix, entry = _pop_with_unescape(entry) + result.append(prefix) + + return ''.join(result) + + +class SingleLineResponse(ControlMessage): + """ + Reply to a request that performs an action rather than querying data. These + requests only contain a single line, which is 'OK' if successful, and a + description of the problem if not. + + :var str code: status code for our line + :var str message: content of the line + """ + + def is_ok(self, strict = False): + """ + Checks if the response code is "250". If strict is **True** then this + checks if the response is "250 OK" + + :param bool strict: checks for a "250 OK" message if **True** + + :returns: + * If strict is **False**: **True** if the response code is "250", **False** otherwise + * If strict is **True**: **True** if the response is "250 OK", **False** otherwise + """ + + if strict: + return self.content()[0] == ('250', ' ', 'OK') + + return self.content()[0][0] == '250' + + def _parse_message(self): + content = self.content() + + if len(content) > 1: + raise stem.ProtocolError('Received multi-line response') + elif len(content) == 0: + raise stem.ProtocolError('Received empty response') + else: + self.code, _, self.message = content[0] diff --git a/Shared/lib/python3.4/site-packages/stem/response/add_onion.py b/Shared/lib/python3.4/site-packages/stem/response/add_onion.py new file mode 100644 index 0000000..1472668 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/response/add_onion.py @@ -0,0 +1,43 @@ +# Copyright 2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +import stem.response + + +class AddOnionResponse(stem.response.ControlMessage): + """ + ADD_ONION response. + + :var str service_id: hidden service address without the '.onion' suffix + :var str private_key: base64 encoded hidden service private key + :var str private_key_type: crypto used to generate the hidden service private + key (such as RSA1024) + """ + + def _parse_message(self): + # Example: + # 250-ServiceID=gfzprpioee3hoppz + # 250-PrivateKey=RSA1024:MIICXgIBAAKBgQDZvYVxv... 
+ # 250 OK + + self.service_id = None + self.private_key = None + self.private_key_type = None + + if not self.is_ok(): + raise stem.ProtocolError("ADD_ONION response didn't have an OK status: %s" % self) + + if not str(self).startswith('ServiceID='): + raise stem.ProtocolError('ADD_ONION response should start with the service id: %s' % self) + + for line in list(self): + if '=' in line: + key, value = line.split('=', 1) + + if key == 'ServiceID': + self.service_id = value + elif key == 'PrivateKey': + if ':' not in value: + raise stem.ProtocolError("ADD_ONION PrivateKey lines should be of the form 'PrivateKey=[type]:[key]: %s" % self) + + self.private_key_type, self.private_key = value.split(':', 1) diff --git a/Shared/lib/python3.4/site-packages/stem/response/authchallenge.py b/Shared/lib/python3.4/site-packages/stem/response/authchallenge.py new file mode 100644 index 0000000..60f3997 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/response/authchallenge.py @@ -0,0 +1,56 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +import binascii + +import stem.response +import stem.socket +import stem.util.str_tools +import stem.util.tor_tools + + +class AuthChallengeResponse(stem.response.ControlMessage): + """ + AUTHCHALLENGE query response. + + :var str server_hash: server hash provided by tor + :var str server_nonce: server nonce provided by tor + """ + + def _parse_message(self): + # Example: + # 250 AUTHCHALLENGE SERVERHASH=680A73C9836C4F557314EA1C4EDE54C285DB9DC89C83627401AEF9D7D27A95D5 SERVERNONCE=F8EA4B1F2C8B40EF1AF68860171605B910E3BBCABADF6FC3DB1FA064F4690E85 + + self.server_hash = None + self.server_nonce = None + + if not self.is_ok(): + raise stem.ProtocolError("AUTHCHALLENGE response didn't have an OK status:\n%s" % self) + elif len(self) > 1: + raise stem.ProtocolError('Received multiline AUTHCHALLENGE response:\n%s' % self) + + line = self[0] + + # sanity check that we're a AUTHCHALLENGE response + if not line.pop() == 'AUTHCHALLENGE': + raise stem.ProtocolError('Message is not an AUTHCHALLENGE response (%s)' % self) + + if line.is_next_mapping('SERVERHASH'): + value = line.pop_mapping()[1] + + if not stem.util.tor_tools.is_hex_digits(value, 64): + raise stem.ProtocolError('SERVERHASH has an invalid value: %s' % value) + + self.server_hash = binascii.a2b_hex(stem.util.str_tools._to_bytes(value)) + else: + raise stem.ProtocolError('Missing SERVERHASH mapping: %s' % line) + + if line.is_next_mapping('SERVERNONCE'): + value = line.pop_mapping()[1] + + if not stem.util.tor_tools.is_hex_digits(value, 64): + raise stem.ProtocolError('SERVERNONCE has an invalid value: %s' % value) + + self.server_nonce = binascii.a2b_hex(stem.util.str_tools._to_bytes(value)) + else: + raise stem.ProtocolError('Missing SERVERNONCE mapping: %s' % line) diff --git a/Shared/lib/python3.4/site-packages/stem/response/events.py b/Shared/lib/python3.4/site-packages/stem/response/events.py new file mode 100644 index 0000000..9c38649 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/response/events.py @@ -0,0 +1,1331 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +import io +import re +import time + +import stem +import stem.control +import stem.descriptor.router_status_entry +import stem.response +import stem.version + +from stem import str_type, int_type +from stem.util import connection, log, str_tools, tor_tools + +# Matches keyword=value arguments. 
This can't be a simple "(.*)=(.*)" pattern +# because some positional arguments, like circuit paths, can have an equal +# sign. + +KW_ARG = re.compile('^(.*) ([A-Za-z0-9_]+)=(\S*)$') +QUOTED_KW_ARG = re.compile('^(.*) ([A-Za-z0-9_]+)="(.*)"$') +CELL_TYPE = re.compile('^[a-z0-9_]+$') + + +class Event(stem.response.ControlMessage): + """ + Base for events we receive asynchronously, as described in section 4.1 of the + `control-spec + `_. + + :var str type: event type + :var int arrived_at: unix timestamp for when the message arrived + :var list positional_args: positional arguments of the event + :var dict keyword_args: key/value arguments of the event + """ + + _POSITIONAL_ARGS = () # attribute names for recognized positional arguments + _KEYWORD_ARGS = {} # map of 'keyword => attribute' for recognized attributes + _QUOTED = () # positional arguments that are quoted + _OPTIONALLY_QUOTED = () # positional arguments that may or may not be quoted + _SKIP_PARSING = False # skip parsing contents into our positional_args and keyword_args + _VERSION_ADDED = stem.version.Version('0.1.1.1-alpha') # minimum version with control-spec V1 event support + + def _parse_message(self, arrived_at = None): + if arrived_at is None: + arrived_at = int(time.time()) + + if not str(self).strip(): + raise stem.ProtocolError('Received a blank tor event. Events must at the very least have a type.') + + self.type = str(self).split()[0] + self.arrived_at = arrived_at + + # if we're a recognized event type then translate ourselves into that subclass + + if self.type in EVENT_TYPE_TO_CLASS: + self.__class__ = EVENT_TYPE_TO_CLASS[self.type] + + self.positional_args = [] + self.keyword_args = {} + + if not self._SKIP_PARSING: + self._parse_standard_attr() + + self._parse() + + def _parse_standard_attr(self): + """ + Most events are of the form... + 650 *( positional_args ) *( key "=" value ) + + This parses this standard format, populating our **positional_args** and + **keyword_args** attributes and creating attributes if it's in our event's + **_POSITIONAL_ARGS** and **_KEYWORD_ARGS**. + """ + + # Tor events contain some number of positional arguments followed by + # key/value mappings. Parsing keyword arguments from the end until we hit + # something that isn't a key/value mapping. The rest are positional. + + content = str(self) + + while True: + match = QUOTED_KW_ARG.match(content) + + if not match: + match = KW_ARG.match(content) + + if match: + content, keyword, value = match.groups() + self.keyword_args[keyword] = value + else: + break + + # Setting attributes for the fields that we recognize. 
+ + self.positional_args = content.split()[1:] + positional = list(self.positional_args) + + for attr_name in self._POSITIONAL_ARGS: + attr_value = None + + if positional: + if attr_name in self._QUOTED or (attr_name in self._OPTIONALLY_QUOTED and positional[0].startswith('"')): + attr_values = [positional.pop(0)] + + if not attr_values[0].startswith('"'): + raise stem.ProtocolError("The %s value should be quoted, but didn't have a starting quote: %s" % (attr_name, self)) + + while True: + if not positional: + raise stem.ProtocolError("The %s value should be quoted, but didn't have an ending quote: %s" % (attr_name, self)) + + attr_values.append(positional.pop(0)) + + if attr_values[-1].endswith('"'): + break + + attr_value = ' '.join(attr_values)[1:-1] + else: + attr_value = positional.pop(0) + + setattr(self, attr_name, attr_value) + + for controller_attr_name, attr_name in self._KEYWORD_ARGS.items(): + setattr(self, attr_name, self.keyword_args.get(controller_attr_name)) + + # method overwritten by our subclasses for special handling that they do + def _parse(self): + pass + + def _log_if_unrecognized(self, attr, attr_enum): + """ + Checks if an attribute exists in a given enumeration, logging a message if + it isn't. Attributes can either be for a string or collection of strings + + :param str attr: name of the attribute to check + :param stem.util.enum.Enum enum: enumeration to check against + """ + + attr_values = getattr(self, attr) + + if attr_values: + if isinstance(attr_values, (bytes, str_type)): + attr_values = [attr_values] + + for value in attr_values: + if value not in attr_enum: + log_id = 'event.%s.unknown_%s.%s' % (self.type.lower(), attr, value) + unrecognized_msg = "%s event had an unrecognized %s (%s). Maybe a new addition to the control protocol? Full Event: '%s'" % (self.type, attr, value, self) + log.log_once(log_id, log.INFO, unrecognized_msg) + + +class AddrMapEvent(Event): + """ + Event that indicates a new address mapping. + + The ADDRMAP event was one of the first Control Protocol V1 events and was + introduced in tor version 0.1.1.1-alpha. + + .. versionchanged:: 1.1.0 + Added the cached attribute. 
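+
+  An ADDRMAP event line looks roughly like the following (illustrative
+  example)...
+
+  ::
+
+    650 ADDRMAP example.com 1.2.3.4 "2015-11-23 22:13:53" EXPIRES="2015-11-23 21:13:53" CACHED="YES"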
+ + :var str hostname: address being resolved + :var str destination: destionation of the resolution, this is usually an ip, + but could be a hostname if TrackHostExits is enabled or **NONE** if the + resolution failed + :var datetime expiry: expiration time of the resolution in local time + :var str error: error code if the resolution failed + :var datetime utc_expiry: expiration time of the resolution in UTC + :var bool cached: **True** if the resolution will be kept until it expires, + **False** otherwise or **None** if undefined + """ + + _POSITIONAL_ARGS = ('hostname', 'destination', 'expiry') + _KEYWORD_ARGS = { + 'error': 'error', + 'EXPIRES': 'utc_expiry', + 'CACHED': 'cached', + } + _OPTIONALLY_QUOTED = ('expiry') + + def _parse(self): + if self.destination == '': + self.destination = None + + if self.expiry is not None: + if self.expiry == 'NEVER': + self.expiry = None + else: + try: + self.expiry = stem.util.str_tools._parse_timestamp(self.expiry) + except ValueError: + raise stem.ProtocolError('Unable to parse date in ADDRMAP event: %s' % self) + + if self.utc_expiry is not None: + self.utc_expiry = stem.util.str_tools._parse_timestamp(self.utc_expiry) + + if self.cached is not None: + if self.cached == 'YES': + self.cached = True + elif self.cached == 'NO': + self.cached = False + else: + raise stem.ProtocolError("An ADDRMAP event's CACHED mapping can only be 'YES' or 'NO': %s" % self) + + +class AuthDirNewDescEvent(Event): + """ + Event specific to directory authorities, indicating that we just received new + descriptors. The descriptor type contained within this event is unspecified + so the descriptor contents are left unparsed. + + The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha. + + :var stem.AuthDescriptorAction action: what is being done with the descriptor + :var str message: explanation of why we chose this action + :var str descriptor: content of the descriptor + """ + + _SKIP_PARSING = True + _VERSION_ADDED = stem.version.Requirement.EVENT_AUTHDIR_NEWDESCS + + def _parse(self): + lines = str(self).split('\n') + + if len(lines) < 5: + raise stem.ProtocolError("AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating 'OK'") + elif lines[-1] != 'OK': + raise stem.ProtocolError("AUTHDIR_NEWDESCS doesn't end with an 'OK'") + + # TODO: For stem 2.0.0 we should consider changing 'descriptor' to a + # ServerDescriptor instance. + + self.action = lines[1] + self.message = lines[2] + self.descriptor = '\n'.join(lines[3:-1]) + + +class BandwidthEvent(Event): + """ + Event emitted every second with the bytes sent and received by tor. + + The BW event was one of the first Control Protocol V1 events and was + introduced in tor version 0.1.1.1-alpha. + + :var long read: bytes received by tor that second + :var long written: bytes sent by tor that second + """ + + _POSITIONAL_ARGS = ('read', 'written') + + def _parse(self): + if not self.read: + raise stem.ProtocolError('BW event is missing its read value') + elif not self.written: + raise stem.ProtocolError('BW event is missing its written value') + elif not self.read.isdigit() or not self.written.isdigit(): + raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self) + + self.read = int_type(self.read) + self.written = int_type(self.written) + + +class BuildTimeoutSetEvent(Event): + """ + Event indicating that the timeout value for a circuit has changed. 
This was + first added in tor version 0.2.2.7. + + The BUILDTIMEOUT_SET event was introduced in tor version 0.2.2.7-alpha. + + :var stem.TimeoutSetType set_type: way in which the timeout is changing + :var int total_times: circuit build times tor used to determine the timeout + :var int timeout: circuit timeout value in milliseconds + :var int xm: Pareto parameter Xm in milliseconds + :var float alpha: Pareto parameter alpha + :var float quantile: CDF quantile cutoff point + :var float timeout_rate: ratio of circuits that have time out + :var int close_timeout: duration to keep measurement circuits in milliseconds + :var float close_rate: ratio of measurement circuits that are closed + """ + + _POSITIONAL_ARGS = ('set_type',) + _KEYWORD_ARGS = { + 'TOTAL_TIMES': 'total_times', + 'TIMEOUT_MS': 'timeout', + 'XM': 'xm', + 'ALPHA': 'alpha', + 'CUTOFF_QUANTILE': 'quantile', + 'TIMEOUT_RATE': 'timeout_rate', + 'CLOSE_MS': 'close_timeout', + 'CLOSE_RATE': 'close_rate', + } + _VERSION_ADDED = stem.version.Requirement.EVENT_BUILDTIMEOUT_SET + + def _parse(self): + # convert our integer and float parameters + + for param in ('total_times', 'timeout', 'xm', 'close_timeout'): + param_value = getattr(self, param) + + if param_value is not None: + try: + setattr(self, param, int(param_value)) + except ValueError: + raise stem.ProtocolError('The %s of a BUILDTIMEOUT_SET should be an integer: %s' % (param, self)) + + for param in ('alpha', 'quantile', 'timeout_rate', 'close_rate'): + param_value = getattr(self, param) + + if param_value is not None: + try: + setattr(self, param, float(param_value)) + except ValueError: + raise stem.ProtocolError('The %s of a BUILDTIMEOUT_SET should be a float: %s' % (param, self)) + + self._log_if_unrecognized('set_type', stem.TimeoutSetType) + + +class CircuitEvent(Event): + """ + Event that indicates that a circuit has changed. + + The fingerprint or nickname values in our 'path' may be **None** if the + VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor + version 0.1.2.2, and on by default after 0.2.2.1. + + The CIRC event was one of the first Control Protocol V1 events and was + introduced in tor version 0.1.1.1-alpha. + + .. versionchanged:: 1.4.0 + Added the socks_username and socks_password attributes which is used for + `stream isolation + `_. 
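+
+  A CIRC event line looks roughly like the following (illustrative example)...
+
+  ::
+
+    650 CIRC 4 LAUNCHED BUILD_FLAGS=NEED_CAPACITY PURPOSE=GENERAL TIME_CREATED=2012-12-06T13:51:11.433755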
+ + :var str id: circuit identifier + :var stem.CircStatus status: reported status for the circuit + :var tuple path: relays involved in the circuit, these are + **(fingerprint, nickname)** tuples + :var tuple build_flags: :data:`~stem.CircBuildFlag` attributes + governing how the circuit is built + :var stem.CircPurpose purpose: purpose that the circuit is intended for + :var stem.HiddenServiceState hs_state: status if this is a hidden service circuit + :var str rend_query: circuit's rendezvous-point if this is hidden service related + :var datetime created: time when the circuit was created or cannibalized + :var stem.CircClosureReason reason: reason for the circuit to be closed + :var stem.CircClosureReason remote_reason: remote side's reason for the circuit to be closed + :var str socks_username: username for using this circuit + :var str socks_password: password for using this circuit + """ + + _POSITIONAL_ARGS = ('id', 'status', 'path') + _KEYWORD_ARGS = { + 'BUILD_FLAGS': 'build_flags', + 'PURPOSE': 'purpose', + 'HS_STATE': 'hs_state', + 'REND_QUERY': 'rend_query', + 'TIME_CREATED': 'created', + 'REASON': 'reason', + 'REMOTE_REASON': 'remote_reason', + 'SOCKS_USERNAME': 'socks_username', + 'SOCKS_PASSWORD': 'socks_password', + } + + def _parse(self): + self.path = tuple(stem.control._parse_circ_path(self.path)) + + if self.build_flags is not None: + self.build_flags = tuple(self.build_flags.split(',')) + + if self.created is not None: + try: + self.created = str_tools._parse_iso_timestamp(self.created) + except ValueError as exc: + raise stem.ProtocolError('Unable to parse create date (%s): %s' % (exc, self)) + + if not tor_tools.is_valid_circuit_id(self.id): + raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) + + self._log_if_unrecognized('status', stem.CircStatus) + self._log_if_unrecognized('build_flags', stem.CircBuildFlag) + self._log_if_unrecognized('purpose', stem.CircPurpose) + self._log_if_unrecognized('hs_state', stem.HiddenServiceState) + self._log_if_unrecognized('reason', stem.CircClosureReason) + self._log_if_unrecognized('remote_reason', stem.CircClosureReason) + + def _compare(self, other, method): + if not isinstance(other, CircuitEvent): + return False + + for attr in ('id', 'status', 'path', 'build_flags', 'purpose', 'hs_state', 'rend_query', 'created', 'reason', 'remote_reason', 'socks_username', 'socks_port'): + my_attr = getattr(self, attr) + other_attr = getattr(other, attr) + + # Our id attribute is technically a string, but Tor conventionally uses + # ints. Attempt to handle as ints if that's the case so we get numeric + # ordering. + + if attr == 'id' and my_attr and other_attr: + if my_attr.isdigit() and other_attr.isdigit(): + my_attr = int(my_attr) + other_attr = int(other_attr) + + if my_attr is None: + my_attr = '' + + if other_attr is None: + other_attr = '' + + if my_attr != other_attr: + return method(my_attr, other_attr) + + return True + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __gt__(self, other): + return self._compare(other, lambda s, o: s > o) + + def __ge__(self, other): + return self._compare(other, lambda s, o: s >= o) + + +class CircMinorEvent(Event): + """ + Event providing information about minor changes in our circuits. This was + first added in tor version 0.2.3.11. + + The CIRC_MINOR event was introduced in tor version 0.2.3.11-alpha. 
+ + :var str id: circuit identifier + :var stem.CircEvent event: type of change in the circuit + :var tuple path: relays involved in the circuit, these are + **(fingerprint, nickname)** tuples + :var tuple build_flags: :data:`~stem.CircBuildFlag` attributes + governing how the circuit is built + :var stem.CircPurpose purpose: purpose that the circuit is intended for + :var stem.HiddenServiceState hs_state: status if this is a hidden service circuit + :var str rend_query: circuit's rendezvous-point if this is hidden service related + :var datetime created: time when the circuit was created or cannibalized + :var stem.CircPurpose old_purpose: prior purpose for the circuit + :var stem.HiddenServiceState old_hs_state: prior status as a hidden service circuit + """ + + _POSITIONAL_ARGS = ('id', 'event', 'path') + _KEYWORD_ARGS = { + 'BUILD_FLAGS': 'build_flags', + 'PURPOSE': 'purpose', + 'HS_STATE': 'hs_state', + 'REND_QUERY': 'rend_query', + 'TIME_CREATED': 'created', + 'OLD_PURPOSE': 'old_purpose', + 'OLD_HS_STATE': 'old_hs_state', + } + _VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_MINOR + + def _parse(self): + self.path = tuple(stem.control._parse_circ_path(self.path)) + + if self.build_flags is not None: + self.build_flags = tuple(self.build_flags.split(',')) + + if self.created is not None: + try: + self.created = str_tools._parse_iso_timestamp(self.created) + except ValueError as exc: + raise stem.ProtocolError('Unable to parse create date (%s): %s' % (exc, self)) + + if not tor_tools.is_valid_circuit_id(self.id): + raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) + + self._log_if_unrecognized('event', stem.CircEvent) + self._log_if_unrecognized('build_flags', stem.CircBuildFlag) + self._log_if_unrecognized('purpose', stem.CircPurpose) + self._log_if_unrecognized('hs_state', stem.HiddenServiceState) + self._log_if_unrecognized('old_purpose', stem.CircPurpose) + self._log_if_unrecognized('old_hs_state', stem.HiddenServiceState) + + +class ClientsSeenEvent(Event): + """ + Periodic event on bridge relays that provides a summary of our users. + + The CLIENTS_SEEN event was introduced in tor version 0.2.1.10-alpha. 
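+
+  A CLIENTS_SEEN event line looks roughly like the following (illustrative
+  example)...
+
+  ::
+
+    650 CLIENTS_SEEN TimeStarted="2008-12-25 23:50:43" CountrySummary=us=16,de=8,uk=8 IPVersions=v4=16,v6=40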
+ + :var datetime start_time: time in UTC that we started collecting these stats + :var dict locales: mapping of country codes to a rounded count for the number of users + :var dict ip_versions: mapping of ip protocols to a rounded count for the number of users + """ + + _KEYWORD_ARGS = { + 'TimeStarted': 'start_time', + 'CountrySummary': 'locales', + 'IPVersions': 'ip_versions', + } + _VERSION_ADDED = stem.version.Requirement.EVENT_CLIENTS_SEEN + + def _parse(self): + if self.start_time is not None: + self.start_time = stem.util.str_tools._parse_timestamp(self.start_time) + + if self.locales is not None: + locale_to_count = {} + + for entry in self.locales.split(','): + if '=' not in entry: + raise stem.ProtocolError("The CLIENTS_SEEN's CountrySummary should be a comma separated listing of '=' mappings: %s" % self) + + locale, count = entry.split('=', 1) + + if len(locale) != 2: + raise stem.ProtocolError("Locales should be a two character code, got '%s': %s" % (locale, self)) + elif not count.isdigit(): + raise stem.ProtocolError('Locale count was non-numeric (%s): %s' % (count, self)) + elif locale in locale_to_count: + raise stem.ProtocolError("CountrySummary had multiple mappings for '%s': %s" % (locale, self)) + + locale_to_count[locale] = int(count) + + self.locales = locale_to_count + + if self.ip_versions is not None: + protocol_to_count = {} + + for entry in self.ip_versions.split(','): + if '=' not in entry: + raise stem.ProtocolError("The CLIENTS_SEEN's IPVersions should be a comma separated listing of '=' mappings: %s" % self) + + protocol, count = entry.split('=', 1) + + if not count.isdigit(): + raise stem.ProtocolError('IP protocol count was non-numeric (%s): %s' % (count, self)) + + protocol_to_count[protocol] = int(count) + + self.ip_versions = protocol_to_count + + +class ConfChangedEvent(Event): + """ + Event that indicates that our configuration changed, either in response to a + SETCONF or RELOAD signal. + + The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha. + + :var dict config: mapping of configuration options to their new values + (**None** if the option is being unset) + """ + + _SKIP_PARSING = True + _VERSION_ADDED = stem.version.Requirement.EVENT_CONF_CHANGED + + def _parse(self): + self.config = {} + + # Skip first and last line since they're the header and footer. For + # instance... + # + # 650-CONF_CHANGED + # 650-ExitNodes=caerSidi + # 650-ExitPolicy + # 650-MaxCircuitDirtiness=20 + # 650 OK + + for line in str(self).splitlines()[1:-1]: + if '=' in line: + key, value = line.split('=', 1) + else: + key, value = line, None + + self.config[key] = value + + +class DescChangedEvent(Event): + """ + Event that indicates that our descriptor has changed. + + The DESCCHANGED event was introduced in tor version 0.1.2.2-alpha. + """ + + _VERSION_ADDED = stem.version.Requirement.EVENT_DESCCHANGED + + +class GuardEvent(Event): + """ + Event that indicates that our guard relays have changed. The 'endpoint' could + be either a... + + * fingerprint + * 'fingerprint=nickname' pair + + The derived 'endpoint_*' attributes are generally more useful. + + The GUARD event was introduced in tor version 0.1.2.5-alpha. 
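+
+  A GUARD event line looks roughly like the following (illustrative example)...
+
+  ::
+
+    650 GUARD ENTRY $36B5DBA788246E8369DBAF58577C6BC044A9A374 NEW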
+
+  :var stem.GuardType guard_type: purpose the guard relay is for
+  :var str endpoint: relay that the event concerns
+  :var str endpoint_fingerprint: endpoint's fingerprint
+  :var str endpoint_nickname: endpoint's nickname if it was provided
+  :var stem.GuardStatus status: status of the guard relay
+  """
+
+  _VERSION_ADDED = stem.version.Requirement.EVENT_GUARD
+  _POSITIONAL_ARGS = ('guard_type', 'endpoint', 'status')
+
+  def _parse(self):
+    self.endpoint_fingerprint = None
+    self.endpoint_nickname = None
+
+    try:
+      self.endpoint_fingerprint, self.endpoint_nickname = \
+        stem.control._parse_circ_entry(self.endpoint)
+    except stem.ProtocolError:
+      raise stem.ProtocolError("GUARD's endpoint doesn't match a ServerSpec: %s" % self)
+
+    self._log_if_unrecognized('guard_type', stem.GuardType)
+    self._log_if_unrecognized('status', stem.GuardStatus)
+
+
+class HSDescEvent(Event):
+  """
+  Event triggered when we fetch a hidden service descriptor that currently isn't in our cache.
+
+  The HS_DESC event was introduced in tor version 0.2.5.2-alpha.
+
+  .. versionadded:: 1.2.0
+
+  .. versionchanged:: 1.3.0
+    Added the reason attribute.
+
+  :var stem.HSDescAction action: what is happening with the descriptor
+  :var str address: hidden service address
+  :var stem.HSAuth authentication: service's authentication method
+  :var str directory: hidden service directory servicing the request
+  :var str directory_fingerprint: hidden service directory's fingerprint
+  :var str directory_nickname: hidden service directory's nickname if it was provided
+  :var str descriptor_id: descriptor identifier
+  :var stem.HSDescReason reason: reason the descriptor failed to be fetched
+  """
+
+  _VERSION_ADDED = stem.version.Requirement.EVENT_HS_DESC
+  _POSITIONAL_ARGS = ('action', 'address', 'authentication', 'directory', 'descriptor_id')
+  _KEYWORD_ARGS = {'REASON': 'reason'}
+
+  def _parse(self):
+    self.directory_fingerprint = None
+    self.directory_nickname = None
+
+    try:
+      self.directory_fingerprint, self.directory_nickname = \
+        stem.control._parse_circ_entry(self.directory)
+    except stem.ProtocolError:
+      raise stem.ProtocolError("HS_DESC's directory doesn't match a ServerSpec: %s" % self)
+
+    self._log_if_unrecognized('action', stem.HSDescAction)
+    self._log_if_unrecognized('authentication', stem.HSAuth)
+
+
+class HSDescContentEvent(Event):
+  """
+  Provides the content of hidden service descriptors we fetch.
+
+  The HS_DESC_CONTENT event was introduced in tor version 0.2.7.1-alpha.
+
+  ..
versionadded:: 1.4.0 + + :var str address: hidden service address + :var str descriptor_id: descriptor identifier + :var str directory: hidden service directory servicing the request + :var str directory_fingerprint: hidden service directory's finterprint + :var str directory_nickname: hidden service directory's nickname if it was provided + :var stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor descriptor: descriptor that was retrieved + """ + + _VERSION_ADDED = stem.version.Requirement.EVENT_HS_DESC_CONTENT + _POSITIONAL_ARGS = ('address', 'descriptor_id', 'directory') + + def _parse(self): + if self.address == 'UNKNOWN': + self.address = None + + self.directory_fingerprint = None + self.directory_nickname = None + + try: + self.directory_fingerprint, self.directory_nickname = \ + stem.control._parse_circ_entry(self.directory) + except stem.ProtocolError: + raise stem.ProtocolError("HS_DESC_CONTENT's directory doesn't match a ServerSpec: %s" % self) + + # skip the first line (our positional arguments) and last ('OK') + + desc_content = str_tools._to_bytes('\n'.join(str(self).splitlines()[1:-1])) + self.descriptor = None + + if desc_content: + self.descriptor = list(stem.descriptor.hidden_service_descriptor._parse_file(io.BytesIO(desc_content)))[0] + + +class LogEvent(Event): + """ + Tor logging event. These are the most visible kind of event since, by + default, tor logs at the NOTICE :data:`~stem.Runlevel` to stdout. + + The logging events were some of the first Control Protocol V1 events + and were introduced in tor version 0.1.1.1-alpha. + + :var stem.Runlevel runlevel: runlevel of the logged message + :var str message: logged message + """ + + _SKIP_PARSING = True + + def _parse(self): + self.runlevel = self.type + self._log_if_unrecognized('runlevel', stem.Runlevel) + + # message is our content, minus the runlevel and ending "OK" if a + # multi-line message + + self.message = str(self)[len(self.runlevel) + 1:].rstrip('\nOK') + + +class NetworkStatusEvent(Event): + """ + Event for when our copy of the consensus has changed. This was introduced in + tor version 0.1.2.3. + + The NS event was introduced in tor version 0.1.2.3-alpha. + + :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors + """ + + _SKIP_PARSING = True + _VERSION_ADDED = stem.version.Requirement.EVENT_NS + + def _parse(self): + content = str(self).lstrip('NS\n').rstrip('\nOK') + + # TODO: For stem 2.0.0 consider changing 'desc' to 'descriptors' to match + # our other events. + + self.desc = list(stem.descriptor.router_status_entry._parse_file( + io.BytesIO(str_tools._to_bytes(content)), + True, + entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3, + )) + + +class NewConsensusEvent(Event): + """ + Event for when we have a new consensus. This is similar to + :class:`~stem.response.events.NetworkStatusEvent`, except that it contains + the whole consensus so anything not listed is implicitly no longer + recommended. + + The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha. + + :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors + """ + + _SKIP_PARSING = True + _VERSION_ADDED = stem.version.Requirement.EVENT_NEWCONSENSUS + + def _parse(self): + content = str(self).lstrip('NEWCONSENSUS\n').rstrip('\nOK') + + # TODO: For stem 2.0.0 consider changing 'desc' to 'descriptors' to match + # our other events. 
+ + self.desc = list(stem.descriptor.router_status_entry._parse_file( + io.BytesIO(str_tools._to_bytes(content)), + True, + entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3, + )) + + +class NewDescEvent(Event): + """ + Event that indicates that a new descriptor is available. + + The fingerprint or nickname values in our 'relays' may be **None** if the + VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor + version 0.1.2.2, and on by default after 0.2.2.1. + + The NEWDESC event was one of the first Control Protocol V1 events and was + introduced in tor version 0.1.1.1-alpha. + + :var tuple relays: **(fingerprint, nickname)** tuples for the relays with + new descriptors + """ + + def _parse(self): + self.relays = tuple([stem.control._parse_circ_entry(entry) for entry in str(self).split()[1:]]) + + +class ORConnEvent(Event): + """ + Event that indicates a change in a relay connection. The 'endpoint' could be + any of several things including a... + + * fingerprint + * nickname + * 'fingerprint=nickname' pair + * address:port + + The derived 'endpoint_*' attributes are generally more useful. + + The ORCONN event was one of the first Control Protocol V1 events and was + introduced in tor version 0.1.1.1-alpha. Its id attribute was added in + version 0.2.5.2-alpha. + + .. versionchanged:: 1.2.0 + Added the id attribute. + + :var str id: connection identifier + :var str endpoint: relay that the event concerns + :var str endpoint_fingerprint: endpoint's finterprint if it was provided + :var str endpoint_nickname: endpoint's nickname if it was provided + :var str endpoint_address: endpoint's address if it was provided + :var int endpoint_port: endpoint's port if it was provided + :var stem.ORStatus status: state of the connection + :var stem.ORClosureReason reason: reason for the connection to be closed + :var int circ_count: number of established and pending circuits + """ + + _POSITIONAL_ARGS = ('endpoint', 'status') + _KEYWORD_ARGS = { + 'REASON': 'reason', + 'NCIRCS': 'circ_count', + 'ID': 'id', + } + + def _parse(self): + self.endpoint_fingerprint = None + self.endpoint_nickname = None + self.endpoint_address = None + self.endpoint_port = None + + try: + self.endpoint_fingerprint, self.endpoint_nickname = \ + stem.control._parse_circ_entry(self.endpoint) + except stem.ProtocolError: + if ':' not in self.endpoint: + raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self) + + address, port = self.endpoint.split(':', 1) + + if not connection.is_valid_port(port): + raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self) + + self.endpoint_address = address + self.endpoint_port = int(port) + + if self.circ_count is not None: + if not self.circ_count.isdigit(): + raise stem.ProtocolError('ORCONN event got a non-numeric circuit count (%s): %s' % (self.circ_count, self)) + + self.circ_count = int(self.circ_count) + + if self.id and not tor_tools.is_valid_connection_id(self.id): + raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) + + self._log_if_unrecognized('status', stem.ORStatus) + self._log_if_unrecognized('reason', stem.ORClosureReason) + + +class SignalEvent(Event): + """ + Event that indicates that tor has received and acted upon a signal being sent + to the process. As of tor version 0.2.4.6 the only signals conveyed by this + event are... 
+
+  * RELOAD
+  * DUMP
+  * DEBUG
+  * NEWNYM
+  * CLEARDNSCACHE
+
+  The SIGNAL event was introduced in tor version 0.2.3.1-alpha.
+
+  :var stem.Signal signal: signal that tor received
+  """
+
+  _POSITIONAL_ARGS = ('signal',)
+  _VERSION_ADDED = stem.version.Requirement.EVENT_SIGNAL
+
+  def _parse(self):
+    # log if we received an unrecognized signal
+    expected_signals = (
+      stem.Signal.RELOAD,
+      stem.Signal.DUMP,
+      stem.Signal.DEBUG,
+      stem.Signal.NEWNYM,
+      stem.Signal.CLEARDNSCACHE,
+    )
+
+    self._log_if_unrecognized('signal', expected_signals)
+
+
+class StatusEvent(Event):
+  """
+  Notification of a change in tor's state. These are generally triggered for
+  the same sort of things as log messages of the NOTICE level or higher.
+  However, unlike :class:`~stem.response.events.LogEvent` these contain well
+  formed data.
+
+  The STATUS_GENERAL, STATUS_CLIENT, STATUS_SERVER events were introduced
+  in tor version 0.1.2.3-alpha.
+
+  :var stem.StatusType status_type: category of the status event
+  :var stem.Runlevel runlevel: runlevel of the logged message
+  :var str action: activity that caused this message
+  :var dict arguments: attributes about the event
+  """
+
+  _POSITIONAL_ARGS = ('runlevel', 'action')
+  _VERSION_ADDED = stem.version.Requirement.EVENT_STATUS
+
+  def _parse(self):
+    if self.type == 'STATUS_GENERAL':
+      self.status_type = stem.StatusType.GENERAL
+    elif self.type == 'STATUS_CLIENT':
+      self.status_type = stem.StatusType.CLIENT
+    elif self.type == 'STATUS_SERVER':
+      self.status_type = stem.StatusType.SERVER
+    else:
+      raise ValueError("BUG: Unrecognized status type (%s), likely an EVENT_TYPE_TO_CLASS addition without revising how 'status_type' is assigned." % self.type)
+
+    # Just an alias for our parent class' keyword_args since that already
+    # parses these for us. Unlike our other event types Tor commonly supplies
+    # arbitrary key/value pairs for these, so making an alias here to better
+    # draw attention that the StatusEvent will likely have them.
+
+    self.arguments = self.keyword_args
+
+    self._log_if_unrecognized('runlevel', stem.Runlevel)
+
+
+class StreamEvent(Event):
+  """
+  Event that indicates that a stream has changed.
+
+  The STREAM event was one of the first Control Protocol V1 events and was
+  introduced in tor version 0.1.1.1-alpha.
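+
+  For instance (an illustrative value rather than one captured from tor), a
+  stream whose target is '93.184.216.34:443' would be parsed as...
+
+  ::
+
+    target         => '93.184.216.34:443'
+    target_address => '93.184.216.34'
+    target_port    => 443
+    circ_id        => None (when the stream isn't attached to a circuit)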
+
+  :var str id: stream identifier
+  :var stem.StreamStatus status: reported status for the stream
+  :var str circ_id: circuit that the stream is attached to, this is **None** if
+    the stream is unattached
+  :var str target: destination of the stream
+  :var str target_address: destination address (ip, hostname, or '(Tor_internal)')
+  :var int target_port: destination port
+  :var stem.StreamClosureReason reason: reason for the stream to be closed
+  :var stem.StreamClosureReason remote_reason: remote side's reason for the stream to be closed
+  :var stem.StreamSource source: origin of the REMAP request
+  :var str source_addr: requester of the connection
+  :var str source_address: requester address (ip or hostname)
+  :var int source_port: requester port
+  :var stem.StreamPurpose purpose: purpose for the stream
+  """
+
+  _POSITIONAL_ARGS = ('id', 'status', 'circ_id', 'target')
+  _KEYWORD_ARGS = {
+    'REASON': 'reason',
+    'REMOTE_REASON': 'remote_reason',
+    'SOURCE': 'source',
+    'SOURCE_ADDR': 'source_addr',
+    'PURPOSE': 'purpose',
+  }
+
+  def _parse(self):
+    if self.target is None:
+      raise stem.ProtocolError("STREAM event didn't have a target: %s" % self)
+    else:
+      if ':' not in self.target:
+        raise stem.ProtocolError("Target location must be of the form 'address:port': %s" % self)
+
+      address, port = self.target.rsplit(':', 1)
+
+      if not connection.is_valid_port(port, allow_zero = True):
+        raise stem.ProtocolError("Target location's port is invalid: %s" % self)
+
+      self.target_address = address
+      self.target_port = int(port)
+
+    if self.source_addr is None:
+      self.source_address = None
+      self.source_port = None
+    else:
+      if ':' not in self.source_addr:
+        raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self)
+
+      address, port = self.source_addr.split(':', 1)
+
+      if not connection.is_valid_port(port, allow_zero = True):
+        raise stem.ProtocolError("Source location's port is invalid: %s" % self)
+
+      self.source_address = address
+      self.source_port = int(port)
+
+    # spec specifies a circ_id of zero if the stream is unattached
+
+    if self.circ_id == '0':
+      self.circ_id = None
+
+    self._log_if_unrecognized('reason', stem.StreamClosureReason)
+    self._log_if_unrecognized('remote_reason', stem.StreamClosureReason)
+    self._log_if_unrecognized('purpose', stem.StreamPurpose)
+
+
+class StreamBwEvent(Event):
+  """
+  Event (emitted approximately every second) with the bytes sent and received
+  by the application since the last such event on this stream.
+
+  The STREAM_BW event was introduced in tor version 0.1.2.8-beta.
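+
+  One possible use (a sketch assuming stem's Controller class and a reachable
+  control port on 9051, neither of which this module provides)...
+
+  ::
+
+    # assumes a local tor instance with 'ControlPort 9051' in its torrc
+
+    import collections
+
+    from stem.control import Controller, EventType
+
+    totals = collections.defaultdict(lambda: [0, 0])
+
+    def track_stream_bandwidth(event):
+      # index 0 tallies bytes written, index 1 bytes read, keyed by stream id
+      totals[event.id][0] += event.written
+      totals[event.id][1] += event.read
+
+    with Controller.from_port(port = 9051) as controller:
+      controller.authenticate()
+      controller.add_event_listener(track_stream_bandwidth, EventType.STREAM_BW)
+      input('press enter to stop ')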
+ + :var str id: stream identifier + :var long written: bytes sent by the application + :var long read: bytes received by the application + """ + + _POSITIONAL_ARGS = ('id', 'written', 'read') + _VERSION_ADDED = stem.version.Requirement.EVENT_STREAM_BW + + def _parse(self): + if not tor_tools.is_valid_stream_id(self.id): + raise stem.ProtocolError("Stream IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) + elif not self.written: + raise stem.ProtocolError('STREAM_BW event is missing its written value') + elif not self.read: + raise stem.ProtocolError('STREAM_BW event is missing its read value') + elif not self.read.isdigit() or not self.written.isdigit(): + raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self) + + self.read = int_type(self.read) + self.written = int_type(self.written) + + +class TransportLaunchedEvent(Event): + """ + Event triggered when a pluggable transport is launched. + + The TRANSPORT_LAUNCHED event was introduced in tor version 0.2.5.0-alpha. + + .. versionadded:: 1.1.0 + + :var str type: 'server' or 'client' + :var str name: name of the pluggable transport + :var str address: IPv4 or IPv6 address where the transport is listening for + connections + :var int port: port where the transport is listening for connections + """ + + _POSITIONAL_ARGS = ('type', 'name', 'address', 'port') + _VERSION_ADDED = stem.version.Requirement.EVENT_TRANSPORT_LAUNCHED + + def _parse(self): + if self.type not in ('server', 'client'): + raise stem.ProtocolError("Transport type should either be 'server' or 'client': %s" % self) + + if not connection.is_valid_ipv4_address(self.address) and \ + not connection.is_valid_ipv6_address(self.address): + raise stem.ProtocolError("Transport address isn't a valid IPv4 or IPv6 address: %s" % self) + + if not connection.is_valid_port(self.port): + raise stem.ProtocolError('Transport port is invalid: %s' % self) + + self.port = int(self.port) + + +class ConnectionBandwidthEvent(Event): + """ + Event emitted every second with the bytes sent and received by tor on a + per-connection basis. + + The CONN_BW event was introduced in tor version 0.2.5.2-alpha. + + .. 
versionadded:: 1.2.0 + + :var str id: connection identifier + :var stem.ConnectionType type: connection type + :var long read: bytes received by tor that second + :var long written: bytes sent by tor that second + """ + + _KEYWORD_ARGS = { + 'ID': 'id', + 'TYPE': 'type', + 'READ': 'read', + 'WRITTEN': 'written', + } + + _VERSION_ADDED = stem.version.Requirement.EVENT_CONN_BW + + def _parse(self): + if not self.id: + raise stem.ProtocolError('CONN_BW event is missing its id') + elif not self.type: + raise stem.ProtocolError('CONN_BW event is missing its type') + elif not self.read: + raise stem.ProtocolError('CONN_BW event is missing its read value') + elif not self.written: + raise stem.ProtocolError('CONN_BW event is missing its written value') + elif not self.read.isdigit() or not self.written.isdigit(): + raise stem.ProtocolError("A CONN_BW event's bytes sent and received should be a positive numeric value, received: %s" % self) + elif not tor_tools.is_valid_connection_id(self.id): + raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) + + self.read = int_type(self.read) + self.written = int_type(self.written) + + self._log_if_unrecognized('type', stem.ConnectionType) + + +class CircuitBandwidthEvent(Event): + """ + Event emitted every second with the bytes sent and received by tor on a + per-circuit basis. + + The CIRC_BW event was introduced in tor version 0.2.5.2-alpha. + + .. versionadded:: 1.2.0 + + :var str id: circuit identifier + :var long read: bytes received by tor that second + :var long written: bytes sent by tor that second + """ + + _KEYWORD_ARGS = { + 'ID': 'id', + 'READ': 'read', + 'WRITTEN': 'written', + } + + _VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_BW + + def _parse(self): + if not self.id: + raise stem.ProtocolError('CIRC_BW event is missing its id') + elif not self.read: + raise stem.ProtocolError('CIRC_BW event is missing its read value') + elif not self.written: + raise stem.ProtocolError('CIRC_BW event is missing its written value') + elif not self.read.isdigit() or not self.written.isdigit(): + raise stem.ProtocolError("A CIRC_BW event's bytes sent and received should be a positive numeric value, received: %s" % self) + elif not tor_tools.is_valid_circuit_id(self.id): + raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) + + self.read = int_type(self.read) + self.written = int_type(self.written) + + +class CellStatsEvent(Event): + """ + Event emitted every second with a count of the number of cells types broken + down by the circuit. **These events are only emitted if TestingTorNetwork is + set.** + + The CELL_STATS event was introduced in tor version 0.2.5.2-alpha. + + .. 
versionadded:: 1.2.0 + + :var str id: circuit identifier + :var str inbound_queue: inbound queue identifier + :var str inbound_connection: inbound connection identifier + :var dict inbound_added: mapping of added inbound cell types to their count + :var dict inbound_removed: mapping of removed inbound cell types to their count + :var dict inbound_time: mapping of inbound cell types to the time they took to write in milliseconds + :var str outbound_queue: outbound queue identifier + :var str outbound_connection: outbound connection identifier + :var dict outbound_added: mapping of added outbound cell types to their count + :var dict outbound_removed: mapping of removed outbound cell types to their count + :var dict outbound_time: mapping of outbound cell types to the time they took to write in milliseconds + """ + + _KEYWORD_ARGS = { + 'ID': 'id', + 'InboundQueue': 'inbound_queue', + 'InboundConn': 'inbound_connection', + 'InboundAdded': 'inbound_added', + 'InboundRemoved': 'inbound_removed', + 'InboundTime': 'inbound_time', + 'OutboundQueue': 'outbound_queue', + 'OutboundConn': 'outbound_connection', + 'OutboundAdded': 'outbound_added', + 'OutboundRemoved': 'outbound_removed', + 'OutboundTime': 'outbound_time', + } + + _VERSION_ADDED = stem.version.Requirement.EVENT_CELL_STATS + + def _parse(self): + if self.id and not tor_tools.is_valid_circuit_id(self.id): + raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) + elif self.inbound_queue and not tor_tools.is_valid_circuit_id(self.inbound_queue): + raise stem.ProtocolError("Queue IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.inbound_queue, self)) + elif self.inbound_connection and not tor_tools.is_valid_connection_id(self.inbound_connection): + raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.inbound_connection, self)) + elif self.outbound_queue and not tor_tools.is_valid_circuit_id(self.outbound_queue): + raise stem.ProtocolError("Queue IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.outbound_queue, self)) + elif self.outbound_connection and not tor_tools.is_valid_connection_id(self.outbound_connection): + raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.outbound_connection, self)) + + self.inbound_added = _parse_cell_type_mapping(self.inbound_added) + self.inbound_removed = _parse_cell_type_mapping(self.inbound_removed) + self.inbound_time = _parse_cell_type_mapping(self.inbound_time) + self.outbound_added = _parse_cell_type_mapping(self.outbound_added) + self.outbound_removed = _parse_cell_type_mapping(self.outbound_removed) + self.outbound_time = _parse_cell_type_mapping(self.outbound_time) + + +class TokenBucketEmptyEvent(Event): + """ + Event emitted when refilling an empty token bucket. **These events are only + emitted if TestingTorNetwork is set.** + + The TB_EMPTY event was introduced in tor version 0.2.5.2-alpha. + + .. 
versionadded:: 1.2.0 + + :var stem.TokenBucket bucket: bucket being refilled + :var str id: connection identifier + :var int read: time in milliseconds since the read bucket was last refilled + :var int written: time in milliseconds since the write bucket was last refilled + :var int last_refill: time in milliseconds the bucket has been empty since last refilled + """ + + _POSITIONAL_ARGS = ('bucket',) + _KEYWORD_ARGS = { + 'ID': 'id', + 'READ': 'read', + 'WRITTEN': 'written', + 'LAST': 'last_refill', + } + + _VERSION_ADDED = stem.version.Requirement.EVENT_TB_EMPTY + + def _parse(self): + if self.id and not tor_tools.is_valid_connection_id(self.id): + raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self)) + elif not self.read.isdigit(): + raise stem.ProtocolError("A TB_EMPTY's READ value should be a positive numeric value, received: %s" % self) + elif not self.written.isdigit(): + raise stem.ProtocolError("A TB_EMPTY's WRITTEN value should be a positive numeric value, received: %s" % self) + elif not self.last_refill.isdigit(): + raise stem.ProtocolError("A TB_EMPTY's LAST value should be a positive numeric value, received: %s" % self) + + self.read = int(self.read) + self.written = int(self.written) + self.last_refill = int(self.last_refill) + + self._log_if_unrecognized('bucket', stem.TokenBucket) + + +def _parse_cell_type_mapping(mapping): + """ + Parses a mapping of the form... + + key1:value1,key2:value2... + + ... in which keys are strings and values are integers. + + :param str mapping: value to be parsed + + :returns: dict of **str => int** mappings + + :rasies: **stem.ProtocolError** if unable to parse the mapping + """ + + if mapping is None: + return None + + results = {} + + for entry in mapping.split(','): + if ':' not in entry: + raise stem.ProtocolError("Mappings are expected to be of the form 'key:value', got '%s': %s" % (entry, mapping)) + + key, value = entry.split(':', 1) + + if not CELL_TYPE.match(key): + raise stem.ProtocolError("Key had invalid characters, got '%s': %s" % (key, mapping)) + elif not value.isdigit(): + raise stem.ProtocolError("Values should just be integers, got '%s': %s" % (value, mapping)) + + results[key] = int(value) + + return results + + +EVENT_TYPE_TO_CLASS = { + 'ADDRMAP': AddrMapEvent, + 'AUTHDIR_NEWDESCS': AuthDirNewDescEvent, + 'BUILDTIMEOUT_SET': BuildTimeoutSetEvent, + 'BW': BandwidthEvent, + 'CELL_STATS': CellStatsEvent, + 'CIRC': CircuitEvent, + 'CIRC_BW': CircuitBandwidthEvent, + 'CIRC_MINOR': CircMinorEvent, + 'CLIENTS_SEEN': ClientsSeenEvent, + 'CONF_CHANGED': ConfChangedEvent, + 'CONN_BW': ConnectionBandwidthEvent, + 'DEBUG': LogEvent, + 'DESCCHANGED': DescChangedEvent, + 'ERR': LogEvent, + 'GUARD': GuardEvent, + 'HS_DESC': HSDescEvent, + 'HS_DESC_CONTENT': HSDescContentEvent, + 'INFO': LogEvent, + 'NEWCONSENSUS': NewConsensusEvent, + 'NEWDESC': NewDescEvent, + 'NOTICE': LogEvent, + 'NS': NetworkStatusEvent, + 'ORCONN': ORConnEvent, + 'SIGNAL': SignalEvent, + 'STATUS_CLIENT': StatusEvent, + 'STATUS_GENERAL': StatusEvent, + 'STATUS_SERVER': StatusEvent, + 'STREAM': StreamEvent, + 'STREAM_BW': StreamBwEvent, + 'TB_EMPTY': TokenBucketEmptyEvent, + 'TRANSPORT_LAUNCHED': TransportLaunchedEvent, + 'WARN': LogEvent, + + # accounting for a bug in tor 0.2.0.22 + 'STATUS_SEVER': StatusEvent, +} diff --git a/Shared/lib/python3.4/site-packages/stem/response/getconf.py b/Shared/lib/python3.4/site-packages/stem/response/getconf.py new file mode 100644 index 0000000..ce14553 --- 
/dev/null +++ b/Shared/lib/python3.4/site-packages/stem/response/getconf.py @@ -0,0 +1,55 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +import stem.response +import stem.socket + + +class GetConfResponse(stem.response.ControlMessage): + """ + Reply for a GETCONF query. + + Note that configuration parameters won't match what we queried for if it's one + of the special mapping options (ex. 'HiddenServiceOptions'). + + :var dict entries: mapping between the config parameter (**str**) and their + values (**list** of **str**) + """ + + def _parse_message(self): + # Example: + # 250-CookieAuthentication=0 + # 250-ControlPort=9100 + # 250-DataDirectory=/home/neena/.tor + # 250 DirPort + + self.entries = {} + remaining_lines = list(self) + + if self.content() == [('250', ' ', 'OK')]: + return + + if not self.is_ok(): + unrecognized_keywords = [] + for code, _, line in self.content(): + if code == '552' and line.startswith('Unrecognized configuration key "') and line.endswith('"'): + unrecognized_keywords.append(line[32:-1]) + + if unrecognized_keywords: + raise stem.InvalidArguments('552', 'GETCONF request contained unrecognized keywords: %s' % ', '.join(unrecognized_keywords), unrecognized_keywords) + else: + raise stem.ProtocolError('GETCONF response contained a non-OK status code:\n%s' % self) + + while remaining_lines: + line = remaining_lines.pop(0) + + if line.is_next_mapping(): + key, value = line.split('=', 1) + else: + key, value = (line.pop(), None) + + if key not in self.entries: + self.entries[key] = [] + + if value is not None: + self.entries[key].append(value) diff --git a/Shared/lib/python3.4/site-packages/stem/response/getinfo.py b/Shared/lib/python3.4/site-packages/stem/response/getinfo.py new file mode 100644 index 0000000..0798593 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/response/getinfo.py @@ -0,0 +1,78 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +import stem.response +import stem.socket + + +class GetInfoResponse(stem.response.ControlMessage): + """ + Reply for a GETINFO query. + + :var dict entries: mapping between the queried options and their bytes values + """ + + def _parse_message(self): + # Example: + # 250-version=0.2.3.11-alpha-dev (git-ef0bc7f8f26a917c) + # 250+config-text= + # ControlPort 9051 + # DataDirectory /home/atagar/.tor + # ExitPolicy reject *:* + # Log notice stdout + # Nickname Unnamed + # ORPort 9050 + # . 
+ # 250 OK + + self.entries = {} + remaining_lines = [content for (code, div, content) in self.content(get_bytes = True)] + + if not self.is_ok() or not remaining_lines.pop() == b'OK': + unrecognized_keywords = [] + for code, _, line in self.content(): + if code == '552' and line.startswith('Unrecognized key "') and line.endswith('"'): + unrecognized_keywords.append(line[18:-1]) + + if unrecognized_keywords: + raise stem.InvalidArguments('552', 'GETINFO request contained unrecognized keywords: %s\n' % ', '.join(unrecognized_keywords), unrecognized_keywords) + else: + raise stem.ProtocolError("GETINFO response didn't have an OK status:\n%s" % self) + + while remaining_lines: + try: + key, value = remaining_lines.pop(0).split(b'=', 1) + except ValueError: + raise stem.ProtocolError('GETINFO replies should only contain parameter=value mappings:\n%s' % self) + + if stem.prereq.is_python_3(): + key = stem.util.str_tools._to_unicode(key) + + # if the value is a multiline value then it *must* be of the form + # '=\n' + + if b'\n' in value: + if not value.startswith(b'\n'): + raise stem.ProtocolError("GETINFO response contained a multi-line value that didn't start with a newline:\n%s" % self) + + value = value[1:] + + self.entries[key] = value + + def _assert_matches(self, params): + """ + Checks if we match a given set of parameters, and raise a ProtocolError if not. + + :param set params: parameters to assert that we contain + + :raises: + * :class:`stem.ProtocolError` if parameters don't match this response + """ + + reply_params = set(self.entries.keys()) + + if params != reply_params: + requested_label = ', '.join(params) + reply_label = ', '.join(reply_params) + + raise stem.ProtocolError("GETINFO reply doesn't match the parameters that we requested. Queried '%s' but got '%s'." % (requested_label, reply_label)) diff --git a/Shared/lib/python3.4/site-packages/stem/response/mapaddress.py b/Shared/lib/python3.4/site-packages/stem/response/mapaddress.py new file mode 100644 index 0000000..5d2b418 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/response/mapaddress.py @@ -0,0 +1,42 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +import stem.response +import stem.socket + + +class MapAddressResponse(stem.response.ControlMessage): + """ + Reply for a MAPADDRESS query. + Doesn't raise an exception unless no addresses were mapped successfully. 
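+
+  For example, a reply like the one sketched in _parse_message below...
+
+  ::
+
+    250-127.192.10.10=torproject.org
+    250 1.2.3.4=tor.freehaven.net
+
+  ... would leave **entries** as...
+
+  ::
+
+    {'127.192.10.10': 'torproject.org', '1.2.3.4': 'tor.freehaven.net'}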
+ + :var dict entries: mapping between the original and replacement addresses + + :raises: + * :class:`stem.OperationFailed` if Tor was unable to satisfy the request + * :class:`stem.InvalidRequest` if the addresses provided were invalid + """ + + def _parse_message(self): + # Example: + # 250-127.192.10.10=torproject.org + # 250 1.2.3.4=tor.freehaven.net + + if not self.is_ok(): + for code, _, message in self.content(): + if code == '512': + raise stem.InvalidRequest(code, message) + elif code == '451': + raise stem.OperationFailed(code, message) + else: + raise stem.ProtocolError('MAPADDRESS returned unexpected response code: %s', code) + + self.entries = {} + + for code, _, message in self.content(): + if code == '250': + try: + key, value = message.split('=', 1) + self.entries[key] = value + except ValueError: + raise stem.ProtocolError(None, "MAPADDRESS returned '%s', which isn't a mapping" % message) diff --git a/Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py b/Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py new file mode 100644 index 0000000..97201de --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/response/protocolinfo.py @@ -0,0 +1,122 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +import stem.response +import stem.socket +import stem.version + +from stem.connection import AuthMethod +from stem.util import log + + +class ProtocolInfoResponse(stem.response.ControlMessage): + """ + Version one PROTOCOLINFO query response. + + The protocol_version is the only mandatory data for a valid PROTOCOLINFO + response, so all other values are None if undefined or empty if a collection. + + :var int protocol_version: protocol version of the response + :var stem.version.Version tor_version: version of the tor process + :var tuple auth_methods: :data:`stem.connection.AuthMethod` types that tor will accept + :var tuple unknown_auth_methods: strings of unrecognized auth methods + :var str cookie_path: path of tor's authentication cookie + """ + + def _parse_message(self): + # Example: + # 250-PROTOCOLINFO 1 + # 250-AUTH METHODS=COOKIE COOKIEFILE="/home/atagar/.tor/control_auth_cookie" + # 250-VERSION Tor="0.2.1.30" + # 250 OK + + self.protocol_version = None + self.tor_version = None + self.auth_methods = () + self.unknown_auth_methods = () + self.cookie_path = None + + auth_methods, unknown_auth_methods = [], [] + remaining_lines = list(self) + + if not self.is_ok() or not remaining_lines.pop() == 'OK': + raise stem.ProtocolError("PROTOCOLINFO response didn't have an OK status:\n%s" % self) + + # sanity check that we're a PROTOCOLINFO response + if not remaining_lines[0].startswith('PROTOCOLINFO'): + raise stem.ProtocolError('Message is not a PROTOCOLINFO response:\n%s' % self) + + while remaining_lines: + line = remaining_lines.pop(0) + line_type = line.pop() + + if line_type == 'PROTOCOLINFO': + # Line format: + # FirstLine = "PROTOCOLINFO" SP PIVERSION CRLF + # PIVERSION = 1*DIGIT + + if line.is_empty(): + raise stem.ProtocolError("PROTOCOLINFO response's initial line is missing the protocol version: %s" % line) + + try: + self.protocol_version = int(line.pop()) + except ValueError: + raise stem.ProtocolError('PROTOCOLINFO response version is non-numeric: %s' % line) + + # The piversion really should be '1' but, according to the spec, tor + # does not necessarily need to provide the PROTOCOLINFO version that we + # requested. 
Log if it's something we aren't expecting but still make + # an effort to parse like a v1 response. + + if self.protocol_version != 1: + log.info("We made a PROTOCOLINFO version 1 query but got a version %i response instead. We'll still try to use it, but this may cause problems." % self.protocol_version) + elif line_type == 'AUTH': + # Line format: + # AuthLine = "250-AUTH" SP "METHODS=" AuthMethod *("," AuthMethod) + # *(SP "COOKIEFILE=" AuthCookieFile) CRLF + # AuthMethod = "NULL" / "HASHEDPASSWORD" / "COOKIE" + # AuthCookieFile = QuotedString + + # parse AuthMethod mapping + if not line.is_next_mapping('METHODS'): + raise stem.ProtocolError("PROTOCOLINFO response's AUTH line is missing its mandatory 'METHODS' mapping: %s" % line) + + for method in line.pop_mapping()[1].split(','): + if method == 'NULL': + auth_methods.append(AuthMethod.NONE) + elif method == 'HASHEDPASSWORD': + auth_methods.append(AuthMethod.PASSWORD) + elif method == 'COOKIE': + auth_methods.append(AuthMethod.COOKIE) + elif method == 'SAFECOOKIE': + auth_methods.append(AuthMethod.SAFECOOKIE) + else: + unknown_auth_methods.append(method) + message_id = 'stem.response.protocolinfo.unknown_auth_%s' % method + log.log_once(message_id, log.INFO, "PROTOCOLINFO response included a type of authentication that we don't recognize: %s" % method) + + # our auth_methods should have a single AuthMethod.UNKNOWN entry if + # any unknown authentication methods exist + if AuthMethod.UNKNOWN not in auth_methods: + auth_methods.append(AuthMethod.UNKNOWN) + + # parse optional COOKIEFILE mapping (quoted and can have escapes) + if line.is_next_mapping('COOKIEFILE', True, True): + self.cookie_path = line.pop_mapping(True, True)[1] + elif line_type == 'VERSION': + # Line format: + # VersionLine = "250-VERSION" SP "Tor=" TorVersion OptArguments CRLF + # TorVersion = QuotedString + + if not line.is_next_mapping('Tor', True): + raise stem.ProtocolError("PROTOCOLINFO response's VERSION line is missing its mandatory tor version mapping: %s" % line) + + try: + self.tor_version = stem.version.Version(line.pop_mapping(True)[1]) + except ValueError as exc: + raise stem.ProtocolError(exc) + else: + log.debug("Unrecognized PROTOCOLINFO line type '%s', ignoring it: %s" % (line_type, line)) + + self.auth_methods = tuple(auth_methods) + self.unknown_auth_methods = tuple(unknown_auth_methods) diff --git a/Shared/lib/python3.4/site-packages/stem/socket.py b/Shared/lib/python3.4/site-packages/stem/socket.py new file mode 100644 index 0000000..8024098 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/socket.py @@ -0,0 +1,663 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Supports communication with sockets speaking the Tor control protocol. This +allows us to send messages as basic strings, and receive responses as +:class:`~stem.response.ControlMessage` instances. + +**This module only consists of low level components, and is not intended for +users.** See our `tutorials <../tutorials.html>`_ and `Control Module +`_ if you're new to Stem and looking to get started. + +With that aside, these can still be used for raw socket communication with +Tor... 
+ +:: + + import stem + import stem.connection + import stem.socket + + if __name__ == '__main__': + try: + control_socket = stem.socket.ControlPort(port = 9051) + stem.connection.authenticate(control_socket) + except stem.SocketError as exc: + print 'Unable to connect to tor on port 9051: %s' % exc + sys.exit(1) + except stem.connection.AuthenticationFailure as exc: + print 'Unable to authenticate: %s' % exc + sys.exit(1) + + print "Issuing 'GETINFO version' query...\\n" + control_socket.send('GETINFO version') + print control_socket.recv() + +:: + + % python example.py + Issuing 'GETINFO version' query... + + version=0.2.4.10-alpha-dev (git-8be6058d8f31e578) + OK + +**Module Overview:** + +:: + + ControlSocket - Socket wrapper that speaks the tor control protocol. + |- ControlPort - Control connection via a port. + | |- get_address - provides the ip address of our socket + | +- get_port - provides the port of our socket + | + |- ControlSocketFile - Control connection via a local file socket. + | +- get_socket_path - provides the path of the socket we connect to + | + |- send - sends a message to the socket + |- recv - receives a ControlMessage from the socket + |- is_alive - reports if the socket is known to be closed + |- is_localhost - returns if the socket is for the local system or not + |- connect - connects a new socket + |- close - shuts down the socket + +- __enter__ / __exit__ - manages socket connection + + send_message - Writes a message to a control socket. + recv_message - Reads a ControlMessage from a control socket. + send_formatting - Performs the formatting expected from sent messages. +""" + +from __future__ import absolute_import + +import re +import socket +import threading +import time + +import stem.prereq +import stem.response +import stem.util.str_tools + +from stem.util import log + + +class ControlSocket(object): + """ + Wrapper for a socket connection that speaks the Tor control protocol. To the + better part this transparently handles the formatting for sending and + receiving complete messages. All methods are thread safe. + + Callers should not instantiate this class directly, but rather use subclasses + which are expected to implement the **_make_socket()** method. + """ + + def __init__(self): + self._socket, self._socket_file = None, None + self._is_alive = False + self._connection_time = 0.0 # time when we last connected or disconnected + + # Tracks sending and receiving separately. This should be safe, and doing + # so prevents deadlock where we block writes because we're waiting to read + # a message that isn't coming. + + self._send_lock = threading.RLock() + self._recv_lock = threading.RLock() + + def send(self, message, raw = False): + """ + Formats and sends a message to the control socket. For more information see + the :func:`~stem.socket.send_message` function. 
+ + :param str message: message to be formatted and sent to the socket + :param bool raw: leaves the message formatting untouched, passing it to the socket as-is + + :raises: + * :class:`stem.SocketError` if a problem arises in using the socket + * :class:`stem.SocketClosed` if the socket is known to be shut down + """ + + with self._send_lock: + try: + if not self.is_alive(): + raise stem.SocketClosed() + + send_message(self._socket_file, message, raw) + except stem.SocketClosed as exc: + # if send_message raises a SocketClosed then we should properly shut + # everything down + + if self.is_alive(): + self.close() + + raise exc + + def recv(self): + """ + Receives a message from the control socket, blocking until we've received + one. For more information see the :func:`~stem.socket.recv_message` function. + + :returns: :class:`~stem.response.ControlMessage` for the message received + + :raises: + * :class:`stem.ProtocolError` the content from the socket is malformed + * :class:`stem.SocketClosed` if the socket closes before we receive a complete message + """ + + with self._recv_lock: + try: + # makes a temporary reference to the _socket_file because connect() + # and close() may set or unset it + + socket_file = self._socket_file + + if not socket_file: + raise stem.SocketClosed() + + return recv_message(socket_file) + except stem.SocketClosed as exc: + # If recv_message raises a SocketClosed then we should properly shut + # everything down. However, there's a couple cases where this will + # cause deadlock... + # + # * this socketClosed was *caused by* a close() call, which is joining + # on our thread + # + # * a send() call that's currently in flight is about to call close(), + # also attempting to join on us + # + # To resolve this we make a non-blocking call to acquire the send lock. + # If we get it then great, we can close safely. If not then one of the + # above are in progress and we leave the close to them. + + if self.is_alive(): + if self._send_lock.acquire(False): + self.close() + self._send_lock.release() + + raise exc + + def is_alive(self): + """ + Checks if the socket is known to be closed. We won't be aware if it is + until we either use it or have explicitily shut it down. + + In practice a socket derived from a port knows about its disconnection + after a failed :func:`~stem.socket.ControlSocket.recv` call. Socket file + derived connections know after either a + :func:`~stem.socket.ControlSocket.send` or + :func:`~stem.socket.ControlSocket.recv`. + + This means that to have reliable detection for when we're disconnected + you need to continually pull from the socket (which is part of what the + :class:`~stem.control.BaseController` does). + + :returns: **bool** that's **True** if our socket is connected and **False** otherwise + """ + + return self._is_alive + + def is_localhost(self): + """ + Returns if the connection is for the local system or not. + + :returns: **bool** that's **True** if the connection is for the local host and **False** otherwise + """ + + return False + + def connection_time(self): + """ + Provides the unix timestamp for when our socket was either connected or + disconnected. That is to say, the time we connected if we're currently + connected and the time we disconnected if we're not connected. + + .. 
versionadded:: 1.3.0 + + :returns: **float** for when we last connected or disconnected, zero if + we've never connected + """ + + return self._connection_time + + def connect(self): + """ + Connects to a new socket, closing our previous one if we're already + attached. + + :raises: :class:`stem.SocketError` if unable to make a socket + """ + + with self._send_lock: + # Closes the socket if we're currently attached to one. Once we're no + # longer alive it'll be safe to acquire the recv lock because recv() + # calls no longer block (raising SocketClosed instead). + + if self.is_alive(): + self.close() + + with self._recv_lock: + self._socket = self._make_socket() + self._socket_file = self._socket.makefile(mode = 'rwb') + self._is_alive = True + self._connection_time = time.time() + + # It's possible for this to have a transient failure... + # SocketError: [Errno 4] Interrupted system call + # + # It's safe to retry, so give it another try if it fails. + + try: + self._connect() + except stem.SocketError: + self._connect() # single retry + + def close(self): + """ + Shuts down the socket. If it's already closed then this is a no-op. + """ + + with self._send_lock: + # Function is idempotent with one exception: we notify _close() if this + # is causing our is_alive() state to change. + + is_change = self.is_alive() + + if self._socket: + # if we haven't yet established a connection then this raises an error + # socket.error: [Errno 107] Transport endpoint is not connected + + try: + self._socket.shutdown(socket.SHUT_RDWR) + except socket.error: + pass + + # Suppressing unexpected exceptions from close. For instance, if the + # socket's file has already been closed then with python 2.7 that raises + # with... + # error: [Errno 32] Broken pipe + + try: + self._socket.close() + except: + pass + + if self._socket_file: + try: + self._socket_file.close() + except: + pass + + self._socket = None + self._socket_file = None + self._is_alive = False + self._connection_time = time.time() + + if is_change: + self._close() + + def _get_send_lock(self): + """ + The send lock is useful to classes that interact with us at a deep level + because it's used to lock :func:`stem.socket.ControlSocket.connect` / + :func:`stem.socket.ControlSocket.close`, and by extension our + :func:`stem.socket.ControlSocket.is_alive` state changes. + + :returns: **threading.RLock** that governs sending messages to our socket + and state changes + """ + + return self._send_lock + + def __enter__(self): + return self + + def __exit__(self, exit_type, value, traceback): + self.close() + + def _connect(self): + """ + Connection callback that can be overwritten by subclasses and wrappers. + """ + + pass + + def _close(self): + """ + Disconnection callback that can be overwritten by subclasses and wrappers. + """ + + pass + + def _make_socket(self): + """ + Constructs and connects new socket. This is implemented by subclasses. + + :returns: **socket.socket** for our configuration + + :raises: + * :class:`stem.SocketError` if unable to make a socket + * **NotImplementedError** if not implemented by a subclass + """ + + raise NotImplementedError('Unsupported Operation: this should be implemented by the ControlSocket subclass') + + +class ControlPort(ControlSocket): + """ + Control connection to tor. For more information see tor's ControlPort torrc + option. + """ + + def __init__(self, address = '127.0.0.1', port = 9051, connect = True): + """ + ControlPort constructor. 
+ + :param str address: ip address of the controller + :param int port: port number of the controller + :param bool connect: connects to the socket if True, leaves it unconnected otherwise + + :raises: :class:`stem.SocketError` if connect is **True** and we're + unable to establish a connection + """ + + super(ControlPort, self).__init__() + self._control_addr = address + self._control_port = port + + if connect: + self.connect() + + def get_address(self): + """ + Provides the ip address our socket connects to. + + :returns: str with the ip address of our socket + """ + + return self._control_addr + + def get_port(self): + """ + Provides the port our socket connects to. + + :returns: int with the port of our socket + """ + + return self._control_port + + def is_localhost(self): + return self._control_addr == '127.0.0.1' + + def _make_socket(self): + try: + control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + control_socket.connect((self._control_addr, self._control_port)) + return control_socket + except socket.error as exc: + raise stem.SocketError(exc) + + +class ControlSocketFile(ControlSocket): + """ + Control connection to tor. For more information see tor's ControlSocket torrc + option. + """ + + def __init__(self, path = '/var/run/tor/control', connect = True): + """ + ControlSocketFile constructor. + + :param str socket_path: path where the control socket is located + :param bool connect: connects to the socket if True, leaves it unconnected otherwise + + :raises: :class:`stem.SocketError` if connect is **True** and we're + unable to establish a connection + """ + + super(ControlSocketFile, self).__init__() + self._socket_path = path + + if connect: + self.connect() + + def get_socket_path(self): + """ + Provides the path our socket connects to. + + :returns: str with the path for our control socket + """ + + return self._socket_path + + def is_localhost(self): + return True + + def _make_socket(self): + try: + control_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + control_socket.connect(self._socket_path) + return control_socket + except socket.error as exc: + raise stem.SocketError(exc) + + +def send_message(control_file, message, raw = False): + """ + Sends a message to the control socket, adding the expected formatting for + single verses multi-line messages. Neither message type should contain an + ending newline (if so it'll be treated as a multi-line message with a blank + line at the end). If the message doesn't contain a newline then it's sent + as... + + :: + + \\r\\n + + and if it does contain newlines then it's split on ``\\n`` and sent as... 
+ + :: + + +\\r\\n + \\r\\n + \\r\\n + .\\r\\n + + :param file control_file: file derived from the control socket (see the + socket's makefile() method for more information) + :param str message: message to be sent on the control socket + :param bool raw: leaves the message formatting untouched, passing it to the + socket as-is + + :raises: + * :class:`stem.SocketError` if a problem arises in using the socket + * :class:`stem.SocketClosed` if the socket is known to be shut down + """ + + if not raw: + message = send_formatting(message) + + try: + control_file.write(stem.util.str_tools._to_bytes(message)) + control_file.flush() + + log_message = message.replace('\r\n', '\n').rstrip() + log.trace('Sent to tor:\n' + log_message) + except socket.error as exc: + log.info('Failed to send message: %s' % exc) + + # When sending there doesn't seem to be a reliable method for + # distinguishing between failures from a disconnect verses other things. + # Just accounting for known disconnection responses. + + if str(exc) == '[Errno 32] Broken pipe': + raise stem.SocketClosed(exc) + else: + raise stem.SocketError(exc) + except AttributeError: + # if the control_file has been closed then flush will receive: + # AttributeError: 'NoneType' object has no attribute 'sendall' + + log.info('Failed to send message: file has been closed') + raise stem.SocketClosed('file has been closed') + + +def recv_message(control_file): + """ + Pulls from a control socket until we either have a complete message or + encounter a problem. + + :param file control_file: file derived from the control socket (see the + socket's makefile() method for more information) + + :returns: :class:`~stem.response.ControlMessage` read from the socket + + :raises: + * :class:`stem.ProtocolError` the content from the socket is malformed + * :class:`stem.SocketClosed` if the socket closes before we receive + a complete message + """ + + parsed_content, raw_content = [], b'' + logging_prefix = 'Error while receiving a control message (%s): ' + + while True: + try: + # From a real socket readline() would always provide bytes, but during + # tests we might be given a StringIO in which case it's unicode under + # python 3.x. + + line = stem.util.str_tools._to_bytes(control_file.readline()) + except AttributeError: + # if the control_file has been closed then we will receive: + # AttributeError: 'NoneType' object has no attribute 'recv' + + prefix = logging_prefix % 'SocketClosed' + log.info(prefix + 'socket file has been closed') + raise stem.SocketClosed('socket file has been closed') + except (socket.error, ValueError) as exc: + # When disconnected we get... + # + # Python 2: + # socket.error: [Errno 107] Transport endpoint is not connected + # + # Python 3: + # ValueError: I/O operation on closed file. + + prefix = logging_prefix % 'SocketClosed' + log.info(prefix + 'received exception "%s"' % exc) + raise stem.SocketClosed(exc) + + raw_content += line + + # Parses the tor control lines. These are of the form... 
+ # \r\n + + if len(line) == 0: + # if the socket is disconnected then the readline() method will provide + # empty content + + prefix = logging_prefix % 'SocketClosed' + log.info(prefix + 'empty socket content') + raise stem.SocketClosed('Received empty socket content.') + elif len(line) < 4: + prefix = logging_prefix % 'ProtocolError' + log.info(prefix + 'line too short, "%s"' % log.escape(line)) + raise stem.ProtocolError('Badly formatted reply line: too short') + elif not re.match(b'^[a-zA-Z0-9]{3}[-+ ]', line): + prefix = logging_prefix % 'ProtocolError' + log.info(prefix + 'malformed status code/divider, "%s"' % log.escape(line)) + raise stem.ProtocolError('Badly formatted reply line: beginning is malformed') + elif not line.endswith(b'\r\n'): + prefix = logging_prefix % 'ProtocolError' + log.info(prefix + 'no CRLF linebreak, "%s"' % log.escape(line)) + raise stem.ProtocolError('All lines should end with CRLF') + + line = line[:-2] # strips off the CRLF + status_code, divider, content = line[:3], line[3:4], line[4:] + + if stem.prereq.is_python_3(): + status_code = stem.util.str_tools._to_unicode(status_code) + divider = stem.util.str_tools._to_unicode(divider) + + if divider == '-': + # mid-reply line, keep pulling for more content + parsed_content.append((status_code, divider, content)) + elif divider == ' ': + # end of the message, return the message + parsed_content.append((status_code, divider, content)) + + log_message = raw_content.replace(b'\r\n', b'\n').rstrip() + log.trace('Received from tor:\n' + stem.util.str_tools._to_unicode(log_message)) + + return stem.response.ControlMessage(parsed_content, raw_content) + elif divider == '+': + # data entry, all of the following lines belong to the content until we + # get a line with just a period + + while True: + try: + line = stem.util.str_tools._to_bytes(control_file.readline()) + except socket.error as exc: + prefix = logging_prefix % 'SocketClosed' + log.info(prefix + 'received an exception while mid-way through a data reply (exception: "%s", read content: "%s")' % (exc, log.escape(raw_content))) + raise stem.SocketClosed(exc) + + raw_content += line + + if not line.endswith(b'\r\n'): + prefix = logging_prefix % 'ProtocolError' + log.info(prefix + 'CRLF linebreaks missing from a data reply, "%s"' % log.escape(raw_content)) + raise stem.ProtocolError('All lines should end with CRLF') + elif line == b'.\r\n': + break # data block termination + + line = line[:-2] # strips off the CRLF + + # lines starting with a period are escaped by a second period (as per + # section 2.4 of the control-spec) + + if line.startswith(b'..'): + line = line[1:] + + # appends to previous content, using a newline rather than CRLF + # separator (more conventional for multi-line string content outside + # the windows world) + + content += b'\n' + line + + parsed_content.append((status_code, divider, content)) + else: + # this should never be reached due to the prefix regex, but might as well + # be safe... + prefix = logging_prefix % 'ProtocolError' + log.warn(prefix + "\"%s\" isn't a recognized divider type" % divider) + raise stem.ProtocolError("Unrecognized divider type '%s': %s" % (divider, stem.util.str_tools._to_unicode(line))) + + +def send_formatting(message): + """ + Performs the formatting expected from sent control messages. For more + information see the :func:`~stem.socket.send_message` function. 
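+
+  As a rough sketch of the transformation (the commands shown are merely
+  illustrative, and this assumes the function is imported from stem.socket)...
+
+  ::
+
+    >>> send_formatting('GETINFO version')
+    'GETINFO version\\r\\n'
+
+    >>> send_formatting('LOADCONF\\nSocksPort 9050')
+    '+LOADCONF\\r\\nSocksPort 9050\\r\\n.\\r\\n'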
+ + :param str message: message to be formatted + + :returns: **str** of the message wrapped by the formatting expected from + controllers + """ + + # From control-spec section 2.2... + # Command = Keyword OptArguments CRLF / "+" Keyword OptArguments CRLF CmdData + # Keyword = 1*ALPHA + # OptArguments = [ SP *(SP / VCHAR) ] + # + # A command is either a single line containing a Keyword and arguments, or a + # multiline command whose initial keyword begins with +, and whose data + # section ends with a single "." on a line of its own. + + # if we already have \r\n entries then standardize on \n to start with + message = message.replace('\r\n', '\n') + + if '\n' in message: + return '+%s\r\n.\r\n' % message.replace('\n', '\r\n') + else: + return message + '\r\n' diff --git a/Shared/lib/python3.4/site-packages/stem/util/__init__.py b/Shared/lib/python3.4/site-packages/stem/util/__init__.py new file mode 100644 index 0000000..1c18df3 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Utility functions used by the stem library. +""" + +__all__ = [ + 'conf', + 'connection', + 'enum', + 'log', + 'lru_cache', + 'ordereddict', + 'proc', + 'system', + 'term', + 'test_tools', + 'tor_tools', +] diff --git a/Shared/lib/python3.4/site-packages/stem/util/conf.py b/Shared/lib/python3.4/site-packages/stem/util/conf.py new file mode 100644 index 0000000..6b0efd7 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/conf.py @@ -0,0 +1,745 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Handlers for text configuration files. Configurations are simple string to +string mappings, with the configuration files using the following rules... + +* the key/value is separated by a space +* anything after a '#' is ignored as a comment +* excess whitespace is trimmed +* empty lines are ignored +* multi-line values can be defined by following the key with lines starting + with a '|' + +For instance... + +:: + + # This is my sample config + user.name Galen + user.password yabba1234 # here's an inline comment + user.notes takes a fancy to pepperjack cheese + blankEntry.example + + msg.greeting + |Multi-line message exclaiming of the + |wonder and awe that is pepperjack! + +... would be loaded as... + +:: + + config = { + 'user.name': 'Galen', + 'user.password': 'yabba1234', + 'user.notes': 'takes a fancy to pepperjack cheese', + 'blankEntry.example': '', + 'msg.greeting': 'Multi-line message exclaiming of the\\nwonder and awe that is pepperjack!', + } + +Configurations are managed via the :class:`~stem.util.conf.Config` class. The +:class:`~stem.util.conf.Config` can be be used directly with its +:func:`~stem.util.conf.Config.get` and :func:`~stem.util.conf.Config.set` +methods, but usually modules will want a local dictionary with just the +configurations that it cares about. + +To do this use the :func:`~stem.util.conf.config_dict` function. For example... 
+ +:: + + import getpass + from stem.util import conf, connection + + def config_validator(key, value): + if key == 'timeout': + # require at least a one second timeout + return max(1, value) + elif key == 'endpoint': + if not connection.is_valid_ipv4_address(value): + raise ValueError("'%s' isn't a valid IPv4 address" % value) + elif key == 'port': + if not connection.is_valid_port(value): + raise ValueError("'%s' isn't a valid port" % value) + elif key == 'retries': + # negative retries really don't make sense + return max(0, value) + + CONFIG = conf.config_dict('ssh_login', { + 'username': getpass.getuser(), + 'password': '', + 'timeout': 10, + 'endpoint': '263.12.8.0', + 'port': 22, + 'reconnect': False, + 'retries': 3, + }, config_validator) + +There's several things going on here so lets take it step by step... + +* The :func:`~stem.util.conf.config_dict` provides a dictionary that's bound + to a given configuration. If the "ssh_proxy_config" configuration changes + then so will the contents of CONFIG. + +* The dictionary we're passing to :func:`~stem.util.conf.config_dict` provides + two important pieces of information: default values and their types. See the + Config's :func:`~stem.util.conf.Config.get` method for how these type + inferences work. + +* The config_validator is a hook we're adding to make sure CONFIG only gets + values we think are valid. In this case it ensures that our timeout value + is at least one second, and rejects endpoints or ports that are invalid. + +Now lets say our user has the following configuration file... + +:: + + username waddle_doo + password jabberwocky + timeout -15 + port 9000000 + retries lots + reconnect true + logging debug + +... and we load it as follows... + +:: + + >>> from stem.util import conf + >>> our_config = conf.get_config('ssh_login') + >>> our_config.load('/home/atagar/user_config') + >>> print CONFIG # doctest: +SKIP + { + "username": "waddle_doo", + "password": "jabberwocky", + "timeout": 1, + "endpoint": "263.12.8.0", + "port": 22, + "reconnect": True, + "retries": 3, + } + +Here's an expanation of what happened... 
+ +* the username, password, and reconnect attributes took the values in the + configuration file + +* the 'config_validator' we added earlier allows for a minimum timeout of one + and rejected the invalid port (with a log message) + +* we weren't able to convert the retries' "lots" value to an integer so it kept + its default value and logged a warning + +* the user didn't supply an endpoint so that remained unchanged + +* our CONFIG didn't have a 'logging' attribute so it was ignored + +**Module Overview:** + +:: + + config_dict - provides a dictionary that's kept in sync with our config + get_config - singleton for getting configurations + uses_settings - provides an annotation for functions that use configurations + parse_enum_csv - helper funcion for parsing confguration entries for enums + + Config - Custom configuration + |- load - reads a configuration file + |- save - writes the current configuration to a file + |- clear - empties our loaded configuration contents + |- add_listener - notifies the given listener when an update occurs + |- clear_listeners - removes any attached listeners + |- keys - provides keys in the loaded configuration + |- set - sets the given key/value pair + |- unused_keys - provides keys that have never been requested + |- get - provides the value for a given key, with type inference + +- get_value - provides the value for a given key as a string +""" + +import inspect +import os +import threading + +from stem.util import log + +try: + # added in python 2.7 + from collections import OrderedDict +except ImportError: + from stem.util.ordereddict import OrderedDict + +CONFS = {} # mapping of identifier to singleton instances of configs + + +class _SyncListener(object): + def __init__(self, config_dict, interceptor): + self.config_dict = config_dict + self.interceptor = interceptor + + def update(self, config, key): + if key in self.config_dict: + new_value = config.get(key, self.config_dict[key]) + + if new_value == self.config_dict[key]: + return # no change + + if self.interceptor: + interceptor_value = self.interceptor(key, new_value) + + if interceptor_value: + new_value = interceptor_value + + self.config_dict[key] = new_value + + +def config_dict(handle, conf_mappings, handler = None): + """ + Makes a dictionary that stays synchronized with a configuration. + + This takes a dictionary of 'config_key => default_value' mappings and + changes the values to reflect our current configuration. This will leave + the previous values alone if... + + * we don't have a value for that config_key + * we can't convert our value to be the same type as the default_value + + If a handler is provided then this is called just prior to assigning new + values to the config_dict. The handler function is expected to accept the + (key, value) for the new values and return what we should actually insert + into the dictionary. If this returns None then the value is updated as + normal. + + For more information about how we convert types see our + :func:`~stem.util.conf.Config.get` method. 
+ + **The dictionary you get from this is manged by the + :class:`~stem.util.conf.Config` class and should be treated as being + read-only.** + + :param str handle: unique identifier for a config instance + :param dict conf_mappings: config key/value mappings used as our defaults + :param functor handler: function referred to prior to assigning values + """ + + selected_config = get_config(handle) + selected_config.add_listener(_SyncListener(conf_mappings, handler).update) + return conf_mappings + + +def get_config(handle): + """ + Singleton constructor for configuration file instances. If a configuration + already exists for the handle then it's returned. Otherwise a fresh instance + is constructed. + + :param str handle: unique identifier used to access this config instance + """ + + if handle not in CONFS: + CONFS[handle] = Config() + + return CONFS[handle] + + +def uses_settings(handle, path, lazy_load = True): + """ + Provides a function that can be used as a decorator for other functions that + require settings to be loaded. Functions with this decorator will be provided + with the configuration as its 'config' keyword argument. + + .. versionchanged:: 1.3.0 + Omits the 'config' argument if the funcion we're decorating doesn't accept + it. + + :: + + uses_settings = stem.util.conf.uses_settings('my_app', '/path/to/settings.cfg') + + @uses_settings + def my_function(config): + print 'hello %s!' % config.get('username', '') + + :param str handle: hande for the configuration + :param str path: path where the configuration should be loaded from + :param bool lazy_load: loads the configuration file when the decorator is + used if true, otherwise it's loaded right away + + :returns: **function** that can be used as a decorator to provide the + configuration + + :raises: **IOError** if we fail to read the configuration file, if + **lazy_load** is true then this arises when we use the decorator + """ + + config = get_config(handle) + + if not lazy_load and not config.get('settings_loaded', False): + config.load(path) + config.set('settings_loaded', 'true') + + def decorator(func): + def wrapped(*args, **kwargs): + if lazy_load and not config.get('settings_loaded', False): + config.load(path) + config.set('settings_loaded', 'true') + + if 'config' in inspect.getargspec(func).args: + return func(*args, config = config, **kwargs) + else: + return func(*args, **kwargs) + + return wrapped + + return decorator + + +def parse_enum(key, value, enumeration): + """ + Provides the enumeration value for a given key. This is a case insensitive + lookup and raises an exception if the enum key doesn't exist. + + :param str key: configuration key being looked up + :param str value: value to be parsed + :param stem.util.enum.Enum enumeration: enumeration the values should be in + + :returns: enumeration value + + :raises: **ValueError** if the **value** isn't among the enumeration keys + """ + + return parse_enum_csv(key, value, enumeration, 1)[0] + + +def parse_enum_csv(key, value, enumeration, count = None): + """ + Parses a given value as being a comma separated listing of enumeration keys, + returning the corresponding enumeration values. This is intended to be a + helper for config handlers. The checks this does are case insensitive. + + The **count** attribute can be used to make assertions based on the number of + values. This can be... + + * None to indicate that there's no restrictions. + * An int to indicate that we should have this many values. 
+ * An (int, int) tuple to indicate the range that values can be in. This range + is inclusive and either can be None to indicate the lack of a lower or + upper bound. + + :param str key: configuration key being looked up + :param str value: value to be parsed + :param stem.util.enum.Enum enumeration: enumeration the values should be in + :param int,tuple count: validates that we have this many items + + :returns: list with the enumeration values + + :raises: **ValueError** if the count assertion fails or the **value** entries + don't match the enumeration keys + """ + + values = [val.upper().strip() for val in value.split(',')] + + if values == ['']: + return [] + + if count is None: + pass # no count validateion checks to do + elif isinstance(count, int): + if len(values) != count: + raise ValueError("Config entry '%s' is expected to be %i comma separated values, got '%s'" % (key, count, value)) + elif isinstance(count, tuple) and len(count) == 2: + minimum, maximum = count + + if minimum is not None and len(values) < minimum: + raise ValueError("Config entry '%s' must have at least %i comma separated values, got '%s'" % (key, minimum, value)) + + if maximum is not None and len(values) > maximum: + raise ValueError("Config entry '%s' can have at most %i comma separated values, got '%s'" % (key, maximum, value)) + else: + raise ValueError("The count must be None, an int, or two value tuple. Got '%s' (%s)'" % (count, type(count))) + + result = [] + enum_keys = [k.upper() for k in list(enumeration.keys())] + enum_values = list(enumeration) + + for val in values: + if val in enum_keys: + result.append(enum_values[enum_keys.index(val)]) + else: + raise ValueError("The '%s' entry of config entry '%s' wasn't in the enumeration (expected %s)" % (val, key, ', '.join(enum_keys))) + + return result + + +class Config(object): + """ + Handler for easily working with custom configurations, providing persistence + to and from files. All operations are thread safe. + + **Example usage:** + + User has a file at '/home/atagar/myConfig' with... + + :: + + destination.ip 1.2.3.4 + destination.port blarg + + startup.run export PATH=$PATH:~/bin + startup.run alias l=ls + + And they have a script with... + + :: + + from stem.util import conf + + # Configuration values we'll use in this file. These are mappings of + # configuration keys to the default values we'll use if the user doesn't + # have something different in their config file (or it doesn't match this + # type). + + ssh_config = conf.config_dict('ssh_login', { + 'login.user': 'atagar', + 'login.password': 'pepperjack_is_awesome!', + 'destination.ip': '127.0.0.1', + 'destination.port': 22, + 'startup.run': [], + }) + + # Makes an empty config instance with the handle of 'ssh_login'. This is + # a singleton so other classes can fetch this same configuration from + # this handle. + + user_config = conf.get_config('ssh_login') + + # Loads the user's configuration file, warning if this fails. + + try: + user_config.load("/home/atagar/myConfig") + except IOError as exc: + print "Unable to load the user's config: %s" % exc + + # This replace the contents of ssh_config with the values from the user's + # config file if... + # + # * the key is present in the config file + # * we're able to convert the configuration file's value to the same type + # as what's in the mapping (see the Config.get() method for how these + # type inferences work) + # + # For instance in this case... 
+ # + # * the login values are left alone because they aren't in the user's + # config file + # + # * the 'destination.port' is also left with the value of 22 because we + # can't turn "blarg" into an integer + # + # The other values are replaced, so ssh_config now becomes... + # + # {'login.user': 'atagar', + # 'login.password': 'pepperjack_is_awesome!', + # 'destination.ip': '1.2.3.4', + # 'destination.port': 22, + # 'startup.run': ['export PATH=$PATH:~/bin', 'alias l=ls']} + # + # Information for what values fail to load and why are reported to + # 'stem.util.log'. + """ + + def __init__(self): + self._path = None # location we last loaded from or saved to + self._contents = {} # configuration key/value pairs + self._listeners = [] # functors to be notified of config changes + + # used for accessing _contents + self._contents_lock = threading.RLock() + + # keys that have been requested (used to provide unused config contents) + self._requested_keys = set() + + def load(self, path = None): + """ + Reads in the contents of the given path, adding its configuration values + to our current contents. If the path is a directory then this loads each + of the files, recursively. + + .. versionchanged:: 1.3.0 + Added support for directories. + + :param str path: file or directory path to be loaded, this uses the last + loaded path if not provided + + :raises: + * **IOError** if we fail to read the file (it doesn't exist, insufficient + permissions, etc) + * **ValueError** if no path was provided and we've never been provided one + """ + + if path: + self._path = path + elif not self._path: + raise ValueError('Unable to load configuration: no path provided') + + if os.path.isdir(self._path): + for root, dirnames, filenames in os.walk(self._path): + for filename in filenames: + self.load(os.path.join(root, filename)) + + return + + with open(self._path, 'r') as config_file: + read_contents = config_file.readlines() + + with self._contents_lock: + while read_contents: + line = read_contents.pop(0) + + # strips any commenting or excess whitespace + comment_start = line.find('#') + + if comment_start != -1: + line = line[:comment_start] + + line = line.strip() + + # parse the key/value pair + if line: + try: + key, value = line.split(' ', 1) + value = value.strip() + except ValueError: + log.debug("Config entry '%s' is expected to be of the format 'Key Value', defaulting to '%s' -> ''" % (line, line)) + key, value = line, '' + + if not value: + # this might be a multi-line entry, try processing it as such + multiline_buffer = [] + + while read_contents and read_contents[0].lstrip().startswith('|'): + content = read_contents.pop(0).lstrip()[1:] # removes '\s+|' prefix + content = content.rstrip('\n') # trailing newline + multiline_buffer.append(content) + + if multiline_buffer: + self.set(key, '\n'.join(multiline_buffer), False) + continue + + self.set(key, value, False) + + def save(self, path = None): + """ + Saves configuration contents to disk. If a path is provided then it + replaces the configuration location that we track. 
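For instance, a minimal sketch (editor's illustration; the '/tmp/example_config' path and the startup.run values are made up)...

::

  config = conf.get_config('example')
  config.set('startup.run', 'export PATH=$PATH:~/bin')
  config.set('startup.run', 'alias l=ls', overwrite = False)
  config.save('/tmp/example_config')

  # the saved file now contains...
  #
  #   startup.run export PATH=$PATH:~/bin
  #   startup.run alias l=ls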
+ + :param str path: location to be saved to + + :raises: **ValueError** if no path was provided and we've never been provided one + """ + + if path: + self._path = path + elif not self._path: + raise ValueError('Unable to save configuration: no path provided') + + with self._contents_lock: + with open(self._path, 'w') as output_file: + for entry_key in sorted(self.keys()): + for entry_value in self.get_value(entry_key, multiple = True): + # check for multi line entries + if '\n' in entry_value: + entry_value = '\n|' + entry_value.replace('\n', '\n|') + + output_file.write('%s %s\n' % (entry_key, entry_value)) + + def clear(self): + """ + Drops the configuration contents and reverts back to a blank, unloaded + state. + """ + + with self._contents_lock: + self._contents.clear() + self._requested_keys = set() + + def add_listener(self, listener, backfill = True): + """ + Registers the function to be notified of configuration updates. Listeners + are expected to be functors which accept (config, key). + + :param functor listener: function to be notified when our configuration is changed + :param bool backfill: calls the function with our current values if **True** + """ + + with self._contents_lock: + self._listeners.append(listener) + + if backfill: + for key in self.keys(): + listener(self, key) + + def clear_listeners(self): + """ + Removes all attached listeners. + """ + + self._listeners = [] + + def keys(self): + """ + Provides all keys in the currently loaded configuration. + + :returns: **list** if strings for the configuration keys we've loaded + """ + + return list(self._contents.keys()) + + def unused_keys(self): + """ + Provides the configuration keys that have never been provided to a caller + via :func:`~stem.util.conf.config_dict` or the + :func:`~stem.util.conf.Config.get` and + :func:`~stem.util.conf.Config.get_value` methods. + + :returns: **set** of configuration keys we've loaded but have never been requested + """ + + return set(self.keys()).difference(self._requested_keys) + + def set(self, key, value, overwrite = True): + """ + Appends the given key/value configuration mapping, behaving the same as if + we'd loaded this from a configuration file. + + :param str key: key for the configuration mapping + :param str,list value: value we're setting the mapping to + :param bool overwrite: replaces the previous value if **True**, otherwise + the values are appended + """ + + with self._contents_lock: + if isinstance(value, str): + if not overwrite and key in self._contents: + self._contents[key].append(value) + else: + self._contents[key] = [value] + + for listener in self._listeners: + listener(self, key) + elif isinstance(value, (list, tuple)): + if not overwrite and key in self._contents: + self._contents[key] += value + else: + self._contents[key] = value + + for listener in self._listeners: + listener(self, key) + else: + raise ValueError("Config.set() only accepts str, list, or tuple. Provided value was a '%s'" % type(value)) + + def get(self, key, default = None): + """ + Fetches the given configuration, using the key and default value to + determine the type it should be. 
Recognized inferences are: + + * **default is a boolean => boolean** + + * values are case insensitive + * provides the default if the value isn't "true" or "false" + + * **default is an integer => int** + + * provides the default if the value can't be converted to an int + + * **default is a float => float** + + * provides the default if the value can't be converted to a float + + * **default is a list => list** + + * string contents for all configuration values with this key + + * **default is a tuple => tuple** + + * string contents for all configuration values with this key + + * **default is a dictionary => dict** + + * values without "=>" in them are ignored + * values are split into key/value pairs on "=>" with extra whitespace + stripped + + :param str key: config setting to be fetched + :param default object: value provided if no such key exists or fails to be converted + + :returns: given configuration value with its type inferred with the above rules + """ + + is_multivalue = isinstance(default, (list, tuple, dict)) + val = self.get_value(key, default, is_multivalue) + + if val == default: + return val # don't try to infer undefined values + + if isinstance(default, bool): + if val.lower() == 'true': + val = True + elif val.lower() == 'false': + val = False + else: + log.debug("Config entry '%s' is expected to be a boolean, defaulting to '%s'" % (key, str(default))) + val = default + elif isinstance(default, int): + try: + val = int(val) + except ValueError: + log.debug("Config entry '%s' is expected to be an integer, defaulting to '%i'" % (key, default)) + val = default + elif isinstance(default, float): + try: + val = float(val) + except ValueError: + log.debug("Config entry '%s' is expected to be a float, defaulting to '%f'" % (key, default)) + val = default + elif isinstance(default, list): + val = list(val) # make a shallow copy + elif isinstance(default, tuple): + val = tuple(val) + elif isinstance(default, dict): + val_map = OrderedDict() + for entry in val: + if '=>' in entry: + entry_key, entry_val = entry.split('=>', 1) + val_map[entry_key.strip()] = entry_val.strip() + else: + log.debug('Ignoring invalid %s config entry (expected a mapping, but "%s" was missing "=>")' % (key, entry)) + val = val_map + + return val + + def get_value(self, key, default = None, multiple = False): + """ + This provides the current value associated with a given key. 
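A rough illustration of how this differs from :func:`~stem.util.conf.Config.get` (editor's sketch, the 'timeout' entry is made up)...

::

  >>> config = conf.get_config('example')
  >>> config.set('timeout', '30')
  >>> config.get_value('timeout')
  '30'
  >>> config.get('timeout', 10)
  30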
+ + :param str key: config setting to be fetched + :param object default: value provided if no such key exists + :param bool multiple: provides back a list of all values if **True**, + otherwise this returns the last loaded configuration value + + :returns: **str** or **list** of string configuration values associated + with the given key, providing the default if no such key exists + """ + + with self._contents_lock: + if key in self._contents: + self._requested_keys.add(key) + + if multiple: + return self._contents[key] + else: + return self._contents[key][-1] + else: + message_id = 'stem.util.conf.missing_config_key_%s' % key + log.log_once(message_id, log.TRACE, "config entry '%s' not found, defaulting to '%s'" % (key, default)) + return default diff --git a/Shared/lib/python3.4/site-packages/stem/util/connection.py b/Shared/lib/python3.4/site-packages/stem/util/connection.py new file mode 100644 index 0000000..88d70d5 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/connection.py @@ -0,0 +1,651 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Connection and networking based utility functions. + +**Module Overview:** + +:: + + get_connections - quieries the connections belonging to a given process + system_resolvers - provides connection resolution methods that are likely to be available + port_usage - brief description of the common usage for a port + + is_valid_ipv4_address - checks if a string is a valid IPv4 address + is_valid_ipv6_address - checks if a string is a valid IPv6 address + is_valid_port - checks if something is a valid representation for a port + is_private_address - checks if an IPv4 address belongs to a private range or not + + expand_ipv6_address - provides an IPv6 address with its collapsed portions expanded + get_mask_ipv4 - provides the mask representation for a given number of bits + get_mask_ipv6 - provides the IPv6 mask representation for a given number of bits + +.. data:: Resolver (enum) + + Method for resolving a process' connections. + + .. versionadded:: 1.1.0 + .. versionchanged:: 1.4.0 + Added **NETSTAT_WINDOWS**. + + ==================== =========== + Resolver Description + ==================== =========== + **PROC** /proc contents + **NETSTAT** netstat + **NETSTAT_WINDOWS** netstat command under Windows + **SS** ss command + **LSOF** lsof command + **SOCKSTAT** sockstat command under *nix + **BSD_SOCKSTAT** sockstat command under FreeBSD + **BSD_PROCSTAT** procstat command under FreeBSD + ==================== =========== +""" + +import collections +import hashlib +import hmac +import os +import platform +import re + +import stem.util.proc +import stem.util.system + +from stem import str_type +from stem.util import conf, enum, log + +# Connection resolution is risky to log about since it's highly likely to +# contain sensitive information. That said, it's also difficult to get right in +# a platform independent fashion. To opt into the logging requried to +# troubleshoot connection resolution set the following... 
+ +LOG_CONNECTION_RESOLUTION = False + +Resolver = enum.Enum( + ('PROC', 'proc'), + ('NETSTAT', 'netstat'), + ('NETSTAT_WINDOWS', 'netstat (windows)'), + ('SS', 'ss'), + ('LSOF', 'lsof'), + ('SOCKSTAT', 'sockstat'), + ('BSD_SOCKSTAT', 'sockstat (bsd)'), + ('BSD_PROCSTAT', 'procstat (bsd)') +) + +Connection = collections.namedtuple('Connection', [ + 'local_address', + 'local_port', + 'remote_address', + 'remote_port', + 'protocol', +]) + +FULL_IPv4_MASK = '255.255.255.255' +FULL_IPv6_MASK = 'FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF' + +CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE = os.urandom(32) + +PORT_USES = None # port number => description + +RESOLVER_COMMAND = { + Resolver.PROC: '', + + # -n = prevents dns lookups, -p = include process + Resolver.NETSTAT: 'netstat -np', + + # -a = show all TCP/UDP connections, -n = numeric addresses and ports, -o = include pid + Resolver.NETSTAT_WINDOWS: 'netstat -ano', + + # -n = numeric ports, -p = include process, -t = tcp sockets, -u = udp sockets + Resolver.SS: 'ss -nptu', + + # -n = prevent dns lookups, -P = show port numbers (not names), -i = ip only, -w = no warnings + # (lsof provides a '-p ' but oddly in practice it seems to be ~11-28% slower) + Resolver.LSOF: 'lsof -wnPi', + + Resolver.SOCKSTAT: 'sockstat', + + # -4 = IPv4, -c = connected sockets + Resolver.BSD_SOCKSTAT: 'sockstat -4c', + + # -f = process pid + Resolver.BSD_PROCSTAT: 'procstat -f {pid}', +} + +RESOLVER_FILTER = { + Resolver.PROC: '', + + # tcp 0 586 192.168.0.1:44284 38.229.79.2:443 ESTABLISHED 15843/tor + Resolver.NETSTAT: '^{protocol}\s+.*\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED\s+{pid}/{name}\s*$', + + # tcp 586 192.168.0.1:44284 38.229.79.2:443 ESTABLISHED 15843 + Resolver.NETSTAT_WINDOWS: '^\s*{protocol}\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED\s+{pid}\s*$', + + # tcp ESTAB 0 0 192.168.0.20:44415 38.229.79.2:443 users:(("tor",15843,9)) + Resolver.SS: '^{protocol}\s+ESTAB\s+.*\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+users:\(\("{name}",{pid},[0-9]+\)\)$', + + # tor 3873 atagar 45u IPv4 40994 0t0 TCP 10.243.55.20:45724->194.154.227.109:9001 (ESTABLISHED) + Resolver.LSOF: '^{name}\s+{pid}\s+.*\s+{protocol}\s+{local_address}:{local_port}->{remote_address}:{remote_port} \(ESTABLISHED\)$', + + # atagar tor 15843 tcp4 192.168.0.20:44092 68.169.35.102:443 ESTABLISHED + Resolver.SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+{protocol}4\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}\s+ESTABLISHED$', + + # _tor tor 4397 12 tcp4 172.27.72.202:54011 127.0.0.1:9001 + Resolver.BSD_SOCKSTAT: '^\S+\s+{name}\s+{pid}\s+\S+\s+{protocol}4\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}$', + + # 3561 tor 4 s - rw---n-- 2 0 TCP 10.0.0.2:9050 10.0.0.1:22370 + Resolver.BSD_PROCSTAT: '^\s*{pid}\s+{name}\s+.*\s+{protocol}\s+{local_address}:{local_port}\s+{remote_address}:{remote_port}$', +} + + +def get_connections(resolver, process_pid = None, process_name = None): + """ + Retrieves a list of the current connections for a given process. This + provides a list of Connection instances, which have five attributes... + + * **local_address** (str) + * **local_port** (int) + * **remote_address** (str) + * **remote_port** (int) + * **protocol** (str, generally either 'tcp' or 'udp') + + .. 
versionadded:: 1.1.0 + + :param Resolver resolver: method of connection resolution to use + :param int process_pid: pid of the process to retrieve + :param str process_name: name of the process to retrieve + + :returns: **list** of Connection instances + + :raises: + * **ValueError** if using **Resolver.PROC** or **Resolver.BSD_PROCSTAT** + and the process_pid wasn't provided + + * **IOError** if no connections are available or resolution fails + (generally they're indistinguishable). The common causes are the + command being unavailable or permissions. + """ + + def _log(msg): + if LOG_CONNECTION_RESOLUTION: + log.debug(msg) + + _log('=' * 80) + _log('Querying connections for resolver: %s, pid: %s, name: %s' % (resolver, process_pid, process_name)) + + if isinstance(process_pid, str): + try: + process_pid = int(process_pid) + except ValueError: + raise ValueError('Process pid was non-numeric: %s' % process_pid) + + if process_pid is None and process_name and resolver == Resolver.NETSTAT_WINDOWS: + process_pid = stem.util.system.pid_by_name(process_name) + + if process_pid is None and resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT): + raise ValueError('%s resolution requires a pid' % resolver) + + if resolver == Resolver.PROC: + return [Connection(*conn) for conn in stem.util.proc.connections(process_pid)] + + resolver_command = RESOLVER_COMMAND[resolver].format(pid = process_pid) + + try: + results = stem.util.system.call(resolver_command) + except OSError as exc: + raise IOError("Unable to query '%s': %s" % (resolver_command, exc)) + + resolver_regex_str = RESOLVER_FILTER[resolver].format( + protocol = '(?P\S+)', + local_address = '(?P[0-9.]+)', + local_port = '(?P[0-9]+)', + remote_address = '(?P[0-9.]+)', + remote_port = '(?P[0-9]+)', + pid = process_pid if process_pid else '[0-9]*', + name = process_name if process_name else '\S*', + ) + + _log('Resolver regex: %s' % resolver_regex_str) + _log('Resolver results:\n%s' % '\n'.join(results)) + + connections = [] + resolver_regex = re.compile(resolver_regex_str) + + for line in results: + match = resolver_regex.match(line) + + if match: + attr = match.groupdict() + local_addr = attr['local_address'] + local_port = int(attr['local_port']) + remote_addr = attr['remote_address'] + remote_port = int(attr['remote_port']) + protocol = attr['protocol'].lower() + + if remote_addr == '0.0.0.0': + continue # procstat response for unestablished connections + + if not (is_valid_ipv4_address(local_addr) and is_valid_ipv4_address(remote_addr)): + _log('Invalid address (%s or %s): %s' % (local_addr, remote_addr, line)) + elif not (is_valid_port(local_port) and is_valid_port(remote_port)): + _log('Invalid port (%s or %s): %s' % (local_port, remote_port, line)) + elif protocol not in ('tcp', 'udp'): + _log('Unrecognized protocol (%s): %s' % (protocol, line)) + + conn = Connection(local_addr, local_port, remote_addr, remote_port, protocol) + connections.append(conn) + _log(str(conn)) + + _log('%i connections found' % len(connections)) + + if not connections: + raise IOError('No results found using: %s' % resolver_command) + + return connections + + +def system_resolvers(system = None): + """ + Provides the types of connection resolvers likely to be available on this platform. + + .. versionadded:: 1.1.0 + + .. versionchanged:: 1.3.0 + Renamed from get_system_resolvers() to system_resolvers(). The old name + still works as an alias, but will be dropped in Stem version 2.0.0. 
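A minimal sketch of pairing this with :func:`~stem.util.connection.get_connections` (editor's illustration, assuming a process named 'tor' is running)...

::

  from stem.util import connection, system

  resolvers = connection.system_resolvers()

  if resolvers:
    tor_pid = system.pid_by_name('tor')

    for conn in connection.get_connections(resolvers[0], process_pid = tor_pid, process_name = 'tor'):
      print('%s:%s => %s:%s (%s)' % (conn.local_address, conn.local_port, conn.remote_address, conn.remote_port, conn.protocol))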
+ + :param str system: system to get resolvers for, this is determined by + platform.system() if not provided + + :returns: **list** of :data:`~stem.util.connection.Resolver` instances available on this platform + """ + if system is None: + if stem.util.system.is_gentoo(): + system = 'Gentoo' + else: + system = platform.system() + + if system == 'Windows': + resolvers = [Resolver.NETSTAT_WINDOWS] + elif system in ('Darwin', 'OpenBSD'): + resolvers = [Resolver.LSOF] + elif system == 'FreeBSD': + # Netstat is available, but lacks a '-p' equivalent so we can't associate + # the results to processes. The platform also has a ss command, but it + # belongs to a spreadsheet application. + + resolvers = [Resolver.BSD_SOCKSTAT, Resolver.BSD_PROCSTAT, Resolver.LSOF] + else: + # Sockstat isn't available by default on ubuntu. + + resolvers = [Resolver.NETSTAT, Resolver.SOCKSTAT, Resolver.LSOF, Resolver.SS] + + # remove any that aren't in the user's PATH + + resolvers = [r for r in resolvers if stem.util.system.is_available(RESOLVER_COMMAND[r])] + + # proc resolution, by far, outperforms the others so defaults to this is able + + if stem.util.proc.is_available() and os.access('/proc/net/tcp', os.R_OK) and os.access('/proc/net/udp', os.R_OK): + resolvers = [Resolver.PROC] + resolvers + + return resolvers + + +def port_usage(port): + """ + Provides the common use of a given port. For example, 'HTTP' for port 80 or + 'SSH' for 22. + + .. versionadded:: 1.2.0 + + :param int port: port number to look up + + :returns: **str** with a description for the port, **None** if none is known + """ + + global PORT_USES + + if PORT_USES is None: + config = conf.Config() + config_path = os.path.join(os.path.dirname(__file__), 'ports.cfg') + + try: + config.load(config_path) + port_uses = {} + + for key, value in config.get('port', {}).items(): + if key.isdigit(): + port_uses[int(key)] = value + elif '-' in key: + min_port, max_port = key.split('-', 1) + + for port_entry in range(int(min_port), int(max_port) + 1): + port_uses[port_entry] = value + else: + raise ValueError("'%s' is an invalid key" % key) + + PORT_USES = port_uses + except Exception as exc: + log.warn("BUG: stem failed to load its internal port descriptions from '%s': %s" % (config_path, exc)) + + if not PORT_USES: + return None + + if isinstance(port, str) and port.isdigit(): + port = int(port) + + return PORT_USES.get(port) + + +def is_valid_ipv4_address(address): + """ + Checks if a string is a valid IPv4 address. + + :param str address: string to be checked + + :returns: **True** if input is a valid IPv4 address, **False** otherwise + """ + + if not isinstance(address, (bytes, str_type)): + return False + + # checks if theres four period separated values + + if address.count('.') != 3: + return False + + # checks that each value in the octet are decimal values between 0-255 + for entry in address.split('.'): + if not entry.isdigit() or int(entry) < 0 or int(entry) > 255: + return False + elif entry[0] == '0' and len(entry) > 1: + return False # leading zeros, for instance in '1.2.3.001' + + return True + + +def is_valid_ipv6_address(address, allow_brackets = False): + """ + Checks if a string is a valid IPv6 address. 
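For instance (editor's illustration based on the checks below)...

::

  >>> is_valid_ipv6_address('2001:db8::ff00:42:8329')
  True
  >>> is_valid_ipv6_address('[2001:db8::ff00:42:8329]', allow_brackets = True)
  True
  >>> is_valid_ipv6_address('2001:db8:::8329')
  False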
+ + :param str address: string to be checked + :param bool allow_brackets: ignore brackets which form '[address]' + + :returns: **True** if input is a valid IPv6 address, **False** otherwise + """ + + if allow_brackets: + if address.startswith('[') and address.endswith(']'): + address = address[1:-1] + + # addresses are made up of eight colon separated groups of four hex digits + # with leading zeros being optional + # https://en.wikipedia.org/wiki/IPv6#Address_format + + colon_count = address.count(':') + + if colon_count > 7: + return False # too many groups + elif colon_count != 7 and '::' not in address: + return False # not enough groups and none are collapsed + elif address.count('::') > 1 or ':::' in address: + return False # multiple groupings of zeros can't be collapsed + + for entry in address.split(':'): + if not re.match('^[0-9a-fA-f]{0,4}$', entry): + return False + + return True + + +def is_valid_port(entry, allow_zero = False): + """ + Checks if a string or int is a valid port number. + + :param list,str,int entry: string, integer or list to be checked + :param bool allow_zero: accept port number of zero (reserved by definition) + + :returns: **True** if input is an integer and within the valid port range, **False** otherwise + """ + + try: + value = int(entry) + + if str(value) != str(entry): + return False # invalid leading char, e.g. space or zero + elif allow_zero and value == 0: + return True + else: + return value > 0 and value < 65536 + except TypeError: + if isinstance(entry, (tuple, list)): + for port in entry: + if not is_valid_port(port, allow_zero): + return False + + return True + else: + return False + except ValueError: + return False + + +def is_private_address(address): + """ + Checks if the IPv4 address is in a range belonging to the local network or + loopback. These include: + + * Private ranges: 10.*, 172.16.* - 172.31.*, 192.168.* + * Loopback: 127.* + + .. versionadded:: 1.1.0 + + :param str address: string to be checked + + :returns: **True** if input is in a private range, **False** otherwise + + :raises: **ValueError** if the address isn't a valid IPv4 address + """ + + if not is_valid_ipv4_address(address): + raise ValueError("'%s' isn't a valid IPv4 address" % address) + + # checks for any of the simple wildcard ranges + + if address.startswith('10.') or address.startswith('192.168.') or address.startswith('127.'): + return True + + # checks for the 172.16.* - 172.31.* range + + if address.startswith('172.'): + second_octet = int(address.split('.')[1]) + + if second_octet >= 16 and second_octet <= 31: + return True + + return False + + +def expand_ipv6_address(address): + """ + Expands abbreviated IPv6 addresses to their full colon separated hex format. + For instance... 
+ + :: + + >>> expand_ipv6_address('2001:db8::ff00:42:8329') + '2001:0db8:0000:0000:0000:ff00:0042:8329' + + >>> expand_ipv6_address('::') + '0000:0000:0000:0000:0000:0000:0000:0000' + + :param str address: IPv6 address to be expanded + + :raises: **ValueError** if the address can't be expanded due to being malformed + """ + + if not is_valid_ipv6_address(address): + raise ValueError("'%s' isn't a valid IPv6 address" % address) + + # expands collapsed groupings, there can only be a single '::' in a valid + # address + if '::' in address: + missing_groups = 7 - address.count(':') + address = address.replace('::', '::' + ':' * missing_groups) + + # inserts missing zeros + for index in range(8): + start = index * 5 + end = address.index(':', start) if index != 7 else len(address) + missing_zeros = 4 - (end - start) + + if missing_zeros > 0: + address = address[:start] + '0' * missing_zeros + address[start:] + + return address + + +def get_mask_ipv4(bits): + """ + Provides the IPv4 mask for a given number of bits, in the dotted-quad format. + + :param int bits: number of bits to be converted + + :returns: **str** with the subnet mask representation for this many bits + + :raises: **ValueError** if given a number of bits outside the range of 0-32 + """ + + if bits > 32 or bits < 0: + raise ValueError('A mask can only be 0-32 bits, got %i' % bits) + elif bits == 32: + return FULL_IPv4_MASK + + # get the binary representation of the mask + mask_bin = _get_binary(2 ** bits - 1, 32)[::-1] + + # breaks it into eight character groupings + octets = [mask_bin[8 * i:8 * (i + 1)] for i in range(4)] + + # converts each octet into its integer value + return '.'.join([str(int(octet, 2)) for octet in octets]) + + +def get_mask_ipv6(bits): + """ + Provides the IPv6 mask for a given number of bits, in the hex colon-delimited + format. + + :param int bits: number of bits to be converted + + :returns: **str** with the subnet mask representation for this many bits + + :raises: **ValueError** if given a number of bits outside the range of 0-128 + """ + + if bits > 128 or bits < 0: + raise ValueError('A mask can only be 0-128 bits, got %i' % bits) + elif bits == 128: + return FULL_IPv6_MASK + + # get the binary representation of the mask + mask_bin = _get_binary(2 ** bits - 1, 128)[::-1] + + # breaks it into sixteen character groupings + groupings = [mask_bin[16 * i:16 * (i + 1)] for i in range(8)] + + # converts each group into its hex value + return ':'.join(['%04x' % int(group, 2) for group in groupings]).upper() + + +def _get_masked_bits(mask): + """ + Provides the number of bits that an IPv4 subnet mask represents. Note that + not all masks can be represented by a bit count. + + :param str mask: mask to be converted + + :returns: **int** with the number of bits represented by the mask + + :raises: **ValueError** if the mask is invalid or can't be converted + """ + + if not is_valid_ipv4_address(mask): + raise ValueError("'%s' is an invalid subnet mask" % mask) + + # converts octets to binary representation + mask_bin = _get_address_binary(mask) + mask_match = re.match('^(1*)(0*)$', mask_bin) + + if mask_match: + return 32 - len(mask_match.groups()[1]) + else: + raise ValueError('Unable to convert mask to a bit count: %s' % mask) + + +def _get_binary(value, bits): + """ + Provides the given value as a binary string, padded with zeros to the given + number of bits. 
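For example (editor's illustration)...

::

  >>> _get_binary(10, 8)
  '00001010'

This is the building block used by the mask helpers above; for instance get_mask_ipv4(24) works out to '255.255.255.0'.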
+ + :param int value: value to be converted + :param int bits: number of bits to pad to + """ + + # http://www.daniweb.com/code/snippet216539.html + return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)]) + + +def _get_address_binary(address): + """ + Provides the binary value for an IPv4 or IPv6 address. + + :returns: **str** with the binary representation of this address + + :raises: **ValueError** if address is neither an IPv4 nor IPv6 address + """ + + if is_valid_ipv4_address(address): + return ''.join([_get_binary(int(octet), 8) for octet in address.split('.')]) + elif is_valid_ipv6_address(address): + address = expand_ipv6_address(address) + return ''.join([_get_binary(int(grouping, 16), 16) for grouping in address.split(':')]) + else: + raise ValueError("'%s' is neither an IPv4 or IPv6 address" % address) + + +def _hmac_sha256(key, msg): + """ + Generates a sha256 digest using the given key and message. + + :param str key: starting key for the hash + :param str msg: message to be hashed + + :returns: sha256 digest of msg as bytes, hashed using the given key + """ + + return hmac.new(key, msg, hashlib.sha256).digest() + + +def _cryptovariables_equal(x, y): + """ + Compares two strings for equality securely. + + :param str x: string to be compared. + :param str y: the other string to be compared. + + :returns: **True** if both strings are equal, **False** otherwise. + """ + + return ( + _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, x) == + _hmac_sha256(CRYPTOVARIABLE_EQUALITY_COMPARISON_NONCE, y)) + +# TODO: drop with stem 2.x +# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old +# names for backward compatability. + +get_system_resolvers = system_resolvers diff --git a/Shared/lib/python3.4/site-packages/stem/util/enum.py b/Shared/lib/python3.4/site-packages/stem/util/enum.py new file mode 100644 index 0000000..5cf81b8 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/enum.py @@ -0,0 +1,172 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Basic enumeration, providing ordered types for collections. These can be +constructed as simple type listings... + +:: + + >>> from stem.util import enum + >>> insects = enum.Enum('ANT', 'WASP', 'LADYBUG', 'FIREFLY') + >>> insects.ANT + 'Ant' + >>> tuple(insects) + ('Ant', 'Wasp', 'Ladybug', 'Firefly') + +... or with overwritten string counterparts... + +:: + + >>> from stem.util import enum + >>> pets = enum.Enum(('DOG', 'Skippy'), 'CAT', ('FISH', 'Nemo')) + >>> pets.DOG + 'Skippy' + >>> pets.CAT + 'Cat' + +**Module Overview:** + +:: + + UppercaseEnum - Provides an enum instance with capitalized values + + Enum - Provides a basic, ordered enumeration + |- keys - string representation of our enum keys + |- index_of - index of an enum value + |- next - provides the enum after a given enum value + |- previous - provides the enum before a given value + |- __getitem__ - provides the value for an enum key + +- __iter__ - iterator over our enum keys +""" + +from stem import str_type + + +def UppercaseEnum(*args): + """ + Provides an :class:`~stem.util.enum.Enum` instance where the values are + identical to the keys. Since the keys are uppercase by convention this means + the values are too. For instance... 
+ + :: + + >>> from stem.util import enum + >>> runlevels = enum.UppercaseEnum('DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR') + >>> runlevels.DEBUG + 'DEBUG' + + :param list args: enum keys to initialize with + + :returns: :class:`~stem.util.enum.Enum` instance with the given keys + """ + + return Enum(*[(v, v) for v in args]) + + +class Enum(object): + """ + Basic enumeration. + """ + + def __init__(self, *args): + from stem.util.str_tools import _to_camel_case + + # ordered listings of our keys and values + keys, values = [], [] + + for entry in args: + if isinstance(entry, (bytes, str_type)): + key, val = entry, _to_camel_case(entry) + elif isinstance(entry, tuple) and len(entry) == 2: + key, val = entry + else: + raise ValueError('Unrecognized input: %s' % args) + + keys.append(key) + values.append(val) + setattr(self, key, val) + + self._keys = tuple(keys) + self._values = tuple(values) + + def keys(self): + """ + Provides an ordered listing of the enumeration keys in this set. + + :returns: **list** with our enum keys + """ + + return list(self._keys) + + def index_of(self, value): + """ + Provides the index of the given value in the collection. + + :param str value: entry to be looked up + + :returns: **int** index of the given entry + + :raises: **ValueError** if no such element exists + """ + + return self._values.index(value) + + def next(self, value): + """ + Provides the next enumeration after the given value. + + :param str value: enumeration for which to get the next entry + + :returns: enum value following the given entry + + :raises: **ValueError** if no such element exists + """ + + if value not in self._values: + raise ValueError('No such enumeration exists: %s (options: %s)' % (value, ', '.join(self._values))) + + next_index = (self._values.index(value) + 1) % len(self._values) + return self._values[next_index] + + def previous(self, value): + """ + Provides the previous enumeration before the given value. + + :param str value: enumeration for which to get the previous entry + + :returns: enum value proceeding the given entry + + :raises: **ValueError** if no such element exists + """ + + if value not in self._values: + raise ValueError('No such enumeration exists: %s (options: %s)' % (value, ', '.join(self._values))) + + prev_index = (self._values.index(value) - 1) % len(self._values) + return self._values[prev_index] + + def __getitem__(self, item): + """ + Provides the values for the given key. + + :param str item: key to be looked up + + :returns: **str** with the value for the given key + + :raises: **ValueError** if the key doesn't exist + """ + + if item in vars(self): + return getattr(self, item) + else: + keys = ', '.join(self.keys()) + raise ValueError("'%s' isn't among our enumeration keys, which includes: %s" % (item, keys)) + + def __iter__(self): + """ + Provides an ordered listing of the enums in this set. + """ + + for entry in self._values: + yield entry diff --git a/Shared/lib/python3.4/site-packages/stem/util/log.py b/Shared/lib/python3.4/site-packages/stem/util/log.py new file mode 100644 index 0000000..4154706 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/log.py @@ -0,0 +1,253 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Functions to aid library logging. The default logging +:data:`~stem.util.log.Runlevel` is usually NOTICE and above. + +**Stem users are more than welcome to listen for stem events, but these +functions are not being vended to our users. 
They may change in the future, use +them at your own risk.** + +**Module Overview:** + +:: + + get_logger - provides the stem's Logger instance + logging_level - converts a runlevel to its logging number + escape - escapes special characters in a message in preparation for logging + + log - logs a message at the given runlevel + log_once - logs a message, deduplicating if it has already been logged + trace - logs a message at the TRACE runlevel + debug - logs a message at the DEBUG runlevel + info - logs a message at the INFO runlevel + notice - logs a message at the NOTICE runlevel + warn - logs a message at the WARN runlevel + error - logs a message at the ERROR runlevel + + LogBuffer - Buffers logged events so they can be iterated over. + |- is_empty - checks if there's events in our buffer + +- __iter__ - iterates over and removes the buffered events + + log_to_stdout - reports further logged events to stdout + +.. data:: Runlevel (enum) + + Enumeration for logging runlevels. + + ========== =========== + Runlevel Description + ========== =========== + **ERROR** critical issue occurred, the user needs to be notified + **WARN** non-critical issue occurred that the user should be aware of + **NOTICE** information that is helpful to the user + **INFO** high level library activity + **DEBUG** low level library activity + **TRACE** request/reply logging + ========== =========== +""" + +import logging + +import stem.prereq +import stem.util.enum +import stem.util.str_tools + +# Logging runlevels. These are *very* commonly used so including shorter +# aliases (so they can be referenced as log.DEBUG, log.WARN, etc). + +Runlevel = stem.util.enum.UppercaseEnum('TRACE', 'DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR') +TRACE, DEBUG, INFO, NOTICE, WARN, ERR = list(Runlevel) + +# mapping of runlevels to the logger module's values, TRACE and DEBUG aren't +# built into the module + +LOG_VALUES = { + Runlevel.TRACE: logging.DEBUG - 5, + Runlevel.DEBUG: logging.DEBUG, + Runlevel.INFO: logging.INFO, + Runlevel.NOTICE: logging.INFO + 5, + Runlevel.WARN: logging.WARN, + Runlevel.ERROR: logging.ERROR, +} + +logging.addLevelName(LOG_VALUES[TRACE], 'TRACE') +logging.addLevelName(LOG_VALUES[NOTICE], 'NOTICE') + +LOGGER = logging.getLogger('stem') +LOGGER.setLevel(LOG_VALUES[TRACE]) + +# There's some messages that we don't want to log more than once. This set has +# the messages IDs that we've logged which fall into this category. +DEDUPLICATION_MESSAGE_IDS = set() + +# Adds a default nullhandler for the stem logger, suppressing the 'No handlers +# could be found for logger "stem"' warning as per... +# http://docs.python.org/release/3.1.3/library/logging.html#configuring-logging-for-a-library + + +class _NullHandler(logging.Handler): + def emit(self, record): + pass + +if not LOGGER.handlers: + LOGGER.addHandler(_NullHandler()) + + +def get_logger(): + """ + Provides the stem logger. + + :return: **logging.Logger** for stem + """ + + return LOGGER + + +def logging_level(runlevel): + """ + Translates a runlevel into the value expected by the logging module. + + :param stem.util.log.Runlevel runlevel: runlevel to be returned, no logging if **None** + """ + + if runlevel: + return LOG_VALUES[runlevel] + else: + return logging.FATAL + 5 + + +def escape(message): + """ + Escapes specific sequences for logging (newlines, tabs, carriage returns). If + the input is **bytes** then this converts it to **unicode** under python 3.x. 
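For example (editor's illustration)...

::

  >>> escape('line 1\nline 2')
  'line 1\\nline 2'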
+ + :param str message: string to be escaped + + :returns: str that is escaped + """ + + if stem.prereq.is_python_3(): + message = stem.util.str_tools._to_unicode(message) + + for pattern, replacement in (('\n', '\\n'), ('\r', '\\r'), ('\t', '\\t')): + message = message.replace(pattern, replacement) + + return message + + +def log(runlevel, message): + """ + Logs a message at the given runlevel. + + :param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None** + :param str message: message to be logged + """ + + if runlevel: + LOGGER.log(LOG_VALUES[runlevel], message) + + +def log_once(message_id, runlevel, message): + """ + Logs a message at the given runlevel. If a message with this ID has already + been logged then this is a no-op. + + :param str message_id: unique message identifier to deduplicate on + :param stem.util.log.Runlevel runlevel: runlevel to log the message at, logging is skipped if **None** + :param str message: message to be logged + + :returns: **True** if we log the message, **False** otherwise + """ + + if not runlevel or message_id in DEDUPLICATION_MESSAGE_IDS: + return False + else: + DEDUPLICATION_MESSAGE_IDS.add(message_id) + log(runlevel, message) + +# shorter aliases for logging at a runlevel + + +def trace(message): + log(Runlevel.TRACE, message) + + +def debug(message): + log(Runlevel.DEBUG, message) + + +def info(message): + log(Runlevel.INFO, message) + + +def notice(message): + log(Runlevel.NOTICE, message) + + +def warn(message): + log(Runlevel.WARN, message) + + +def error(message): + log(Runlevel.ERROR, message) + + +class LogBuffer(logging.Handler): + """ + Basic log handler that listens for stem events and stores them so they can be + read later. Log entries are cleared as they are read. + + .. versionchanged:: 1.4.0 + Added the yield_records argument. + """ + + def __init__(self, runlevel, yield_records = False): + # TODO: At least in python 2.6 logging.Handler has a bug in that it doesn't + # extend object, causing our super() call to fail. When we drop python 2.6 + # support we should switch back to using super() instead. + # + # super(LogBuffer, self).__init__(level = logging_level(runlevel)) + + logging.Handler.__init__(self, level = logging_level(runlevel)) + + self.formatter = logging.Formatter( + fmt = '%(asctime)s [%(levelname)s] %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S') + + self._buffer = [] + self._yield_records = yield_records + + def is_empty(self): + return not bool(self._buffer) + + def __iter__(self): + while self._buffer: + record = self._buffer.pop(0) + yield record if self._yield_records else self.formatter.format(record) + + def emit(self, record): + self._buffer.append(record) + + +class _StdoutLogger(logging.Handler): + def __init__(self, runlevel): + logging.Handler.__init__(self, level = logging_level(runlevel)) + + self.formatter = logging.Formatter( + fmt = '%(asctime)s [%(levelname)s] %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S') + + def emit(self, record): + print(self.formatter.format(record)) + + +def log_to_stdout(runlevel): + """ + Logs further events to stdout. 
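For instance, a minimal sketch (editor's illustration; the message text is made up)...

::

  from stem.util import log

  log.log_to_stdout(log.Runlevel.INFO)
  log.notice('stem log output is now mirrored to stdout')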
+ + :param stem.util.log.Runlevel runlevel: minimum runlevel a message needs to be to be logged + """ + + get_logger().addHandler(_StdoutLogger(runlevel)) diff --git a/Shared/lib/python3.4/site-packages/stem/util/lru_cache.py b/Shared/lib/python3.4/site-packages/stem/util/lru_cache.py new file mode 100644 index 0000000..011d445 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/lru_cache.py @@ -0,0 +1,182 @@ +# Drop in replace for python 3.2's collections.lru_cache, from... +# http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/ +# +# ... which is under the MIT license. Stem users should *not* rely upon this +# module. It will be removed when we drop support for python 3.2 and below. + +""" +Memoization decorator that caches a function's return value. If later called +with the same arguments then the cached value is returned rather than +reevaluated. + +This is a a python 2.x port of `functools.lru_cache +`_. If +using python 3.2 or later you should use that instead. +""" + +from collections import namedtuple +from functools import update_wrapper +from threading import RLock + +_CacheInfo = namedtuple('CacheInfo', ['hits', 'misses', 'maxsize', 'currsize']) + + +class _HashedSeq(list): + __slots__ = 'hashvalue' + + def __init__(self, tup, hash=hash): + self[:] = tup + self.hashvalue = hash(tup) + + def __hash__(self): + return self.hashvalue + + +def _make_key(args, kwds, typed, + kwd_mark = (object(),), + fasttypes = set([int, str, frozenset, type(None)]), + sorted=sorted, tuple=tuple, type=type, len=len): + 'Make a cache key from optionally typed positional and keyword arguments' + key = args + if kwds: + sorted_items = sorted(kwds.items()) + key += kwd_mark + for item in sorted_items: + key += item + if typed: + key += tuple(type(v) for v in args) + if kwds: + key += tuple(type(v) for k, v in sorted_items) + elif len(key) == 1 and type(key[0]) in fasttypes: + return key[0] + return _HashedSeq(key) + + +def lru_cache(maxsize=100, typed=False): + """Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) with + f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used + + """ + + # Users should only access the lru_cache through its public API: + # cache_info, cache_clear, and f.__wrapped__ + # The internals of the lru_cache are encapsulated for thread safety and + # to allow the implementation to change (including a possible C version). 
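# An illustrative use of this decorator (editor's sketch, not part of the
# upstream recipe)...
#
#   @lru_cache(maxsize = 100)
#   def lookup(fingerprint):
#       ...  # some expensive computation, cached per argument
#
#   lookup.cache_info()   # CacheInfo(hits=..., misses=..., maxsize=100, currsize=...)
#   lookup.cache_clear()  # drops the cached entries and statistics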
+ + def decorating_function(user_function): + + cache = dict() + stats = [0, 0] # make statistics updateable non-locally + HITS, MISSES = 0, 1 # names for the stats fields + make_key = _make_key + cache_get = cache.get # bound method to lookup key or return None + _len = len # localize the global len() function + lock = RLock() # because linkedlist updates aren't threadsafe + root = [] # root of the circular doubly linked list + root[:] = [root, root, None, None] # initialize by pointing to self + nonlocal_root = [root] # make updateable non-locally + PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields + + if maxsize == 0: + + def wrapper(*args, **kwds): + # no caching, just do a statistics update after a successful call + result = user_function(*args, **kwds) + stats[MISSES] += 1 + return result + + elif maxsize is None: + + def wrapper(*args, **kwds): + # simple caching without ordering or size limit + key = make_key(args, kwds, typed) + result = cache_get(key, root) # root used here as a unique not-found sentinel + if result is not root: + stats[HITS] += 1 + return result + result = user_function(*args, **kwds) + cache[key] = result + stats[MISSES] += 1 + return result + + else: + + def wrapper(*args, **kwds): + # size limited caching that tracks accesses by recency + key = make_key(args, kwds, typed) if kwds or typed else args + with lock: + link = cache_get(key) + if link is not None: + # record recent use of the key by moving it to the front of the list + root, = nonlocal_root + link_prev, link_next, key, result = link + link_prev[NEXT] = link_next + link_next[PREV] = link_prev + last = root[PREV] + last[NEXT] = root[PREV] = link + link[PREV] = last + link[NEXT] = root + stats[HITS] += 1 + return result + result = user_function(*args, **kwds) + with lock: + root, = nonlocal_root + if key in cache: + # getting here means that this same key was added to the + # cache while the lock was released. since the link + # update is already done, we need only return the + # computed result and update the count of misses. + pass + elif _len(cache) >= maxsize: + # use the old root to store the new key and result + oldroot = root + oldroot[KEY] = key + oldroot[RESULT] = result + # empty the oldest link and make it the new root + root = nonlocal_root[0] = oldroot[NEXT] + oldkey = root[KEY] + root[KEY] = root[RESULT] = None + # now update the cache dictionary for the new links + del cache[oldkey] + cache[key] = oldroot + else: + # put result in a new link at the front of the list + last = root[PREV] + link = [last, root, key, result] + last[NEXT] = root[PREV] = cache[key] = link + stats[MISSES] += 1 + return result + + def cache_info(): + """Report cache statistics""" + with lock: + return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) + + def cache_clear(): + """Clear the cache and cache statistics""" + with lock: + cache.clear() + root = nonlocal_root[0] + root[:] = [root, root, None, None] + stats[:] = [0, 0] + + wrapper.__wrapped__ = user_function + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return update_wrapper(wrapper, user_function) + + return decorating_function diff --git a/Shared/lib/python3.4/site-packages/stem/util/ordereddict.py b/Shared/lib/python3.4/site-packages/stem/util/ordereddict.py new file mode 100644 index 0000000..07c7d4e --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/ordereddict.py @@ -0,0 +1,133 @@ +# Drop in replacement for python 2.7's OrderedDict, from... 
+# http://pypi.python.org/pypi/ordereddict +# +# Stem users should *not* rely upon this module. It will be removed when we +# drop support for python 2.6 and below. + +# Copyright (c) 2009 Raymond Hettinger +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation files +# (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, +# publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. + +from UserDict import DictMixin + + +class OrderedDict(dict, DictMixin): + def __init__(self, *args, **kwds): + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__end + except AttributeError: + self.clear() + self.update(*args, **kwds) + + def clear(self): + self.__end = end = [] + end += [None, end, end] # sentinel node for doubly linked list + self.__map = {} # key --> [key, prev, next] + dict.clear(self) + + def __setitem__(self, key, value): + if key not in self: + end = self.__end + curr = end[1] + curr[2] = end[1] = self.__map[key] = [key, curr, end] + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + dict.__delitem__(self, key) + key, prev, next = self.__map.pop(key) + prev[2] = next + next[1] = prev + + def __iter__(self): + end = self.__end + curr = end[2] + while curr is not end: + yield curr[0] + curr = curr[2] + + def __reversed__(self): + end = self.__end + curr = end[1] + while curr is not end: + yield curr[0] + curr = curr[1] + + def popitem(self, last=True): + if not self: + raise KeyError('dictionary is empty') + if last: + key = reversed(self).next() + else: + key = iter(self).next() + value = self.pop(key) + return key, value + + def __reduce__(self): + items = [[k, self[k]] for k in self] + tmp = self.__map, self.__end + del self.__map, self.__end + inst_dict = vars(self).copy() + self.__map, self.__end = tmp + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def keys(self): + return list(self) + + setdefault = DictMixin.setdefault + update = DictMixin.update + pop = DictMixin.pop + values = DictMixin.values + items = DictMixin.items + iterkeys = DictMixin.iterkeys + itervalues = DictMixin.itervalues + iteritems = DictMixin.iteritems + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + + def copy(self): + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + if isinstance(other, OrderedDict): + if len(self) != len(other): + return False + for p, q in zip(self.items(), 
other.items()): + if p != q: + return False + return True + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other diff --git a/Shared/lib/python3.4/site-packages/stem/util/ports.cfg b/Shared/lib/python3.4/site-packages/stem/util/ports.cfg new file mode 100644 index 0000000..8b7829e --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/ports.cfg @@ -0,0 +1,313 @@ +################################################################################ +# +# Common usages for port . This is based on... +# +# https://secure.wikimedia.org/wikipedia/en/wiki/List_of_TCP_and_UDP_port numbers +# http://isc.sans.edu/services.html +# +################################################################################ + +port 1 => TCPMUX +port 2 => CompressNET +port 3 => CompressNET +port 5 => RJE +port 7 => Echo +port 9 => Discard +port 11 => SYSTAT +port 13 => Daytime +port 15 => netstat +port 17 => QOTD +port 18 => MSP +port 19 => CHARGEN +port 20 => FTP +port 21 => FTP +port 22 => SSH +port 23 => Telnet +port 24 => Priv-mail +port 25 => SMTP +port 34 => RF +port 35 => Printer +port 37 => TIME +port 39 => RLP +port 41 => Graphics +port 42 => WINS +port 43 => WHOIS +port 47 => NI FTP +port 49 => TACACS +port 50 => Remote Mail +port 51 => IMP +port 52 => XNS +port 53 => DNS +port 54 => XNS +port 55 => ISI-GL +port 56 => RAP +port 57 => MTP +port 58 => XNS +port 67 => BOOTP +port 68 => BOOTP +port 69 => TFTP +port 70 => Gopher +port 79 => Finger +port 80 => HTTP +port 81 => HTTP Alternate +port 82 => Torpark +port 83 => MIT ML +port 88 => Kerberos +port 90 => dnsix +port 99 => WIP +port 101 => NIC +port 102 => ISO-TSAP +port 104 => ACR/NEMA +port 105 => CCSO +port 107 => Telnet +port 108 => SNA +port 109 => POP2 +port 110 => POP3 +port 111 => ONC RPC +port 113 => ident +port 115 => SFTP +port 117 => UUCP +port 118 => SQL +port 119 => NNTP +port 123 => NTP +port 135 => DCE +port 137 => NetBIOS +port 138 => NetBIOS +port 139 => NetBIOS +port 143 => IMAP +port 152 => BFTP +port 153 => SGMP +port 156 => SQL +port 158 => DMSP +port 161 => SNMP +port 162 => SNMPTRAP +port 170 => Print-srv +port 177 => XDMCP +port 179 => BGP +port 194 => IRC +port 199 => SMUX +port 201 => AppleTalk +port 209 => QMTP +port 210 => ANSI +port 213 => IPX +port 218 => MPP +port 220 => IMAP +port 256 => 2DEV +port 259 => ESRO +port 264 => BGMP +port 308 => Novastor +port 311 => OSX Admin +port 318 => PKIX TSP +port 319 => PTP +port 320 => PTP +port 323 => IMMP +port 350 => MATIP +port 351 => MATIP +port 366 => ODMR +port 369 => Rpc2port ap +port 370 => codaauth2 +port 371 => ClearCase +port 383 => HP Alarm Mgr +port 384 => ARNS +port 387 => AURP +port 389 => LDAP +port 401 => UPS +port 402 => Altiris +port 427 => SLP +port 443 => HTTPS +port 444 => SNPP +port 445 => SMB +port 464 => Kerberos (kpasswd) +port 465 => SMTP +port 475 => tcpnethaspsrv +port 497 => Retrospect +port 500 => ISAKMP +port 501 => STMF +port 502 => Modbus +port 504 => Citadel +port 510 => FirstClass +port 512 => Rexec +port 513 => rlogin +port 514 => rsh +port 515 => LPD +port 517 => Talk +port 518 => NTalk +port 520 => efs +port 524 => NCP +port 530 => RPC +port 531 => AIM/IRC +port 532 => netnews +port 533 => netwall +port 540 => UUCP +port 542 => commerce +port 543 => Kerberos (klogin) +port 544 => Kerberos (kshell) +port 545 => OSISoft PI +port 546 => DHCPv6 +port 547 => DHCPv6 +port 548 => AFP +port 550 => new-who +port 554 => RTSP +port 556 => RFS +port 560 => rmonitor +port 561 => monitor +port 563 => NNTPS +port 587 
=> SMTP +port 591 => FileMaker +port 593 => HTTP RPC +port 604 => TUNNEL +port 623 => ASF-RMCP +port 631 => CUPS +port 635 => RLZ DBase +port 636 => LDAPS +port 639 => MSDP +port 641 => Support oft +port 646 => LDP +port 647 => DHCP +port 648 => RRP +port 651 => IEEE-MMS +port 652 => DTCP +port 653 => Support oft +port 654 => MMS/MMP +port 657 => RMC +port 660 => OSX Admin +port 665 => sun-dr +port 666 => Doom +port 674 => ACAP +port 691 => MS Exchange +port 692 => Hyperwave-ISP +port 694 => Linux-HA +port 695 => IEEE-MMS-SSL +port 698 => OLSR +port 699 => Access Network +port 700 => EPP +port 701 => LMP +port 702 => IRIS +port 706 => SILC +port 711 => MPLS +port 712 => TBRPF +port 720 => SMQP +port 749 => Kerberos (admin) +port 750 => rfile +port 751 => pump +port 752 => qrh +port 753 => rrh +port 754 => tell send +port 760 => ns +port 782 => Conserver +port 783 => spamd +port 829 => CMP +port 843 => Flash +port 847 => DHCP +port 860 => iSCSI +port 873 => rsync +port 888 => CDDB +port 901 => SWAT +port 902-904 => VMware +port 911 => NCA +port 953 => DNS RNDC +port 981 => SofaWare Firewall +port 989 => FTPS +port 990 => FTPS +port 991 => NAS +port 992 => Telnets +port 993 => IMAPS +port 994 => IRCS +port 995 => POP3S +port 999 => ScimoreDB +port 1001 => JtoMB +port 1002 => cogbot + +port 1080 => SOCKS +port 1085 => WebObjects +port 1109 => KPOP +port 1169 => Tripwire +port 1194 => OpenVPN +port 1214 => Kazaa +port 1220 => QuickTime +port 1234 => VLC +port 1241 => Nessus +port 1270 => SCOM +port 1293 => IPSec +port 1433 => MSSQL +port 1434 => MSSQL +port 1500 => NetGuard +port 1503 => MSN +port 1512 => WINS +port 1521 => Oracle +port 1526 => Oracle +port 1533 => Sametime +port 1666 => Perforce +port 1677 => GroupWise +port 1723 => PPTP +port 1725 => Steam +port 1863 => MSNP +port 2049 => NFS +port 2082 => Infowave +port 2083 => radsec +port 2086 => GNUnet +port 2087 => ELI +port 2095 => NBX SER +port 2096 => NBX DIR +port 2102-2104 => Zephyr +port 2401 => CVS +port 2525 => SMTP +port 2710 => BitTorrent +port 3074 => XBox LIVE +port 3101 => BlackBerry +port 3128 => SQUID +port 3306 => MySQL +port 3389 => WBT +port 3690 => SVN +port 3723 => Battle.net +port 3724 => WoW +port 4321 => RWHOIS +port 4643 => Virtuozzo +port 4662 => eMule +port 5003 => FileMaker +port 5050 => Yahoo IM +port 5060 => SIP +port 5061 => SIP +port 5190 => AIM/ICQ +port 5222 => Jabber +port 5223 => Jabber +port 5228 => Android Market +port 5269 => Jabber +port 5298 => Jabber +port 5432 => PostgreSQL +port 5500 => VNC +port 5556 => Freeciv +port 5666 => NRPE +port 5667 => NSCA +port 5800 => VNC +port 5900 => VNC +port 6346 => gnutella +port 6347 => gnutella +port 6660-6669 => IRC +port 6679 => IRC +port 6697 => IRC +port 6881-6999 => BitTorrent +port 8000 => iRDMI +port 8008 => HTTP Alternate +port 8010 => XMPP +port 8074 => Gadu-Gadu +port 8080 => HTTP Proxy +port 8087 => SPP +port 8088 => Radan HTTP +port 8118 => Privoxy +port 8123 => Polipo +port 8332-8333 => Bitcoin +port 8443 => PCsync HTTPS +port 8888 => NewsEDGE +port 9030 => Tor +port 9050 => Tor +port 9051 => Tor +port 9418 => Git +port 9999 => distinct +port 10000 => Webmin +port 19294 => Google Voice +port 19638 => Ensim +port 23399 => Skype +port 30301 => BitTorrent +port 33434 => traceroute + diff --git a/Shared/lib/python3.4/site-packages/stem/util/proc.py b/Shared/lib/python3.4/site-packages/stem/util/proc.py new file mode 100644 index 0000000..e4a826e --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/proc.py @@ -0,0 +1,547 @@ +# Copyright 
2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Helper functions for querying process and system information from the /proc +contents. Fetching information this way provides huge performance benefits +over lookups via system utilities (ps, netstat, etc). For instance, resolving +connections this way cuts the runtime by around 90% verses the alternatives. +These functions may not work on all platforms (only Linux?). + +The method for reading these files (and a little code) are borrowed from +`psutil `_, which was written by Jay Loden, +Dave Daeschler, Giampaolo Rodola' and is under the BSD license. + +**These functions are not being vended to stem users. They may change in the +future, use them at your own risk.** + +.. versionchanged:: 1.3.0 + Dropped the get_* prefix from several function names. The old names still + work, but are deprecated aliases. + +**Module Overview:** + +:: + + is_available - checks if proc utilities can be used on this system + system_start_time - unix timestamp for when the system started + physical_memory - memory available on this system + cwd - provides the current working directory for a process + uid - provides the user id a process is running under + memory_usage - provides the memory usage of a process + stats - queries statistics about a process + file_descriptors_used - number of file descriptors used by a process + connections - provides the connections made by a process + +.. data:: Stat (enum) + + Types of data available via the :func:`~stem.util.proc.stats` function. + + ============== =========== + Stat Description + ============== =========== + **COMMAND** command name under which the process is running + **CPU_UTIME** total user time spent on the process + **CPU_STIME** total system time spent on the process + **START_TIME** when this process began, in unix time + ============== =========== +""" + +import base64 +import os +import platform +import socket +import sys +import time + +import stem.util.enum + +from stem.util import log + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + +# os.sysconf is only defined on unix +try: + CLOCK_TICKS = os.sysconf(os.sysconf_names['SC_CLK_TCK']) +except AttributeError: + CLOCK_TICKS = None + +Stat = stem.util.enum.Enum( + ('COMMAND', 'command'), ('CPU_UTIME', 'utime'), + ('CPU_STIME', 'stime'), ('START_TIME', 'start time') +) + + +@lru_cache() +def is_available(): + """ + Checks if proc information is available on this platform. + + :returns: **True** if proc contents exist on this platform, **False** otherwise + """ + + if platform.system() != 'Linux': + return False + else: + # list of process independent proc paths we use + proc_paths = ('/proc/stat', '/proc/meminfo', '/proc/net/tcp', '/proc/net/udp') + + for path in proc_paths: + if not os.path.exists(path): + return False + + return True + + +@lru_cache() +def system_start_time(): + """ + Provides the unix time (seconds since epoch) when the system started. 
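+
+  A rough example (the timestamp below is made up)::
+
+    >>> system_start_time()
+    1448226833.0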
+ + :returns: **float** for the unix time of when the system started + + :raises: **IOError** if it can't be determined + """ + + start_time, parameter = time.time(), 'system start time' + btime_line = _get_line('/proc/stat', 'btime', parameter) + + try: + result = float(btime_line.strip().split()[1]) + _log_runtime(parameter, '/proc/stat[btime]', start_time) + return result + except: + exc = IOError('unable to parse the /proc/stat btime entry: %s' % btime_line) + _log_failure(parameter, exc) + raise exc + + +@lru_cache() +def physical_memory(): + """ + Provides the total physical memory on the system in bytes. + + :returns: **int** for the bytes of physical memory this system has + + :raises: **IOError** if it can't be determined + """ + + start_time, parameter = time.time(), 'system physical memory' + mem_total_line = _get_line('/proc/meminfo', 'MemTotal:', parameter) + + try: + result = int(mem_total_line.split()[1]) * 1024 + _log_runtime(parameter, '/proc/meminfo[MemTotal]', start_time) + return result + except: + exc = IOError('unable to parse the /proc/meminfo MemTotal entry: %s' % mem_total_line) + _log_failure(parameter, exc) + raise exc + + +def cwd(pid): + """ + Provides the current working directory for the given process. + + :param int pid: process id of the process to be queried + + :returns: **str** with the path of the working directory for the process + + :raises: **IOError** if it can't be determined + """ + + start_time, parameter = time.time(), 'cwd' + proc_cwd_link = '/proc/%s/cwd' % pid + + if pid == 0: + cwd = '' + else: + try: + cwd = os.readlink(proc_cwd_link) + except OSError: + exc = IOError('unable to read %s' % proc_cwd_link) + _log_failure(parameter, exc) + raise exc + + _log_runtime(parameter, proc_cwd_link, start_time) + return cwd + + +def uid(pid): + """ + Provides the user ID the given process is running under. + + :param int pid: process id of the process to be queried + + :returns: **int** with the user id for the owner of the process + + :raises: **IOError** if it can't be determined + """ + + start_time, parameter = time.time(), 'uid' + status_path = '/proc/%s/status' % pid + uid_line = _get_line(status_path, 'Uid:', parameter) + + try: + result = int(uid_line.split()[1]) + _log_runtime(parameter, '%s[Uid]' % status_path, start_time) + return result + except: + exc = IOError('unable to parse the %s Uid entry: %s' % (status_path, uid_line)) + _log_failure(parameter, exc) + raise exc + + +def memory_usage(pid): + """ + Provides the memory usage in bytes for the given process. + + :param int pid: process id of the process to be queried + + :returns: **tuple** of two ints with the memory usage of the process, of the + form **(resident_size, virtual_size)** + + :raises: **IOError** if it can't be determined + """ + + # checks if this is the kernel process + + if pid == 0: + return (0, 0) + + start_time, parameter = time.time(), 'memory usage' + status_path = '/proc/%s/status' % pid + mem_lines = _get_lines(status_path, ('VmRSS:', 'VmSize:'), parameter) + + try: + residentSize = int(mem_lines['VmRSS:'].split()[1]) * 1024 + virtualSize = int(mem_lines['VmSize:'].split()[1]) * 1024 + + _log_runtime(parameter, '%s[VmRSS|VmSize]' % status_path, start_time) + return (residentSize, virtualSize) + except: + exc = IOError('unable to parse the %s VmRSS and VmSize entries: %s' % (status_path, ', '.join(mem_lines))) + _log_failure(parameter, exc) + raise exc + + +def stats(pid, *stat_types): + """ + Provides process specific information. 
See the :data:`~stem.util.proc.Stat` + enum for valid options. + + :param int pid: process id of the process to be queried + :param Stat stat_types: information to be provided back + + :returns: **tuple** with all of the requested statistics as strings + + :raises: **IOError** if it can't be determined + """ + + if CLOCK_TICKS is None: + raise IOError('Unable to look up SC_CLK_TCK') + + start_time, parameter = time.time(), 'process %s' % ', '.join(stat_types) + + # the stat file contains a single line, of the form... + # 8438 (tor) S 8407 8438 8407 34818 8438 4202496... + stat_path = '/proc/%s/stat' % pid + stat_line = _get_line(stat_path, str(pid), parameter) + + # breaks line into component values + stat_comp = [] + cmd_start, cmd_end = stat_line.find('('), stat_line.find(')') + + if cmd_start != -1 and cmd_end != -1: + stat_comp.append(stat_line[:cmd_start]) + stat_comp.append(stat_line[cmd_start + 1:cmd_end]) + stat_comp += stat_line[cmd_end + 1:].split() + + if len(stat_comp) < 44 and _is_float(stat_comp[13], stat_comp[14], stat_comp[21]): + exc = IOError('stat file had an unexpected format: %s' % stat_path) + _log_failure(parameter, exc) + raise exc + + results = [] + for stat_type in stat_types: + if stat_type == Stat.COMMAND: + if pid == 0: + results.append('sched') + else: + results.append(stat_comp[1]) + elif stat_type == Stat.CPU_UTIME: + if pid == 0: + results.append('0') + else: + results.append(str(float(stat_comp[13]) / CLOCK_TICKS)) + elif stat_type == Stat.CPU_STIME: + if pid == 0: + results.append('0') + else: + results.append(str(float(stat_comp[14]) / CLOCK_TICKS)) + elif stat_type == Stat.START_TIME: + if pid == 0: + return system_start_time() + else: + # According to documentation, starttime is in field 21 and the unit is + # jiffies (clock ticks). We divide it for clock ticks, then add the + # uptime to get the seconds since the epoch. + p_start_time = float(stat_comp[21]) / CLOCK_TICKS + results.append(str(p_start_time + system_start_time())) + + _log_runtime(parameter, stat_path, start_time) + return tuple(results) + + +def file_descriptors_used(pid): + """ + Provides the number of file descriptors currently being used by a process. + + .. versionadded:: 1.3.0 + + :param int pid: process id of the process to be queried + + :returns: **int** of the number of file descriptors used + + :raises: **IOError** if it can't be determined + """ + + try: + pid = int(pid) + + if pid < 0: + raise IOError("Process pids can't be negative: %s" % pid) + except (ValueError, TypeError): + raise IOError('Process pid was non-numeric: %s' % pid) + + try: + return len(os.listdir('/proc/%i/fd' % pid)) + except Exception as exc: + raise IOError('Unable to check number of file descriptors used: %s' % exc) + + +def connections(pid): + """ + Queries connection related information from the proc contents. This provides + similar results to netstat, lsof, sockstat, and other connection resolution + utilities (though the lookup is far quicker). 
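+
+  A rough usage sketch (the pid, addresses, and ports below are made up)::
+
+    >>> connections(1641)
+    [('127.0.0.1', 9051, '127.0.0.1', 45102, 'tcp')]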
+ + :param int pid: process id of the process to be queried + + :returns: A listing of connection tuples of the form **[(local_ipAddr1, + local_port1, foreign_ipAddr1, foreign_port1, protocol), ...]** (addresses + and protocols are strings and ports are ints) + + :raises: **IOError** if it can't be determined + """ + + try: + pid = int(pid) + + if pid < 0: + raise IOError("Process pids can't be negative: %s" % pid) + except (ValueError, TypeError): + raise IOError('Process pid was non-numeric: %s' % pid) + + if pid == 0: + return [] + + # fetches the inode numbers for socket file descriptors + + start_time, parameter = time.time(), 'process connections' + inodes = [] + + for fd in os.listdir('/proc/%s/fd' % pid): + fd_path = '/proc/%s/fd/%s' % (pid, fd) + + try: + # File descriptor link, such as 'socket:[30899]' + + fd_name = os.readlink(fd_path) + + if fd_name.startswith('socket:['): + inodes.append(fd_name[8:-1]) + except OSError as exc: + if not os.path.exists(fd_path): + continue # descriptors may shift while we're in the middle of iterating over them + + # most likely couldn't be read due to permissions + exc = IOError('unable to determine file descriptor destination (%s): %s' % (exc, fd_path)) + _log_failure(parameter, exc) + raise exc + + if not inodes: + # unable to fetch any connections for this process + return [] + + # check for the connection information from the /proc/net contents + + conn = [] + + for proc_file_path in ('/proc/net/tcp', '/proc/net/udp'): + try: + proc_file = open(proc_file_path) + proc_file.readline() # skip the first line + + for line in proc_file: + _, l_addr, f_addr, status, _, _, _, _, _, inode = line.split()[:10] + + if inode in inodes: + # if a tcp connection, skip if it isn't yet established + if proc_file_path.endswith('/tcp') and status != '01': + continue + + local_ip, local_port = _decode_proc_address_encoding(l_addr) + foreign_ip, foreign_port = _decode_proc_address_encoding(f_addr) + protocol = proc_file_path[10:] + conn.append((local_ip, local_port, foreign_ip, foreign_port, protocol)) + + proc_file.close() + except IOError as exc: + exc = IOError("unable to read '%s': %s" % (proc_file_path, exc)) + _log_failure(parameter, exc) + raise exc + except Exception as exc: + exc = IOError("unable to parse '%s': %s" % (proc_file_path, exc)) + _log_failure(parameter, exc) + raise exc + + _log_runtime(parameter, '/proc/net/[tcp|udp]', start_time) + return conn + + +def _decode_proc_address_encoding(addr): + """ + Translates an address entry in the /proc/net/* contents to a human readable + form (`reference `_, + for instance: + + :: + + "0500000A:0016" -> ("10.0.0.5", 22) + + :param str addr: proc address entry to be decoded + + :returns: **tuple** of the form **(addr, port)**, with addr as a string and port an int + """ + + ip, port = addr.split(':') + + # the port is represented as a two-byte hexadecimal number + port = int(port, 16) + + if sys.version_info >= (3,): + ip = ip.encode('ascii') + + # The IPv4 address portion is a little-endian four-byte hexadecimal number. + # That is, the least significant byte is listed first, so we need to reverse + # the order of the bytes to convert it to an IP address. + # + # This needs to account for the endian ordering as per... 
+ # http://code.google.com/p/psutil/issues/detail?id=201 + # https://trac.torproject.org/projects/tor/ticket/4777 + + if sys.byteorder == 'little': + ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip)[::-1]) + else: + ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip)) + + return (ip, port) + + +def _is_float(*value): + try: + for v in value: + float(v) + + return True + except ValueError: + return False + + +def _get_line(file_path, line_prefix, parameter): + return _get_lines(file_path, (line_prefix, ), parameter)[line_prefix] + + +def _get_lines(file_path, line_prefixes, parameter): + """ + Fetches lines with the given prefixes from a file. This only provides back + the first instance of each prefix. + + :param str file_path: path of the file to read + :param tuple line_prefixes: string prefixes of the lines to return + :param str parameter: description of the proc attribute being fetch + + :returns: mapping of prefixes to the matching line + + :raises: **IOError** if unable to read the file or can't find all of the prefixes + """ + + try: + remaining_prefixes = list(line_prefixes) + proc_file, results = open(file_path), {} + + for line in proc_file: + if not remaining_prefixes: + break # found everything we're looking for + + for prefix in remaining_prefixes: + if line.startswith(prefix): + results[prefix] = line + remaining_prefixes.remove(prefix) + break + + proc_file.close() + + if remaining_prefixes: + if len(remaining_prefixes) == 1: + msg = '%s did not contain a %s entry' % (file_path, remaining_prefixes[0]) + else: + msg = '%s did not contain %s entries' % (file_path, ', '.join(remaining_prefixes)) + + raise IOError(msg) + else: + return results + except IOError as exc: + _log_failure(parameter, exc) + raise exc + + +def _log_runtime(parameter, proc_location, start_time): + """ + Logs a message indicating a successful proc query. + + :param str parameter: description of the proc attribute being fetch + :param str proc_location: proc files we were querying + :param int start_time: unix time for when this query was started + """ + + runtime = time.time() - start_time + log.debug('proc call (%s): %s (runtime: %0.4f)' % (parameter, proc_location, runtime)) + + +def _log_failure(parameter, exc): + """ + Logs a message indicating that the proc query failed. + + :param str parameter: description of the proc attribute being fetch + :param Exception exc: exception that we're raising + """ + + log.debug('proc call failed (%s): %s' % (parameter, exc)) + +# TODO: drop with stem 2.x +# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old +# names for backward compatability. + +get_system_start_time = system_start_time +get_physical_memory = physical_memory +get_cwd = cwd +get_uid = uid +get_memory_usage = memory_usage +get_stats = stats +get_connections = connections diff --git a/Shared/lib/python3.4/site-packages/stem/util/str_tools.py b/Shared/lib/python3.4/site-packages/stem/util/str_tools.py new file mode 100644 index 0000000..497564e --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/str_tools.py @@ -0,0 +1,558 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Toolkit for various string activity. + +.. versionchanged:: 1.3.0 + Dropped the get_* prefix from several function names. The old names still + work, but are deprecated aliases. 
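+
+A rough usage sketch (output values are illustrative)::
+
+  >>> from stem.util import str_tools
+  >>> str_tools.size_label(2048, 1)
+  '2.0 KB'
+  >>> str_tools.time_label(3725, is_long = True)
+  '1 hour'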
+ +**Module Overview:** + +:: + + crop - shortens string to a given length + + size_label - human readable label for a number of bytes + time_label - human readable label for a number of seconds + time_labels - human readable labels for each time unit + short_time_label - condensed time label output + parse_short_time_label - seconds represented by a short time label +""" + +import codecs +import datetime +import re +import sys + +import stem.prereq +import stem.util.enum + +from stem import str_type + +# label conversion tuples of the form... +# (bits / bytes / seconds, short label, long label) + +SIZE_UNITS_BITS = ( + (140737488355328.0, ' Pb', ' Petabit'), + (137438953472.0, ' Tb', ' Terabit'), + (134217728.0, ' Gb', ' Gigabit'), + (131072.0, ' Mb', ' Megabit'), + (128.0, ' Kb', ' Kilobit'), + (0.125, ' b', ' Bit'), +) + +SIZE_UNITS_BYTES = ( + (1125899906842624.0, ' PB', ' Petabyte'), + (1099511627776.0, ' TB', ' Terabyte'), + (1073741824.0, ' GB', ' Gigabyte'), + (1048576.0, ' MB', ' Megabyte'), + (1024.0, ' KB', ' Kilobyte'), + (1.0, ' B', ' Byte'), +) + +TIME_UNITS = ( + (86400.0, 'd', ' day'), + (3600.0, 'h', ' hour'), + (60.0, 'm', ' minute'), + (1.0, 's', ' second'), +) + +_timestamp_re = re.compile(r'(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})') + +if stem.prereq.is_python_3(): + def _to_bytes_impl(msg): + if isinstance(msg, str): + return codecs.latin_1_encode(msg, 'replace')[0] + else: + return msg + + def _to_unicode_impl(msg): + if msg is not None and not isinstance(msg, str): + return msg.decode('utf-8', 'replace') + else: + return msg +else: + def _to_bytes_impl(msg): + if msg is not None and isinstance(msg, str_type): + return codecs.latin_1_encode(msg, 'replace')[0] + else: + return msg + + def _to_unicode_impl(msg): + if msg is not None and not isinstance(msg, str_type): + return msg.decode('utf-8', 'replace') + else: + return msg + + +def _to_bytes(msg): + """ + Provides the ASCII bytes for the given string. This is purely to provide + python 3 compatability, normalizing the unicode/ASCII change in the version + bump. For an explanation of this see... + + http://python3porting.com/problems.html#nicer-solutions + + :param str,unicode msg: string to be converted + + :returns: ASCII bytes for string + """ + + return _to_bytes_impl(msg) + + +def _to_unicode(msg): + """ + Provides the unicode string for the given ASCII bytes. This is purely to + provide python 3 compatability, normalizing the unicode/ASCII change in the + version bump. + + :param str,unicode msg: string to be converted + + :returns: unicode conversion + """ + + return _to_unicode_impl(msg) + + +def _to_camel_case(label, divider = '_', joiner = ' '): + """ + Converts the given string to camel case, ie: + + :: + + >>> _to_camel_case('I_LIKE_PEPPERJACK!') + 'I Like Pepperjack!' + + :param str label: input string to be converted + :param str divider: word boundary + :param str joiner: replacement for word boundaries + + :returns: camel cased string + """ + + words = [] + for entry in label.split(divider): + if len(entry) == 0: + words.append('') + elif len(entry) == 1: + words.append(entry.upper()) + else: + words.append(entry[0].upper() + entry[1:].lower()) + + return joiner.join(words) + + +# This needs to be defined after _to_camel_case() to avoid a circular +# dependency with the enum module. + +Ending = stem.util.enum.Enum('ELLIPSE', 'HYPHEN') + + +def crop(msg, size, min_word_length = 4, min_crop = 0, ending = Ending.ELLIPSE, get_remainder = False): + """ + Shortens a string to a given length. 
+ + If we crop content then a given ending is included (counting itself toward + the size limitation). This crops on word breaks so we only include a word if + we can display at least **min_word_length** characters of it. + + If there isn't room for even a truncated single word (or one word plus the + ellipse if including those) then this provides an empty string. + + If a cropped string ends with a comma or period then it's stripped (unless + we're providing the remainder back). For example... + + >>> crop('This is a looooong message', 17) + 'This is a looo...' + + >>> crop('This is a looooong message', 12) + 'This is a...' + + >>> crop('This is a looooong message', 3) + '' + + The whole point of this method is to provide human friendly croppings, and as + such details of how this works might change in the future. Callers should not + rely on the details of how this crops. + + .. versionadded:: 1.3.0 + + :param str msg: text to be processed + :param int size: space available for text + :param int min_word_length: minimum characters before which a word is + dropped, requires whole word if **None** + :param int min_crop: minimum characters that must be dropped if a word is + cropped + :param Ending ending: type of ending used when truncating, no special + truncation is used if **None** + :param bool get_remainder: returns a tuple with the second part being the + cropped portion of the message + + :returns: **str** of the text truncated to the given length + """ + + # checks if there's room for the whole message + + if len(msg) <= size: + return (msg, '') if get_remainder else msg + + if size < 0: + raise ValueError("Crop size can't be negative (received %i)" % size) + elif min_word_length and min_word_length < 0: + raise ValueError("Crop's min_word_length can't be negative (received %i)" % min_word_length) + elif min_crop < 0: + raise ValueError("Crop's min_crop can't be negative (received %i)" % min_crop) + + # since we're cropping, the effective space available is less with an + # ellipse, and cropping words requires an extra space for hyphens + + if ending == Ending.ELLIPSE: + size -= 3 + elif min_word_length and ending == Ending.HYPHEN: + min_word_length += 1 + + if min_word_length is None: + min_word_length = sys.maxsize + + # checks if there isn't the minimum space needed to include anything + + last_wordbreak = msg.rfind(' ', 0, size + 1) + + if last_wordbreak == -1: + # we're splitting the first word + + if size < min_word_length: + return ('', msg) if get_remainder else '' + + include_crop = True + else: + last_wordbreak = len(msg[:last_wordbreak].rstrip()) # drops extra ending whitespaces + include_crop = size - last_wordbreak - 1 >= min_word_length + + # if there's a max crop size then make sure we're cropping at least that many characters + + if include_crop and min_crop: + next_wordbreak = msg.find(' ', size) + + if next_wordbreak == -1: + next_wordbreak = len(msg) + + include_crop = next_wordbreak - size + 1 >= min_crop + + if include_crop: + return_msg, remainder = msg[:size], msg[size:] + + if ending == Ending.HYPHEN: + remainder = return_msg[-1] + remainder + return_msg = return_msg[:-1].rstrip() + '-' + else: + return_msg, remainder = msg[:last_wordbreak], msg[last_wordbreak:] + + # if this is ending with a comma or period then strip it off + + if not get_remainder and return_msg and return_msg[-1] in (',', '.'): + return_msg = return_msg[:-1] + + if ending == Ending.ELLIPSE: + return_msg = return_msg.rstrip() + '...' 
+ + return (return_msg, remainder) if get_remainder else return_msg + + +def size_label(byte_count, decimal = 0, is_long = False, is_bytes = True): + """ + Converts a number of bytes into a human readable label in its most + significant units. For instance, 7500 bytes would return "7 KB". If the + is_long option is used this expands unit labels to be the properly pluralized + full word (for instance 'Kilobytes' rather than 'KB'). Units go up through + petabytes. + + :: + + >>> size_label(2000000) + '1 MB' + + >>> size_label(1050, 2) + '1.02 KB' + + >>> size_label(1050, 3, True) + '1.025 Kilobytes' + + :param int byte_count: number of bytes to be converted + :param int decimal: number of decimal digits to be included + :param bool is_long: expands units label + :param bool is_bytes: provides units in bytes if **True**, bits otherwise + + :returns: **str** with human readable representation of the size + """ + + if is_bytes: + return _get_label(SIZE_UNITS_BYTES, byte_count, decimal, is_long) + else: + return _get_label(SIZE_UNITS_BITS, byte_count, decimal, is_long) + + +def time_label(seconds, decimal = 0, is_long = False): + """ + Converts seconds into a time label truncated to its most significant units. + For instance, 7500 seconds would return "2h". Units go up through days. + + This defaults to presenting single character labels, but if the is_long + option is used this expands labels to be the full word (space included and + properly pluralized). For instance, "4h" would be "4 hours" and "1m" would + become "1 minute". + + :: + + >>> time_label(10000) + '2h' + + >>> time_label(61, 1, True) + '1.0 minute' + + >>> time_label(61, 2, True) + '1.01 minutes' + + :param int seconds: number of seconds to be converted + :param int decimal: number of decimal digits to be included + :param bool is_long: expands units label + + :returns: **str** with human readable representation of the time + """ + + return _get_label(TIME_UNITS, seconds, decimal, is_long) + + +def time_labels(seconds, is_long = False): + """ + Provides a list of label conversions for each time unit, starting with its + most significant units on down. Any counts that evaluate to zero are omitted. + For example... 
+ + :: + + >>> time_labels(400) + ['6m', '40s'] + + >>> time_labels(3640, True) + ['1 hour', '40 seconds'] + + :param int seconds: number of seconds to be converted + :param bool is_long: expands units label + + :returns: **list** of strings with human readable representations of the time + """ + + time_labels = [] + + for count_per_unit, _, _ in TIME_UNITS: + if abs(seconds) >= count_per_unit: + time_labels.append(_get_label(TIME_UNITS, seconds, 0, is_long)) + seconds %= count_per_unit + + return time_labels + + +def short_time_label(seconds): + """ + Provides a time in the following format: + [[dd-]hh:]mm:ss + + :: + + >>> short_time_label(111) + '01:51' + + >>> short_time_label(544100) + '6-07:08:20' + + :param int seconds: number of seconds to be converted + + :returns: **str** with the short representation for the time + + :raises: **ValueError** if the input is negative + """ + + if seconds < 0: + raise ValueError("Input needs to be a non-negative integer, got '%i'" % seconds) + + time_comp = {} + + for amount, _, label in TIME_UNITS: + count = int(seconds / amount) + seconds %= amount + time_comp[label.strip()] = count + + label = '%02i:%02i' % (time_comp['minute'], time_comp['second']) + + if time_comp['day']: + label = '%i-%02i:%s' % (time_comp['day'], time_comp['hour'], label) + elif time_comp['hour']: + label = '%02i:%s' % (time_comp['hour'], label) + + return label + + +def parse_short_time_label(label): + """ + Provides the number of seconds corresponding to the formatting used for the + cputime and etime fields of ps: + [[dd-]hh:]mm:ss or mm:ss.ss + + :: + + >>> parse_short_time_label('01:51') + 111 + + >>> parse_short_time_label('6-07:08:20') + 544100 + + :param str label: time entry to be parsed + + :returns: **int** with the number of seconds represented by the label + + :raises: **ValueError** if input is malformed + """ + + days, hours, minutes, seconds = '0', '0', '0', '0' + + if '-' in label: + days, label = label.split('-', 1) + + time_comp = label.split(':') + + if len(time_comp) == 3: + hours, minutes, seconds = time_comp + elif len(time_comp) == 2: + minutes, seconds = time_comp + else: + raise ValueError("Invalid time format, we expected '[[dd-]hh:]mm:ss' or 'mm:ss.ss': %s" % label) + + try: + time_sum = int(float(seconds)) + time_sum += int(minutes) * 60 + time_sum += int(hours) * 3600 + time_sum += int(days) * 86400 + return time_sum + except ValueError: + raise ValueError('Non-numeric value in time entry: %s' % label) + + +def _parse_timestamp(entry): + """ + Parses the date and time that in format like like... + + :: + + 2012-11-08 16:48:41 + + :param str entry: timestamp to be parsed + + :returns: **datetime** for the time represented by the timestamp + + :raises: **ValueError** if the timestamp is malformed + """ + + if not isinstance(entry, (str, str_type)): + raise ValueError('parse_timestamp() input must be a str, got a %s' % type(entry)) + + try: + time = [int(x) for x in _timestamp_re.match(entry).groups()] + except AttributeError: + raise ValueError('Expected timestamp in format YYYY-MM-DD HH:MM:ss but got ' + entry) + + return datetime.datetime(time[0], time[1], time[2], time[3], time[4], time[5]) + + +def _parse_iso_timestamp(entry): + """ + Parses the ISO 8601 standard that provides for timestamps like... 
+ + :: + + 2012-11-08T16:48:41.420251 + + :param str entry: timestamp to be parsed + + :returns: **datetime** for the time represented by the timestamp + + :raises: **ValueError** if the timestamp is malformed + """ + + if not isinstance(entry, (str, str_type)): + raise ValueError('parse_iso_timestamp() input must be a str, got a %s' % type(entry)) + + # based after suggestions from... + # http://stackoverflow.com/questions/127803/how-to-parse-iso-formatted-date-in-python + + if '.' in entry: + timestamp_str, microseconds = entry.split('.') + else: + timestamp_str, microseconds = entry, '000000' + + if len(microseconds) != 6 or not microseconds.isdigit(): + raise ValueError("timestamp's microseconds should be six digits") + + if timestamp_str[10] == 'T': + timestamp_str = timestamp_str[:10] + ' ' + timestamp_str[11:] + else: + raise ValueError("timestamp didn't contain delimeter 'T' between date and time") + + timestamp = _parse_timestamp(timestamp_str) + return timestamp + datetime.timedelta(microseconds = int(microseconds)) + + +def _get_label(units, count, decimal, is_long): + """ + Provides label corresponding to units of the highest significance in the + provided set. This rounds down (ie, integer truncation after visible units). + + :param tuple units: type of units to be used for conversion, containing + (count_per_unit, short_label, long_label) + :param int count: number of base units being converted + :param int decimal: decimal precision of label + :param bool is_long: uses the long label if **True**, short label otherwise + """ + + # formatted string for the requested number of digits + label_format = '%%.%if' % decimal + + if count < 0: + label_format = '-' + label_format + count = abs(count) + elif count == 0: + units_label = units[-1][2] + 's' if is_long else units[-1][1] + return '%s%s' % (label_format % count, units_label) + + for count_per_unit, short_label, long_label in units: + if count >= count_per_unit: + # Rounding down with a '%f' is a little clunky. Reducing the count so + # it'll divide evenly as the rounded down value. + + count -= count % (count_per_unit / (10 ** decimal)) + count_label = label_format % (count / count_per_unit) + + if is_long: + # Pluralize if any of the visible units make it greater than one. For + # instance 1.0003 is plural but 1.000 isn't. + + if decimal > 0: + is_plural = count > count_per_unit + else: + is_plural = count >= count_per_unit * 2 + + return count_label + long_label + ('s' if is_plural else '') + else: + return count_label + short_label + +# TODO: drop with stem 2.x +# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old +# names for backward compatability. + +get_size_label = size_label +get_time_label = time_label +get_time_labels = time_labels +get_short_time_label = short_time_label diff --git a/Shared/lib/python3.4/site-packages/stem/util/system.py b/Shared/lib/python3.4/site-packages/stem/util/system.py new file mode 100644 index 0000000..aa13cb9 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/system.py @@ -0,0 +1,1176 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Helper functions for working with the underlying system. These are mostly os +dependent, only working on linux, osx, and bsd. In almost all cases they're +best-effort, providing **None** if the lookup fails. + +.. versionchanged:: 1.3.0 + Dropped the get_* prefix from several function names. The old names still + work, but are deprecated aliases. 
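+
+A rough usage sketch (the pid shown is made up)::
+
+  >>> from stem.util import system
+  >>> system.is_available('ps')
+  True
+  >>> system.pid_by_name('tor')
+  1641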
+ +**Module Overview:** + +:: + + is_windows - checks if we're running on windows + is_mac - checks if we're running on a mac + is_gentoo - checks if we're running on gentoo + is_bsd - checks if we're running on the bsd family of operating systems + + is_available - determines if a command is available on this system + is_running - determines if a given process is running + call - runs the given system command and provides back the results + + name_by_pid - gets the name for a process by the given pid + pid_by_name - gets the pid for a process by the given name + pid_by_port - gets the pid for a process listening to a given port + pid_by_open_file - gets the pid for the process with an open file + cwd - provides the current working directory for a given process + user - provides the user a process is running under + start_time - provides the unix timestamp when the process started + tail - provides lines from the end of a file + bsd_jail_id - provides the BSD jail id a given process is running within + bsd_jail_path - provides the path of the given BSD jail + + is_tarfile - checks if the given path is a tarball + expand_path - expands relative paths and ~ entries + files_with_suffix - provides files with the given suffix + + get_process_name - provides our process' name + set_process_name - changes our process' name +""" + +import ctypes +import ctypes.util +import distutils.spawn +import mimetypes +import os +import platform +import re +import subprocess +import tarfile +import time + +import stem.util.proc +import stem.util.str_tools + +from stem import UNDEFINED, str_type +from stem.util import log + +# Mapping of commands to if they're available or not. + +CMD_AVAILABLE_CACHE = {} + +# An incomplete listing of commands provided by the shell. Expand this as +# needed. Some noteworthy things about shell commands... +# +# * They're not in the path so is_available() will fail. +# * subprocess.Popen() without the 'shell = True' argument will fail with... +# OSError: [Errno 2] No such file or directory + +SHELL_COMMANDS = ['ulimit'] + +IS_RUNNING_PS_LINUX = 'ps -A co command' +IS_RUNNING_PS_BSD = 'ps -ao ucomm=' +GET_NAME_BY_PID_PS = 'ps -p %s -o comm' +GET_PID_BY_NAME_PGREP = 'pgrep -x %s' +GET_PID_BY_NAME_PIDOF = 'pidof %s' +GET_PID_BY_NAME_PS_LINUX = 'ps -o pid -C %s' +GET_PID_BY_NAME_PS_BSD = 'ps axc' +GET_PID_BY_NAME_LSOF = 'lsof -tc %s' +GET_PID_BY_PORT_NETSTAT = 'netstat -npltu' +GET_PID_BY_PORT_SOCKSTAT = 'sockstat -4l -P tcp -p %s' +GET_PID_BY_PORT_LSOF = 'lsof -wnP -iTCP -sTCP:LISTEN' +GET_PID_BY_FILE_LSOF = 'lsof -tw %s' +GET_CWD_PWDX = 'pwdx %s' +GET_CWD_LSOF = 'lsof -a -p %s -d cwd -Fn' +GET_BSD_JAIL_ID_PS = 'ps -p %s -o jid' +GET_BSD_JAIL_PATH = 'jls -j %s' + +BLOCK_SIZE = 1024 + +# flag for setting the process name, found in '/usr/include/linux/prctl.h' + +PR_SET_NAME = 15 + +argc_t = ctypes.POINTER(ctypes.c_char_p) + +# The following can fail with pypy... +# AttributeError: No symbol Py_GetArgcArgv found in library + +try: + Py_GetArgcArgv = ctypes.pythonapi.Py_GetArgcArgv + Py_GetArgcArgv.restype = None + Py_GetArgcArgv.argtypes = [ + ctypes.POINTER(ctypes.c_int), + ctypes.POINTER(argc_t), + ] +except: + Py_GetArgcArgv = None + +# This is both a cache for get_process_name() and tracks what we've changed our +# process name to. + +_PROCESS_NAME = None + +# Length of our original process name. +# +# The original author our process renaming is based on did a memset for 256, +# while Jake did it for the original process name length (capped at 1608). 
I'm +# not sure of the reasons for either of these limits, but setting it to +# anything higher than our original name length should be pointless, so opting +# for Jake's limit. + +_MAX_NAME_LENGTH = -1 + + +def is_windows(): + """ + Checks if we are running on Windows. + + :returns: **bool** to indicate if we're on Windows + """ + + return platform.system() == 'Windows' + + +def is_mac(): + """ + Checks if we are running on Mac OSX. + + :returns: **bool** to indicate if we're on a Mac + """ + + return platform.system() == 'Darwin' + + +def is_gentoo(): + """ + Checks if we're running on Gentoo. + + :returns: **bool** to indicate if we're on Gentoo + """ + + return os.path.exists('/etc/gentoo-release') + + +def is_bsd(): + """ + Checks if we are within the BSD family of operating systems. This currently + recognizes Macs, FreeBSD, and OpenBSD but may be expanded later. + + :returns: **bool** to indicate if we're on a BSD OS + """ + + return platform.system() in ('Darwin', 'FreeBSD', 'OpenBSD') + + +def is_available(command, cached=True): + """ + Checks the current PATH to see if a command is available or not. If more + than one command is present (for instance "ls -a | grep foo") then this + just checks the first. + + Note that shell (like cd and ulimit) aren't in the PATH so this lookup will + try to assume that it's available. This only happends for recognized shell + commands (those in SHELL_COMMANDS). + + :param str command: command to search for + :param bool cached: makes use of available cached results if **True** + + :returns: **True** if an executable we can use by that name exists in the + PATH, **False** otherwise + """ + + if ' ' in command: + command = command.split(' ')[0] + + if command in SHELL_COMMANDS: + # we can't actually look it up, so hope the shell really provides it... + + return True + elif cached and command in CMD_AVAILABLE_CACHE: + return CMD_AVAILABLE_CACHE[command] + else: + cmd_exists = distutils.spawn.find_executable(command) is not None + CMD_AVAILABLE_CACHE[command] = cmd_exists + return cmd_exists + + +def is_running(command): + """ + Checks for if a process with a given name is running or not. + + :param str command: process name to be checked + + :returns: **True** if the process is running, **False** if it's not among ps + results, and **None** if ps can't be queried + """ + + # Linux and the BSD families have different variants of ps. Guess based on + # the is_bsd() check which to try first, then fall back to the other. + # + # Linux + # -A - Select all processes. + # -co command - Shows just the base command. + # + # Mac / BSD + # -a - Display information about other users' processes as well as + # our own. + # -o ucomm= - Shows just the ucomm attribute ("name to be used for + # accounting") + + if is_available('ps'): + if is_bsd(): + primary_resolver = IS_RUNNING_PS_BSD + secondary_resolver = IS_RUNNING_PS_LINUX + else: + primary_resolver = IS_RUNNING_PS_LINUX + secondary_resolver = IS_RUNNING_PS_BSD + + command_listing = call(primary_resolver, None) + + if not command_listing: + command_listing = call(secondary_resolver, None) + + if command_listing: + command_listing = map(str_type.strip, command_listing) + return command in command_listing + + return None + + +def name_by_pid(pid): + """ + Attempts to determine the name a given process is running under (not + including arguments). This uses... + + :: + + 1. Information from /proc + 2. 
ps -p -o command + + :param int pid: process id of the process to be queried + + :returns: **str** with the process name, **None** if it can't be determined + """ + + process_name = None + + if stem.util.proc.is_available(): + try: + process_name = stem.util.proc.stats(pid, stem.util.proc.Stat.COMMAND)[0] + except IOError: + pass + + # attempts to resolve using ps, failing if: + # - system's ps variant doesn't handle these flags (none known at the moment) + # + # example output: + # atagar@morrigan:~$ ps -p 5767 -o comm + # COMMAND + # vim + + if not process_name: + try: + results = call(GET_NAME_BY_PID_PS % pid) + except OSError: + results = None + + if results and len(results) == 2 and results[0] == 'COMMAND': + process_name = results[1].strip() + + return process_name + + +def pid_by_name(process_name, multiple = False): + """ + Attempts to determine the process id for a running process, using... + + :: + + 1. pgrep -x + 2. pidof + 3. ps -o pid -C (linux) + ps axc | egrep " $" (bsd) + 4. lsof -tc + 5. tasklist | str .exe + + :param str process_name: process name for which to fetch the pid + :param bool multiple: provides a list of all pids if **True**, otherwise + results with multiple processes are discarded + + :returns: + Response depends upon the 'multiple' argument as follows... + + * if **False** then this provides an **int** with the process id or **None** if it can't be determined + * if **True** then this provides a **list** of all **int** process ids, and an empty list if it can't be determined + """ + + # attempts to resolve using pgrep, failing if: + # - we're running on bsd (command unavailable) + # + # example output: + # atagar@morrigan:~$ pgrep -x vim + # 3283 + # 3392 + + if is_available('pgrep'): + results = call(GET_PID_BY_NAME_PGREP % process_name, None) + + if results: + try: + pids = list(map(int, results)) + + if multiple: + return pids + elif len(pids) == 1: + return pids[0] + except ValueError: + pass + + # attempts to resolve using pidof, failing if: + # - we're running on bsd (command unavailable) + # + # example output: + # atagar@morrigan:~$ pidof vim + # 3392 3283 + + if is_available('pidof'): + results = call(GET_PID_BY_NAME_PIDOF % process_name, None) + + if results and len(results) == 1: + try: + pids = list(map(int, results[0].split())) + + if multiple: + return pids + elif len(pids) == 1: + return pids[0] + except ValueError: + pass + + # attempts to resolve using ps, failing if: + # - system's ps variant doesn't handle these flags (none known at the moment) + # + # example output: + # atagar@morrigan:~/Desktop/stem$ ps -o pid -C vim + # PID + # 3283 + # 3392 + # + # atagar$ ps axc + # PID TT STAT TIME COMMAND + # 1 ?? Ss 9:00.22 launchd + # 10 ?? Ss 0:09.97 kextd + # 11 ?? Ss 5:47.36 DirectoryService + # 12 ?? 
Ss 3:01.44 notifyd + + if is_available('ps'): + if not is_bsd(): + # linux variant of ps + results = call(GET_PID_BY_NAME_PS_LINUX % process_name, None) + + if results: + try: + pids = list(map(int, results[1:])) + + if multiple: + return pids + elif len(pids) == 1: + return pids[0] + except ValueError: + pass + + if is_bsd(): + # bsd variant of ps + results = call(GET_PID_BY_NAME_PS_BSD, None) + + if results: + # filters results to those with our process name + results = [r.split()[0] for r in results if r.endswith(' %s' % process_name)] + + try: + pids = list(map(int, results)) + + if multiple: + return pids + elif len(pids) == 1: + return pids[0] + except ValueError: + pass + + # resolves using lsof which works on both Linux and BSD, only failing if: + # - lsof is unavailable (not included by default on OpenBSD) + # - the process being run as a different user due to permissions + # - the process doesn't have any open files to be reported by lsof? + # + # flags: + # t - only show pids + # c - restrict results to that command + # + # example output: + # atagar@morrigan:~$ lsof -t -c vim + # 2470 + # 2561 + + if is_available('lsof'): + results = call(GET_PID_BY_NAME_LSOF % process_name, None) + + if results: + try: + pids = list(map(int, results)) + + if multiple: + return pids + elif len(pids) == 1: + return pids[0] + except ValueError: + pass + + if is_available('tasklist') and is_windows(): + if not process_name.endswith('.exe'): + process_name = process_name + '.exe' + + process_ids = [] + + results = stem.util.system.call('tasklist', None) + + if results: + tasklist_regex = re.compile('^\s*%s\s+(?P[0-9]*)' % process_name) + + for line in results: + match = tasklist_regex.search(line) + + if match: + process_ids.append(int(match.group('pid'))) + + if multiple: + return process_ids + elif len(process_ids) > 0: + return process_ids[0] + + log.debug("failed to resolve a pid for '%s'" % process_name) + return [] if multiple else None + + +def pid_by_port(port): + """ + Attempts to determine the process id for a process with the given port, + using... + + :: + + 1. netstat -npltu | grep 127.0.0.1: + 2. sockstat -4l -P tcp -p + 3. lsof -wnP -iTCP -sTCP:LISTEN | grep ":" + + Most queries limit results to listening TCP connections. This function likely + won't work on Mac OSX. + + :param int port: port where the process we're looking for is listening + + :returns: **int** with the process id, **None** if it can't be determined + """ + + # attempts to resolve using netstat, failing if: + # - netstat doesn't accept these flags (Linux only) + # - the process being run as a different user due to permissions + # + # flags: + # n - numeric (disables hostname lookups) + # p - program (include pids) + # l - listening (include listening sockets) + # tu - show tcp and udp sockets, and nothing else + # + # example output: + # atagar@morrigan:~$ netstat -npltu + # Active Internet connections (only servers) + # Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name + # tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN - + # tcp 0 0 127.0.0.1:9051 0.0.0.0:* LISTEN 1641/tor + # tcp6 0 0 ::1:631 :::* LISTEN - + # udp 0 0 0.0.0.0:5353 0.0.0.0:* - + # udp6 0 0 fe80::7ae4:ff:fe2f::123 :::* - + + if is_available('netstat'): + results = call(GET_PID_BY_PORT_NETSTAT, None) + + if results: + # filters to results with our port + results = [r for r in results if '127.0.0.1:%s' % port in r] + + if len(results) == 1 and len(results[0].split()) == 7: + results = results[0].split()[6] # process field (ex. 
"7184/tor") + pid = results[:results.find('/')] + + if pid.isdigit(): + return int(pid) + + # attempts to resolve using sockstat, failing if: + # - sockstat doesn't accept the -4 flag (BSD only) + # - sockstat isn't available (encountered with OSX 10.5.8) + # - there are multiple instances using the same port on different addresses + # + # flags: + # 4 - only show IPv4 sockets + # l - listening sockets + # P tcp - only show tcp connections + # p - only includes results if the local or foreign port match this + # + # example output: + # # sockstat -4 | grep tor + # _tor tor 4397 7 tcp4 51.64.7.84:9050 *:* + # _tor tor 4397 8 udp4 51.64.7.84:53 *:* + # _tor tor 4397 12 tcp4 51.64.7.84:54011 80.3.121.7:9001 + # _tor tor 4397 15 tcp4 51.64.7.84:59374 7.42.1.102:9001 + # _tor tor 4397 20 tcp4 51.64.7.84:51946 32.83.7.104:443 + + if is_available('sockstat'): + results = call(GET_PID_BY_PORT_SOCKSTAT % port, None) + + if results: + # filters to results where this is the local port + results = [r for r in results if (len(r.split()) == 7 and (':%s' % port) in r.split()[5])] + + if len(results) == 1: + pid = results[0].split()[2] + + if pid.isdigit(): + return int(pid) + + # resolves using lsof which works on both Linux and BSD, only failing if: + # - lsof is unavailable (not included by default on OpenBSD) + # - lsof doesn't provide the port ip/port, nor accept the -i and -s args + # (encountered with OSX 10.5.8) + # - the process being run as a different user due to permissions + # - there are multiple instances using the same port on different addresses + # + # flags: + # w - disables warning messages + # n - numeric addresses (disables hostname lookups) + # P - numeric ports (disables replacement of ports with their protocol) + # iTCP - only show tcp connections + # sTCP:LISTEN - listening sockets + # + # example output: + # atagar@morrigan:~$ lsof -wnP -iTCP -sTCP:LISTEN + # COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME + # tor 1745 atagar 6u IPv4 14229 0t0 TCP 127.0.0.1:9051 (LISTEN) + + if is_available('lsof'): + results = call(GET_PID_BY_PORT_LSOF, None) + + if results: + # filters to results with our port + results = [r for r in results if (len(r.split()) == 10 and (':%s' % port) in r.split()[8])] + + if len(results) == 1: + pid = results[0].split()[1] + + if pid.isdigit(): + return int(pid) + + return None # all queries failed + + +def pid_by_open_file(path): + """ + Attempts to determine the process id for a process with the given open file, + using... + + :: + + lsof -w + + :param str path: location of the socket file to query against + + :returns: **int** with the process id, **None** if it can't be determined + """ + + # resolves using lsof which works on both Linux and BSD, only failing if: + # - lsof is unavailable (not included by default on OpenBSD) + # - the file can't be read due to permissions + # + # flags: + # t - only show pids + # w - disables warning messages + # + # example output: + # atagar@morrigan:~$ lsof -tw /tmp/foo + # 4762 + + if is_available('lsof'): + results = call(GET_PID_BY_FILE_LSOF % path, []) + + if len(results) == 1: + pid = results[0].strip() + + if pid.isdigit(): + return int(pid) + + return None # all queries failed + + +def cwd(pid): + """ + Provides the working directory of the given process. 
+ + :param int pid: process id of the process to be queried + + :returns: **str** with the absolute path for the process' present working + directory, **None** if it can't be determined + """ + + # try fetching via the proc contents if it's available + if stem.util.proc.is_available(): + try: + return stem.util.proc.cwd(pid) + except IOError: + pass + + # Fall back to a pwdx query. This isn't available on BSD. + logging_prefix = 'cwd(%s):' % pid + + if is_available('pwdx'): + # pwdx results are of the form: + # 3799: /home/atagar + # 5839: No such process + + results = call(GET_CWD_PWDX % pid, None) + + if not results: + log.debug("%s pwdx didn't return any results" % logging_prefix) + elif results[0].endswith('No such process'): + log.debug('%s pwdx processes reported for this pid' % logging_prefix) + elif len(results) != 1 or results[0].count(' ') != 1 or not results[0].startswith('%s: ' % pid): + log.debug('%s we got unexpected output from pwdx: %s' % (logging_prefix, results)) + else: + return results[0].split(' ', 1)[1].strip() + + # Use lsof as the final fallback. This is available on both Linux and is the + # only lookup method here that works for BSD... + # https://trac.torproject.org/projects/tor/ticket/4236 + # + # flags: + # a - presents the intersection of the following arguments + # p - limits results to this pid + # d cwd - limits results to just the cwd rather than all open files + # Fn - short listing in a single column, with just the pid and cwd + # + # example output: + # ~$ lsof -a -p 75717 -d cwd -Fn + # p75717 + # n/Users/atagar/tor/src/or + + if is_available('lsof'): + results = call(GET_CWD_LSOF % pid, []) + + if len(results) == 2 and results[1].startswith('n/'): + lsof_result = results[1][1:].strip() + + # If we lack read permissions for the cwd then it returns... + # p2683 + # n/proc/2683/cwd (readlink: Permission denied) + + if ' ' not in lsof_result: + return lsof_result + else: + log.debug('%s we got unexpected output from lsof: %s' % (logging_prefix, results)) + + return None # all queries failed + + +def user(pid): + """ + Provides the user a process is running under. + + :param int pid: process id of the process to be queried + + :returns: **str** with the username a process is running under, **None** if + it can't be determined + """ + + if not isinstance(pid, int) or pid < 0: + return None + + if stem.util.proc.is_available(): + try: + import pwd # only available on unix platforms + + uid = stem.util.proc.uid(pid) + + if uid and uid.isdigit(): + return pwd.getpwuid(int(uid)).pw_name + except: + pass + + if is_available('ps'): + results = call('ps -o user %s' % pid, []) + + if len(results) >= 2: + return results[1].strip() + + return None + + +def start_time(pid): + """ + Provides the unix timestamp when the given process started. + + :param int pid: process id of the process to be queried + + :returns: **float** for the unix timestamp when the process began, **None** + if it can't be determined + """ + + if not isinstance(pid, int) or pid < 0: + return None + + if stem.util.proc.is_available(): + try: + return float(stem.util.proc.stats(pid, stem.util.proc.Stat.START_TIME)[0]) + except IOError: + pass + + try: + ps_results = call('ps -p %s -o etime' % pid, []) + + if len(ps_results) >= 2: + etime = ps_results[1].strip() + return time.time() - stem.util.str_tools.parse_short_time_label(etime) + except: + pass + + return None + + +def tail(target, lines = None): + """ + Provides lines of a file starting with the end. 
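Taken together, the pid and process lookup helpers above compose naturally. A minimal sketch, assuming a local tor instance listening on control port 9051 (the port number is only an example)::

  import stem.util.system

  pid = stem.util.system.pid_by_port(9051)  # tries netstat, sockstat, then lsof

  if pid:
    print('tor is pid %i' % pid)
    print('running as: %s' % stem.util.system.user(pid))
    print('started at: %s' % stem.util.system.start_time(pid))
    print('working directory: %s' % stem.util.system.cwd(pid))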
For instance, + 'tail -n 50 /tmp/my_log' could be done with... + + :: + + reversed(list(tail('/tmp/my_log', 50))) + + :param str,file target: path or file object to read from + :param int lines: number of lines to read + + :returns: **generator** that reads lines, starting with the end + + :raises: **IOError** if unable to read the file + """ + + if isinstance(target, str): + with open(target) as target_file: + for line in tail(target_file, lines): + yield line + + return + + # based on snippet from... + # https://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail + + target.seek(0, 2) # go to the end of the file + block_end_byte = target.tell() + block_number = -1 + content = '' + + while (lines is None or lines > 0) and block_end_byte > 0: + if (block_end_byte - BLOCK_SIZE > 0): + # read the last block we haven't yet read + target.seek(block_number * BLOCK_SIZE, 2) + content, completed_lines = (target.read(BLOCK_SIZE) + content).split('\n', 1) + else: + # reached the start of the file, just read what's left + target.seek(0, 0) + completed_lines = target.read(block_end_byte) + content + + for line in reversed(completed_lines.splitlines()): + if lines is None or lines > 0: + if lines is not None: + lines -= 1 + + yield line + + block_end_byte -= BLOCK_SIZE + block_number -= 1 + + +def bsd_jail_id(pid): + """ + Gets the jail id for a process. These seem to only exist for FreeBSD (this + style for jails does not exist on Linux, OSX, or OpenBSD). + + :param int pid: process id of the jail id to be queried + + :returns: **int** for the jail id, zero if this can't be determined + """ + + # Output when called from a FreeBSD jail or when Tor isn't jailed: + # JID + # 0 + # + # Otherwise it's something like: + # JID + # 1 + + ps_output = call(GET_BSD_JAIL_ID_PS % pid, []) + + if len(ps_output) == 2 and len(ps_output[1].split()) == 1: + jid = ps_output[1].strip() + + if jid.isdigit(): + return int(jid) + + os_name = platform.system() + if os_name == 'FreeBSD': + log.warn('Unable to get the jail id for process %s.' % pid) + else: + log.debug('bsd_jail_id(%s): jail ids do not exist on %s' % (pid, os_name)) + + return 0 + + +def bsd_jail_path(jid): + """ + Provides the path of the given FreeBSD jail. + + :param int jid: jail id to be queried + + :returns: **str** of the path prefix, **None** if this can't be determined + """ + + if jid != 0: + # Output should be something like: + # JID IP Address Hostname Path + # 1 10.0.0.2 tor-jail /usr/jails/tor-jail + + jls_output = call(GET_BSD_JAIL_PATH % jid, []) + + if len(jls_output) == 2 and len(jls_output[1].split()) == 4: + return jls_output[1].split()[3] + + return None + + +def is_tarfile(path): + """ + Returns if the path belongs to a tarfile or not. + + .. versionadded:: 1.2.0 + + :param str path: path to be checked + + :returns: **True** if the path belongs to a tarball, **False** otherwise + """ + + # Checking if it's a tar file may fail due to permissions so failing back + # to the mime type... + # + # IOError: [Errno 13] Permission denied: '/vmlinuz.old' + # + # With python 3 insuffient permissions raises an AttributeError instead... + # + # http://bugs.python.org/issue17059 + + try: + return tarfile.is_tarfile(path) + except (IOError, AttributeError): + return mimetypes.guess_type(path)[0] == 'application/x-tar' + + +def expand_path(path, cwd = None): + """ + Provides an absolute path, expanding tildes with the user's home and + appending a current working directory if the path was relative. 
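Mirroring the 'tail -n 50 /tmp/my_log' comparison in the docstring above, a short usage sketch (the log path is hypothetical)::

  from stem.util.system import tail

  # tail() yields lines newest first, so reverse for chronological order
  for line in reversed(list(tail('/tmp/my_log', 50))):
    print(line)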
+ + :param str path: path to be expanded + :param str cwd: current working directory to expand relative paths with, our + process' if this is **None** + + :returns: **str** of the path expanded to be an absolute path, never with an + ending slash + """ + + if is_windows(): + relative_path = path.replace('/', '\\').rstrip('\\') + else: + relative_path = path.rstrip('/') + + if not relative_path or os.path.isabs(relative_path): + # empty or already absolute - nothing to do + pass + elif relative_path.startswith('~'): + # prefixed with a ~ or ~user entry + relative_path = os.path.expanduser(relative_path) + else: + # relative path, expand with the cwd + + if not cwd: + cwd = os.getcwd() + + # we'll be dealing with both "my/path/" and "./my/path" entries, so + # cropping the later + if relative_path.startswith('./') or relative_path.startswith('.\\'): + relative_path = relative_path[2:] + elif relative_path == '.': + relative_path = '' + + if relative_path == '': + relative_path = cwd + else: + relative_path = os.path.join(cwd, relative_path) + + return relative_path + + +def files_with_suffix(base_path, suffix): + """ + Iterates over files in a given directory, providing filenames with a certain + suffix. + + .. versionadded:: 1.2.0 + + :param str base_path: directory to be iterated over + :param str suffix: filename suffix to look for + + :returns: iterator that yields the absolute path for files with the given suffix + """ + + if os.path.isfile(base_path): + if base_path.endswith(suffix): + yield base_path + else: + for root, _, files in os.walk(base_path): + for filename in files: + if filename.endswith(suffix): + yield os.path.join(root, filename) + + +def call(command, default = UNDEFINED, ignore_exit_status = False): + """ + Issues a command in a subprocess, blocking until completion and returning the + results. This is not actually ran in a shell so pipes and other shell syntax + are not permitted. 
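A brief sketch of the path expansion described above (the sample paths are illustrative and the results assume a unix platform)::

  from stem.util.system import expand_path

  expand_path('/tmp/foo/')             # => '/tmp/foo'
  expand_path('~/.tor/torrc')          # => '/home/atagar/.tor/torrc' for that user
  expand_path('./torrc', cwd='/tmp')   # => '/tmp/torrc'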
+ + :param str,list command: command to be issued + :param object default: response if the query fails + :param bool ignore_exit_status: reports failure if our command's exit status + was non-zero + + :returns: **list** with the lines of output from the command + + :raises: **OSError** if this fails and no default was provided + """ + + if isinstance(command, str): + command_list = command.split(' ') + else: + command_list = command + + try: + is_shell_command = command_list[0] in SHELL_COMMANDS + + start_time = time.time() + process = subprocess.Popen(command_list, stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = is_shell_command) + + stdout, stderr = process.communicate() + stdout, stderr = stdout.strip(), stderr.strip() + runtime = time.time() - start_time + + log.debug('System call: %s (runtime: %0.2f)' % (command, runtime)) + trace_prefix = 'Received from system (%s)' % command + + if stdout and stderr: + log.trace(trace_prefix + ', stdout:\n%s\nstderr:\n%s' % (stdout, stderr)) + elif stdout: + log.trace(trace_prefix + ', stdout:\n%s' % stdout) + elif stderr: + log.trace(trace_prefix + ', stderr:\n%s' % stderr) + + exit_code = process.poll() + + if not ignore_exit_status and exit_code != 0: + raise OSError('%s returned exit status %i' % (command, exit_code)) + + if stdout: + return stdout.decode('utf-8', 'replace').splitlines() + else: + return [] + except OSError as exc: + log.debug('System call (failed): %s (error: %s)' % (command, exc)) + + if default != UNDEFINED: + return default + else: + raise exc + + +def get_process_name(): + """ + Provides the present name of our process. + + :returns: **str** with the present name of our process + """ + + global _PROCESS_NAME, _MAX_NAME_LENGTH + + if _PROCESS_NAME is None: + # Example output... + # + # COMMAND + # python run_tests.py --unit + + ps_output = call('ps -p %i -o args' % os.getpid(), []) + + if len(ps_output) == 2 and ps_output[0] in ('COMMAND', 'ARGS'): + _PROCESS_NAME = ps_output[1] + else: + # Falling back on using ctypes to get our argv. Unfortunately the simple + # method for getting this... + # + # ' '.join(['python'] + sys.argv) + # + # ... doesn't do the trick since this will miss interpreter arguments. + # + # python -W ignore::DeprecationWarning my_script.py + + args, argc = [], argc_t() + + for i in range(100): + # The ending index can be either None or raise a ValueError when + # accessed... + # + # ValueError: NULL pointer access + + try: + if argc[i] is None: + break + except ValueError: + break + + args.append(str(argc[i])) + + _PROCESS_NAME = ' '.join(args) + + _MAX_NAME_LENGTH = len(_PROCESS_NAME) + + return _PROCESS_NAME + + +def set_process_name(process_name): + """ + Renames our current process from "python " to a custom name. This is + best-effort, not necessarily working on all platforms. + + **Note:** This might have issues on FreeBSD (:trac:`9804`). + + :param str process_name: new name for our process + """ + + # This is mostly based on... + # + # http://www.rhinocerus.net/forum/lang-python/569677-setting-program-name-like-0-perl.html#post2272369 + # + # ... and an adaptation by Jake... + # + # https://github.com/ioerror/chameleon + # + # A cleaner implementation is available at... + # + # https://github.com/cream/libs/blob/b38970e2a6f6d2620724c828808235be0445b799/cream/util/procname.py + # + # but I'm not quite clear on their implementation, and it only does targeted + # argument replacement (ie, replace argv[0], argv[1], etc but with a string + # the same size). 
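Since call() above either raises an OSError or hands back a caller-supplied default, lookups can skip the try/except boilerplate. A minimal sketch (the command is just an example)::

  import os

  from stem.util.system import call

  # None if pwdx is unavailable or exits with a non-zero status
  results = call('pwdx %i' % os.getpid(), None)

  if results:
    print(results[0])  # ex. '2769: /home/atagar'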
+ + _set_argv(process_name) + + if platform.system() == 'Linux': + _set_prctl_name(process_name) + elif platform.system() in ('Darwin', 'FreeBSD', 'OpenBSD'): + _set_proc_title(process_name) + + +def _set_argv(process_name): + """ + Overwrites our argv in a similar fashion to how it's done in C with: + strcpy(argv[0], 'new_name'); + """ + + if Py_GetArgcArgv is None: + return + + global _PROCESS_NAME + + # both gets the current process name and initializes _MAX_NAME_LENGTH + + current_name = get_process_name() + + argv, argc = ctypes.c_int(0), argc_t() + Py_GetArgcArgv(argv, ctypes.pointer(argc)) + + if len(process_name) > _MAX_NAME_LENGTH: + raise IOError("Can't rename process to something longer than our initial name (this would overwrite memory used for the env)") + + # space we need to clear + zero_size = max(len(current_name), len(process_name)) + + ctypes.memset(argc.contents, 0, zero_size + 1) # null terminate the string's end + process_name_encoded = process_name.encode('utf8') + ctypes.memmove(argc.contents, process_name_encoded, len(process_name)) + _PROCESS_NAME = process_name + + +def _set_prctl_name(process_name): + """ + Sets the prctl name, which is used by top and killall. This appears to be + Linux specific and has the max of 15 characters. + + This is from... + http://stackoverflow.com/questions/564695/is-there-a-way-to-change-effective-process-name-in-python/923034#923034 + """ + + libc = ctypes.CDLL(ctypes.util.find_library('c')) + name_buffer = ctypes.create_string_buffer(len(process_name) + 1) + name_buffer.value = stem.util.str_tools._to_bytes(process_name) + libc.prctl(PR_SET_NAME, ctypes.byref(name_buffer), 0, 0, 0) + + +def _set_proc_title(process_name): + """ + BSD specific calls (should be compataible with both FreeBSD and OpenBSD: + http://fxr.watson.org/fxr/source/gen/setproctitle.c?v=FREEBSD-LIBC + http://www.rootr.net/man/man/setproctitle/3 + """ + + libc = ctypes.CDLL(ctypes.util.find_library('c')) + name_buffer = ctypes.create_string_buffer(len(process_name) + 1) + name_buffer.value = process_name + + try: + libc.setproctitle(ctypes.byref(name_buffer)) + except AttributeError: + # Possible issue (seen on OSX): + # AttributeError: dlsym(0x7fff6a41d1e0, setproctitle): symbol not found + + pass + + +# TODO: drop with stem 2.x +# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old +# names for backward compatability. + +get_name_by_pid = name_by_pid +get_pid_by_name = pid_by_name +get_pid_by_port = pid_by_port +get_pid_by_open_file = pid_by_open_file +get_cwd = cwd +get_user = user +get_start_time = start_time +get_bsd_jail_id = bsd_jail_id +get_bsd_jail_path = bsd_jail_path diff --git a/Shared/lib/python3.4/site-packages/stem/util/term.py b/Shared/lib/python3.4/site-packages/stem/util/term.py new file mode 100644 index 0000000..bb110ea --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/term.py @@ -0,0 +1,116 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Utilities for working with the terminal. + +**Module Overview:** + +:: + + format - wrap text with ANSI for the given colors or attributes + +.. data:: Color (enum) +.. data:: BgColor (enum) + + Enumerations for foreground or background terminal color. 
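Pulling the renaming helpers above together, a best-effort sketch (the replacement name is arbitrary, and it cannot be longer than our original argv string)::

  import stem.util.system

  print(stem.util.system.get_process_name())  # ex. 'python my_script.py'
  stem.util.system.set_process_name('my-script')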
+ + =========== =========== + Color Description + =========== =========== + **BLACK** black color + **BLUE** blue color + **CYAN** cyan color + **GREEN** green color + **MAGENTA** magenta color + **RED** red color + **WHITE** white color + **YELLOW** yellow color + =========== =========== + +.. data:: Attr (enum) + + Enumerations of terminal text attributes. + + =================== =========== + Attr Description + =================== =========== + **BOLD** heavy typeface + **HILIGHT** inverted foreground and background + **UNDERLINE** underlined text + **READLINE_ESCAPE** wrap encodings in `RL_PROMPT_START_IGNORE and RL_PROMPT_END_IGNORE sequences `_ + =================== =========== +""" + +import stem.util.enum +import stem.util.str_tools + +TERM_COLORS = ('BLACK', 'RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA', 'CYAN', 'WHITE') + +# DISABLE_COLOR_SUPPORT is *not* being vended to Stem users. This is likely to +# go away if I can think of a more graceful method for color toggling. + +DISABLE_COLOR_SUPPORT = False + +Color = stem.util.enum.Enum(*TERM_COLORS) +BgColor = stem.util.enum.Enum(*['BG_' + color for color in TERM_COLORS]) +Attr = stem.util.enum.Enum('BOLD', 'UNDERLINE', 'HILIGHT', 'READLINE_ESCAPE') + +# mappings of terminal attribute enums to their ANSI escape encoding +FG_ENCODING = dict([(list(Color)[i], str(30 + i)) for i in range(8)]) +BG_ENCODING = dict([(list(BgColor)[i], str(40 + i)) for i in range(8)]) +ATTR_ENCODING = {Attr.BOLD: '1', Attr.UNDERLINE: '4', Attr.HILIGHT: '7'} + +CSI = '\x1B[%sm' +RESET = CSI % '0' + + +def format(msg, *attr): + """ + Simple terminal text formatting using `ANSI escape sequences + `_. + The following are some toolkits providing similar capabilities: + + * `django.utils.termcolors `_ + * `termcolor `_ + * `colorama `_ + + :param str msg: string to be formatted + :param str attr: text attributes, this can be :data:`~stem.util.term.Color`, + :data:`~stem.util.term.BgColor`, or :data:`~stem.util.term.Attr` enums + and are case insensitive (so strings like 'red' are fine) + + :returns: **str** wrapped with ANSI escape encodings, starting with the given + attributes and ending with a reset + """ + + if DISABLE_COLOR_SUPPORT: + return msg + + # if we have reset sequences in the message then apply our attributes + # after each of them + + if RESET in msg: + return ''.join([format(comp, *attr) for comp in msg.split(RESET)]) + + encodings = [] + + for text_attr in attr: + text_attr, encoding = stem.util.str_tools._to_camel_case(text_attr), None + encoding = FG_ENCODING.get(text_attr, encoding) + encoding = BG_ENCODING.get(text_attr, encoding) + encoding = ATTR_ENCODING.get(text_attr, encoding) + + if encoding: + encodings.append(encoding) + + if encodings: + prefix, suffix = CSI % ';'.join(encodings), RESET + + if Attr.READLINE_ESCAPE in attr: + prefix = '\001%s\002' % prefix + suffix = '\001%s\002' % suffix + + return prefix + msg + suffix + else: + return msg diff --git a/Shared/lib/python3.4/site-packages/stem/util/test_tools.py b/Shared/lib/python3.4/site-packages/stem/util/test_tools.py new file mode 100644 index 0000000..d6a81c4 --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/test_tools.py @@ -0,0 +1,341 @@ +# Copyright 2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Helper functions for testing. + +.. 
versionadded:: 1.2.0 + +:: + + clean_orphaned_pyc - delete *.pyc files without corresponding *.py + + is_pyflakes_available - checks if pyflakes is available + is_pep8_available - checks if pep8 is available + + stylistic_issues - checks for PEP8 and other stylistic issues + pyflakes_issues - static checks for problems via pyflakes +""" + +import collections +import linecache +import os +import re + +import stem.util.conf +import stem.util.system + +CONFIG = stem.util.conf.config_dict('test', { + 'pep8.ignore': [], + 'pyflakes.ignore': [], + 'exclude_paths': [], +}) + +Issue = collections.namedtuple('Issue', [ + 'line_number', + 'message', + 'line', +]) + + +def clean_orphaned_pyc(paths): + """ + Deletes any file with a *.pyc extention without a corresponding *.py. This + helps to address a common gotcha when deleting python files... + + * You delete module 'foo.py' and run the tests to ensure that you haven't + broken anything. They pass, however there *are* still some 'import foo' + statements that still work because the bytecode (foo.pyc) is still around. + + * You push your change. + + * Another developer clones our repository and is confused because we have a + bunch of ImportErrors. + + :param list paths: paths to search for orphaned pyc files + + :returns: list of absolute paths that were deleted + """ + + orphaned_pyc = [] + + for path in paths: + for pyc_path in stem.util.system.files_with_suffix(path, '.pyc'): + py_path = pyc_path[:-1] + + # If we're running python 3 then the *.pyc files are no longer bundled + # with the *.py. Rather, they're in a __pycache__ directory. + + pycache = '%s__pycache__%s' % (os.path.sep, os.path.sep) + + if pycache in pyc_path: + directory, pycache_filename = pyc_path.split(pycache, 1) + + if not pycache_filename.endswith('.pyc'): + continue # should look like 'test_tools.cpython-32.pyc' + + py_path = os.path.join(directory, pycache_filename.split('.')[0] + '.py') + + if not os.path.exists(py_path): + orphaned_pyc.append(pyc_path) + os.remove(pyc_path) + + return orphaned_pyc + + +def is_pyflakes_available(): + """ + Checks if pyflakes is availalbe. + + :returns: **True** if we can use pyflakes and **False** otherwise + """ + + try: + import pyflakes.api + import pyflakes.reporter + return True + except ImportError: + return False + + +def is_pep8_available(): + """ + Checks if pep8 is availalbe. + + :returns: **True** if we can use pep8 and **False** otherwise + """ + + try: + import pep8 + + if not hasattr(pep8, 'BaseReport'): + raise ImportError() + + return True + except ImportError: + return False + + +def stylistic_issues(paths, check_two_space_indents = False, check_newlines = False, check_trailing_whitespace = False, check_exception_keyword = False, prefer_single_quotes = False): + """ + Checks for stylistic issues that are an issue according to the parts of PEP8 + we conform to. You can suppress PEP8 issues by making a 'test' configuration + that sets 'pep8.ignore'. + + For example, with a 'test/settings.cfg' of... + + :: + + # PEP8 compliance issues that we're ignoreing... + # + # * E111 and E121 four space indentations + # * E501 line is over 79 characters + + pep8.ignore E111 + pep8.ignore E121 + pep8.ignore E501 + + ... you can then run tests with... + + :: + + import stem.util.conf + + test_config = stem.util.conf.get_config('test') + test_config.load('test/settings.cfg') + + issues = stylistic_issues('my_project') + + If a 'exclude_paths' was set in our test config then we exclude any absolute + paths matching those regexes. + + .. 
versionchanged:: 1.3.0 + Renamed from get_stylistic_issues() to stylistic_issues(). The old name + still works as an alias, but will be dropped in Stem version 2.0.0. + + .. versionchanged:: 1.4.0 + Changing tuples in return value to be namedtuple instances, and adding the + line that had the issue. + + .. versionchanged:: 1.4.0 + Added the prefer_single_quotes option. + + :param list paths: paths to search for stylistic issues + :param bool check_two_space_indents: check for two space indentations and + that no tabs snuck in + :param bool check_newlines: check that we have standard newlines (\\n), not + windows (\\r\\n) nor classic mac (\\r) + :param bool check_trailing_whitespace: check that our lines don't end with + trailing whitespace + :param bool check_exception_keyword: checks that we're using 'as' for + exceptions rather than a comma + :param bool prefer_single_quotes: standardize on using single rather than + double quotes for strings, when reasonable + + :returns: **dict** of the form ``path => [(line_number, message)...]`` + """ + + issues = {} + + if is_pep8_available(): + import pep8 + + class StyleReport(pep8.BaseReport): + def __init__(self, options): + super(StyleReport, self).__init__(options) + + def error(self, line_number, offset, text, check): + code = super(StyleReport, self).error(line_number, offset, text, check) + + if code: + issues.setdefault(self.filename, []).append(Issue(line_number, '%s %s' % (code, text), text)) + + style_checker = pep8.StyleGuide(ignore = CONFIG['pep8.ignore'], reporter = StyleReport) + style_checker.check_files(list(_python_files(paths))) + + if check_two_space_indents or check_newlines or check_trailing_whitespace or check_exception_keyword: + for path in _python_files(paths): + with open(path) as f: + file_contents = f.read() + + lines = file_contents.split('\n') + is_block_comment = False + + for index, line in enumerate(lines): + whitespace, content = re.match('^(\s*)(.*)$', line).groups() + + # TODO: This does not check that block indentations are two spaces + # because differentiating source from string blocks ("""foo""") is more + # of a pita than I want to deal with right now. + + if '"""' in content: + is_block_comment = not is_block_comment + + if check_two_space_indents and '\t' in whitespace: + issues.setdefault(path, []).append(Issue(index + 1, 'indentation has a tab', line)) + elif check_newlines and '\r' in content: + issues.setdefault(path, []).append(Issue(index + 1, 'contains a windows newline', line)) + elif check_trailing_whitespace and content != content.rstrip(): + issues.setdefault(path, []).append(Issue(index + 1, 'line has trailing whitespace', line)) + elif check_exception_keyword and content.lstrip().startswith('except') and content.endswith(', exc:'): + # Python 2.6 - 2.7 supports two forms for exceptions... + # + # except ValueError, exc: + # except ValueError as exc: + # + # The former is the old method and no longer supported in python 3 + # going forward. + + # TODO: This check only works if the exception variable is called + # 'exc'. We should generalize this via a regex so other names work + # too. 
+ + issues.setdefault(path, []).append(Issue(index + 1, "except clause should use 'as', not comma", line)) + + if prefer_single_quotes and line and not is_block_comment: + content = line.strip().split('#', 1)[0] + + if '"' in content and "'" not in content and '"""' not in content and not content.endswith('\\'): + # Checking if the line already has any single quotes since that + # usually means double quotes are preferable for the content (for + # instance "I'm hungry"). Also checking for '\' at the end since + # that can indicate a multi-line string. + + issues.setdefault(path, []).append(Issue(index + 1, "use single rather than double quotes", line)) + + return issues + + +def pyflakes_issues(paths): + """ + Performs static checks via pyflakes. False positives can be ignored via + 'pyflakes.ignore' entries in our 'test' config. For instance... + + :: + + pyflakes.ignore stem/util/test_tools.py => 'pyflakes' imported but unused + pyflakes.ignore stem/util/test_tools.py => 'pep8' imported but unused + + If a 'exclude_paths' was set in our test config then we exclude any absolute + paths matching those regexes. + + .. versionchanged:: 1.3.0 + Renamed from get_pyflakes_issues() to pyflakes_issues(). The old name + still works as an alias, but will be dropped in Stem version 2.0.0. + + .. versionchanged:: 1.4.0 + Changing tuples in return value to be namedtuple instances, and adding the + line that had the issue. + + :param list paths: paths to search for problems + + :returns: dict of the form ``path => [(line_number, message)...]`` + """ + + issues = {} + + if is_pyflakes_available(): + import pyflakes.api + import pyflakes.reporter + + class Reporter(pyflakes.reporter.Reporter): + def __init__(self): + self._ignored_issues = {} + + for line in CONFIG['pyflakes.ignore']: + path, issue = line.split('=>') + self._ignored_issues.setdefault(path.strip(), []).append(issue.strip()) + + def unexpectedError(self, filename, msg): + self._register_issue(filename, None, msg, None) + + def syntaxError(self, filename, msg, lineno, offset, text): + self._register_issue(filename, lineno, msg, text) + + def flake(self, msg): + self._register_issue(msg.filename, msg.lineno, msg.message % msg.message_args, None) + + def _is_ignored(self, path, issue): + # Paths in pyflakes_ignore are relative, so we need to check to see if our + # path ends with any of them. + + for ignored_path, ignored_issues in self._ignored_issues.items(): + if path.endswith(ignored_path) and issue in ignored_issues: + return True + + return False + + def _register_issue(self, path, line_number, issue, line): + if not self._is_ignored(path, issue): + if path and line_number and not line: + line = linecache.getline(path, line_number) + + issues.setdefault(path, []).append(Issue(line_number, issue, line)) + + reporter = Reporter() + + for path in _python_files(paths): + pyflakes.api.checkPath(path, reporter) + + return issues + + +def _python_files(paths): + for path in paths: + for file_path in stem.util.system.files_with_suffix(path, '.py'): + skip = False + + for exclude_path in CONFIG['exclude_paths']: + if re.match(exclude_path, file_path): + skip = True + break + + if not skip: + yield file_path + +# TODO: drop with stem 2.x +# We renamed our methods to drop a redundant 'get_*' prefix, so alias the old +# names for backward compatability. 
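Rounding out the checkers above, a sketch of running them against a project (the 'test/settings.cfg' path and 'my_project' directory are placeholders)::

  import stem.util.conf
  import stem.util.test_tools

  test_config = stem.util.conf.get_config('test')
  test_config.load('test/settings.cfg')  # supplies pep8.ignore / pyflakes.ignore

  issues = stem.util.test_tools.pyflakes_issues(['my_project'])

  for path, path_issues in issues.items():
    for issue in path_issues:
      print('%s:%s %s' % (path, issue.line_number, issue.message))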
+ +get_stylistic_issues = stylistic_issues +get_pyflakes_issues = pyflakes_issues diff --git a/Shared/lib/python3.4/site-packages/stem/util/tor_tools.py b/Shared/lib/python3.4/site-packages/stem/util/tor_tools.py new file mode 100644 index 0000000..01c29ee --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/util/tor_tools.py @@ -0,0 +1,151 @@ +# Copyright 2012-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Miscellaneous utility functions for working with tor. + +.. versionadded:: 1.2.0 + +**Module Overview:** + +:: + + is_valid_fingerprint - checks if a string is a valid tor relay fingerprint + is_valid_nickname - checks if a string is a valid tor relay nickname + is_valid_circuit_id - checks if a string is a valid tor circuit id + is_valid_stream_id - checks if a string is a valid tor stream id + is_valid_connection_id - checks if a string is a valid tor connection id + is_valid_hidden_service_address - checks if a string is a valid hidden service address + is_hex_digits - checks if a string is only made up of hex digits +""" + +import re + +# The control-spec defines the following as... +# +# Fingerprint = "$" 40*HEXDIG +# NicknameChar = "a"-"z" / "A"-"Z" / "0" - "9" +# Nickname = 1*19 NicknameChar +# +# CircuitID = 1*16 IDChar +# IDChar = ALPHA / DIGIT +# +# HEXDIG is defined in RFC 5234 as being uppercase and used in RFC 5987 as +# case insensitive. Tor doesn't define this in the spec so flipping a coin +# and going with case insensitive. + +NICKNAME_PATTERN = re.compile('^[a-zA-Z0-9]{1,19}$') +CIRC_ID_PATTERN = re.compile('^[a-zA-Z0-9]{1,16}$') + +# Hidden service addresses are sixteen base32 characters. + +HS_ADDRESS_PATTERN = re.compile('^[a-z2-7]{16}$') + + +def is_valid_fingerprint(entry, check_prefix = False): + """ + Checks if a string is a properly formatted relay fingerprint. This checks for + a '$' prefix if check_prefix is true, otherwise this only validates the hex + digits. + + :param str entry: string to be checked + :param bool check_prefix: checks for a '$' prefix + + :returns: **True** if the string could be a relay fingerprint, **False** otherwise + """ + + try: + if check_prefix: + if not entry or entry[0] != '$': + return False + + entry = entry[1:] + + return is_hex_digits(entry, 40) + except TypeError: + return False + + +def is_valid_nickname(entry): + """ + Checks if a string is a valid format for being a nickname. + + :param str entry: string to be checked + + :returns: **True** if the string could be a nickname, **False** otherwise + """ + + try: + return bool(NICKNAME_PATTERN.match(entry)) + except TypeError: + return False + + +def is_valid_circuit_id(entry): + """ + Checks if a string is a valid format for being a circuit identifier. + + :returns: **True** if the string could be a circuit id, **False** otherwise + """ + + try: + return bool(CIRC_ID_PATTERN.match(entry)) + except TypeError: + return False + + +def is_valid_stream_id(entry): + """ + Checks if a string is a valid format for being a stream identifier. + Currently, this is just an alias to :func:`~stem.util.tor_tools.is_valid_circuit_id`. + + :returns: **True** if the string could be a stream id, **False** otherwise + """ + + return is_valid_circuit_id(entry) + + +def is_valid_connection_id(entry): + """ + Checks if a string is a valid format for being a connection identifier. + Currently, this is just an alias to :func:`~stem.util.tor_tools.is_valid_circuit_id`. 
+ + :returns: **True** if the string could be a connection id, **False** otherwise + """ + + return is_valid_circuit_id(entry) + + +def is_valid_hidden_service_address(entry): + """ + Checks if a string is a valid format for being a hidden service address (not + including the '.onion' suffix). + + :returns: **True** if the string could be a hidden service address, **False** otherwise + """ + + try: + return bool(HS_ADDRESS_PATTERN.match(entry)) + except TypeError: + return False + + +def is_hex_digits(entry, count): + """ + Checks if a string is the given number of hex digits. Digits represented by + letters are case insensitive. + + :param str entry: string to be checked + :param int count: number of hex digits to be checked for + + :returns: **True** if the given number of hex digits, **False** otherwise + """ + + try: + if len(entry) != count: + return False + + int(entry, 16) # attempt to convert it as hex + return True + except (ValueError, TypeError): + return False diff --git a/Shared/lib/python3.4/site-packages/stem/version.py b/Shared/lib/python3.4/site-packages/stem/version.py new file mode 100644 index 0000000..1182bfc --- /dev/null +++ b/Shared/lib/python3.4/site-packages/stem/version.py @@ -0,0 +1,376 @@ +# Copyright 2011-2015, Damian Johnson and The Tor Project +# See LICENSE for licensing information + +""" +Tor versioning information and requirements for its features. These can be +easily parsed and compared, for instance... + +:: + + >>> from stem.version import get_system_tor_version, Requirement + >>> my_version = get_system_tor_version() + >>> print(my_version) + 0.2.1.30 + >>> my_version >= Requirement.TORRC_CONTROL_SOCKET + True + +**Module Overview:** + +:: + + get_system_tor_version - gets the version of our system's tor installation + + Version - Tor versioning information + +.. data:: Requirement (enum) + + Enumerations for the version requirements of features. 
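A few quick checks with the validators above (the nickname, fingerprint, and onion address are made-up examples)::

  from stem.util import tor_tools

  tor_tools.is_valid_nickname('caerSidi')                        # => True
  tor_tools.is_valid_nickname('this nickname is far too long')   # => False
  tor_tools.is_valid_fingerprint('$' + 'A' * 40, True)           # => True
  tor_tools.is_valid_hidden_service_address('aaaaaaaaaaaaaaaa')  # => True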
+ + ===================================== =========== + Requirement Description + ===================================== =========== + **AUTH_SAFECOOKIE** SAFECOOKIE authentication method + **DROPGUARDS** DROPGUARDS requests + **EVENT_AUTHDIR_NEWDESCS** AUTHDIR_NEWDESC events + **EVENT_BUILDTIMEOUT_SET** BUILDTIMEOUT_SET events + **EVENT_CIRC_MINOR** CIRC_MINOR events + **EVENT_CLIENTS_SEEN** CLIENTS_SEEN events + **EVENT_CONF_CHANGED** CONF_CHANGED events + **EVENT_DESCCHANGED** DESCCHANGED events + **EVENT_GUARD** GUARD events + **EVENT_HS_DESC_CONTENT** HS_DESC_CONTENT events + **EVENT_NEWCONSENSUS** NEWCONSENSUS events + **EVENT_NS** NS events + **EVENT_SIGNAL** SIGNAL events + **EVENT_STATUS** STATUS_GENERAL, STATUS_CLIENT, and STATUS_SERVER events + **EVENT_STREAM_BW** STREAM_BW events + **EVENT_TRANSPORT_LAUNCHED** TRANSPORT_LAUNCHED events + **EVENT_CONN_BW** CONN_BW events + **EVENT_CIRC_BW** CIRC_BW events + **EVENT_CELL_STATS** CELL_STATS events + **EVENT_TB_EMPTY** TB_EMPTY events + **EVENT_HS_DESC** HS_DESC events + **EXTENDCIRCUIT_PATH_OPTIONAL** EXTENDCIRCUIT queries can omit the path if the circuit is zero + **FEATURE_EXTENDED_EVENTS** 'EXTENDED_EVENTS' optional feature + **FEATURE_VERBOSE_NAMES** 'VERBOSE_NAMES' optional feature + **GETINFO_CONFIG_TEXT** 'GETINFO config-text' query + **HSFETCH** HSFETCH requests + **HSPOST** HSPOST requests + **ADD_ONION** ADD_ONION and DEL_ONION requests + **LOADCONF** LOADCONF requests + **MICRODESCRIPTOR_IS_DEFAULT** Tor gets microdescriptors by default rather than server descriptors + **TAKEOWNERSHIP** TAKEOWNERSHIP requests + **TORRC_CONTROL_SOCKET** 'ControlSocket ' config option + **TORRC_PORT_FORWARDING** 'PortForwarding' config option + **TORRC_DISABLE_DEBUGGER_ATTACHMENT** 'DisableDebuggerAttachment' config option + **TORRC_VIA_STDIN** Allow torrc options via 'tor -f -' (:trac:`13865`) + ===================================== =========== +""" + +import os +import re + +import stem.util.enum +import stem.util.system + +try: + # added in python 3.2 + from functools import lru_cache +except ImportError: + from stem.util.lru_cache import lru_cache + +# cache for the get_system_tor_version function +VERSION_CACHE = {} + + +def get_system_tor_version(tor_cmd = 'tor'): + """ + Queries tor for its version. This is os dependent, only working on linux, + osx, and bsd. + + :param str tor_cmd: command used to run tor + + :returns: :class:`~stem.version.Version` provided by the tor command + + :raises: **IOError** if unable to query or parse the version + """ + + if tor_cmd not in VERSION_CACHE: + version_cmd = '%s --version' % tor_cmd + + try: + version_output = stem.util.system.call(version_cmd) + except OSError as exc: + # make the error message nicer if this is due to tor being unavialable + + if 'No such file or directory' in str(exc): + if os.path.isabs(tor_cmd): + exc = "Unable to check tor's version. '%s' doesn't exist." % tor_cmd + else: + exc = "Unable to run '%s'. Mabye tor isn't in your PATH?" % version_cmd + + raise IOError(exc) + + if version_output: + # output example: + # Oct 21 07:19:27.438 [notice] Tor v0.2.1.30. This is experimental software. Do not rely on it for strong anonymity. (Running on Linux i686) + # Tor version 0.2.1.30. 
+ + last_line = version_output[-1] + + if last_line.startswith('Tor version ') and last_line.endswith('.'): + try: + version_str = last_line[12:-1] + VERSION_CACHE[tor_cmd] = Version(version_str) + except ValueError as exc: + raise IOError(exc) + else: + raise IOError("Unexpected response from '%s': %s" % (version_cmd, last_line)) + else: + raise IOError("'%s' didn't have any output" % version_cmd) + + return VERSION_CACHE[tor_cmd] + + +@lru_cache() +def _get_version(version_str): + return Version(version_str) + + +class Version(object): + """ + Comparable tor version. These are constructed from strings that conform to + the 'new' style in the `tor version-spec + `_, + such as "0.1.4" or "0.2.2.23-alpha (git-7dcd105be34a4f44)". + + :var int major: major version + :var int minor: minor version + :var int micro: micro version + :var int patch: patch level (**None** if undefined) + :var str status: status tag such as 'alpha' or 'beta-dev' (**None** if undefined) + :var str extra: extra information without its parentheses such as + 'git-8be6058d8f31e578' (**None** if undefined) + :var str git_commit: git commit id (**None** if it wasn't provided) + + :param str version_str: version to be parsed + + :raises: **ValueError** if input isn't a valid tor version + """ + + def __init__(self, version_str): + self.version_str = version_str + version_parts = re.match(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)?(-\S*)?( \(\S*\))?$', version_str) + self._hash = None + + if version_parts: + major, minor, micro, patch, status, extra = version_parts.groups() + + # The patch and status matches are optional (may be None) and have an extra + # proceeding period or dash if they exist. Stripping those off. + + if patch: + patch = int(patch[1:]) + + if status: + status = status[1:] + + if extra: + extra = extra[2:-1] + + self.major = int(major) + self.minor = int(minor) + self.micro = int(micro) + self.patch = patch + self.status = status + self.extra = extra + + if extra and re.match('^git-[0-9a-f]{16}$', extra): + self.git_commit = extra[4:] + else: + self.git_commit = None + else: + raise ValueError("'%s' isn't a properly formatted tor version" % version_str) + + def __str__(self): + """ + Provides the string used to construct the version. + """ + + return self.version_str + + def _compare(self, other, method): + """ + Compares version ordering according to the spec. + """ + + if not isinstance(other, Version): + return False + + for attr in ('major', 'minor', 'micro', 'patch'): + my_version = getattr(self, attr) + other_version = getattr(other, attr) + + if my_version is None: + my_version = 0 + + if other_version is None: + other_version = 0 + + if my_version != other_version: + return method(my_version, other_version) + + # According to the version spec... + # + # If we *do* encounter two versions that differ only by status tag, we + # compare them lexically as ASCII byte strings. + + my_status = self.status if self.status else '' + other_status = other.status if other.status else '' + + return method(my_status, other_status) + + def __eq__(self, other): + return self._compare(other, lambda s, o: s == o) + + def __gt__(self, other): + """ + Checks if this version meets the requirements for a given feature. We can + be compared to either a :class:`~stem.version.Version` or + :class:`~stem.version._VersionRequirements`. 
+ """ + + if isinstance(other, _VersionRequirements): + for rule in other.rules: + if rule(self): + return True + + return False + + return self._compare(other, lambda s, o: s > o) + + def __ge__(self, other): + if isinstance(other, _VersionRequirements): + for rule in other.rules: + if rule(self): + return True + + return False + + return self._compare(other, lambda s, o: s >= o) + + def __hash__(self): + if self._hash is None: + my_hash = 0 + + for attr in ('major', 'minor', 'micro', 'patch', 'status'): + my_hash *= 1024 + + attr_value = getattr(self, attr) + + if attr_value is not None: + my_hash += hash(attr_value) + + self._hash = my_hash + + return self._hash + + +class _VersionRequirements(object): + """ + Series of version constraints that can be compared to. For instance, this + allows for comparisons like 'if I'm greater than version X in the 0.2.2 + series, or greater than version Y in the 0.2.3 series'. + + This is a logical 'or' of the series of rules. + """ + + def __init__(self): + self.rules = [] + + def greater_than(self, version, inclusive = True): + """ + Adds a constraint that we're greater than the given version. + + :param stem.version.Version version: version we're checking against + :param bool inclusive: if comparison is inclusive or not + """ + + if inclusive: + self.rules.append(lambda v: version <= v) + else: + self.rules.append(lambda v: version < v) + + def less_than(self, version, inclusive = True): + """ + Adds a constraint that we're less than the given version. + + :param stem.version.Version version: version we're checking against + :param bool inclusive: if comparison is inclusive or not + """ + + if inclusive: + self.rules.append(lambda v: version >= v) + else: + self.rules.append(lambda v: version > v) + + def in_range(self, from_version, to_version, from_inclusive = True, to_inclusive = False): + """ + Adds constraint that we're within the range from one version to another. 
+ + :param stem.version.Version from_version: beginning of the comparison range + :param stem.version.Version to_version: end of the comparison range + :param bool from_inclusive: if comparison is inclusive with the starting version + :param bool to_inclusive: if comparison is inclusive with the ending version + """ + + if from_inclusive and to_inclusive: + new_rule = lambda v: from_version <= v <= to_version + elif from_inclusive: + new_rule = lambda v: from_version <= v < to_version + else: + new_rule = lambda v: from_version < v < to_version + + self.rules.append(new_rule) + +safecookie_req = _VersionRequirements() +safecookie_req.in_range(Version('0.2.2.36'), Version('0.2.3.0')) +safecookie_req.greater_than(Version('0.2.3.13')) + +Requirement = stem.util.enum.Enum( + ('AUTH_SAFECOOKIE', safecookie_req), + ('DROPGUARDS', Version('0.2.5.1-alpha')), + ('EVENT_AUTHDIR_NEWDESCS', Version('0.1.1.10-alpha')), + ('EVENT_BUILDTIMEOUT_SET', Version('0.2.2.7-alpha')), + ('EVENT_CIRC_MINOR', Version('0.2.3.11-alpha')), + ('EVENT_CLIENTS_SEEN', Version('0.2.1.10-alpha')), + ('EVENT_CONF_CHANGED', Version('0.2.3.3-alpha')), + ('EVENT_DESCCHANGED', Version('0.1.2.2-alpha')), + ('EVENT_GUARD', Version('0.1.2.5-alpha')), + ('EVENT_HS_DESC_CONTENT', Version('0.2.7.1-alpha')), + ('EVENT_NS', Version('0.1.2.3-alpha')), + ('EVENT_NEWCONSENSUS', Version('0.2.1.13-alpha')), + ('EVENT_SIGNAL', Version('0.2.3.1-alpha')), + ('EVENT_STATUS', Version('0.1.2.3-alpha')), + ('EVENT_STREAM_BW', Version('0.1.2.8-beta')), + ('EVENT_TRANSPORT_LAUNCHED', Version('0.2.5.0-alpha')), + ('EVENT_CONN_BW', Version('0.2.5.2-alpha')), + ('EVENT_CIRC_BW', Version('0.2.5.2-alpha')), + ('EVENT_CELL_STATS', Version('0.2.5.2-alpha')), + ('EVENT_TB_EMPTY', Version('0.2.5.2-alpha')), + ('EVENT_HS_DESC', Version('0.2.5.2-alpha')), + ('EXTENDCIRCUIT_PATH_OPTIONAL', Version('0.2.2.9')), + ('FEATURE_EXTENDED_EVENTS', Version('0.2.2.1-alpha')), + ('FEATURE_VERBOSE_NAMES', Version('0.2.2.1-alpha')), + ('GETINFO_CONFIG_TEXT', Version('0.2.2.7-alpha')), + ('HSFETCH', Version('0.2.7.1-alpha')), + ('HSPOST', Version('0.2.7.1-alpha')), + ('ADD_ONION', Version('0.2.7.1-alpha')), + ('LOADCONF', Version('0.2.1.1')), + ('MICRODESCRIPTOR_IS_DEFAULT', Version('0.2.3.3')), + ('TAKEOWNERSHIP', Version('0.2.2.28-beta')), + ('TORRC_CONTROL_SOCKET', Version('0.2.0.30')), + ('TORRC_PORT_FORWARDING', Version('0.2.3.1-alpha')), + ('TORRC_DISABLE_DEBUGGER_ATTACHMENT', Version('0.2.3.9')), + ('TORRC_VIA_STDIN', Version('0.2.6.3-alpha')), +)
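Finally, a short sketch of how these requirements are meant to be consumed (the version strings are arbitrary examples)::

  from stem.version import Requirement, Version

  my_version = Version('0.2.3.13-alpha (git-7dcd105be34a4f44)')

  my_version > Version('0.2.2.36')                      # => True
  my_version >= Requirement.AUTH_SAFECOOKIE             # => True
  Version('0.2.2.35') >= Requirement.AUTH_SAFECOOKIE    # => False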