diff --git a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/RECORD b/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/RECORD
deleted file mode 100644
index 3bba30a..0000000
--- a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/RECORD
+++ /dev/null
@@ -1,367 +0,0 @@
-SQLAlchemy-0.9.7.dist-info/DESCRIPTION.rst,sha256=ZN8fj2owI_rw0Emr3_RXqoNfTFkThjiZy7xcCzg1W_g,5013
-SQLAlchemy-0.9.7.dist-info/METADATA,sha256=BJMEdxvRA6_2F3TeCnFjCS87MJ-9mQDfNhMasw7zlxw,5785
-SQLAlchemy-0.9.7.dist-info/RECORD,,
-SQLAlchemy-0.9.7.dist-info/WHEEL,sha256=Er7DBTU_C2g_rTGCxcwhCKegQSKoYLj1ncusWiwlKwM,111
-SQLAlchemy-0.9.7.dist-info/metadata.json,sha256=L8ZvHjkvIuuc2wYjqxQXcMfbjzJukQrYyJWZgDtacI8,948
-SQLAlchemy-0.9.7.dist-info/top_level.txt,sha256=rp-ZgB7D8G11ivXON5VGPjupT1voYmWqkciDt5Uaw_Q,11
-sqlalchemy/__init__.py,sha256=1e-MTh9yzDNEKrfMPLTPbxzQxq2--QE4H-tc03eT5uE,2072
-sqlalchemy/cprocessors.cpython-35m-darwin.so,sha256=FEiXsJ5pdsq_PuqNiR7jrnTn99nThaja3fdifVbizV4,17012
-sqlalchemy/cresultproxy.cpython-35m-darwin.so,sha256=s_Y7BzjDh2KutKXsl9PyCZMdpMs1nmy1WlKCnAipmbI,18844
-sqlalchemy/cutils.cpython-35m-darwin.so,sha256=bGZ5-TxVNVjfkNxMTkbugVNeRQHI3mKXadQ2TXXvFu0,9756
-sqlalchemy/events.py,sha256=n0Z8zkW0Fdwdv82HLssEKhNNkCJP1nj3jZrztZinkYc,39724
-sqlalchemy/exc.py,sha256=aIZHSzr2SgBoyfNcjm4cWc5e81B953kSq2BskM6fBQY,11392
-sqlalchemy/inspection.py,sha256=zCZPzSx4EwImFRXU8vySI8xglZP_Nx4UEyKtjGMynhs,3093
-sqlalchemy/interfaces.py,sha256=SzmUZ1pL-7b4vEH361UFoCDW6GdM64UFXn5m-VIwgIA,10967
-sqlalchemy/log.py,sha256=4EG734XnC0sZJ-jFZJKJ1ONCJq1rAZCyu4SyAT8q3yQ,6712
-sqlalchemy/pool.py,sha256=LOxfK-5Mpuz6RO462sGSFCXieXauqcoMdMZt28XO0to,43830
-sqlalchemy/processors.py,sha256=h-deajMZbjXpfiOlOVNfBAxuIQf3fq9FemazYk1BGho,5220
-sqlalchemy/schema.py,sha256=Kr_6g5anin76KPkgWA_uagf-EzPLTWvsbFuKorJNHf4,1106
-sqlalchemy/types.py,sha256=E-EC4GrZeg_aEgGdXoXIw7iSgLtYJMNccYoDEXtF81s,1635
-sqlalchemy/connectors/__init__.py,sha256=-3OdiI200TYZzctdcC8z5tgV__t9sdhukPWJBHjlszA,278
-sqlalchemy/connectors/mxodbc.py,sha256=JZj-z_sY-BYiAM-T8PK9m-WN3vhfObmzAP6eb8Abpag,5348
-sqlalchemy/connectors/mysqldb.py,sha256=ZSi4E_f5Or7bqsI6Zv6cjLGEMAN0bedv7sHnCkrwwbM,4748
-sqlalchemy/connectors/pyodbc.py,sha256=vAvQwk3wDt1wRkiBwuwe0hC-7Fla4ekqBJLO8WPS_xs,5890
-sqlalchemy/connectors/zxJDBC.py,sha256=cVhbJ3PqmVD0KJ7m6FRtR9c5KhHIG-hhm-NX-4rgd5E,1868
-sqlalchemy/databases/__init__.py,sha256=CKXfBXKaWADu571n8lJR8cndndTHDUAAyK-a_0JLGjg,879
-sqlalchemy/dialects/__init__.py,sha256=XtI5s53JyccSQo2GIGNa89bXn8mtePccSukfR_-qipc,1027
-sqlalchemy/dialects/postgres.py,sha256=_FjxoU0BVULlj6PZBMB-2-c4WM6zviIvzxCgtXhxhsY,614
-sqlalchemy/dialects/drizzle/__init__.py,sha256=AOyB8JGeTbwWfpM5wVLGhbyzX2MRKPFIAoU00IMtrzw,573
-sqlalchemy/dialects/drizzle/base.py,sha256=dLQxRslE6oyugIeSCdlrVP4DYuCgWiH0kgbpuD59-KM,14995
-sqlalchemy/dialects/drizzle/mysqldb.py,sha256=myT7EXJg9ToVBrbUkkfmGgCb5Xu2PAr3xnFeA-pvS3s,1270
-sqlalchemy/dialects/firebird/__init__.py,sha256=bmFjix7gx64rr2luqs9O2mYm-NnpcgguH951XuW6eyc,664
-sqlalchemy/dialects/firebird/base.py,sha256=nnUrSBfI_chqZmA32KOeMzx8cgpEE5Tr9RNkoZA_Qpk,28061
-sqlalchemy/dialects/firebird/fdb.py,sha256=mr4KaJgHzpFk6W7g8qBPDLy6WAQkdnR5gMipWmM_6EE,4325
-sqlalchemy/dialects/firebird/kinterbasdb.py,sha256=WaIPAEIqQJ3QRpYk44IwwC-3t4X4jxv_LWK9CI0xBf4,6299
-sqlalchemy/dialects/mssql/__init__.py,sha256=dzp85H5bMoja-EsD08ctKaope5e-bjr9r6QoxL9TJXo,1081
-sqlalchemy/dialects/mssql/adodbapi.py,sha256=j1K_qpA_v8Uc6zX1PGSGsqnHECUQIx_mnxMr7h9pd3A,2493
-sqlalchemy/dialects/mssql/base.py,sha256=TY7YAXMg_ZKGtaWKrSOV1VTVpZ2oGh68ga_MbQYZvX4,58663
-sqlalchemy/dialects/mssql/information_schema.py,sha256=-E4WAgB0yYoncty676FvmZL9DEqXCEj0bGwvFc2aZD4,6418
-sqlalchemy/dialects/mssql/mxodbc.py,sha256=pIXO0sxf_5z2R68jIeILnC1aH5s8HyOadIzr7H13dxw,3856
-sqlalchemy/dialects/mssql/pymssql.py,sha256=DXoU2r3dcuxMh_QLB713iPAANfcsRqcMhhmBFKg_k9o,2978
-sqlalchemy/dialects/mssql/pyodbc.py,sha256=gHTjyRc9VOEJDZqs1daBmCPTDZGPQzhoOet2kxy7r2Q,9437
-sqlalchemy/dialects/mssql/zxjdbc.py,sha256=x33xz8OTS9ffrtRV9lWxbReLS4qW3pq2lp1aljDHGA8,2144
-sqlalchemy/dialects/mysql/__init__.py,sha256=Ii4p3TOckR9l50WvkstGq1piE_eoySn5ZXPw8R7D_rI,1171
-sqlalchemy/dialects/mysql/base.py,sha256=Oqh2Z2mcFDVlUL0DwPy3Q7bSzJQiC1JxCdMrAMwMyEo,109960
-sqlalchemy/dialects/mysql/cymysql.py,sha256=MWtGXcS4f5JpP1zBLzejWz3y7w5-saWlwQfH7_i1aao,2349
-sqlalchemy/dialects/mysql/gaerdbms.py,sha256=oe9BfWjFJglITk6s_wQQYe5vs5h9zYcMUuUx2FmE7J8,2724
-sqlalchemy/dialects/mysql/mysqlconnector.py,sha256=FX7HEsbiqE98IbpI3paFzfP2geLYsTadXox_lYmL9eE,3911
-sqlalchemy/dialects/mysql/mysqldb.py,sha256=Gf9QLAY2o4Eu9IeagLw4e-v5uQGHb7sJgP_COnwn8MM,3259
-sqlalchemy/dialects/mysql/oursql.py,sha256=DGwyO-b7etGb76XdDrcQ-hwtVu_yIzU-8IF_iYw5LqU,8756
-sqlalchemy/dialects/mysql/pymysql.py,sha256=TLsJeMHMZGvOTlbO2JhQqnjcALKmAgTedQJCBCnME0Q,1232
-sqlalchemy/dialects/mysql/pyodbc.py,sha256=agCzgILoQdSVOKup3OUMvt9RkDAtwyg8yQgIibo14XE,2640
-sqlalchemy/dialects/mysql/zxjdbc.py,sha256=MyCM6TGRxzjrhvsow6MhwLScB4qQTZn1DmyhhhPGcNg,3803
-sqlalchemy/dialects/oracle/__init__.py,sha256=hO304rf8aiIq9--QQSmuvi8MBZBcZhNzjI48cs1JTZo,797
-sqlalchemy/dialects/oracle/base.py,sha256=cG2Ov7EJ8GKg18mXLCiKLC0WO0ssdONykAPydaTCpJQ,49391
-sqlalchemy/dialects/oracle/cx_oracle.py,sha256=rneYv6pQOXVM2ztFBCO6bBcVAfR7vdtV8voEbvBYiIY,37737
-sqlalchemy/dialects/oracle/zxjdbc.py,sha256=c_nHf8X1GM0OkqXAKgQiTEved3ZDDzDop6dG_SpE55w,8034
-sqlalchemy/dialects/postgresql/__init__.py,sha256=JdIQ3kAuikIwYNEc4W39-BKnOepAb3jpdN7RXtwru9E,1251
-sqlalchemy/dialects/postgresql/base.py,sha256=EV642aX-WlOLDNirz33f50GXOGasLG8TBw8qaggo0GI,88366
-sqlalchemy/dialects/postgresql/constraints.py,sha256=OAIZmNYW3PEuWVagjj9p-CpCcdh6tM_PTObwjqD_vVs,2543
-sqlalchemy/dialects/postgresql/hstore.py,sha256=_HhwrAGGEk8KlKQZrqzJ_PSLHlH9Cd4wemN0ascv-Uk,11402
-sqlalchemy/dialects/postgresql/json.py,sha256=ezCEhBZKnobL2epNbT3re9AWQxvtYHpsQxK7fmB3XcI,11066
-sqlalchemy/dialects/postgresql/pg8000.py,sha256=Uiu6RhLLn42UpCu27Z957dhet59zZh4z3jmbjTfgLJg,5428
-sqlalchemy/dialects/postgresql/psycopg2.py,sha256=cjIOs6k-EKVGhbsec4sx8MNq7Nk-c1z4hnW5ZJYOn4U,20761
-sqlalchemy/dialects/postgresql/pypostgresql.py,sha256=ZxaL0d8xA0yhlhOdK09lLtJdWH90vdq57EITBrrdRms,2173
-sqlalchemy/dialects/postgresql/ranges.py,sha256=q3pc7jeUOc83lkgE3WVN8PlqKzeYJjuTHuXeRvY-s2s,4814
-sqlalchemy/dialects/postgresql/zxjdbc.py,sha256=c9_JUHsjiaTbmxqoe3v2YS0oIgkk5xOL0e6ZSaUM_EI,1397
-sqlalchemy/dialects/sqlite/__init__.py,sha256=evr3TsIXnZIKD7QY-CHC-MVvkt28SyV0sCJrIjpnQJM,723
-sqlalchemy/dialects/sqlite/base.py,sha256=jQ5kDBsYuP4yoENO6lBJ792YDY9cHJRfPXq4etec0mI,39398
-sqlalchemy/dialects/sqlite/pysqlite.py,sha256=dHrZk8Ut8sgNpVJ2-Byx_xJoxn55zLS_whmGSjABNCk,13249
-sqlalchemy/dialects/sybase/__init__.py,sha256=4G1LG5YqVaE2QDIJANqomedZXRQrVIwMW3y2lKWurVU,894
-sqlalchemy/dialects/sybase/base.py,sha256=ArOCZBItXEupylulxBzjr9Z81st06ZgGfqtfLJEurWE,28629
-sqlalchemy/dialects/sybase/mxodbc.py,sha256=1Qmk1XbcjhJwu0TH6U1QjHRhnncclR9jqdMCWsasbYM,901
-sqlalchemy/dialects/sybase/pyodbc.py,sha256=FsrKZP9k9UBMp_sTBIhe50QWwJmcpDw6FAzXmEhaq2s,2102
-sqlalchemy/dialects/sybase/pysybase.py,sha256=c9LCZ0IM4wW5fwMvvpgaflDlZJKcan4Pq7QwFE1LAUw,3208
-sqlalchemy/engine/__init__.py,sha256=o2daUscphvcFRkjOPMl0BJLGO6PXev_L8eDcx-7Zafg,15923
-sqlalchemy/engine/base.py,sha256=3_F3iPisYSVePe8G2vIsZqqEz29qfSvmsijwdgsyNYI,70311
-sqlalchemy/engine/default.py,sha256=W2TuN72wafDfLQU-ud74Fpl6pL8TjD5YdftrPrclyAY,34371
-sqlalchemy/engine/interfaces.py,sha256=pzFtwvtRIFze67WKMFKzVivZbSsXGQeEngoY-2DnL8g,30327
-sqlalchemy/engine/reflection.py,sha256=GocL2XvTxrmSdqn4ybWiRTlfsfiLSZcUISSJ_M1Bur8,21545
-sqlalchemy/engine/result.py,sha256=Is8N8TcQISjwBPwRxdsiOgyKqIDV8cJ15wBcwQWnsEw,34979
-sqlalchemy/engine/strategies.py,sha256=oVroyOyomN2q_OJfpHMhBa_0q-n_vWzI2H1cZIZb5G4,8715
-sqlalchemy/engine/threadlocal.py,sha256=UBtauPUQjOPHuPIcBIxpiKUmOm_jpcUrkftbJ32Gt4E,4103
-sqlalchemy/engine/url.py,sha256=9kL6hfESlqwlBcc5E_2a8YsuGF1EseMBm_DQyUAJ4XA,7521
-sqlalchemy/engine/util.py,sha256=wh2y0Uwt9O1ulnT6YhxAShcDXZTtigQZbiBnMxpvHeo,2338
-sqlalchemy/event/__init__.py,sha256=sk4pgB4dEPftPZMtgFctlqsPnWYjtcvqcpTVSg6mQ9M,419
-sqlalchemy/event/api.py,sha256=zLbcAKsKsYX5I3zmiwNIewwto1puGSWCm49crnAbHbk,3854
-sqlalchemy/event/attr.py,sha256=-ENxlontP0HnKFE9nyHMC4jS5pvMlXmkiK2ehAcdzLU,12566
-sqlalchemy/event/base.py,sha256=DU3EYBWflaGrwpEz_jocGQTsR3_nlKKWMFVThh4D6f4,7248
-sqlalchemy/event/legacy.py,sha256=vA9km6n_ZN1YSI5C-A9Jw4ptKfwZueTuvJbmtVYY1os,5818
-sqlalchemy/event/registry.py,sha256=bzPbXp2NTcYKQC7wsYUk-5rNMJMz5OwPGGsnzyI2ylQ,7470
-sqlalchemy/ext/__init__.py,sha256=wSCbYQ2KptpL8sNFiCbEzTjI2doWmcEAiRAqx58qLcY,235
-sqlalchemy/ext/associationproxy.py,sha256=XDr4UarpHG7ulrhiEsD8PYnYp4MUEUhpvjPfPdbayGw,32975
-sqlalchemy/ext/automap.py,sha256=r7F2VM0T--wecKH6uNFIWsYTElwwXCW9cOzF0llKz10,39713
-sqlalchemy/ext/compiler.py,sha256=m12MOPF6YbhY2OWJccWVRxfe2cigX0VcKzAhSr5FTI0,15770
-sqlalchemy/ext/horizontal_shard.py,sha256=T31IsHSpyJC27YLHsSxQ7R4uGIn4guICsVNNDWE994k,4814
-sqlalchemy/ext/hybrid.py,sha256=ZnPkE4ORZwA1md6rBYeyK-ySAcVsPZdLzRLXS3ZxmYo,27985
-sqlalchemy/ext/instrumentation.py,sha256=5MjuuikGz1x_itSm22PZMWr-los8v-5PK6HE5TKCSSM,14646
-sqlalchemy/ext/mutable.py,sha256=GCsgkaFyypUa1c3x7q9oNF2uoGM8d3LTv3DgDB9x5J8,23069
-sqlalchemy/ext/orderinglist.py,sha256=5vRTTK4Pdm2_IrbRIxWV8qHgDxktPVljrUBqwyLk-TE,13695
-sqlalchemy/ext/serializer.py,sha256=JiHdBiStZDlEvh5yzsd7lJAkmwfHJbsgDtcPwCRFOi0,5586
-sqlalchemy/ext/declarative/__init__.py,sha256=kGdbexw3SfbgS8Uq7og9iTLDXV-Hk8CJnXn_4nulql0,47618
-sqlalchemy/ext/declarative/api.py,sha256=idvoFBxhqzQvhTEPY764L1RpdrrSCJS3yU_J9xTbqJY,17780
-sqlalchemy/ext/declarative/base.py,sha256=oXplGZM3G81DPKcaI_PsNVGjmpj9g7eqmDOXod3DzvU,20036
-sqlalchemy/ext/declarative/clsregistry.py,sha256=niIyzC-WeITMJeFQqkdZdeLxuKni-vHrs7-LJ3WZi_g,10314
-sqlalchemy/orm/__init__.py,sha256=-RsTaAlLe9a2GvHw74fN5jyB2brV-i0XXViPzPGp2gc,7976
-sqlalchemy/orm/attributes.py,sha256=aoHu08zOuMziAlwgVyO2cr_86m26PFnTx6YnZ0QgcGQ,55522
-sqlalchemy/orm/base.py,sha256=lijsuavy2C4UJd7RrKKx8U6IdxsXwG8xV_XdqQ6B31c,13181
-sqlalchemy/orm/collections.py,sha256=RmOIb6b-XrRD8sSJ-9qMDM3aRyps1aOZcbQlWt4Ojss,52951
-sqlalchemy/orm/dependency.py,sha256=zEruE4dj9m7qHC9nS0xIAVx4L0WB58c6eZHPsZF25QM,46072
-sqlalchemy/orm/deprecated_interfaces.py,sha256=CXg9nh2XUyrRYDVS20XVc3G3niHx84J8ko7PBXOXcAI,21941
-sqlalchemy/orm/descriptor_props.py,sha256=b1GyOu45jjvxgBMwhVXBSyBxhYM7IVlOGG5F_qOl5co,24455
-sqlalchemy/orm/dynamic.py,sha256=XQTf7ozzdkloQMUPPkwM02eE_sr0sYt-uQgpkdFt2SU,13338
-sqlalchemy/orm/evaluator.py,sha256=ZrExCzWmvVZS2ovM8t09cPzfKOqbF7Zm3b88nPviPQM,5032
-sqlalchemy/orm/events.py,sha256=6AMKAa-V_72GsGj3dP158FdiqXbF3JM9IeBpAQFPsjo,70274
-sqlalchemy/orm/exc.py,sha256=Otfiun4oExJpUp8Tjk2JcSt9w3BkL1JRi95wXo-tv60,5439
-sqlalchemy/orm/identity.py,sha256=e_xaDoNI06hLaiDNomYuvuAUs-TAh901dwqeQhZs320,7091
-sqlalchemy/orm/instrumentation.py,sha256=rl72nSRqgk4PXlY3NsZJ_jtkrdszi_AwlBFRvXzRaug,16787
-sqlalchemy/orm/interfaces.py,sha256=vNj2Jl_U_S2rlITAlqg90mz9RTdRmtZvNJHCUnekWig,18983
-sqlalchemy/orm/loading.py,sha256=mvvth2KOU2CTNNnQuhAtzQRjuFoEf63jTjQ3Seop56M,21257
-sqlalchemy/orm/mapper.py,sha256=SZ1V59o6e10sUyAT9XytHjCvzyGALMF99QajZcWAAd8,108109
-sqlalchemy/orm/path_registry.py,sha256=VzaWNV7iA8Z5XkXcHLP379H6gQm1qSfunZ4dRaxyLDY,7672
-sqlalchemy/orm/persistence.py,sha256=Dz3prpO_nHfPO67dDwO4-6buSOziqXQktXfcyIGseYI,40862
-sqlalchemy/orm/properties.py,sha256=AB5fBY8EEchTU7QYgWQMn93i7KJoFdKI15O9ofETo8k,9557
-sqlalchemy/orm/query.py,sha256=GCMI29Hj3XXYFMo7cyGwpThv2rRqNuFa8T4lpt4bDcg,129823
-sqlalchemy/orm/relationships.py,sha256=IjCW_ng5YXoUODGj7EbkS3q9r1YnE4Y788O34ZeFeBI,111050
-sqlalchemy/orm/scoping.py,sha256=OoRgem4nICXPZAeK4-Sbe2kDzWDtyBHd6fZtK3DZT4c,6101
-sqlalchemy/orm/session.py,sha256=DHchZuztFAGH256oCA9dDKJCznyvvN9EeoFEXEpKW9E,95874
-sqlalchemy/orm/state.py,sha256=_hEslw-lhYnJTcSMmVfmsAzerR1yxwwtLDvCDrw3PK0,21014
-sqlalchemy/orm/strategies.py,sha256=0RsDUGe0i1Di_eFXkWAW0ZcV19V-m0K9YxmT8lM9Q-k,54293
-sqlalchemy/orm/strategy_options.py,sha256=q1_-h6Vh-59mRgM9dGNApxjF18TfrnwXMRk7wpHvTrE,32170
-sqlalchemy/orm/sync.py,sha256=NDLMpllJjk_zF7rgCSy4WlaeEfP1TVSj-UeVU_QZbVs,4736
-sqlalchemy/orm/unitofwork.py,sha256=F70Dfg7kBtcqLUp2qBYlcQZIDx3zI4bpNsIJFRxcf90,23234
-sqlalchemy/orm/util.py,sha256=-wzG6p6NGztBvxHiosvTvShwSQpZdd3KKT2mw26l86o,35695
-sqlalchemy/sql/__init__.py,sha256=158RHlIfn8A-OyihHBGd1jNcd4Ed2-laRkgeGJIvs4w,1721
-sqlalchemy/sql/annotation.py,sha256=JXguy7w1i3jPtr7AMiJCZ5B-TaECZquA8GmM2laEcC4,6111
-sqlalchemy/sql/base.py,sha256=z_dXcqIZsqKaCsSgl7dz5t97Ndb3v0l8gBe53jYRwTE,21416
-sqlalchemy/sql/compiler.py,sha256=7_GOyLILz5GBozTCXmQAnOResx0IivYH4DCf7ZV9acs,109419
-sqlalchemy/sql/ddl.py,sha256=WjCxmUAHmlbFXe5zofpj_lYU6-TEqvCJR9cxThHTIlc,28693
-sqlalchemy/sql/default_comparator.py,sha256=4DYyP32ubGMPei66c-IMAMsNKE-xoXpptyZDEju5jL4,13132
-sqlalchemy/sql/dml.py,sha256=ruyX-MuW_FcOW7DyjqGlyXwQUQlpAHtx73LHxfVXwqI,29526
-sqlalchemy/sql/elements.py,sha256=tyrdO9Bzm6em1sNEnwJO04LJCz-b1AmomRTp9OuerKQ,121164
-sqlalchemy/sql/expression.py,sha256=ERaOilpJJEWVXXCanhEY5x3Jeu5Q3pLuDSIQZCzq0bU,5668
-sqlalchemy/sql/functions.py,sha256=zI_Q6gqketUqehiNnDB6ezgDQC01iT7Up2jhPhWMTOg,16567
-sqlalchemy/sql/naming.py,sha256=FIdNBDvZwf1e-mW-Opjd_aKyeo986nWmYW0Dr9MOgCc,4588
-sqlalchemy/sql/operators.py,sha256=wsWdHm4sN6b6jfB8CTaFgBqww2jC-YjgpJJnmqzicVc,22510
-sqlalchemy/sql/schema.py,sha256=ts0hj8oYU9bx-amhfYpaDHdx4GFNwj1_avwHwm3eY5Q,132789
-sqlalchemy/sql/selectable.py,sha256=-CaVRHBIbgWpKe7kI5BSWY0x8AHTwUSJtq5d3XCQLwQ,109544
-sqlalchemy/sql/sqltypes.py,sha256=Zb7AMQlkMaVKTWQgR2o1JS4hwDjr6gjWl_POBaVGZ_0,54635
-sqlalchemy/sql/type_api.py,sha256=WsB-QZiEz1ez4g9OeXBKBqSnyluawX0ttG5stLmKJCI,37692
-sqlalchemy/sql/util.py,sha256=k2cBNkvVBl-B3AF5nD16Hq0NyWOE78iBbIf0b6PHA9U,19501
-sqlalchemy/sql/visitors.py,sha256=cohfnIfn4fD6O-qLhzhrwqrMaGhLlrRKvW0nTa72dHk,9943
-sqlalchemy/testing/__init__.py,sha256=RzQCY3RZ88UFBUhCGxnA82w1BsJ8M-0L-76-ex-Wt5o,1035
-sqlalchemy/testing/assertions.py,sha256=MoK89J6upTMKcPU23gTMIDb9Wk8qPvOkJd88y6lWNt0,15666
-sqlalchemy/testing/assertsql.py,sha256=fgA3QTe2vNQZzlGoBXmAhc0RA2JZKsbxyRAmr0FczZ8,11248
-sqlalchemy/testing/config.py,sha256=l34Qkqpz2Yu6BZxWm2zy7e5AqLyx_0adHLLpTq-bGew,2136
-sqlalchemy/testing/distutils_run.py,sha256=fzij-nmjhKKdS9j9THw8i4zWkC3syEzN5V4L9gx45YA,230
-sqlalchemy/testing/engines.py,sha256=uf4lllczl1qqmaOUrPNrW57cYgLLpJK8k3k8oduCTcg,13030
-sqlalchemy/testing/entities.py,sha256=1JpVCXMLwzSPpzetTqERD_3Zk3tiQBIDpBO9OVoVa9k,2992
-sqlalchemy/testing/exclusions.py,sha256=gjgLNk2PRBtsYbi_CsWGf6MxRLB8y4b9QkFbAWRfPuA,10053
-sqlalchemy/testing/fixtures.py,sha256=ys7TZ1uP9wF_OQXgTUNcXiUTOQZhQYgzc9fzjD420mQ,10587
-sqlalchemy/testing/mock.py,sha256=F0ticsEqquR82laaAqhJaTNDk-wJBjY8UqQmrFrvTLM,620
-sqlalchemy/testing/pickleable.py,sha256=9I1ADF_Tw-E6UgTHOuKGZP7_ZM72XhmEXNp_1qmh2l4,2641
-sqlalchemy/testing/profiling.py,sha256=FeAWcKQrVh_-ow06yUIF3P5gQU8YCeIkXKsV142U5eU,10280
-sqlalchemy/testing/requirements.py,sha256=xTZPvrj3wr-4xDsatmX1mp7mPMxgS_4XH5WLaZhi8Yg,17718
-sqlalchemy/testing/runner.py,sha256=q2HZNYXYgcJytV8IHw4btb7sD6muov4WtJzJFF6zuFc,1625
-sqlalchemy/testing/schema.py,sha256=nn4K5mjQbRQuvnuux43h9feYrXjZvZKpKEJLJioljGM,3433
-sqlalchemy/testing/util.py,sha256=nDHZOwsKgX1-ecRgZOzXzMrKp11HXfMCd5LsQB94ES4,5304
-sqlalchemy/testing/warnings.py,sha256=VskMM5G9imDqSfOQvWsriJ21b_CrfcgD5VOCWJtmwps,1682
-sqlalchemy/testing/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sqlalchemy/testing/plugin/noseplugin.py,sha256=Ql26MG8e5ZWzCDisrvYR3thsA0wS__iEHFq-0yGSgsg,2771
-sqlalchemy/testing/plugin/plugin_base.py,sha256=VMClps-DhTrrmx4rRCX5eXktKwZ5R-z-RlcRSmnt0lo,15384
-sqlalchemy/testing/plugin/pytestplugin.py,sha256=B2Wsh3ANSi3mw9zzuheRJ_HSpXfzilnCgl0WEh1nDmE,4368
-sqlalchemy/testing/suite/__init__.py,sha256=lbOCv0BGIpGJmk9L4I4zUL_W6_n-LGXJRTPvlvM4kDQ,419
-sqlalchemy/testing/suite/test_ddl.py,sha256=Baw0ou9nKdADmrRuXgWzF1FZx0rvkkw3JHc6yw5BN0M,1838
-sqlalchemy/testing/suite/test_insert.py,sha256=QQVLHnw58kUZWwGCVH7E1LL3I3R2b9mxl7MWhTro0hA,6746
-sqlalchemy/testing/suite/test_reflection.py,sha256=5rjLsnHZvQx0GQ9s6rkQaI_JcBVdQl-KFw8KolgXTk0,19895
-sqlalchemy/testing/suite/test_results.py,sha256=oAcO1tD0I7c9ErMeSvSZBZfz1IBDMJHJTf64Y1pBodk,6685
-sqlalchemy/testing/suite/test_select.py,sha256=qmSQE2EaVSf1Zwi_4kiBWstLZD2NvC3l8NbCfcnAhr8,2506
-sqlalchemy/testing/suite/test_sequence.py,sha256=i7tWJnVqfZDTopHs8i4NEDZnhsxjDoOQW0khixKIAnU,3806
-sqlalchemy/testing/suite/test_types.py,sha256=UKa-ZPdpz16mVKvT-9ISRAfqdrqiKaE7IA-_phQQuxo,17088
-sqlalchemy/testing/suite/test_update_delete.py,sha256=r5p467r-EUsjEcWGfUE0VPIfN4LLXZpLRnnyBLyyjl4,1582
-sqlalchemy/util/__init__.py,sha256=goz0YsuGbgmDfmXJ5bmx9H0JjKDAKflB2glTMAfbtK0,2350
-sqlalchemy/util/_collections.py,sha256=G3xVqiiFI-stTqFwJ93epgHws5_el6W-0KsS0JXxeH4,26052
-sqlalchemy/util/compat.py,sha256=HhYut_7bky-P3acKHpXs5CsX9Bri8FDLRgfAnG9bEyg,5926
-sqlalchemy/util/deprecations.py,sha256=CYQ712rSop1CXF-0Kr0eAo-bEa4g8YJvHoOiBxbfIhw,4403
-sqlalchemy/util/langhelpers.py,sha256=Yc9l67mVY-O8NZLJqI4ZOvbsC9eibVDm_V8Rhjhzbj4,37539
-sqlalchemy/util/queue.py,sha256=XAJ2hKAwepp3mCw5M1-Ksn1t7XS-mHoGhIhwzusklWw,6548
-sqlalchemy/util/topological.py,sha256=wmuAjgNqxrGWFrI3KHzUAD8ppaD6gRsxLtoG1D3nKDI,2594
-sqlalchemy/sql/__pycache__/functions.cpython-35.pyc,,
-sqlalchemy/dialects/sybase/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/deprecated_interfaces.cpython-35.pyc,,
-sqlalchemy/ext/declarative/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/dialects/mssql/__pycache__/adodbapi.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/dependency.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/relationships.cpython-35.pyc,,
-sqlalchemy/util/__pycache__/deprecations.cpython-35.pyc,,
-sqlalchemy/__pycache__/log.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/interfaces.cpython-35.pyc,,
-sqlalchemy/connectors/__pycache__/pyodbc.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/compiler.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/dynamic.cpython-35.pyc,,
-sqlalchemy/dialects/drizzle/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/dialects/oracle/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/dialects/mssql/__pycache__/zxjdbc.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/mutable.cpython-35.pyc,,
-sqlalchemy/__pycache__/pool.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/pg8000.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/mysqlconnector.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/elements.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/ddl.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/test_results.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/pyodbc.cpython-35.pyc,,
-sqlalchemy/dialects/mssql/__pycache__/information_schema.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/dml.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/hybrid.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/exclusions.cpython-35.pyc,,
-sqlalchemy/event/__pycache__/attr.cpython-35.pyc,,
-sqlalchemy/util/__pycache__/queue.cpython-35.pyc,,
-sqlalchemy/dialects/mssql/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/event/__pycache__/legacy.cpython-35.pyc,,
-sqlalchemy/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/__pycache__/exc.cpython-35.pyc,,
-sqlalchemy/__pycache__/schema.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/annotation.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/compiler.cpython-35.pyc,,
-sqlalchemy/util/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/oracle/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/__pycache__/types.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/hstore.cpython-35.pyc,,
-sqlalchemy/connectors/__pycache__/mxodbc.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/sqltypes.cpython-35.pyc,,
-sqlalchemy/dialects/firebird/__pycache__/kinterbasdb.cpython-35.pyc,,
-sqlalchemy/util/__pycache__/langhelpers.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/events.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/test_reflection.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/strategies.cpython-35.pyc,,
-sqlalchemy/dialects/oracle/__pycache__/cx_oracle.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/gaerdbms.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/mapper.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/test_types.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/default.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/session.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/profiling.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/state.cpython-35.pyc,,
-sqlalchemy/event/__pycache__/registry.cpython-35.pyc,,
-sqlalchemy/util/__pycache__/compat.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/loading.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/persistence.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/attributes.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/zxjdbc.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/descriptor_props.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/sybase/__pycache__/mxodbc.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/default_comparator.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/schema.cpython-35.pyc,,
-sqlalchemy/testing/plugin/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/scoping.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/horizontal_shard.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/operators.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/distutils_run.cpython-35.pyc,,
-sqlalchemy/event/__pycache__/api.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/schema.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/orderinglist.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/assertsql.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/engines.cpython-35.pyc,,
-sqlalchemy/connectors/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/reflection.cpython-35.pyc,,
-sqlalchemy/dialects/firebird/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/psycopg2.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/type_api.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/config.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/util.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/test_ddl.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/util.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/unitofwork.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/pickleable.cpython-35.pyc,,
-sqlalchemy/event/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/associationproxy.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/visitors.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/__pycache__/inspection.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/serializer.cpython-35.pyc,,
-sqlalchemy/dialects/sybase/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/mssql/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/databases/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/selectable.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/fixtures.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/util.cpython-35.pyc,,
-sqlalchemy/event/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/connectors/__pycache__/zxJDBC.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/runner.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/pypostgresql.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/automap.cpython-35.pyc,,
-sqlalchemy/__pycache__/interfaces.cpython-35.pyc,,
-sqlalchemy/ext/__pycache__/instrumentation.cpython-35.pyc,,
-sqlalchemy/dialects/__pycache__/postgres.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/mock.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/exc.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/ranges.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/json.cpython-35.pyc,,
-sqlalchemy/util/__pycache__/_collections.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/threadlocal.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/zxjdbc.cpython-35.pyc,,
-sqlalchemy/dialects/mssql/__pycache__/pyodbc.cpython-35.pyc,,
-sqlalchemy/dialects/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/constraints.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/mysqldb.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/strategies.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/strategy_options.cpython-35.pyc,,
-sqlalchemy/dialects/drizzle/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/result.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/assertions.cpython-35.pyc,,
-sqlalchemy/dialects/drizzle/__pycache__/mysqldb.cpython-35.pyc,,
-sqlalchemy/dialects/sqlite/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/mssql/__pycache__/mxodbc.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/test_insert.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/test_update_delete.cpython-35.pyc,,
-sqlalchemy/util/__pycache__/topological.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/test_select.cpython-35.pyc,,
-sqlalchemy/dialects/postgresql/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/evaluator.cpython-35.pyc,,
-sqlalchemy/ext/declarative/__pycache__/clsregistry.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/pymysql.cpython-35.pyc,,
-sqlalchemy/__pycache__/events.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/warnings.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/naming.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/dialects/sqlite/__pycache__/pysqlite.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/collections.cpython-35.pyc,,
-sqlalchemy/dialects/sqlite/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/instrumentation.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/properties.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/expression.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/identity.cpython-35.pyc,,
-sqlalchemy/dialects/sybase/__pycache__/pyodbc.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/query.cpython-35.pyc,,
-sqlalchemy/dialects/firebird/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/interfaces.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/requirements.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/entities.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/sync.cpython-35.pyc,,
-sqlalchemy/engine/__pycache__/url.cpython-35.pyc,,
-sqlalchemy/dialects/oracle/__pycache__/zxjdbc.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/base.cpython-35.pyc,,
-sqlalchemy/dialects/mssql/__pycache__/pymssql.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/path_registry.cpython-35.pyc,,
-sqlalchemy/sql/__pycache__/util.cpython-35.pyc,,
-sqlalchemy/dialects/firebird/__pycache__/fdb.cpython-35.pyc,,
-sqlalchemy/testing/plugin/__pycache__/plugin_base.cpython-35.pyc,,
-sqlalchemy/testing/plugin/__pycache__/noseplugin.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/cymysql.cpython-35.pyc,,
-sqlalchemy/ext/declarative/__pycache__/api.cpython-35.pyc,,
-sqlalchemy/dialects/mysql/__pycache__/oursql.cpython-35.pyc,,
-sqlalchemy/testing/plugin/__pycache__/pytestplugin.cpython-35.pyc,,
-sqlalchemy/ext/declarative/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/orm/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/dialects/sybase/__pycache__/pysybase.cpython-35.pyc,,
-sqlalchemy/testing/__pycache__/__init__.cpython-35.pyc,,
-sqlalchemy/testing/suite/__pycache__/test_sequence.cpython-35.pyc,,
-sqlalchemy/connectors/__pycache__/mysqldb.cpython-35.pyc,,
-sqlalchemy/__pycache__/processors.cpython-35.pyc,,
diff --git a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/DESCRIPTION.rst b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/DESCRIPTION.rst
similarity index 100%
rename from lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/DESCRIPTION.rst
rename to lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/DESCRIPTION.rst
diff --git a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/METADATA b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/METADATA
similarity index 99%
rename from lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/METADATA
rename to lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/METADATA
index a947267..f993bb0 100644
--- a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/METADATA
+++ b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.0
 Name: SQLAlchemy
-Version: 0.9.7
+Version: 1.0.12
 Summary: Database Abstraction Library
 Home-page: http://www.sqlalchemy.org
 Author: Mike Bayer
diff --git a/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/RECORD b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/RECORD
new file mode 100644
index 0000000..14187b1
--- /dev/null
+++ b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/RECORD
@@ -0,0 +1,375 @@
+SQLAlchemy-1.0.12.dist-info/DESCRIPTION.rst,sha256=ZN8fj2owI_rw0Emr3_RXqoNfTFkThjiZy7xcCzg1W_g,5013
+SQLAlchemy-1.0.12.dist-info/METADATA,sha256=fntYBelbmQAxIrj5_YGLpIGPzwQBxiA_6kJwVdrwMF4,5786
+SQLAlchemy-1.0.12.dist-info/RECORD,,
+SQLAlchemy-1.0.12.dist-info/WHEEL,sha256=Er7DBTU_C2g_rTGCxcwhCKegQSKoYLj1ncusWiwlKwM,111
+SQLAlchemy-1.0.12.dist-info/metadata.json,sha256=QojGP97nxChuh8Zdlr9y3tpCLlGzDO622gq1_HEvFmo,965
+SQLAlchemy-1.0.12.dist-info/top_level.txt,sha256=rp-ZgB7D8G11ivXON5VGPjupT1voYmWqkciDt5Uaw_Q,11
+sqlalchemy/__init__.py,sha256=fTurvwmGkoRt_zdwxoZNWTHg6VdzvBpeHyPmUnexOK4,2112
+sqlalchemy/cprocessors.cpython-35m-darwin.so,sha256=U2_r5ou5V_I2IehYbMEPnqhrB5x0__8Zd7dpLTkWZO8,16924
+sqlalchemy/cresultproxy.cpython-35m-darwin.so,sha256=QSna4HE3XSia-2igDXCc_5VR9PyKwu89Tti5FttpmeM,18756
+sqlalchemy/cutils.cpython-35m-darwin.so,sha256=rUL24S3pi7e8KdIeS5oyseDDKpi2LwzIDQvzKNXCQ5Q,9668
+sqlalchemy/events.py,sha256=j8yref-XfuJxkPKbvnZmB4jeUAIujPcbLAzD2cKV4f4,43944
+sqlalchemy/exc.py,sha256=NhA5R5nDdducWkp0MXtlQ0-Q6iF_rhqkHWblIfuSYGk,11706
+sqlalchemy/inspection.py,sha256=zMa-2nt-OQ0Op1dqq0Z2XCnpdAFSTkqif5Kdi8Wz8AU,3093
+sqlalchemy/interfaces.py,sha256=XSx5y-HittAzc79lU4C7rPbTtSW_Hc2c89NqCy50tsQ,10967
+sqlalchemy/log.py,sha256=opX7UORq5N6_jWxN9aHX9OpiirwAcRA0qq-u5m4SMkQ,6712
+sqlalchemy/pool.py,sha256=-F51TIJYl0XGTV2_sdpV8C1m0jTTQaq0nAezdmSgr84,47220
+sqlalchemy/processors.py,sha256=Li1kdC-I0v03JxeOz4V7u4HAevK6LledyCPvaL06mYc,5220
+sqlalchemy/schema.py,sha256=rZzZJJ8dT9trLSYknFpHm0N1kRERYwhqHH3QD31SJjc,1182
+sqlalchemy/types.py,sha256=qcoy5xKaurDV4kaXr489GL2sz8FKkWX21Us3ZCqeasg,1650
+sqlalchemy/connectors/__init__.py,sha256=97YbriYu5mcljh7opc1JOScRlf3Tk8ldbn5urBVm4WY,278
+sqlalchemy/connectors/mxodbc.py,sha256=-0iqw2k8e-o3OkAKzoCWuAaEPxlEjslvfRM9hnVXENM,5348
+sqlalchemy/connectors/pyodbc.py,sha256=pG2yf3cEDtTr-w_m4to6jF5l8hZk6MJv69K3cg84NfY,6264
+sqlalchemy/connectors/zxJDBC.py,sha256=2KK_sVSgMsdW0ufZqAwgXjd1FsMb4hqbiUQRAkM0RYg,1868
+sqlalchemy/databases/__init__.py,sha256=BaQyAuMjXNpZYV47hCseHrDtPzTfSw-iqUQYxMWJddw,817
+sqlalchemy/dialects/__init__.py,sha256=7SMul8PL3gkbJRUwAwovHLae5qBBApRF-VcRwU-VtdU,1012
+sqlalchemy/dialects/postgres.py,sha256=heNVHys6E91DIBepXT3ls_4_6N8HTTahrZ49W5IR3M0,614
+sqlalchemy/dialects/firebird/__init__.py,sha256=QYmQ0SaGfq3YjDraCV9ALwqVW5A3KDUF0F6air_qp3Q,664
+sqlalchemy/dialects/firebird/base.py,sha256=IT0prWkh1TFSTke-BqGdVMGdof53zmWWk6zbJZ_TuuI,28170
+sqlalchemy/dialects/firebird/fdb.py,sha256=l4s6_8Z0HvqxgqGz0LNcKWP1qUmEc3M2XM718_drN34,4325
+sqlalchemy/dialects/firebird/kinterbasdb.py,sha256=kCsn2ed4u9fyjcyfEI3rXQdKvL05z9wtf5YjW9-NrvI,6299
+sqlalchemy/dialects/mssql/__init__.py,sha256=G12xmirGZgMzfUKZCA8BFfaCmqUDuYca9Fu2VP_eaks,1081
+sqlalchemy/dialects/mssql/adodbapi.py,sha256=dHZgS3pEDX39ixhlDfTtDcjCq6rdjF85VS7rIZ1TfYo,2493
+sqlalchemy/dialects/mssql/base.py,sha256=xqRmK_npoyH5gl626EjazVnu9TEArmrBIFme_avYFUg,66855
+sqlalchemy/dialects/mssql/information_schema.py,sha256=pwuTsgOCY5eSBW9w-g-pyJDRfyuZ_rOEXXNYRuAroCE,6418
+sqlalchemy/dialects/mssql/mxodbc.py,sha256=G9LypIeEizgxeShtDu2M7Vwm8NopnzaTmnZMD49mYeg,3856
+sqlalchemy/dialects/mssql/pymssql.py,sha256=w92w4YQzXdHb53AjCrBcIRHsf6jmie1iN9H7gJNGX4k,3079
+sqlalchemy/dialects/mssql/pyodbc.py,sha256=KRke1Hizrg3r5iYqxdBI0axXVQ_pZR_UPxLaAdF0mKk,9473
+sqlalchemy/dialects/mssql/zxjdbc.py,sha256=u4uBgwk0LbI7_I5CIvM3C4bBb0pmrw2_DqRh_ehJTkI,2282
+sqlalchemy/dialects/mysql/__init__.py,sha256=3cQ2juPT8LsZTicPa2J-0rCQjQIQaPgyBzxjV3O_7xs,1171
+sqlalchemy/dialects/mysql/base.py,sha256=rwC8fnhGZaAnsPB1Jhg4sTcrWE2hjxrZJ5deCS0rAOc,122869
+sqlalchemy/dialects/mysql/cymysql.py,sha256=nqsdQA8LBLIc6eilgX6qwkjm7szsUoqMTVYwK9kkfsE,2349
+sqlalchemy/dialects/mysql/gaerdbms.py,sha256=2MxtTsIqlpq_J32HHqDzz-5vu-mC51Lb7PvyGkJa73M,3387
+sqlalchemy/dialects/mysql/mysqlconnector.py,sha256=DMDm684Shk-ijVo7w-yidopYw7EC6EiOmJY56EPawok,5323
+sqlalchemy/dialects/mysql/mysqldb.py,sha256=McqROngxAknbLOXoUAG9o9mP9FQBLs-ouD-JqqI2Ses,6564
+sqlalchemy/dialects/mysql/oursql.py,sha256=rmdr-r66iJ2amqFeGvCohvE8WCl_i6R9KcgVG0uXOQs,8124
+sqlalchemy/dialects/mysql/pymysql.py,sha256=e-qehI-sASmAjEa0ajHqjZjlyJYWsb3RPQY4iBR5pz0,1504
+sqlalchemy/dialects/mysql/pyodbc.py,sha256=Ze9IOKw6ANVQj25IlmSGR8aaJhM0pMuRtbzKF7UsZCY,2665
+sqlalchemy/dialects/mysql/zxjdbc.py,sha256=LIhe2mHSRVgi8I7qmiTMVBRSpuWJVnuDtpHTUivIx0M,3942
+sqlalchemy/dialects/oracle/__init__.py,sha256=UhF2ZyPfT3EFAnP8ZjGng6GnWSzmAkjMax0Lucpn0Bg,797
+sqlalchemy/dialects/oracle/base.py,sha256=2KJO-sU2CVKK1rij6bAQ5ZFJv203_NmzT8dE5qor9wc,55961
+sqlalchemy/dialects/oracle/cx_oracle.py,sha256=-d5tHbNcCyjbgVtAvWfHgSY2yA8C9bvCzxhwkdWFNe0,38635
+sqlalchemy/dialects/oracle/zxjdbc.py,sha256=nC7XOCY3NdTLrEyIacNTnLDCaeVjWn59q8UYssJL8Wo,8112
+sqlalchemy/dialects/postgresql/__init__.py,sha256=SjCtM5b3EaGyRaTyg_i82sh_qjkLEIVUXW91XDihiCM,1299
+sqlalchemy/dialects/postgresql/base.py,sha256=xhdLeHuWioTv9LYW41pcIPsEjD2fyeh7JflkLKmZMB8,104230
+sqlalchemy/dialects/postgresql/constraints.py,sha256=8UDx_2TNQgqIUSRETZPhgninJigQ6rMfdRNI6vIt3Is,3119
+sqlalchemy/dialects/postgresql/hstore.py,sha256=n8Wsd7Uldk3bbg66tTa0NKjVqjhJUbF1mVeUsM7keXA,11402
+sqlalchemy/dialects/postgresql/json.py,sha256=MTlIGinMDa8iaVbZMOzYnremo0xL4tn2wyGTPwnvX6U,12215
+sqlalchemy/dialects/postgresql/pg8000.py,sha256=x6o3P8Ad0wKsuF9qeyip39BKc5ORJZ4nWxv-8qOdj0E,8375
+sqlalchemy/dialects/postgresql/psycopg2.py,sha256=4ac0upErNRJz6YWJYNbATCU3ncWFvat5kal_Cuq-Jhw,26953
+sqlalchemy/dialects/postgresql/psycopg2cffi.py,sha256=8R3POkJH8z8a2DxwKNmfmQOsxFqsg4tU_OnjGj3OfDA,1651
+sqlalchemy/dialects/postgresql/pypostgresql.py,sha256=raQRfZb8T9-c-jmq1w86Wci5QyiXgf_9_71OInT_sAw,2655
+sqlalchemy/dialects/postgresql/ranges.py,sha256=MihdGXMdmCM6ToIlrj7OJx9Qh_8BX8bv5PSaAepHmII,4814
+sqlalchemy/dialects/postgresql/zxjdbc.py,sha256=AhEGRiAy8q-GM0BStFcsLBgSwjxHkkwy2-BSroIoADo,1397
+sqlalchemy/dialects/sqlite/__init__.py,sha256=0wW0VOhE_RtFDpRcbwvvo3XtD6Y2-SDgG4K7468eh_w,736
+sqlalchemy/dialects/sqlite/base.py,sha256=_L9-854ITf8Fl2BgUymF9fKjDFvXSo7Pb2yuz1CMkDo,55007
+sqlalchemy/dialects/sqlite/pysqlcipher.py,sha256=sgXCqn8ZtNIeTDwyo253Kj5mn4TPlIW3AZCNNmURi2A,4129
+sqlalchemy/dialects/sqlite/pysqlite.py,sha256=G-Cg-iI-ErYsVjOH4UlQTEY9pLnLOLV89ik8q0-reuY,14980
+sqlalchemy/dialects/sybase/__init__.py,sha256=gwCgFR_C_hoj0Re7PiaW3zmKSWaLpsd96UVXdM7EnTM,894
+sqlalchemy/dialects/sybase/base.py,sha256=Xpl3vEd5VDyvoIRMg0DZa48Or--yBSrhaZ2CbTSCt0w,28853
+sqlalchemy/dialects/sybase/mxodbc.py,sha256=E_ask6yFSjyhNPvv7gQsvA41WmyxbBvRGWjCyPVr9Gs,901
+sqlalchemy/dialects/sybase/pyodbc.py,sha256=0a_gKwrIweJGcz3ZRYuQZb5BIvwjGmFEYBo9wGk66kI,2102
+sqlalchemy/dialects/sybase/pysybase.py,sha256=tu2V_EbtgxWYOvt-ybo5_lLiBQzsIFaAtF8e7S1_-rk,3208
+sqlalchemy/engine/__init__.py,sha256=fyIFw2R5wfLQzSbfE9Jz-28ZDP5RyB-5elNH92uTZYM,18803
+sqlalchemy/engine/base.py,sha256=cRqbbG0QuUG-NGs3GOPVQsU0WLsw5bLT0Y07Yf8OOfU,79399
+sqlalchemy/engine/default.py,sha256=U_yaliCazUHp6cfk_NVzhB4F_zOJSyy959rHyk40J4M,36548
+sqlalchemy/engine/interfaces.py,sha256=CmPYM_oDp1zAPH13sKmufO4Tuha6KA-fXRQq-K_3YTE,35908
+sqlalchemy/engine/reflection.py,sha256=jly5YN-cyjoBDxHs9qO6Mlgm1OZSb2NBNFALwZMEGxE,28590
+sqlalchemy/engine/result.py,sha256=ot5RQxa6kjoScXRUR-DTl0iJJISBhmyNTj1JZkZiNsk,44027
+sqlalchemy/engine/strategies.py,sha256=mwy-CTrnXzyaIA1TRQBQ_Z2O8wN0lnTNZwDefEWCR9A,8929
+sqlalchemy/engine/threadlocal.py,sha256=y4wOLjtbeY-dvp2GcJDtos6F2jzfP11JVAaSFwZ0zRM,4191
+sqlalchemy/engine/url.py,sha256=ZhS_Iqiu6V1kfIM2pcv3ud9fOPXkFOHBv8wiLOqbJhc,8228
+sqlalchemy/engine/util.py,sha256=Tvb9sIkyd6qOwIA-RsBmo5j877UXa5x-jQmhqnhHWRA,2338
+sqlalchemy/event/__init__.py,sha256=KnUVp-NVX6k276ntGffxgkjVmIWR22FSlzrbAKqQ6S4,419
+sqlalchemy/event/api.py,sha256=O2udbj5D7HdXcvsGBQk6-dK9CAFfePTypWOrUdqmhYY,5990
+sqlalchemy/event/attr.py,sha256=VfRJJl4RD24mQaIoDwArWL2hsGOX6ISSU6vKusVMNO0,12053
+sqlalchemy/event/base.py,sha256=DWDKZV19fFsLavu2cXOxXV8NhO3XuCbKcKamBKyXuME,9540
+sqlalchemy/event/legacy.py,sha256=ACnVeBUt8uwVfh1GNRu22cWCADC3CWZdrsBKzAd6UQQ,5814
+sqlalchemy/event/registry.py,sha256=13wx1qdEmcQeCoAmgf_WQEMuR43h3v7iyd2Re54QdOE,7786
+sqlalchemy/ext/__init__.py,sha256=smCZIGgjJprT4ddhuYSLZ8PrTn4NdXPP3j03a038SdE,322
+sqlalchemy/ext/associationproxy.py,sha256=y61Y4UIZNBit5lqk2WzdHTCXIWRrBg3hHbRVsqXjnqE,33422
+sqlalchemy/ext/automap.py,sha256=Aet-3zk2vbsJVLqigwZJYau0hB1D6Y21K65QVWeB5pc,41567
+sqlalchemy/ext/baked.py,sha256=BnVaB4pkQxHk-Fyz4nUw225vCxO_zrDuVC6t5cSF9x8,16967
+sqlalchemy/ext/compiler.py,sha256=aSSlySoTsqN-JkACWFIhv3pq2CuZwxKm6pSDfQoc10Q,16257
+sqlalchemy/ext/horizontal_shard.py,sha256=XEBYIfs0YrTt_2vRuaBY6C33ZOZMUHQb2E4X2s3Szns,4814
+sqlalchemy/ext/hybrid.py,sha256=wNXvuYEEmKy-Nc6z7fu1c2gNWCMOiQA0N14Y3FCq5lo,27989
+sqlalchemy/ext/instrumentation.py,sha256=HRgNiuYJ90_uSKC1iDwsEl8_KXscMQkEb9KeElk-yLE,14856
+sqlalchemy/ext/mutable.py,sha256=lx7b_ewFVe7O6I4gTXdi9M6C6TqxWCFiViqCM2VwUac,25444
+sqlalchemy/ext/orderinglist.py,sha256=UCkuZxTWAQ0num-b5oNm8zNJAmVuIFcbFXt5e7JPx-U,13816
+sqlalchemy/ext/serializer.py,sha256=fK3N1miYF16PSIZDjLFS2zI7y-scZ9qtmopXIfzPqrA,5586
+sqlalchemy/ext/declarative/__init__.py,sha256=Jpwf2EukqwNe4RzDfCmX1p-hQ6pPhJEIL_xunaER3tw,756
+sqlalchemy/ext/declarative/api.py,sha256=PdoO_jh50TWaMvXqnjNh-vX42VqB75ZyliluilphvsU,23317
+sqlalchemy/ext/declarative/base.py,sha256=96SJBOfxpTMsU2jAHrvuXbsjUUJ7TvbLm11R8Hy2Irc,25231
+sqlalchemy/ext/declarative/clsregistry.py,sha256=jaLLSr-66XvLnA1Z9kxjKatH_XHxWchqEXMKwvjKAXk,10817
+sqlalchemy/orm/__init__.py,sha256=UzDockQEVMaWvr-FE4y1rptrMb5uX5k8v_UNQs82qFY,8033
+sqlalchemy/orm/attributes.py,sha256=OmXkppJEZxRGc0acZZZkSbUhdfDl8ry3Skmvzl3OtLQ,56510
+sqlalchemy/orm/base.py,sha256=F0aRZGK2_1F8phwBHnVYaChkAb-nnTRoFE1VKSvmAwA,14634
+sqlalchemy/orm/collections.py,sha256=TFutWIn_c07DI48FDOKMsFMnAoQB3BG2FnEMGzEF3iI,53549
+sqlalchemy/orm/dependency.py,sha256=phB8nS1788FSd4dWa2j9d4uj6QFlRL7nzcXvh3Bb7Zo,46192
+sqlalchemy/orm/deprecated_interfaces.py,sha256=A63t6ivbZB3Wq8vWgL8I05uTRR6whcWnIPkquuTIPXU,18254
+sqlalchemy/orm/descriptor_props.py,sha256=uk5r77w1VUWVgn0bkgOItkAlMh9FRgeT6OCgOHz3_bM,25141
+sqlalchemy/orm/dynamic.py,sha256=I_YP7X-H9HLjeFHmYgsOas6JPdqg0Aqe0kaltt4HVzA,13283
+sqlalchemy/orm/evaluator.py,sha256=Hozggsd_Fi0YyqHrr9-tldtOA9NLX0MVBF4e2vSM6GY,4731
+sqlalchemy/orm/events.py,sha256=yRaoXlBL78b3l11itTrAy42UhLu42-7cgXKCFUGNXSg,69410
+sqlalchemy/orm/exc.py,sha256=P5lxi5RMFokiHL136VBK0AP3UmAlJcSDHtzgo-M6Kgs,5439
+sqlalchemy/orm/identity.py,sha256=zsb8xOZaPYKvs4sGhyxW21mILQDrtdSuzD4sTyeKdJs,9021
+sqlalchemy/orm/instrumentation.py,sha256=xtq9soM3mpMws7xqNJIFYXqKw65p2nnxCTfmMpuvpeI,17510
+sqlalchemy/orm/interfaces.py,sha256=AqitvZ_BBkB6L503uhdH55nxHplleJ2kQMwM7xKq9Sc,21552
+sqlalchemy/orm/loading.py,sha256=cjC8DQ5g8_rMxroYrYHfW5s35Z5OFSNBUu0-LpxW7hI,22878
+sqlalchemy/orm/mapper.py,sha256=sfooeslzwWAKN7WNIQoZ2Y3u_mCyIxd0tebp4yEUu8k,115074
+sqlalchemy/orm/path_registry.py,sha256=8Pah0P8yPVUyRjoET7DvIMGtM5PC8HZJC4GtxAyqVAs,8370
+sqlalchemy/orm/persistence.py,sha256=WzUUNm1UGm5mGxbv94hLTQowEDNoXfU1VoyGnoKeN_g,51028
+sqlalchemy/orm/properties.py,sha256=HR3eoY3Ze3FUPPNCXM_FruWz4pEMWrGlqtCGiK2G1qE,10426
+sqlalchemy/orm/query.py,sha256=2q2XprzbZhIlAbs0vihIr9dgqfJtcbrjNewgE9q26gE,147616
+sqlalchemy/orm/relationships.py,sha256=79LRGGz8MxsKsAlv0vuZ6MYZXzDXXtfiOCZg-IQ9hiU,116992
+sqlalchemy/orm/scoping.py,sha256=Ao-K4iqg4pBp7Si5JOAlro5zUL_r500TC3lVLcFMLDs,6421
+sqlalchemy/orm/session.py,sha256=yctpvCsLUcFv9Sy8keT1SElZ2VH5DNScYtO7Z77ptYI,111314
+sqlalchemy/orm/state.py,sha256=4LwwftOtPQldH12SKZV2UFgzqPOCj40QfQ08knZs0_E,22984
+sqlalchemy/orm/strategies.py,sha256=rdLEs2pPrF8nqcQqezyG-fGdmE11r22fUva4ES3KGOE,58529
+sqlalchemy/orm/strategy_options.py,sha256=_z7ZblWCnXh8bZpGSOXDoUwtdUqnXdCaWfKXYDgCuH0,34973
+sqlalchemy/orm/sync.py,sha256=B-d-H1Gzw1TkflpvgJeQghwTzqObzhZCQdvEdSPyDeE,5451
+sqlalchemy/orm/unitofwork.py,sha256=EQvZ7RZ-u5wJT51BWTeMJJi-tt22YRnmqywGUCn0Qrc,23343
+sqlalchemy/orm/util.py,sha256=Mj3NXDd8Mwp4O5Vr5zvRGFUZRlB65WpExdDBFJp04wQ,38092
+sqlalchemy/sql/__init__.py,sha256=IFCJYIilmmAQRnSDhv9Y6LQUSpx6pUU5zp9VT7sOx0c,1737
+sqlalchemy/sql/annotation.py,sha256=8ncgAVUo5QCoinApKjREi8esWNMFklcBqie8Q42KsaQ,6136
+sqlalchemy/sql/base.py,sha256=TuXOp7z0Q30qKAjhgcsts6WGvRbvg6F7OBojMQAxjX0,20990
+sqlalchemy/sql/compiler.py,sha256=G0Ft_Dmq1AousO66eagPhI0g9Vkqui_c_LjqY0AbImU,100710
+sqlalchemy/sql/crud.py,sha256=X86dyvzEnbj0-oeJO5ufi6zXxbSKBtDeu5JHlNg-BJU,19837
+sqlalchemy/sql/ddl.py,sha256=nkjd_B4lKwC2GeyPjE0ZtRB9RKXccQL1g1XoZ4p69sM,37540
+sqlalchemy/sql/default_comparator.py,sha256=QaowWtW4apULq_aohDvmj97j0sDtHQQjMRdNxXm83vk,10447
+sqlalchemy/sql/dml.py,sha256=7846H52IMJfMYi5Jd-Cv6Hy9hZM4dkonXbjfBjl5ED4,33330
+sqlalchemy/sql/elements.py,sha256=MLeecC5dMqeekZmFbPn0J-ODKJj5DBDE5v6kuSkq66I,132898
+sqlalchemy/sql/expression.py,sha256=vFZ9MmBlC9Fg8IYzLMAwXgcsnXZhkZbUstY6dO8BFGY,5833
+sqlalchemy/sql/functions.py,sha256=ZYKyvPnVKZMtHyyjyNwK0M5UWPrZmFz3vtTqHN-8658,18533
+sqlalchemy/sql/naming.py,sha256=foE2lAzngLCFXCeHrpv0S4zT23GCnZLCiata2MPo0kE,4662
+sqlalchemy/sql/operators.py,sha256=UeZgb7eRhWd4H7OfJZkx0ZWOjvo5chIUXQsBAIeeTDY,23013
+sqlalchemy/sql/schema.py,sha256=awhLY5YjUBah8ZYxW9FBfe6lH0v4fW0UJLTNApnx7E0,145511
+sqlalchemy/sql/selectable.py,sha256=o1Hom00WGHjI21Mdb5fkX-f0k2nksQNb_txT0KWK1zQ,118995
+sqlalchemy/sql/sqltypes.py,sha256=JGxizqIjO1WFuZpppWj1Yi5cvCyBczb1JqUQeuhQn8s,54879
+sqlalchemy/sql/type_api.py,sha256=Xe6yH4slgdLA8HRjT19GBOou51SS9o4oUhyK0xfn04c,42846
+sqlalchemy/sql/util.py,sha256=7AsOsyhIq2eSLMWtwvqfTLc2MdCotGzEKQKFE3wk5sk,20382
+sqlalchemy/sql/visitors.py,sha256=4ipGvAkqFaSAWgyNuKjx5x_ms8GIy9aq-wC5pj4-Z3g,10271
+sqlalchemy/testing/__init__.py,sha256=MwKimX0atzs_SmG2j74GXLiyI8O56e3DLq96tcoL0TM,1095
+sqlalchemy/testing/assertions.py,sha256=r1I2nHC599VZcY-5g0JYRQl8bl9kjkf6WFOooOmJ2eE,16112
+sqlalchemy/testing/assertsql.py,sha256=-fP9Iuhdu52BJoT1lEj_KED8jy5ay_XiJu7i4Ry9eWA,12335
+sqlalchemy/testing/config.py,sha256=nqvVm55Vk0BVNjk1Wj3aYR65j_EEEepfB-W9QSFLU-k,2469
+sqlalchemy/testing/distutils_run.py,sha256=tkURrZRwgFiSwseKm1iJRkSjKf2Rtsb3pOXRWtACTHI,247
+sqlalchemy/testing/engines.py,sha256=u6GlDMXt0FKqVTQe_QJ5JXAnkA6W-xdw6Fe_5gMAQhg,9359
+sqlalchemy/testing/entities.py,sha256=IXqTgAihV-1TZyxL0MWdZzu4rFtxdbWKWFetIJWNGM4,2992
+sqlalchemy/testing/exclusions.py,sha256=WuH_tVK5fZJWe8Hu2LzNB4HNQMa_iAUaGC-_6mHUdIM,12570
+sqlalchemy/testing/fixtures.py,sha256=q4nK-81z2EWs17TjeJtPmnaJUCtDdoUiIU7jgLq3l_w,10721
+sqlalchemy/testing/mock.py,sha256=vj5q-GzJrLW6mMVDLqsppxBu_p7K49VvjfiVt5tn0o8,630
+sqlalchemy/testing/pickleable.py,sha256=8I8M4H1XN29pZPMxZdYkmpKWfwzPsUn6WK5FX4UP9L4,2641
+sqlalchemy/testing/profiling.py,sha256=Q_wOTS5JtcGBcs2eCYIvoRoDS_FW_HcfEW3hXWB87Zg,8392
+sqlalchemy/testing/provision.py,sha256=mU9g6JZEHIshqUkE6PWu-t61FVPs_cUJtEtVFRavj9g,9377
+sqlalchemy/testing/replay_fixture.py,sha256=iAxg7XsFkKSCcJnrNPQNJfjMxOgeBAa-ShOkywWPJ4w,5429
+sqlalchemy/testing/requirements.py,sha256=aIdvbfugMzrlVdldEbpcwretX-zjiukPhPUSZgulrzU,19949
+sqlalchemy/testing/runner.py,sha256=hpNH6MNTif4TnBRySxpm92KgFwDK0mOa8eF7wZXumTI,1607
+sqlalchemy/testing/schema.py,sha256=agOzrIMvmuUCeVZY5mYjJ1eJmOP69-wa0gZALtNtJBk,3446
+sqlalchemy/testing/util.py,sha256=IJ688AWzichtXVwWgYf_A4BUbcXPGsK6BQP5fvY3h-U,7544
+sqlalchemy/testing/warnings.py,sha256=-KskRAh1RkJ_69UIY_WR7i15u21U3gDLQ6nKlnJT7_w,987
+sqlalchemy/testing/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sqlalchemy/testing/plugin/bootstrap.py,sha256=Iw8R-d1gqoz_NKFtPyGfdX56QPcQHny_9Lvwov65aVY,1634
+sqlalchemy/testing/plugin/noseplugin.py,sha256=In79x6zs9DOngfoYpaHojihWlSd4PeS7Nwzh3M_KNM4,2847
+sqlalchemy/testing/plugin/plugin_base.py,sha256=h4RI4nPNdNq9kYABp6IP89Eknm29q8usgO-nWb8Eobc,17120
+sqlalchemy/testing/plugin/pytestplugin.py,sha256=Pbc62y7Km0PHXd4M9dm5ThBwrlXkM4WtIX-W1pOaM84,5812
+sqlalchemy/testing/suite/__init__.py,sha256=wqCTrb28i5FwhQZOyXVlnz3mA94iQOUBio7lszkFq-g,471
+sqlalchemy/testing/suite/test_ddl.py,sha256=Baw0ou9nKdADmrRuXgWzF1FZx0rvkkw3JHc6yw5BN0M,1838
+sqlalchemy/testing/suite/test_dialect.py,sha256=ORQPXUt53XtO-5ENlWgs8BpsSdPBDjyMRl4W2UjXLI4,1165
+sqlalchemy/testing/suite/test_insert.py,sha256=nP0mgVpsVs72MHMADmihB1oXLbFBpsYsLGO3BlQ7RLU,8132
+sqlalchemy/testing/suite/test_reflection.py,sha256=HtJRsJ_vuNMrOhnPTvuIvRg66OakSaSpeCU36zhaSPg,24616
+sqlalchemy/testing/suite/test_results.py,sha256=oAcO1tD0I7c9ErMeSvSZBZfz1IBDMJHJTf64Y1pBodk,6685
+sqlalchemy/testing/suite/test_select.py,sha256=u0wAz1g-GrAFdZpG4zwSrVckVtjULvjlbd0Z1U1jHAA,5729
+sqlalchemy/testing/suite/test_sequence.py,sha256=fmBR4Pc5tOLSkXFxfcqwGx1z3xaxeJeUyqDnTakKTBU,3831
+sqlalchemy/testing/suite/test_types.py,sha256=UKa-ZPdpz16mVKvT-9ISRAfqdrqiKaE7IA-_phQQuxo,17088
+sqlalchemy/testing/suite/test_update_delete.py,sha256=r5p467r-EUsjEcWGfUE0VPIfN4LLXZpLRnnyBLyyjl4,1582
+sqlalchemy/util/__init__.py,sha256=G06a5vBxg27RtWzY6dPZHt1FO8qtOiy_2C9PHTTMblI,2520
+sqlalchemy/util/_collections.py,sha256=JZkeYK4GcIE1A5s6MAvHhmUp_X4wp6r7vMGT-iMftZ8,27842
+sqlalchemy/util/compat.py,sha256=80OXp3D-F_R-pLf7s-zITPlfCqG1s_5o6KTlY1g2p0Q,6821
+sqlalchemy/util/deprecations.py,sha256=D_LTsfb9jHokJtPEWNDRMJOc372xRGNjputAiTIysRU,4403
+sqlalchemy/util/langhelpers.py,sha256=Nhe3Y9ieK6JaFYejjYosVOjOSSIBT2V385Hu6HGcyZk,41607
+sqlalchemy/util/queue.py,sha256=rs3W0LDhKt7M_dlQEjYpI9KS-bzQmmwN38LE_-RRVvU,6548
+sqlalchemy/util/topological.py,sha256=xKsYjjAat4p8cdqRHKwibLzr6WONbPTC0X8Mqg7jYno,2794
+sqlalchemy/sql/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/cymysql.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/util.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_reflection.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/path_registry.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_update_delete.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/baked.cpython-35.pyc,,
+sqlalchemy/dialects/sybase/__pycache__/pysybase.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/entities.cpython-35.pyc,,
+sqlalchemy/dialects/firebird/__pycache__/kinterbasdb.cpython-35.pyc,,
+sqlalchemy/event/__pycache__/registry.cpython-35.pyc,,
+sqlalchemy/connectors/__pycache__/zxJDBC.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/result.cpython-35.pyc,,
+sqlalchemy/dialects/mssql/__pycache__/pyodbc.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/hybrid.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/interfaces.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/elements.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/naming.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/psycopg2.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/fixtures.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/compiler.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/state.cpython-35.pyc,,
+sqlalchemy/util/__pycache__/topological.cpython-35.pyc,,
+sqlalchemy/util/__pycache__/queue.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_select.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/dml.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/pyodbc.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/mapper.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/evaluator.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/crud.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/automap.cpython-35.pyc,,
+sqlalchemy/util/__pycache__/langhelpers.cpython-35.pyc,,
+sqlalchemy/__pycache__/exc.cpython-35.pyc,,
+sqlalchemy/dialects/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/horizontal_shard.cpython-35.pyc,,
+sqlalchemy/__pycache__/interfaces.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/properties.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/dynamic.cpython-35.pyc,,
+sqlalchemy/dialects/firebird/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_sequence.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/reflection.cpython-35.pyc,,
+sqlalchemy/testing/plugin/__pycache__/bootstrap.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/json.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/persistence.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/assertsql.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/compiler.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/annotation.cpython-35.pyc,,
+sqlalchemy/util/__pycache__/_collections.cpython-35.pyc,,
+sqlalchemy/dialects/mssql/__pycache__/zxjdbc.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/pypostgresql.cpython-35.pyc,,
+sqlalchemy/dialects/sybase/__pycache__/mxodbc.cpython-35.pyc,,
+sqlalchemy/testing/plugin/__pycache__/plugin_base.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/session.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/distutils_run.cpython-35.pyc,,
+sqlalchemy/__pycache__/processors.cpython-35.pyc,,
+sqlalchemy/dialects/mssql/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/functions.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/ranges.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/loading.cpython-35.pyc,,
+sqlalchemy/ext/declarative/__pycache__/api.cpython-35.pyc,,
+sqlalchemy/dialects/mssql/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/dialects/sqlite/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/exc.cpython-35.pyc,,
+sqlalchemy/dialects/oracle/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_insert.cpython-35.pyc,,
+sqlalchemy/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/selectable.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_types.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/strategy_options.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/collections.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/dependency.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/orderinglist.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_ddl.cpython-35.pyc,,
+sqlalchemy/event/__pycache__/attr.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/sqltypes.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/type_api.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/schema.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/scoping.cpython-35.pyc,,
+sqlalchemy/dialects/oracle/__pycache__/cx_oracle.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/provision.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/relationships.cpython-35.pyc,,
+sqlalchemy/dialects/mssql/__pycache__/pymssql.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/zxjdbc.cpython-35.pyc,,
+sqlalchemy/event/__pycache__/api.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/ddl.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/hstore.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/unitofwork.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/default.cpython-35.pyc,,
+sqlalchemy/testing/plugin/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/databases/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/identity.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/util.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/util.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/config.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/descriptor_props.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/url.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/sync.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/warnings.cpython-35.pyc,,
+sqlalchemy/util/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/strategies.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_results.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/schema.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/util/__pycache__/compat.cpython-35.pyc,,
+sqlalchemy/__pycache__/events.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/zxjdbc.cpython-35.pyc,,
+sqlalchemy/connectors/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/dialects/sqlite/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/testing/plugin/__pycache__/pytestplugin.cpython-35.pyc,,
+sqlalchemy/ext/declarative/__pycache__/clsregistry.cpython-35.pyc,,
+sqlalchemy/dialects/oracle/__pycache__/zxjdbc.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/event/__pycache__/legacy.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/constraints.cpython-35.pyc,,
+sqlalchemy/dialects/sybase/__pycache__/pyodbc.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/instrumentation.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/profiling.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/pg8000.cpython-35.pyc,,
+sqlalchemy/__pycache__/types.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/instrumentation.cpython-35.pyc,,
+sqlalchemy/dialects/__pycache__/postgres.cpython-35.pyc,,
+sqlalchemy/dialects/sybase/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/dialects/firebird/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/dialects/oracle/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/pymysql.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/engines.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/mysqlconnector.cpython-35.pyc,,
+sqlalchemy/dialects/sqlite/__pycache__/pysqlcipher.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/psycopg2cffi.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/mysqldb.cpython-35.pyc,,
+sqlalchemy/dialects/postgresql/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/event/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/requirements.cpython-35.pyc,,
+sqlalchemy/dialects/mssql/__pycache__/adodbapi.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/query.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/mutable.cpython-35.pyc,,
+sqlalchemy/event/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/expression.cpython-35.pyc,,
+sqlalchemy/__pycache__/pool.cpython-35.pyc,,
+sqlalchemy/connectors/__pycache__/pyodbc.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/gaerdbms.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/assertions.cpython-35.pyc,,
+sqlalchemy/__pycache__/schema.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/oursql.cpython-35.pyc,,
+sqlalchemy/dialects/firebird/__pycache__/fdb.cpython-35.pyc,,
+sqlalchemy/__pycache__/log.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/strategies.cpython-35.pyc,,
+sqlalchemy/dialects/sybase/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/visitors.cpython-35.pyc,,
+sqlalchemy/connectors/__pycache__/mxodbc.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/threadlocal.cpython-35.pyc,,
+sqlalchemy/ext/declarative/__pycache__/base.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/runner.cpython-35.pyc,,
+sqlalchemy/util/__pycache__/deprecations.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/test_dialect.cpython-35.pyc,,
+sqlalchemy/engine/__pycache__/interfaces.cpython-35.pyc,,
+sqlalchemy/testing/suite/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/ext/declarative/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/testing/plugin/__pycache__/noseplugin.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/associationproxy.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/pickleable.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/attributes.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/default_comparator.cpython-35.pyc,,
+sqlalchemy/dialects/mysql/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/dialects/mssql/__pycache__/mxodbc.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/__init__.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/exclusions.cpython-35.pyc,,
+sqlalchemy/__pycache__/inspection.cpython-35.pyc,,
+sqlalchemy/dialects/mssql/__pycache__/information_schema.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/mock.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/deprecated_interfaces.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/operators.cpython-35.pyc,,
+sqlalchemy/testing/__pycache__/replay_fixture.cpython-35.pyc,,
+sqlalchemy/orm/__pycache__/events.cpython-35.pyc,,
+sqlalchemy/dialects/sqlite/__pycache__/pysqlite.cpython-35.pyc,,
+sqlalchemy/ext/__pycache__/serializer.cpython-35.pyc,,
+sqlalchemy/sql/__pycache__/util.cpython-35.pyc,,
diff --git a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/WHEEL b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/WHEEL
similarity index 100%
rename from lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/WHEEL
rename to lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/WHEEL
diff --git a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/metadata.json b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/metadata.json
similarity index 86%
rename from lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/metadata.json
rename to lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/metadata.json
index f0f1347..c660fde 100644
--- a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/metadata.json
+++ b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/metadata.json
@@ -1 +1 @@
-{"generator": "bdist_wheel (0.26.0)", "summary": "Database Abstraction Library", "classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: Jython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends", "Operating System :: OS Independent"], "extensions": {"python.details": {"project_urls": {"Home": "http://www.sqlalchemy.org"}, "contacts": [{"email": "mike_mp@zzzcomputing.com", "name": "Mike Bayer", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "MIT License", "metadata_version": "2.0", "name": "SQLAlchemy", "version": "0.9.7", "test_requires": [{"requires": ["mock", "pytest (>=2.5.2)"]}]}
\ No newline at end of file
+{"generator": "bdist_wheel (0.26.0)", "summary": "Database Abstraction Library", "classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: Jython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Database :: Front-Ends", "Operating System :: OS Independent"], "extensions": {"python.details": {"project_urls": {"Home": "http://www.sqlalchemy.org"}, "contacts": [{"email": "mike_mp@zzzcomputing.com", "name": "Mike Bayer", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "MIT License", "metadata_version": "2.0", "name": "SQLAlchemy", "version": "1.0.12", "test_requires": [{"requires": ["mock", "pytest (>=2.5.2)", "pytest-xdist"]}]}
\ No newline at end of file
diff --git a/lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/top_level.txt b/lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/top_level.txt
similarity index 100%
rename from lib/python3.5/site-packages/SQLAlchemy-0.9.7.dist-info/top_level.txt
rename to lib/python3.5/site-packages/SQLAlchemy-1.0.12.dist-info/top_level.txt
diff --git a/lib/python3.5/site-packages/sqlalchemy/__init__.py b/lib/python3.5/site-packages/sqlalchemy/__init__.py
index 2815b1f..3008120 100644
--- a/lib/python3.5/site-packages/sqlalchemy/__init__.py
+++ b/lib/python3.5/site-packages/sqlalchemy/__init__.py
@@ -1,5 +1,5 @@
 # sqlalchemy/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
 # <see AUTHORS file>
 #
 # This module is part of SQLAlchemy and is released under
@@ -15,6 +15,7 @@ from .sql import (
     case,
     cast,
     collate,
+    column,
     delete,
     desc,
     distinct,
@@ -24,6 +25,7 @@ from .sql import (
     extract,
     false,
     func,
+    funcfilter,
     insert,
     intersect,
     intersect_all,
@@ -39,6 +41,7 @@ from .sql import (
     over,
     select,
     subquery,
+    table,
     text,
     true,
     tuple_,
@@ -117,7 +120,7 @@ from .schema import (
 from .inspection import inspect
 from .engine import create_engine, engine_from_config
 
-__version__ = '0.9.7'
+__version__ = '1.0.12'
 
 
 def __go(lcls):
diff --git a/lib/python3.5/site-packages/sqlalchemy/connectors/__init__.py b/lib/python3.5/site-packages/sqlalchemy/connectors/__init__.py
index 9253a21..d72c390 100644
--- a/lib/python3.5/site-packages/sqlalchemy/connectors/__init__.py
+++ b/lib/python3.5/site-packages/sqlalchemy/connectors/__init__.py
@@ -1,5 +1,5 @@
 # connectors/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
 # <see AUTHORS file>
 #
 # This module is part of SQLAlchemy and is released under
diff --git a/lib/python3.5/site-packages/sqlalchemy/connectors/mxodbc.py b/lib/python3.5/site-packages/sqlalchemy/connectors/mxodbc.py
index 851dc11..9fc0ce6 100644
--- a/lib/python3.5/site-packages/sqlalchemy/connectors/mxodbc.py
+++ b/lib/python3.5/site-packages/sqlalchemy/connectors/mxodbc.py
@@ -1,5 +1,5 @@
 # connectors/mxodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
 # <see AUTHORS file>
 #
 # This module is part of SQLAlchemy and is released under
diff --git a/lib/python3.5/site-packages/sqlalchemy/connectors/mysqldb.py b/lib/python3.5/site-packages/sqlalchemy/connectors/mysqldb.py
deleted file mode 100644
index f936825..0000000
--- a/lib/python3.5/site-packages/sqlalchemy/connectors/mysqldb.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# connectors/mysqldb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Define behaviors common to MySQLdb dialects.
-
-Currently includes MySQL and Drizzle.
-
-"""
-
-from . import Connector
-from ..engine import base as engine_base, default
-from ..sql import operators as sql_operators
-from .. import exc, log, schema, sql, types as sqltypes, util, processors
-import re
-
-
-# the subclassing of Connector by all classes
-# here is not strictly necessary
-
-
-class MySQLDBExecutionContext(Connector):
-
-    @property
-    def rowcount(self):
-        if hasattr(self, '_rowcount'):
-            return self._rowcount
-        else:
-            return self.cursor.rowcount
-
-
-class MySQLDBCompiler(Connector):
-    def visit_mod_binary(self, binary, operator, **kw):
-        return self.process(binary.left, **kw) + " %% " + \
-            self.process(binary.right, **kw)
-
-    def post_process_text(self, text):
-        return text.replace('%', '%%')
-
-
-class MySQLDBIdentifierPreparer(Connector):
-
-    def _escape_identifier(self, value):
-        value = value.replace(self.escape_quote, self.escape_to_quote)
-        return value.replace("%", "%%")
-
-
-class MySQLDBConnector(Connector):
-    driver = 'mysqldb'
-    supports_unicode_statements = False
-    supports_sane_rowcount = True
-    supports_sane_multi_rowcount = True
-
-    supports_native_decimal = True
-
-    default_paramstyle = 'format'
-
-    @classmethod
-    def dbapi(cls):
-        # is overridden when pymysql is used
-        return __import__('MySQLdb')
-
-
-    def do_executemany(self, cursor, statement, parameters, context=None):
-        rowcount = cursor.executemany(statement, parameters)
-        if context is not None:
-            context._rowcount = rowcount
-
-    def create_connect_args(self, url):
-        opts = url.translate_connect_args(database='db', username='user',
-                                          password='passwd')
-        opts.update(url.query)
-
-        util.coerce_kw_type(opts, 'compress', bool)
-        util.coerce_kw_type(opts, 'connect_timeout', int)
-        util.coerce_kw_type(opts, 'read_timeout', int)
-        util.coerce_kw_type(opts, 'client_flag', int)
-        util.coerce_kw_type(opts, 'local_infile', int)
-        # Note: using either of the below will cause all strings to be returned
-        # as Unicode, both in raw SQL operations and with column types like
-        # String and MSString.
-        util.coerce_kw_type(opts, 'use_unicode', bool)
-        util.coerce_kw_type(opts, 'charset', str)
-
-        # Rich values 'cursorclass' and 'conv' are not supported via
-        # query string.
-
-        ssl = {}
-        keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']
-        for key in keys:
-            if key in opts:
-                ssl[key[4:]] = opts[key]
-                util.coerce_kw_type(ssl, key[4:], str)
-                del opts[key]
-        if ssl:
-            opts['ssl'] = ssl
-
-        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
-        # supports_sane_rowcount.
-        client_flag = opts.get('client_flag', 0)
-        if self.dbapi is not None:
-            try:
-                CLIENT_FLAGS = __import__(
-                    self.dbapi.__name__ + '.constants.CLIENT'
-                ).constants.CLIENT
-                client_flag |= CLIENT_FLAGS.FOUND_ROWS
-            except (AttributeError, ImportError):
-                self.supports_sane_rowcount = False
-        opts['client_flag'] = client_flag
-        return [[], opts]
-
-    def _get_server_version_info(self, connection):
-        dbapi_con = connection.connection
-        version = []
-        r = re.compile('[.\-]')
-        for n in r.split(dbapi_con.get_server_info()):
-            try:
-                version.append(int(n))
-            except ValueError:
-                version.append(n)
-        return tuple(version)
-
-    def _extract_error_code(self, exception):
-        return exception.args[0]
-
-    def _detect_charset(self, connection):
-        """Sniff out the character set in use for connection results."""
-
-        try:
-            # note: the SQL here would be
-            # "SHOW VARIABLES LIKE 'character_set%%'"
-            cset_name = connection.connection.character_set_name
-        except AttributeError:
-            util.warn(
-                "No 'character_set_name' can be detected with "
-                "this MySQL-Python version; "
-                "please upgrade to a recent version of MySQL-Python.
" - "Assuming latin1.") - return 'latin1' - else: - return cset_name() - diff --git a/lib/python3.5/site-packages/sqlalchemy/connectors/pyodbc.py b/lib/python3.5/site-packages/sqlalchemy/connectors/pyodbc.py index ef72c80..68bbcc4 100644 --- a/lib/python3.5/site-packages/sqlalchemy/connectors/pyodbc.py +++ b/lib/python3.5/site-packages/sqlalchemy/connectors/pyodbc.py @@ -1,5 +1,5 @@ # connectors/pyodbc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -26,7 +26,7 @@ class PyODBCConnector(Connector): supports_native_decimal = True default_paramstyle = 'named' - # for non-DSN connections, this should + # for non-DSN connections, this *may* be used to # hold the desired driver name pyodbc_driver_name = None @@ -75,10 +75,21 @@ class PyODBCConnector(Connector): if 'port' in keys and 'port' not in query: port = ',%d' % int(keys.pop('port')) - connectors = ["DRIVER={%s}" % - keys.pop('driver', self.pyodbc_driver_name), - 'Server=%s%s' % (keys.pop('host', ''), port), - 'Database=%s' % keys.pop('database', '')] + connectors = [] + driver = keys.pop('driver', self.pyodbc_driver_name) + if driver is None: + util.warn( + "No driver name specified; " + "this is expected by PyODBC when using " + "DSN-less connections") + else: + connectors.append("DRIVER={%s}" % driver) + + connectors.extend( + [ + 'Server=%s%s' % (keys.pop('host', ''), port), + 'Database=%s' % keys.pop('database', '') + ]) user = keys.pop("user", None) if user: diff --git a/lib/python3.5/site-packages/sqlalchemy/connectors/zxJDBC.py b/lib/python3.5/site-packages/sqlalchemy/connectors/zxJDBC.py index c0af742..e7b2dc9 100644 --- a/lib/python3.5/site-packages/sqlalchemy/connectors/zxJDBC.py +++ b/lib/python3.5/site-packages/sqlalchemy/connectors/zxJDBC.py @@ -1,5 +1,5 @@ # connectors/zxJDBC.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/cprocessors.cpython-35m-darwin.so b/lib/python3.5/site-packages/sqlalchemy/cprocessors.cpython-35m-darwin.so index a76466e..a371601 100755 Binary files a/lib/python3.5/site-packages/sqlalchemy/cprocessors.cpython-35m-darwin.so and b/lib/python3.5/site-packages/sqlalchemy/cprocessors.cpython-35m-darwin.so differ diff --git a/lib/python3.5/site-packages/sqlalchemy/cresultproxy.cpython-35m-darwin.so b/lib/python3.5/site-packages/sqlalchemy/cresultproxy.cpython-35m-darwin.so index 9a9e8e9..47c9020 100755 Binary files a/lib/python3.5/site-packages/sqlalchemy/cresultproxy.cpython-35m-darwin.so and b/lib/python3.5/site-packages/sqlalchemy/cresultproxy.cpython-35m-darwin.so differ diff --git a/lib/python3.5/site-packages/sqlalchemy/cutils.cpython-35m-darwin.so b/lib/python3.5/site-packages/sqlalchemy/cutils.cpython-35m-darwin.so index c262378..53436ef 100755 Binary files a/lib/python3.5/site-packages/sqlalchemy/cutils.cpython-35m-darwin.so and b/lib/python3.5/site-packages/sqlalchemy/cutils.cpython-35m-darwin.so differ diff --git a/lib/python3.5/site-packages/sqlalchemy/databases/__init__.py b/lib/python3.5/site-packages/sqlalchemy/databases/__init__.py index 19a7ad6..0bfc937 100644 --- a/lib/python3.5/site-packages/sqlalchemy/databases/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/databases/__init__.py @@ -1,5 +1,5 @@ # databases/__init__.py -# 
Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -13,7 +13,6 @@ from ..dialects.sqlite import base as sqlite from ..dialects.postgresql import base as postgresql postgres = postgresql from ..dialects.mysql import base as mysql -from ..dialects.drizzle import base as drizzle from ..dialects.oracle import base as oracle from ..dialects.firebird import base as firebird from ..dialects.mssql import base as mssql @@ -21,7 +20,6 @@ from ..dialects.sybase import base as sybase __all__ = ( - 'drizzle', 'firebird', 'mssql', 'mysql', diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/__init__.py b/lib/python3.5/site-packages/sqlalchemy/dialects/__init__.py index f847a61..5653f5b 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/__init__.py @@ -1,12 +1,11 @@ # dialects/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php __all__ = ( - 'drizzle', 'firebird', 'mssql', 'mysql', diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/__init__.py b/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/__init__.py deleted file mode 100644 index 1392b8e..0000000 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -from sqlalchemy.dialects.drizzle import base, mysqldb - -base.dialect = mysqldb.dialect - -from sqlalchemy.dialects.drizzle.base import \ - BIGINT, BINARY, BLOB, \ - BOOLEAN, CHAR, DATE, \ - DATETIME, DECIMAL, DOUBLE, \ - ENUM, FLOAT, INTEGER, \ - NUMERIC, REAL, TEXT, \ - TIME, TIMESTAMP, VARBINARY, \ - VARCHAR, dialect - -__all__ = ( - 'BIGINT', 'BINARY', 'BLOB', - 'BOOLEAN', 'CHAR', 'DATE', - 'DATETIME', 'DECIMAL', 'DOUBLE', - 'ENUM', 'FLOAT', 'INTEGER', - 'NUMERIC', 'REAL', 'TEXT', - 'TIME', 'TIMESTAMP', 'VARBINARY', - 'VARCHAR', 'dialect' -) diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/base.py b/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/base.py deleted file mode 100644 index ea4e9bf..0000000 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/base.py +++ /dev/null @@ -1,499 +0,0 @@ -# drizzle/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors -# -# Copyright (C) 2010-2011 Monty Taylor -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -""" - -.. dialect:: drizzle - :name: Drizzle - -Drizzle is a variant of MySQL. Unlike MySQL, Drizzle's default storage engine -is InnoDB (transactions, foreign-keys) rather than MyISAM. For more -`Notable Differences `_, visit -the `Drizzle Documentation `_. - -The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of -the :doc:`SQLAlchemy MySQL ` documentation is also relevant. 
- - -""" - -from sqlalchemy import exc -from sqlalchemy import log -from sqlalchemy import types as sqltypes -from sqlalchemy.engine import reflection -from sqlalchemy.dialects.mysql import base as mysql_dialect -from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \ - BLOB, BINARY, VARBINARY - - -class _NumericType(object): - """Base for Drizzle numeric types.""" - - def __init__(self, **kw): - super(_NumericType, self).__init__(**kw) - - -class _FloatType(_NumericType, sqltypes.Float): - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - if isinstance(self, (REAL, DOUBLE)) and \ - ( - (precision is None and scale is not None) or - (precision is not None and scale is None) - ): - raise exc.ArgumentError( - "You must specify both precision and scale or omit " - "both altogether.") - - super(_FloatType, self).__init__(precision=precision, - asdecimal=asdecimal, **kw) - self.scale = scale - - -class _StringType(mysql_dialect._StringType): - """Base for Drizzle string types.""" - - def __init__(self, collation=None, binary=False, **kw): - kw['national'] = False - super(_StringType, self).__init__(collation=collation, binary=binary, - **kw) - - -class NUMERIC(_NumericType, sqltypes.NUMERIC): - """Drizzle NUMERIC type.""" - - __visit_name__ = 'NUMERIC' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a NUMERIC. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - - super(NUMERIC, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class DECIMAL(_NumericType, sqltypes.DECIMAL): - """Drizzle DECIMAL type.""" - - __visit_name__ = 'DECIMAL' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a DECIMAL. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - super(DECIMAL, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class DOUBLE(_FloatType): - """Drizzle DOUBLE type.""" - - __visit_name__ = 'DOUBLE' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a DOUBLE. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - - super(DOUBLE, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class REAL(_FloatType, sqltypes.REAL): - """Drizzle REAL type.""" - - __visit_name__ = 'REAL' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a REAL. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - - super(REAL, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class FLOAT(_FloatType, sqltypes.FLOAT): - """Drizzle FLOAT type.""" - - __visit_name__ = 'FLOAT' - - def __init__(self, precision=None, scale=None, asdecimal=False, **kw): - """Construct a FLOAT. - - :param precision: Total digits in this number. 
If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - - super(FLOAT, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - def bind_processor(self, dialect): - return None - - -class INTEGER(sqltypes.INTEGER): - """Drizzle INTEGER type.""" - - __visit_name__ = 'INTEGER' - - def __init__(self, **kw): - """Construct an INTEGER.""" - - super(INTEGER, self).__init__(**kw) - - -class BIGINT(sqltypes.BIGINT): - """Drizzle BIGINTEGER type.""" - - __visit_name__ = 'BIGINT' - - def __init__(self, **kw): - """Construct a BIGINTEGER.""" - - super(BIGINT, self).__init__(**kw) - - -class TIME(mysql_dialect.TIME): - """Drizzle TIME type.""" - - -class TIMESTAMP(sqltypes.TIMESTAMP): - """Drizzle TIMESTAMP type.""" - - __visit_name__ = 'TIMESTAMP' - - -class TEXT(_StringType, sqltypes.TEXT): - """Drizzle TEXT type, for text up to 2^16 characters.""" - - __visit_name__ = 'TEXT' - - def __init__(self, length=None, **kw): - """Construct a TEXT. - - :param length: Optional, if provided the server may optimize storage - by substituting the smallest TEXT type sufficient to store - ``length`` characters. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - - super(TEXT, self).__init__(length=length, **kw) - - -class VARCHAR(_StringType, sqltypes.VARCHAR): - """Drizzle VARCHAR type, for variable-length character data.""" - - __visit_name__ = 'VARCHAR' - - def __init__(self, length=None, **kwargs): - """Construct a VARCHAR. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - - super(VARCHAR, self).__init__(length=length, **kwargs) - - -class CHAR(_StringType, sqltypes.CHAR): - """Drizzle CHAR type, for fixed-length character data.""" - - __visit_name__ = 'CHAR' - - def __init__(self, length=None, **kwargs): - """Construct a CHAR. - - :param length: Maximum data length, in characters. - - :param binary: Optional, use the default binary collation for the - national character set. This does not affect the type of data - stored, use a BINARY type for binary data. - - :param collation: Optional, request a particular collation. Must be - compatible with the national character set. - - """ - - super(CHAR, self).__init__(length=length, **kwargs) - - -class ENUM(mysql_dialect.ENUM): - """Drizzle ENUM type.""" - - def __init__(self, *enums, **kw): - """Construct an ENUM. - - Example: - - Column('myenum', ENUM("foo", "bar", "baz")) - - :param enums: The range of valid values for this ENUM. Values will be - quoted when generating the schema according to the quoting flag (see - below). - - :param strict: Defaults to False: ensure that a given value is in this - ENUM's range of permissible values when inserting or updating rows. 
- Note that Drizzle will not raise a fatal error if you attempt to - store an out of range value- an alternate value will be stored - instead. - (See Drizzle ENUM documentation.) - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - :param quoting: Defaults to 'auto': automatically determine enum value - quoting. If all enum values are surrounded by the same quoting - character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. - - 'quoted': values in enums are already quoted, they will be used - directly when generating the schema - this usage is deprecated. - - 'unquoted': values in enums are not quoted, they will be escaped and - surrounded by single quotes when generating the schema. - - Previous versions of this type always required manually quoted - values to be supplied; future versions will always quote the string - literals for you. This is a transitional option. - - """ - - super(ENUM, self).__init__(*enums, **kw) - - -class _DrizzleBoolean(sqltypes.Boolean): - def get_dbapi_type(self, dbapi): - return dbapi.NUMERIC - - -colspecs = { - sqltypes.Numeric: NUMERIC, - sqltypes.Float: FLOAT, - sqltypes.Time: TIME, - sqltypes.Enum: ENUM, - sqltypes.Boolean: _DrizzleBoolean, -} - - -# All the types we have in Drizzle -ischema_names = { - 'BIGINT': BIGINT, - 'BINARY': BINARY, - 'BLOB': BLOB, - 'BOOLEAN': BOOLEAN, - 'CHAR': CHAR, - 'DATE': DATE, - 'DATETIME': DATETIME, - 'DECIMAL': DECIMAL, - 'DOUBLE': DOUBLE, - 'ENUM': ENUM, - 'FLOAT': FLOAT, - 'INT': INTEGER, - 'INTEGER': INTEGER, - 'NUMERIC': NUMERIC, - 'TEXT': TEXT, - 'TIME': TIME, - 'TIMESTAMP': TIMESTAMP, - 'VARBINARY': VARBINARY, - 'VARCHAR': VARCHAR, -} - - -class DrizzleCompiler(mysql_dialect.MySQLCompiler): - - def visit_typeclause(self, typeclause): - type_ = typeclause.type.dialect_impl(self.dialect) - if isinstance(type_, sqltypes.Integer): - return 'INTEGER' - else: - return super(DrizzleCompiler, self).visit_typeclause(typeclause) - - def visit_cast(self, cast, **kwargs): - type_ = self.process(cast.typeclause) - if type_ is None: - return self.process(cast.clause) - - return 'CAST(%s AS %s)' % (self.process(cast.clause), type_) - - -class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler): - pass - - -class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler): - def _extend_numeric(self, type_, spec): - return spec - - def _extend_string(self, type_, defaults, spec): - """Extend a string-type declaration with standard SQL - COLLATE annotations and Drizzle specific extensions. 
- - """ - - def attr(name): - return getattr(type_, name, defaults.get(name)) - - if attr('collation'): - collation = 'COLLATE %s' % type_.collation - elif attr('binary'): - collation = 'BINARY' - else: - collation = None - - return ' '.join([c for c in (spec, collation) - if c is not None]) - - def visit_NCHAR(self, type): - raise NotImplementedError("Drizzle does not support NCHAR") - - def visit_NVARCHAR(self, type): - raise NotImplementedError("Drizzle does not support NVARCHAR") - - def visit_FLOAT(self, type_): - if type_.scale is not None and type_.precision is not None: - return "FLOAT(%s, %s)" % (type_.precision, type_.scale) - else: - return "FLOAT" - - def visit_BOOLEAN(self, type_): - return "BOOLEAN" - - def visit_BLOB(self, type_): - return "BLOB" - - -class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext): - pass - - -class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer): - pass - - -@log.class_logger -class DrizzleDialect(mysql_dialect.MySQLDialect): - """Details of the Drizzle dialect. - - Not used directly in application code. - """ - - name = 'drizzle' - - _supports_cast = True - supports_sequences = False - supports_native_boolean = True - supports_views = False - - default_paramstyle = 'format' - colspecs = colspecs - - statement_compiler = DrizzleCompiler - ddl_compiler = DrizzleDDLCompiler - type_compiler = DrizzleTypeCompiler - ischema_names = ischema_names - preparer = DrizzleIdentifierPreparer - - def on_connect(self): - """Force autocommit - Drizzle Bug#707842 doesn't set this properly""" - - def connect(conn): - conn.autocommit(False) - return connect - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - """Return a Unicode SHOW TABLES from a given schema.""" - - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - - charset = 'utf8' - rp = connection.execute("SHOW TABLES FROM %s" % - self.identifier_preparer.quote_identifier(current_schema)) - return [row[0] for row in self._compat_fetchall(rp, charset=charset)] - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - raise NotImplementedError - - def _detect_casing(self, connection): - """Sniff out identifier case sensitivity. - - Cached per-connection. This value can not change without a server - restart. - """ - - return 0 - - def _detect_collations(self, connection): - """Pull the active COLLATIONS list from the server. - - Cached per-connection. - """ - - collations = {} - charset = self._connection_charset - rs = connection.execute( - 'SELECT CHARACTER_SET_NAME, COLLATION_NAME FROM' - ' data_dictionary.COLLATIONS') - for row in self._compat_fetchall(rs, charset): - collations[row[0]] = row[1] - return collations - - def _detect_ansiquotes(self, connection): - """Detect and adjust for the ANSI_QUOTES sql mode.""" - - self._server_ansiquotes = False - self._backslash_escapes = False - - diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/mysqldb.py b/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/mysqldb.py deleted file mode 100644 index 7d91cc3..0000000 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/drizzle/mysqldb.py +++ /dev/null @@ -1,48 +0,0 @@ -""" -.. 
dialect:: drizzle+mysqldb - :name: MySQL-Python - :dbapi: mysqldb - :connectstring: drizzle+mysqldb://:@[:]/ - :url: http://sourceforge.net/projects/mysql-python - - -""" - -from sqlalchemy.dialects.drizzle.base import ( - DrizzleDialect, - DrizzleExecutionContext, - DrizzleCompiler, - DrizzleIdentifierPreparer) -from sqlalchemy.connectors.mysqldb import ( - MySQLDBExecutionContext, - MySQLDBCompiler, - MySQLDBIdentifierPreparer, - MySQLDBConnector) - - -class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext, - DrizzleExecutionContext): - pass - - -class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler): - pass - - -class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer, - DrizzleIdentifierPreparer): - pass - - -class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect): - execution_ctx_cls = DrizzleExecutionContext_mysqldb - statement_compiler = DrizzleCompiler_mysqldb - preparer = DrizzleIdentifierPreparer_mysqldb - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - - return 'utf8' - - -dialect = DrizzleDialect_mysqldb diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/__init__.py b/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/__init__.py index 9e8a882..f27bdc0 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/__init__.py @@ -1,5 +1,5 @@ # firebird/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/base.py b/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/base.py index bcdb6d9..4dbf382 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/base.py @@ -1,5 +1,5 @@ # firebird/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -78,7 +78,6 @@ from sqlalchemy.sql import expression from sqlalchemy.engine import base, default, reflection from sqlalchemy.sql import compiler - from sqlalchemy.types import (BIGINT, BLOB, DATE, FLOAT, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, Integer) @@ -181,16 +180,16 @@ ischema_names = { # _FBDate, etc. 
as bind/result functionality is required) class FBTypeCompiler(compiler.GenericTypeCompiler): - def visit_boolean(self, type_): - return self.visit_SMALLINT(type_) + def visit_boolean(self, type_, **kw): + return self.visit_SMALLINT(type_, **kw) - def visit_datetime(self, type_): - return self.visit_TIMESTAMP(type_) + def visit_datetime(self, type_, **kw): + return self.visit_TIMESTAMP(type_, **kw) - def visit_TEXT(self, type_): + def visit_TEXT(self, type_, **kw): return "BLOB SUB_TYPE 1" - def visit_BLOB(self, type_): + def visit_BLOB(self, type_, **kw): return "BLOB SUB_TYPE 0" def _extend_string(self, type_, basic): @@ -200,16 +199,16 @@ class FBTypeCompiler(compiler.GenericTypeCompiler): else: return '%s CHARACTER SET %s' % (basic, charset) - def visit_CHAR(self, type_): - basic = super(FBTypeCompiler, self).visit_CHAR(type_) + def visit_CHAR(self, type_, **kw): + basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw) return self._extend_string(type_, basic) - def visit_VARCHAR(self, type_): + def visit_VARCHAR(self, type_, **kw): if not type_.length: raise exc.CompileError( "VARCHAR requires a length on dialect %s" % self.dialect.name) - basic = super(FBTypeCompiler, self).visit_VARCHAR(type_) + basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw) return self._extend_string(type_, basic) @@ -294,22 +293,22 @@ class FBCompiler(sql.compiler.SQLCompiler): def visit_sequence(self, seq): return "gen_id(%s, 1)" % self.preparer.format_sequence(seq) - def get_select_precolumns(self, select): + def get_select_precolumns(self, select, **kw): """Called when building a ``SELECT`` statement, position is just before column list Firebird puts the limit and offset right after the ``SELECT``... """ result = "" - if select._limit: - result += "FIRST %s " % self.process(sql.literal(select._limit)) - if select._offset: - result += "SKIP %s " % self.process(sql.literal(select._offset)) + if select._limit_clause is not None: + result += "FIRST %s " % self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + result += "SKIP %s " % self.process(select._offset_clause, **kw) if select._distinct: result += "DISTINCT " return result - def limit_clause(self, select): + def limit_clause(self, select, **kw): """Already taken care of in the `get_select_precolumns` method.""" return "" diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/fdb.py b/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/fdb.py index ddffc80..aff8cff 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/fdb.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/fdb.py @@ -1,5 +1,5 @@ # firebird/fdb.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/kinterbasdb.py index 6bd7887..3df9f73 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/kinterbasdb.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/firebird/kinterbasdb.py @@ -1,5 +1,5 @@ # firebird/kinterbasdb.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/__init__.py 
b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/__init__.py index d004776..8c9e858 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/__init__.py @@ -1,5 +1,5 @@ # mssql/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/adodbapi.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/adodbapi.py index e9927f8..60fa25d 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/adodbapi.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/adodbapi.py @@ -1,5 +1,5 @@ # mssql/adodbapi.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/base.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/base.py index 3d10904..36a8a93 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/base.py @@ -1,5 +1,5 @@ # mssql/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -166,6 +166,55 @@ how SQLAlchemy handles this: This is an auxiliary use case suitable for testing and bulk insert scenarios. +.. _legacy_schema_rendering: + +Rendering of SQL statements that include schema qualifiers +---------------------------------------------------------- + +When using :class:`.Table` metadata that includes a "schema" qualifier, +such as:: + + account_table = Table( + 'account', metadata, + Column('id', Integer, primary_key=True), + Column('info', String(100)), + schema="customer_schema" + ) + +the SQL Server dialect has a long-standing behavior of attempting +to turn the schema-qualified table name into an alias, such as:: + + >>> eng = create_engine("mssql+pymssql://mydsn") + >>> print(account_table.select().compile(eng)) + SELECT account_1.id, account_1.info + FROM customer_schema.account AS account_1 + +This behavior is legacy, does not function correctly for many forms +of SQL statements, and will be disabled by default in the 1.1 series +of SQLAlchemy. As of 1.0.5, the above statement will produce the following +warning:: + + SAWarning: legacy_schema_aliasing flag is defaulted to True; + some schema-qualified queries may not function correctly. + Consider setting this flag to False for modern SQL Server versions; + this flag will default to False in version 1.1 + +This warning encourages the :class:`.Engine` to be created as follows:: + + >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=False) + +With the flag disabled, the above SELECT statement produces:: + + >>> print(account_table.select().compile(eng)) + SELECT customer_schema.account.id, customer_schema.account.info + FROM customer_schema.account + +The warning is not emitted if the ``legacy_schema_aliasing`` flag is set +to either True or False. + +.. versionadded:: 1.0.5 - Added the ``legacy_schema_aliasing`` flag to disable + the SQL Server dialect's legacy behavior with schema-qualified table + names. This flag will default to False in version 1.1. 
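+As a minimal sketch (reusing the hypothetical ``mydsn`` DSN from the
+examples above), note that setting the flag explicitly to either value
+is sufficient to silence the warning::
+
+    # either engine is warning-free; only the False form uses the
+    # modern, non-aliased rendering of schema-qualified names
+    eng_legacy = create_engine(
+        "mssql+pymssql://mydsn", legacy_schema_aliasing=True)
+    eng_modern = create_engine(
+        "mssql+pymssql://mydsn", legacy_schema_aliasing=False)
+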
Collation Support ----------------- @@ -187,7 +236,7 @@ CREATE TABLE statement for this column will yield:: LIMIT/OFFSET Support -------------------- -MSSQL has no support for the LIMIT or OFFSET keysowrds. LIMIT is +MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is supported directly through the ``TOP`` Transact SQL keyword:: select.limit @@ -226,6 +275,53 @@ The DATE and TIME types are not available for MSSQL 2005 and previous - if a server version below 2008 is detected, DDL for these types will be issued as DATETIME. +.. _mssql_large_type_deprecation: + +Large Text/Binary Type Deprecation +---------------------------------- + +Per `SQL Server 2012/2014 Documentation `_, +the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server +in a future release. SQLAlchemy normally relates these types to the +:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes. + +In order to accommodate this change, a new flag ``deprecate_large_types`` +is added to the dialect, which will be automatically set based on detection +of the server version in use, if not otherwise set by the user. The +behavior of this flag is as follows: + +* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and + :class:`.LargeBinary` datatypes, when used to render DDL, will render the + types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``, + respectively. This is a new behavior as of the addition of this flag. + +* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and + :class:`.LargeBinary` datatypes, when used to render DDL, will render the + types ``NTEXT``, ``TEXT``, and ``IMAGE``, + respectively. This is the long-standing behavior of these types. + +* The flag begins with the value ``None``, before a database connection is + established. If the dialect is used to render DDL without the flag being + set, it is interpreted the same as ``False``. + +* On first connection, the dialect detects if SQL Server version 2012 or greater + is in use; if the flag is still at ``None``, it sets it to ``True`` or + ``False`` based on whether 2012 or greater is detected. + +* The flag can be set to either ``True`` or ``False`` when the dialect + is created, typically via :func:`.create_engine`:: + + eng = create_engine("mssql+pymssql://user:pass@host/db", + deprecate_large_types=True) + +* Complete control over whether the "old" or "new" types are rendered is + available in all SQLAlchemy versions by using the UPPERCASE type objects + instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`, + :class:`.TEXT`, :class:`.mssql.NTEXT`, :class:`.mssql.IMAGE` will always remain + fixed and always output exactly that type. + +.. versionadded:: 1.0.0 + .. _mssql_indexes: Clustered Index Support @@ -367,19 +463,20 @@ import operator import re from ... import sql, schema as sa_schema, exc, util -from ...sql import compiler, expression, \ - util as sql_util, cast +from ...sql import compiler, expression, util as sql_util from ... import engine from ...engine import reflection, default from ... import types as sqltypes from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \ FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\ - VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR + TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR from ...util import update_wrapper from . 
import information_schema as ischema +# http://sqlserverbuilds.blogspot.com/ +MS_2012_VERSION = (11,) MS_2008_VERSION = (10,) MS_2005_VERSION = (9,) MS_2000_VERSION = (8,) @@ -451,9 +548,13 @@ class _MSDate(sqltypes.Date): if isinstance(value, datetime.datetime): return value.date() elif isinstance(value, util.string_types): + m = self._reg.match(value) + if not m: + raise ValueError( + "could not parse %r as a date value" % (value, )) return datetime.date(*[ int(x or 0) - for x in self._reg.match(value).groups() + for x in m.groups() ]) else: return value @@ -485,9 +586,13 @@ class TIME(sqltypes.TIME): if isinstance(value, datetime.datetime): return value.time() elif isinstance(value, util.string_types): + m = self._reg.match(value) + if not m: + raise ValueError( + "could not parse %r as a time value" % (value, )) return datetime.time(*[ int(x or 0) - for x in self._reg.match(value).groups()]) + for x in m.groups()]) else: return value return process @@ -545,6 +650,26 @@ class NTEXT(sqltypes.UnicodeText): __visit_name__ = 'NTEXT' +class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary): + """The MSSQL VARBINARY type. + + This type extends both :class:`.types.VARBINARY` and + :class:`.types.LargeBinary`. In "deprecate_large_types" mode, + the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)`` + on SQL Server. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :ref:`mssql_large_type_deprecation` + + + + """ + __visit_name__ = 'VARBINARY' + + class IMAGE(sqltypes.LargeBinary): __visit_name__ = 'IMAGE' @@ -626,7 +751,6 @@ ischema_names = { class MSTypeCompiler(compiler.GenericTypeCompiler): - def _extend(self, spec, type_, length=None): """Extend a string-type declaration with standard SQL COLLATE annotations. @@ -647,103 +771,115 @@ class MSTypeCompiler(compiler.GenericTypeCompiler): return ' '.join([c for c in (spec, collation) if c is not None]) - def visit_FLOAT(self, type_): + def visit_FLOAT(self, type_, **kw): precision = getattr(type_, 'precision', None) if precision is None: return "FLOAT" else: return "FLOAT(%(precision)s)" % {'precision': precision} - def visit_TINYINT(self, type_): + def visit_TINYINT(self, type_, **kw): return "TINYINT" - def visit_DATETIMEOFFSET(self, type_): - if type_.precision: + def visit_DATETIMEOFFSET(self, type_, **kw): + if type_.precision is not None: return "DATETIMEOFFSET(%s)" % type_.precision else: return "DATETIMEOFFSET" - def visit_TIME(self, type_): + def visit_TIME(self, type_, **kw): precision = getattr(type_, 'precision', None) - if precision: + if precision is not None: return "TIME(%s)" % precision else: return "TIME" - def visit_DATETIME2(self, type_): + def visit_DATETIME2(self, type_, **kw): precision = getattr(type_, 'precision', None) - if precision: + if precision is not None: return "DATETIME2(%s)" % precision else: return "DATETIME2" - def visit_SMALLDATETIME(self, type_): + def visit_SMALLDATETIME(self, type_, **kw): return "SMALLDATETIME" - def visit_unicode(self, type_): - return self.visit_NVARCHAR(type_) + def visit_unicode(self, type_, **kw): + return self.visit_NVARCHAR(type_, **kw) - def visit_unicode_text(self, type_): - return self.visit_NTEXT(type_) + def visit_text(self, type_, **kw): + if self.dialect.deprecate_large_types: + return self.visit_VARCHAR(type_, **kw) + else: + return self.visit_TEXT(type_, **kw) - def visit_NTEXT(self, type_): + def visit_unicode_text(self, type_, **kw): + if self.dialect.deprecate_large_types: + return self.visit_NVARCHAR(type_, **kw) + else: + return self.visit_NTEXT(type_, 
**kw) + + def visit_NTEXT(self, type_, **kw): return self._extend("NTEXT", type_) - def visit_TEXT(self, type_): + def visit_TEXT(self, type_, **kw): return self._extend("TEXT", type_) - def visit_VARCHAR(self, type_): + def visit_VARCHAR(self, type_, **kw): return self._extend("VARCHAR", type_, length=type_.length or 'max') - def visit_CHAR(self, type_): + def visit_CHAR(self, type_, **kw): return self._extend("CHAR", type_) - def visit_NCHAR(self, type_): + def visit_NCHAR(self, type_, **kw): return self._extend("NCHAR", type_) - def visit_NVARCHAR(self, type_): + def visit_NVARCHAR(self, type_, **kw): return self._extend("NVARCHAR", type_, length=type_.length or 'max') - def visit_date(self, type_): + def visit_date(self, type_, **kw): if self.dialect.server_version_info < MS_2008_VERSION: - return self.visit_DATETIME(type_) + return self.visit_DATETIME(type_, **kw) else: - return self.visit_DATE(type_) + return self.visit_DATE(type_, **kw) - def visit_time(self, type_): + def visit_time(self, type_, **kw): if self.dialect.server_version_info < MS_2008_VERSION: - return self.visit_DATETIME(type_) + return self.visit_DATETIME(type_, **kw) else: - return self.visit_TIME(type_) + return self.visit_TIME(type_, **kw) - def visit_large_binary(self, type_): - return self.visit_IMAGE(type_) + def visit_large_binary(self, type_, **kw): + if self.dialect.deprecate_large_types: + return self.visit_VARBINARY(type_, **kw) + else: + return self.visit_IMAGE(type_, **kw) - def visit_IMAGE(self, type_): + def visit_IMAGE(self, type_, **kw): return "IMAGE" - def visit_VARBINARY(self, type_): + def visit_VARBINARY(self, type_, **kw): return self._extend( "VARBINARY", type_, length=type_.length or 'max') - def visit_boolean(self, type_): + def visit_boolean(self, type_, **kw): return self.visit_BIT(type_) - def visit_BIT(self, type_): + def visit_BIT(self, type_, **kw): return "BIT" - def visit_MONEY(self, type_): + def visit_MONEY(self, type_, **kw): return "MONEY" - def visit_SMALLMONEY(self, type_): + def visit_SMALLMONEY(self, type_, **kw): return 'SMALLMONEY' - def visit_UNIQUEIDENTIFIER(self, type_): + def visit_UNIQUEIDENTIFIER(self, type_, **kw): return "UNIQUEIDENTIFIER" - def visit_SQL_VARIANT(self, type_): + def visit_SQL_VARIANT(self, type_, **kw): return 'SQL_VARIANT' @@ -846,7 +982,7 @@ class MSExecutionContext(default.DefaultExecutionContext): "SET IDENTITY_INSERT %s OFF" % self.dialect.identifier_preparer. 
format_table( self.compiled.statement.table))) - except: + except Exception: pass def get_result_proxy(self): @@ -872,6 +1008,15 @@ class MSSQLCompiler(compiler.SQLCompiler): self.tablealiases = {} super(MSSQLCompiler, self).__init__(*args, **kwargs) + def _with_legacy_schema_aliasing(fn): + def decorate(self, *arg, **kw): + if self.dialect.legacy_schema_aliasing: + return fn(self, *arg, **kw) + else: + super_ = getattr(super(MSSQLCompiler, self), fn.__name__) + return super_(*arg, **kw) + return decorate + def visit_now_func(self, fn, **kw): return "CURRENT_TIMESTAMP" @@ -900,19 +1045,24 @@ class MSSQLCompiler(compiler.SQLCompiler): self.process(binary.left, **kw), self.process(binary.right, **kw)) - def get_select_precolumns(self, select): - """ MS-SQL puts TOP, its version of LIMIT, here """ - if select._distinct or select._limit is not None: - s = select._distinct and "DISTINCT " or "" + def get_select_precolumns(self, select, **kw): + """ MS-SQL puts TOP, its version of LIMIT, here """ + s = "" + if select._distinct: + s += "DISTINCT " + + if select._simple_int_limit and not select._offset: # ODBC drivers and possibly others # don't support bind params in the SELECT clause on SQL Server. # so have to use literal here. - if select._limit is not None: - if not select._offset: - s += "TOP %d " % select._limit + s += "TOP %d " % select._limit + + if s: return s - return compiler.SQLCompiler.get_select_precolumns(self, select) + else: + return compiler.SQLCompiler.get_select_precolumns( + self, select, **kw) def get_from_hint_text(self, table, text): return text @@ -920,7 +1070,7 @@ class MSSQLCompiler(compiler.SQLCompiler): def get_crud_hint_text(self, table, text): return text - def limit_clause(self, select): + def limit_clause(self, select, **kw): # Limit in mssql is after the select keyword return "" @@ -929,39 +1079,48 @@ class MSSQLCompiler(compiler.SQLCompiler): so tries to wrap it in a subquery with ``row_number()`` criterion. """ - if select._offset and not getattr(select, '_mssql_visit', None): + if ( + ( + not select._simple_int_limit and + select._limit_clause is not None + ) or ( + select._offset_clause is not None and + not select._simple_int_offset or select._offset + ) + ) and not getattr(select, '_mssql_visit', None): + # to use ROW_NUMBER(), an ORDER BY is required. 
if not select._order_by_clause.clauses: raise exc.CompileError('MSSQL requires an order_by when ' - 'using an offset.') - _offset = select._offset - _limit = select._limit + 'using an OFFSET or a non-simple ' + 'LIMIT clause') + _order_by_clauses = select._order_by_clause.clauses + limit_clause = select._limit_clause + offset_clause = select._offset_clause + kwargs['select_wraps_for'] = select select = select._generate() select._mssql_visit = True select = select.column( sql.func.ROW_NUMBER().over(order_by=_order_by_clauses) - .label("mssql_rn") - ).order_by(None).alias() + .label("mssql_rn")).order_by(None).alias() mssql_rn = sql.column('mssql_rn') limitselect = sql.select([c for c in select.c if c.key != 'mssql_rn']) - limitselect.append_whereclause(mssql_rn > _offset) - if _limit is not None: - limitselect.append_whereclause(mssql_rn <= (_limit + _offset)) - return self.process(limitselect, iswrapper=True, **kwargs) + if offset_clause is not None: + limitselect.append_whereclause(mssql_rn > offset_clause) + if limit_clause is not None: + limitselect.append_whereclause( + mssql_rn <= (limit_clause + offset_clause)) + else: + limitselect.append_whereclause( + mssql_rn <= (limit_clause)) + return self.process(limitselect, **kwargs) else: return compiler.SQLCompiler.visit_select(self, select, **kwargs) - def _schema_aliased_table(self, table): - if getattr(table, 'schema', None) is not None: - if table not in self.tablealiases: - self.tablealiases[table] = table.alias() - return self.tablealiases[table] - else: - return None - + @_with_legacy_schema_aliasing def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs): if mssql_aliased is table or iscrud: return super(MSSQLCompiler, self).visit_table(table, **kwargs) @@ -973,25 +1132,14 @@ class MSSQLCompiler(compiler.SQLCompiler): else: return super(MSSQLCompiler, self).visit_table(table, **kwargs) - def visit_alias(self, alias, **kwargs): + @_with_legacy_schema_aliasing + def visit_alias(self, alias, **kw): # translate for schema-qualified table aliases - kwargs['mssql_aliased'] = alias.original - return super(MSSQLCompiler, self).visit_alias(alias, **kwargs) + kw['mssql_aliased'] = alias.original + return super(MSSQLCompiler, self).visit_alias(alias, **kw) - def visit_extract(self, extract, **kw): - field = self.extract_map.get(extract.field, extract.field) - return 'DATEPART("%s", %s)' % \ - (field, self.process(extract.expr, **kw)) - - def visit_savepoint(self, savepoint_stmt): - return "SAVE TRANSACTION %s" % \ - self.preparer.format_savepoint(savepoint_stmt) - - def visit_rollback_to_savepoint(self, savepoint_stmt): - return ("ROLLBACK TRANSACTION %s" - % self.preparer.format_savepoint(savepoint_stmt)) - - def visit_column(self, column, add_to_result_map=None, **kwargs): + @_with_legacy_schema_aliasing + def visit_column(self, column, add_to_result_map=None, **kw): if column.table is not None and \ (not self.isupdate and not self.isdelete) or \ self.is_subquery(): @@ -1009,10 +1157,40 @@ class MSSQLCompiler(compiler.SQLCompiler): ) return super(MSSQLCompiler, self).\ - visit_column(converted, **kwargs) + visit_column(converted, **kw) return super(MSSQLCompiler, self).visit_column( - column, add_to_result_map=add_to_result_map, **kwargs) + column, add_to_result_map=add_to_result_map, **kw) + + def _schema_aliased_table(self, table): + if getattr(table, 'schema', None) is not None: + if self.dialect._warn_schema_aliasing and \ + table.schema.lower() != 'information_schema': + util.warn( + "legacy_schema_aliasing flag is 
defaulted to True; " + "some schema-qualified queries may not function " + "correctly. Consider setting this flag to False for " + "modern SQL Server versions; this flag will default to " + "False in version 1.1") + + if table not in self.tablealiases: + self.tablealiases[table] = table.alias() + return self.tablealiases[table] + else: + return None + + def visit_extract(self, extract, **kw): + field = self.extract_map.get(extract.field, extract.field) + return 'DATEPART(%s, %s)' % \ + (field, self.process(extract.expr, **kw)) + + def visit_savepoint(self, savepoint_stmt): + return "SAVE TRANSACTION %s" % \ + self.preparer.format_savepoint(savepoint_stmt) + + def visit_rollback_to_savepoint(self, savepoint_stmt): + return ("ROLLBACK TRANSACTION %s" + % self.preparer.format_savepoint(savepoint_stmt)) def visit_binary(self, binary, **kwargs): """Move bind parameters to the right-hand side of an operator, where @@ -1141,8 +1319,11 @@ class MSSQLStrictCompiler(MSSQLCompiler): class MSDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): - colspec = (self.preparer.format_column(column) + " " - + self.dialect.type_compiler.process(column.type)) + colspec = ( + self.preparer.format_column(column) + " " + + self.dialect.type_compiler.process( + column.type, type_expression=column) + ) if column.nullable is not None: if not column.nullable or column.primary_key or \ @@ -1321,6 +1502,10 @@ class MSDialect(default.DefaultDialect): sqltypes.Time: TIME, } + engine_config_types = default.DefaultDialect.engine_config_types.union([ + ('legacy_schema_aliasing', util.asbool), + ]) + ischema_names = ischema_names supports_native_boolean = False @@ -1351,13 +1536,24 @@ class MSDialect(default.DefaultDialect): query_timeout=None, use_scope_identity=True, max_identifier_length=None, - schema_name="dbo", **opts): + schema_name="dbo", + deprecate_large_types=None, + legacy_schema_aliasing=None, **opts): self.query_timeout = int(query_timeout or 0) self.schema_name = schema_name self.use_scope_identity = use_scope_identity self.max_identifier_length = int(max_identifier_length or 0) or \ self.max_identifier_length + self.deprecate_large_types = deprecate_large_types + + if legacy_schema_aliasing is None: + self.legacy_schema_aliasing = True + self._warn_schema_aliasing = True + else: + self.legacy_schema_aliasing = legacy_schema_aliasing + self._warn_schema_aliasing = False + super(MSDialect, self).__init__(**opts) def do_savepoint(self, connection, name): @@ -1371,21 +1567,31 @@ class MSDialect(default.DefaultDialect): def initialize(self, connection): super(MSDialect, self).initialize(connection) + self._setup_version_attributes() + + def _setup_version_attributes(self): if self.server_version_info[0] not in list(range(8, 17)): # FreeTDS with version 4.2 seems to report here # a number like "95.10.255". Don't know what # that is. So emit warning. + # Use TDS Version 7.0 through 7.3, per the MS information here: + # https://msdn.microsoft.com/en-us/library/dd339982.aspx + # and FreeTDS information here (7.3 highest supported version): + # http://www.freetds.org/userguide/choosingtdsprotocol.htm util.warn( "Unrecognized server version info '%s'. Version specific " "behaviors may not function properly. If using ODBC " - "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, " - "is configured in the FreeTDS configuration." % + "with FreeTDS, ensure TDS_VERSION 7.0 through 7.3, not " + "4.2, is configured in the FreeTDS configuration." 
% ".".join(str(x) for x in self.server_version_info)) if self.server_version_info >= MS_2005_VERSION and \ 'implicit_returning' not in self.__dict__: self.implicit_returning = True if self.server_version_info >= MS_2008_VERSION: self.supports_multivalues_insert = True + if self.deprecate_large_types is None: + self.deprecate_large_types = \ + self.server_version_info >= MS_2012_VERSION def _get_default_schema_name(self, connection): if self.server_version_info < MS_2005_VERSION: @@ -1573,12 +1779,11 @@ class MSDialect(default.DefaultDialect): if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText, MSNText, MSBinary, MSVarBinary, sqltypes.LargeBinary): + if charlen == -1: + charlen = 'max' kwargs['length'] = charlen if collation: kwargs['collation'] = collation - if coltype == MSText or \ - (coltype in (MSString, MSNVarchar) and charlen == -1): - kwargs.pop('length') if coltype is None: util.warn( diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/information_schema.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/information_schema.py index 371a1ed..e2c0a46 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/information_schema.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/information_schema.py @@ -1,5 +1,5 @@ # mssql/information_schema.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/mxodbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/mxodbc.py index ffe38d8..5e20ed1 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/mxodbc.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/mxodbc.py @@ -1,5 +1,5 @@ # mssql/mxodbc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pymssql.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pymssql.py index 8f76336..e3a4db8 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pymssql.py @@ -1,5 +1,5 @@ # mssql/pymssql.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -46,11 +46,12 @@ class MSDialect_pymssql(MSDialect): @classmethod def dbapi(cls): module = __import__('pymssql') - # pymmsql doesn't have a Binary method. we use string - # TODO: monkeypatching here is less than ideal - module.Binary = lambda x: x if hasattr(x, 'decode') else str(x) - + # pymmsql < 2.1.1 doesn't have a Binary method. we use string client_ver = tuple(int(x) for x in module.__version__.split(".")) + if client_ver < (2, 1, 1): + # TODO: monkeypatching here is less than ideal + module.Binary = lambda x: x if hasattr(x, 'decode') else str(x) + if client_ver < (1, ): util.warn("The pymssql dialect expects at least " "the 1.0 series of the pymssql DBAPI.") @@ -63,7 +64,7 @@ class MSDialect_pymssql(MSDialect): def _get_server_version_info(self, connection): vers = connection.scalar("select @@version") m = re.match( - r"Microsoft SQL Server.*? - (\d+).(\d+).(\d+).(\d+)", vers) + r"Microsoft .*? 
- (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
@@ -84,7 +85,8 @@ class MSDialect_pymssql(MSDialect):
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
- "Connection is closed"
+ "Connection is closed",
+ "message 20006", # Write to the server failed
):
if msg in str(e):
return True
diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pyodbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pyodbc.py
index 1c75fe1..c938368 100644
--- a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pyodbc.py
+++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/pyodbc.py
@@ -1,5 +1,5 @@
# mssql/pyodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# # # This module is part of SQLAlchemy and is released under
@@ -12,74 +12,57 @@
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
-Additional Connection Examples
-------------------------------
+Connecting to PyODBC
+--------------------
-Examples of pyodbc connection string URLs:
+The URL here is translated to a PyODBC connection string, as
+detailed in `ConnectionStrings `_.
-* ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``.
- The connection string that is created will appear like::
+DSN Connections
+^^^^^^^^^^^^^^^
- dsn=mydsn;Trusted_Connection=Yes
+A DSN-based connection is **preferred** overall when using ODBC. A
+basic DSN-based connection looks like::
-* ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named
- ``mydsn`` passing in the ``UID`` and ``PWD`` information. The
- connection string that is created will appear like::
+ engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
+
+The above URL will pass the following connection string to PyODBC::
dsn=some_dsn;UID=scott;PWD=tiger
-* ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects
- using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD``
- information, plus the additional connection configuration option
- ``LANGUAGE``. The connection string that is created will appear
- like::
+If the username and password are omitted, the DSN form will also add
+the ``Trusted_Connection=yes`` directive to the ODBC string.
- dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
+Hostname Connections
+^^^^^^^^^^^^^^^^^^^^
-* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
- that would appear like::
+Hostname-based connections are **not preferred**, however they are supported.
+The ODBC driver name must be explicitly specified::
- DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
+ engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
-* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
- string which includes the port
- information using the comma syntax. This will create the following
- connection string::
+.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
+ SQL Server driver name specified explicitly. SQLAlchemy cannot
+ choose an optimal default here as it varies based on platform
+ and installed drivers.
- DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
+Other keywords interpreted by the Pyodbc dialect to be passed to
+``pyodbc.connect()`` in both the DSN and hostname cases include:
+``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
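As a minimal sketch of passing one of these keywords through the
query string (the DSN name ``some_dsn`` is a placeholder for a data
source configured on the client; the engine does not actually connect
until first used)::

    from sqlalchemy import create_engine

    # "autocommit" is consumed by the dialect and forwarded to pyodbc.connect()
    engine = create_engine(
        "mssql+pyodbc://scott:tiger@some_dsn?autocommit=true")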
-* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection
- string that includes the port
- information as a separate ``port`` keyword. This will create the
- following connection string::
+
+Pass through exact Pyodbc string
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123
+A PyODBC connection string can also be sent exactly as specified in
+`ConnectionStrings `_
+into the driver using the parameter ``odbc_connect``.
+The delimiters must be URL escaped, however,
+as illustrated below using ``urllib.quote_plus``
+(``urllib.parse.quote_plus`` on Python 3)::
-* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a
- connection string that includes a custom ODBC driver name. This will create
- the following connection string::
+
+ import urllib
+ params = urllib.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
- DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass
+
+ engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
-If you require a connection string that is outside the options
-presented above, use the ``odbc_connect`` keyword to pass in a
-urlencoded connection string. What gets passed in will be urldecoded
-and passed directly.
-
-For example::
-
- mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb
-
-would create the following connection string::
-
- dsn=mydsn;Database=db
-
-Encoding your connection string can be easily accomplished through
-the python shell. For example::
-
- >>> import urllib
- >>> urllib.quote_plus('dsn=mydsn;Database=db')
- 'dsn%3Dmydsn%3BDatabase%3Ddb'
Unicode Binds
-------------
@@ -112,7 +95,7 @@ for unix + PyODBC.
"""
-from .base import MSExecutionContext, MSDialect
+from .base import MSExecutionContext, MSDialect, VARBINARY
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util
import decimal
@@ -191,6 +174,22 @@ class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
pass
+class _VARBINARY_pyodbc(VARBINARY):
+ def bind_processor(self, dialect):
+ if dialect.dbapi is None:
+ return None
+
+ DBAPIBinary = dialect.dbapi.Binary
+
+ def process(value):
+ if value is not None:
+ return DBAPIBinary(value)
+ else:
+ # pyodbc-specific
+ return dialect.dbapi.BinaryNull
+ return process
+
+
class MSExecutionContext_pyodbc(MSExecutionContext):
_embedded_scope_identity = False
@@ -243,13 +242,13 @@ class MSDialect_pyodbc(PyODBCConnector, MSDialect):
execution_ctx_cls = MSExecutionContext_pyodbc
- pyodbc_driver_name = 'SQL Server'
-
colspecs = util.update_copy( MSDialect.colspecs, {
sqltypes.Numeric: _MSNumeric_pyodbc,
- sqltypes.Float: _MSFloat_pyodbc
+ sqltypes.Float: _MSFloat_pyodbc,
+ VARBINARY: _VARBINARY_pyodbc,
+ sqltypes.LargeBinary: _VARBINARY_pyodbc,
} )
diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py
index b23a010..0bf68c2 100644
--- a/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py
+++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mssql/zxjdbc.py
@@ -1,5 +1,5 @@
# mssql/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# # # This module is part of SQLAlchemy and is released under
@@ -13,6 +13,8 @@
[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
+ .. note:: Jython is not supported by current versions of SQLAlchemy. The
+ zxjdbc dialect should be considered experimental.
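For completeness, a hedged sketch of the URL format this dialect
documents (host, port and database name here are placeholders; this
assumes Jython with the jTDS driver available and, per the note above,
should be treated as experimental)::

    from sqlalchemy import create_engine

    # hypothetical Jython-only connection through the jTDS JDBC driver
    engine = create_engine("mssql+zxjdbc://scott:tiger@myhost:1433/mydb")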
""" from ...connectors.zxJDBC import ZxJDBCConnector diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/__init__.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/__init__.py index 498603c..fabd932 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/__init__.py @@ -1,5 +1,5 @@ # mysql/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/base.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/base.py index 6c5633f..3f9c599 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/base.py @@ -1,5 +1,5 @@ # mysql/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -32,6 +32,11 @@ the ``pool_recycle`` option which controls the maximum age of any connection:: engine = create_engine('mysql+mysqldb://...', pool_recycle=3600) +.. seealso:: + + :ref:`pool_setting_recycle` - full description of the pool recycle feature. + + .. _mysql_storage_engines: CREATE TABLE arguments including Storage Engines @@ -101,10 +106,12 @@ all lower case both within SQLAlchemy as well as on the MySQL database itself, especially if database reflection features are to be used. +.. _mysql_isolation_level: + Transaction Isolation Level --------------------------- -:func:`.create_engine` accepts an ``isolation_level`` +:func:`.create_engine` accepts an :paramref:`.create_engine.isolation_level` parameter which results in the command ``SET SESSION TRANSACTION ISOLATION LEVEL `` being invoked for every new connection. Valid values for this parameter are @@ -144,6 +151,90 @@ multi-column key for some storage engines:: Column('id', Integer, primary_key=True) ) +.. _mysql_unicode: + +Unicode +------- + +Charset Selection +~~~~~~~~~~~~~~~~~ + +Most MySQL DBAPIs offer the option to set the client character set for +a connection. This is typically delivered using the ``charset`` parameter +in the URL, such as:: + + e = create_engine("mysql+pymysql://scott:tiger@localhost/\ +test?charset=utf8") + +This charset is the **client character set** for the connection. Some +MySQL DBAPIs will default this to a value such as ``latin1``, and some +will make use of the ``default-character-set`` setting in the ``my.cnf`` +file as well. Documentation for the DBAPI in use should be consulted +for specific behavior. + +The encoding used for Unicode has traditionally been ``'utf8'``. However, +for MySQL versions 5.5.3 on forward, a new MySQL-specific encoding +``'utf8mb4'`` has been introduced. The rationale for this new encoding +is due to the fact that MySQL's utf-8 encoding only supports +codepoints up to three bytes instead of four. Therefore, +when communicating with a MySQL database +that includes codepoints more than three bytes in size, +this new charset is preferred, if supported by both the database as well +as the client DBAPI, as in:: + + e = create_engine("mysql+pymysql://scott:tiger@localhost/\ +test?charset=utf8mb4") + +At the moment, up-to-date versions of MySQLdb and PyMySQL support the +``utf8mb4`` charset. 
Other DBAPIs such as MySQL-Connector and OurSQL
+may **not** support it as of yet.
+
+In order to use ``utf8mb4`` encoding, changes to
+the MySQL schema and/or server configuration may be required.
+
+.. seealso::
+
+ `The utf8mb4 Character Set \
+`_ - \
+in the MySQL documentation
+
+Unicode Encoding / Decoding
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All modern MySQL DBAPIs offer the service of handling the encoding and
+decoding of unicode data between the Python application space and the database.
+As this was not always the case, SQLAlchemy also includes a comprehensive system
+for performing the encode/decode task. As only one of these systems
+should be in use at a time, SQLAlchemy has long included functionality
+to automatically detect upon first connection whether or not the DBAPI is
+automatically handling unicode.
+
+Whether or not the MySQL DBAPI will handle encoding can usually be configured
+using a DBAPI flag ``use_unicode``, which is known to be supported at least
+by MySQLdb, PyMySQL, and MySQL-Connector. Setting this value to ``0``
+in the "connect args" or query string will have the effect of disabling the
+DBAPI's handling of unicode, such that it instead will return data of the
+``str`` type or ``bytes`` type, with data in the configured charset::
+
+ # connect while disabling the DBAPI's unicode encoding/decoding
+ e = create_engine("mysql+mysqldb://scott:tiger@localhost/test?charset=utf8&use_unicode=0")
+
+Current recommendations for modern DBAPIs are as follows:
+
+* It is generally always safe to leave the ``use_unicode`` flag set at
+ its default; that is, don't use it at all.
+* Under Python 3, the ``use_unicode=0`` flag should **never be used**.
+ SQLAlchemy under Python 3 generally assumes the DBAPI receives and returns
+ string values as Python 3 strings, which are inherently unicode objects.
+* Under Python 2 with MySQLdb, the ``use_unicode=0`` flag will **offer
+ superior performance**, as MySQLdb's unicode converters, which apply under
+ Python 2 only, have been observed to have unusually slow performance
+ compared to SQLAlchemy's fast C-based encoders/decoders.
+
+In short: don't specify ``use_unicode`` *at all*, with the possible
+exception of ``use_unicode=0`` on MySQLdb with Python 2 **only** for a
+potential performance gain.
+
Ansi Quoting Style
------------------
@@ -188,15 +279,13 @@
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL
DBAPI drivers, which is "number of rows actually modified/deleted".
For this reason, the
-SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
-or whatever is equivalent for the DBAPI in use, on connect, unless the flag
-value is overridden using DBAPI-specific options
-(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
-OurSQL driver).
+SQLAlchemy MySQL dialects always add the ``constants.CLIENT.FOUND_ROWS``
+flag, or whatever is equivalent for the target dialect, upon connection.
+This setting is currently hardcoded.
-See also:
-
-:attr:`.ResultProxy.rowcount`
+.. seealso::
+
+ :attr:`.ResultProxy.rowcount`
CAST Support
@@ -341,6 +430,110 @@
reflection will not include foreign keys. For these tables, you may supply a
:ref:`mysql_storage_engines`
+..
_mysql_unique_constraints: + +MySQL Unique Constraints and Reflection +--------------------------------------- + +SQLAlchemy supports both the :class:`.Index` construct with the +flag ``unique=True``, indicating a UNIQUE index, as well as the +:class:`.UniqueConstraint` construct, representing a UNIQUE constraint. +Both objects/syntaxes are supported by MySQL when emitting DDL to create +these constraints. However, MySQL does not have a unique constraint +construct that is separate from a unique index; that is, the "UNIQUE" +constraint on MySQL is equivalent to creating a "UNIQUE INDEX". + +When reflecting these constructs, the :meth:`.Inspector.get_indexes` +and the :meth:`.Inspector.get_unique_constraints` methods will **both** +return an entry for a UNIQUE index in MySQL. However, when performing +full table reflection using ``Table(..., autoload=True)``, +the :class:`.UniqueConstraint` construct is +**not** part of the fully reflected :class:`.Table` construct under any +circumstances; this construct is always represented by a :class:`.Index` +with the ``unique=True`` setting present in the :attr:`.Table.indexes` +collection. + + +.. _mysql_timestamp_null: + +TIMESTAMP Columns and NULL +-------------------------- + +MySQL historically enforces that a column which specifies the +TIMESTAMP datatype implicitly includes a default value of +CURRENT_TIMESTAMP, even though this is not stated, and additionally +sets the column as NOT NULL, the opposite behavior vs. that of all +other datatypes:: + + mysql> CREATE TABLE ts_test ( + -> a INTEGER, + -> b INTEGER NOT NULL, + -> c TIMESTAMP, + -> d TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + -> e TIMESTAMP NULL); + Query OK, 0 rows affected (0.03 sec) + + mysql> SHOW CREATE TABLE ts_test; + +---------+----------------------------------------------------- + | Table | Create Table + +---------+----------------------------------------------------- + | ts_test | CREATE TABLE `ts_test` ( + `a` int(11) DEFAULT NULL, + `b` int(11) NOT NULL, + `c` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + `d` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `e` timestamp NULL DEFAULT NULL + ) ENGINE=MyISAM DEFAULT CHARSET=latin1 + +Above, we see that an INTEGER column defaults to NULL, unless it is specified +with NOT NULL. But when the column is of type TIMESTAMP, an implicit +default of CURRENT_TIMESTAMP is generated which also coerces the column +to be a NOT NULL, even though we did not specify it as such. + +This behavior of MySQL can be changed on the MySQL side using the +`explicit_defaults_for_timestamp +`_ configuration flag introduced in +MySQL 5.6. With this server setting enabled, TIMESTAMP columns behave like +any other datatype on the MySQL side with regards to defaults and nullability. + +However, to accommodate the vast majority of MySQL databases that do not +specify this new flag, SQLAlchemy emits the "NULL" specifier explicitly with +any TIMESTAMP column that does not specify ``nullable=False``. In order +to accommodate newer databases that specify ``explicit_defaults_for_timestamp``, +SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify +``nullable=False``. 
The following example illustrates:: + + from sqlalchemy import MetaData, Integer, Table, Column, text + from sqlalchemy.dialects.mysql import TIMESTAMP + + m = MetaData() + t = Table('ts_test', m, + Column('a', Integer), + Column('b', Integer, nullable=False), + Column('c', TIMESTAMP), + Column('d', TIMESTAMP, nullable=False) + ) + + + from sqlalchemy import create_engine + e = create_engine("mysql://scott:tiger@localhost/test", echo=True) + m.create_all(e) + +output:: + + CREATE TABLE ts_test ( + a INTEGER, + b INTEGER NOT NULL, + c TIMESTAMP NULL, + d TIMESTAMP NOT NULL + ) + +.. versionchanged:: 1.0.0 - SQLAlchemy now renders NULL or NOT NULL in all + cases for TIMESTAMP columns, to accommodate + ``explicit_defaults_for_timestamp``. Prior to this version, it will + not render "NOT NULL" for a TIMESTAMP column that is ``nullable=False``. + """ import datetime @@ -409,6 +602,8 @@ RESERVED_WORDS = set( 'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot', 'partition', 'sql_after_gtids', 'sql_before_gtids', # 5.6 + 'generated', 'optimizer_costs', 'stored', 'virtual', # 5.7 + ]) AUTOCOMMIT_RE = re.compile( @@ -420,7 +615,6 @@ SET_RE = re.compile( class _NumericType(object): - """Base for MySQL numeric types. This is the base both for NUMERIC as well as INTEGER, hence @@ -439,7 +633,6 @@ class _NumericType(object): class _FloatType(_NumericType, sqltypes.Float): - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): if isinstance(self, (REAL, DOUBLE)) and \ ( @@ -460,7 +653,6 @@ class _FloatType(_NumericType, sqltypes.Float): class _IntegerType(_NumericType, sqltypes.Integer): - def __init__(self, display_width=None, **kw): self.display_width = display_width super(_IntegerType, self).__init__(**kw) @@ -472,7 +664,6 @@ class _IntegerType(_NumericType, sqltypes.Integer): class _StringType(sqltypes.String): - """Base for MySQL string types.""" def __init__(self, charset=None, collation=None, @@ -494,8 +685,15 @@ class _StringType(sqltypes.String): to_inspect=[_StringType, sqltypes.String]) -class NUMERIC(_NumericType, sqltypes.NUMERIC): +class _MatchType(sqltypes.Float, sqltypes.MatchType): + def __init__(self, **kw): + # TODO: float arguments? 
+ sqltypes.Float.__init__(self) + sqltypes.MatchType.__init__(self) + + +class NUMERIC(_NumericType, sqltypes.NUMERIC): """MySQL NUMERIC type.""" __visit_name__ = 'NUMERIC' @@ -521,7 +719,6 @@ class NUMERIC(_NumericType, sqltypes.NUMERIC): class DECIMAL(_NumericType, sqltypes.DECIMAL): - """MySQL DECIMAL type.""" __visit_name__ = 'DECIMAL' @@ -547,7 +744,6 @@ class DECIMAL(_NumericType, sqltypes.DECIMAL): class DOUBLE(_FloatType): - """MySQL DOUBLE type.""" __visit_name__ = 'DOUBLE' @@ -581,7 +777,6 @@ class DOUBLE(_FloatType): class REAL(_FloatType, sqltypes.REAL): - """MySQL REAL type.""" __visit_name__ = 'REAL' @@ -615,7 +810,6 @@ class REAL(_FloatType, sqltypes.REAL): class FLOAT(_FloatType, sqltypes.FLOAT): - """MySQL FLOAT type.""" __visit_name__ = 'FLOAT' @@ -644,7 +838,6 @@ class FLOAT(_FloatType, sqltypes.FLOAT): class INTEGER(_IntegerType, sqltypes.INTEGER): - """MySQL INTEGER type.""" __visit_name__ = 'INTEGER' @@ -666,7 +859,6 @@ class INTEGER(_IntegerType, sqltypes.INTEGER): class BIGINT(_IntegerType, sqltypes.BIGINT): - """MySQL BIGINTEGER type.""" __visit_name__ = 'BIGINT' @@ -688,7 +880,6 @@ class BIGINT(_IntegerType, sqltypes.BIGINT): class MEDIUMINT(_IntegerType): - """MySQL MEDIUMINTEGER type.""" __visit_name__ = 'MEDIUMINT' @@ -710,7 +901,6 @@ class MEDIUMINT(_IntegerType): class TINYINT(_IntegerType): - """MySQL TINYINT type.""" __visit_name__ = 'TINYINT' @@ -732,7 +922,6 @@ class TINYINT(_IntegerType): class SMALLINT(_IntegerType, sqltypes.SMALLINT): - """MySQL SMALLINTEGER type.""" __visit_name__ = 'SMALLINT' @@ -754,7 +943,6 @@ class SMALLINT(_IntegerType, sqltypes.SMALLINT): class BIT(sqltypes.TypeEngine): - """MySQL BIT type. This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater @@ -784,7 +972,9 @@ class BIT(sqltypes.TypeEngine): def process(value): if value is not None: v = 0 - for i in map(ord, value): + for i in value: + if not isinstance(i, int): + i = ord(i) # convert byte to int on Python 2 v = v << 8 | i return v return value @@ -792,7 +982,6 @@ class BIT(sqltypes.TypeEngine): class TIME(sqltypes.TIME): - """MySQL TIME type. """ __visit_name__ = 'TIME' @@ -838,7 +1027,6 @@ class TIME(sqltypes.TIME): class TIMESTAMP(sqltypes.TIMESTAMP): - """MySQL TIMESTAMP type. """ @@ -869,7 +1057,6 @@ class TIMESTAMP(sqltypes.TIMESTAMP): class DATETIME(sqltypes.DATETIME): - """MySQL DATETIME type. 
""" @@ -900,7 +1087,6 @@ class DATETIME(sqltypes.DATETIME): class YEAR(sqltypes.TypeEngine): - """MySQL YEAR type, for single byte storage of years 1901-2155.""" __visit_name__ = 'YEAR' @@ -910,7 +1096,6 @@ class YEAR(sqltypes.TypeEngine): class TEXT(_StringType, sqltypes.TEXT): - """MySQL TEXT type, for text up to 2^16 characters.""" __visit_name__ = 'TEXT' @@ -947,7 +1132,6 @@ class TEXT(_StringType, sqltypes.TEXT): class TINYTEXT(_StringType): - """MySQL TINYTEXT type, for text up to 2^8 characters.""" __visit_name__ = 'TINYTEXT' @@ -980,7 +1164,6 @@ class TINYTEXT(_StringType): class MEDIUMTEXT(_StringType): - """MySQL MEDIUMTEXT type, for text up to 2^24 characters.""" __visit_name__ = 'MEDIUMTEXT' @@ -1013,7 +1196,6 @@ class MEDIUMTEXT(_StringType): class LONGTEXT(_StringType): - """MySQL LONGTEXT type, for text up to 2^32 characters.""" __visit_name__ = 'LONGTEXT' @@ -1046,7 +1228,6 @@ class LONGTEXT(_StringType): class VARCHAR(_StringType, sqltypes.VARCHAR): - """MySQL VARCHAR type, for variable-length character data.""" __visit_name__ = 'VARCHAR' @@ -1079,7 +1260,6 @@ class VARCHAR(_StringType, sqltypes.VARCHAR): class CHAR(_StringType, sqltypes.CHAR): - """MySQL CHAR type, for fixed-length character data.""" __visit_name__ = 'CHAR' @@ -1121,7 +1301,6 @@ class CHAR(_StringType, sqltypes.CHAR): class NVARCHAR(_StringType, sqltypes.NVARCHAR): - """MySQL NVARCHAR type. For variable-length character data in the server's configured national @@ -1148,7 +1327,6 @@ class NVARCHAR(_StringType, sqltypes.NVARCHAR): class NCHAR(_StringType, sqltypes.NCHAR): - """MySQL NCHAR type. For fixed-length character data in the server's configured national @@ -1175,28 +1353,24 @@ class NCHAR(_StringType, sqltypes.NCHAR): class TINYBLOB(sqltypes._Binary): - """MySQL TINYBLOB type, for binary data up to 2^8 bytes.""" __visit_name__ = 'TINYBLOB' class MEDIUMBLOB(sqltypes._Binary): - """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes.""" __visit_name__ = 'MEDIUMBLOB' class LONGBLOB(sqltypes._Binary): - """MySQL LONGBLOB type, for binary data up to 2^32 bytes.""" __visit_name__ = 'LONGBLOB' class _EnumeratedValues(_StringType): - def _init_values(self, values, kw): self.quoting = kw.pop('quoting', 'auto') @@ -1240,7 +1414,6 @@ class _EnumeratedValues(_StringType): class ENUM(sqltypes.Enum, _EnumeratedValues): - """MySQL ENUM type.""" __visit_name__ = 'ENUM' @@ -1302,6 +1475,7 @@ class ENUM(sqltypes.Enum, _EnumeratedValues): kw.pop('quote', None) kw.pop('native_enum', None) kw.pop('inherit_schema', None) + kw.pop('_create_events', None) _StringType.__init__(self, length=length, **kw) sqltypes.Enum.__init__(self, *values) @@ -1329,7 +1503,6 @@ class ENUM(sqltypes.Enum, _EnumeratedValues): class SET(_EnumeratedValues): - """MySQL SET type.""" __visit_name__ = 'SET' @@ -1341,32 +1514,28 @@ class SET(_EnumeratedValues): Column('myset', SET("foo", "bar", "baz")) - :param values: The range of valid values for this SET. Values will be - quoted when generating the schema according to the quoting flag (see - below). - .. versionchanged:: 0.9.0 quoting is applied automatically to - :class:`.mysql.SET` in the same way as for :class:`.mysql.ENUM`. + The list of potential values is required in the case that this + set will be used to generate DDL for a table, or if the + :paramref:`.SET.retrieve_as_bitwise` flag is set to True. - :param charset: Optional, a column-level character set for this string - value. Takes precedence to 'ascii' or 'unicode' short-hand. 
+ :param values: The range of valid values for this SET. - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. + :param convert_unicode: Same flag as that of + :paramref:`.String.convert_unicode`. - :param ascii: Defaults to False: short-hand for the ``latin1`` - character set, generates ASCII in schema. + :param collation: same as that of :paramref:`.String.collation` - :param unicode: Defaults to False: short-hand for the ``ucs2`` - character set, generates UNICODE in schema. + :param charset: same as that of :paramref:`.VARCHAR.charset`. - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. + :param ascii: same as that of :paramref:`.VARCHAR.ascii`. - :param quoting: Defaults to 'auto': automatically determine enum value - quoting. If all enum values are surrounded by the same quoting + :param unicode: same as that of :paramref:`.VARCHAR.unicode`. + + :param binary: same as that of :paramref:`.VARCHAR.binary`. + + :param quoting: Defaults to 'auto': automatically determine set value + quoting. If all values are surrounded by the same quoting character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. 'quoted': values in enums are already quoted, they will be used @@ -1381,50 +1550,117 @@ class SET(_EnumeratedValues): .. versionadded:: 0.9.0 + :param retrieve_as_bitwise: if True, the data for the set type will be + persisted and selected using an integer value, where a set is coerced + into a bitwise mask for persistence. MySQL allows this mode which + has the advantage of being able to store values unambiguously, + such as the blank string ``''``. The datatype will appear + as the expression ``col + 0`` in a SELECT statement, so that the + value is coerced into an integer value in result sets. + This flag is required if one wishes + to persist a set that can store the blank string ``''`` as a value. + + .. warning:: + + When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is + essential that the list of set values is expressed in the + **exact same order** as exists on the MySQL database. + + .. versionadded:: 1.0.0 + + """ + self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False) values, length = self._init_values(values, kw) self.values = tuple(values) - + if not self.retrieve_as_bitwise and '' in values: + raise exc.ArgumentError( + "Can't use the blank value '' in a SET without " + "setting retrieve_as_bitwise=True") + if self.retrieve_as_bitwise: + self._bitmap = dict( + (value, 2 ** idx) + for idx, value in enumerate(self.values) + ) + self._bitmap.update( + (2 ** idx, value) + for idx, value in enumerate(self.values) + ) kw.setdefault('length', length) super(SET, self).__init__(**kw) + def column_expression(self, colexpr): + if self.retrieve_as_bitwise: + return colexpr + 0 + else: + return colexpr + def result_processor(self, dialect, coltype): - def process(value): - # The good news: - # No ',' quoting issues- commas aren't allowed in SET values - # The bad news: - # Plenty of driver inconsistencies here. 
- if isinstance(value, set): - # ..some versions convert '' to an empty set - if not value: - value.add('') - return value - # ...and some versions return strings - if value is not None: - return set(value.split(',')) - else: - return value + if self.retrieve_as_bitwise: + def process(value): + if value is not None: + value = int(value) + + return set( + util.map_bits(self._bitmap.__getitem__, value) + ) + else: + return None + else: + super_convert = super(SET, self).result_processor(dialect, coltype) + + def process(value): + if isinstance(value, util.string_types): + # MySQLdb returns a string, let's parse + if super_convert: + value = super_convert(value) + return set(re.findall(r'[^,]+', value)) + else: + # mysql-connector-python does a naive + # split(",") which throws in an empty string + if value is not None: + value.discard('') + return value return process def bind_processor(self, dialect): super_convert = super(SET, self).bind_processor(dialect) + if self.retrieve_as_bitwise: + def process(value): + if value is None: + return None + elif isinstance(value, util.int_types + util.string_types): + if super_convert: + return super_convert(value) + else: + return value + else: + int_value = 0 + for v in value: + int_value |= self._bitmap[v] + return int_value + else: - def process(value): - if value is None or isinstance( - value, util.int_types + util.string_types): - pass - else: - if None in value: - value = set(value) - value.remove(None) - value.add('') - value = ','.join(value) - if super_convert: - return super_convert(value) - else: - return value + def process(value): + # accept strings and int (actually bitflag) values directly + if value is not None and not isinstance( + value, util.int_types + util.string_types): + value = ",".join(value) + + if super_convert: + return super_convert(value) + else: + return value return process + def adapt(self, impltype, **kw): + kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise + return util.constructor_copy( + self, impltype, + *self.values, + **kw + ) + # old names MSTime = TIME MSSet = SET @@ -1465,6 +1701,7 @@ colspecs = { sqltypes.Float: FLOAT, sqltypes.Time: TIME, sqltypes.Enum: ENUM, + sqltypes.MatchType: _MatchType } # Everything 3.23 through 5.1 excepting OpenGIS types. @@ -1540,9 +1777,12 @@ class MySQLCompiler(compiler.SQLCompiler): def get_from_hint_text(self, table, text): return text - def visit_typeclause(self, typeclause): - type_ = typeclause.type.dialect_impl(self.dialect) - if isinstance(type_, sqltypes.Integer): + def visit_typeclause(self, typeclause, type_=None): + if type_ is None: + type_ = typeclause.type.dialect_impl(self.dialect) + if isinstance(type_, sqltypes.TypeDecorator): + return self.visit_typeclause(typeclause, type_.impl) + elif isinstance(type_, sqltypes.Integer): if getattr(type_, 'unsigned', False): return 'UNSIGNED INTEGER' else: @@ -1567,10 +1807,17 @@ class MySQLCompiler(compiler.SQLCompiler): def visit_cast(self, cast, **kwargs): # No cast until 4, no decimals until 5. if not self.dialect._supports_cast: + util.warn( + "Current MySQL version does not support " + "CAST; the CAST will be skipped.") return self.process(cast.clause.self_group()) type_ = self.process(cast.typeclause) if type_ is None: + util.warn( + "Datatype %s does not support CAST on MySQL; " + "the CAST will be skipped." 
% + self.dialect.type_compiler.process(cast.typeclause.type)) return self.process(cast.clause.self_group()) return 'CAST(%s AS %s)' % (self.process(cast.clause), type_) @@ -1581,7 +1828,15 @@ class MySQLCompiler(compiler.SQLCompiler): value = value.replace('\\', '\\\\') return value - def get_select_precolumns(self, select): + # override native_boolean=False behavior here, as + # MySQL still supports native boolean + def visit_true(self, element, **kw): + return "true" + + def visit_false(self, element, **kw): + return "false" + + def get_select_precolumns(self, select, **kw): """Add special MySQL keywords in place of DISTINCT. .. note:: @@ -1606,13 +1861,13 @@ class MySQLCompiler(compiler.SQLCompiler): " ON ", self.process(join.onclause, **kwargs))) - def for_update_clause(self, select): + def for_update_clause(self, select, **kw): if select._for_update_arg.read: return " LOCK IN SHARE MODE" else: return " FOR UPDATE" - def limit_clause(self, select): + def limit_clause(self, select, **kw): # MySQL supports: # LIMIT # LIMIT , @@ -1621,15 +1876,16 @@ class MySQLCompiler(compiler.SQLCompiler): # The latter is more readable for offsets but we're stuck with the # former until we can refine dialects by server revision. - limit, offset = select._limit, select._offset + limit_clause, offset_clause = select._limit_clause, \ + select._offset_clause - if (limit, offset) == (None, None): + if limit_clause is None and offset_clause is None: return '' - elif offset is not None: + elif offset_clause is not None: # As suggested by the MySQL docs, need to apply an # artificial limit if one wasn't provided # http://dev.mysql.com/doc/refman/5.0/en/select.html - if limit is None: + if limit_clause is None: # hardwire the upper limit. Currently # needed by OurSQL with Python 3 # (https://bugs.launchpad.net/oursql/+bug/686232), @@ -1637,15 +1893,15 @@ class MySQLCompiler(compiler.SQLCompiler): # bound as part of MySQL's "syntax" for OFFSET with # no LIMIT return ' \n LIMIT %s, %s' % ( - self.process(sql.literal(offset)), + self.process(offset_clause, **kw), "18446744073709551615") else: return ' \n LIMIT %s, %s' % ( - self.process(sql.literal(offset)), - self.process(sql.literal(limit))) + self.process(offset_clause, **kw), + self.process(limit_clause, **kw)) else: # No offset provided, so just use the limit - return ' \n LIMIT %s' % (self.process(sql.literal(limit)),) + return ' \n LIMIT %s' % (self.process(limit_clause, **kw),) def update_limit_clause(self, update_stmt): limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None) @@ -1670,11 +1926,10 @@ class MySQLCompiler(compiler.SQLCompiler): # creation of foreign key constraints fails." class MySQLDDLCompiler(compiler.DDLCompiler): - - def create_table_constraints(self, table): + def create_table_constraints(self, table, **kw): """Get table constraints.""" constraint_string = super( - MySQLDDLCompiler, self).create_table_constraints(table) + MySQLDDLCompiler, self).create_table_constraints(table, **kw) # why self.dialect.name and not 'mysql'? 
because of drizzle is_innodb = 'engine' in table.dialect_options[self.dialect.name] and \ @@ -1700,22 +1955,28 @@ class MySQLDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kw): """Builds column DDL.""" - colspec = [self.preparer.format_column(column), - self.dialect.type_compiler.process(column.type) - ] + colspec = [ + self.preparer.format_column(column), + self.dialect.type_compiler.process( + column.type, type_expression=column) + ] + + is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP) + + if not column.nullable: + colspec.append('NOT NULL') + + # see: http://docs.sqlalchemy.org/en/latest/dialects/ + # mysql.html#mysql_timestamp_null + elif column.nullable and is_timestamp: + colspec.append('NULL') default = self.get_column_default_string(column) if default is not None: colspec.append('DEFAULT ' + default) - is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP) - if not column.nullable and not is_timestamp: - colspec.append('NOT NULL') - - elif column.nullable and is_timestamp and default is None: - colspec.append('NULL') - - if column is column.table._autoincrement_column and \ + if column.table is not None \ + and column is column.table._autoincrement_column and \ column.server_default is None: colspec.append('AUTO_INCREMENT') @@ -1851,7 +2112,6 @@ class MySQLDDLCompiler(compiler.DDLCompiler): class MySQLTypeCompiler(compiler.GenericTypeCompiler): - def _extend_numeric(self, type_, spec): "Extend a numeric-type declaration with MySQL specific extensions." @@ -1899,7 +2159,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): def _mysql_type(self, type_): return isinstance(type_, (_StringType, _NumericType)) - def visit_NUMERIC(self, type_): + def visit_NUMERIC(self, type_, **kw): if type_.precision is None: return self._extend_numeric(type_, "NUMERIC") elif type_.scale is None: @@ -1912,7 +2172,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): {'precision': type_.precision, 'scale': type_.scale}) - def visit_DECIMAL(self, type_): + def visit_DECIMAL(self, type_, **kw): if type_.precision is None: return self._extend_numeric(type_, "DECIMAL") elif type_.scale is None: @@ -1925,7 +2185,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): {'precision': type_.precision, 'scale': type_.scale}) - def visit_DOUBLE(self, type_): + def visit_DOUBLE(self, type_, **kw): if type_.precision is not None and type_.scale is not None: return self._extend_numeric(type_, "DOUBLE(%(precision)s, %(scale)s)" % @@ -1934,7 +2194,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): else: return self._extend_numeric(type_, 'DOUBLE') - def visit_REAL(self, type_): + def visit_REAL(self, type_, **kw): if type_.precision is not None and type_.scale is not None: return self._extend_numeric(type_, "REAL(%(precision)s, %(scale)s)" % @@ -1943,7 +2203,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): else: return self._extend_numeric(type_, 'REAL') - def visit_FLOAT(self, type_): + def visit_FLOAT(self, type_, **kw): if self._mysql_type(type_) and \ type_.scale is not None and \ type_.precision is not None: @@ -1955,7 +2215,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): else: return self._extend_numeric(type_, "FLOAT") - def visit_INTEGER(self, type_): + def visit_INTEGER(self, type_, **kw): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric( type_, "INTEGER(%(display_width)s)" % @@ -1963,7 +2223,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): else: return 
self._extend_numeric(type_, "INTEGER") - def visit_BIGINT(self, type_): + def visit_BIGINT(self, type_, **kw): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric( type_, "BIGINT(%(display_width)s)" % @@ -1971,7 +2231,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): else: return self._extend_numeric(type_, "BIGINT") - def visit_MEDIUMINT(self, type_): + def visit_MEDIUMINT(self, type_, **kw): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric( type_, "MEDIUMINT(%(display_width)s)" % @@ -1979,14 +2239,14 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): else: return self._extend_numeric(type_, "MEDIUMINT") - def visit_TINYINT(self, type_): + def visit_TINYINT(self, type_, **kw): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric(type_, "TINYINT(%s)" % type_.display_width) else: return self._extend_numeric(type_, "TINYINT") - def visit_SMALLINT(self, type_): + def visit_SMALLINT(self, type_, **kw): if self._mysql_type(type_) and type_.display_width is not None: return self._extend_numeric(type_, "SMALLINT(%(display_width)s)" % @@ -1995,55 +2255,55 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): else: return self._extend_numeric(type_, "SMALLINT") - def visit_BIT(self, type_): + def visit_BIT(self, type_, **kw): if type_.length is not None: return "BIT(%s)" % type_.length else: return "BIT" - def visit_DATETIME(self, type_): + def visit_DATETIME(self, type_, **kw): if getattr(type_, 'fsp', None): return "DATETIME(%d)" % type_.fsp else: return "DATETIME" - def visit_DATE(self, type_): + def visit_DATE(self, type_, **kw): return "DATE" - def visit_TIME(self, type_): + def visit_TIME(self, type_, **kw): if getattr(type_, 'fsp', None): return "TIME(%d)" % type_.fsp else: return "TIME" - def visit_TIMESTAMP(self, type_): + def visit_TIMESTAMP(self, type_, **kw): if getattr(type_, 'fsp', None): return "TIMESTAMP(%d)" % type_.fsp else: return "TIMESTAMP" - def visit_YEAR(self, type_): + def visit_YEAR(self, type_, **kw): if type_.display_width is None: return "YEAR" else: return "YEAR(%s)" % type_.display_width - def visit_TEXT(self, type_): + def visit_TEXT(self, type_, **kw): if type_.length: return self._extend_string(type_, {}, "TEXT(%d)" % type_.length) else: return self._extend_string(type_, {}, "TEXT") - def visit_TINYTEXT(self, type_): + def visit_TINYTEXT(self, type_, **kw): return self._extend_string(type_, {}, "TINYTEXT") - def visit_MEDIUMTEXT(self, type_): + def visit_MEDIUMTEXT(self, type_, **kw): return self._extend_string(type_, {}, "MEDIUMTEXT") - def visit_LONGTEXT(self, type_): + def visit_LONGTEXT(self, type_, **kw): return self._extend_string(type_, {}, "LONGTEXT") - def visit_VARCHAR(self, type_): + def visit_VARCHAR(self, type_, **kw): if type_.length: return self._extend_string( type_, {}, "VARCHAR(%d)" % type_.length) @@ -2052,14 +2312,14 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): "VARCHAR requires a length on dialect %s" % self.dialect.name) - def visit_CHAR(self, type_): + def visit_CHAR(self, type_, **kw): if type_.length: return self._extend_string(type_, {}, "CHAR(%(length)s)" % {'length': type_.length}) else: return self._extend_string(type_, {}, "CHAR") - def visit_NVARCHAR(self, type_): + def visit_NVARCHAR(self, type_, **kw): # We'll actually generate the equiv. "NATIONAL VARCHAR" instead # of "NVARCHAR". 
if type_.length: @@ -2071,7 +2331,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): "NVARCHAR requires a length on dialect %s" % self.dialect.name) - def visit_NCHAR(self, type_): + def visit_NCHAR(self, type_, **kw): # We'll actually generate the equiv. # "NATIONAL CHAR" instead of "NCHAR". if type_.length: @@ -2081,31 +2341,31 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): else: return self._extend_string(type_, {'national': True}, "CHAR") - def visit_VARBINARY(self, type_): + def visit_VARBINARY(self, type_, **kw): return "VARBINARY(%d)" % type_.length - def visit_large_binary(self, type_): + def visit_large_binary(self, type_, **kw): return self.visit_BLOB(type_) - def visit_enum(self, type_): + def visit_enum(self, type_, **kw): if not type_.native_enum: return super(MySQLTypeCompiler, self).visit_enum(type_) else: return self._visit_enumerated_values("ENUM", type_, type_.enums) - def visit_BLOB(self, type_): + def visit_BLOB(self, type_, **kw): if type_.length: return "BLOB(%d)" % type_.length else: return "BLOB" - def visit_TINYBLOB(self, type_): + def visit_TINYBLOB(self, type_, **kw): return "TINYBLOB" - def visit_MEDIUMBLOB(self, type_): + def visit_MEDIUMBLOB(self, type_, **kw): return "MEDIUMBLOB" - def visit_LONGBLOB(self, type_): + def visit_LONGBLOB(self, type_, **kw): return "LONGBLOB" def _visit_enumerated_values(self, name, type_, enumerated_values): @@ -2116,15 +2376,15 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler): name, ",".join(quoted_enums)) ) - def visit_ENUM(self, type_): + def visit_ENUM(self, type_, **kw): return self._visit_enumerated_values("ENUM", type_, type_._enumerated_values) - def visit_SET(self, type_): + def visit_SET(self, type_, **kw): return self._visit_enumerated_values("SET", type_, type_._enumerated_values) - def visit_BOOLEAN(self, type): + def visit_BOOLEAN(self, type, **kw): return "BOOL" @@ -2151,7 +2411,6 @@ class MySQLIdentifierPreparer(compiler.IdentifierPreparer): @log.class_logger class MySQLDialect(default.DefaultDialect): - """Details of the MySQL dialect. Not used directly in application code. """ @@ -2159,6 +2418,10 @@ class MySQLDialect(default.DefaultDialect): name = 'mysql' supports_alter = True + # MySQL has no true "boolean" type; we + # allow for the "true" and "false" keywords, however + supports_native_boolean = False + # identifiers are 64, however aliases can be 255... max_identifier_length = 255 max_index_name_length = 64 @@ -2249,7 +2512,7 @@ class MySQLDialect(default.DefaultDialect): # basic operations via autocommit fail. 
try: dbapi_connection.commit() - except: + except Exception: if self.server_version_info < (3, 23, 15): args = sys.exc_info()[1].args if args and args[0] == 1064: @@ -2261,7 +2524,7 @@ class MySQLDialect(default.DefaultDialect): try: dbapi_connection.rollback() - except: + except Exception: if self.server_version_info < (3, 23, 15): args = sys.exc_info()[1].args if args and args[0] == 1064: @@ -2345,7 +2608,8 @@ class MySQLDialect(default.DefaultDialect): rs = None try: try: - rs = connection.execute(st) + rs = connection.execution_options( + skip_user_error_events=True).execute(st) have = rs.fetchone() is not None rs.close() return have @@ -2501,13 +2765,14 @@ class MySQLDialect(default.DefaultDialect): pass else: self.logger.info( - "Converting unknown KEY type %s to a plain KEY" % flavor) + "Converting unknown KEY type %s to a plain KEY", flavor) pass index_d = {} index_d['name'] = spec['name'] index_d['column_names'] = [s[0] for s in spec['columns']] index_d['unique'] = unique - index_d['type'] = flavor + if flavor: + index_d['type'] = flavor indexes.append(index_d) return indexes @@ -2520,7 +2785,8 @@ class MySQLDialect(default.DefaultDialect): return [ { 'name': key['name'], - 'column_names': [col[0] for col in key['columns']] + 'column_names': [col[0] for col in key['columns']], + 'duplicates_index': key['name'], } for key in parsed_state.keys if key['type'] == 'UNIQUE' @@ -2568,7 +2834,7 @@ class MySQLDialect(default.DefaultDialect): schema, table_name)) sql = self._show_create_table(connection, None, charset, full_name=full_name) - if sql.startswith('CREATE ALGORITHM'): + if re.match(r'^CREATE (?:ALGORITHM)?.* VIEW', sql): # Adapt views to something table-like. columns = self._describe_table(connection, None, charset, full_name=full_name) @@ -2651,7 +2917,8 @@ class MySQLDialect(default.DefaultDialect): rp = None try: - rp = connection.execute(st) + rp = connection.execution_options( + skip_user_error_events=True).execute(st) except exc.DBAPIError as e: if self._extract_error_code(e.orig) == 1146: raise exc.NoSuchTableError(full_name) @@ -2675,7 +2942,8 @@ class MySQLDialect(default.DefaultDialect): rp, rows = None, None try: try: - rp = connection.execute(st) + rp = connection.execution_options( + skip_user_error_events=True).execute(st) except exc.DBAPIError as e: if self._extract_error_code(e.orig) == 1146: raise exc.NoSuchTableError(full_name) @@ -2689,7 +2957,6 @@ class MySQLDialect(default.DefaultDialect): class ReflectedState(object): - """Stores raw information about a SHOW CREATE TABLE statement.""" def __init__(self): @@ -2702,7 +2969,6 @@ class ReflectedState(object): @log.class_logger class MySQLTableDefinitionParser(object): - """Parses the results of a SHOW CREATE TABLE statement.""" def __init__(self, dialect, preparer): @@ -2839,8 +3105,7 @@ class MySQLTableDefinitionParser(object): if not spec['full']: util.warn("Incomplete reflection of column definition %r" % line) - name, type_, args, notnull = \ - spec['name'], spec['coltype'], spec['arg'], spec['notnull'] + name, type_, args = spec['name'], spec['coltype'], spec['arg'] try: col_type = self.dialect.ischema_names[type_] @@ -2859,23 +3124,31 @@ class MySQLTableDefinitionParser(object): # Column type keyword options type_kw = {} + + if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)): + if type_args: + type_kw['fsp'] = type_args.pop(0) + for kw in ('unsigned', 'zerofill'): if spec.get(kw, False): type_kw[kw] = True for kw in ('charset', 'collate'): if spec.get(kw, False): type_kw[kw] = spec[kw] - if 
issubclass(col_type, _EnumeratedValues):
type_args = _EnumeratedValues._strip_values(type_args)
+ if issubclass(col_type, SET) and '' in type_args:
+ type_kw['retrieve_as_bitwise'] = True
+
type_instance = col_type(*type_args, **type_kw)
- col_args, col_kw = [], {}
+ col_kw = {}
# NOT NULL
col_kw['nullable'] = True
- if spec.get('notnull', False):
+ # this can be "NULL" in the case of TIMESTAMP
+ if spec.get('notnull', False) == 'NOT NULL':
col_kw['nullable'] = False
# AUTO_INCREMENT
@@ -2994,7 +3267,7 @@ class MySQLTableDefinitionParser(object):
r'(?: +(?P<zerofill>ZEROFILL))?'
r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
r'(?: +COLLATE +(?P<collate>[\w_]+))?'
- r'(?: +(?P<notnull>NOT NULL))?'
+ r'(?: +(?P<notnull>(?:NOT )?NULL))?'
r'(?: +DEFAULT +(?P<default>'
r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
r'(?: +ON UPDATE \w+)?)'
r')?'
@@ -3014,7 +3287,7 @@ class MySQLTableDefinitionParser(object):
r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
r'(?P<coltype>\w+)'
r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
- r'.*?(?P<notnull>NOT NULL)?'
+ r'.*?(?P<notnull>(?:NOT )NULL)?'
% quotes )
@@ -3109,7 +3382,6 @@ _options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
class _DecodingRowProxy(object):
-
"""Return unicode-decoded values based on type inspection.
Smooth over data type issues (esp. with alpha driver versions) and
@@ -3122,9 +3394,17 @@ class _DecodingRowProxy(object):
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
+ _encoding_compat = {
+ 'koi8r': 'koi8_r',
+ 'koi8u': 'koi8_u',
+ 'utf16': 'utf-16-be', # MySQL's utf16 is always bigendian
+ 'utf8mb4': 'utf8', # real utf8
+ 'eucjpms': 'ujis',
+ }
+
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
- self.charset = charset
+ self.charset = self._encoding_compat.get(charset, charset)
def __getitem__(self, index):
item = self.rowproxy[index]
diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/cymysql.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/cymysql.py
index 51b6304..8bc0ae3 100644
--- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/cymysql.py
+++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/cymysql.py
@@ -1,5 +1,5 @@
# mysql/cymysql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# # # This module is part of SQLAlchemy and is released under
diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py
index 0059f5a..4e36588 100644
--- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py
+++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py
@@ -1,5 +1,5 @@
# mysql/gaerdbms.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# # # This module is part of SQLAlchemy and is released under
@@ -17,6 +17,13 @@ developers-guide
.. versionadded:: 0.7.8
+ .. deprecated:: 1.0 This dialect is **no longer necessary** for
+ Google Cloud SQL; the MySQLdb dialect can be used directly.
+ Cloud SQL now recommends creating connections via the + mysql dialect using the URL format + + ``mysql+mysqldb://root@/?unix_socket=/cloudsql/:`` + Pooling ------- @@ -33,6 +40,7 @@ import os from .mysqldb import MySQLDialect_mysqldb from ...pool import NullPool import re +from sqlalchemy.util import warn_deprecated def _is_dev_environment(): @@ -43,6 +51,14 @@ class MySQLDialect_gaerdbms(MySQLDialect_mysqldb): @classmethod def dbapi(cls): + + warn_deprecated( + "Google Cloud SQL now recommends creating connections via the " + "MySQLdb dialect directly, using the URL format " + "mysql+mysqldb://root@/?unix_socket=/cloudsql/" + ":" + ) + # from django: # http://code.google.com/p/googleappengine/source/ # browse/trunk/python/google/storage/speckle/ diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py index a2535d0..a3a3f2b 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -1,5 +1,5 @@ # mysql/mysqlconnector.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -14,6 +14,12 @@ :url: http://dev.mysql.com/downloads/connector/python/ +Unicode +------- + +Please see :ref:`mysql_unicode` for current recommendations on unicode +handling. + """ from .base import (MySQLDialect, MySQLExecutionContext, @@ -21,6 +27,7 @@ from .base import (MySQLDialect, MySQLExecutionContext, BIT) from ... import util +import re class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext): @@ -31,18 +38,34 @@ class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext): class MySQLCompiler_mysqlconnector(MySQLCompiler): def visit_mod_binary(self, binary, operator, **kw): - return self.process(binary.left, **kw) + " %% " + \ - self.process(binary.right, **kw) + if self.dialect._mysqlconnector_double_percents: + return self.process(binary.left, **kw) + " %% " + \ + self.process(binary.right, **kw) + else: + return self.process(binary.left, **kw) + " % " + \ + self.process(binary.right, **kw) def post_process_text(self, text): - return text.replace('%', '%%') + if self.dialect._mysqlconnector_double_percents: + return text.replace('%', '%%') + else: + return text + + def escape_literal_column(self, text): + if self.dialect._mysqlconnector_double_percents: + return text.replace('%', '%%') + else: + return text class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer): def _escape_identifier(self, value): value = value.replace(self.escape_quote, self.escape_to_quote) - return value.replace("%", "%%") + if self.dialect._mysqlconnector_double_percents: + return value.replace("%", "%%") + else: + return value class _myconnpyBIT(BIT): @@ -55,8 +78,6 @@ class _myconnpyBIT(BIT): class MySQLDialect_mysqlconnector(MySQLDialect): driver = 'mysqlconnector' - if util.py2k: - supports_unicode_statements = False supports_unicode_binds = True supports_sane_rowcount = True @@ -77,6 +98,10 @@ class MySQLDialect_mysqlconnector(MySQLDialect): } ) + @util.memoized_property + def supports_unicode_statements(self): + return util.py3k or self._mysqlconnector_version_info > (2, 0) + @classmethod def dbapi(cls): from mysql import connector @@ -89,8 +114,10 @@ class MySQLDialect_mysqlconnector(MySQLDialect): util.coerce_kw_type(opts, 'buffered', bool) 
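# (values parsed from the URL query string arrive as strings; the
# coerce_kw_type() calls above and below cast them to the expected
# Python types before they reach mysql.connector.connect())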
util.coerce_kw_type(opts, 'raise_on_warnings', bool)
+
+ # unfortunately, MySQL/connector python refuses to release a
+ # cursor without reading fully, so non-buffered isn't an option
opts.setdefault('buffered', True)
- opts.setdefault('raise_on_warnings', True)
# FOUND_ROWS must be set in ClientFlag to enable
# supports_sane_rowcount.
try:
from mysql.connector.constants import ClientFlag
client_flags = opts.get( 'client_flags', ClientFlag.get_default())
client_flags |= ClientFlag.FOUND_ROWS
opts['client_flags'] = client_flags
- except:
+ except Exception:
pass
return [[], opts]
+ @util.memoized_property
+ def _mysqlconnector_version_info(self):
+ if self.dbapi and hasattr(self.dbapi, '__version__'):
+ m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
+ self.dbapi.__version__)
+ if m:
+ return tuple(
+ int(x)
+ for x in m.group(1, 2, 3)
+ if x is not None)
+
+ @util.memoized_property
+ def _mysqlconnector_double_percents(self):
+ return not util.py3k and self._mysqlconnector_version_info < (2, 0)
+
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = dbapi_con.get_server_version()
diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/mysqldb.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/mysqldb.py
index 7cb2e66..9c35eb7 100644
--- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/mysqldb.py
+++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/mysqldb.py
@@ -1,5 +1,5 @@
# mysql/mysqldb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# # # This module is part of SQLAlchemy and is released under
@@ -13,78 +13,101 @@
:connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://sourceforge.net/projects/mysql-python
+.. _mysqldb_unicode:
Unicode
-------
-MySQLdb requires a "charset" parameter to be passed in order for it
-to handle non-ASCII characters correctly. When this parameter is passed,
-MySQLdb will also implicitly set the "use_unicode" flag to true, which means
-that it will return Python unicode objects instead of bytestrings.
-However, SQLAlchemy's decode process, when C extensions are enabled,
-is orders of magnitude faster than that of MySQLdb as it does not call into
-Python functions to do so. Therefore, the **recommended URL to use for
-unicode** will include both charset and use_unicode=0::
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
- create_engine("mysql+mysqldb://user:pass@host/dbname?charset=utf8&use_unicode=0")
+Py3K Support
+------------
-As of this writing, MySQLdb only runs on Python 2. It is not known how
-MySQLdb behaves on Python 3 as far as unicode decoding.
+Currently, MySQLdb only runs on Python 2 and development has been stopped.
+`mysqlclient`_ is a fork of MySQLdb and provides Python 3 support as well
+as some bugfixes.
+.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python
-Known Issues
-------------
-MySQL-python version 1.2.2 has a serious memory leak related
-to unicode conversion, a feature which is disabled via ``use_unicode=0``.
-It is strongly advised to use the latest version of MySQL-Python.
+Using MySQLdb with Google Cloud SQL
+-----------------------------------
+
Google Cloud SQL now recommends use of the MySQLdb dialect.
Connect +using a URL like the following:: + + mysql+mysqldb://root@/?unix_socket=/cloudsql/: """ from .base import (MySQLDialect, MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer) -from ...connectors.mysqldb import ( - MySQLDBExecutionContext, - MySQLDBCompiler, - MySQLDBIdentifierPreparer, - MySQLDBConnector -) from .base import TEXT from ... import sql +from ... import util +import re -class MySQLExecutionContext_mysqldb( - MySQLDBExecutionContext, - MySQLExecutionContext): - pass +class MySQLExecutionContext_mysqldb(MySQLExecutionContext): + + @property + def rowcount(self): + if hasattr(self, '_rowcount'): + return self._rowcount + else: + return self.cursor.rowcount -class MySQLCompiler_mysqldb(MySQLDBCompiler, MySQLCompiler): - pass +class MySQLCompiler_mysqldb(MySQLCompiler): + def visit_mod_binary(self, binary, operator, **kw): + return self.process(binary.left, **kw) + " %% " + \ + self.process(binary.right, **kw) + + def post_process_text(self, text): + return text.replace('%', '%%') -class MySQLIdentifierPreparer_mysqldb( - MySQLDBIdentifierPreparer, - MySQLIdentifierPreparer): - pass +class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer): + + def _escape_identifier(self, value): + value = value.replace(self.escape_quote, self.escape_to_quote) + return value.replace("%", "%%") -class MySQLDialect_mysqldb(MySQLDBConnector, MySQLDialect): +class MySQLDialect_mysqldb(MySQLDialect): + driver = 'mysqldb' + supports_unicode_statements = True + supports_sane_rowcount = True + supports_sane_multi_rowcount = True + + supports_native_decimal = True + + default_paramstyle = 'format' execution_ctx_cls = MySQLExecutionContext_mysqldb statement_compiler = MySQLCompiler_mysqldb preparer = MySQLIdentifierPreparer_mysqldb + @classmethod + def dbapi(cls): + return __import__('MySQLdb') + + def do_executemany(self, cursor, statement, parameters, context=None): + rowcount = cursor.executemany(statement, parameters) + if context is not None: + context._rowcount = rowcount + def _check_unicode_returns(self, connection): # work around issue fixed in # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8 # specific issue w/ the utf8_bin collation and unicode returns - has_utf8_bin = connection.scalar( - "show collation where %s = 'utf8' and %s = 'utf8_bin'" - % ( - self.identifier_preparer.quote("Charset"), - self.identifier_preparer.quote("Collation") - )) + has_utf8_bin = self.server_version_info > (5, ) and \ + connection.scalar( + "show collation where %s = 'utf8' and %s = 'utf8_bin'" + % ( + self.identifier_preparer.quote("Charset"), + self.identifier_preparer.quote("Collation") + )) if has_utf8_bin: additional_tests = [ sql.collate(sql.cast( @@ -94,7 +117,82 @@ class MySQLDialect_mysqldb(MySQLDBConnector, MySQLDialect): ] else: additional_tests = [] - return super(MySQLDBConnector, self)._check_unicode_returns( + return super(MySQLDialect_mysqldb, self)._check_unicode_returns( connection, additional_tests) + def create_connect_args(self, url): + opts = url.translate_connect_args(database='db', username='user', + password='passwd') + opts.update(url.query) + + util.coerce_kw_type(opts, 'compress', bool) + util.coerce_kw_type(opts, 'connect_timeout', int) + util.coerce_kw_type(opts, 'read_timeout', int) + util.coerce_kw_type(opts, 'client_flag', int) + util.coerce_kw_type(opts, 'local_infile', int) + # Note: using either of the below will cause all strings to be + # returned as Unicode, both in raw SQL operations and with column + # types like 
String and MSString. + util.coerce_kw_type(opts, 'use_unicode', bool) + util.coerce_kw_type(opts, 'charset', str) + + # Rich values 'cursorclass' and 'conv' are not supported via + # query string. + + ssl = {} + keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher'] + for key in keys: + if key in opts: + ssl[key[4:]] = opts[key] + util.coerce_kw_type(ssl, key[4:], str) + del opts[key] + if ssl: + opts['ssl'] = ssl + + # FOUND_ROWS must be set in CLIENT_FLAGS to enable + # supports_sane_rowcount. + client_flag = opts.get('client_flag', 0) + if self.dbapi is not None: + try: + CLIENT_FLAGS = __import__( + self.dbapi.__name__ + '.constants.CLIENT' + ).constants.CLIENT + client_flag |= CLIENT_FLAGS.FOUND_ROWS + except (AttributeError, ImportError): + self.supports_sane_rowcount = False + opts['client_flag'] = client_flag + return [[], opts] + + def _get_server_version_info(self, connection): + dbapi_con = connection.connection + version = [] + r = re.compile('[.\-]') + for n in r.split(dbapi_con.get_server_info()): + try: + version.append(int(n)) + except ValueError: + version.append(n) + return tuple(version) + + def _extract_error_code(self, exception): + return exception.args[0] + + def _detect_charset(self, connection): + """Sniff out the character set in use for connection results.""" + + try: + # note: the SQL here would be + # "SHOW VARIABLES LIKE 'character_set%%'" + cset_name = connection.connection.character_set_name + except AttributeError: + util.warn( + "No 'character_set_name' can be detected with " + "this MySQL-Python version; " + "please upgrade to a recent version of MySQL-Python. " + "Assuming latin1.") + return 'latin1' + else: + return cset_name() + + dialect = MySQLDialect_mysqldb diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/oursql.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/oursql.py index fa127f3..b91db18 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/oursql.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/oursql.py @@ -1,5 +1,5 @@ # mysql/oursql.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -16,22 +16,10 @@ Unicode ------- -oursql defaults to using ``utf8`` as the connection charset, but other -encodings may be used instead. Like the MySQL-Python driver, unicode support -can be completely disabled:: +Please see :ref:`mysql_unicode` for current recommendations on unicode +handling. 
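+
+As a minimal standalone sketch of the version tokenizing performed by
+``_get_server_version_info`` in the mysqldb hunk above (the sample
+version string is made up)::
+
+    import re
+
+    def parse_server_version(raw):
+        # numeric tokens become ints; trailing tags (e.g. "log")
+        # stay strings, as in the dialect's own parsing
+        parts = []
+        for token in re.split(r'[.\-]', raw):
+            try:
+                parts.append(int(token))
+            except ValueError:
+                parts.append(token)
+        return tuple(parts)
+
+    print(parse_server_version("5.6.27-log"))   # (5, 6, 27, 'log')
+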
-  # oursql sets the connection charset to utf8 automatically; all strings come
-  # back as utf8 str
-  create_engine('mysql+oursql:///mydb?use_unicode=0')

-To not automatically use ``utf8`` and instead use whatever the connection
-defaults to, there is a separate parameter::
-
-  # use the default connection charset; all strings come back as unicode
-  create_engine('mysql+oursql:///mydb?default_charset=1')
-
-  # use latin1 as the connection charset; all strings come back as unicode
-  create_engine('mysql+oursql:///mydb?charset=latin1')
-
 """

 import re
diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/pymysql.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/pymysql.py
index 31226ce..3c493fb 100644
--- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/pymysql.py
+++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/pymysql.py
@@ -1,5 +1,5 @@
 # mysql/pymysql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
 # <see AUTHORS file>
 #
 # This module is part of SQLAlchemy and is released under
@@ -12,7 +12,13 @@
     :dbapi: pymysql
     :connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
        [?<options>]
-    :url: http://code.google.com/p/pymysql/
+    :url: http://www.pymysql.org/
+
+Unicode
+-------
+
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.

 MySQL-Python Compatibility
 --------------------------
@@ -31,8 +37,12 @@ class MySQLDialect_pymysql(MySQLDialect_mysqldb):
     driver = 'pymysql'

     description_encoding = None
-    if py3k:
-        supports_unicode_statements = True
+
+    # generally, these two values should be both True
+    # or both False. PyMySQL unicode tests pass all the way back
+    # to 0.4 either way. See [ticket:3337]
+    supports_unicode_statements = True
+    supports_unicode_binds = True

     @classmethod
     def dbapi(cls):
diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/pyodbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/pyodbc.py
index 58e8b30..882d3ea 100644
--- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/pyodbc.py
+++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/pyodbc.py
@@ -1,5 +1,5 @@
 # mysql/pyodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
 # <see AUTHORS file>
 #
 # This module is part of SQLAlchemy and is released under
@@ -14,14 +14,11 @@
     :connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
     :url: http://pypi.python.org/pypi/pyodbc/

-
-Limitations
------------
-
-The mysql-pyodbc dialect is subject to unresolved character encoding issues
-which exist within the current ODBC drivers available.
-(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
-of OurSQL, MySQLdb, or MySQL-connector/Python.
+    .. note:: The PyODBC for MySQL dialect is not well supported, and
+       is subject to unresolved character encoding issues
+       which exist within the current ODBC drivers available.
+       (see http://code.google.com/p/pyodbc/issues/detail?id=25).
+       Other dialects for MySQL are recommended.
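+
+    As a purely illustrative example, an equivalent connection through
+    one of the recommended dialects might look like (credentials here
+    are made up)::
+
+        from sqlalchemy import create_engine
+
+        engine = create_engine(
+            "mysql+pymysql://scott:tiger@localhost/test?charset=utf8")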
""" diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/zxjdbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/zxjdbc.py index 0cf92cd..fe4c137 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/zxjdbc.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/mysql/zxjdbc.py @@ -1,5 +1,5 @@ # mysql/zxjdbc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -14,6 +14,9 @@ :driverurl: http://dev.mysql.com/downloads/connector/j/ + .. note:: Jython is not supported by current versions of SQLAlchemy. The + zxjdbc dialect should be considered as experimental. + Character Sets -------------- diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/__init__.py b/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/__init__.py index fd32f22..0c5c317 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/__init__.py @@ -1,5 +1,5 @@ # oracle/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/base.py b/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/base.py index 4d45f85..eb63983 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/base.py @@ -1,5 +1,5 @@ # oracle/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -213,15 +213,81 @@ is reflected and the type is reported as ``DATE``, the time-supporting examining the type of column for use in special Python translations or for migrating schemas to other database backends. +.. _oracle_table_options: + +Oracle Table Options +------------------------- + +The CREATE TABLE phrase supports the following options with Oracle +in conjunction with the :class:`.Table` construct: + + +* ``ON COMMIT``:: + + Table( + "some_table", metadata, ..., + prefixes=['GLOBAL TEMPORARY'], oracle_on_commit='PRESERVE ROWS') + +.. versionadded:: 1.0.0 + +* ``COMPRESS``:: + + Table('mytable', metadata, Column('data', String(32)), + oracle_compress=True) + + Table('mytable', metadata, Column('data', String(32)), + oracle_compress=6) + + The ``oracle_compress`` parameter accepts either an integer compression + level, or ``True`` to use the default compression level. + +.. versionadded:: 1.0.0 + +.. _oracle_index_options: + +Oracle Specific Index Options +----------------------------- + +Bitmap Indexes +~~~~~~~~~~~~~~ + +You can specify the ``oracle_bitmap`` parameter to create a bitmap index +instead of a B-tree index:: + + Index('my_index', my_table.c.data, oracle_bitmap=True) + +Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy will not +check for such limitations, only the database will. + +.. versionadded:: 1.0.0 + +Index compression +~~~~~~~~~~~~~~~~~ + +Oracle has a more efficient storage mode for indexes containing lots of +repeated values. 
Use the ``oracle_compress`` parameter to turn on key c +ompression:: + + Index('my_index', my_table.c.data, oracle_compress=True) + + Index('my_index', my_table.c.data1, my_table.c.data2, unique=True, + oracle_compress=1) + +The ``oracle_compress`` parameter accepts either an integer specifying the +number of prefix columns to compress, or ``True`` to use the default (all +columns for non-unique indexes, all but the last column for unique indexes). + +.. versionadded:: 1.0.0 + """ import re from sqlalchemy import util, sql -from sqlalchemy.engine import default, base, reflection +from sqlalchemy.engine import default, reflection from sqlalchemy.sql import compiler, visitors, expression -from sqlalchemy.sql import (operators as sql_operators, - functions as sql_functions) +from sqlalchemy.sql import operators as sql_operators +from sqlalchemy.sql.elements import quoted_name from sqlalchemy import types as sqltypes, schema as sa_schema from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \ BLOB, CLOB, TIMESTAMP, FLOAT @@ -300,7 +366,6 @@ class LONG(sqltypes.Text): class DATE(sqltypes.DateTime): - """Provide the oracle DATE type. This type has no special Python behavior, except that it subclasses @@ -349,7 +414,6 @@ class INTERVAL(sqltypes.TypeEngine): class ROWID(sqltypes.TypeEngine): - """Oracle ROWID type. When used in a cast() or similar, generates ROWID. @@ -359,7 +423,6 @@ class ROWID(sqltypes.TypeEngine): class _OracleBoolean(sqltypes.Boolean): - def get_dbapi_type(self, dbapi): return dbapi.NUMBER @@ -395,19 +458,19 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler): # Oracle does not allow milliseconds in DATE # Oracle does not support TIME columns - def visit_datetime(self, type_): - return self.visit_DATE(type_) + def visit_datetime(self, type_, **kw): + return self.visit_DATE(type_, **kw) - def visit_float(self, type_): - return self.visit_FLOAT(type_) + def visit_float(self, type_, **kw): + return self.visit_FLOAT(type_, **kw) - def visit_unicode(self, type_): + def visit_unicode(self, type_, **kw): if self.dialect._supports_nchar: - return self.visit_NVARCHAR2(type_) + return self.visit_NVARCHAR2(type_, **kw) else: - return self.visit_VARCHAR2(type_) + return self.visit_VARCHAR2(type_, **kw) - def visit_INTERVAL(self, type_): + def visit_INTERVAL(self, type_, **kw): return "INTERVAL DAY%s TO SECOND%s" % ( type_.day_precision is not None and "(%d)" % type_.day_precision or @@ -417,22 +480,22 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler): "", ) - def visit_LONG(self, type_): + def visit_LONG(self, type_, **kw): return "LONG" - def visit_TIMESTAMP(self, type_): + def visit_TIMESTAMP(self, type_, **kw): if type_.timezone: return "TIMESTAMP WITH TIME ZONE" else: return "TIMESTAMP" - def visit_DOUBLE_PRECISION(self, type_): - return self._generate_numeric(type_, "DOUBLE PRECISION") + def visit_DOUBLE_PRECISION(self, type_, **kw): + return self._generate_numeric(type_, "DOUBLE PRECISION", **kw) def visit_NUMBER(self, type_, **kw): return self._generate_numeric(type_, "NUMBER", **kw) - def _generate_numeric(self, type_, name, precision=None, scale=None): + def _generate_numeric(self, type_, name, precision=None, scale=None, **kw): if precision is None: precision = type_.precision @@ -448,17 +511,17 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler): n = "%(name)s(%(precision)s, %(scale)s)" return n % {'name': name, 'precision': precision, 'scale': scale} - def visit_string(self, type_): - return self.visit_VARCHAR2(type_) + def visit_string(self, type_, **kw): 
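+        # **kw mirrors the other visit_* methods here: the type compiler
+        # can now receive extra context (e.g. a type_expression keyword)
+        # and forward it along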
+ return self.visit_VARCHAR2(type_, **kw) - def visit_VARCHAR2(self, type_): + def visit_VARCHAR2(self, type_, **kw): return self._visit_varchar(type_, '', '2') - def visit_NVARCHAR2(self, type_): + def visit_NVARCHAR2(self, type_, **kw): return self._visit_varchar(type_, 'N', '2') visit_NVARCHAR = visit_NVARCHAR2 - def visit_VARCHAR(self, type_): + def visit_VARCHAR(self, type_, **kw): return self._visit_varchar(type_, '', '') def _visit_varchar(self, type_, n, num): @@ -471,36 +534,35 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler): varchar = "%(n)sVARCHAR%(two)s(%(length)s)" return varchar % {'length': type_.length, 'two': num, 'n': n} - def visit_text(self, type_): - return self.visit_CLOB(type_) + def visit_text(self, type_, **kw): + return self.visit_CLOB(type_, **kw) - def visit_unicode_text(self, type_): + def visit_unicode_text(self, type_, **kw): if self.dialect._supports_nchar: - return self.visit_NCLOB(type_) + return self.visit_NCLOB(type_, **kw) else: - return self.visit_CLOB(type_) + return self.visit_CLOB(type_, **kw) - def visit_large_binary(self, type_): - return self.visit_BLOB(type_) + def visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_, **kw) - def visit_big_integer(self, type_): - return self.visit_NUMBER(type_, precision=19) + def visit_big_integer(self, type_, **kw): + return self.visit_NUMBER(type_, precision=19, **kw) - def visit_boolean(self, type_): - return self.visit_SMALLINT(type_) + def visit_boolean(self, type_, **kw): + return self.visit_SMALLINT(type_, **kw) - def visit_RAW(self, type_): + def visit_RAW(self, type_, **kw): if type_.length: return "RAW(%(length)s)" % {'length': type_.length} else: return "RAW" - def visit_ROWID(self, type_): + def visit_ROWID(self, type_, **kw): return "ROWID" class OracleCompiler(compiler.SQLCompiler): - """Oracle compiler modifies the lexical structure of Select statements to work under non-ANSI configured Oracle databases, if the use_ansi flag is False. @@ -538,6 +600,9 @@ class OracleCompiler(compiler.SQLCompiler): def visit_false(self, expr, **kw): return '0' + def get_cte_preamble(self, recursive): + return "WITH" + def get_select_hint_text(self, byfroms): return " ".join( "/*+ %s */" % text for table, text in byfroms.items() @@ -601,29 +666,17 @@ class OracleCompiler(compiler.SQLCompiler): else: return sql.and_(*clauses) - def visit_outer_join_column(self, vc): - return self.process(vc.column) + "(+)" + def visit_outer_join_column(self, vc, **kw): + return self.process(vc.column, **kw) + "(+)" def visit_sequence(self, seq): return (self.dialect.identifier_preparer.format_sequence(seq) + ".nextval") - def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs): - """Oracle doesn't like ``FROM table AS alias``. Is the AS standard - SQL?? 
- """ + def get_render_as_alias_suffix(self, alias_name_text): + """Oracle doesn't like ``FROM table AS alias``""" - if asfrom or ashint: - alias_name = isinstance(alias.name, expression._truncated_label) and \ - self._truncated_identifier("alias", alias.name) or alias.name - - if ashint: - return alias_name - elif asfrom: - return self.process(alias.original, asfrom=asfrom, **kwargs) + \ - " " + self.preparer.format_alias(alias, alias_name) - else: - return self.process(alias.original, **kwargs) + return " " + alias_name_text def returning_clause(self, stmt, returning_cols): columns = [] @@ -640,8 +693,9 @@ class OracleCompiler(compiler.SQLCompiler): self.bindparam_string(self._truncate_bindparam(outparam))) columns.append( self.process(col_expr, within_columns_clause=False)) - self.result_map[outparam.key] = ( - outparam.key, + + self._add_to_result_map( + outparam.key, outparam.key, (column, getattr(column, 'name', None), getattr(column, 'key', None)), column.type @@ -669,9 +723,11 @@ class OracleCompiler(compiler.SQLCompiler): select = select.where(whereclause) select._oracle_visit = True - if select._limit is not None or select._offset is not None: + limit_clause = select._limit_clause + offset_clause = select._offset_clause + if limit_clause is not None or offset_clause is not None: # See http://www.oracle.com/technology/oramag/oracle/06-sep/\ - # o56asktom.html + # o56asktom.html # # Generalized form of an Oracle pagination query: # select ... from ( @@ -682,13 +738,15 @@ class OracleCompiler(compiler.SQLCompiler): # Outer select and "ROWNUM as ora_rn" can be dropped if # limit=0 - # TODO: use annotations instead of clone + attr set ? + kwargs['select_wraps_for'] = select select = select._generate() select._oracle_visit = True # Wrap the middle select and add the hint limitselect = sql.select([c for c in select.c]) - if select._limit and self.dialect.optimize_limits: + if limit_clause is not None and \ + self.dialect.optimize_limits and \ + select._simple_int_limit: limitselect = limitselect.prefix_with( "/*+ FIRST_ROWS(%d) */" % select._limit) @@ -697,17 +755,24 @@ class OracleCompiler(compiler.SQLCompiler): limitselect._is_wrapper = True # If needed, add the limiting clause - if select._limit is not None: - max_row = select._limit - if select._offset is not None: - max_row += select._offset + if limit_clause is not None: if not self.dialect.use_binds_for_limits: + # use simple int limits, will raise an exception + # if the limit isn't specified this way + max_row = select._limit + + if offset_clause is not None: + max_row += select._offset max_row = sql.literal_column("%d" % max_row) + else: + max_row = limit_clause + if offset_clause is not None: + max_row = max_row + offset_clause limitselect.append_whereclause( sql.literal_column("ROWNUM") <= max_row) # If needed, add the ora_rn, and wrap again with offset. 
- if select._offset is None: + if offset_clause is None: limitselect._for_update_arg = select._for_update_arg select = limitselect else: @@ -721,22 +786,21 @@ class OracleCompiler(compiler.SQLCompiler): offsetselect._oracle_visit = True offsetselect._is_wrapper = True - offset_value = select._offset if not self.dialect.use_binds_for_limits: - offset_value = sql.literal_column("%d" % offset_value) + offset_clause = sql.literal_column( + "%d" % select._offset) offsetselect.append_whereclause( - sql.literal_column("ora_rn") > offset_value) + sql.literal_column("ora_rn") > offset_clause) offsetselect._for_update_arg = select._for_update_arg select = offsetselect - kwargs['iswrapper'] = getattr(select, '_is_wrapper', False) return compiler.SQLCompiler.visit_select(self, select, **kwargs) - def limit_clause(self, select): + def limit_clause(self, select, **kw): return "" - def for_update_clause(self, select): + def for_update_clause(self, select, **kw): if self.is_subquery(): return "" @@ -744,7 +808,7 @@ class OracleCompiler(compiler.SQLCompiler): if select._for_update_arg.of: tmp += ' OF ' + ', '.join( - self.process(elem) for elem in + self.process(elem, **kw) for elem in select._for_update_arg.of ) @@ -773,15 +837,57 @@ class OracleDDLCompiler(compiler.DDLCompiler): return text - def visit_create_index(self, create, **kw): - return super(OracleDDLCompiler, self).\ - visit_create_index(create, include_schema=True) + def visit_create_index(self, create): + index = create.element + self._verify_index_table(index) + preparer = self.preparer + text = "CREATE " + if index.unique: + text += "UNIQUE " + if index.dialect_options['oracle']['bitmap']: + text += "BITMAP " + text += "INDEX %s ON %s (%s)" % ( + self._prepared_index_name(index, include_schema=True), + preparer.format_table(index.table, use_schema=True), + ', '.join( + self.sql_compiler.process( + expr, + include_table=False, literal_binds=True) + for expr in index.expressions) + ) + if index.dialect_options['oracle']['compress'] is not False: + if index.dialect_options['oracle']['compress'] is True: + text += " COMPRESS" + else: + text += " COMPRESS %d" % ( + index.dialect_options['oracle']['compress'] + ) + return text + + def post_create_table(self, table): + table_opts = [] + opts = table.dialect_options['oracle'] + + if opts['on_commit']: + on_commit_options = opts['on_commit'].replace("_", " ").upper() + table_opts.append('\n ON COMMIT %s' % on_commit_options) + + if opts['compress']: + if opts['compress'] is True: + table_opts.append("\n COMPRESS") + else: + table_opts.append("\n COMPRESS FOR %s" % ( + opts['compress'] + )) + + return ''.join(table_opts) class OracleIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = set([x.lower() for x in RESERVED_WORDS]) - illegal_initial_characters = set(range(0, 10)).union(["_", "$"]) + illegal_initial_characters = set( + (str(dig) for dig in range(0, 10))).union(["_", "$"]) def _bindparam_requires_quotes(self, value): """Return True if the given identifier requires quoting.""" @@ -798,7 +904,6 @@ class OracleIdentifierPreparer(compiler.IdentifierPreparer): class OracleExecutionContext(default.DefaultExecutionContext): - def fire_sequence(self, seq, type_): return self._execute_scalar( "SELECT " + @@ -815,6 +920,8 @@ class OracleDialect(default.DefaultDialect): supports_sane_rowcount = True supports_sane_multi_rowcount = False + supports_simple_order_by_label = False + supports_sequences = True sequences_optional = False postfetch_lastrowid = False @@ -836,7 +943,15 @@ class 
OracleDialect(default.DefaultDialect): reflection_options = ('oracle_resolve_synonyms', ) construct_arguments = [ - (sa_schema.Table, {"resolve_synonyms": False}) + (sa_schema.Table, { + "resolve_synonyms": False, + "on_commit": None, + "compress": False + }), + (sa_schema.Index, { + "bitmap": False, + "compress": False + }) ] def __init__(self, @@ -866,6 +981,16 @@ class OracleDialect(default.DefaultDialect): return self.server_version_info and \ self.server_version_info < (9, ) + @property + def _supports_table_compression(self): + return self.server_version_info and \ + self.server_version_info >= (9, 2, ) + + @property + def _supports_table_compress_for(self): + return self.server_version_info and \ + self.server_version_info >= (11, ) + @property def _supports_char_length(self): return not self._is_oracle_8 @@ -908,6 +1033,8 @@ class OracleDialect(default.DefaultDialect): if name.upper() == name and not \ self.identifier_preparer._requires_quotes(name.lower()): return name.lower() + elif name.lower() == name: + return quoted_name(name, quote=True) else: return name @@ -1023,7 +1150,21 @@ class OracleDialect(default.DefaultDialect): "WHERE nvl(tablespace_name, 'no tablespace') NOT IN " "('SYSTEM', 'SYSAUX') " "AND OWNER = :owner " - "AND IOT_NAME IS NULL") + "AND IOT_NAME IS NULL " + "AND DURATION IS NULL") + cursor = connection.execute(s, owner=schema) + return [self.normalize_name(row[0]) for row in cursor] + + @reflection.cache + def get_temp_table_names(self, connection, **kw): + schema = self.denormalize_name(self.default_schema_name) + s = sql.text( + "SELECT table_name FROM all_tables " + "WHERE nvl(tablespace_name, 'no tablespace') NOT IN " + "('SYSTEM', 'SYSAUX') " + "AND OWNER = :owner " + "AND IOT_NAME IS NULL " + "AND DURATION IS NOT NULL") cursor = connection.execute(s, owner=schema) return [self.normalize_name(row[0]) for row in cursor] @@ -1034,6 +1175,50 @@ class OracleDialect(default.DefaultDialect): cursor = connection.execute(s, owner=self.denormalize_name(schema)) return [self.normalize_name(row[0]) for row in cursor] + @reflection.cache + def get_table_options(self, connection, table_name, schema=None, **kw): + options = {} + + resolve_synonyms = kw.get('oracle_resolve_synonyms', False) + dblink = kw.get('dblink', '') + info_cache = kw.get('info_cache') + + (table_name, schema, dblink, synonym) = \ + self._prepare_reflection_args(connection, table_name, schema, + resolve_synonyms, dblink, + info_cache=info_cache) + + params = {"table_name": table_name} + + columns = ["table_name"] + if self._supports_table_compression: + columns.append("compression") + if self._supports_table_compress_for: + columns.append("compress_for") + + text = "SELECT %(columns)s "\ + "FROM ALL_TABLES%(dblink)s "\ + "WHERE table_name = :table_name" + + if schema is not None: + params['owner'] = schema + text += " AND owner = :owner " + text = text % {'dblink': dblink, 'columns': ", ".join(columns)} + + result = connection.execute(sql.text(text), **params) + + enabled = dict(DISABLED=False, ENABLED=True) + + row = result.first() + if row: + if "compression" in row and enabled.get(row.compression, False): + if "compress_for" in row: + options['oracle_compress'] = row.compress_for + else: + options['oracle_compress'] = True + + return options + @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): """ @@ -1119,7 +1304,8 @@ class OracleDialect(default.DefaultDialect): params = {'table_name': table_name} text = \ - "SELECT a.index_name, a.column_name, b.uniqueness "\ 
+ "SELECT a.index_name, a.column_name, "\ + "\nb.index_type, b.uniqueness, b.compression, b.prefix_length "\ "\nFROM ALL_IND_COLUMNS%(dblink)s a, "\ "\nALL_INDEXES%(dblink)s b "\ "\nWHERE "\ @@ -1145,6 +1331,7 @@ class OracleDialect(default.DefaultDialect): dblink=dblink, info_cache=kw.get('info_cache')) pkeys = pk_constraint['constrained_columns'] uniqueness = dict(NONUNIQUE=False, UNIQUE=True) + enabled = dict(DISABLED=False, ENABLED=True) oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE) @@ -1164,10 +1351,15 @@ class OracleDialect(default.DefaultDialect): if rset.index_name != last_index_name: remove_if_primary_key(index) index = dict(name=self.normalize_name(rset.index_name), - column_names=[]) + column_names=[], dialect_options={}) indexes.append(index) index['unique'] = uniqueness.get(rset.uniqueness, False) + if rset.index_type in ('BITMAP', 'FUNCTION-BASED BITMAP'): + index['dialect_options']['oracle_bitmap'] = True + if enabled.get(rset.compression, False): + index['dialect_options']['oracle_compress'] = rset.prefix_length + # filter out Oracle SYS_NC names. could also do an outer join # to the all_tab_columns table and check for real col names there. if not oracle_sys_col.match(rset.column_name): diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py index 4a1ceec..cfbb87e 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1,5 +1,5 @@ # oracle/cx_oracle.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -61,6 +61,14 @@ on the URL, or as keyword arguments to :func:`.create_engine()` are: Defaults to ``True``. Note that this is the opposite default of the cx_Oracle DBAPI itself. +* ``service_name`` - An option to use connection string (DSN) with + ``SERVICE_NAME`` instead of ``SID``. It can't be passed when a ``database`` + part is given. + E.g. ``oracle+cx_oracle://scott:tiger@host:1521/?service_name=hr`` + is a valid url. This value is only available as a URL query string argument. + + .. versionadded:: 1.0.0 + .. _cx_oracle_unicode: Unicode @@ -285,6 +293,7 @@ from .base import OracleCompiler, OracleDialect, OracleExecutionContext from . import base as oracle from ...engine import result as _result from sqlalchemy import types as sqltypes, util, exc, processors +from sqlalchemy import util import random import collections import decimal @@ -711,8 +720,10 @@ class OracleDialect_cx_oracle(OracleDialect): # this occurs in tests with mock DBAPIs self._cx_oracle_string_types = set() self._cx_oracle_with_unicode = False - elif self.cx_oracle_ver >= (5,) and not \ - hasattr(self.dbapi, 'UNICODE'): + elif util.py3k or ( + self.cx_oracle_ver >= (5,) and not \ + hasattr(self.dbapi, 'UNICODE') + ): # cx_Oracle WITH_UNICODE mode. 
*only* python # unicode objects accepted for anything self.supports_unicode_statements = True @@ -862,14 +873,26 @@ class OracleDialect_cx_oracle(OracleDialect): util.coerce_kw_type(dialect_opts, opt, bool) setattr(self, opt, dialect_opts[opt]) - if url.database: + database = url.database + service_name = dialect_opts.get('service_name', None) + if database or service_name: # if we have a database, then we have a remote host port = url.port if port: port = int(port) else: port = 1521 - dsn = self.dbapi.makedsn(url.host, port, url.database) + + if database and service_name: + raise exc.InvalidRequestError( + '"service_name" option shouldn\'t ' + 'be used with a "database" part of the url') + if database: + makedsn_kwargs = {'sid': database} + if service_name: + makedsn_kwargs = {'service_name': service_name} + + dsn = self.dbapi.makedsn(url.host, port, **makedsn_kwargs) else: # we have a local tnsname dsn = url.host diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/zxjdbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/zxjdbc.py index 82c8e2f..c3259fe 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/zxjdbc.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/oracle/zxjdbc.py @@ -1,5 +1,5 @@ # oracle/zxjdbc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -10,8 +10,10 @@ :name: zxJDBC for Jython :dbapi: zxjdbc :connectstring: oracle+zxjdbc://user:pass@host/dbname - :driverurl: http://www.oracle.com/technology/software/tech/java/\ -sqlj_jdbc/index.html. + :driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html + + .. note:: Jython is not supported by current versions of SQLAlchemy. The + zxjdbc dialect should be considered as experimental. """ import decimal @@ -68,8 +70,7 @@ class OracleCompiler_zxjdbc(OracleCompiler): expression._select_iterables(returning_cols)) # within_columns_clause=False so that labels (foo AS bar) don't render - columns = [self.process(c, within_columns_clause=False, - result_map=self.result_map) + columns = [self.process(c, within_columns_clause=False) for c in self.returning_cols] if not hasattr(self, 'returning_parameters'): diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgres.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgres.py index f813e00..04d37a2 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgres.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgres.py @@ -1,5 +1,5 @@ # dialects/postgres.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/__init__.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/__init__.py index d755e6a..006afbd 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/__init__.py @@ -1,11 +1,11 @@ # postgresql/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -from . 
import base, psycopg2, pg8000, pypostgresql, zxjdbc +from . import base, psycopg2, pg8000, pypostgresql, zxjdbc, psycopg2cffi base.dialect = psycopg2.dialect @@ -13,7 +13,7 @@ from .base import \ INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \ INET, CIDR, UUID, BIT, MACADDR, OID, DOUBLE_PRECISION, TIMESTAMP, TIME, \ DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All, \ - TSVECTOR + TSVECTOR, DropEnumType from .constraints import ExcludeConstraint from .hstore import HSTORE, hstore from .json import JSON, JSONElement, JSONB @@ -26,5 +26,6 @@ __all__ = ( 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN', 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE', 'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE', - 'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'JSONElement' + 'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'JSONElement', + 'DropEnumType' ) diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py index 369a87f..6a60c22 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py @@ -1,5 +1,5 @@ # postgresql/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -48,7 +48,7 @@ Transaction Isolation Level --------------------------- All Postgresql dialects support setting of transaction isolation level -both via a dialect-specific parameter ``isolation_level`` +both via a dialect-specific parameter :paramref:`.create_engine.isolation_level` accepted by :func:`.create_engine`, as well as the ``isolation_level`` argument as passed to :meth:`.Connection.execution_options`. When using a non-psycopg2 dialect, @@ -102,7 +102,7 @@ via foreign key constraint, a decision must be made as to how the ``.schema`` is represented in those remote tables, in the case where that remote schema name is also a member of the current `Postgresql search path -`_. +`_. By default, the Postgresql dialect mimics the behavior encouraged by Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function @@ -266,7 +266,7 @@ will emit to the database:: The Postgresql text search functions such as ``to_tsquery()`` and ``to_tsvector()`` are available -explicitly using the standard :attr:`.func` construct. For example:: +explicitly using the standard :data:`.func` construct. For example:: select([ func.to_tsvector('fat cats ate rats').match('cat & rat') @@ -299,7 +299,7 @@ not re-compute the column on demand. In order to provide for this explicit query planning, or to use different search strategies, the ``match`` method accepts a ``postgresql_regconfig`` -keyword argument. +keyword argument:: select([mytable.c.id]).where( mytable.c.title.match('somestring', postgresql_regconfig='english') @@ -311,7 +311,7 @@ Emits the equivalent of:: WHERE mytable.title @@ to_tsquery('english', 'somestring') One can also specifically pass in a `'regconfig'` value to the -``to_tsvector()`` command as the initial argument. 
+``to_tsvector()`` command as the initial argument:: select([mytable.c.id]).where( func.to_tsvector('english', mytable.c.title )\ @@ -401,13 +401,179 @@ The value passed to the keyword argument will be simply passed through to the underlying CREATE INDEX command, so it *must* be a valid index type for your version of PostgreSQL. +.. _postgresql_index_storage: + +Index Storage Parameters +^^^^^^^^^^^^^^^^^^^^^^^^ + +PostgreSQL allows storage parameters to be set on indexes. The storage +parameters available depend on the index method used by the index. Storage +parameters can be specified on :class:`.Index` using the ``postgresql_with`` +keyword argument:: + + Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50}) + +.. versionadded:: 1.0.6 + +.. _postgresql_index_concurrently: + +Indexes with CONCURRENTLY +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Postgresql index option CONCURRENTLY is supported by passing the +flag ``postgresql_concurrently`` to the :class:`.Index` construct:: + + tbl = Table('testtbl', m, Column('data', Integer)) + + idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True) + +The above index construct will render SQL as:: + + CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data) + +.. versionadded:: 0.9.9 + +.. _postgresql_index_reflection: + +Postgresql Index Reflection +--------------------------- + +The Postgresql database creates a UNIQUE INDEX implicitly whenever the +UNIQUE CONSTRAINT construct is used. When inspecting a table using +:class:`.Inspector`, the :meth:`.Inspector.get_indexes` +and the :meth:`.Inspector.get_unique_constraints` will report on these +two constructs distinctly; in the case of the index, the key +``duplicates_constraint`` will be present in the index entry if it is +detected as mirroring a constraint. When performing reflection using +``Table(..., autoload=True)``, the UNIQUE INDEX is **not** returned +in :attr:`.Table.indexes` when it is detected as mirroring a +:class:`.UniqueConstraint` in the :attr:`.Table.constraints` collection. + +.. versionchanged:: 1.0.0 - :class:`.Table` reflection now includes + :class:`.UniqueConstraint` objects present in the :attr:`.Table.constraints` + collection; the Postgresql backend will no longer include a "mirrored" + :class:`.Index` construct in :attr:`.Table.indexes` if it is detected + as corresponding to a unique constraint. + +Special Reflection Options +-------------------------- + +The :class:`.Inspector` used for the Postgresql backend is an instance +of :class:`.PGInspector`, which offers additional methods:: + + from sqlalchemy import create_engine, inspect + + engine = create_engine("postgresql+psycopg2://localhost/test") + insp = inspect(engine) # will be a PGInspector + + print(insp.get_enums()) + +.. autoclass:: PGInspector + :members: + +.. 
_postgresql_table_options:
+
+PostgreSQL Table Options
+-------------------------
+
+Several options for CREATE TABLE are supported directly by the PostgreSQL
+dialect in conjunction with the :class:`.Table` construct:
+
+* ``TABLESPACE``::
+
+    Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
+
+* ``ON COMMIT``::
+
+    Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
+
+* ``WITH OIDS``::
+
+    Table("some_table", metadata, ..., postgresql_with_oids=True)
+
+* ``WITHOUT OIDS``::
+
+    Table("some_table", metadata, ..., postgresql_with_oids=False)
+
+* ``INHERITS``::
+
+    Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
+
+    Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
+
+.. versionadded:: 1.0.0
+
+.. seealso::
+
+    `Postgresql CREATE TABLE options
+    <http://www.postgresql.org/docs/current/static/sql-createtable.html>`_
+
+ENUM Types
+----------
+
+Postgresql has an independently creatable TYPE structure which is used
+to implement an enumerated type. This approach introduces significant
+complexity on the SQLAlchemy side in terms of when this type should be
+CREATED and DROPPED. The type object is also an independently reflectable
+entity. The following sections should be consulted:
+
+* :class:`.postgresql.ENUM` - DDL and typing support for ENUM.
+
+* :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types
+
+* :meth:`.postgresql.ENUM.create` , :meth:`.postgresql.ENUM.drop` - individual
+  CREATE and DROP commands for ENUM.
+
+.. _postgresql_array_of_enum:
+
+Using ENUM with ARRAY
+^^^^^^^^^^^^^^^^^^^^^
+
+The combination of ENUM and ARRAY is not directly supported by backend
+DBAPIs at this time. In order to send and receive an ARRAY of ENUM,
+use the following workaround type::
+
+    class ArrayOfEnum(ARRAY):
+
+        def bind_expression(self, bindvalue):
+            return sa.cast(bindvalue, self)
+
+        def result_processor(self, dialect, coltype):
+            super_rp = super(ArrayOfEnum, self).result_processor(
+                dialect, coltype)
+
+            def handle_raw_string(value):
+                inner = re.match(r"^{(.*)}$", value).group(1)
+                return inner.split(",") if inner else []
+
+            def process(value):
+                if value is None:
+                    return None
+                return super_rp(handle_raw_string(value))
+            return process
+
+E.g.::
+
+    Table(
+        'mydata', metadata,
+        Column('id', Integer, primary_key=True),
+        Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
+    )
+
+This type is not included as a built-in type as it would be incompatible
+with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
+a new version.
+
 """
 from collections import defaultdict
 import re
+import datetime as dt
+
 from ... import sql, schema, exc, util
 from ...engine import default, reflection
-from ...sql import compiler, expression, operators
+from ...sql import compiler, expression, operators, default_comparator
 from ...
import types as sqltypes try: @@ -510,6 +676,10 @@ class INTERVAL(sqltypes.TypeEngine): def _type_affinity(self): return sqltypes.Interval + @property + def python_type(self): + return dt.timedelta + PGInterval = INTERVAL @@ -604,10 +774,10 @@ class _Slice(expression.ColumnElement): type = sqltypes.NULLTYPE def __init__(self, slice_, source_comparator): - self.start = source_comparator._check_literal( + self.start = default_comparator._check_literal( source_comparator.expr, operators.getitem, slice_.start) - self.stop = source_comparator._check_literal( + self.stop = default_comparator._check_literal( source_comparator.expr, operators.getitem, slice_.stop) @@ -695,7 +865,7 @@ class array(expression.Tuple): self.type = ARRAY(self.type) def _bind_param(self, operator, obj): - return array(*[ + return array([ expression.BindParameter(None, o, _compared_to_operator=operator, _compared_to_type=self.type, unique=True) for o in obj @@ -760,6 +930,16 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): mytable.c.data[2:7]: [1, 2, 3] }) + .. note:: + + Multi-dimensional support for the ``[]`` operator is not supported + in SQLAlchemy 1.0. Please use the :func:`.type_coerce` function + to cast an intermediary expression to ARRAY again as a workaround:: + + expr = type_coerce(my_array_column[5], ARRAY(Integer))[6] + + Multi-dimensional support will be provided in a future release. + :class:`.ARRAY` provides special methods for containment operations, e.g.:: @@ -774,6 +954,10 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): The :class:`.ARRAY` type may not be supported on all DBAPIs. It is known to work on psycopg2 and not pg8000. + Additionally, the :class:`.ARRAY` type does not work directly in + conjunction with the :class:`.ENUM` type. For a workaround, see the + special type at :ref:`postgresql_array_of_enum`. + See also: :class:`.postgresql.array` - produce a literal array value. @@ -800,8 +984,9 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine): index += shift_indexes return_type = self.type.item_type - return self._binary_operate(self.expr, operators.getitem, index, - result_type=return_type) + return default_comparator._binary_operate( + self.expr, operators.getitem, index, + result_type=return_type) def any(self, other, operator=operators.eq): """Return ``other operator ANY (array)`` clause. @@ -1004,21 +1189,76 @@ class ENUM(sqltypes.Enum): """Postgresql ENUM type. This is a subclass of :class:`.types.Enum` which includes - support for PG's ``CREATE TYPE``. + support for PG's ``CREATE TYPE`` and ``DROP TYPE``. - :class:`~.postgresql.ENUM` is used automatically when - using the :class:`.types.Enum` type on PG assuming - the ``native_enum`` is left as ``True``. However, the - :class:`~.postgresql.ENUM` class can also be instantiated - directly in order to access some additional Postgresql-specific - options, namely finer control over whether or not - ``CREATE TYPE`` should be emitted. + When the builtin type :class:`.types.Enum` is used and the + :paramref:`.Enum.native_enum` flag is left at its default of + True, the Postgresql backend will use a :class:`.postgresql.ENUM` + type as the implementation, so the special create/drop rules + will be used. - Note that both :class:`.types.Enum` as well as - :class:`~.postgresql.ENUM` feature create/drop - methods; the base :class:`.types.Enum` type ultimately - delegates to the :meth:`~.postgresql.ENUM.create` and - :meth:`~.postgresql.ENUM.drop` methods present here. 
+ The create/drop behavior of ENUM is necessarily intricate, due to the + awkward relationship the ENUM type has in relationship to the + parent table, in that it may be "owned" by just a single table, or + may be shared among many tables. + + When using :class:`.types.Enum` or :class:`.postgresql.ENUM` + in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` is emitted + corresponding to when the :meth:`.Table.create` and :meth:`.Table.drop` + methods are called:: + + table = Table('sometable', metadata, + Column('some_enum', ENUM('a', 'b', 'c', name='myenum')) + ) + + table.create(engine) # will emit CREATE ENUM and CREATE TABLE + table.drop(engine) # will emit DROP TABLE and DROP ENUM + + To use a common enumerated type between multiple tables, the best + practice is to declare the :class:`.types.Enum` or + :class:`.postgresql.ENUM` independently, and associate it with the + :class:`.MetaData` object itself:: + + my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata) + + t1 = Table('sometable_one', metadata, + Column('some_enum', myenum) + ) + + t2 = Table('sometable_two', metadata, + Column('some_enum', myenum) + ) + + When this pattern is used, care must still be taken at the level + of individual table creates. Emitting CREATE TABLE without also + specifying ``checkfirst=True`` will still cause issues:: + + t1.create(engine) # will fail: no such type 'myenum' + + If we specify ``checkfirst=True``, the individual table-level create + operation will check for the ``ENUM`` and create if not exists:: + + # will check if enum exists, and emit CREATE TYPE if not + t1.create(engine, checkfirst=True) + + When using a metadata-level ENUM type, the type will always be created + and dropped if either the metadata-wide create/drop is called:: + + metadata.create_all(engine) # will emit CREATE TYPE + metadata.drop_all(engine) # will emit DROP TYPE + + The type can also be created and dropped directly:: + + my_enum.create(engine) + my_enum.drop(engine) + + .. versionchanged:: 1.0.0 The Postgresql :class:`.postgresql.ENUM` type + now behaves more strictly with regards to CREATE/DROP. A metadata-level + ENUM type will only be created and dropped at the metadata level, + not the table level, with the exception of + ``table.create(checkfirst=True)``. + The ``table.drop()`` call will now emit a DROP TYPE for a table-level + enumerated type. 
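+
+    As a brief end-to-end sketch of the metadata-level pattern (engine
+    URL and names here are illustrative)::
+
+        from sqlalchemy import create_engine, MetaData, Table, Column, Integer
+        from sqlalchemy.dialects.postgresql import ENUM
+
+        engine = create_engine(
+            "postgresql+psycopg2://scott:tiger@localhost/test")
+        metadata = MetaData()
+
+        mood = ENUM('happy', 'sad', name='mood', metadata=metadata)
+
+        entry = Table('entry', metadata,
+                      Column('id', Integer, primary_key=True),
+                      Column('mood', mood))
+
+        metadata.create_all(engine)   # emits CREATE TYPE mood, CREATE TABLE
+        metadata.drop_all(engine)     # emits DROP TABLE, then DROP TYPE mood
+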
""" @@ -1124,9 +1364,18 @@ class ENUM(sqltypes.Enum): return False def _on_table_create(self, target, bind, checkfirst, **kw): - if not self._check_for_name_in_memos(checkfirst, kw): + if checkfirst or ( + not self.metadata and + not kw.get('_is_metadata_operation', False)) and \ + not self._check_for_name_in_memos(checkfirst, kw): self.create(bind=bind, checkfirst=checkfirst) + def _on_table_drop(self, target, bind, checkfirst, **kw): + if not self.metadata and \ + not kw.get('_is_metadata_operation', False) and \ + not self._check_for_name_in_memos(checkfirst, kw): + self.drop(bind=bind, checkfirst=checkfirst) + def _on_metadata_create(self, target, bind, checkfirst, **kw): if not self._check_for_name_in_memos(checkfirst, kw): self.create(bind=bind, checkfirst=checkfirst) @@ -1256,14 +1505,14 @@ class PGCompiler(compiler.SQLCompiler): def visit_sequence(self, seq): return "nextval('%s')" % self.preparer.format_sequence(seq) - def limit_clause(self, select): + def limit_clause(self, select, **kw): text = "" - if select._limit is not None: - text += " \n LIMIT " + self.process(sql.literal(select._limit)) - if select._offset is not None: - if select._limit is None: + if select._limit_clause is not None: + text += " \n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: text += " \n LIMIT ALL" - text += " OFFSET " + self.process(sql.literal(select._offset)) + text += " OFFSET " + self.process(select._offset_clause, **kw) return text def format_from_hint_text(self, sqltext, table, hint, iscrud): @@ -1271,7 +1520,7 @@ class PGCompiler(compiler.SQLCompiler): raise exc.CompileError("Unrecognized hint: %r" % hint) return "ONLY " + sqltext - def get_select_precolumns(self, select): + def get_select_precolumns(self, select, **kw): if select._distinct is not False: if select._distinct is True: return "DISTINCT " @@ -1280,11 +1529,12 @@ class PGCompiler(compiler.SQLCompiler): [self.process(col) for col in select._distinct] ) + ") " else: - return "DISTINCT ON (" + self.process(select._distinct) + ") " + return "DISTINCT ON (" + \ + self.process(select._distinct, **kw) + ") " else: return "" - def for_update_clause(self, select): + def for_update_clause(self, select, **kw): if select._for_update_arg.read: tmp = " FOR SHARE" @@ -1296,7 +1546,7 @@ class PGCompiler(compiler.SQLCompiler): c.table if isinstance(c, expression.ColumnClause) else c for c in select._for_update_arg.of) tmp += " OF " + ", ".join( - self.process(table, ashint=True) + self.process(table, ashint=True, use_schema=False, **kw) for table in tables ) @@ -1348,7 +1598,8 @@ class PGDDLCompiler(compiler.DDLCompiler): else: colspec += " SERIAL" else: - colspec += " " + self.dialect.type_compiler.process(column.type) + colspec += " " + self.dialect.type_compiler.process(column.type, + type_expression=column) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default @@ -1381,7 +1632,13 @@ class PGDDLCompiler(compiler.DDLCompiler): text = "CREATE " if index.unique: text += "UNIQUE " - text += "INDEX %s ON %s " % ( + text += "INDEX " + + concurrently = index.dialect_options['postgresql']['concurrently'] + if concurrently: + text += "CONCURRENTLY " + + text += "%s ON %s " % ( self._prepared_index_name(index, include_schema=False), preparer.format_table(index.table) @@ -1400,10 +1657,22 @@ class PGDDLCompiler(compiler.DDLCompiler): if not isinstance(expr, expression.ColumnClause) else expr, include_table=False, 
literal_binds=True) + - (c.key in ops and (' ' + ops[c.key]) or '') - for expr, c in zip(index.expressions, index.columns)]) + ( + (' ' + ops[expr.key]) + if hasattr(expr, 'key') + and expr.key in ops else '' + ) + for expr in index.expressions + ]) ) + withclause = index.dialect_options['postgresql']['with'] + + if withclause: + text += " WITH (%s)" % (', '.join( + ['%s = %s' % storage_parameter + for storage_parameter in withclause.items()])) + whereclause = index.dialect_options["postgresql"]["where"] if whereclause is not None: @@ -1413,15 +1682,17 @@ class PGDDLCompiler(compiler.DDLCompiler): text += " WHERE " + where_compiled return text - def visit_exclude_constraint(self, constraint): + def visit_exclude_constraint(self, constraint, **kw): text = "" if constraint.name is not None: text += "CONSTRAINT %s " % \ self.preparer.format_constraint(constraint) elements = [] - for c in constraint.columns: - op = constraint.operators[c.name] - elements.append(self.preparer.quote(c.name) + ' WITH ' + op) + for expr, name, op in constraint._render_exprs: + kw['include_table'] = False + elements.append( + "%s WITH %s" % (self.sql_compiler.process(expr, **kw), op) + ) text += "EXCLUDE USING %s (%s)" % (constraint.using, ', '.join(elements)) if constraint.where is not None: @@ -1431,96 +1702,125 @@ class PGDDLCompiler(compiler.DDLCompiler): text += self.define_constraint_deferrability(constraint) return text + def post_create_table(self, table): + table_opts = [] + pg_opts = table.dialect_options['postgresql'] + + inherits = pg_opts.get('inherits') + if inherits is not None: + if not isinstance(inherits, (list, tuple)): + inherits = (inherits, ) + table_opts.append( + '\n INHERITS ( ' + + ', '.join(self.preparer.quote(name) for name in inherits) + + ' )') + + if pg_opts['with_oids'] is True: + table_opts.append('\n WITH OIDS') + elif pg_opts['with_oids'] is False: + table_opts.append('\n WITHOUT OIDS') + + if pg_opts['on_commit']: + on_commit_options = pg_opts['on_commit'].replace("_", " ").upper() + table_opts.append('\n ON COMMIT %s' % on_commit_options) + + if pg_opts['tablespace']: + tablespace_name = pg_opts['tablespace'] + table_opts.append( + '\n TABLESPACE %s' % self.preparer.quote(tablespace_name) + ) + + return ''.join(table_opts) + class PGTypeCompiler(compiler.GenericTypeCompiler): - - def visit_TSVECTOR(self, type): + def visit_TSVECTOR(self, type, **kw): return "TSVECTOR" - def visit_INET(self, type_): + def visit_INET(self, type_, **kw): return "INET" - def visit_CIDR(self, type_): + def visit_CIDR(self, type_, **kw): return "CIDR" - def visit_MACADDR(self, type_): + def visit_MACADDR(self, type_, **kw): return "MACADDR" - def visit_OID(self, type_): + def visit_OID(self, type_, **kw): return "OID" - def visit_FLOAT(self, type_): + def visit_FLOAT(self, type_, **kw): if not type_.precision: return "FLOAT" else: return "FLOAT(%(precision)s)" % {'precision': type_.precision} - def visit_DOUBLE_PRECISION(self, type_): + def visit_DOUBLE_PRECISION(self, type_, **kw): return "DOUBLE PRECISION" - def visit_BIGINT(self, type_): + def visit_BIGINT(self, type_, **kw): return "BIGINT" - def visit_HSTORE(self, type_): + def visit_HSTORE(self, type_, **kw): return "HSTORE" - def visit_JSON(self, type_): + def visit_JSON(self, type_, **kw): return "JSON" - def visit_JSONB(self, type_): + def visit_JSONB(self, type_, **kw): return "JSONB" - def visit_INT4RANGE(self, type_): + def visit_INT4RANGE(self, type_, **kw): return "INT4RANGE" - def visit_INT8RANGE(self, type_): + def 
visit_INT8RANGE(self, type_, **kw): return "INT8RANGE" - def visit_NUMRANGE(self, type_): + def visit_NUMRANGE(self, type_, **kw): return "NUMRANGE" - def visit_DATERANGE(self, type_): + def visit_DATERANGE(self, type_, **kw): return "DATERANGE" - def visit_TSRANGE(self, type_): + def visit_TSRANGE(self, type_, **kw): return "TSRANGE" - def visit_TSTZRANGE(self, type_): + def visit_TSTZRANGE(self, type_, **kw): return "TSTZRANGE" - def visit_datetime(self, type_): - return self.visit_TIMESTAMP(type_) + def visit_datetime(self, type_, **kw): + return self.visit_TIMESTAMP(type_, **kw) - def visit_enum(self, type_): + def visit_enum(self, type_, **kw): if not type_.native_enum or not self.dialect.supports_native_enum: - return super(PGTypeCompiler, self).visit_enum(type_) + return super(PGTypeCompiler, self).visit_enum(type_, **kw) else: - return self.visit_ENUM(type_) + return self.visit_ENUM(type_, **kw) - def visit_ENUM(self, type_): + def visit_ENUM(self, type_, **kw): return self.dialect.identifier_preparer.format_type(type_) - def visit_TIMESTAMP(self, type_): + def visit_TIMESTAMP(self, type_, **kw): return "TIMESTAMP%s %s" % ( getattr(type_, 'precision', None) and "(%d)" % type_.precision or "", (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" ) - def visit_TIME(self, type_): + def visit_TIME(self, type_, **kw): return "TIME%s %s" % ( getattr(type_, 'precision', None) and "(%d)" % type_.precision or "", (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" ) - def visit_INTERVAL(self, type_): + def visit_INTERVAL(self, type_, **kw): if type_.precision is not None: return "INTERVAL(%d)" % type_.precision else: return "INTERVAL" - def visit_BIT(self, type_): + def visit_BIT(self, type_, **kw): if type_.varying: compiled = "BIT VARYING" if type_.length is not None: @@ -1529,16 +1829,16 @@ class PGTypeCompiler(compiler.GenericTypeCompiler): compiled = "BIT(%d)" % type_.length return compiled - def visit_UUID(self, type_): + def visit_UUID(self, type_, **kw): return "UUID" - def visit_large_binary(self, type_): - return self.visit_BYTEA(type_) + def visit_large_binary(self, type_, **kw): + return self.visit_BYTEA(type_, **kw) - def visit_BYTEA(self, type_): + def visit_BYTEA(self, type_, **kw): return "BYTEA" - def visit_ARRAY(self, type_): + def visit_ARRAY(self, type_, **kw): return self.process(type_.item_type) + ('[]' * (type_.dimensions if type_.dimensions is not None else 1)) @@ -1570,11 +1870,45 @@ class PGInspector(reflection.Inspector): reflection.Inspector.__init__(self, conn) def get_table_oid(self, table_name, schema=None): - """Return the oid from `table_name` and `schema`.""" + """Return the OID for the given table name.""" return self.dialect.get_table_oid(self.bind, table_name, schema, info_cache=self.info_cache) + def get_enums(self, schema=None): + """Return a list of ENUM objects. + + Each member is a dictionary containing these fields: + + * name - name of the enum + * schema - the schema name for the enum. + * visible - boolean, whether or not this enum is visible + in the default search path. + * labels - a list of string labels that apply to the enum. + + :param schema: schema name. If None, the default schema + (typically 'public') is used. May also be set to '*' to + indicate load enums for all schemas. + + .. versionadded:: 1.0.0 + + """ + schema = schema or self.default_schema_name + return self.dialect._load_enums(self.bind, schema) + + def get_foreign_table_names(self, schema=None): + """Return a list of FOREIGN TABLE names. 
+ + Behavior is similar to that of :meth:`.Inspector.get_table_names`, + except that the list is limited to those tables that report a + ``relkind`` value of ``f``. + + .. versionadded:: 1.0.0 + + """ + schema = schema or self.default_schema_name + return self.dialect._get_foreign_table_names(self.bind, schema) + class CreateEnumType(schema._CreateDropBase): __visit_name__ = "create_enum_type" @@ -1666,10 +2000,16 @@ class PGDialect(default.DefaultDialect): (schema.Index, { "using": False, "where": None, - "ops": {} + "ops": {}, + "concurrently": False, + "with": {} }), (schema.Table, { - "ignore_search_path": False + "ignore_search_path": False, + "tablespace": None, + "with_oids": None, + "on_commit": None, + "inherits": None }) ] @@ -1798,7 +2138,8 @@ class PGDialect(default.DefaultDialect): cursor = connection.execute( sql.text( "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where n.nspname=current_schema() " + "n.oid=c.relnamespace where " + "pg_catalog.pg_table_is_visible(c.oid) " "and relname=:name", bindparams=[ sql.bindparam('name', util.text_type(table_name), @@ -1916,7 +2257,7 @@ class PGDialect(default.DefaultDialect): FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE (%s) - AND c.relname = :table_name AND c.relkind in ('r','v') + AND c.relname = :table_name AND c.relkind in ('r', 'v', 'm', 'f') """ % schema_where_clause # Since we're binding to unicode, table_name and schema_name must be # unicode. @@ -1969,6 +2310,24 @@ class PGDialect(default.DefaultDialect): ) return [row[0] for row in result] + @reflection.cache + def _get_foreign_table_names(self, connection, schema=None, **kw): + if schema is not None: + current_schema = schema + else: + current_schema = self.default_schema_name + + result = connection.execute( + sql.text("SELECT relname FROM pg_class c " + "WHERE relkind = 'f' " + "AND '%s' = (select nspname from pg_namespace n " + "where n.oid = c.relnamespace) " % + current_schema, + typemap={'relname': sqltypes.Unicode} + ) + ) + return [row[0] for row in result] + @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is not None: @@ -1978,7 +2337,7 @@ class PGDialect(default.DefaultDialect): s = """ SELECT relname FROM pg_class c - WHERE relkind = 'v' + WHERE relkind IN ('m', 'v') AND '%(schema)s' = (select nspname from pg_namespace n where n.oid = c.relnamespace) """ % dict(schema=current_schema) @@ -2039,7 +2398,12 @@ class PGDialect(default.DefaultDialect): c = connection.execute(s, table_oid=table_oid) rows = c.fetchall() domains = self._load_domains(connection) - enums = self._load_enums(connection) + enums = dict( + ( + "%s.%s" % (rec['schema'], rec['name']) + if not rec['visible'] else rec['name'], rec) for rec in + self._load_enums(connection, schema='*') + ) # format columns columns = [] @@ -2113,10 +2477,9 @@ class PGDialect(default.DefaultDialect): elif attype in enums: enum = enums[attype] coltype = ENUM - if "." 
in attype: - kwargs['schema'], kwargs['name'] = attype.split('.') - else: - kwargs['name'] = attype + kwargs['name'] = enum['name'] + if not enum['visible']: + kwargs['schema'] = enum['schema'] args = tuple(enum['labels']) break elif attype in domains: @@ -2323,32 +2686,69 @@ class PGDialect(default.DefaultDialect): # cast indkey as varchar since it's an int2vector, # returned as a list by some drivers such as pypostgresql - IDX_SQL = """ - SELECT - i.relname as relname, - ix.indisunique, ix.indexprs, ix.indpred, - a.attname, a.attnum, ix.indkey%s - FROM - pg_class t - join pg_index ix on t.oid = ix.indrelid - join pg_class i on i.oid=ix.indexrelid - left outer join - pg_attribute a - on t.oid=a.attrelid and %s - WHERE - t.relkind = 'r' - and t.oid = :table_oid - and ix.indisprimary = 'f' - ORDER BY - t.relname, - i.relname - """ % ( - # version 8.3 here was based on observing the - # cast does not work in PG 8.2.4, does work in 8.3.0. - # nothing in PG changelogs regarding this. - "::varchar" if self.server_version_info >= (8, 3) else "", - self._pg_index_any("a.attnum", "ix.indkey") - ) + if self.server_version_info < (8, 5): + IDX_SQL = """ + SELECT + i.relname as relname, + ix.indisunique, ix.indexprs, ix.indpred, + a.attname, a.attnum, NULL, ix.indkey%s, + %s, am.amname + FROM + pg_class t + join pg_index ix on t.oid = ix.indrelid + join pg_class i on i.oid = ix.indexrelid + left outer join + pg_attribute a + on t.oid = a.attrelid and %s + left outer join + pg_am am + on i.relam = am.oid + WHERE + t.relkind IN ('r', 'v', 'f', 'm') + and t.oid = :table_oid + and ix.indisprimary = 'f' + ORDER BY + t.relname, + i.relname + """ % ( + # version 8.3 here was based on observing the + # cast does not work in PG 8.2.4, does work in 8.3.0. + # nothing in PG changelogs regarding this. 
+ "::varchar" if self.server_version_info >= (8, 3) else "", + "i.reloptions" if self.server_version_info >= (8, 2) + else "NULL", + self._pg_index_any("a.attnum", "ix.indkey") + ) + else: + IDX_SQL = """ + SELECT + i.relname as relname, + ix.indisunique, ix.indexprs, ix.indpred, + a.attname, a.attnum, c.conrelid, ix.indkey::varchar, + i.reloptions, am.amname + FROM + pg_class t + join pg_index ix on t.oid = ix.indrelid + join pg_class i on i.oid = ix.indexrelid + left outer join + pg_attribute a + on t.oid = a.attrelid and a.attnum = ANY(ix.indkey) + left outer join + pg_constraint c + on (ix.indrelid = c.conrelid and + ix.indexrelid = c.conindid and + c.contype in ('p', 'u', 'x')) + left outer join + pg_am am + on i.relam = am.oid + WHERE + t.relkind IN ('r', 'v', 'f', 'm') + and t.oid = :table_oid + and ix.indisprimary = 'f' + ORDER BY + t.relname, + i.relname + """ t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode}) c = connection.execute(t, table_oid=table_oid) @@ -2357,7 +2757,8 @@ class PGDialect(default.DefaultDialect): sv_idx_name = None for row in c.fetchall(): - idx_name, unique, expr, prd, col, col_num, idx_key = row + (idx_name, unique, expr, prd, col, + col_num, conrelid, idx_key, options, amname) = row if expr: if idx_name != sv_idx_name: @@ -2374,18 +2775,43 @@ class PGDialect(default.DefaultDialect): % idx_name) sv_idx_name = idx_name + has_idx = idx_name in indexes index = indexes[idx_name] if col is not None: index['cols'][col_num] = col - index['key'] = [int(k.strip()) for k in idx_key.split()] - index['unique'] = unique + if not has_idx: + index['key'] = [int(k.strip()) for k in idx_key.split()] + index['unique'] = unique + if conrelid is not None: + index['duplicates_constraint'] = idx_name + if options: + index['options'] = dict( + [option.split("=") for option in options]) - return [ - {'name': name, - 'unique': idx['unique'], - 'column_names': [idx['cols'][i] for i in idx['key']]} - for name, idx in indexes.items() - ] + # it *might* be nice to include that this is 'btree' in the + # reflection info. But we don't want an Index object + # to have a ``postgresql_using`` in it that is just the + # default, so for the moment leaving this out. 
+ if amname and amname != 'btree': + index['amname'] = amname + + result = [] + for name, idx in indexes.items(): + entry = { + 'name': name, + 'unique': idx['unique'], + 'column_names': [idx['cols'][i] for i in idx['key']] + } + if 'duplicates_constraint' in idx: + entry['duplicates_constraint'] = idx['duplicates_constraint'] + if 'options' in idx: + entry.setdefault( + 'dialect_options', {})["postgresql_with"] = idx['options'] + if 'amname' in idx: + entry.setdefault( + 'dialect_options', {})["postgresql_using"] = idx['amname'] + result.append(entry) + return result @reflection.cache def get_unique_constraints(self, connection, table_name, @@ -2424,7 +2850,8 @@ class PGDialect(default.DefaultDialect): for name, uc in uniques.items() ] - def _load_enums(self, connection): + def _load_enums(self, connection, schema=None): + schema = schema or self.default_schema_name if not self.supports_native_enum: return {} @@ -2440,31 +2867,37 @@ class PGDialect(default.DefaultDialect): LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid WHERE t.typtype = 'e' - ORDER BY "name", e.oid -- e.oid gives us label order """ + if schema != '*': + SQL_ENUMS += "AND n.nspname = :schema " + + # e.oid gives us label order within an enum + SQL_ENUMS += 'ORDER BY "schema", "name", e.oid' + s = sql.text(SQL_ENUMS, typemap={ 'attname': sqltypes.Unicode, 'label': sqltypes.Unicode}) + + if schema != '*': + s = s.bindparams(schema=schema) + c = connection.execute(s) - enums = {} + enums = [] + enum_by_name = {} for enum in c.fetchall(): - if enum['visible']: - # 'visible' just means whether or not the enum is in a - # schema that's on the search path -- or not overridden by - # a schema with higher precedence. If it's not visible, - # it will be prefixed with the schema-name when it's used. - name = enum['name'] + key = (enum['schema'], enum['name']) + if key in enum_by_name: + enum_by_name[key]['labels'].append(enum['label']) else: - name = "%s.%s" % (enum['schema'], enum['name']) - - if name in enums: - enums[name]['labels'].append(enum['label']) - else: - enums[name] = { + enum_by_name[key] = enum_rec = { + 'name': enum['name'], + 'schema': enum['schema'], + 'visible': enum['visible'], 'labels': [enum['label']], } + enums.append(enum_rec) return enums diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/constraints.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/constraints.py index e8ebc75..c6bb890 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/constraints.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/constraints.py @@ -1,10 +1,11 @@ -# Copyright (C) 2013-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2013-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -from sqlalchemy.schema import ColumnCollectionConstraint -from sqlalchemy.sql import expression +from ...sql.schema import ColumnCollectionConstraint +from ...sql import expression +from ... import util class ExcludeConstraint(ColumnCollectionConstraint): @@ -48,20 +49,42 @@ static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE for this constraint. 
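For example, a minimal usage sketch of a range-exclusion constraint (the table and column names are illustrative only)::

    from sqlalchemy import Table, Column, Integer, MetaData
    from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE

    metadata = MetaData()

    # disallow two bookings of the same room over overlapping time ranges;
    # 'gist' is the default index method for EXCLUDE constraints
    room_booking = Table(
        'room_booking', metadata,
        Column('room', Integer, primary_key=True),
        Column('during', TSRANGE),
        ExcludeConstraint(('room', '='), ('during', '&&'), using='gist')
    )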
""" + columns = [] + render_exprs = [] + self.operators = {} + + expressions, operators = zip(*elements) + + for (expr, column, strname, add_element), operator in zip( + self._extract_col_expression_collection(expressions), + operators + ): + if add_element is not None: + columns.append(add_element) + + name = column.name if column is not None else strname + + if name is not None: + # backwards compat + self.operators[name] = operator + + expr = expression._literal_as_text(expr) + + render_exprs.append( + (expr, name, operator) + ) + + self._render_exprs = render_exprs ColumnCollectionConstraint.__init__( self, - *[col for col, op in elements], + *columns, name=kw.get('name'), deferrable=kw.get('deferrable'), initially=kw.get('initially') ) - self.operators = {} - for col_or_string, op in elements: - name = getattr(col_or_string, 'name', col_or_string) - self.operators[name] = op self.using = kw.get('using', 'gist') where = kw.get('where') - if where: + if where is not None: self.where = expression._literal_as_text(where) def copy(self, **kw): diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/hstore.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/hstore.py index 9601edc..a4ff461 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/hstore.py @@ -1,5 +1,5 @@ # postgresql/hstore.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/json.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/json.py index 25ac342..f7ede85 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/json.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/json.py @@ -1,5 +1,5 @@ # postgresql/json.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -12,7 +12,7 @@ from .base import ischema_names from ... import types as sqltypes from ...sql.operators import custom_op from ... import sql -from ...sql import elements +from ...sql import elements, default_comparator from ... import util __all__ = ('JSON', 'JSONElement', 'JSONB') @@ -46,7 +46,8 @@ class JSONElement(elements.BinaryExpression): self._json_opstring = opstring operator = custom_op(opstring, precedence=5) - right = left._check_literal(left, operator, right) + right = default_comparator._check_literal( + left, operator, right) super(JSONElement, self).__init__( left, right, operator, type_=result_type) @@ -77,7 +78,7 @@ class JSONElement(elements.BinaryExpression): def cast(self, type_): """Convert this :class:`.JSONElement` to apply both the 'astext' operator - as well as an explicit type cast when evaulated. + as well as an explicit type cast when evaluated. E.g.:: @@ -164,6 +165,23 @@ class JSON(sqltypes.TypeEngine): __visit_name__ = 'JSON' + def __init__(self, none_as_null=False): + """Construct a :class:`.JSON` type. + + :param none_as_null: if True, persist the value ``None`` as a + SQL NULL value, not the JSON encoding of ``null``. Note that + when this flag is False, the :func:`.null` construct can still + be used to persist a NULL value:: + + from sqlalchemy import null + conn.execute(table.insert(), data=null()) + + .. 
versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null` + is now supported in order to persist a NULL value. + + """ + self.none_as_null = none_as_null + class comparator_factory(sqltypes.Concatenable.Comparator): """Define comparison operations for :class:`.JSON`.""" @@ -185,9 +203,17 @@ class JSON(sqltypes.TypeEngine): encoding = dialect.encoding def process(value): + if isinstance(value, elements.Null) or ( + value is None and self.none_as_null + ): + return None return json_serializer(value).encode(encoding) else: def process(value): + if isinstance(value, elements.Null) or ( + value is None and self.none_as_null + ): + return None return json_serializer(value) return process @@ -197,9 +223,13 @@ class JSON(sqltypes.TypeEngine): encoding = dialect.encoding def process(value): + if value is None: + return None return json_deserializer(value.decode(encoding)) else: def process(value): + if value is None: + return None return json_deserializer(value) return process diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/pg8000.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/pg8000.py index 589567d..68e8e02 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/pg8000.py @@ -1,5 +1,5 @@ # postgresql/pg8000.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under @@ -13,17 +13,30 @@ postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...] :url: https://pythonhosted.org/pg8000/ + +.. _pg8000_unicode: + Unicode ------- -When communicating with the server, pg8000 **always uses the server-side -character set**. SQLAlchemy has no ability to modify what character set -pg8000 chooses to use, and additionally SQLAlchemy does no unicode conversion -of any kind with the pg8000 backend. The origin of the client encoding setting -is ultimately the CLIENT_ENCODING setting in postgresql.conf. +pg8000 will encode / decode string values between it and the server using the +PostgreSQL ``client_encoding`` parameter; by default this is the value in +the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. +Typically, this can be changed to ``utf-8``, as a more useful default:: -It is not necessary, though is also harmless, to pass the "encoding" parameter -to :func:`.create_engine` when using pg8000. + #client_encoding = sql_ascii # actually, defaults to database + # encoding + client_encoding = utf8 + +The ``client_encoding`` can be overridden for a session by executing the SQL: + +SET CLIENT_ENCODING TO 'utf8'; + +SQLAlchemy will execute this SQL on all new connections based on the value +passed to :func:`.create_engine` using the ``client_encoding`` parameter:: + + engine = create_engine( + "postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8') .. _pg8000_isolation_level: @@ -58,6 +71,8 @@ from ... 
import types as sqltypes from .base import ( PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext, _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES) +import re +from sqlalchemy.dialects.postgresql.json import JSON class _PGNumeric(sqltypes.Numeric): @@ -88,6 +103,15 @@ class _PGNumericNoBind(_PGNumeric): return None +class _PGJSON(JSON): + + def result_processor(self, dialect, coltype): + if dialect._dbapi_version > (1, 10, 1): + return None # Has native JSON + else: + return super(_PGJSON, self).result_processor(dialect, coltype) + + class PGExecutionContext_pg8000(PGExecutionContext): pass @@ -119,7 +143,7 @@ class PGDialect_pg8000(PGDialect): supports_unicode_binds = True default_paramstyle = 'format' - supports_sane_multi_rowcount = False + supports_sane_multi_rowcount = True execution_ctx_cls = PGExecutionContext_pg8000 statement_compiler = PGCompiler_pg8000 preparer = PGIdentifierPreparer_pg8000 @@ -129,10 +153,29 @@ class PGDialect_pg8000(PGDialect): PGDialect.colspecs, { sqltypes.Numeric: _PGNumericNoBind, - sqltypes.Float: _PGNumeric + sqltypes.Float: _PGNumeric, + JSON: _PGJSON, } ) + def __init__(self, client_encoding=None, **kwargs): + PGDialect.__init__(self, **kwargs) + self.client_encoding = client_encoding + + def initialize(self, connection): + self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14) + super(PGDialect_pg8000, self).initialize(connection) + + @util.memoized_property + def _dbapi_version(self): + if self.dbapi and hasattr(self.dbapi, '__version__'): + return tuple( + [ + int(x) for x in re.findall( + r'(\d+)(?:[-\.]?|$)', self.dbapi.__version__)]) + else: + return (99, 99, 99) + @classmethod def dbapi(cls): return __import__('pg8000') @@ -171,4 +214,51 @@ class PGDialect_pg8000(PGDialect): (level, self.name, ", ".join(self._isolation_lookup)) ) + def set_client_encoding(self, connection, client_encoding): + # adjust for ConnectionFairy possibly being present + if hasattr(connection, 'connection'): + connection = connection.connection + + cursor = connection.cursor() + cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'") + cursor.execute("COMMIT") + cursor.close() + + def do_begin_twophase(self, connection, xid): + connection.connection.tpc_begin((0, xid, '')) + + def do_prepare_twophase(self, connection, xid): + connection.connection.tpc_prepare() + + def do_rollback_twophase( + self, connection, xid, is_prepared=True, recover=False): + connection.connection.tpc_rollback((0, xid, '')) + + def do_commit_twophase( + self, connection, xid, is_prepared=True, recover=False): + connection.connection.tpc_commit((0, xid, '')) + + def do_recover_twophase(self, connection): + return [row[1] for row in connection.connection.tpc_recover()] + + def on_connect(self): + fns = [] + if self.client_encoding is not None: + def on_connect(conn): + self.set_client_encoding(conn, self.client_encoding) + fns.append(on_connect) + + if self.isolation_level is not None: + def on_connect(conn): + self.set_isolation_level(conn, self.isolation_level) + fns.append(on_connect) + + if len(fns) > 0: + def on_connect(conn): + for fn in fns: + fn(conn) + return on_connect + else: + return None + dialect = PGDialect_pg8000 diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py index e6450c9..a0f0cca 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/psycopg2.py @@ 
-1,5 +1,5 @@ # postgresql/psycopg2.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -32,10 +32,25 @@ psycopg2-specific keyword arguments which are accepted by way of enabling this mode on a per-execution basis. * ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode per connection. True by default. + + .. seealso:: + + :ref:`psycopg2_disable_native_unicode` + * ``isolation_level``: This option, available for all PostgreSQL dialects, includes the ``AUTOCOMMIT`` isolation level when using the psycopg2 - dialect. See :ref:`psycopg2_isolation_level`. + dialect. + .. seealso:: + + :ref:`psycopg2_isolation_level` + +* ``client_encoding``: sets the client encoding in a libpq-agnostic way, + using psycopg2's ``set_client_encoding()`` method. + + .. seealso:: + + :ref:`psycopg2_unicode` Unix Domain Connections ------------------------ @@ -51,12 +66,15 @@ in ``/tmp``, or whatever socket directory was specified when PostgreSQL was built. This value can be overridden by passing a pathname to psycopg2, using ``host`` as an additional keyword argument:: - create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql") + create_engine("postgresql+psycopg2://user:password@/dbname?\ +host=/var/lib/postgresql") See also: -`PQconnectdbParams `_ +`PQconnectdbParams `_ + +.. _psycopg2_execution_options: Per-Statement/Connection Execution Options ------------------------------------------- @@ -65,18 +83,27 @@ The following DBAPI-specific options are respected when used with :meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`, :meth:`.Query.execution_options`, in addition to those not specific to DBAPIs: -* isolation_level - Set the transaction isolation level for the lifespan of a +* ``isolation_level`` - Set the transaction isolation level for the lifespan of a :class:`.Connection` (can only be set on a connection, not a statement or query). See :ref:`psycopg2_isolation_level`. -* stream_results - Enable or disable usage of psycopg2 server side cursors - +* ``stream_results`` - Enable or disable usage of psycopg2 server side cursors - this feature makes use of "named" cursors in combination with special result handling methods so that result rows are not fully buffered. If ``None`` or not set, the ``server_side_cursors`` option of the :class:`.Engine` is used. -Unicode -------- +* ``max_row_buffer`` - when using ``stream_results``, an integer value that + specifies the maximum number of rows to buffer at a time. This is + interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the + buffer will grow to ultimately store 1000 rows at a time. + + .. versionadded:: 1.0.6 + +.. _psycopg2_unicode: + +Unicode with Psycopg2 +---------------------- By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE`` extension, such that the DBAPI receives and returns all strings as Python @@ -84,27 +111,51 @@ Unicode objects directly - SQLAlchemy passes these values through without change. Psycopg2 here will encode/decode string values based on the current "client encoding" setting; by default this is the value in the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``. 
-Typically, this can be changed to ``utf-8``, as a more useful default:: +Typically, this can be changed to ``utf8``, as a more useful default:: - #client_encoding = sql_ascii # actually, defaults to database + # postgresql.conf file + + # client_encoding = sql_ascii # actually, defaults to database # encoding client_encoding = utf8 A second way to affect the client encoding is to set it within Psycopg2 -locally. SQLAlchemy will call psycopg2's ``set_client_encoding()`` -method (see: -http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding) +locally. SQLAlchemy will call psycopg2's +:meth:`psycopg2:connection.set_client_encoding` method on all new connections based on the value passed to :func:`.create_engine` using the ``client_encoding`` parameter:: + # set_client_encoding() setting; + # works for *all* Postgresql versions engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8') This overrides the encoding specified in the Postgresql client configuration. +When using the parameter in this way, the psycopg2 driver emits +``SET client_encoding TO 'utf8'`` on the connection explicitly, and works +in all Postgresql versions. -.. versionadded:: 0.7.3 - The psycopg2-specific ``client_encoding`` parameter to - :func:`.create_engine`. +Note that the ``client_encoding`` setting as passed to :func:`.create_engine` +is **not the same** as the more recently added ``client_encoding`` parameter +now supported by libpq directly. This is enabled when ``client_encoding`` +is passed directly to ``psycopg2.connect()``, and from SQLAlchemy is passed +using the :paramref:`.create_engine.connect_args` parameter:: + + # libpq direct parameter setting; + # only works for Postgresql **9.1 and above** + engine = create_engine("postgresql://user:pass@host/dbname", + connect_args={'client_encoding': 'utf8'}) + + # using the query string is equivalent + engine = create_engine("postgresql://user:pass@host/dbname?client_encoding=utf8") + +The above parameter was only added to libpq as of version 9.1 of Postgresql, +so using the previous method is better for cross-version support. + +.. _psycopg2_disable_native_unicode: + +Disabling Native Unicode +^^^^^^^^^^^^^^^^^^^^^^^^ SQLAlchemy can also be instructed to skip the usage of the psycopg2 ``UNICODE`` extension and to instead utilize its own unicode encode/decode @@ -116,8 +167,56 @@ in and coerce from bytes on the way back, using the value of the :func:`.create_engine` ``encoding`` parameter, which defaults to ``utf-8``. SQLAlchemy's own unicode encode/decode functionality is steadily becoming -obsolete as more DBAPIs support unicode fully along with the approach of -Python 3; in modern usage psycopg2 should be relied upon to handle unicode. +obsolete as most DBAPIs now support unicode fully. + +Bound Parameter Styles +---------------------- + +The default parameter style for the psycopg2 dialect is "pyformat", where +SQL is rendered using ``%(paramname)s`` style. This format has the limitation +that it does not accommodate the unusual case of parameter names that +actually contain percent or parenthesis symbols; as SQLAlchemy in many cases +generates bound parameter names based on the name of a column, the presence +of these characters in a column name can lead to problems. + +There are two solutions to the issue of a :class:`.schema.Column` that contains +one of these characters in its name. 
One is to specify the +:paramref:`.schema.Column.key` for columns that have such names:: + + measurement = Table('measurement', metadata, + Column('Size (meters)', Integer, key='size_meters') + ) + +Above, an INSERT statement such as ``measurement.insert()`` will use +``size_meters`` as the parameter name, and a SQL expression such as +``measurement.c.size_meters > 10`` will derive the bound parameter name +from the ``size_meters`` key as well. + +.. versionchanged:: 1.0.0 - SQL expressions will use :attr:`.Column.key` + as the source of naming when anonymous bound parameters are created + in SQL expressions; previously, this behavior only applied to + :meth:`.Table.insert` and :meth:`.Table.update` parameter names. + +The other solution is to use a positional format; psycopg2 allows use of the +"format" paramstyle, which can be passed to +:paramref:`.create_engine.paramstyle`:: + + engine = create_engine( + 'postgresql://scott:tiger@localhost:5432/test', paramstyle='format') + +With the above engine, instead of a statement like:: + + INSERT INTO measurement ("Size (meters)") VALUES (%(Size (meters))s) + {'Size (meters)': 1} + +we instead see:: + + INSERT INTO measurement ("Size (meters)") VALUES (%s) + (1, ) + +Where above, the dictionary style is converted into a tuple with positional +style. + Transactions ------------ @@ -148,7 +247,7 @@ The psycopg2 dialect supports these constants for isolation level: * ``AUTOCOMMIT`` .. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using - psycopg2. + psycopg2. .. seealso:: @@ -173,14 +272,17 @@ HSTORE type The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension -by default when it is detected that the target database has the HSTORE -type set up for use. In other words, when the dialect makes the first +by default when psycopg2 version 2.4 or greater is used, and +it is detected that the target database has the HSTORE type set up for use. +In other words, when the dialect makes the first connection, a sequence like the following is performed: 1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``. If this function returns a list of HSTORE identifiers, we then determine that the ``HSTORE`` extension is present. + This function is **skipped** if the version of psycopg2 installed is + less than version 2.4. 2. If the ``use_native_hstore`` flag is at its default of ``True``, and we've detected that ``HSTORE`` oids are available, the @@ -219,9 +321,14 @@ from ... import types as sqltypes from .base import PGDialect, PGCompiler, \ PGIdentifierPreparer, PGExecutionContext, \ ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\ - _INT_TYPES + _INT_TYPES, UUID from .hstore import HSTORE -from .json import JSON +from .json import JSON, JSONB + +try: + from uuid import UUID as _python_UUID +except ImportError: + _python_UUID = None logger = logging.getLogger('sqlalchemy.dialects.postgresql') @@ -256,7 +363,7 @@ class _PGNumeric(sqltypes.Numeric): class _PGEnum(ENUM): def result_processor(self, dialect, coltype): - if util.py2k and self.convert_unicode is True: + if self.native_enum and util.py2k and self.convert_unicode is True: # we can't easily use PG's extensions here because # the OID is on the fly, and we need to give it a python # function anyway - not really worth it. 
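As a companion to the "Bound Parameter Styles" section above, here is a minimal sketch that combines the two workarounds it describes (the engine URL and column names are illustrative only)::

    from sqlalchemy import Table, Column, Integer, MetaData, create_engine

    metadata = MetaData()

    # workaround 1: give the problematic column a safe .key;
    # generated bound parameter names will use 'size_meters'
    measurement = Table(
        'measurement', metadata,
        Column('Size (meters)', Integer, key='size_meters')
    )

    # workaround 2: have psycopg2 use the positional "format" paramstyle
    engine = create_engine(
        'postgresql://scott:tiger@localhost:5432/test',
        paramstyle='format')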
@@ -286,6 +393,35 @@ class _PGJSON(JSON): else: return super(_PGJSON, self).result_processor(dialect, coltype) + +class _PGJSONB(JSONB): + + def result_processor(self, dialect, coltype): + if dialect._has_native_jsonb: + return None + else: + return super(_PGJSONB, self).result_processor(dialect, coltype) + + +class _PGUUID(UUID): + def bind_processor(self, dialect): + if not self.as_uuid and dialect.use_native_uuid: + nonetype = type(None) + + def process(value): + if value is not None: + value = _python_UUID(value) + return value + return process + + def result_processor(self, dialect, coltype): + if not self.as_uuid and dialect.use_native_uuid: + def process(value): + if value is not None: + value = str(value) + return value + return process + # When we're handed literal SQL, ensure it's a SELECT query. Since # 8.3, combining cursors and "FOR UPDATE" has been fine. SERVER_SIDE_CURSOR_RE = re.compile( @@ -374,8 +510,21 @@ class PGDialect_psycopg2(PGDialect): preparer = PGIdentifierPreparer_psycopg2 psycopg2_version = (0, 0) + FEATURE_VERSION_MAP = dict( + native_json=(2, 5), + native_jsonb=(2, 5, 4), + sane_multi_rowcount=(2, 0, 9), + array_oid=(2, 4, 3), + hstore_adapter=(2, 4) + ) + _has_native_hstore = False _has_native_json = False + _has_native_jsonb = False + + engine_config_types = PGDialect.engine_config_types.union([ + ('use_native_unicode', util.asbool), + ]) colspecs = util.update_copy( PGDialect.colspecs, @@ -384,18 +533,21 @@ class PGDialect_psycopg2(PGDialect): ENUM: _PGEnum, # needs force_unicode sqltypes.Enum: _PGEnum, # needs force_unicode HSTORE: _PGHStore, - JSON: _PGJSON + JSON: _PGJSON, + JSONB: _PGJSONB, + UUID: _PGUUID } ) def __init__(self, server_side_cursors=False, use_native_unicode=True, client_encoding=None, - use_native_hstore=True, + use_native_hstore=True, use_native_uuid=True, **kwargs): PGDialect.__init__(self, **kwargs) self.server_side_cursors = server_side_cursors self.use_native_unicode = use_native_unicode self.use_native_hstore = use_native_hstore + self.use_native_uuid = use_native_uuid self.supports_unicode_binds = use_native_unicode self.client_encoding = client_encoding if self.dbapi and hasattr(self.dbapi, '__version__'): @@ -412,19 +564,34 @@ class PGDialect_psycopg2(PGDialect): self._has_native_hstore = self.use_native_hstore and \ self._hstore_oids(connection.connection) \ is not None - self._has_native_json = self.psycopg2_version >= (2, 5) + self._has_native_json = \ + self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_json'] + self._has_native_jsonb = \ + self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_jsonb'] # http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9 - self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9) + self.supports_sane_multi_rowcount = \ + self.psycopg2_version >= \ + self.FEATURE_VERSION_MAP['sane_multi_rowcount'] @classmethod def dbapi(cls): import psycopg2 return psycopg2 + @classmethod + def _psycopg2_extensions(cls): + from psycopg2 import extensions + return extensions + + @classmethod + def _psycopg2_extras(cls): + from psycopg2 import extras + return extras + @util.memoized_property def _isolation_lookup(self): - from psycopg2 import extensions + extensions = self._psycopg2_extensions() return { 'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT, 'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED, @@ -446,7 +613,8 @@ class PGDialect_psycopg2(PGDialect): connection.set_isolation_level(level) def on_connect(self): - from psycopg2 import extras, 
extensions + extras = self._psycopg2_extras() + extensions = self._psycopg2_extensions() fns = [] if self.client_encoding is not None: @@ -459,6 +627,11 @@ class PGDialect_psycopg2(PGDialect): self.set_isolation_level(conn, self.isolation_level) fns.append(on_connect) + if self.dbapi and self.use_native_uuid: + def on_connect(conn): + extras.register_uuid(None, conn) + fns.append(on_connect) + if self.dbapi and self.use_native_unicode: def on_connect(conn): extensions.register_type(extensions.UNICODE, conn) @@ -470,19 +643,23 @@ class PGDialect_psycopg2(PGDialect): hstore_oids = self._hstore_oids(conn) if hstore_oids is not None: oid, array_oid = hstore_oids + kw = {'oid': oid} if util.py2k: - extras.register_hstore(conn, oid=oid, - array_oid=array_oid, - unicode=True) - else: - extras.register_hstore(conn, oid=oid, - array_oid=array_oid) + kw['unicode'] = True + if self.psycopg2_version >= \ + self.FEATURE_VERSION_MAP['array_oid']: + kw['array_oid'] = array_oid + extras.register_hstore(conn, **kw) fns.append(on_connect) if self.dbapi and self._json_deserializer: def on_connect(conn): - extras.register_default_json( - conn, loads=self._json_deserializer) + if self._has_native_json: + extras.register_default_json( + conn, loads=self._json_deserializer) + if self._has_native_jsonb: + extras.register_default_jsonb( + conn, loads=self._json_deserializer) fns.append(on_connect) if fns: @@ -495,8 +672,8 @@ class PGDialect_psycopg2(PGDialect): @util.memoized_instancemethod def _hstore_oids(self, conn): - if self.psycopg2_version >= (2, 4): - from psycopg2 import extras + if self.psycopg2_version >= self.FEATURE_VERSION_MAP['hstore_adapter']: + extras = self._psycopg2_extras() oids = extras.HstoreAdapter.get_oids(conn) if oids is not None and oids[0]: return oids[0:2] @@ -512,12 +689,14 @@ class PGDialect_psycopg2(PGDialect): def is_disconnect(self, e, connection, cursor): if isinstance(e, self.dbapi.Error): # check the "closed" flag. this might not be - # present on old psycopg2 versions + # present on old psycopg2 versions. Also, + # this flag doesn't actually help in a lot of disconnect + # situations, so don't rely on it. if getattr(connection, 'closed', False): return True - # legacy checks based on strings. the "closed" check - # above most likely obviates the need for any of these. + # checks based on strings. in the case that .closed + # didn't cut it, fall back onto these. str_e = str(e).partition("\n")[0] for msg in [ # these error messages from libpq: interfaces/libpq/fe-misc.c @@ -534,8 +713,10 @@ class PGDialect_psycopg2(PGDialect): # not sure where this path is originally from, it may # be obsolete. It really says "losed", not "closed". 
'losed the connection unexpectedly', - # this can occur in newer SSL - 'connection has been closed unexpectedly' + # these can occur in newer SSL + 'connection has been closed unexpectedly', + 'SSL SYSCALL error: Bad file descriptor', + 'SSL SYSCALL error: EOF detected', ]: idx = str_e.find(msg) if idx >= 0 and '"' not in str_e[:idx]: diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py new file mode 100644 index 0000000..ab99a83 --- /dev/null +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/psycopg2cffi.py @@ -0,0 +1,61 @@ +# testing/engines.py +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +""" +.. dialect:: postgresql+psycopg2cffi + :name: psycopg2cffi + :dbapi: psycopg2cffi + :connectstring: \ +postgresql+psycopg2cffi://user:password@host:port/dbname\ +[?key=value&key=value...] + :url: http://pypi.python.org/pypi/psycopg2cffi/ + +``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C +layer. This makes it suitable for use in e.g. PyPy. Documentation +is as per ``psycopg2``. + +.. versionadded:: 1.0.0 + +.. seealso:: + + :mod:`sqlalchemy.dialects.postgresql.psycopg2` + +""" +from .psycopg2 import PGDialect_psycopg2 + + +class PGDialect_psycopg2cffi(PGDialect_psycopg2): + driver = 'psycopg2cffi' + supports_unicode_statements = True + + # psycopg2cffi's first release is 2.5.0, but reports + # __version__ as 2.4.4. Subsequent releases seem to have + # fixed this. + + FEATURE_VERSION_MAP = dict( + native_json=(2, 4, 4), + native_jsonb=(2, 7, 1), + sane_multi_rowcount=(2, 4, 4), + array_oid=(2, 4, 4), + hstore_adapter=(2, 4, 4) + ) + + @classmethod + def dbapi(cls): + return __import__('psycopg2cffi') + + @classmethod + def _psycopg2_extensions(cls): + root = __import__('psycopg2cffi', fromlist=['extensions']) + return root.extensions + + @classmethod + def _psycopg2_extras(cls): + root = __import__('psycopg2cffi', fromlist=['extras']) + return root.extras + + +dialect = PGDialect_psycopg2cffi diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py index 3ebd013..f2b850a 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/pypostgresql.py @@ -1,5 +1,5 @@ # postgresql/pypostgresql.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -65,6 +65,23 @@ class PGDialect_pypostgresql(PGDialect): from postgresql.driver import dbapi20 return dbapi20 + _DBAPI_ERROR_NAMES = [ + "Error", + "InterfaceError", "DatabaseError", "DataError", + "OperationalError", "IntegrityError", "InternalError", + "ProgrammingError", "NotSupportedError" + ] + + @util.memoized_property + def dbapi_exception_translation_map(self): + if self.dbapi is None: + return {} + + return dict( + (getattr(self.dbapi, name).__name__, name) + for name in self._DBAPI_ERROR_NAMES + ) + def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if 'port' in opts: diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/ranges.py 
b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/ranges.py index 28f80d0..42a1cd4 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/ranges.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/ranges.py @@ -1,4 +1,4 @@ -# Copyright (C) 2013-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2013-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/zxjdbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/zxjdbc.py index 00b428f..cc46460 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/zxjdbc.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/zxjdbc.py @@ -1,5 +1,5 @@ # postgresql/zxjdbc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/__init__.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/__init__.py index 0eceaa5..a8dec30 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/__init__.py @@ -1,11 +1,11 @@ # sqlite/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -from sqlalchemy.dialects.sqlite import base, pysqlite +from sqlalchemy.dialects.sqlite import base, pysqlite, pysqlcipher # default dialect base.dialect = pysqlite.dialect diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/base.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/base.py index fbd580d..e623ff0 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/base.py @@ -1,5 +1,5 @@ # sqlite/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -9,6 +9,7 @@ .. dialect:: sqlite :name: SQLite +.. _sqlite_datetime: Date and Time Types ------------------- @@ -23,6 +24,20 @@ These types represent dates and times as ISO formatted strings, which also nicely support ordering. There's no reliance on typical "libc" internals for these functions so historical dates are fully supported. +Ensuring Text affinity +^^^^^^^^^^^^^^^^^^^^^^ + +The DDL rendered for these types is the standard ``DATE``, ``TIME`` +and ``DATETIME`` indicators. However, custom storage formats can also be +applied to these types. When the +storage format is detected as containing no alpha characters, the DDL for +these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, +so that the column continues to have textual affinity. + +.. seealso:: + + `Type Affinity `_ - in the SQLite documentation + .. 
_sqlite_autoincrement: SQLite Auto Incrementing Behavior @@ -30,14 +45,20 @@ Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html -Two things to note: +Key concepts: -* The AUTOINCREMENT keyword is **not** required for SQLite tables to - generate primary key values automatically. AUTOINCREMENT only means that the - algorithm used to generate ROWID values should be slightly different. -* SQLite does **not** generate primary key (i.e. ROWID) values, even for - one column, if the table has a composite (i.e. multi-column) primary key. - This is regardless of the AUTOINCREMENT keyword being present or not. +* SQLite has an implicit "auto increment" feature that takes place for any + non-composite primary-key column that is specifically created using + "INTEGER PRIMARY KEY" for the type + primary key. + +* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not** + equivalent to the implicit autoincrement feature; this keyword is not + recommended for general use. SQLAlchemy does not render this keyword + unless a special SQLite-specific directive is used (see below). However, + it still requires that the column's type is named "INTEGER". + +Using the AUTOINCREMENT Keyword +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ To specifically render the AUTOINCREMENT keyword on the primary key column when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table @@ -47,50 +68,172 @@ construct:: Column('id', Integer, primary_key=True), sqlite_autoincrement=True) -Transaction Isolation Level --------------------------- +Allowing autoincrement behavior for SQLAlchemy types other than Integer/INTEGER +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:func:`.create_engine` accepts an ``isolation_level`` parameter which results -in the command ``PRAGMA read_uncommitted `` being invoked for every new -connection. Valid values for this parameter are ``SERIALIZABLE`` and ``READ -UNCOMMITTED`` corresponding to a value of 0 and 1, respectively. See the -section :ref:`pysqlite_serializable` for an important workaround when using -serializable isolation with Pysqlite. +SQLite's typing model is based on naming conventions. Among +other things, this means that any type name which contains the +substring ``"INT"`` will be determined to be of "integer affinity". A +type named ``"BIGINT"``, ``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by +SQLite to be of "integer" affinity. However, **the SQLite +autoincrement feature, whether implicitly or explicitly enabled, +requires that the name of the column's type +is exactly the string "INTEGER"**. Therefore, if an +application uses a type like :class:`.BigInteger` for a primary key, on +SQLite this type will need to be rendered as the name ``"INTEGER"`` when +emitting the initial ``CREATE TABLE`` statement in order for the autoincrement +behavior to be available. 
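To see the failure mode concretely before the two workarounds that follow, here is a short sketch (table name hypothetical) showing that a plain :class:`.BigInteger` primary key does not render as ``INTEGER``::

    from sqlalchemy import Table, Column, BigInteger, MetaData
    from sqlalchemy.schema import CreateTable
    from sqlalchemy.dialects import sqlite

    metadata = MetaData()
    t = Table('t', metadata, Column('id', BigInteger, primary_key=True))

    # the DDL renders the column as "id BIGINT NOT NULL" -- the type is
    # not named exactly "INTEGER", so implicit autoincrement does not apply
    print(CreateTable(t).compile(dialect=sqlite.dialect()))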
+ +One approach to achieve this is to use :class:`.Integer` on SQLite +only using :meth:`.TypeEngine.with_variant`:: + + table = Table( + "my_table", metadata, + Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True) + ) + +Another is to use a subclass of :class:`.BigInteger` that overrides its DDL name +to be ``INTEGER`` when compiled against SQLite:: + + from sqlalchemy import BigInteger + from sqlalchemy.ext.compiler import compiles + + class SLBigInteger(BigInteger): + pass + + @compiles(SLBigInteger, 'sqlite') + def bi_c(element, compiler, **kw): + return "INTEGER" + + @compiles(SLBigInteger) + def bi_c(element, compiler, **kw): + return compiler.visit_BIGINT(element, **kw) + + + table = Table( + "my_table", metadata, + Column("id", SLBigInteger(), primary_key=True) + ) + +.. seealso:: + + :meth:`.TypeEngine.with_variant` + + :ref:`sqlalchemy.ext.compiler_toplevel` + + `Datatypes In SQLite Version 3 `_ + +.. _sqlite_concurrency: Database Locking Behavior / Concurrency --------------------------------------- -Note that SQLite is not designed for a high level of concurrency. The database -itself, being a file, is locked completely during write operations and within -transactions, meaning exactly one connection has exclusive access to the -database during this period - all other connections will be blocked during -this time. +SQLite is not designed for a high level of write concurrency. The database +itself, being a file, is locked completely during write operations within +transactions, meaning exactly one "connection" (in reality a file handle) +has exclusive access to the database during this period - all other +"connections" will be blocked during this time. The Python DBAPI specification also calls for a connection model that is -always in a transaction; there is no BEGIN method, only commit and rollback. -This implies that a SQLite DBAPI driver would technically allow only -serialized access to a particular database file at all times. The pysqlite -driver attempts to ameliorate this by deferring the actual BEGIN statement -until the first DML (INSERT, UPDATE, or DELETE) is received within a -transaction. While this breaks serializable isolation, it at least delays the -exclusive locking inherent in SQLite's design. +always in a transaction; there is no ``connection.begin()`` method, +only ``connection.commit()`` and ``connection.rollback()``, upon which a +new transaction is to be begun immediately. This may seem to imply +that the SQLite driver would in theory allow only a single filehandle on a +particular database file at any time; however, there are several +factors both within SQLite itself as well as within the pysqlite driver +which loosen this restriction significantly. -SQLAlchemy's default mode of usage with the ORM is known as -"autocommit=False", which means the moment the :class:`.Session` begins to be -used, a transaction is begun. As the :class:`.Session` is used, the autoflush -feature, also on by default, will flush out pending changes to the database -before each query. The effect of this is that a :class:`.Session` used in its -default mode will often emit DML early on, long before the transaction is -actually committed. This again will have the effect of serializing access to -the SQLite database. 
If highly concurrent reads are desired against the SQLite -database, it is advised that the autoflush feature be disabled, and -potentially even that autocommit be re-enabled, which has the effect of each -SQL statement and flush committing changes immediately. +However, no matter what locking modes are used, SQLite will still always +lock the database file once a transaction is started and DML (e.g. INSERT, +UPDATE, DELETE) has at least been emitted, and this will block +other transactions at least at the point that they also attempt to emit DML. +By default, the length of time on this block is very short before it times out +with an error. -For more information on SQLite's lack of concurrency by design, please see +This behavior becomes more critical when used in conjunction with the +SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs +within a transaction, and with its autoflush model, may emit DML preceding +any SELECT statement. This may lead to a SQLite database that locks +more quickly than is expected. The locking mode of SQLite and the pysqlite +driver can be manipulated to some degree, however it should be noted that +achieving a high degree of write-concurrency with SQLite is a losing battle. + +For more information on SQLite's lack of write concurrency by design, please +see `Situations Where Another RDBMS May Work Better - High Concurrency `_ near the bottom of the page. +The following subsections introduce areas that are impacted by SQLite's +file-based architecture and additionally will usually require workarounds to +work when using the pysqlite driver. + +.. _sqlite_isolation_level: + +Transaction Isolation Level +---------------------------- + +SQLite supports "transaction isolation" in a non-standard way, along two +axes. One is that of the `PRAGMA read_uncommitted `_ +instruction. This setting can essentially switch SQLite between its +default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation +mode normally referred to as ``READ UNCOMMITTED``. + +SQLAlchemy ties into this PRAGMA statement using the +:paramref:`.create_engine.isolation_level` parameter of :func:`.create_engine`. +Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"`` +and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively. +SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by +the pysqlite driver's default behavior. + +The other axis along which SQLite's transactional locking is impacted is +via the nature of the ``BEGIN`` statement used. The three varieties +are "deferred", "immediate", and "exclusive", as described at +`BEGIN TRANSACTION `_. A straight +``BEGIN`` statement uses the "deferred" mode, where the database file is +not locked until the first read or write operation, and read access remains +open to other transactions until the first write operation. But again, +it is critical to note that the pysqlite driver interferes with this behavior +by *not even emitting BEGIN* until the first write operation. + +.. warning:: + + SQLite's transactional scope is impacted by unresolved + issues in the pysqlite driver, which defers BEGIN statements to a greater + degree than is often feasible. See the section :ref:`pysqlite_serializable` + for techniques to work around this behavior. + +SAVEPOINT Support +---------------------------- + +SQLite supports SAVEPOINTs, which only function once a transaction is +begun. 
SQLAlchemy's SAVEPOINT support is available using the +:meth:`.Connection.begin_nested` method at the Core level, and +:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs +won't work at all with pysqlite unless workarounds are taken. + +.. warning:: + + SQLite's SAVEPOINT feature is impacted by unresolved + issues in the pysqlite driver, which defers BEGIN statements to a greater + degree than is often feasible. See the section :ref:`pysqlite_serializable` + for techniques to work around this behavior. + +Transactional DDL +---------------------------- + +The SQLite database supports transactional :term:`DDL` as well. +In this case, the pysqlite driver is not only failing to start transactions, +it also is ending any existing transaction when DDL is detected, so again, +workarounds are required. + +.. warning:: + + SQLite's transactional DDL is impacted by unresolved issues + in the pysqlite driver, which fails to emit BEGIN and additionally + forces a COMMIT to cancel any transaction when DDL is encountered. + See the section :ref:`pysqlite_serializable` + for techniques to work around this behavior. + .. _sqlite_foreign_keys: Foreign Key Support @@ -120,6 +263,15 @@ new connections through the usage of events:: cursor.execute("PRAGMA foreign_keys=ON") cursor.close() +.. warning:: + + When SQLite foreign keys are enabled, it is **not possible** + to emit CREATE or DROP statements for tables that contain + mutually-dependent foreign key constraints; + to emit the DDL for these tables requires that ALTER TABLE be used to + create or drop these constraints separately, for which SQLite has + no support. + .. seealso:: `SQLite Foreign Key Support `_ @@ -127,6 +279,9 @@ new connections through the usage of events:: :ref:`event_toplevel` - SQLAlchemy event API. + :ref:`use_alter` - more information on SQLAlchemy's facilities for handling + mutually-dependent foreign key constraints. + .. _sqlite_type_reflection: Type Reflection @@ -177,6 +332,133 @@ lookup is used instead: .. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting columns. + +.. _sqlite_partial_index: + +Partial Indexes +--------------- + +A partial index, e.g. one which uses a WHERE clause, can be specified +with the DDL system using the argument ``sqlite_where``:: + + tbl = Table('testtbl', m, Column('data', Integer)) + idx = Index('test_idx1', tbl.c.data, + sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10)) + +The index will be rendered at create time as:: + + CREATE INDEX test_idx1 ON testtbl (data) + WHERE data > 5 AND data < 10 + +.. versionadded:: 0.9.9 + +Dotted Column Names +------------------- + +Using table or column names that explicitly have periods in them is +**not recommended**. While this is generally a bad idea for relational +databases in general, as the dot is a syntactically significant character, +the SQLite driver up until version **3.10.0** of SQLite has a bug which +requires that SQLAlchemy filter out these dots in result sets. + +.. note:: + + The following SQLite issue has been resolved as of version 3.10.0 + of SQLite. SQLAlchemy as of **1.1** automatically disables its internal + workarounds based on detection of this version. 
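Whether a given environment is affected can be checked from Python itself; a minimal check using only the standard library ``sqlite3`` module::

    import sqlite3

    # version of the SQLite library itself (not the DBAPI module);
    # the dotted-name behavior described below is fixed as of (3, 10, 0)
    print(sqlite3.sqlite_version_info)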
+ +The bug, entirely outside of SQLAlchemy, can be illustrated thusly:: + + import sqlite3 + + conn = sqlite3.connect(":memory:") + cursor = conn.cursor() + + cursor.execute("create table x (a integer, b integer)") + cursor.execute("insert into x (a, b) values (1, 1)") + cursor.execute("insert into x (a, b) values (2, 2)") + + cursor.execute("select x.a, x.b from x") + assert [c[0] for c in cursor.description] == ['a', 'b'] + + cursor.execute(''' + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + ''') + assert [c[0] for c in cursor.description] == ['a', 'b'], \\ + [c[0] for c in cursor.description] + +The second assertion fails:: + + Traceback (most recent call last): + File "test.py", line 19, in + [c[0] for c in cursor.description] + AssertionError: ['x.a', 'x.b'] + +Where above, the driver incorrectly reports the names of the columns +including the name of the table, which is entirely inconsistent vs. +when the UNION is not present. + +SQLAlchemy relies upon column names being predictable in how they match +to the original statement, so the SQLAlchemy dialect has no choice but +to filter these out:: + + + from sqlalchemy import create_engine + + eng = create_engine("sqlite://") + conn = eng.connect() + + conn.execute("create table x (a integer, b integer)") + conn.execute("insert into x (a, b) values (1, 1)") + conn.execute("insert into x (a, b) values (2, 2)") + + result = conn.execute("select x.a, x.b from x") + assert result.keys() == ["a", "b"] + + result = conn.execute(''' + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + ''') + assert result.keys() == ["a", "b"] + +Note that above, even though SQLAlchemy filters out the dots, *both +names are still addressable*:: + + >>> row = result.first() + >>> row["a"] + 1 + >>> row["x.a"] + 1 + >>> row["b"] + 1 + >>> row["x.b"] + 1 + +Therefore, the workaround applied by SQLAlchemy only impacts +:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API. +In the very specific case where +an application is forced to use column names that contain dots, and the +functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` +is required to return these dotted names unmodified, the ``sqlite_raw_colnames`` +execution option may be provided, either on a per-:class:`.Connection` basis:: + + result = conn.execution_options(sqlite_raw_colnames=True).execute(''' + select x.a, x.b from x where a=1 + union + select x.a, x.b from x where a=2 + ''') + assert result.keys() == ["x.a", "x.b"] + +or on a per-:class:`.Engine` basis:: + + engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True}) + +When using the per-:class:`.Engine` execution option, note that +**Core and ORM queries that use UNION may not function properly**. + """ import datetime @@ -189,7 +471,7 @@ from ... import util from ...engine import default, reflection from ...sql import compiler -from ...types import (BLOB, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, +from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT, INTEGER, REAL, NUMERIC, SMALLINT, TEXT, TIMESTAMP, VARCHAR) @@ -205,6 +487,25 @@ class _DateTimeMixin(object): if storage_format is not None: self._storage_format = storage_format + @property + def format_is_text_affinity(self): + """return True if the storage format will automatically imply + a TEXT affinity. 
+ + If the storage format contains no non-numeric characters, + it will imply a NUMERIC storage format on SQLite; in this case, + the type will generate its DDL as DATE_CHAR, DATETIME_CHAR, + TIME_CHAR. + + .. versionadded:: 1.0.0 + + """ + spec = self._storage_format % { + "year": 0, "month": 0, "day": 0, "hour": 0, + "minute": 0, "second": 0, "microsecond": 0 + } + return bool(re.search(r'[^0-9]', spec)) + def adapt(self, cls, **kw): if issubclass(cls, _DateTimeMixin): if self._storage_format: @@ -460,7 +761,9 @@ ischema_names = { 'BOOLEAN': sqltypes.BOOLEAN, 'CHAR': sqltypes.CHAR, 'DATE': sqltypes.DATE, + 'DATE_CHAR': sqltypes.DATE, 'DATETIME': sqltypes.DATETIME, + 'DATETIME_CHAR': sqltypes.DATETIME, 'DOUBLE': sqltypes.FLOAT, 'DECIMAL': sqltypes.DECIMAL, 'FLOAT': sqltypes.FLOAT, @@ -471,6 +774,7 @@ ischema_names = { 'SMALLINT': sqltypes.SMALLINT, 'TEXT': sqltypes.TEXT, 'TIME': sqltypes.TIME, + 'TIME_CHAR': sqltypes.TIME, 'TIMESTAMP': sqltypes.TIMESTAMP, 'VARCHAR': sqltypes.VARCHAR, 'NVARCHAR': sqltypes.NVARCHAR, @@ -525,19 +829,19 @@ class SQLiteCompiler(compiler.SQLCompiler): raise exc.CompileError( "%s is not a valid extract argument." % extract.field) - def limit_clause(self, select): + def limit_clause(self, select, **kw): text = "" - if select._limit is not None: - text += "\n LIMIT " + self.process(sql.literal(select._limit)) - if select._offset is not None: - if select._limit is None: + if select._limit_clause is not None: + text += "\n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: text += "\n LIMIT " + self.process(sql.literal(-1)) - text += " OFFSET " + self.process(sql.literal(select._offset)) + text += " OFFSET " + self.process(select._offset_clause, **kw) else: - text += " OFFSET " + self.process(sql.literal(0)) + text += " OFFSET " + self.process(sql.literal(0), **kw) return text - def for_update_clause(self, select): + def for_update_clause(self, select, **kw): # sqlite has no "FOR UPDATE" AFAICT return '' @@ -545,7 +849,8 @@ class SQLiteCompiler(compiler.SQLCompiler): class SQLiteDDLCompiler(compiler.DDLCompiler): def get_column_specification(self, column, **kwargs): - coltype = self.dialect.type_compiler.process(column.type) + coltype = self.dialect.type_compiler.process( + column.type, type_expression=column) colspec = self.preparer.format_column(column) + " " + coltype default = self.get_column_default_string(column) if default is not None: @@ -580,8 +885,8 @@ class SQLiteDDLCompiler(compiler.DDLCompiler): def visit_foreign_key_constraint(self, constraint): - local_table = list(constraint._elements.values())[0].parent.table - remote_table = list(constraint._elements.values())[0].column.table + local_table = constraint.elements[0].parent.table + remote_table = constraint.elements[0].column.table if local_table.schema != remote_table.schema: return None @@ -596,14 +901,46 @@ class SQLiteDDLCompiler(compiler.DDLCompiler): return preparer.format_table(table, use_schema=False) def visit_create_index(self, create): - return super(SQLiteDDLCompiler, self).visit_create_index( + index = create.element + + text = super(SQLiteDDLCompiler, self).visit_create_index( create, include_table_schema=False) + whereclause = index.dialect_options["sqlite"]["where"] + if whereclause is not None: + where_compiled = self.sql_compiler.process( + whereclause, include_table=False, + literal_binds=True) + text += " WHERE " + where_compiled + + return text + class SQLiteTypeCompiler(compiler.GenericTypeCompiler): - def 
visit_large_binary(self, type_): + def visit_large_binary(self, type_, **kw): return self.visit_BLOB(type_) + def visit_DATETIME(self, type_, **kw): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_DATETIME(type_) + else: + return "DATETIME_CHAR" + + def visit_DATE(self, type_, **kw): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_DATE(type_) + else: + return "DATE_CHAR" + + def visit_TIME(self, type_, **kw): + if not isinstance(type_, _DateTimeMixin) or \ + type_.format_is_text_affinity: + return super(SQLiteTypeCompiler, self).visit_TIME(type_) + else: + return "TIME_CHAR" + class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): reserved_words = set([ @@ -647,10 +984,15 @@ class SQLiteExecutionContext(default.DefaultExecutionContext): return self.execution_options.get("sqlite_raw_colnames", False) def _translate_colname(self, colname): - # adjust for dotted column names. SQLite in the case of UNION may - # store col names as "tablename.colname" in cursor.description + # TODO: detect SQLite version 3.10.0 or greater; + # see [ticket:3633] + + # adjust for dotted column names. SQLite + # in the case of UNION may store col names as + # "tablename.colname", or if using an attached database, + # "database.tablename.colname", in cursor.description if not self._preserve_raw_colnames and "." in colname: - return colname.split(".")[1], colname + return colname.split(".")[-1], colname else: return colname, None @@ -664,6 +1006,9 @@ class SQLiteDialect(default.DefaultDialect): supports_empty_insert = False supports_cast = True supports_multivalues_insert = True + + # TODO: detect version 3.7.16 or greater; + # see [ticket:3634] supports_right_nested_joins = False default_paramstyle = 'qmark' @@ -682,7 +1027,10 @@ class SQLiteDialect(default.DefaultDialect): construct_arguments = [ (sa_schema.Table, { "autoincrement": False - }) + }), + (sa_schema.Index, { + "where": None, + }), ] _broken_fk_pragma_quotes = False @@ -763,60 +1111,44 @@ class SQLiteDialect(default.DefaultDialect): if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema - s = ("SELECT name FROM %s " - "WHERE type='table' ORDER BY name") % (master,) - rs = connection.execute(s) else: - try: - s = ("SELECT name FROM " - " (SELECT * FROM sqlite_master UNION ALL " - " SELECT * FROM sqlite_temp_master) " - "WHERE type='table' ORDER BY name") - rs = connection.execute(s) - except exc.DBAPIError: - s = ("SELECT name FROM sqlite_master " - "WHERE type='table' ORDER BY name") - rs = connection.execute(s) + master = "sqlite_master" + s = ("SELECT name FROM %s " + "WHERE type='table' ORDER BY name") % (master,) + rs = connection.execute(s) + return [row[0] for row in rs] + + @reflection.cache + def get_temp_table_names(self, connection, **kw): + s = "SELECT name FROM sqlite_temp_master "\ + "WHERE type='table' ORDER BY name " + rs = connection.execute(s) + + return [row[0] for row in rs] + + @reflection.cache + def get_temp_view_names(self, connection, **kw): + s = "SELECT name FROM sqlite_temp_master "\ + "WHERE type='view' ORDER BY name " + rs = connection.execute(s) return [row[0] for row in rs] def has_table(self, connection, table_name, schema=None): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." 
% quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - statement = "%stable_info(%s)" % (pragma, qtable) - cursor = _pragma_cursor(connection.execute(statement)) - row = cursor.fetchone() - - # consume remaining rows, to work around - # http://www.sqlite.org/cvstrac/tktview?tn=1884 - while not cursor.closed and cursor.fetchone() is not None: - pass - - return row is not None + info = self._get_table_pragma( + connection, "table_info", table_name, schema=schema) + return bool(info) @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema - s = ("SELECT name FROM %s " - "WHERE type='view' ORDER BY name") % (master,) - rs = connection.execute(s) else: - try: - s = ("SELECT name FROM " - " (SELECT * FROM sqlite_master UNION ALL " - " SELECT * FROM sqlite_temp_master) " - "WHERE type='view' ORDER BY name") - rs = connection.execute(s) - except exc.DBAPIError: - s = ("SELECT name FROM sqlite_master " - "WHERE type='view' ORDER BY name") - rs = connection.execute(s) + master = "sqlite_master" + s = ("SELECT name FROM %s " + "WHERE type='view' ORDER BY name") % (master,) + rs = connection.execute(s) return [row[0] for row in rs] @@ -847,18 +1179,11 @@ class SQLiteDialect(default.DefaultDialect): @reflection.cache def get_columns(self, connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." % quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - statement = "%stable_info(%s)" % (pragma, qtable) - c = _pragma_cursor(connection.execute(statement)) + info = self._get_table_pragma( + connection, "table_info", table_name, schema=schema) - rows = c.fetchall() columns = [] - for row in rows: + for row in info: (name, type_, nullable, default, primary_key) = ( row[1], row[2].upper(), not row[3], row[4], row[5]) @@ -945,116 +1270,219 @@ class SQLiteDialect(default.DefaultDialect): @reflection.cache def get_foreign_keys(self, connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." % quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - statement = "%sforeign_key_list(%s)" % (pragma, qtable) - c = _pragma_cursor(connection.execute(statement)) - fkeys = [] + # sqlite makes this *extremely difficult*. + # First, use the pragma to get the actual FKs. 
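+        # (In outline: the PRAGMA reports each foreign key's columns keyed
+        # by a numeric id, but not its constraint name; the CREATE TABLE
+        # text stored in sqlite_master is then scanned with a regex to
+        # match named constraints back up by column signature.)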
+ pragma_fks = self._get_table_pragma( + connection, "foreign_key_list", + table_name, schema=schema + ) + fks = {} - while True: - row = c.fetchone() - if row is None: - break + + for row in pragma_fks: (numerical_id, rtbl, lcol, rcol) = ( row[0], row[2], row[3], row[4]) - self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol) + if rcol is None: + rcol = lcol + + if self._broken_fk_pragma_quotes: + rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) + + if numerical_id in fks: + fk = fks[numerical_id] + else: + fk = fks[numerical_id] = { + 'name': None, + 'constrained_columns': [], + 'referred_schema': None, + 'referred_table': rtbl, + 'referred_columns': [], + } + fks[numerical_id] = fk + + fk['constrained_columns'].append(lcol) + fk['referred_columns'].append(rcol) + + def fk_sig(constrained_columns, referred_table, referred_columns): + return tuple(constrained_columns) + (referred_table,) + \ + tuple(referred_columns) + + # then, parse the actual SQL and attempt to find DDL that matches + # the names as well. SQLite saves the DDL in whatever format + # it was typed in as, so need to be liberal here. + + keys_by_signature = dict( + ( + fk_sig( + fk['constrained_columns'], + fk['referred_table'], fk['referred_columns']), + fk + ) for fk in fks.values() + ) + + table_data = self._get_table_sql(connection, table_name, schema=schema) + if table_data is None: + # system tables, etc. + return [] + + def parse_fks(): + FK_PATTERN = ( + '(?:CONSTRAINT (\w+) +)?' + 'FOREIGN KEY *\( *(.+?) *\) +' + 'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\)' + ) + + for match in re.finditer(FK_PATTERN, table_data, re.I): + ( + constraint_name, constrained_columns, + referred_quoted_name, referred_name, + referred_columns) = match.group(1, 2, 3, 4, 5) + constrained_columns = list( + self._find_cols_in_sig(constrained_columns)) + if not referred_columns: + referred_columns = constrained_columns + else: + referred_columns = list( + self._find_cols_in_sig(referred_columns)) + referred_name = referred_quoted_name or referred_name + yield ( + constraint_name, constrained_columns, + referred_name, referred_columns) + fkeys = [] + + for ( + constraint_name, constrained_columns, + referred_name, referred_columns) in parse_fks(): + sig = fk_sig( + constrained_columns, referred_name, referred_columns) + if sig not in keys_by_signature: + util.warn( + "WARNING: SQL-parsed foreign key constraint " + "'%s' could not be located in PRAGMA " + "foreign_keys for table %s" % ( + sig, + table_name + )) + continue + key = keys_by_signature.pop(sig) + key['name'] = constraint_name + fkeys.append(key) + # assume the remainders are the unnamed, inline constraints, just + # use them as is as it's extremely difficult to parse inline + # constraints + fkeys.extend(keys_by_signature.values()) return fkeys - def _parse_fk(self, fks, fkeys, numerical_id, rtbl, lcol, rcol): - # sqlite won't return rcol if the table was created with REFERENCES - # , no col - if rcol is None: - rcol = lcol - - if self._broken_fk_pragma_quotes: - rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) - - try: - fk = fks[numerical_id] - except KeyError: - fk = { - 'name': None, - 'constrained_columns': [], - 'referred_schema': None, - 'referred_table': rtbl, - 'referred_columns': [], - } - fkeys.append(fk) - fks[numerical_id] = fk - - if lcol not in fk['constrained_columns']: - fk['constrained_columns'].append(lcol) - if rcol not in fk['referred_columns']: - fk['referred_columns'].append(rcol) - return fk - - @reflection.cache - def get_indexes(self, 
connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." % quote(schema) - else: - pragma = "PRAGMA " - include_auto_indexes = kw.pop('include_auto_indexes', False) - qtable = quote(table_name) - statement = "%sindex_list(%s)" % (pragma, qtable) - c = _pragma_cursor(connection.execute(statement)) - indexes = [] - while True: - row = c.fetchone() - if row is None: - break - # ignore implicit primary key index. - # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html - elif (not include_auto_indexes and - row[1].startswith('sqlite_autoindex')): - continue - - indexes.append(dict(name=row[1], column_names=[], unique=row[2])) - # loop thru unique indexes to get the column names. - for idx in indexes: - statement = "%sindex_info(%s)" % (pragma, quote(idx['name'])) - c = connection.execute(statement) - cols = idx['column_names'] - while True: - row = c.fetchone() - if row is None: - break - cols.append(row[2]) - return indexes + def _find_cols_in_sig(self, sig): + for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I): + yield match.group(1) or match.group(2) @reflection.cache def get_unique_constraints(self, connection, table_name, schema=None, **kw): - UNIQUE_SQL = """ - SELECT sql - FROM - sqlite_master - WHERE - type='table' AND - name=:table_name - """ - c = connection.execute(UNIQUE_SQL, table_name=table_name) - table_data = c.fetchone()[0] - UNIQUE_PATTERN = 'CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)' - return [ - {'name': name, - 'column_names': [col.strip(' "') for col in cols.split(',')]} - for name, cols in re.findall(UNIQUE_PATTERN, table_data) - ] + auto_index_by_sig = {} + for idx in self.get_indexes( + connection, table_name, schema=schema, + include_auto_indexes=True, **kw): + if not idx['name'].startswith("sqlite_autoindex"): + continue + sig = tuple(idx['column_names']) + auto_index_by_sig[sig] = idx + table_data = self._get_table_sql( + connection, table_name, schema=schema, **kw) + if not table_data: + return [] -def _pragma_cursor(cursor): - """work around SQLite issue whereby cursor.description - is blank when PRAGMA returns no rows.""" + unique_constraints = [] - if cursor.closed: - cursor.fetchone = lambda: None - cursor.fetchall = lambda: [] - return cursor + def parse_uqs(): + UNIQUE_PATTERN = '(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)' + INLINE_UNIQUE_PATTERN = ( + '(?:(".+?")|([a-z0-9]+)) ' + '+[a-z0-9_ ]+? +UNIQUE') + + for match in re.finditer(UNIQUE_PATTERN, table_data, re.I): + name, cols = match.group(1, 2) + yield name, list(self._find_cols_in_sig(cols)) + + # we need to match inlines as well, as we seek to differentiate + # a UNIQUE constraint from a UNIQUE INDEX, even though these + # are kind of the same thing :) + for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I): + cols = list( + self._find_cols_in_sig(match.group(1) or match.group(2))) + yield None, cols + + for name, cols in parse_uqs(): + sig = tuple(cols) + if sig in auto_index_by_sig: + auto_index_by_sig.pop(sig) + parsed_constraint = { + 'name': name, + 'column_names': cols + } + unique_constraints.append(parsed_constraint) + # NOTE: auto_index_by_sig might not be empty here, + # the PRIMARY KEY may have an entry. 
+ return unique_constraints + + @reflection.cache + def get_indexes(self, connection, table_name, schema=None, **kw): + pragma_indexes = self._get_table_pragma( + connection, "index_list", table_name, schema=schema) + indexes = [] + + include_auto_indexes = kw.pop('include_auto_indexes', False) + for row in pragma_indexes: + # ignore implicit primary key index. + # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html + if (not include_auto_indexes and + row[1].startswith('sqlite_autoindex')): + continue + + indexes.append(dict(name=row[1], column_names=[], unique=row[2])) + + # loop thru unique indexes to get the column names. + for idx in indexes: + pragma_index = self._get_table_pragma( + connection, "index_info", idx['name']) + + for row in pragma_index: + idx['column_names'].append(row[2]) + return indexes + + @reflection.cache + def _get_table_sql(self, connection, table_name, schema=None, **kw): + try: + s = ("SELECT sql FROM " + " (SELECT * FROM sqlite_master UNION ALL " + " SELECT * FROM sqlite_temp_master) " + "WHERE name = '%s' " + "AND type = 'table'") % table_name + rs = connection.execute(s) + except exc.DBAPIError: + s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " + "AND type = 'table'") % table_name + rs = connection.execute(s) + return rs.scalar() + + def _get_table_pragma(self, connection, pragma, table_name, schema=None): + quote = self.identifier_preparer.quote_identifier + if schema is not None: + statement = "PRAGMA %s." % quote(schema) + else: + statement = "PRAGMA " + qtable = quote(table_name) + statement = "%s%s(%s)" % (statement, pragma, qtable) + cursor = connection.execute(statement) + if not cursor._soft_closed: + # work around SQLite issue whereby cursor.description + # is blank when PRAGMA returns no rows: + # http://www.sqlite.org/cvstrac/tktview?tn=1884 + result = cursor.fetchall() + else: + result = [] + return result diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py new file mode 100644 index 0000000..bbafc8d --- /dev/null +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py @@ -0,0 +1,116 @@ +# sqlite/pysqlcipher.py +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +""" +.. dialect:: sqlite+pysqlcipher + :name: pysqlcipher + :dbapi: pysqlcipher + :connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=] + :url: https://pypi.python.org/pypi/pysqlcipher + + ``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make + use of the `SQLCipher `_ backend. + + .. versionadded:: 0.9.9 + +Driver +------ + +The driver here is the `pysqlcipher `_ +driver, which makes use of the SQLCipher engine. This system essentially +introduces new PRAGMA commands to SQLite which allow the setting of a +passphrase and other encryption parameters, allowing the database +file to be encrypted. 
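+For a sense of what happens at connect time, the underlying DBAPI usage
+is roughly as follows (a sketch using the ``pysqlcipher`` DBAPI directly;
+the file name and passphrase shown are placeholders)::
+
+    from pysqlcipher import dbapi2 as sqlcipher
+
+    conn = sqlcipher.connect("encrypted.db")
+    # the key must be supplied before any other statement is emitted
+    conn.execute('pragma key="passphrase"')
+    conn.execute("select count(*) from sqlite_master")
+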
+ +Connect Strings +--------------- + +The format of the connect string is in every way the same as that +of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the +"password" field is now accepted, which should contain a passphrase:: + + e = create_engine('sqlite+pysqlcipher://:testing@/foo.db') + +For an absolute file path, two leading slashes should be used for the +database name:: + + e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db') + +A selection of additional encryption-related pragmas supported by SQLCipher +as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed +in the query string, and will result in that PRAGMA being called for each +new connection. Currently, ``cipher``, ``kdf_iter``, +``cipher_page_size`` and ``cipher_use_hmac`` are supported:: + + e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000') + + +Pooling Behavior +---------------- + +The driver makes a change to the default pool behavior of pysqlite +as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver +has been observed to be significantly slower on connection than the +pysqlite driver, most likely due to the encryption overhead, so the +dialect here defaults to using the :class:`.SingletonThreadPool` +implementation, +instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool +implementation is entirely configurable using the +:paramref:`.create_engine.poolclass` parameter; the :class:`.StaticPool` may +be more feasible for single-threaded use, or :class:`.NullPool` may be used +to prevent unencrypted connections from being held open for long periods of +time, at the expense of slower startup time for new connections. + + +""" +from __future__ import absolute_import +from .pysqlite import SQLiteDialect_pysqlite +from ...engine import url as _url +from ... 
import pool + + +class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite): + driver = 'pysqlcipher' + + pragmas = ('kdf_iter', 'cipher', 'cipher_page_size', 'cipher_use_hmac') + + @classmethod + def dbapi(cls): + from pysqlcipher import dbapi2 as sqlcipher + return sqlcipher + + @classmethod + def get_pool_class(cls, url): + return pool.SingletonThreadPool + + def connect(self, *cargs, **cparams): + passphrase = cparams.pop('passphrase', '') + + pragmas = dict( + (key, cparams.pop(key, None)) for key in + self.pragmas + ) + + conn = super(SQLiteDialect_pysqlcipher, self).\ + connect(*cargs, **cparams) + conn.execute('pragma key="%s"' % passphrase) + for prag, value in pragmas.items(): + if value is not None: + conn.execute('pragma %s=%s' % (prag, value)) + + return conn + + def create_connect_args(self, url): + super_url = _url.URL( + url.drivername, username=url.username, + host=url.host, database=url.database, query=url.query) + c_args, opts = super(SQLiteDialect_pysqlcipher, self).\ + create_connect_args(super_url) + opts['passphrase'] = url.password + return c_args, opts + +dialect = SQLiteDialect_pysqlcipher diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py index c673332..33d04de 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sqlite/pysqlite.py @@ -1,5 +1,5 @@ # sqlite/pysqlite.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -200,30 +200,68 @@ is passed containing non-ASCII characters. .. _pysqlite_serializable: -Serializable Transaction Isolation ---------------------------------- +Serializable isolation / Savepoints / Transactional DDL +------------------------------------------------------- -The pysqlite DBAPI driver has a long-standing bug in which transactional -state is not begun until the first DML statement, that is INSERT, UPDATE -or DELETE, is emitted. A SELECT statement will not cause transactional -state to begin. While this mode of usage is fine for typical situations -and has the advantage that the SQLite database file is not prematurely -locked, it breaks serializable transaction isolation, which requires -that the database file be locked upon any SQL being emitted. +In the section :ref:`sqlite_concurrency`, we refer to the pysqlite +driver's assortment of issues that prevent several features of SQLite +from working correctly. The pysqlite DBAPI driver has several +long-standing bugs which impact the correctness of its transactional +behavior. In its default mode of operation, SQLite features such as +SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are +non-functional, and in order to use these features, workarounds must +be taken. -To work around this issue, the ``BEGIN`` keyword can be emitted -at the start of each transaction. The following recipe establishes -a :meth:`.ConnectionEvents.begin` handler to achieve this:: +The issue is essentially that the driver attempts to second-guess the user's +intent, failing to start transactions and sometimes ending them prematurely, in +an effort to minimize the SQLite database's file locking behavior, even +though SQLite itself uses "shared" locks for read-only activities. 
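+The effect can be observed with the stdlib ``sqlite3`` module directly
+(a rough illustration, not SQLAlchemy-specific; ``in_transaction`` is
+available as of Python 3.2)::
+
+    import sqlite3
+
+    conn = sqlite3.connect(":memory:")
+    conn.execute("create table t (x integer)")
+    conn.execute("select * from t")
+    print(conn.in_transaction)   # False - no BEGIN emitted for SELECT
+    conn.execute("insert into t (x) values (1)")
+    print(conn.in_transaction)   # True - BEGIN emitted before the DML
+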
+ +SQLAlchemy chooses to not alter this behavior by default, as it is the +long-expected behavior of the pysqlite driver; if and when the pysqlite +driver repairs these issues, that will help drive a change in +SQLAlchemy's defaults as well. + +The good news is that with a few events, we can implement transactional +support fully by disabling pysqlite's feature entirely and emitting BEGIN +ourselves. This is achieved using two event listeners:: from sqlalchemy import create_engine, event - engine = create_engine("sqlite:///myfile.db", - isolation_level='SERIALIZABLE') + engine = create_engine("sqlite:///myfile.db") + + @event.listens_for(engine, "connect") + def do_connect(dbapi_connection, connection_record): + # disable pysqlite's emitting of the BEGIN statement entirely. + # also stops it from emitting COMMIT before any DDL. + dbapi_connection.isolation_level = None @event.listens_for(engine, "begin") def do_begin(conn): + # emit our own BEGIN conn.execute("BEGIN") +Above, we intercept a new pysqlite connection and disable any transactional +integration. Then, at the point at which SQLAlchemy knows that transaction +scope is to begin, we emit ``"BEGIN"`` ourselves. + +When we take control of ``"BEGIN"``, we can also directly control SQLite's +locking modes, introduced at `BEGIN TRANSACTION `_, +by adding the desired locking mode to our ``"BEGIN"``:: + + @event.listens_for(engine, "begin") + def do_begin(conn): + conn.execute("BEGIN EXCLUSIVE") + +.. seealso:: + + `BEGIN TRANSACTION `_ - on the SQLite site + + `sqlite3 SELECT does not BEGIN a transaction `_ - on the Python bug tracker + + `sqlite3 module breaks transactions and potentially corrupts data `_ - on the Python bug tracker + + """ from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/__init__.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/__init__.py index eb31359..18535ed 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/__init__.py @@ -1,5 +1,5 @@ # sybase/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/base.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/base.py index 43ddc43..1e38534 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/base.py @@ -1,5 +1,5 @@ # sybase/base.py -# Copyright (C) 2010-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2016 the SQLAlchemy authors and contributors # # get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management @@ -98,7 +98,6 @@ RESERVED_WORDS = set([ class _SybaseUnitypeMixin(object): - """these types appear to return a buffer object.""" def result_processor(self, dialect, coltype): @@ -147,41 +146,40 @@ class IMAGE(sqltypes.LargeBinary): class SybaseTypeCompiler(compiler.GenericTypeCompiler): - - def visit_large_binary(self, type_): + def visit_large_binary(self, type_, **kw): return self.visit_IMAGE(type_) - def visit_boolean(self, type_): + def visit_boolean(self, type_, **kw): return self.visit_BIT(type_) - def visit_unicode(self, type_): + def visit_unicode(self, type_, **kw): return 
self.visit_NVARCHAR(type_) - def visit_UNICHAR(self, type_): + def visit_UNICHAR(self, type_, **kw): return "UNICHAR(%d)" % type_.length - def visit_UNIVARCHAR(self, type_): + def visit_UNIVARCHAR(self, type_, **kw): return "UNIVARCHAR(%d)" % type_.length - def visit_UNITEXT(self, type_): + def visit_UNITEXT(self, type_, **kw): return "UNITEXT" - def visit_TINYINT(self, type_): + def visit_TINYINT(self, type_, **kw): return "TINYINT" - def visit_IMAGE(self, type_): + def visit_IMAGE(self, type_, **kw): return "IMAGE" - def visit_BIT(self, type_): + def visit_BIT(self, type_, **kw): return "BIT" - def visit_MONEY(self, type_): + def visit_MONEY(self, type_, **kw): return "MONEY" - def visit_SMALLMONEY(self, type_): + def visit_SMALLMONEY(self, type_, **kw): return "SMALLMONEY" - def visit_UNIQUEIDENTIFIER(self, type_): + def visit_UNIQUEIDENTIFIER(self, type_, **kw): return "UNIQUEIDENTIFIER" ischema_names = { @@ -325,28 +323,30 @@ class SybaseSQLCompiler(compiler.SQLCompiler): 'milliseconds': 'millisecond' }) - def get_select_precolumns(self, select): + def get_select_precolumns(self, select, **kw): s = select._distinct and "DISTINCT " or "" # TODO: don't think Sybase supports # bind params for FIRST / TOP - if select._limit: + limit = select._limit + if limit: # if select._limit == 1: - # s += "FIRST " + # s += "FIRST " # else: - # s += "TOP %s " % (select._limit,) - s += "TOP %s " % (select._limit,) - if select._offset: - if not select._limit: + # s += "TOP %s " % (select._limit,) + s += "TOP %s " % (limit,) + offset = select._offset + if offset: + if not limit: # FIXME: sybase doesn't allow an offset without a limit # so use a huge value for TOP here s += "TOP 1000000 " - s += "START AT %s " % (select._offset + 1,) + s += "START AT %s " % (offset + 1,) return s def get_from_hint_text(self, table, text): return text - def limit_clause(self, select): + def limit_clause(self, select, **kw): # Limit in sybase is after the select keyword return "" @@ -375,10 +375,10 @@ class SybaseSQLCompiler(compiler.SQLCompiler): class SybaseDDLCompiler(compiler.DDLCompiler): - def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) + " " + \ - self.dialect.type_compiler.process(column.type) + self.dialect.type_compiler.process( + column.type, type_expression=column) if column.table is None: raise exc.CompileError( @@ -608,8 +608,8 @@ class SybaseDialect(default.DefaultDialect): FROM sysreferences r JOIN sysobjects o on r.tableid = o.id WHERE r.tableid = :table_id """) - referential_constraints = connection.execute(REFCONSTRAINT_SQL, - table_id=table_id) + referential_constraints = connection.execute( + REFCONSTRAINT_SQL, table_id=table_id).fetchall() REFTABLE_SQL = text(""" SELECT o.name AS name, u.name AS 'schema' @@ -740,10 +740,13 @@ class SybaseDialect(default.DefaultDialect): results.close() constrained_columns = [] - for i in range(1, pks["count"] + 1): - constrained_columns.append(pks["pk_%i" % (i,)]) - return {"constrained_columns": constrained_columns, - "name": pks["name"]} + if pks: + for i in range(1, pks["count"] + 1): + constrained_columns.append(pks["pk_%i" % (i,)]) + return {"constrained_columns": constrained_columns, + "name": pks["name"]} + else: + return {"constrained_columns": [], "name": None} @reflection.cache def get_schema_names(self, connection, **kw): diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/mxodbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/mxodbc.py index 373bea0..60e6510 100644 --- 
a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/mxodbc.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/mxodbc.py @@ -1,5 +1,5 @@ # sybase/mxodbc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/pyodbc.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/pyodbc.py index cb76d13..348ca32 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/pyodbc.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/pyodbc.py @@ -1,5 +1,5 @@ # sybase/pyodbc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/pysybase.py b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/pysybase.py index 6843eb4..41ca47f 100644 --- a/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/pysybase.py +++ b/lib/python3.5/site-packages/sqlalchemy/dialects/sybase/pysybase.py @@ -1,5 +1,5 @@ # sybase/pysybase.py -# Copyright (C) 2010-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/__init__.py b/lib/python3.5/site-packages/sqlalchemy/engine/__init__.py index 9c9e038..09054d9 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/__init__.py @@ -1,5 +1,5 @@ # engine/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -72,6 +72,7 @@ from .base import ( ) from .result import ( + BaseRowProxy, BufferedColumnResultProxy, BufferedColumnRow, BufferedRowResultProxy, @@ -248,6 +249,34 @@ def create_engine(*args, **kwargs): Microsoft SQL Server. Set this to ``False`` to disable the automatic usage of RETURNING. + :param isolation_level: this string parameter is interpreted by various + dialects in order to affect the transaction isolation level of the + database connection. The parameter essentially accepts some subset of + these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``, + ``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``. + Behavior here varies per backend, and + individual dialects should be consulted directly. + + Note that the isolation level can also be set on a per-:class:`.Connection` + basis as well, using the + :paramref:`.Connection.execution_options.isolation_level` + feature. + + .. seealso:: + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`.Connection` isolation level + + :ref:`SQLite Transaction Isolation ` + + :ref:`Postgresql Transaction Isolation ` + + :ref:`MySQL Transaction Isolation ` + + :ref:`session_transaction_isolation` - for the ORM + :param label_length=None: optional integer value which limits the size of dynamically generated column labels to that many characters. If less than 6, labels are generated as @@ -276,6 +305,17 @@ def create_engine(*args, **kwargs): be used instead. 
Can be used for testing of DBAPIs as well as to inject "mock" DBAPI implementations into the :class:`.Engine`. + :param paramstyle=None: The `paramstyle `_ + to use when rendering bound parameters. This style defaults to the + one recommended by the DBAPI itself, which is retrieved from the + ``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept + more than one paramstyle, and in particular it may be desirable + to change a "named" paramstyle into a "positional" one, or vice versa. + When this attribute is passed, it should be one of the values + ``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or + ``"pyformat"``, and should correspond to a parameter style known + to be supported by the DBAPI in use. + :param pool=None: an already-constructed instance of :class:`~sqlalchemy.pool.Pool`, such as a :class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this @@ -349,14 +389,33 @@ def create_engine(*args, **kwargs): def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs): """Create a new Engine instance using a configuration dictionary. - The dictionary is typically produced from a config file where keys - are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The - 'prefix' argument indicates the prefix to be searched for. + The dictionary is typically produced from a config file. + + The keys of interest to ``engine_from_config()`` should be prefixed, e.g. + ``sqlalchemy.url``, ``sqlalchemy.echo``, etc. The 'prefix' argument + indicates the prefix to be searched for. Each matching key (after the + prefix is stripped) is treated as though it were the corresponding keyword + argument to a :func:`.create_engine` call. + + The only required key is (assuming the default prefix) ``sqlalchemy.url``, + which provides the :ref:`database URL `. A select set of keyword arguments will be "coerced" to their - expected type based on string values. In a future release, this - functionality will be expanded and include dialect-specific - arguments. + expected type based on string values. The set of arguments + is extensible per-dialect using the ``engine_config_types`` accessor. + + :param configuration: A dictionary (typically produced from a config file, + but this is not a requirement). Items whose keys start with the value + of 'prefix' will have that prefix stripped, and will then be passed to + :ref:`create_engine`. + + :param prefix: Prefix to match and then strip from keys + in 'configuration'. + + :param kwargs: Each keyword argument to ``engine_from_config()`` itself + overrides the corresponding item taken from the 'configuration' + dictionary. Keyword arguments should *not* be prefixed. + """ options = dict((key[len(prefix):], configuration[key]) diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/base.py b/lib/python3.5/site-packages/sqlalchemy/engine/base.py index cf06896..80edd95 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/base.py @@ -1,5 +1,5 @@ # engine/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -45,7 +45,7 @@ class Connection(Connectable): """ def __init__(self, engine, connection=None, close_with_result=False, - _branch=False, _execution_options=None, + _branch_from=None, _execution_options=None, _dispatch=None, _has_events=None): """Construct a new Connection. 
@@ -57,48 +57,80 @@ class Connection(Connectable): """ self.engine = engine self.dialect = engine.dialect - self.__connection = connection or engine.raw_connection() - self.__transaction = None - self.should_close_with_result = close_with_result - self.__savepoint_seq = 0 - self.__branch = _branch - self.__invalid = False - self.__can_reconnect = True - if _dispatch: - self.dispatch = _dispatch - elif _has_events is None: - # if _has_events is sent explicitly as False, - # then don't join the dispatch of the engine; we don't - # want to handle any of the engine's events in that case. - self.dispatch = self.dispatch._join(engine.dispatch) - self._has_events = _has_events or ( - _has_events is None and engine._has_events) + self.__branch_from = _branch_from + self.__branch = _branch_from is not None - self._echo = self.engine._should_log_info() - if _execution_options: - self._execution_options =\ - engine._execution_options.union(_execution_options) + if _branch_from: + self.__connection = connection + self._execution_options = _execution_options + self._echo = _branch_from._echo + self.should_close_with_result = False + self.dispatch = _dispatch + self._has_events = _branch_from._has_events else: + self.__connection = connection \ + if connection is not None else engine.raw_connection() + self.__transaction = None + self.__savepoint_seq = 0 + self.should_close_with_result = close_with_result + self.__invalid = False + self.__can_reconnect = True + self._echo = self.engine._should_log_info() + + if _has_events is None: + # if _has_events is sent explicitly as False, + # then don't join the dispatch of the engine; we don't + # want to handle any of the engine's events in that case. + self.dispatch = self.dispatch._join(engine.dispatch) + self._has_events = _has_events or ( + _has_events is None and engine._has_events) + + assert not _execution_options self._execution_options = engine._execution_options if self._has_events or self.engine._has_events: - self.dispatch.engine_connect(self, _branch) + self.dispatch.engine_connect(self, self.__branch) def _branch(self): """Return a new Connection which references this Connection's engine and connection; but does not have close_with_result enabled, and also whose close() method does nothing. - This is used to execute "sub" statements within a single execution, - usually an INSERT statement. + The Core uses this very sparingly, only in the case of + custom SQL default functions that are to be INSERTed as the + primary key of a row where we need to get the value back, so we have + to invoke it distinctly - this is a very uncommon case. + + Userland code accesses _branch() when the connect() or + contextual_connect() methods are called. The branched connection + acts as much as possible like the parent, except that it stays + connected when a close() event occurs. + + """ + if self.__branch_from: + return self.__branch_from._branch() + else: + return self.engine._connection_cls( + self.engine, + self.__connection, + _branch_from=self, + _execution_options=self._execution_options, + _has_events=self._has_events, + _dispatch=self.dispatch) + + @property + def _root(self): + """return the 'root' connection. + + Returns 'self' if this connection is not a branch, else + returns the root connection from which we ultimately branched. 
+ """ - return self.engine._connection_cls( - self.engine, - self.__connection, - _branch=True, - _has_events=self._has_events, - _dispatch=self.dispatch) + if self.__branch_from: + return self.__branch_from + else: + return self def _clone(self): """Create a shallow copy of this Connection. @@ -169,14 +201,19 @@ class Connection(Connectable): used by the ORM internally supersedes a cache dictionary specified here. - :param isolation_level: Available on: Connection. + :param isolation_level: Available on: :class:`.Connection`. Set the transaction isolation level for - the lifespan of this connection. Valid values include - those string values accepted by the ``isolation_level`` - parameter passed to :func:`.create_engine`, and are - database specific, including those for :ref:`sqlite_toplevel`, - :ref:`postgresql_toplevel` - see those dialect's documentation - for further info. + the lifespan of this :class:`.Connection` object (*not* the + underyling DBAPI connection, for which the level is reset + to its original setting upon termination of this + :class:`.Connection` object). + + Valid values include + those string values accepted by the + :paramref:`.create_engine.isolation_level` + parameter passed to :func:`.create_engine`. These levels are + semi-database specific; see individual dialect documentation for + valid levels. Note that this option necessarily affects the underlying DBAPI connection for the lifespan of the originating @@ -185,6 +222,41 @@ class Connection(Connectable): is returned to the connection pool, i.e. the :meth:`.Connection.close` method is called. + .. warning:: The ``isolation_level`` execution option should + **not** be used when a transaction is already established, that + is, the :meth:`.Connection.begin` method or similar has been + called. A database cannot change the isolation level on a + transaction in progress, and different DBAPIs and/or + SQLAlchemy dialects may implicitly roll back or commit + the transaction, or not affect the connection at all. + + .. versionchanged:: 0.9.9 A warning is emitted when the + ``isolation_level`` execution option is used after a + transaction has been started with :meth:`.Connection.begin` + or similar. + + .. note:: The ``isolation_level`` execution option is implicitly + reset if the :class:`.Connection` is invalidated, e.g. via + the :meth:`.Connection.invalidate` method, or if a + disconnection error occurs. The new connection produced after + the invalidation will not have the isolation level re-applied + to it automatically. + + .. seealso:: + + :paramref:`.create_engine.isolation_level` + - set per :class:`.Engine` isolation level + + :meth:`.Connection.get_isolation_level` - view current level + + :ref:`SQLite Transaction Isolation ` + + :ref:`Postgresql Transaction Isolation ` + + :ref:`MySQL Transaction Isolation ` + + :ref:`session_transaction_isolation` - for the ORM + :param no_parameters: When ``True``, if the final parameter list or dictionary is totally empty, will invoke the statement on the cursor as ``cursor.execute(statement)``, @@ -224,24 +296,101 @@ class Connection(Connectable): def invalidated(self): """Return True if this connection was invalidated.""" - return self.__invalid + return self._root.__invalid @property def connection(self): - "The underlying DB-API connection managed by this Connection." + """The underlying DB-API connection managed by this Connection. + + .. 
seealso:: + + + :ref:`dbapi_connections` + + """ try: return self.__connection except AttributeError: - return self._revalidate_connection() + try: + return self._revalidate_connection() + except Exception as e: + self._handle_dbapi_exception(e, None, None, None, None) + + def get_isolation_level(self): + """Return the current isolation level assigned to this + :class:`.Connection`. + + This will typically be the default isolation level as determined + by the dialect, unless the + :paramref:`.Connection.execution_options.isolation_level` + feature has been used to alter the isolation level on a + per-:class:`.Connection` basis. + + This attribute will typically perform a live SQL operation in order + to procure the current isolation level, so the value returned is the + actual level on the underlying DBAPI connection regardless of how + this state was set. Compare to the + :attr:`.Connection.default_isolation_level` accessor + which returns the dialect-level setting without performing a SQL + query. + + .. versionadded:: 0.9.9 + + .. seealso:: + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.create_engine.isolation_level` + - set per :class:`.Engine` isolation level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`.Connection` isolation level + + """ + try: + return self.dialect.get_isolation_level(self.connection) + except Exception as e: + self._handle_dbapi_exception(e, None, None, None, None) + + @property + def default_isolation_level(self): + """The default isolation level assigned to this :class:`.Connection`. + + This is the isolation level setting that the :class:`.Connection` + has when first procured via the :meth:`.Engine.connect` method. + This level stays in place until the + :paramref:`.Connection.execution_options.isolation_level` is used + to change the setting on a per-:class:`.Connection` basis. + + Unlike :meth:`.Connection.get_isolation_level`, this attribute is set + ahead of time from the first connection procured by the dialect, + so a SQL query is not invoked when this accessor is called. + + .. versionadded:: 0.9.9 + + .. seealso:: + + :meth:`.Connection.get_isolation_level` - view current level + + :paramref:`.create_engine.isolation_level` + - set per :class:`.Engine` isolation level + + :paramref:`.Connection.execution_options.isolation_level` + - set per :class:`.Connection` isolation level + + """ + return self.dialect.default_isolation_level def _revalidate_connection(self): + if self.__branch_from: + return self.__branch_from._revalidate_connection() if self.__can_reconnect and self.__invalid: if self.__transaction is not None: raise exc.InvalidRequestError( "Can't reconnect until invalid " "transaction is rolled back") - self.__connection = self.engine.raw_connection() + self.__connection = self.engine.raw_connection(_connection=self) + self.__invalid = False return self.__connection raise exc.ResourceClosedError("This Connection is closed") @@ -343,16 +492,17 @@ class Connection(Connectable): :ref:`pool_connection_invalidation` """ + if self.invalidated: return if self.closed: raise exc.ResourceClosedError("This Connection is closed") - if self._connection_is_valid: - self.__connection.invalidate(exception) - del self.__connection - self.__invalid = True + if self._root._connection_is_valid: + self._root.__connection.invalidate(exception) + del self._root.__connection + self._root.__invalid = True def detach(self): """Detach the underlying DB-API connection from its connection pool. 
@@ -415,6 +565,8 @@ class Connection(Connectable): :class:`.Engine`. """ + if self.__branch_from: + return self.__branch_from.begin() if self.__transaction is None: self.__transaction = RootTransaction(self) @@ -436,6 +588,9 @@ class Connection(Connectable): See also :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`. """ + if self.__branch_from: + return self.__branch_from.begin_nested() + if self.__transaction is None: self.__transaction = RootTransaction(self) else: @@ -459,6 +614,9 @@ class Connection(Connectable): """ + if self.__branch_from: + return self.__branch_from.begin_twophase(xid=xid) + if self.__transaction is not None: raise exc.InvalidRequestError( "Cannot start a two phase transaction when a transaction " @@ -479,10 +637,11 @@ class Connection(Connectable): def in_transaction(self): """Return True if a transaction is in progress.""" - - return self.__transaction is not None + return self._root.__transaction is not None def _begin_impl(self, transaction): + assert not self.__branch_from + if self._echo: self.engine.logger.info("BEGIN (implicit)") @@ -497,6 +656,8 @@ class Connection(Connectable): self._handle_dbapi_exception(e, None, None, None, None) def _rollback_impl(self): + assert not self.__branch_from + if self._has_events or self.engine._has_events: self.dispatch.rollback(self) @@ -516,6 +677,8 @@ class Connection(Connectable): self.__transaction = None def _commit_impl(self, autocommit=False): + assert not self.__branch_from + if self._has_events or self.engine._has_events: self.dispatch.commit(self) @@ -532,6 +695,8 @@ class Connection(Connectable): self.__transaction = None def _savepoint_impl(self, name=None): + assert not self.__branch_from + if self._has_events or self.engine._has_events: self.dispatch.savepoint(self, name) @@ -543,6 +708,8 @@ class Connection(Connectable): return name def _rollback_to_savepoint_impl(self, name, context): + assert not self.__branch_from + if self._has_events or self.engine._has_events: self.dispatch.rollback_savepoint(self, name, context) @@ -551,6 +718,8 @@ class Connection(Connectable): self.__transaction = context def _release_savepoint_impl(self, name, context): + assert not self.__branch_from + if self._has_events or self.engine._has_events: self.dispatch.release_savepoint(self, name, context) @@ -559,6 +728,8 @@ class Connection(Connectable): self.__transaction = context def _begin_twophase_impl(self, transaction): + assert not self.__branch_from + if self._echo: self.engine.logger.info("BEGIN TWOPHASE (implicit)") if self._has_events or self.engine._has_events: @@ -571,6 +742,8 @@ class Connection(Connectable): self.connection._reset_agent = transaction def _prepare_twophase_impl(self, xid): + assert not self.__branch_from + if self._has_events or self.engine._has_events: self.dispatch.prepare_twophase(self, xid) @@ -579,6 +752,8 @@ class Connection(Connectable): self.engine.dialect.do_prepare_twophase(self, xid) def _rollback_twophase_impl(self, xid, is_prepared): + assert not self.__branch_from + if self._has_events or self.engine._has_events: self.dispatch.rollback_twophase(self, xid, is_prepared) @@ -595,6 +770,8 @@ class Connection(Connectable): self.__transaction = None def _commit_twophase_impl(self, xid, is_prepared): + assert not self.__branch_from + if self._has_events or self.engine._has_events: self.dispatch.commit_twophase(self, xid, is_prepared) @@ -610,8 +787,8 @@ class Connection(Connectable): self.__transaction = None def _autorollback(self): - if not self.in_transaction(): - 
self._rollback_impl() + if not self._root.in_transaction(): + self._root._rollback_impl() def close(self): """Close this :class:`.Connection`. @@ -632,13 +809,21 @@ class Connection(Connectable): and will allow no further operations. """ + if self.__branch_from: + try: + del self.__connection + except AttributeError: + pass + finally: + self.__can_reconnect = False + return try: conn = self.__connection except AttributeError: pass else: - if not self.__branch: - conn.close() + + conn.close() if conn._reset_agent is self.__transaction: conn._reset_agent = None @@ -670,7 +855,7 @@ class Connection(Connectable): a subclass of :class:`.Executable`, such as a :func:`~.expression.select` construct * a :class:`.FunctionElement`, such as that generated - by :attr:`.func`, will be automatically wrapped in + by :data:`.func`, will be automatically wrapped in a SELECT statement, which is then executed. * a :class:`.DDLElement` object * a :class:`.DefaultGenerator` object @@ -798,17 +983,16 @@ class Connection(Connectable): distilled_params = _distill_params(multiparams, params) if distilled_params: # note this is usually dict but we support RowProxy - # as well; but dict.keys() as an iterator is OK + # as well; but dict.keys() as an iterable is OK keys = distilled_params[0].keys() else: keys = [] dialect = self.dialect if 'compiled_cache' in self._execution_options: - key = dialect, elem, tuple(keys), len(distilled_params) > 1 - if key in self._execution_options['compiled_cache']: - compiled_sql = self._execution_options['compiled_cache'][key] - else: + key = dialect, elem, tuple(sorted(keys)), len(distilled_params) > 1 + compiled_sql = self._execution_options['compiled_cache'].get(key) + if compiled_sql is None: compiled_sql = elem.compile( dialect=dialect, column_keys=keys, inline=len(distilled_params) > 1) @@ -888,9 +1072,10 @@ class Connection(Connectable): context = constructor(dialect, self, conn, *args) except Exception as e: - self._handle_dbapi_exception(e, - util.text_type(statement), parameters, - None, None) + self._handle_dbapi_exception( + e, + util.text_type(statement), parameters, + None, None) if context.compiled: context.pre_exec() @@ -914,36 +1099,39 @@ class Connection(Connectable): "%r", sql_util._repr_params(parameters, batches=10) ) + + evt_handled = False try: if context.executemany: - for fn in () if not self.dialect._has_events \ - else self.dialect.dispatch.do_executemany: - if fn(cursor, statement, parameters, context): - break - else: + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_executemany: + if fn(cursor, statement, parameters, context): + evt_handled = True + break + if not evt_handled: self.dialect.do_executemany( cursor, statement, parameters, context) - elif not parameters and context.no_parameters: - for fn in () if not self.dialect._has_events \ - else self.dialect.dispatch.do_execute_no_params: - if fn(cursor, statement, context): - break - else: + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_execute_no_params: + if fn(cursor, statement, context): + evt_handled = True + break + if not evt_handled: self.dialect.do_execute_no_params( cursor, statement, context) - else: - for fn in () if not self.dialect._has_events \ - else self.dialect.dispatch.do_execute: - if fn(cursor, statement, parameters, context): - break - else: + if self.dialect._has_events: + for fn in self.dialect.dispatch.do_execute: + if fn(cursor, statement, parameters, context): + evt_handled = True + break + if not evt_handled: self.dialect.do_execute( 
cursor, statement, @@ -967,36 +1155,17 @@ class Connection(Connectable): if context.compiled: context.post_exec() - if context.isinsert and not context.executemany: - context.post_insert() + if context.is_crud or context.is_text: + result = context._setup_crud_result_proxy() + else: + result = context.get_result_proxy() + if result._metadata is None: + result._soft_close(_autoclose_connection=False) - # create a resultproxy, get rowcount/implicit RETURNING - # rows, close cursor if no further results pending - result = context.get_result_proxy() - if context.isinsert: - if context._is_implicit_returning: - context._fetch_implicit_returning(result) - result.close(_autoclose_connection=False) - result._metadata = None - elif not context._is_explicit_returning: - result.close(_autoclose_connection=False) - result._metadata = None - elif context.isupdate and context._is_implicit_returning: - context._fetch_implicit_update_returning(result) - result.close(_autoclose_connection=False) - result._metadata = None + if context.should_autocommit and self._root.__transaction is None: + self._root._commit_impl(autocommit=True) - elif result._metadata is None: - # no results, get rowcount - # (which requires open cursor on some drivers - # such as kintersbasdb, mxodbc), - result.rowcount - result.close(_autoclose_connection=False) - - if self.__transaction is None and context.should_autocommit: - self._commit_impl(autocommit=True) - - if result.closed and self.should_close_with_result: + if result._soft_closed and self.should_close_with_result: self.close() return result @@ -1055,8 +1224,6 @@ class Connection(Connectable): """ try: cursor.close() - except (SystemExit, KeyboardInterrupt): - raise except Exception: # log the error through the connection pool's logger. 
self.engine.pool.logger.error( @@ -1071,7 +1238,6 @@ class Connection(Connectable): parameters, cursor, context): - exc_info = sys.exc_info() if context and context.exception is None: @@ -1081,16 +1247,22 @@ class Connection(Connectable): self._is_disconnect = \ isinstance(e, self.dialect.dbapi.Error) and \ not self.closed and \ - self.dialect.is_disconnect(e, self.__connection, cursor) + self.dialect.is_disconnect( + e, + self.__connection if not self.invalidated else None, + cursor) if context: context.is_disconnect = self._is_disconnect + invalidate_pool_on_disconnect = True + if self._reentrant_error: util.raise_from_cause( exc.DBAPIError.instance(statement, parameters, e, - self.dialect.dbapi.Error), + self.dialect.dbapi.Error, + dialect=self.dialect), exc_info ) self._reentrant_error = True @@ -1106,13 +1278,16 @@ class Connection(Connectable): parameters, e, self.dialect.dbapi.Error, - connection_invalidated=self._is_disconnect) + connection_invalidated=self._is_disconnect, + dialect=self.dialect) else: sqlalchemy_exception = None newraise = None - if self._has_events or self.engine._has_events: + if (self._has_events or self.engine._has_events) and \ + not self._execution_options.get( + 'skip_user_error_events', False): # legacy dbapi_error event if should_wrap and context: self.dispatch.dbapi_error(self, @@ -1124,7 +1299,8 @@ class Connection(Connectable): # new handle_error event ctx = ExceptionContextImpl( - e, sqlalchemy_exception, self, cursor, statement, + e, sqlalchemy_exception, self.engine, + self, cursor, statement, parameters, context, self._is_disconnect) for fn in self.dispatch.handle_error: @@ -1144,6 +1320,11 @@ class Connection(Connectable): sqlalchemy_exception.connection_invalidated = \ self._is_disconnect = ctx.is_disconnect + # set up potentially user-defined value for + # invalidate pool. 
+ invalidate_pool_on_disconnect = \ + ctx.invalidate_pool_on_disconnect + if should_wrap and context: context.handle_dbapi_exception(e) @@ -1166,12 +1347,66 @@ class Connection(Connectable): del self._reentrant_error if self._is_disconnect: del self._is_disconnect - dbapi_conn_wrapper = self.connection - self.engine.pool._invalidate(dbapi_conn_wrapper, e) - self.invalidate(e) + if not self.invalidated: + dbapi_conn_wrapper = self.__connection + if invalidate_pool_on_disconnect: + self.engine.pool._invalidate(dbapi_conn_wrapper, e) + self.invalidate(e) if self.should_close_with_result: self.close() + @classmethod + def _handle_dbapi_exception_noconnection(cls, e, dialect, engine): + + exc_info = sys.exc_info() + + is_disconnect = dialect.is_disconnect(e, None, None) + + should_wrap = isinstance(e, dialect.dbapi.Error) + + if should_wrap: + sqlalchemy_exception = exc.DBAPIError.instance( + None, + None, + e, + dialect.dbapi.Error, + connection_invalidated=is_disconnect) + else: + sqlalchemy_exception = None + + newraise = None + + if engine._has_events: + ctx = ExceptionContextImpl( + e, sqlalchemy_exception, engine, None, None, None, + None, None, is_disconnect) + for fn in engine.dispatch.handle_error: + try: + # handler returns an exception; + # call next handler in a chain + per_fn = fn(ctx) + if per_fn is not None: + ctx.chained_exception = newraise = per_fn + except Exception as _raised: + # handler raises an exception - stop processing + newraise = _raised + break + + if sqlalchemy_exception and \ + is_disconnect != ctx.is_disconnect: + sqlalchemy_exception.connection_invalidated = \ + is_disconnect = ctx.is_disconnect + + if newraise: + util.raise_from_cause(newraise, exc_info) + elif should_wrap: + util.raise_from_cause( + sqlalchemy_exception, + exc_info + ) + else: + util.reraise(*exc_info) + def default_schema_name(self): return self.engine.dialect.get_default_schema_name(self) @@ -1250,8 +1485,9 @@ class ExceptionContextImpl(ExceptionContext): """Implement the :class:`.ExceptionContext` interface.""" def __init__(self, exception, sqlalchemy_exception, - connection, cursor, statement, parameters, + engine, connection, cursor, statement, parameters, context, is_disconnect): + self.engine = engine self.connection = connection self.sqlalchemy_exception = sqlalchemy_exception self.original_exception = exception @@ -1295,9 +1531,13 @@ class Transaction(object): def __init__(self, connection, parent): self.connection = connection - self._parent = parent or self + self._actual_parent = parent self.is_active = True + @property + def _parent(self): + return self._actual_parent or self + def close(self): """Close this :class:`.Transaction`. @@ -1575,29 +1815,28 @@ class Engine(Connectable, log.Identified): def dispose(self): """Dispose of the connection pool used by this :class:`.Engine`. + This has the effect of fully closing all **currently checked in** + database connections. Connections that are still checked out + will **not** be closed, however they will no longer be associated + with this :class:`.Engine`, so when they are closed individually, + eventually the :class:`.Pool` which they are associated with will + be garbage collected and they will be closed out fully, if + not already closed on checkin. + A new connection pool is created immediately after the old one has been disposed. This new pool, like all SQLAlchemy connection pools, does not make any actual connections to the database until one is - first requested. 
+ first requested, so as long as the :class:`.Engine` isn't used again, + no new connections will be made. - This method has two general use cases: + .. seealso:: - * When a dropped connection is detected, it is assumed that all - connections held by the pool are potentially dropped, and - the entire pool is replaced. - - * An application may want to use :meth:`dispose` within a test - suite that is creating multiple engines. - - It is critical to note that :meth:`dispose` does **not** guarantee - that the application will release all open database connections - only - those connections that are checked into the pool are closed. - Connections which remain checked out or have been detached from - the engine are not affected. + :ref:`engine_disposal` """ self.pool.dispose() self.pool = self.pool.recreate() + self.dispatch.engine_disposed(self) def _execute_default(self, default): with self.contextual_connect() as conn: @@ -1795,10 +2034,11 @@ class Engine(Connectable, log.Identified): """ - return self._connection_cls(self, - self.pool.connect(), - close_with_result=close_with_result, - **kwargs) + return self._connection_cls( + self, + self._wrap_pool_connect(self.pool.connect, None), + close_with_result=close_with_result, + **kwargs) def table_names(self, schema=None, connection=None): """Return a list of all table names available in the database. @@ -1828,7 +2068,18 @@ class Engine(Connectable, log.Identified): """ return self.run_callable(self.dialect.has_table, table_name, schema) - def raw_connection(self): + def _wrap_pool_connect(self, fn, connection): + dialect = self.dialect + try: + return fn() + except dialect.dbapi.Error as e: + if connection is None: + Connection._handle_dbapi_exception_noconnection( + e, dialect, self) + else: + util.reraise(*sys.exc_info()) + + def raw_connection(self, _connection=None): """Return a "raw" DBAPI connection from the connection pool. The returned object is a proxied version of the DBAPI @@ -1839,13 +2090,18 @@ class Engine(Connectable, log.Identified): for real. This method provides direct DBAPI connection access for - special situations. In most situations, the :class:`.Connection` - object should be used, which is procured using the - :meth:`.Engine.connect` method. + special situations when the API provided by :class:`.Connection` + is not needed. When a :class:`.Connection` object is already + present, the DBAPI connection is available using + the :attr:`.Connection.connection` accessor. + + .. 
seealso:: + + :ref:`dbapi_connections` """ - - return self.pool.unique_connection() + return self._wrap_pool_connect( + self.pool.unique_connection, _connection) class OptionEngine(Engine): diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/default.py b/lib/python3.5/site-packages/sqlalchemy/engine/default.py index 2fece76..9798d13 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/default.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/default.py @@ -1,5 +1,5 @@ # engine/default.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -61,14 +61,13 @@ class DefaultDialect(interfaces.Dialect): engine_config_types = util.immutabledict([ ('convert_unicode', util.bool_or_str('force')), - ('pool_timeout', int), + ('pool_timeout', util.asint), ('echo', util.bool_or_str('debug')), ('echo_pool', util.bool_or_str('debug')), - ('pool_recycle', int), - ('pool_size', int), - ('max_overflow', int), - ('pool_threadlocal', bool), - ('use_native_unicode', bool), + ('pool_recycle', util.asint), + ('pool_size', util.asint), + ('max_overflow', util.asint), + ('pool_threadlocal', util.asbool), ]) # if the NUMERIC type @@ -157,6 +156,15 @@ class DefaultDialect(interfaces.Dialect): reflection_options = () + dbapi_exception_translation_map = util.immutabledict() + """mapping used in the extremely unusual case that a DBAPI's + published exceptions don't actually have the __name__ that they + are linked towards. + + .. versionadded:: 1.0.5 + + """ + def __init__(self, convert_unicode=False, encoding='utf-8', paramstyle=None, dbapi=None, implicit_returning=None, @@ -395,6 +403,12 @@ class DefaultDialect(interfaces.Dialect): self._set_connection_isolation(connection, opts['isolation_level']) def _set_connection_isolation(self, connection, level): + if connection.in_transaction(): + util.warn( + "Connection is already established with a Transaction; " + "setting isolation_level may implicitly rollback or commit " + "the existing transaction, or have no effect until " + "next transaction") self.set_isolation_level(connection.connection, level) connection.connection._connection_record.\ finalize_callback.append(self.reset_isolation_level) @@ -452,14 +466,13 @@ class DefaultExecutionContext(interfaces.ExecutionContext): isinsert = False isupdate = False isdelete = False + is_crud = False + is_text = False isddl = False executemany = False - result_map = None compiled = None statement = None - postfetch_cols = None - prefetch_cols = None - returning_cols = None + result_column_struct = None _is_implicit_returning = False _is_explicit_returning = False @@ -472,10 +485,9 @@ class DefaultExecutionContext(interfaces.ExecutionContext): """Initialize execution context for a DDLElement construct.""" self = cls.__new__(cls) - self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection - self.engine = connection.engine + self.dialect = connection.dialect self.compiled = compiled = compiled_ddl self.isddl = True @@ -507,25 +519,20 @@ class DefaultExecutionContext(interfaces.ExecutionContext): """Initialize execution context for a Compiled construct.""" self = cls.__new__(cls) - self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection - self.engine = connection.engine + self.dialect = connection.dialect self.compiled = compiled if not compiled.can_execute: raise exc.ArgumentError("Not 
an executable clause") - self.execution_options = compiled.statement._execution_options - if connection._execution_options: - self.execution_options = dict(self.execution_options) - self.execution_options.update(connection._execution_options) + self.execution_options = compiled.statement._execution_options.union( + connection._execution_options) - # compiled clauseelement. process bind params, process table defaults, - # track collections used by ResultProxy to target and process results - - self.result_map = compiled.result_map + self.result_column_struct = ( + compiled._result_columns, compiled._ordered_columns) self.unicode_statement = util.text_type(compiled) if not dialect.supports_unicode_statements: @@ -537,11 +544,7 @@ class DefaultExecutionContext(interfaces.ExecutionContext): self.isinsert = compiled.isinsert self.isupdate = compiled.isupdate self.isdelete = compiled.isdelete - - if self.isinsert or self.isupdate or self.isdelete: - self._is_explicit_returning = bool(compiled.statement._returning) - self._is_implicit_returning = bool( - compiled.returning and not compiled.statement._returning) + self.is_text = compiled.isplaintext if not parameters: self.compiled_parameters = [compiled.construct_params()] @@ -553,11 +556,19 @@ class DefaultExecutionContext(interfaces.ExecutionContext): self.executemany = len(parameters) > 1 self.cursor = self.create_cursor() - if self.isinsert or self.isupdate: - self.postfetch_cols = self.compiled.postfetch - self.prefetch_cols = self.compiled.prefetch - self.returning_cols = self.compiled.returning - self.__process_defaults() + + if self.isinsert or self.isupdate or self.isdelete: + self.is_crud = True + self._is_explicit_returning = bool(compiled.statement._returning) + self._is_implicit_returning = bool( + compiled.returning and not compiled.statement._returning) + + if not self.isdelete: + if self.compiled.prefetch: + if self.executemany: + self._process_executemany_defaults() + else: + self._process_executesingle_defaults() processors = compiled._bind_processors @@ -577,21 +588,28 @@ class DefaultExecutionContext(interfaces.ExecutionContext): else: encode = not dialect.supports_unicode_statements for compiled_params in self.compiled_parameters: - param = {} + if encode: - for key in compiled_params: - if key in processors: - param[dialect._encoder(key)[0]] = \ - processors[key](compiled_params[key]) - else: - param[dialect._encoder(key)[0]] = \ - compiled_params[key] + param = dict( + ( + dialect._encoder(key)[0], + processors[key](compiled_params[key]) + if key in processors + else compiled_params[key] + ) + for key in compiled_params + ) else: - for key in compiled_params: - if key in processors: - param[key] = processors[key](compiled_params[key]) - else: - param[key] = compiled_params[key] + param = dict( + ( + key, + processors[key](compiled_params[key]) + if key in processors + else compiled_params[key] + ) + for key in compiled_params + ) + parameters.append(param) self.parameters = dialect.execute_sequence_format(parameters) @@ -603,10 +621,10 @@ class DefaultExecutionContext(interfaces.ExecutionContext): """Initialize execution context for a string SQL statement.""" self = cls.__new__(cls) - self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection - self.engine = connection.engine + self.dialect = connection.dialect + self.is_text = True # plain text statement self.execution_options = connection._execution_options @@ -647,21 +665,32 @@ class 
DefaultExecutionContext(interfaces.ExecutionContext): """Initialize execution context for a ColumnDefault construct.""" self = cls.__new__(cls) - self.dialect = dialect self.root_connection = connection self._dbapi_connection = dbapi_connection - self.engine = connection.engine + self.dialect = connection.dialect self.execution_options = connection._execution_options self.cursor = self.create_cursor() return self @util.memoized_property - def no_parameters(self): - return self.execution_options.get("no_parameters", False) + def engine(self): + return self.root_connection.engine @util.memoized_property - def is_crud(self): - return self.isinsert or self.isupdate or self.isdelete + def postfetch_cols(self): + return self.compiled.postfetch + + @util.memoized_property + def prefetch_cols(self): + return self.compiled.prefetch + + @util.memoized_property + def returning_cols(self): + return self.compiled.returning + + @util.memoized_property + def no_parameters(self): + return self.execution_options.get("no_parameters", False) @util.memoized_property def should_autocommit(self): @@ -778,16 +807,51 @@ class DefaultExecutionContext(interfaces.ExecutionContext): def supports_sane_multi_rowcount(self): return self.dialect.supports_sane_multi_rowcount - def post_insert(self): - if not self._is_implicit_returning and \ - not self._is_explicit_returning and \ - not self.compiled.inline and \ - self.dialect.postfetch_lastrowid and \ - (not self.inserted_primary_key or - None in self.inserted_primary_key): + def _setup_crud_result_proxy(self): + if self.isinsert and \ + not self.executemany: + if not self._is_implicit_returning and \ + not self.compiled.inline and \ + self.dialect.postfetch_lastrowid: - table = self.compiled.statement.table - lastrowid = self.get_lastrowid() + self._setup_ins_pk_from_lastrowid() + + elif not self._is_implicit_returning: + self._setup_ins_pk_from_empty() + + result = self.get_result_proxy() + + if self.isinsert: + if self._is_implicit_returning: + row = result.fetchone() + self.returned_defaults = row + self._setup_ins_pk_from_implicit_returning(row) + result._soft_close(_autoclose_connection=False) + result._metadata = None + elif not self._is_explicit_returning: + result._soft_close(_autoclose_connection=False) + result._metadata = None + elif self.isupdate and self._is_implicit_returning: + row = result.fetchone() + self.returned_defaults = row + result._soft_close(_autoclose_connection=False) + result._metadata = None + + elif result._metadata is None: + # no results, get rowcount + # (which requires open cursor on some drivers + # such as kinterbasdb, mxodbc) + result.rowcount + result._soft_close(_autoclose_connection=False) + return result + + def _setup_ins_pk_from_lastrowid(self): + key_getter = self.compiled._key_getters_for_crud_column[2] + table = self.compiled.statement.table + compiled_params = self.compiled_parameters[0] + + lastrowid = self.get_lastrowid() + if lastrowid is not None: autoinc_col = table._autoincrement_column if autoinc_col is not None: # apply type post processors to the lastrowid @@ -795,35 +859,44 @@ class DefaultExecutionContext(interfaces.ExecutionContext): self.dialect, None) if proc is not None: lastrowid = proc(lastrowid) - self.inserted_primary_key = [ - lastrowid if c is autoinc_col else v - for c, v in zip( - table.primary_key, - self.inserted_primary_key) + self.inserted_primary_key = [ + lastrowid if c is autoinc_col else + compiled_params.get(key_getter(c), None) + for c in table.primary_key + ] + else: + # don't have a usable lastrowid, so + # do the same as 
_setup_ins_pk_from_empty + self.inserted_primary_key = [ + compiled_params.get(key_getter(c), None) + for c in table.primary_key ] - def _fetch_implicit_returning(self, resultproxy): + def _setup_ins_pk_from_empty(self): + key_getter = self.compiled._key_getters_for_crud_column[2] table = self.compiled.statement.table - row = resultproxy.fetchone() + compiled_params = self.compiled_parameters[0] + self.inserted_primary_key = [ + compiled_params.get(key_getter(c), None) + for c in table.primary_key + ] - ipk = [] - for c, v in zip(table.primary_key, self.inserted_primary_key): - if v is not None: - ipk.append(v) - else: - ipk.append(row[c]) + def _setup_ins_pk_from_implicit_returning(self, row): + key_getter = self.compiled._key_getters_for_crud_column[2] + table = self.compiled.statement.table + compiled_params = self.compiled_parameters[0] - self.inserted_primary_key = ipk - self.returned_defaults = row - - def _fetch_implicit_update_returning(self, resultproxy): - row = resultproxy.fetchone() - self.returned_defaults = row + self.inserted_primary_key = [ + row[col] if value is None else value + for col, value in [ + (col, compiled_params.get(key_getter(col), None)) + for col in table.primary_key + ] + ] def lastrow_has_defaults(self): return (self.isinsert or self.isupdate) and \ - bool(self.postfetch_cols) + bool(self.compiled.postfetch) def set_input_sizes(self, translate=None, exclude_types=None): """Given a cursor and ClauseParameters, call the appropriate @@ -901,58 +974,53 @@ class DefaultExecutionContext(interfaces.ExecutionContext): else: return self._exec_default(column.onupdate, column.type) - def __process_defaults(self): - """Generate default values for compiled insert/update statements, - and generate inserted_primary_key collection. 
- """ - + def _process_executemany_defaults(self): key_getter = self.compiled._key_getters_for_crud_column[2] - if self.executemany: - if len(self.compiled.prefetch): - scalar_defaults = {} + prefetch = self.compiled.prefetch + scalar_defaults = {} - # pre-determine scalar Python-side defaults - # to avoid many calls of get_insert_default()/ - # get_update_default() - for c in self.prefetch_cols: - if self.isinsert and c.default and c.default.is_scalar: - scalar_defaults[c] = c.default.arg - elif self.isupdate and c.onupdate and c.onupdate.is_scalar: - scalar_defaults[c] = c.onupdate.arg + # pre-determine scalar Python-side defaults + # to avoid many calls of get_insert_default()/ + # get_update_default() + for c in prefetch: + if self.isinsert and c.default and c.default.is_scalar: + scalar_defaults[c] = c.default.arg + elif self.isupdate and c.onupdate and c.onupdate.is_scalar: + scalar_defaults[c] = c.onupdate.arg - for param in self.compiled_parameters: - self.current_parameters = param - for c in self.prefetch_cols: - if c in scalar_defaults: - val = scalar_defaults[c] - elif self.isinsert: - val = self.get_insert_default(c) - else: - val = self.get_update_default(c) - if val is not None: - param[key_getter(c)] = val - del self.current_parameters - else: - self.current_parameters = compiled_parameters = \ - self.compiled_parameters[0] - - for c in self.compiled.prefetch: - if self.isinsert: + for param in self.compiled_parameters: + self.current_parameters = param + for c in prefetch: + if c in scalar_defaults: + val = scalar_defaults[c] + elif self.isinsert: val = self.get_insert_default(c) else: val = self.get_update_default(c) - if val is not None: - compiled_parameters[key_getter(c)] = val - del self.current_parameters + param[key_getter(c)] = val + del self.current_parameters + def _process_executesingle_defaults(self): + key_getter = self.compiled._key_getters_for_crud_column[2] + prefetch = self.compiled.prefetch + self.current_parameters = compiled_parameters = \ + self.compiled_parameters[0] + + for c in prefetch: if self.isinsert: - self.inserted_primary_key = [ - self.compiled_parameters[0].get(key_getter(c), None) - for c in self.compiled. - statement.table.primary_key - ] + if c.default and \ + not c.default.is_sequence and c.default.is_scalar: + val = c.default.arg + else: + val = self.get_insert_default(c) + else: + val = self.get_update_default(c) + + if val is not None: + compiled_parameters[key_getter(c)] = val + del self.current_parameters DefaultDialect.execution_ctx_cls = DefaultExecutionContext diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/interfaces.py b/lib/python3.5/site-packages/sqlalchemy/engine/interfaces.py index 71df29c..1948342 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/interfaces.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/interfaces.py @@ -1,5 +1,5 @@ # engine/interfaces.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -150,6 +150,16 @@ class Dialect(object): This will prevent types.Boolean from generating a CHECK constraint when that type is used. + dbapi_exception_translation_map + A dictionary of names that will contain as values the names of + pep-249 exceptions ("IntegrityError", "OperationalError", etc) + keyed to alternate class names, to support the case where a + DBAPI has exception classes that aren't named as they are + referred to (e.g. 
IntegrityError = MyException). In the vast + majority of cases this dictionary is empty. + + .. versionadded:: 1.0.5 + """ _has_events = False @@ -242,7 +252,9 @@ class Dialect(object): sequence a dictionary of the form - {'name' : str, 'start' :int, 'increment': int} + {'name' : str, 'start' :int, 'increment': int, 'minvalue': int, + 'maxvalue': int, 'nominvalue': bool, 'nomaxvalue': bool, + 'cycle': bool} Additional column attributes may be present. """ @@ -308,7 +320,15 @@ class Dialect(object): def get_table_names(self, connection, schema=None, **kw): """Return a list of table names for `schema`.""" - raise NotImplementedError + raise NotImplementedError() + + def get_temp_table_names(self, connection, schema=None, **kw): + """Return a list of temporary table names on the given connection, + if supported by the underlying backend. + + """ + + raise NotImplementedError() def get_view_names(self, connection, schema=None, **kw): """Return a list of all view names available in the database. @@ -319,6 +339,14 @@ class Dialect(object): raise NotImplementedError() + def get_temp_view_names(self, connection, schema=None, **kw): + """Return a list of temporary view names on the given connection, + if supported by the underlying backend. + + """ + + raise NotImplementedError() + def get_view_definition(self, connection, view_name, schema=None, **kw): """Return view definition. @@ -638,20 +666,120 @@ class Dialect(object): return None def reset_isolation_level(self, dbapi_conn): - """Given a DBAPI connection, revert its isolation to the default.""" + """Given a DBAPI connection, revert its isolation to the default. + + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`.Connection` and + :class:`.Engine` + isolation level facilities; these APIs should be preferred for + most typical use cases. + + .. seealso:: + + :meth:`.Connection.get_isolation_level` - view current level + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`.Connection` isolation level + + :paramref:`.create_engine.isolation_level` - + set per :class:`.Engine` isolation level + + """ raise NotImplementedError() def set_isolation_level(self, dbapi_conn, level): - """Given a DBAPI connection, set its isolation level.""" + """Given a DBAPI connection, set its isolation level. + + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`.Connection` and + :class:`.Engine` + isolation level facilities; these APIs should be preferred for + most typical use cases. + + .. seealso:: + + :meth:`.Connection.get_isolation_level` - view current level + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`.Connection` isolation level + + :paramref:`.create_engine.isolation_level` - + set per :class:`.Engine` isolation level + + """ raise NotImplementedError() def get_isolation_level(self, dbapi_conn): - """Given a DBAPI connection, return its isolation level.""" + """Given a DBAPI connection, return its isolation level. + + When working with a :class:`.Connection` object, the corresponding + DBAPI connection may be procured using the + :attr:`.Connection.connection` accessor. 
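For illustration, a minimal sketch of these isolation-level APIs in use, assuming the pysqlite backend (the level name is backend-specific; substitute whatever your dialect supports)::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")

    with engine.connect() as conn:
        # per-Connection isolation level; execution_options() returns a
        # copy of the Connection with the new option in effect
        conn = conn.execution_options(isolation_level="READ UNCOMMITTED")
        print(conn.get_isolation_level())      # level now in effect
        print(conn.default_isolation_level)    # engine-wide default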
+ + Note that this is a dialect-level method which is used as part + of the implementation of the :class:`.Connection` and + :class:`.Engine` isolation level facilities; + these APIs should be preferred for most typical use cases. + + + .. seealso:: + + :meth:`.Connection.get_isolation_level` - view current level + + :attr:`.Connection.default_isolation_level` - view default level + + :paramref:`.Connection.execution_options.isolation_level` - + set per :class:`.Connection` isolation level + + :paramref:`.create_engine.isolation_level` - + set per :class:`.Engine` isolation level + + + """ raise NotImplementedError() + @classmethod + def get_dialect_cls(cls, url): + """Given a URL, return the :class:`.Dialect` that will be used. + + This is a hook that allows an external plugin to provide functionality + around an existing dialect, by allowing the plugin to be loaded + from the url based on an entrypoint, and then the plugin returns + the actual dialect to be used. + + By default this just returns the cls. + + .. versionadded:: 1.0.3 + + """ + return cls + + @classmethod + def engine_created(cls, engine): + """A convenience hook called before returning the final :class:`.Engine`. + + If the dialect returned a different class from the + :meth:`.get_dialect_cls` + method, then the hook is called on both classes, first on + the dialect class returned by the :meth:`.get_dialect_cls` method and + then on the class on which the method was called. + + The hook should be used by dialects and/or wrappers to apply special + events to the engine or its components. In particular, it allows + a dialect-wrapping class to apply dialect-level events. + + .. versionadded:: 1.0.3 + + """ + pass + class ExecutionContext(object): """A messenger object for a Dialect that corresponds to a single @@ -901,7 +1029,23 @@ class ExceptionContext(object): connection = None """The :class:`.Connection` in use during the exception. - This member is always present. + This member is present, except in the case of a failure when + first connecting. + + .. seealso:: + + :attr:`.ExceptionContext.engine` + + + """ + + engine = None + """The :class:`.Engine` in use during the exception. + + This member should always be present, even in the case of a failure + when first connecting. + + .. versionadded:: 1.0.0 """ @@ -988,3 +1132,21 @@ class ExceptionContext(object): changing this flag. """ + + invalidate_pool_on_disconnect = True + """Represent whether all connections in the pool should be invalidated + when a "disconnect" condition is in effect. + + Setting this flag to False within the scope of the + :meth:`.ConnectionEvents.handle_error` event will have the effect such + that the full collection of connections in the pool will not be + invalidated during a disconnect; only the current connection that is the + subject of the error will actually be invalidated. + + The purpose of this flag is for custom disconnect-handling schemes where + the invalidation of other connections in the pool is to be performed + based on other conditions, or even on a per-connection basis. + + .. 
versionadded:: 1.0.3 + + """ diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/reflection.py b/lib/python3.5/site-packages/sqlalchemy/engine/reflection.py index 012d1d3..98fcfa0 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/reflection.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/reflection.py @@ -1,5 +1,5 @@ # engine/reflection.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -173,7 +173,14 @@ class Inspector(object): passed as ``None``. For special quoting, use :class:`.quoted_name`. :param order_by: Optional, may be the string "foreign_key" to sort - the result on foreign key dependencies. + the result on foreign key dependencies. Does not automatically + resolve cycles, and will raise :class:`.CircularDependencyError` + if cycles exist. + + .. deprecated:: 1.0.0 - see + :meth:`.Inspector.get_sorted_table_and_fkc_names` for a version + of this which resolves foreign key cycles between tables + automatically. .. versionchanged:: 0.8 the "foreign_key" sorting sorts tables in order of dependee to dependent; that is, in creation @@ -183,6 +190,8 @@ .. seealso:: + :meth:`.Inspector.get_sorted_table_and_fkc_names` + :attr:`.MetaData.sorted_tables` """ @@ -201,6 +210,88 @@ class Inspector(object): tnames = list(topological.sort(tuples, tnames)) return tnames + def get_sorted_table_and_fkc_names(self, schema=None): + """Return dependency-sorted table and foreign key constraint names + referred to within a particular schema. + + This will yield 2-tuples of + ``(tablename, [(tname, fkname), (tname, fkname), ...])`` + consisting of table names in CREATE order grouped with the foreign key + constraint names that are not detected as belonging to a cycle. + The final element + will be ``(None, [(tname, fkname), (tname, fkname), ..])`` + which will consist of remaining + foreign key constraint names that would require a separate CREATE + step after-the-fact, based on dependencies between tables. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.Inspector.get_table_names` + + :func:`.sort_tables_and_constraints` - similar method which works + with an already-given :class:`.MetaData`. + + """ + if hasattr(self.dialect, 'get_table_names'): + tnames = self.dialect.get_table_names( + self.bind, schema, info_cache=self.info_cache) + else: + tnames = self.engine.table_names(schema) + + tuples = set() + remaining_fkcs = set() + + fknames_for_table = {} + for tname in tnames: + fkeys = self.get_foreign_keys(tname, schema) + fknames_for_table[tname] = set( + [fk['name'] for fk in fkeys] + ) + for fkey in fkeys: + if tname != fkey['referred_table']: + tuples.add((fkey['referred_table'], tname)) + try: + candidate_sort = list(topological.sort(tuples, tnames)) + except exc.CircularDependencyError as err: + for edge in err.edges: + tuples.remove(edge) + remaining_fkcs.update( + (edge[1], fkc) + for fkc in fknames_for_table[edge[1]] + ) + + candidate_sort = list(topological.sort(tuples, tnames)) + return [ + (tname, fknames_for_table[tname].difference(remaining_fkcs)) + for tname in candidate_sort + ] + [(None, list(remaining_fkcs))] + + def get_temp_table_names(self): + """return a list of temporary table names for the current bind. + + This method is unsupported by most dialects; currently + only SQLite implements it. + + .. 
versionadded:: 1.0.0 + + """ + return self.dialect.get_temp_table_names( + self.bind, info_cache=self.info_cache) + + def get_temp_view_names(self): + """return a list of temporary view names for the current bind. + + This method is unsupported by most dialects; currently + only SQLite implements it. + + .. versionadded:: 1.0.0 + + """ + return self.dialect.get_temp_view_names( + self.bind, info_cache=self.info_cache) + def get_table_options(self, table_name, schema=None, **kw): """Return a dictionary of options specified when the table of the given name was created. @@ -370,6 +461,12 @@ class Inspector(object): unique boolean + dialect_options + dict of dialect-specific index options. May not be present + for all dialects. + + .. versionadded:: 1.0.0 + :param table_name: string name of the table. For special quoting, use :class:`.quoted_name`. @@ -465,55 +562,87 @@ class Inspector(object): for col_d in self.get_columns( table_name, schema, **table.dialect_kwargs): found_table = True - orig_name = col_d['name'] - table.dispatch.column_reflect(self, table, col_d) - - name = col_d['name'] - if include_columns and name not in include_columns: - continue - if exclude_columns and name in exclude_columns: - continue - - coltype = col_d['type'] - - col_kw = dict( - (k, col_d[k]) - for k in ['nullable', 'autoincrement', 'quote', 'info', 'key'] - if k in col_d - ) - - colargs = [] - if col_d.get('default') is not None: - # the "default" value is assumed to be a literal SQL - # expression, so is wrapped in text() so that no quoting - # occurs on re-issuance. - colargs.append( - sa_schema.DefaultClause( - sql.text(col_d['default']), _reflected=True - ) - ) - - if 'sequence' in col_d: - # TODO: mssql and sybase are using this. - seq = col_d['sequence'] - sequence = sa_schema.Sequence(seq['name'], 1, 1) - if 'start' in seq: - sequence.start = seq['start'] - if 'increment' in seq: - sequence.increment = seq['increment'] - colargs.append(sequence) - - cols_by_orig_name[orig_name] = col = \ - sa_schema.Column(name, coltype, *colargs, **col_kw) - - if col.key in table.primary_key: - col.primary_key = True - table.append_column(col) + self._reflect_column( + table, col_d, include_columns, + exclude_columns, cols_by_orig_name) if not found_table: raise exc.NoSuchTableError(table.name) + self._reflect_pk( + table_name, schema, table, cols_by_orig_name, exclude_columns) + + self._reflect_fk( + table_name, schema, table, cols_by_orig_name, + exclude_columns, reflection_options) + + self._reflect_indexes( + table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options) + + self._reflect_unique_constraints( + table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options) + + def _reflect_column( + self, table, col_d, include_columns, + exclude_columns, cols_by_orig_name): + + orig_name = col_d['name'] + + table.dispatch.column_reflect(self, table, col_d) + + # fetch name again as column_reflect is allowed to + # change it + name = col_d['name'] + if (include_columns and name not in include_columns) \ + or (exclude_columns and name in exclude_columns): + return + + coltype = col_d['type'] + + col_kw = dict( + (k, col_d[k]) + for k in ['nullable', 'autoincrement', 'quote', 'info', 'key'] + if k in col_d + ) + + colargs = [] + if col_d.get('default') is not None: + # the "default" value is assumed to be a literal SQL + # expression, so is wrapped in text() so that no quoting + # occurs on re-issuance. 
+ colargs.append( + sa_schema.DefaultClause( + sql.text(col_d['default']), _reflected=True + ) + ) + + if 'sequence' in col_d: + self._reflect_col_sequence(col_d, colargs) + + cols_by_orig_name[orig_name] = col = \ + sa_schema.Column(name, coltype, *colargs, **col_kw) + + if col.key in table.primary_key: + col.primary_key = True + table.append_column(col) + + def _reflect_col_sequence(self, col_d, colargs): + if 'sequence' in col_d: + # TODO: mssql and sybase are using this. + seq = col_d['sequence'] + sequence = sa_schema.Sequence(seq['name'], 1, 1) + if 'start' in seq: + sequence.start = seq['start'] + if 'increment' in seq: + sequence.increment = seq['increment'] + colargs.append(sequence) + + def _reflect_pk( + self, table_name, schema, table, + cols_by_orig_name, exclude_columns): pk_cons = self.get_pk_constraint( table_name, schema, **table.dialect_kwargs) if pk_cons: @@ -530,6 +659,9 @@ class Inspector(object): # its column collection table.primary_key._reload(pk_cols) + def _reflect_fk( + self, table_name, schema, table, cols_by_orig_name, + exclude_columns, reflection_options): fkeys = self.get_foreign_keys( table_name, schema, **table.dialect_kwargs) for fkey_d in fkeys: @@ -572,24 +704,85 @@ class Inspector(object): sa_schema.ForeignKeyConstraint(constrained_columns, refspec, conname, link_to_name=True, **options)) + + def _reflect_indexes( + self, table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options): # Indexes indexes = self.get_indexes(table_name, schema) for index_d in indexes: name = index_d['name'] columns = index_d['column_names'] unique = index_d['unique'] - flavor = index_d.get('type', 'unknown type') + flavor = index_d.get('type', 'index') + dialect_options = index_d.get('dialect_options', {}) + + duplicates = index_d.get('duplicates_constraint') if include_columns and \ not set(columns).issubset(include_columns): util.warn( - "Omitting %s KEY for (%s), key covers omitted columns." % + "Omitting %s key for (%s), key covers omitted columns." % (flavor, ', '.join(columns))) continue + if duplicates: + continue # look for columns by orig name in cols_by_orig_name, # but support columns that are in-Python only as fallback - sa_schema.Index(name, *[ - cols_by_orig_name[c] if c in cols_by_orig_name - else table.c[c] - for c in columns - ], - **dict(unique=unique)) + idx_cols = [] + for c in columns: + try: + idx_col = cols_by_orig_name[c] \ + if c in cols_by_orig_name else table.c[c] + except KeyError: + util.warn( + "%s key '%s' was not located in " + "columns for table '%s'" % ( + flavor, c, table_name + )) + else: + idx_cols.append(idx_col) + + sa_schema.Index( + name, *idx_cols, + **dict(list(dialect_options.items()) + [('unique', unique)]) + ) + + def _reflect_unique_constraints( + self, table_name, schema, table, cols_by_orig_name, + include_columns, exclude_columns, reflection_options): + + # Unique Constraints + try: + constraints = self.get_unique_constraints(table_name, schema) + except NotImplementedError: + # optional dialect feature + return + + for const_d in constraints: + conname = const_d['name'] + columns = const_d['column_names'] + duplicates = const_d.get('duplicates_index') + if include_columns and \ + not set(columns).issubset(include_columns): + util.warn( + "Omitting unique constraint key for (%s), " + "key covers omitted columns." 
% + ', '.join(columns)) + continue + if duplicates: + continue + # look for columns by orig name in cols_by_orig_name, + # but support columns that are in-Python only as fallback + constrained_cols = [] + for c in columns: + try: + constrained_col = cols_by_orig_name[c] \ + if c in cols_by_orig_name else table.c[c] + except KeyError: + util.warn( + "unique constraint key '%s' was not located in " + "columns for table '%s'" % (c, table_name)) + else: + constrained_cols.append(constrained_col) + table.append_constraint( + sa_schema.UniqueConstraint(*constrained_cols, name=conname)) diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/result.py b/lib/python3.5/site-packages/sqlalchemy/engine/result.py index e7d67c0..689382f 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/result.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/result.py @@ -1,5 +1,5 @@ # engine/result.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -110,7 +110,7 @@ class RowProxy(BaseRowProxy): __slots__ = () def __contains__(self, key): - return self._parent._has_key(self._row, key) + return self._parent._has_key(key) def __getstate__(self): return { @@ -155,7 +155,7 @@ class RowProxy(BaseRowProxy): def has_key(self, key): """Return True if this RowProxy contains the given key.""" - return self._parent._has_key(self._row, key) + return self._parent._has_key(key) def items(self): """Return a list of tuples, each tuple containing a key/value pair.""" @@ -187,90 +187,165 @@ class ResultMetaData(object): context.""" def __init__(self, parent, metadata): - self._processors = processors = [] - - # We do not strictly need to store the processor in the key mapping, - # though it is faster in the Python version (probably because of the - # saved attribute lookup self._processors) - self._keymap = keymap = {} - self.keys = [] context = parent.context dialect = context.dialect typemap = dialect.dbapi_type_map translate_colname = context._translate_colname - self.case_sensitive = dialect.case_sensitive + self.case_sensitive = case_sensitive = dialect.case_sensitive - # high precedence key values. - primary_keymap = {} + if context.result_column_struct: + result_columns, cols_are_ordered = context.result_column_struct + num_ctx_cols = len(result_columns) + else: + num_ctx_cols = None - for i, rec in enumerate(metadata): - colname = rec[0] - coltype = rec[1] + if num_ctx_cols and \ + cols_are_ordered and \ + num_ctx_cols == len(metadata): + # case 1 - SQL expression statement, number of columns + # in result matches number of cols in compiled. This is the + # vast majority case for SQL expression constructs. In this + # case we don't bother trying to parse or match up to + # the colnames in the result description. + raw = [ + ( + idx, + key, + name.lower() if not case_sensitive else name, + context.get_result_processor( + type_, key, metadata[idx][1] + ), + obj, + None + ) for idx, (key, name, obj, type_) + in enumerate(result_columns) + ] + self.keys = [ + elem[0] for elem in result_columns + ] + else: + # case 2 - raw string, or number of columns in result does + # not match number of cols in compiled. The raw string case + # is very common. The latter can happen + # when text() is used with only a partial typemap, or + # in the extremely unlikely cases where the compiled construct + # has a single element with multiple col expressions in it + # (e.g. 
has commas embedded) or there's some kind of statement + # that is adding extra columns. + # In all these cases we fall back to the "named" approach + # that SQLAlchemy has used up through 0.9. - if dialect.description_encoding: - colname = dialect._description_decoder(colname) + if num_ctx_cols: + result_map = self._create_result_map( + result_columns, case_sensitive) + raw = [] + self.keys = [] + untranslated = None + for idx, rec in enumerate(metadata): + colname = rec[0] + coltype = rec[1] + + if dialect.description_encoding: + colname = dialect._description_decoder(colname) + + if translate_colname: + colname, untranslated = translate_colname(colname) + + if dialect.requires_name_normalize: + colname = dialect.normalize_name(colname) + + self.keys.append(colname) + if not case_sensitive: + colname = colname.lower() + + if num_ctx_cols: + try: + ctx_rec = result_map[colname] + except KeyError: + mapped_type = typemap.get(coltype, sqltypes.NULLTYPE) + obj = None + else: + obj = ctx_rec[1] + mapped_type = ctx_rec[2] + else: + mapped_type = typemap.get(coltype, sqltypes.NULLTYPE) + obj = None + processor = context.get_result_processor( + mapped_type, colname, coltype) + + raw.append( + (idx, colname, colname, processor, obj, untranslated) + ) + + # keymap indexes by integer index... + self._keymap = dict([ + (elem[0], (elem[3], elem[4], elem[0])) + for elem in raw + ]) + + # processors in key order for certain per-row + # views like __iter__ and slices + self._processors = [elem[3] for elem in raw] + + if num_ctx_cols: + # keymap by primary string... + by_key = dict([ + (elem[2], (elem[3], elem[4], elem[0])) + for elem in raw + ]) + + # if by-primary-string dictionary smaller (or bigger?!) than + # number of columns, assume we have dupes, rewrite + # dupe records with "None" for index which results in + # ambiguous column exception when accessed. + if len(by_key) != num_ctx_cols: + seen = set() + for rec in raw: + key = rec[1] + if key in seen: + by_key[key] = (None, by_key[key][1], None) + seen.add(key) + + # update keymap with secondary "object"-based keys + self._keymap.update([ + (obj_elem, by_key[elem[2]]) + for elem in raw if elem[4] + for obj_elem in elem[4] + ]) + + # update keymap with primary string names taking + # precedence + self._keymap.update(by_key) + else: + self._keymap.update([ + (elem[2], (elem[3], elem[4], elem[0])) + for elem in raw + ]) + # update keymap with "translated" names (sqlite-only thing) if translate_colname: - colname, untranslated = translate_colname(colname) + self._keymap.update([ + (elem[5], self._keymap[elem[2]]) + for elem in raw if elem[5] + ]) - if dialect.requires_name_normalize: - colname = dialect.normalize_name(colname) - - if context.result_map: - try: - name, obj, type_ = context.result_map[ - colname if self.case_sensitive else colname.lower()] - except KeyError: - name, obj, type_ = \ - colname, None, typemap.get(coltype, sqltypes.NULLTYPE) + @classmethod + def _create_result_map(cls, result_columns, case_sensitive=True): + d = {} + for elem in result_columns: + key, rec = elem[0], elem[1:] + if not case_sensitive: + key = key.lower() + if key in d: + # conflicting keyname, just double up the list + # of objects. this will cause an "ambiguous name" + # error if an attempt is made by the result set to + # access. 
+ e_name, e_obj, e_type = d[key] + d[key] = e_name, e_obj + rec[1], e_type else: - name, obj, type_ = \ - colname, None, typemap.get(coltype, sqltypes.NULLTYPE) - - processor = context.get_result_processor(type_, colname, coltype) - - processors.append(processor) - rec = (processor, obj, i) - - # indexes as keys. This is only needed for the Python version of - # RowProxy (the C version uses a faster path for integer indexes). - primary_keymap[i] = rec - - # populate primary keymap, looking for conflicts. - if primary_keymap.setdefault( - name if self.case_sensitive - else name.lower(), - rec) is not rec: - # place a record that doesn't have the "index" - this - # is interpreted later as an AmbiguousColumnError, - # but only when actually accessed. Columns - # colliding by name is not a problem if those names - # aren't used; integer access is always - # unambiguous. - primary_keymap[name - if self.case_sensitive - else name.lower()] = rec = (None, obj, None) - - self.keys.append(colname) - if obj: - for o in obj: - keymap[o] = rec - # technically we should be doing this but we - # are saving on callcounts by not doing so. - # if keymap.setdefault(o, rec) is not rec: - # keymap[o] = (None, obj, None) - - if translate_colname and \ - untranslated: - keymap[untranslated] = rec - - # overwrite keymap values with those of the - # high precedence keymap. - keymap.update(primary_keymap) - - if parent._echo: - context.engine.logger.debug( - "Col %r", tuple(x[0] for x in metadata)) + d[key] = rec + return d @util.pending_deprecation("0.8", "sqlite dialect uses " "_translate_colname() now") @@ -335,12 +410,28 @@ class ResultMetaData(object): map[key] = result return result - def _has_key(self, row, key): + def _has_key(self, key): if key in self._keymap: return True else: return self._key_fallback(key, False) is not None + def _getter(self, key): + if key in self._keymap: + processor, obj, index = self._keymap[key] + else: + ret = self._key_fallback(key, False) + if ret is None: + return None + processor, obj, index = ret + + if index is None: + raise exc.InvalidRequestError( + "Ambiguous column name '%s' in result set! " + "try 'use_labels' option on select statement." 
% key) + + return operator.itemgetter(index) + def __getstate__(self): return { '_pickled_keymap': dict( @@ -391,21 +482,49 @@ class ResultProxy(object): out_parameters = None _can_close_connection = False _metadata = None + _soft_closed = False + closed = False def __init__(self, context): self.context = context self.dialect = context.dialect - self.closed = False self.cursor = self._saved_cursor = context.cursor self.connection = context.root_connection self._echo = self.connection._echo and \ context.engine._should_log_debug() self._init_metadata() + def _getter(self, key): + try: + getter = self._metadata._getter + except AttributeError: + return self._non_result(None) + else: + return getter(key) + + def _has_key(self, key): + try: + has_key = self._metadata._has_key + except AttributeError: + return self._non_result(None) + else: + return has_key(key) + def _init_metadata(self): metadata = self._cursor_description() if metadata is not None: - self._metadata = ResultMetaData(self, metadata) + if self.context.compiled and \ + 'compiled_cache' in self.context.execution_options: + if self.context.compiled._cached_metadata: + self._metadata = self.context.compiled._cached_metadata + else: + self._metadata = self.context.compiled._cached_metadata = \ + ResultMetaData(self, metadata) + else: + self._metadata = ResultMetaData(self, metadata) + if self._echo: + self.context.engine.logger.debug( + "Col %r", tuple(x[0] for x in metadata)) def keys(self): """Return the current set of string keys for rows.""" @@ -515,39 +634,85 @@ class ResultProxy(object): return self._saved_cursor.description - def close(self, _autoclose_connection=True): - """Close this ResultProxy. + def _soft_close(self, _autoclose_connection=True): + """Soft close this :class:`.ResultProxy`. - Closes the underlying DBAPI cursor corresponding to the execution. - - Note that any data cached within this ResultProxy is still available. - For some types of results, this may include buffered rows. - - If this ResultProxy was generated from an implicit execution, - the underlying Connection will also be closed (returns the - underlying DBAPI connection to the connection pool.) + This releases all DBAPI cursor resources, but leaves the + ResultProxy "open" from a semantic perspective, meaning the + fetchXXX() methods will continue to return empty results. This method is called automatically when: * all result rows are exhausted using the fetchXXX() methods. * cursor.description is None. + This method is **not public**, but is documented in order to clarify + the "autoclose" process used. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.ResultProxy.close` + + + """ + if self._soft_closed: + return + self._soft_closed = True + cursor = self.cursor + self.connection._safe_close_cursor(cursor) + if _autoclose_connection and \ + self.connection.should_close_with_result: + self.connection.close() + self.cursor = None + + def close(self): + """Close this ResultProxy. + + This closes out the underlying DBAPI cursor corresponding + to the statement execution, if one is still present. Note that the + DBAPI cursor is automatically released when the :class:`.ResultProxy` + exhausts all available rows. :meth:`.ResultProxy.close` is generally + an optional method except in the case when discarding a + :class:`.ResultProxy` that still has additional rows pending for fetch. 
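A brief sketch of the soft-close behavior described here, using an in-memory SQLite engine as a stand-in (the table name is arbitrary)::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")
    conn = engine.connect()
    conn.execute("create table t (x integer)")
    conn.execute("insert into t values (1)")

    result = conn.execute("select x from t")
    rows = result.fetchall()          # exhausts rows; cursor is "soft closed"
    assert result.fetchall() == []    # still usable, now returns empty lists
    result.close()                    # hard close; subsequent fetches raise
                                      # ResourceClosedError
    conn.close()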
+ + In the case of a result that is the product of + :ref:`connectionless execution `, + the underlying :class:`.Connection` object is also closed, which + :term:`releases` DBAPI connection resources. + + After this method is called, it is no longer valid to call upon + the fetch methods, which will raise a :class:`.ResourceClosedError` + on subsequent use. + + .. versionchanged:: 1.0.0 - the :meth:`.ResultProxy.close` method + has been separated out from the process that releases the underlying + DBAPI cursor resource. The "auto close" feature of the + :class:`.Connection` now performs a so-called "soft close", which + releases the underlying DBAPI cursor, but allows the + :class:`.ResultProxy` to still behave as an open-but-exhausted + result set; the actual :meth:`.ResultProxy.close` method is never + called. It is still safe to discard a :class:`.ResultProxy` + that has been fully exhausted without calling this method. + + .. seealso:: + + :ref:`connections_toplevel` + + :meth:`.ResultProxy._soft_close` + """ if not self.closed: + self._soft_close() self.closed = True - self.connection._safe_close_cursor(self.cursor) - if _autoclose_connection and \ - self.connection.should_close_with_result: - self.connection.close() - # allow consistent errors - self.cursor = None def __iter__(self): while True: row = self.fetchone() if row is None: - raise StopIteration + return else: yield row @@ -732,7 +897,7 @@ class ResultProxy(object): try: return self.cursor.fetchone() except AttributeError: - self._non_result() + return self._non_result(None) def _fetchmany_impl(self, size=None): try: @@ -741,22 +906,24 @@ else: return self.cursor.fetchmany(size) except AttributeError: - self._non_result() + return self._non_result([]) def _fetchall_impl(self): try: return self.cursor.fetchall() except AttributeError: - self._non_result() + return self._non_result([]) - def _non_result(self): + def _non_result(self, default): if self._metadata is None: raise exc.ResourceClosedError( "This result object does not return rows. " "It has been closed automatically.", ) - else: + elif self.closed: raise exc.ResourceClosedError("This result object is closed.") + else: + return default def process_rows(self, rows): process_row = self._process_row @@ -775,11 +942,25 @@ for row in rows] def fetchall(self): - """Fetch all rows, just like DB-API ``cursor.fetchall()``.""" + """Fetch all rows, just like DB-API ``cursor.fetchall()``. + + After all rows have been exhausted, the underlying DBAPI + cursor resource is released, and the object may be safely + discarded. + + Subsequent calls to :meth:`.ResultProxy.fetchall` will return + an empty list. After the :meth:`.ResultProxy.close` method is + called, the method will raise :class:`.ResourceClosedError`. + + .. versionchanged:: 1.0.0 - Added "soft close" behavior which + allows the result to be used in an "exhausted" state prior to + calling the :meth:`.ResultProxy.close` method. + + """ try: l = self.process_rows(self._fetchall_impl()) - self.close() + self._soft_close() return l except Exception as e: self.connection._handle_dbapi_exception( @@ -790,15 +971,25 @@ """Fetch many rows, just like DB-API ``cursor.fetchmany(size=cursor.arraysize)``. - If rows are present, the cursor remains open after this is called. - Else the cursor is automatically closed and an empty list is returned. 
+ After all rows have been exhausted, the underlying DBAPI + cursor resource is released, and the object may be safely + discarded. + + Calls to :meth:`.ResultProxy.fetchmany` after all rows have been + exhausted will return + an empty list. After the :meth:`.ResultProxy.close` method is + called, the method will raise :class:`.ResourceClosedError`. + + .. versionchanged:: 1.0.0 - Added "soft close" behavior which + allows the result to be used in an "exhausted" state prior to + calling the :meth:`.ResultProxy.close` method. """ try: l = self.process_rows(self._fetchmany_impl(size)) if len(l) == 0: - self.close() + self._soft_close() return l except Exception as e: self.connection._handle_dbapi_exception( @@ -808,8 +999,18 @@ def fetchone(self): """Fetch one row, just like DB-API ``cursor.fetchone()``. - If a row is present, the cursor remains open after this is called. - Else the cursor is automatically closed and None is returned. + After all rows have been exhausted, the underlying DBAPI + cursor resource is released, and the object may be safely + discarded. + + Calls to :meth:`.ResultProxy.fetchone` after all rows have + been exhausted will return ``None``. + After the :meth:`.ResultProxy.close` method is + called, the method will raise :class:`.ResourceClosedError`. + + .. versionchanged:: 1.0.0 - Added "soft close" behavior which + allows the result to be used in an "exhausted" state prior to + calling the :meth:`.ResultProxy.close` method. """ try: @@ -817,7 +1018,7 @@ if row is not None: return self.process_rows([row])[0] else: - self.close() + self._soft_close() return None except Exception as e: self.connection._handle_dbapi_exception( @@ -829,9 +1030,12 @@ Returns None if no row is present. + After calling this method, the object is fully closed, + e.g. the :meth:`.ResultProxy.close` method will have been called. + """ if self._metadata is None: - self._non_result() + return self._non_result(None) try: row = self._fetchone_impl() @@ -853,6 +1057,9 @@ Returns None if no row is present. + After calling this method, the object is fully closed, + e.g. the :meth:`.ResultProxy.close` method will have been called. + """ row = self.first() if row is not None: @@ -873,10 +1080,27 @@ class BufferedRowResultProxy(ResultProxy): The pre-fetching behavior fetches only one row initially, and then grows its buffer size by a fixed amount with each successive need - for additional rows up to a size of 100. + for additional rows up to a size of 1000. + + The size argument is configurable using the ``max_row_buffer`` + execution option:: + + with psycopg2_engine.connect() as conn: + + result = conn.execution_options( + stream_results=True, max_row_buffer=50 + ).execute("select * from table") + + .. versionadded:: 1.0.6 Added the ``max_row_buffer`` option. + + .. 
seealso:: + + :ref:`psycopg2_execution_options` """ def _init_metadata(self): + self._max_row_buffer = self.context.execution_options.get( + 'max_row_buffer', None) self.__buffer_rows() super(BufferedRowResultProxy, self)._init_metadata() @@ -896,13 +1120,21 @@ class BufferedRowResultProxy(ResultProxy): } def __buffer_rows(self): + if self.cursor is None: + return size = getattr(self, '_bufsize', 1) self.__rowbuffer = collections.deque(self.cursor.fetchmany(size)) self._bufsize = self.size_growth.get(size, size) + if self._max_row_buffer is not None: + self._bufsize = min(self._max_row_buffer, self._bufsize) + + def _soft_close(self, **kw): + self.__rowbuffer.clear() + super(BufferedRowResultProxy, self)._soft_close(**kw) def _fetchone_impl(self): - if self.closed: - return None + if self.cursor is None: + return self._non_result(None) if not self.__rowbuffer: self.__buffer_rows() if not self.__rowbuffer: @@ -921,6 +1153,8 @@ class BufferedRowResultProxy(ResultProxy): return result def _fetchall_impl(self): + if self.cursor is None: + return self._non_result([]) self.__rowbuffer.extend(self.cursor.fetchall()) ret = self.__rowbuffer self.__rowbuffer = collections.deque() @@ -943,11 +1177,15 @@ class FullyBufferedResultProxy(ResultProxy): def _buffer_rows(self): return collections.deque(self.cursor.fetchall()) + def _soft_close(self, **kw): + self.__rowbuffer.clear() + super(FullyBufferedResultProxy, self)._soft_close(**kw) + def _fetchone_impl(self): if self.__rowbuffer: return self.__rowbuffer.popleft() else: - return None + return self._non_result(None) def _fetchmany_impl(self, size=None): if size is None: @@ -961,6 +1199,8 @@ class FullyBufferedResultProxy(ResultProxy): return result def _fetchall_impl(self): + if not self.cursor: + return self._non_result([]) ret = self.__rowbuffer self.__rowbuffer = collections.deque() return ret diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/strategies.py b/lib/python3.5/site-packages/sqlalchemy/engine/strategies.py index 38206be..2a018f8 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/strategies.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/strategies.py @@ -1,5 +1,5 @@ # engine/strategies.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -48,7 +48,8 @@ class DefaultEngineStrategy(EngineStrategy): # create url.URL object u = url.make_url(name_or_url) - dialect_cls = u.get_dialect() + entrypoint = u._get_entrypoint() + dialect_cls = entrypoint.get_dialect_cls(u) if kwargs.pop('_coerce_config', False): def pop_kwarg(key, default=None): @@ -81,21 +82,19 @@ class DefaultEngineStrategy(EngineStrategy): # assemble connection arguments (cargs, cparams) = dialect.create_connect_args(u) cparams.update(pop_kwarg('connect_args', {})) + cargs = list(cargs) # allow mutability # look for existing pool or create pool = pop_kwarg('pool', None) if pool is None: - def connect(): - try: - return dialect.connect(*cargs, **cparams) - except dialect.dbapi.Error as e: - invalidated = dialect.is_disconnect(e, None, None) - util.raise_from_cause( - exc.DBAPIError.instance( - None, None, e, dialect.dbapi.Error, - connection_invalidated=invalidated - ) - ) + def connect(connection_record=None): + if dialect._has_events: + for fn in dialect.dispatch.do_connect: + connection = fn( + dialect, connection_record, cargs, cparams) + if connection is not None: + return connection + return 
dialect.connect(*cargs, **cparams) creator = pop_kwarg('creator', connect) @@ -162,9 +161,14 @@ class DefaultEngineStrategy(EngineStrategy): def first_connect(dbapi_connection, connection_record): c = base.Connection(engine, connection=dbapi_connection, _has_events=False) + c._execution_options = util.immutabledict() dialect.initialize(c) event.listen(pool, 'first_connect', first_connect, once=True) + dialect_cls.engine_created(engine) + if entrypoint is not dialect_cls: + entrypoint.engine_created(engine) + return engine diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/threadlocal.py b/lib/python3.5/site-packages/sqlalchemy/engine/threadlocal.py index 637523a..505d1fa 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/threadlocal.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/threadlocal.py @@ -1,5 +1,5 @@ # engine/threadlocal.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -59,7 +59,10 @@ class TLEngine(base.Engine): # guards against pool-level reapers, if desired. # or not connection.connection.is_valid: connection = self._tl_connection_cls( - self, self.pool.connect(), **kw) + self, + self._wrap_pool_connect( + self.pool.connect, connection), + **kw) self._connections.conn = weakref.ref(connection) return connection._increment_connect() diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/url.py b/lib/python3.5/site-packages/sqlalchemy/engine/url.py index e362961..3cc2f35 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/url.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/url.py @@ -1,5 +1,5 @@ # engine/url.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -105,11 +105,25 @@ class URL(object): self.database == other.database and \ self.query == other.query - def get_dialect(self): - """Return the SQLAlchemy database dialect class corresponding - to this URL's driver name. - """ + def get_backend_name(self): + if '+' not in self.drivername: + return self.drivername + else: + return self.drivername.split('+')[0] + def get_driver_name(self): + if '+' not in self.drivername: + return self.get_dialect().driver + else: + return self.drivername.split('+')[1] + + def _get_entrypoint(self): + """Return the "entry point" dialect class. + + This is normally the dialect itself except in the case when the + returned class implements the get_dialect_cls() method. + + """ if '+' not in self.drivername: name = self.drivername else: @@ -125,6 +139,14 @@ class URL(object): else: return cls + def get_dialect(self): + """Return the SQLAlchemy database dialect class corresponding + to this URL's driver name. + """ + entrypoint = self._get_entrypoint() + dialect_cls = entrypoint.get_dialect_cls(self) + return dialect_cls + def translate_connect_args(self, names=[], **kw): """Translate url attributes into a dictionary of connection arguments. 
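A short sketch of the new :class:`.URL` accessors added above; the URL value itself is illustrative::

    from sqlalchemy.engine import url

    u = url.make_url("postgresql+psycopg2://scott:tiger@localhost/test")
    print(u.get_backend_name())   # 'postgresql' - the portion before the '+'
    print(u.get_driver_name())    # 'psycopg2' - the portion after the '+'

The split mirrors the ``drivername`` handling shown in ``_get_entrypoint()``: the backend name selects the dialect entry point, while the driver name selects the DBAPI-specific subclass.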
diff --git a/lib/python3.5/site-packages/sqlalchemy/engine/util.py b/lib/python3.5/site-packages/sqlalchemy/engine/util.py index d9eb1df..d28d870 100644 --- a/lib/python3.5/site-packages/sqlalchemy/engine/util.py +++ b/lib/python3.5/site-packages/sqlalchemy/engine/util.py @@ -1,5 +1,5 @@ # engine/util.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/event/__init__.py b/lib/python3.5/site-packages/sqlalchemy/event/__init__.py index b93c0ef..dddb924 100644 --- a/lib/python3.5/site-packages/sqlalchemy/event/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/event/__init__.py @@ -1,5 +1,5 @@ # event/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/event/api.py b/lib/python3.5/site-packages/sqlalchemy/event/api.py index 270e95c..0af48df 100644 --- a/lib/python3.5/site-packages/sqlalchemy/event/api.py +++ b/lib/python3.5/site-packages/sqlalchemy/event/api.py @@ -1,5 +1,5 @@ # event/api.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -58,6 +58,32 @@ def listen(target, identifier, fn, *args, **kw): .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen` and :func:`.event.listens_for`. + .. note:: + + The :func:`.listen` function cannot be called at the same time + that the target event is being run. This has implications + for thread safety, and also means an event cannot be added + from inside the listener function for itself. The list of + events to be run is present inside of a mutable collection + that can't be changed during iteration. + + Event registration and removal is not intended to be a "high + velocity" operation; it is a configurational operation. For + systems that need to quickly associate and deassociate with + events at high scale, use a mutable structure that is handled + from inside of a single listener. + + .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now + used as the container for the list of events, which explicitly + disallows collection mutation while the collection is being + iterated. + + .. seealso:: + + :func:`.listens_for` + + :func:`.remove` + """ _event_key(target, identifier, fn).listen(*args, **kw) @@ -89,6 +115,10 @@ def listens_for(target, identifier, *args, **kw): .. versionadded:: 0.9.4 Added ``once=True`` to :func:`.event.listen` and :func:`.event.listens_for`. + .. seealso:: + + :func:`.listen` - general description of event listening + """ def decorate(fn): listen(target, identifier, fn, *args, **kw) @@ -120,6 +150,30 @@ def remove(target, identifier, fn): .. versionadded:: 0.9.0 + .. note:: + + The :func:`.remove` function cannot be called at the same time + that the target event is being run. This has implications + for thread safety, and also means an event cannot be removed + from inside the listener function for itself. The list of + events to be run is present inside of a mutable collection + that can't be changed during iteration. + + Event registration and removal is not intended to be a "high + velocity" operation; it is a configurational operation. 
For + systems that need to quickly associate and deassociate with + events at high scale, use a mutable structure that is handled + from inside of a single listener. + + .. versionchanged:: 1.0.0 - a ``collections.deque()`` object is now + used as the container for the list of events, which explicitly + disallows collection mutation while the collection is being + iterated. + + .. seealso:: + + :func:`.listen` + """ _event_key(target, identifier, fn).remove() diff --git a/lib/python3.5/site-packages/sqlalchemy/event/attr.py b/lib/python3.5/site-packages/sqlalchemy/event/attr.py index 7641b59..1494013 100644 --- a/lib/python3.5/site-packages/sqlalchemy/event/attr.py +++ b/lib/python3.5/site-packages/sqlalchemy/event/attr.py @@ -1,5 +1,5 @@ # event/attr.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -37,19 +37,24 @@ from . import registry from . import legacy from itertools import chain import weakref +import collections -class RefCollection(object): - @util.memoized_property - def ref(self): +class RefCollection(util.MemoizedSlots): + __slots__ = 'ref', + + def _memoized_attr_ref(self): return weakref.ref(self, registry._collection_gced) -class _DispatchDescriptor(RefCollection): - """Class-level attributes on :class:`._Dispatch` classes.""" +class _ClsLevelDispatch(RefCollection): + """Class-level events on :class:`._Dispatch` classes.""" + + __slots__ = ('name', 'arg_names', 'has_kw', + 'legacy_signatures', '_clslevel', '__weakref__') def __init__(self, parent_dispatch_cls, fn): - self.__name__ = fn.__name__ + self.name = fn.__name__ argspec = util.inspect_getargspec(fn) self.arg_names = argspec.args[1:] self.has_kw = bool(argspec.keywords) @@ -59,11 +64,9 @@ class _DispatchDescriptor(RefCollection): key=lambda s: s[0] ) )) - self.__doc__ = fn.__doc__ = legacy._augment_fn_docs( - self, parent_dispatch_cls, fn) + fn.__doc__ = legacy._augment_fn_docs(self, parent_dispatch_cls, fn) self._clslevel = weakref.WeakKeyDictionary() - self._empty_listeners = weakref.WeakKeyDictionary() def _adjust_fn_spec(self, fn, named): if named: @@ -96,8 +99,8 @@ class _DispatchDescriptor(RefCollection): self.update_subclass(cls) else: if cls not in self._clslevel: - self._clslevel[cls] = [] - self._clslevel[cls].insert(0, event_key._listen_fn) + self._clslevel[cls] = collections.deque() + self._clslevel[cls].appendleft(event_key._listen_fn) registry._stored_in_collection(event_key, self) def append(self, event_key, propagate): @@ -113,13 +116,13 @@ class _DispatchDescriptor(RefCollection): self.update_subclass(cls) else: if cls not in self._clslevel: - self._clslevel[cls] = [] + self._clslevel[cls] = collections.deque() self._clslevel[cls].append(event_key._listen_fn) registry._stored_in_collection(event_key, self) def update_subclass(self, target): if target not in self._clslevel: - self._clslevel[target] = [] + self._clslevel[target] = collections.deque() clslevel = self._clslevel[target] for cls in target.__mro__[1:]: if cls in self._clslevel: @@ -145,40 +148,29 @@ class _DispatchDescriptor(RefCollection): to_clear = set() for dispatcher in self._clslevel.values(): to_clear.update(dispatcher) - dispatcher[:] = [] + dispatcher.clear() registry._clear(self, to_clear) def for_modify(self, obj): """Return an event collection which can be modified. 
- For _DispatchDescriptor at the class level of + For _ClsLevelDispatch at the class level of a dispatcher, this returns self. """ return self - def __get__(self, obj, cls): - if obj is None: - return self - elif obj._parent_cls in self._empty_listeners: - ret = self._empty_listeners[obj._parent_cls] - else: - self._empty_listeners[obj._parent_cls] = ret = \ - _EmptyListener(self, obj._parent_cls) - # assigning it to __dict__ means - # memoized for fast re-access. but more memory. - obj.__dict__[self.__name__] = ret - return ret +class _InstanceLevelDispatch(RefCollection): + __slots__ = () -class _HasParentDispatchDescriptor(object): def _adjust_fn_spec(self, fn, named): return self.parent._adjust_fn_spec(fn, named) -class _EmptyListener(_HasParentDispatchDescriptor): - """Serves as a class-level interface to the events - served by a _DispatchDescriptor, when there are no +class _EmptyListener(_InstanceLevelDispatch): + """Serves as a proxy interface to the events + served by a _ClsLevelDispatch, when there are no instance-level events present. Is replaced by _ListenerCollection when instance-level @@ -186,14 +178,17 @@ class _EmptyListener(_HasParentDispatchDescriptor): """ + propagate = frozenset() + listeners = () + + __slots__ = 'parent', 'parent_listeners', 'name' + def __init__(self, parent, target_cls): if target_cls not in parent._clslevel: parent.update_subclass(target_cls) - self.parent = parent # _DispatchDescriptor + self.parent = parent # _ClsLevelDispatch self.parent_listeners = parent._clslevel[target_cls] - self.name = parent.__name__ - self.propagate = frozenset() - self.listeners = () + self.name = parent.name def for_modify(self, obj): """Return an event collection which can be modified. @@ -204,9 +199,11 @@ class _EmptyListener(_HasParentDispatchDescriptor): and returns it. """ - result = _ListenerCollection(self.parent, obj._parent_cls) - if obj.__dict__[self.name] is self: - obj.__dict__[self.name] = result + result = _ListenerCollection(self.parent, obj._instance_cls) + if getattr(obj, self.name) is self: + setattr(obj, self.name, result) + else: + assert isinstance(getattr(obj, self.name), _JoinedListener) return result def _needs_modify(self, *args, **kw): @@ -232,11 +229,10 @@ class _EmptyListener(_HasParentDispatchDescriptor): __nonzero__ = __bool__ -class _CompoundListener(_HasParentDispatchDescriptor): - _exec_once = False +class _CompoundListener(_InstanceLevelDispatch): + __slots__ = '_exec_once_mutex', '_exec_once' - @util.memoized_property - def _exec_once_mutex(self): + def _memoized_attr__exec_once_mutex(self): return threading.Lock() def exec_once(self, *args, **kw): @@ -271,7 +267,7 @@ class _CompoundListener(_HasParentDispatchDescriptor): __nonzero__ = __bool__ -class _ListenerCollection(RefCollection, _CompoundListener): +class _ListenerCollection(_CompoundListener): """Instance-level attributes on instances of :class:`._Dispatch`. Represents a collection of listeners. 
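The "mutable structure that is handled from inside of a single listener" pattern recommended in the :func:`.listen` and :func:`.remove` notes above might look like the following sketch; the ``handlers`` collection, engine, and statement are illustrative names only::

    from sqlalchemy import create_engine, event

    engine = create_engine("sqlite://")
    handlers = set()   # cheap to add/remove at high velocity

    @event.listens_for(engine, "before_cursor_execute")
    def fan_out(conn, cursor, statement, parameters, context, executemany):
        # one permanently-registered listener dispatches to the live set
        for fn in list(handlers):
            fn(statement)

    handlers.add(lambda stmt: print("SQL:", stmt))
    engine.execute("select 1")   # prints: SQL: select 1

Only ``fan_out`` is ever registered with the event system, so the deque of listeners is never mutated at dispatch time; the ``handlers`` set can change freely.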
@@ -281,13 +277,18 @@ class _ListenerCollection(RefCollection, _CompoundListener): """ + __slots__ = ( + 'parent_listeners', 'parent', 'name', 'listeners', + 'propagate', '__weakref__') + def __init__(self, parent, target_cls): if target_cls not in parent._clslevel: parent.update_subclass(target_cls) + self._exec_once = False self.parent_listeners = parent._clslevel[target_cls] self.parent = parent - self.name = parent.__name__ - self.listeners = [] + self.name = parent.name + self.listeners = collections.deque() self.propagate = set() def for_modify(self, obj): @@ -318,14 +319,12 @@ class _ListenerCollection(RefCollection, _CompoundListener): registry._stored_in_collection_multi(self, other, to_associate) def insert(self, event_key, propagate): - if event_key._listen_fn not in self.listeners: - event_key.prepend_to_list(self, self.listeners) + if event_key.prepend_to_list(self, self.listeners): if propagate: self.propagate.add(event_key._listen_fn) def append(self, event_key, propagate): - if event_key._listen_fn not in self.listeners: - event_key.append_to_list(self, self.listeners) + if event_key.append_to_list(self, self.listeners): if propagate: self.propagate.add(event_key._listen_fn) @@ -337,28 +336,14 @@ class _ListenerCollection(RefCollection, _CompoundListener): def clear(self): registry._clear(self, self.listeners) self.propagate.clear() - self.listeners[:] = [] - - -class _JoinedDispatchDescriptor(object): - def __init__(self, name): - self.name = name - - def __get__(self, obj, cls): - if obj is None: - return self - else: - obj.__dict__[self.name] = ret = _JoinedListener( - obj.parent, self.name, - getattr(obj.local, self.name) - ) - return ret + self.listeners.clear() class _JoinedListener(_CompoundListener): - _exec_once = False + __slots__ = 'parent', 'name', 'local', 'parent_listeners' def __init__(self, parent, name, local): + self._exec_once = False self.parent = parent self.name = name self.local = local diff --git a/lib/python3.5/site-packages/sqlalchemy/event/base.py b/lib/python3.5/site-packages/sqlalchemy/event/base.py index 4925f6f..81ef5d8 100644 --- a/lib/python3.5/site-packages/sqlalchemy/event/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/event/base.py @@ -1,5 +1,5 @@ # event/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -17,9 +17,11 @@ instances of ``_Dispatch``. """ from __future__ import absolute_import +import weakref + from .. import util -from .attr import _JoinedDispatchDescriptor, \ - _EmptyListener, _DispatchDescriptor +from .attr import _JoinedListener, \ + _EmptyListener, _ClsLevelDispatch _registrars = util.defaultdict(list) @@ -34,10 +36,11 @@ class _UnpickleDispatch(object): """ - def __call__(self, _parent_cls): - for cls in _parent_cls.__mro__: + def __call__(self, _instance_cls): + for cls in _instance_cls.__mro__: if 'dispatch' in cls.__dict__: - return cls.__dict__['dispatch'].dispatch_cls(_parent_cls) + return cls.__dict__['dispatch'].\ + dispatch_cls._for_class(_instance_cls) else: raise AttributeError("No class with a 'dispatch' member present.") @@ -62,16 +65,53 @@ class _Dispatch(object): """ - _events = None - """reference the :class:`.Events` class which this - :class:`._Dispatch` is created for.""" + # in one ORM edge case, an attribute is added to _Dispatch, + # so __dict__ is used in just that case and potentially others. 
+ __slots__ = '_parent', '_instance_cls', '__dict__', '_empty_listeners' - def __init__(self, _parent_cls): - self._parent_cls = _parent_cls + _empty_listener_reg = weakref.WeakKeyDictionary() - @util.classproperty - def _listen(cls): - return cls._events._listen + def __init__(self, parent, instance_cls=None): + self._parent = parent + self._instance_cls = instance_cls + if instance_cls: + try: + self._empty_listeners = self._empty_listener_reg[instance_cls] + except KeyError: + self._empty_listeners = \ + self._empty_listener_reg[instance_cls] = dict( + (ls.name, _EmptyListener(ls, instance_cls)) + for ls in parent._event_descriptors + ) + else: + self._empty_listeners = {} + + def __getattr__(self, name): + # assign EmptyListeners as attributes on demand + # to reduce startup time for new dispatch objects + try: + ls = self._empty_listeners[name] + except KeyError: + raise AttributeError(name) + else: + setattr(self, ls.name, ls) + return ls + + @property + def _event_descriptors(self): + for k in self._event_names: + yield getattr(self, k) + + def _for_class(self, instance_cls): + return self.__class__(self, instance_cls) + + def _for_instance(self, instance): + instance_cls = instance.__class__ + return self._for_class(instance_cls) + + @property + def _listen(self): + return self._events._listen def _join(self, other): """Create a 'join' of this :class:`._Dispatch` and another. @@ -83,36 +123,27 @@ class _Dispatch(object): if '_joined_dispatch_cls' not in self.__class__.__dict__: cls = type( "Joined%s" % self.__class__.__name__, - (_JoinedDispatcher, self.__class__), {} + (_JoinedDispatcher, ), {'__slots__': self._event_names} ) - for ls in _event_descriptors(self): - setattr(cls, ls.name, _JoinedDispatchDescriptor(ls.name)) self.__class__._joined_dispatch_cls = cls return self._joined_dispatch_cls(self, other) def __reduce__(self): - return _UnpickleDispatch(), (self._parent_cls, ) + return _UnpickleDispatch(), (self._instance_cls, ) def _update(self, other, only_propagate=True): """Populate from the listeners in another :class:`_Dispatch` object.""" - - for ls in _event_descriptors(other): + for ls in other._event_descriptors: if isinstance(ls, _EmptyListener): continue getattr(self, ls.name).\ for_modify(self)._update(ls, only_propagate=only_propagate) - @util.hybridmethod def _clear(self): - for attr in dir(self): - if _is_event_name(attr): - getattr(self, attr).for_modify(self).clear() - - -def _event_descriptors(target): - return [getattr(target, k) for k in dir(target) if _is_event_name(k)] + for ls in self._event_descriptors: + ls.for_modify(self).clear() class _EventMeta(type): @@ -131,26 +162,37 @@ def _create_dispatcher_class(cls, classname, bases, dict_): # there's all kinds of ways to do this, # i.e. make a Dispatch class that shares the '_listen' method # of the Event class, this is the straight monkeypatch. 
- dispatch_base = getattr(cls, 'dispatch', _Dispatch) - dispatch_cls = type("%sDispatch" % classname, - (dispatch_base, ), {}) - cls._set_dispatch(cls, dispatch_cls) + if hasattr(cls, 'dispatch'): + dispatch_base = cls.dispatch.__class__ + else: + dispatch_base = _Dispatch - for k in dict_: - if _is_event_name(k): - setattr(dispatch_cls, k, _DispatchDescriptor(cls, dict_[k])) - _registrars[k].append(cls) + event_names = [k for k in dict_ if _is_event_name(k)] + dispatch_cls = type("%sDispatch" % classname, + (dispatch_base, ), {'__slots__': event_names}) + + dispatch_cls._event_names = event_names + + dispatch_inst = cls._set_dispatch(cls, dispatch_cls) + for k in dispatch_cls._event_names: + setattr(dispatch_inst, k, _ClsLevelDispatch(cls, dict_[k])) + _registrars[k].append(cls) + + for super_ in dispatch_cls.__bases__: + if issubclass(super_, _Dispatch) and super_ is not _Dispatch: + for ls in super_._events.dispatch._event_descriptors: + setattr(dispatch_inst, ls.name, ls) + dispatch_cls._event_names.append(ls.name) if getattr(cls, '_dispatch_target', None): cls._dispatch_target.dispatch = dispatcher(cls) def _remove_dispatcher(cls): - for k in dir(cls): - if _is_event_name(k): - _registrars[k].remove(cls) - if not _registrars[k]: - del _registrars[k] + for k in cls.dispatch._event_names: + _registrars[k].remove(cls) + if not _registrars[k]: + del _registrars[k] class Events(util.with_metaclass(_EventMeta, object)): @@ -163,17 +205,30 @@ class Events(util.with_metaclass(_EventMeta, object)): # "self.dispatch._events." # @staticemethod to allow easy "super" calls while in a metaclass # constructor. - cls.dispatch = dispatch_cls + cls.dispatch = dispatch_cls(None) dispatch_cls._events = cls + return cls.dispatch @classmethod def _accept_with(cls, target): # Mapper, ClassManager, Session override this to # also accept classes, scoped_sessions, sessionmakers, etc. 
if hasattr(target, 'dispatch') and ( - isinstance(target.dispatch, cls.dispatch) or - isinstance(target.dispatch, type) and - issubclass(target.dispatch, cls.dispatch) + + isinstance(target.dispatch, cls.dispatch.__class__) or + + + ( + isinstance(target.dispatch, type) and + isinstance(target.dispatch, cls.dispatch.__class__) + ) or + + ( + isinstance(target.dispatch, _JoinedDispatcher) and + isinstance(target.dispatch.parent, cls.dispatch.__class__) + ) + + ): return target else: @@ -195,10 +250,24 @@ class Events(util.with_metaclass(_EventMeta, object)): class _JoinedDispatcher(object): """Represent a connection between two _Dispatch objects.""" + __slots__ = 'local', 'parent', '_instance_cls' + def __init__(self, local, parent): self.local = local self.parent = parent - self._parent_cls = local._parent_cls + self._instance_cls = self.local._instance_cls + + def __getattr__(self, name): + # assign _JoinedListeners as attributes on demand + # to reduce startup time for new dispatch objects + ls = getattr(self.local, name) + jl = _JoinedListener(self.parent, ls.name, ls) + setattr(self, ls.name, jl) + return jl + + @property + def _listen(self): + return self.parent._listen class dispatcher(object): @@ -216,5 +285,5 @@ class dispatcher(object): def __get__(self, obj, cls): if obj is None: return self.dispatch_cls - obj.__dict__['dispatch'] = disp = self.dispatch_cls(cls) + obj.__dict__['dispatch'] = disp = self.dispatch_cls._for_instance(obj) return disp diff --git a/lib/python3.5/site-packages/sqlalchemy/event/legacy.py b/lib/python3.5/site-packages/sqlalchemy/event/legacy.py index 3b1519c..b359bf4 100644 --- a/lib/python3.5/site-packages/sqlalchemy/event/legacy.py +++ b/lib/python3.5/site-packages/sqlalchemy/event/legacy.py @@ -1,5 +1,5 @@ # event/legacy.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -22,8 +22,8 @@ def _legacy_signature(since, argnames, converter=None): return leg -def _wrap_fn_for_legacy(dispatch_descriptor, fn, argspec): - for since, argnames, conv in dispatch_descriptor.legacy_signatures: +def _wrap_fn_for_legacy(dispatch_collection, fn, argspec): + for since, argnames, conv in dispatch_collection.legacy_signatures: if argnames[-1] == "**kw": has_kw = True argnames = argnames[0:-1] @@ -40,7 +40,7 @@ def _wrap_fn_for_legacy(dispatch_descriptor, fn, argspec): return fn(*conv(*args)) else: def wrap_leg(*args, **kw): - argdict = dict(zip(dispatch_descriptor.arg_names, args)) + argdict = dict(zip(dispatch_collection.arg_names, args)) args = [argdict[name] for name in argnames] if has_kw: return fn(*args, **kw) @@ -58,16 +58,16 @@ def _indent(text, indent): ) -def _standard_listen_example(dispatch_descriptor, sample_target, fn): +def _standard_listen_example(dispatch_collection, sample_target, fn): example_kw_arg = _indent( "\n".join( "%(arg)s = kw['%(arg)s']" % {"arg": arg} - for arg in dispatch_descriptor.arg_names[0:2] + for arg in dispatch_collection.arg_names[0:2] ), " ") - if dispatch_descriptor.legacy_signatures: + if dispatch_collection.legacy_signatures: current_since = max(since for since, args, conv - in dispatch_descriptor.legacy_signatures) + in dispatch_collection.legacy_signatures) else: current_since = None text = ( @@ -80,7 +80,7 @@ def _standard_listen_example(dispatch_descriptor, sample_target, fn): "\n # ... 
(event handling logic) ...\n" ) - if len(dispatch_descriptor.arg_names) > 3: + if len(dispatch_collection.arg_names) > 3: text += ( "\n# named argument style (new in 0.9)\n" @@ -96,17 +96,17 @@ def _standard_listen_example(dispatch_descriptor, sample_target, fn): "current_since": " (arguments as of %s)" % current_since if current_since else "", "event_name": fn.__name__, - "has_kw_arguments": ", **kw" if dispatch_descriptor.has_kw else "", - "named_event_arguments": ", ".join(dispatch_descriptor.arg_names), + "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "", + "named_event_arguments": ", ".join(dispatch_collection.arg_names), "example_kw_arg": example_kw_arg, "sample_target": sample_target } return text -def _legacy_listen_examples(dispatch_descriptor, sample_target, fn): +def _legacy_listen_examples(dispatch_collection, sample_target, fn): text = "" - for since, args, conv in dispatch_descriptor.legacy_signatures: + for since, args, conv in dispatch_collection.legacy_signatures: text += ( "\n# legacy calling style (pre-%(since)s)\n" "@event.listens_for(%(sample_target)s, '%(event_name)s')\n" @@ -117,7 +117,7 @@ def _legacy_listen_examples(dispatch_descriptor, sample_target, fn): "since": since, "event_name": fn.__name__, "has_kw_arguments": " **kw" - if dispatch_descriptor.has_kw else "", + if dispatch_collection.has_kw else "", "named_event_arguments": ", ".join(args), "sample_target": sample_target } @@ -125,8 +125,8 @@ def _legacy_listen_examples(dispatch_descriptor, sample_target, fn): return text -def _version_signature_changes(dispatch_descriptor): - since, args, conv = dispatch_descriptor.legacy_signatures[0] +def _version_signature_changes(dispatch_collection): + since, args, conv = dispatch_collection.legacy_signatures[0] return ( "\n.. versionchanged:: %(since)s\n" " The ``%(event_name)s`` event now accepts the \n" @@ -135,14 +135,14 @@ def _version_signature_changes(dispatch_descriptor): " signature(s) listed above will be automatically \n" " adapted to the new signature." % { "since": since, - "event_name": dispatch_descriptor.__name__, - "named_event_arguments": ", ".join(dispatch_descriptor.arg_names), - "has_kw_arguments": ", **kw" if dispatch_descriptor.has_kw else "" + "event_name": dispatch_collection.name, + "named_event_arguments": ", ".join(dispatch_collection.arg_names), + "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "" } ) -def _augment_fn_docs(dispatch_descriptor, parent_dispatch_cls, fn): +def _augment_fn_docs(dispatch_collection, parent_dispatch_cls, fn): header = ".. 
container:: event_signatures\n\n"\ " Example argument forms::\n"\ "\n" @@ -152,16 +152,16 @@ def _augment_fn_docs(dispatch_descriptor, parent_dispatch_cls, fn): header + _indent( _standard_listen_example( - dispatch_descriptor, sample_target, fn), + dispatch_collection, sample_target, fn), " " * 8) ) - if dispatch_descriptor.legacy_signatures: + if dispatch_collection.legacy_signatures: text += _indent( _legacy_listen_examples( - dispatch_descriptor, sample_target, fn), + dispatch_collection, sample_target, fn), " " * 8) - text += _version_signature_changes(dispatch_descriptor) + text += _version_signature_changes(dispatch_collection) return util.inject_docstring_text(fn.__doc__, text, diff --git a/lib/python3.5/site-packages/sqlalchemy/event/registry.py b/lib/python3.5/site-packages/sqlalchemy/event/registry.py index a34de3c..e1e9262 100644 --- a/lib/python3.5/site-packages/sqlalchemy/event/registry.py +++ b/lib/python3.5/site-packages/sqlalchemy/event/registry.py @@ -1,5 +1,5 @@ # event/registry.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -37,7 +37,7 @@ listener collections and the listener fn contained _collection_to_key = collections.defaultdict(dict) """ -Given a _ListenerCollection or _DispatchDescriptor, can locate +Given a _ListenerCollection or _ClsLevelListener, can locate all the original listen() arguments and the listener fn contained ref(listenercollection) -> { @@ -71,13 +71,15 @@ def _stored_in_collection(event_key, owner): listen_ref = weakref.ref(event_key._listen_fn) if owner_ref in dispatch_reg: - assert dispatch_reg[owner_ref] == listen_ref - else: - dispatch_reg[owner_ref] = listen_ref + return False + + dispatch_reg[owner_ref] = listen_ref listener_to_key = _collection_to_key[owner_ref] listener_to_key[listen_ref] = key + return True + def _removed_from_collection(event_key, owner): key = event_key._key @@ -138,6 +140,10 @@ class _EventKey(object): """Represent :func:`.listen` arguments. 
""" + __slots__ = ( + 'target', 'identifier', 'fn', 'fn_key', 'fn_wrap', 'dispatch_target' + ) + def __init__(self, target, identifier, fn, dispatch_target, _fn_wrap=None): self.target = target @@ -180,6 +186,17 @@ class _EventKey(object): def listen(self, *args, **kw): once = kw.pop("once", False) + named = kw.pop("named", False) + + target, identifier, fn = \ + self.dispatch_target, self.identifier, self._listen_fn + + dispatch_collection = getattr(target.dispatch, identifier) + + adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named) + + self = self.with_wrapper(adjusted_fn) + if once: self.with_wrapper( util.only_once(self._listen_fn)).listen(*args, **kw) @@ -213,34 +230,33 @@ class _EventKey(object): target, identifier, fn = \ self.dispatch_target, self.identifier, self._listen_fn - dispatch_descriptor = getattr(target.dispatch, identifier) - - fn = dispatch_descriptor._adjust_fn_spec(fn, named) - self = self.with_wrapper(fn) + dispatch_collection = getattr(target.dispatch, identifier) if insert: - dispatch_descriptor.\ + dispatch_collection.\ for_modify(target.dispatch).insert(self, propagate) else: - dispatch_descriptor.\ + dispatch_collection.\ for_modify(target.dispatch).append(self, propagate) @property def _listen_fn(self): return self.fn_wrap or self.fn - def append_value_to_list(self, owner, list_, value): - _stored_in_collection(self, owner) - list_.append(value) - def append_to_list(self, owner, list_): - _stored_in_collection(self, owner) - list_.append(self._listen_fn) + if _stored_in_collection(self, owner): + list_.append(self._listen_fn) + return True + else: + return False def remove_from_list(self, owner, list_): _removed_from_collection(self, owner) list_.remove(self._listen_fn) def prepend_to_list(self, owner, list_): - _stored_in_collection(self, owner) - list_.insert(0, self._listen_fn) + if _stored_in_collection(self, owner): + list_.appendleft(self._listen_fn) + return True + else: + return False diff --git a/lib/python3.5/site-packages/sqlalchemy/events.py b/lib/python3.5/site-packages/sqlalchemy/events.py index 42bbbfc..1abef26 100644 --- a/lib/python3.5/site-packages/sqlalchemy/events.py +++ b/lib/python3.5/site-packages/sqlalchemy/events.py @@ -1,5 +1,5 @@ # sqlalchemy/events.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -338,7 +338,7 @@ class PoolEvents(event.Events): """ - def reset(self, dbapi_connnection, connection_record): + def reset(self, dbapi_connection, connection_record): """Called before the "reset" action occurs for a pooled connection. This event represents @@ -371,7 +371,9 @@ class PoolEvents(event.Events): """Called when a DBAPI connection is to be "invalidated". This event is called any time the :meth:`._ConnectionRecord.invalidate` - method is invoked, either from API usage or via "auto-invalidation". + method is invoked, either from API usage or via "auto-invalidation", + without the ``soft`` flag. + The event occurs before a final attempt to call ``.close()`` on the connection occurs. @@ -392,6 +394,21 @@ class PoolEvents(event.Events): """ + def soft_invalidate(self, dbapi_connection, connection_record, exception): + """Called when a DBAPI connection is to be "soft invalidated". + + This event is called any time the :meth:`._ConnectionRecord.invalidate` + method is invoked with the ``soft`` flag. 
+ + Soft invalidation refers to when the connection record that tracks + this connection will force a reconnect after the current connection + is checked in. It does not actively close the dbapi_connection + at the point at which it is called. + + .. versionadded:: 1.0.3 + + """ + class ConnectionEvents(event.Events): """Available events for :class:`.Connectable`, which includes @@ -420,6 +437,12 @@ class ConnectionEvents(event.Events): context, executemany): log.info("Received statement: %s" % statement) + When the methods are called with a `statement` parameter, such as in + :meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and + :meth:`.dbapi_error`, the statement is the exact SQL string that was + prepared for transmission to the DBAPI ``cursor`` in the connection's + :class:`.Dialect`. + The :meth:`.before_execute` and :meth:`.before_cursor_execute` events can also be established with the ``retval=True`` flag, which allows modification of the statement and parameters to be sent @@ -470,7 +493,8 @@ class ConnectionEvents(event.Events): @classmethod def _listen(cls, event_key, retval=False): target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, event_key.fn + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn target._has_events = True @@ -548,9 +572,8 @@ class ConnectionEvents(event.Events): def before_cursor_execute(self, conn, cursor, statement, parameters, context, executemany): """Intercept low-level cursor execute() events before execution, - receiving the string - SQL statement and DBAPI-specific parameter list to be invoked - against a cursor. + receiving the string SQL statement and DBAPI-specific parameter list to + be invoked against a cursor. This event is a good choice for logging as well as late modifications to the SQL string. It's less ideal for parameter modifications except @@ -570,7 +593,7 @@ class ConnectionEvents(event.Events): :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object - :param statement: string SQL statement + :param statement: string SQL statement, as to be passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. @@ -595,7 +618,7 @@ class ConnectionEvents(event.Events): :param cursor: DBAPI cursor object. Will have results pending if the statement was a SELECT, but these should not be consumed as they will be needed by the :class:`.ResultProxy`. - :param statement: string SQL statement + :param statement: string SQL statement, as passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. @@ -639,7 +662,7 @@ class ConnectionEvents(event.Events): :param conn: :class:`.Connection` object :param cursor: DBAPI cursor object - :param statement: string SQL statement + :param statement: string SQL statement, as passed to the DBAPI :param parameters: Dictionary, tuple, or list of parameters being passed to the ``execute()`` or ``executemany()`` method of the DBAPI ``cursor``. In some cases may be ``None``. @@ -701,6 +724,16 @@ class ConnectionEvents(event.Events): "failed" in str(context.original_exception): raise MySpecialException("failed operation") + .. 
warning:: Because the :meth:`.ConnectionEvents.handle_error` + event specifically provides for exceptions to be re-thrown as + the ultimate exception raised by the failed statement, + **stack traces will be misleading** if the user-defined event + handler itself fails and throws an unexpected exception; + the stack trace may not illustrate the actual code line that + failed! It is advised to code carefully here and use + logging and/or inline debugging if unexpected exceptions are + occurring. + + Alternatively, a "chained" style of event handling can be used, + by configuring the handler with the ``retval=True`` modifier + and returning the new exception instance from the @@ -733,6 +766,22 @@ .. versionadded:: 0.9.7 Added the :meth:`.ConnectionEvents.handle_error` hook. + .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now + invoked when an :class:`.Engine` fails during the initial + call to :meth:`.Engine.connect`, as well as when a + :class:`.Connection` object encounters an error during a + reconnect operation. + + .. versionchanged:: 1.0.0 The :meth:`.handle_error` event is + not fired off when a dialect makes use of the + ``skip_user_error_events`` execution option. This is used + by dialects which intend to catch SQLAlchemy-specific exceptions + within specific operations, such as when the MySQL dialect detects + a table not present within the ``has_table()`` dialect method. + Prior to 1.0.0, code which implements :meth:`.handle_error` needs + to ensure that exceptions thrown in these scenarios are re-raised + without modification. + """ def engine_connect(self, conn, branch): @@ -770,6 +819,11 @@ .. seealso:: + :ref:`pool_disconnects_pessimistic` - illustrates how to use + :meth:`.ConnectionEvents.engine_connect` + to transparently ensure pooled connections are connected to the + database. + :meth:`.PoolEvents.checkout` the lower-level pool checkout event for an individual DBAPI connection @@ -833,6 +887,23 @@ """ + def engine_disposed(self, engine): + """Intercept when the :meth:`.Engine.dispose` method is called. + + The :meth:`.Engine.dispose` method instructs the engine to + "dispose" of its connection pool (e.g. :class:`.Pool`), and + replaces it with a new one. Disposing of the old pool has the + effect that existing checked-in connections are closed. The new + pool does not establish any new connections until it is first used. + + This event can be used to indicate that resources related to the + :class:`.Engine` should also be cleaned up, keeping in mind that the + :class:`.Engine` can still be used for new requests in which case + it re-acquires connection resources. + + .. versionadded:: 1.0.5 + + """ def begin(self, conn): """Intercept begin() events. @@ -985,6 +1056,23 @@ class DialectEvents(event.Events): else: return target + def do_connect(self, dialect, conn_rec, cargs, cparams): + """Receive connection arguments before a connection is made. + + Return a DBAPI connection to halt further events from invoking; + the returned connection will be used. + + Alternatively, the event can manipulate the cargs and/or cparams + collections; cargs will always be a Python list that can be mutated + in-place and cparams a Python dictionary. Return None to + allow control to pass to the next event handler and ultimately + to allow the dialect to connect normally, given the updated + arguments. + + .. 
versionadded:: 1.0.3 + + """ + def do_executemany(self, cursor, statement, parameters, context): """Receive a cursor to have executemany() called. diff --git a/lib/python3.5/site-packages/sqlalchemy/exc.py b/lib/python3.5/site-packages/sqlalchemy/exc.py index 7d333fc..2729842 100644 --- a/lib/python3.5/site-packages/sqlalchemy/exc.py +++ b/lib/python3.5/site-packages/sqlalchemy/exc.py @@ -1,5 +1,5 @@ # sqlalchemy/exc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -13,8 +13,6 @@ raised as a result of DBAPI exceptions are all subclasses of """ -import traceback - class SQLAlchemyError(Exception): """Generic error class.""" @@ -54,8 +52,7 @@ class CircularDependencyError(SQLAlchemyError): or pre-deassociate one of the foreign key constrained values. The ``post_update`` flag described at :ref:`post_update` can resolve this cycle. - * In a :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`, - :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey` + * In a :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey` or :class:`.ForeignKeyConstraint` objects mutually refer to each other. Apply the ``use_alter=True`` flag to one or both, see :ref:`use_alter`. @@ -63,7 +60,7 @@ class CircularDependencyError(SQLAlchemyError): """ def __init__(self, message, cycles, edges, msg=None): if msg is None: - message += " Cycles: %r all edges: %r" % (cycles, edges) + message += " (%s)" % ", ".join(repr(s) for s in cycles) else: message = msg SQLAlchemyError.__init__(self, message) @@ -238,14 +235,16 @@ class StatementError(SQLAlchemyError): def __str__(self): from sqlalchemy.sql import util - params_repr = util._repr_params(self.params, 10) + details = [SQLAlchemyError.__str__(self)] + if self.statement: + details.append("[SQL: %r]" % self.statement) + if self.params: + params_repr = util._repr_params(self.params, 10) + details.append("[parameters: %r]" % params_repr) return ' '.join([ "(%s)" % det for det in self.detail - ] + [ - SQLAlchemyError.__str__(self), - repr(self.statement), repr(params_repr) - ]) + ] + details) def __unicode__(self): return self.__str__() @@ -277,26 +276,35 @@ class DBAPIError(StatementError): @classmethod def instance(cls, statement, params, orig, dbapi_base_err, - connection_invalidated=False): + connection_invalidated=False, + dialect=None): # Don't ever wrap these, just return them directly as if # DBAPIError didn't exist. - if isinstance(orig, (KeyboardInterrupt, SystemExit, DontWrapMixin)): + if (isinstance(orig, BaseException) and + not isinstance(orig, Exception)) or \ + isinstance(orig, DontWrapMixin): return orig if orig is not None: # not a DBAPI error, statement is present. 
# raise a StatementError if not isinstance(orig, dbapi_base_err) and statement: - msg = traceback.format_exception_only( - orig.__class__, orig)[-1].strip() return StatementError( - "%s (original cause: %s)" % (str(orig), msg), + "(%s.%s) %s" % + (orig.__class__.__module__, orig.__class__.__name__, + orig), statement, params, orig ) - name, glob = orig.__class__.__name__, globals() - if name in glob and issubclass(glob[name], DBAPIError): - cls = glob[name] + glob = globals() + for super_ in orig.__class__.__mro__: + name = super_.__name__ + if dialect: + name = dialect.dbapi_exception_translation_map.get( + name, name) + if name in glob and issubclass(glob[name], DBAPIError): + cls = glob[name] + break return cls(statement, params, orig, connection_invalidated) @@ -307,13 +315,12 @@ class DBAPIError(StatementError): def __init__(self, statement, params, orig, connection_invalidated=False): try: text = str(orig) - except (KeyboardInterrupt, SystemExit): - raise except Exception as e: text = 'Error in str() of DB-API-generated exception: ' + str(e) StatementError.__init__( self, - '(%s) %s' % (orig.__class__.__name__, text), + '(%s.%s) %s' % ( + orig.__class__.__module__, orig.__class__.__name__, text, ), statement, params, orig diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/__init__.py b/lib/python3.5/site-packages/sqlalchemy/ext/__init__.py index d213a0d..1c8a59a 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/__init__.py @@ -1,6 +1,11 @@ # ext/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .. import util as _sa_util + +_sa_util.dependencies.resolve_all("sqlalchemy.ext") + diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/associationproxy.py b/lib/python3.5/site-packages/sqlalchemy/ext/associationproxy.py index a987ab4..fdc44f3 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/associationproxy.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/associationproxy.py @@ -1,5 +1,5 @@ # ext/associationproxy.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -77,16 +77,16 @@ def association_proxy(target_collection, attr, **kw): ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY') -"""Symbol indicating an :class:`_InspectionAttr` that's +"""Symbol indicating an :class:`InspectionAttr` that's of type :class:`.AssociationProxy`. - Is assigned to the :attr:`._InspectionAttr.extension_type` + Is assigned to the :attr:`.InspectionAttr.extension_type` attibute. """ -class AssociationProxy(interfaces._InspectionAttr): +class AssociationProxy(interfaces.InspectionAttrInfo): """A descriptor that presents a read/write view of an object attribute.""" is_attribute = False @@ -94,7 +94,7 @@ class AssociationProxy(interfaces._InspectionAttr): def __init__(self, target_collection, attr, creator=None, getset_factory=None, proxy_factory=None, - proxy_bulk_set=None): + proxy_bulk_set=None, info=None): """Construct a new :class:`.AssociationProxy`. The :func:`.association_proxy` function is provided as the usual @@ -138,6 +138,11 @@ class AssociationProxy(interfaces._InspectionAttr): :param proxy_bulk_set: Optional, use with proxy_factory. 
See the _set() method for details. + :param info: optional, will be assigned to + :attr:`.AssociationProxy.info` if present. + + .. versionadded:: 1.0.9 + """ self.target_collection = target_collection self.value_attr = attr @@ -150,6 +155,8 @@ class AssociationProxy(interfaces._InspectionAttr): self.key = '_%s_%s_%s' % ( type(self).__name__, target_collection, id(self)) self.collection_class = None + if info: + self.info = info @property def remote_attr(self): @@ -365,13 +372,17 @@ class AssociationProxy(interfaces._InspectionAttr): operators of the underlying proxied attributes. """ - - if self._value_is_scalar: - value_expr = getattr( - self.target_class, self.value_attr).has(criterion, **kwargs) + if self._target_is_object: + if self._value_is_scalar: + value_expr = getattr( + self.target_class, self.value_attr).has( + criterion, **kwargs) + else: + value_expr = getattr( + self.target_class, self.value_attr).any( + criterion, **kwargs) else: - value_expr = getattr( - self.target_class, self.value_attr).any(criterion, **kwargs) + value_expr = criterion # check _value_is_scalar here, otherwise # we're scalar->scalar - call .any() so that @@ -527,7 +538,10 @@ class _AssociationList(_AssociationCollection): return self.setter(object, value) def __getitem__(self, index): - return self._get(self.col[index]) + if not isinstance(index, slice): + return self._get(self.col[index]) + else: + return [self._get(member) for member in self.col[index]] def __setitem__(self, index, value): if not isinstance(index, slice): @@ -589,7 +603,7 @@ class _AssociationList(_AssociationCollection): for member in self.col: yield self._get(member) - raise StopIteration + return def append(self, value): item = self._create(value) @@ -893,7 +907,7 @@ class _AssociationSet(_AssociationCollection): """ for member in self.col: yield self._get(member) - raise StopIteration + return def add(self, value): if value not in self: diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/automap.py b/lib/python3.5/site-packages/sqlalchemy/ext/automap.py index 121285a..023d11c 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/automap.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/automap.py @@ -1,5 +1,5 @@ # ext/automap.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -11,12 +11,6 @@ schema, typically though not necessarily one which is reflected. .. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`. -.. note:: - - The :mod:`sqlalchemy.ext.automap` extension should be considered - **experimental** as of 0.9.1. Featureset and API stability is - not guaranteed at this time. - It is hoped that the :class:`.AutomapBase` system provides a quick and modernized solution to the problem that the very famous `SQLSoup `_ @@ -67,7 +61,7 @@ asking it to reflect the schema and produce mappings:: Above, calling :meth:`.AutomapBase.prepare` while passing along the :paramref:`.AutomapBase.prepare.reflect` parameter indicates that the :meth:`.MetaData.reflect` method will be called on this declarative base -classes' :class:`.MetaData` collection; then, each viable +classes' :class:`.MetaData` collection; then, each **viable** :class:`.Table` within the :class:`.MetaData` will get a new mapped class generated automatically. 
The :class:`.ForeignKeyConstraint` objects which link the various tables together will be used to produce new, bidirectional @@ -76,6 +70,12 @@ follow along a default naming scheme that we can customize. At this point, our basic mapping consisting of related ``User`` and ``Address`` classes is ready to use in the traditional way. +.. note:: By **viable**, we mean that for a table to be mapped, it must + specify a primary key. Additionally, if the table is detected as being + a pure association table between two other tables, it will not be directly + mapped and will instead be configured as a many-to-many table between + the mappings for the two referring tables. + Generating Mappings from an Existing MetaData ============================================= @@ -111,8 +111,8 @@ explicit table declaration:: User, Address, Order = Base.classes.user, Base.classes.address,\ Base.classes.user_order -Specifying Classes Explcitly -============================ +Specifying Classes Explicitly +============================= The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined explicitly, in a way similar to that of the :class:`.DeferredReflection` class. @@ -188,7 +188,7 @@ scheme for class names and a "pluralizer" for collection names using the "'words_and_underscores' -> 'WordsAndUnderscores'" return str(tablename[0].upper() + \\ - re.sub(r'_(\w)', lambda m: m.group(1).upper(), tablename[1:])) + re.sub(r'_([a-z])', lambda m: m.group(1).upper(), tablename[1:])) _pluralizer = inflect.engine() def pluralize_collection(base, local_cls, referred_cls, constraint): @@ -196,10 +196,9 @@ scheme for class names and a "pluralizer" for collection names using the "'SomeTerm' -> 'some_terms'" referred_name = referred_cls.__name__ - uncamelized = referred_name[0].lower() + \\ - re.sub(r'\W', - lambda m: "_%s" % m.group(0).lower(), - referred_name[1:]) + uncamelized = re.sub(r'[A-Z]', + lambda m: "_%s" % m.group(0).lower(), + referred_name)[1:] pluralized = _pluralizer.plural(uncamelized) return pluralized @@ -243,7 +242,26 @@ follows: one-to-many backref will be created on the referred class referring to this class. -4. The names of the relationships are determined using the +4. If any of the columns that are part of the :class:`.ForeignKeyConstraint` + are not nullable (e.g. ``nullable=False``), a + :paramref:`~.relationship.cascade` keyword argument + of ``all, delete-orphan`` will be added to the keyword arguments to + be passed to the relationship or backref. If the + :class:`.ForeignKeyConstraint` reports that + :paramref:`.ForeignKeyConstraint.ondelete` + is set to ``CASCADE`` for a not null or ``SET NULL`` for a nullable + set of columns, the option :paramref:`~.relationship.passive_deletes` + flag is set to ``True`` in the set of relationship keyword arguments. + Note that not all backends support reflection of ON DELETE. + + .. versionadded:: 1.0.0 - automap will detect non-nullable foreign key + constraints when producing a one-to-many relationship and establish + a default cascade of ``all, delete-orphan`` if so; additionally, + if the constraint specifies :paramref:`.ForeignKeyConstraint.ondelete` + of ``CASCADE`` for non-nullable or ``SET NULL`` for nullable columns, + the ``passive_deletes=True`` option is also added. + +5. The names of the relationships are determined using the :paramref:`.AutomapBase.prepare.name_for_scalar_relationship` and :paramref:`.AutomapBase.prepare.name_for_collection_relationship` callable functions. 
It is important to note that the default relationship @@ -252,18 +270,18 @@ follows: alternate class naming scheme, that's the name from which the relationship name will be derived. -5. The classes are inspected for an existing mapped property matching these +6. The classes are inspected for an existing mapped property matching these names. If one is detected on one side, but none on the other side, :class:`.AutomapBase` attempts to create a relationship on the missing side, then uses the :paramref:`.relationship.back_populates` parameter in order to point the new relationship to the other side. -6. In the usual case where no relationship is on either side, +7. In the usual case where no relationship is on either side, :meth:`.AutomapBase.prepare` produces a :func:`.relationship` on the "many-to-one" side and matches it to the other using the :paramref:`.relationship.backref` parameter. -7. Production of the :func:`.relationship` and optionally the :func:`.backref` +8. Production of the :func:`.relationship` and optionally the :func:`.backref` is handed off to the :paramref:`.AutomapBase.prepare.generate_relationship` function, which can be supplied by the end-user in order to augment the arguments passed to :func:`.relationship` or :func:`.backref` or to @@ -606,7 +624,7 @@ def generate_relationship( :param base: the :class:`.AutomapBase` class doing the prepare. :param direction: indicate the "direction" of the relationship; this will - be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOONE`. + be one of :data:`.ONETOMANY`, :data:`.MANYTOONE`, :data:`.MANYTOMANY`. :param return_fn: the function that is used by default to create the relationship. This will be either :func:`.relationship` or @@ -877,6 +895,19 @@ def _relationships_for_fks(automap_base, map_config, table_to_map_config, constraint ) + o2m_kws = {} + nullable = False not in set([fk.parent.nullable for fk in fks]) + if not nullable: + o2m_kws['cascade'] = "all, delete-orphan" + + if constraint.ondelete and \ + constraint.ondelete.lower() == "cascade": + o2m_kws['passive_deletes'] = True + else: + if constraint.ondelete and \ + constraint.ondelete.lower() == "set null": + o2m_kws['passive_deletes'] = True + create_backref = backref_name not in referred_cfg.properties if relationship_name not in map_config.properties: @@ -885,7 +916,8 @@ def _relationships_for_fks(automap_base, map_config, table_to_map_config, automap_base, interfaces.ONETOMANY, backref, backref_name, referred_cls, local_cls, - collection_class=collection_class) + collection_class=collection_class, + **o2m_kws) else: backref_obj = None rel = generate_relationship(automap_base, @@ -916,7 +948,8 @@ def _relationships_for_fks(automap_base, map_config, table_to_map_config, fk.parent for fk in constraint.elements], back_populates=relationship_name, - collection_class=collection_class) + collection_class=collection_class, + **o2m_kws) if rel is not None: referred_cfg.properties[backref_name] = rel map_config.properties[ diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/baked.py b/lib/python3.5/site-packages/sqlalchemy/ext/baked.py new file mode 100644 index 0000000..2504be9 --- /dev/null +++ b/lib/python3.5/site-packages/sqlalchemy/ext/baked.py @@ -0,0 +1,523 @@ +# sqlalchemy/ext/baked.py +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php +"""Baked query extension. 
+ +Provides a creational pattern for the :class:`.query.Query` object which +allows the fully constructed object, Core select statement, and string +compiled result to be fully cached. + + +""" + +from ..orm.query import Query +from ..orm import strategies, attributes, properties, \ + strategy_options, util as orm_util, interfaces +from .. import log as sqla_log +from ..sql import util as sql_util +from ..orm import exc as orm_exc +from .. import exc as sa_exc +from .. import util + +import copy +import logging + +log = logging.getLogger(__name__) + + +class BakedQuery(object): + """A builder object for :class:`.query.Query` objects.""" + + __slots__ = 'steps', '_bakery', '_cache_key', '_spoiled' + + def __init__(self, bakery, initial_fn, args=()): + self._cache_key = () + self._update_cache_key(initial_fn, args) + self.steps = [initial_fn] + self._spoiled = False + self._bakery = bakery + + @classmethod + def bakery(cls, size=200): + """Construct a new bakery.""" + + _bakery = util.LRUCache(size) + + def call(initial_fn, *args): + return cls(_bakery, initial_fn, args) + + return call + + def _clone(self): + b1 = BakedQuery.__new__(BakedQuery) + b1._cache_key = self._cache_key + b1.steps = list(self.steps) + b1._bakery = self._bakery + b1._spoiled = self._spoiled + return b1 + + def _update_cache_key(self, fn, args=()): + self._cache_key += (fn.__code__,) + args + + def __iadd__(self, other): + if isinstance(other, tuple): + self.add_criteria(*other) + else: + self.add_criteria(other) + return self + + def __add__(self, other): + if isinstance(other, tuple): + return self.with_criteria(*other) + else: + return self.with_criteria(other) + + def add_criteria(self, fn, *args): + """Add a criteria function to this :class:`.BakedQuery`. + + This is equivalent to using the ``+=`` operator to + modify a :class:`.BakedQuery` in-place. + + """ + self._update_cache_key(fn, args) + self.steps.append(fn) + return self + + def with_criteria(self, fn, *args): + """Add a criteria function to a :class:`.BakedQuery` cloned from this one. + + This is equivalent to using the ``+`` operator to + produce a new :class:`.BakedQuery` with modifications. + + """ + return self._clone().add_criteria(fn, *args) + + def for_session(self, session): + """Return a :class:`.Result` object for this :class:`.BakedQuery`. + + This is equivalent to calling the :class:`.BakedQuery` as a + Python callable, e.g. ``result = my_baked_query(session)``. + + """ + return Result(self, session) + + def __call__(self, session): + return self.for_session(session) + + def spoil(self, full=False): + """Cancel any query caching that will occur on this BakedQuery object. + + The BakedQuery can continue to be used normally, however additional + creational functions will not be cached; they will be called + on every invocation. + + This is to support the case where a particular step in constructing + a baked query disqualifies the query from being cacheable, such + as a variant that relies upon some uncacheable value. + + :param full: if False, only functions added to this + :class:`.BakedQuery` object subsequent to the spoil step will be + non-cached; the state of the :class:`.BakedQuery` up until + this point will be pulled from the cache. If True, then the + entire :class:`.Query` object is built from scratch each + time, with all creational functions being called on each + invocation. 
+ + """ + if not full: + _spoil_point = self._clone() + _spoil_point._cache_key += ('_query_only', ) + self.steps = [_spoil_point._retrieve_baked_query] + self._spoiled = True + return self + + def _retrieve_baked_query(self, session): + query = self._bakery.get(self._cache_key, None) + if query is None: + query = self._as_query(session) + self._bakery[self._cache_key] = query.with_session(None) + return query.with_session(session) + + def _bake(self, session): + query = self._as_query(session) + + context = query._compile_context() + self._bake_subquery_loaders(session, context) + context.session = None + context.query = query = context.query.with_session(None) + query._execution_options = query._execution_options.union( + {"compiled_cache": self._bakery} + ) + # we'll be holding onto the query for some of its state, + # so delete some compilation-use-only attributes that can take up + # space + for attr in ( + '_correlate', '_from_obj', '_mapper_adapter_map', + '_joinpath', '_joinpoint'): + query.__dict__.pop(attr, None) + self._bakery[self._cache_key] = context + return context + + def _as_query(self, session): + query = self.steps[0](session) + + for step in self.steps[1:]: + query = step(query) + return query + + def _bake_subquery_loaders(self, session, context): + """convert subquery eager loaders in the cache into baked queries. + + For subquery eager loading to work, all we need here is that the + Query point to the correct session when it is run. However, since + we are "baking" anyway, we may as well also turn the query into + a "baked" query so that we save on performance too. + + """ + context.attributes['baked_queries'] = baked_queries = [] + for k, v in list(context.attributes.items()): + if isinstance(v, Query): + if 'subquery' in k: + bk = BakedQuery(self._bakery, lambda *args: v) + bk._cache_key = self._cache_key + k + bk._bake(session) + baked_queries.append((k, bk._cache_key, v)) + del context.attributes[k] + + def _unbake_subquery_loaders(self, session, context, params): + """Retrieve subquery eager loaders stored by _bake_subquery_loaders + and turn them back into Result objects that will iterate just + like a Query object. + + """ + for k, cache_key, query in context.attributes["baked_queries"]: + bk = BakedQuery(self._bakery, lambda sess: query.with_session(sess)) + bk._cache_key = cache_key + context.attributes[k] = bk.for_session(session).params(**params) + + +class Result(object): + """Invokes a :class:`.BakedQuery` against a :class:`.Session`. + + The :class:`.Result` object is where the actual :class:`.query.Query` + object gets created, or retrieved from the cache, + against a target :class:`.Session`, and is then invoked for results. 
+ + """ + __slots__ = 'bq', 'session', '_params' + + def __init__(self, bq, session): + self.bq = bq + self.session = session + self._params = {} + + def params(self, *args, **kw): + """Specify parameters to be replaced into the string SQL statement.""" + + if len(args) == 1: + kw.update(args[0]) + elif len(args) > 0: + raise sa_exc.ArgumentError( + "params() takes zero or one positional argument, " + "which is a dictionary.") + self._params.update(kw) + return self + + def _as_query(self): + return self.bq._as_query(self.session).params(self._params) + + def __str__(self): + return str(self._as_query()) + + def __iter__(self): + bq = self.bq + if bq._spoiled: + return iter(self._as_query()) + + baked_context = bq._bakery.get(bq._cache_key, None) + if baked_context is None: + baked_context = bq._bake(self.session) + + context = copy.copy(baked_context) + context.session = self.session + context.attributes = context.attributes.copy() + + bq._unbake_subquery_loaders(self.session, context, self._params) + + context.statement.use_labels = True + if context.autoflush and not context.populate_existing: + self.session._autoflush() + return context.query.params(self._params).\ + with_session(self.session)._execute_and_instances(context) + + def first(self): + """Return the first row. + + Equivalent to :meth:`.Query.first`. + + """ + bq = self.bq.with_criteria(lambda q: q.slice(0, 1)) + ret = list(bq.for_session(self.session).params(self._params)) + if len(ret) > 0: + return ret[0] + else: + return None + + def one(self): + """Return exactly one result or raise an exception. + + Equivalent to :meth:`.Query.one`. + + """ + ret = list(self) + + l = len(ret) + if l == 1: + return ret[0] + elif l == 0: + raise orm_exc.NoResultFound("No row was found for one()") + else: + raise orm_exc.MultipleResultsFound( + "Multiple rows were found for one()") + + def one_or_none(self): + """Return one or zero results, or raise an exception for multiple + rows. + + Equivalent to :meth:`.Query.one_or_none`. + + .. versionadded:: 1.0.9 + + """ + ret = list(self) + + l = len(ret) + if l == 1: + return ret[0] + elif l == 0: + return None + else: + raise orm_exc.MultipleResultsFound( + "Multiple rows were found for one_or_none()") + + def all(self): + """Return all rows. + + Equivalent to :meth:`.Query.all`. + + """ + return list(self) + + def get(self, ident): + """Retrieve an object based on identity. + + Equivalent to :meth:`.Query.get`. 
+ + """ + + query = self.bq.steps[0](self.session) + return query._get_impl(ident, self._load_on_ident) + + def _load_on_ident(self, query, key): + """Load the given identity key from the database.""" + + ident = key[1] + + mapper = query._mapper_zero() + + _get_clause, _get_params = mapper._get_clause + + def setup(query): + _lcl_get_clause = _get_clause + q = query._clone() + q._get_condition() + q._order_by = None + + # None present in ident - turn those comparisons + # into "IS NULL" + if None in ident: + nones = set([ + _get_params[col].key for col, value in + zip(mapper.primary_key, ident) if value is None + ]) + _lcl_get_clause = sql_util.adapt_criterion_to_null( + _lcl_get_clause, nones) + + _lcl_get_clause = q._adapt_clause(_lcl_get_clause, True, False) + q._criterion = _lcl_get_clause + return q + + # cache the query against a key that includes + # which positions in the primary key are NULL + # (remember, we can map to an OUTER JOIN) + bq = self.bq + + # add the clause we got from mapper._get_clause to the cache + # key so that if a race causes multiple calls to _get_clause, + # we've cached on ours + bq = bq._clone() + bq._cache_key += (_get_clause, ) + + bq = bq.with_criteria(setup, tuple(elem is None for elem in ident)) + + params = dict([ + (_get_params[primary_key].key, id_val) + for id_val, primary_key in zip(ident, mapper.primary_key) + ]) + + result = list(bq.for_session(self.session).params(**params)) + l = len(result) + if l > 1: + raise orm_exc.MultipleResultsFound() + elif l: + return result[0] + else: + return None + + +def bake_lazy_loaders(): + """Enable the use of baked queries for all lazyloaders systemwide. + + This operation should be safe for all lazy loaders, and will reduce + Python overhead for these operations. + + """ + BakedLazyLoader._strategy_keys[:] = [] + + properties.RelationshipProperty.strategy_for( + lazy="select")(BakedLazyLoader) + properties.RelationshipProperty.strategy_for( + lazy=True)(BakedLazyLoader) + properties.RelationshipProperty.strategy_for( + lazy="baked_select")(BakedLazyLoader) + + strategies.LazyLoader._strategy_keys[:] = BakedLazyLoader._strategy_keys[:] + + +def unbake_lazy_loaders(): + """Disable the use of baked queries for all lazyloaders systemwide. + + This operation reverts the changes produced by :func:`.bake_lazy_loaders`. 
+ + """ + strategies.LazyLoader._strategy_keys[:] = [] + BakedLazyLoader._strategy_keys[:] = [] + + properties.RelationshipProperty.strategy_for( + lazy="select")(strategies.LazyLoader) + properties.RelationshipProperty.strategy_for( + lazy=True)(strategies.LazyLoader) + properties.RelationshipProperty.strategy_for( + lazy="baked_select")(BakedLazyLoader) + assert strategies.LazyLoader._strategy_keys + + +@sqla_log.class_logger +@properties.RelationshipProperty.strategy_for(lazy="baked_select") +class BakedLazyLoader(strategies.LazyLoader): + + def _emit_lazyload(self, session, state, ident_key, passive): + q = BakedQuery( + self.mapper._compiled_cache, + lambda session: session.query(self.mapper)) + q.add_criteria( + lambda q: q._adapt_all_clauses()._with_invoke_all_eagers(False), + self.parent_property) + + if not self.parent_property.bake_queries: + q.spoil(full=True) + + if self.parent_property.secondary is not None: + q.add_criteria( + lambda q: + q.select_from(self.mapper, self.parent_property.secondary)) + + pending = not state.key + + # don't autoflush on pending + if pending or passive & attributes.NO_AUTOFLUSH: + q.add_criteria(lambda q: q.autoflush(False)) + + if state.load_path: + q.spoil() + q.add_criteria( + lambda q: + q._with_current_path(state.load_path[self.parent_property])) + + if state.load_options: + q.spoil() + q.add_criteria( + lambda q: q._conditional_options(*state.load_options)) + + if self.use_get: + return q(session)._load_on_ident( + session.query(self.mapper), ident_key) + + if self.parent_property.order_by: + q.add_criteria( + lambda q: + q.order_by(*util.to_list(self.parent_property.order_by))) + + for rev in self.parent_property._reverse_property: + # reverse props that are MANYTOONE are loading *this* + # object from get(), so don't need to eager out to those. + if rev.direction is interfaces.MANYTOONE and \ + rev._use_get and \ + not isinstance(rev.strategy, strategies.LazyLoader): + q.add_criteria( + lambda q: + q.options( + strategy_options.Load( + rev.parent).baked_lazyload(rev.key))) + + lazy_clause, params = self._generate_lazy_clause(state, passive) + + if pending: + if orm_util._none_set.intersection(params.values()): + return None + + q.add_criteria(lambda q: q.filter(lazy_clause)) + result = q(session).params(**params).all() + if self.uselist: + return result + else: + l = len(result) + if l: + if l > 1: + util.warn( + "Multiple rows returned with " + "uselist=False for lazily-loaded attribute '%s' " + % self.parent_property) + + return result[0] + else: + return None + + +@strategy_options.loader_option() +def baked_lazyload(loadopt, attr): + """Indicate that the given attribute should be loaded using "lazy" + loading with a "baked" query used in the load. 
+ + """ + return loadopt.set_relationship_strategy(attr, {"lazy": "baked_select"}) + + +@baked_lazyload._add_unbound_fn +def baked_lazyload(*keys): + return strategy_options._UnboundLoad._from_keys( + strategy_options._UnboundLoad.baked_lazyload, keys, False, {}) + + +@baked_lazyload._add_unbound_all_fn +def baked_lazyload_all(*keys): + return strategy_options._UnboundLoad._from_keys( + strategy_options._UnboundLoad.baked_lazyload, keys, True, {}) + +baked_lazyload = baked_lazyload._unbound_fn +baked_lazyload_all = baked_lazyload_all._unbound_all_fn + +bakery = BakedQuery.bakery diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/compiler.py b/lib/python3.5/site-packages/sqlalchemy/ext/compiler.py index 8d169aa..86156be 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/compiler.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/compiler.py @@ -1,5 +1,5 @@ # ext/compiler.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -121,9 +121,19 @@ below where we generate a CHECK constraint that embeds a SQL expression:: def compile_my_constraint(constraint, ddlcompiler, **kw): return "CONSTRAINT %s CHECK (%s)" % ( constraint.name, - ddlcompiler.sql_compiler.process(constraint.expression) + ddlcompiler.sql_compiler.process( + constraint.expression, literal_binds=True) ) +Above, we add an additional flag to the process step as called by +:meth:`.SQLCompiler.process`, which is the ``literal_binds`` flag. This +indicates that any SQL expression which refers to a :class:`.BindParameter` +object or other "literal" object such as those which refer to strings or +integers should be rendered **in-place**, rather than being referred to as +a bound parameter; when emitting DDL, bound parameters are typically not +supported. + + .. _enabling_compiled_autocommit: Enabling Autocommit on a Construct diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/declarative/__init__.py b/lib/python3.5/site-packages/sqlalchemy/ext/declarative/__init__.py index 3cbc85c..f96a402 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/declarative/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/declarative/__init__.py @@ -1,1310 +1,10 @@ # ext/declarative/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -""" -Synopsis -======== - -SQLAlchemy object-relational configuration involves the -combination of :class:`.Table`, :func:`.mapper`, and class -objects to define a mapped class. -:mod:`~sqlalchemy.ext.declarative` allows all three to be -expressed at once within the class declaration. As much as -possible, regular SQLAlchemy schema and ORM constructs are -used directly, so that configuration between "classical" ORM -usage and declarative remain highly similar. - -As a simple example:: - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - - class SomeClass(Base): - __tablename__ = 'some_table' - id = Column(Integer, primary_key=True) - name = Column(String(50)) - -Above, the :func:`declarative_base` callable returns a new base class from -which all mapped classes should inherit. When the class definition is -completed, a new :class:`.Table` and :func:`.mapper` will have been generated. 
- -The resulting table and mapper are accessible via -``__table__`` and ``__mapper__`` attributes on the -``SomeClass`` class:: - - # access the mapped Table - SomeClass.__table__ - - # access the Mapper - SomeClass.__mapper__ - -Defining Attributes -=================== - -In the previous example, the :class:`.Column` objects are -automatically named with the name of the attribute to which they are -assigned. - -To name columns explicitly with a name distinct from their mapped attribute, -just give the column a name. Below, column "some_table_id" is mapped to the -"id" attribute of `SomeClass`, but in SQL will be represented as -"some_table_id":: - - class SomeClass(Base): - __tablename__ = 'some_table' - id = Column("some_table_id", Integer, primary_key=True) - -Attributes may be added to the class after its construction, and they will be -added to the underlying :class:`.Table` and -:func:`.mapper` definitions as appropriate:: - - SomeClass.data = Column('data', Unicode) - SomeClass.related = relationship(RelatedInfo) - -Classes which are constructed using declarative can interact freely -with classes that are mapped explicitly with :func:`.mapper`. - -It is recommended, though not required, that all tables -share the same underlying :class:`~sqlalchemy.schema.MetaData` object, -so that string-configured :class:`~sqlalchemy.schema.ForeignKey` -references can be resolved without issue. - -Accessing the MetaData -======================= - -The :func:`declarative_base` base class contains a -:class:`.MetaData` object where newly defined -:class:`.Table` objects are collected. This object is -intended to be accessed directly for -:class:`.MetaData`-specific operations. Such as, to issue -CREATE statements for all tables:: - - engine = create_engine('sqlite://') - Base.metadata.create_all(engine) - -:func:`declarative_base` can also receive a pre-existing -:class:`.MetaData` object, which allows a -declarative setup to be associated with an already -existing traditional collection of :class:`~sqlalchemy.schema.Table` -objects:: - - mymetadata = MetaData() - Base = declarative_base(metadata=mymetadata) - - -.. _declarative_configuring_relationships: - -Configuring Relationships -========================= - -Relationships to other classes are done in the usual way, with the added -feature that the class specified to :func:`~sqlalchemy.orm.relationship` -may be a string name. 
The "class registry" associated with ``Base`` -is used at mapper compilation time to resolve the name into the actual -class object, which is expected to have been defined once the mapper -configuration is used:: - - class User(Base): - __tablename__ = 'users' - - id = Column(Integer, primary_key=True) - name = Column(String(50)) - addresses = relationship("Address", backref="user") - - class Address(Base): - __tablename__ = 'addresses' - - id = Column(Integer, primary_key=True) - email = Column(String(50)) - user_id = Column(Integer, ForeignKey('users.id')) - -Column constructs, since they are just that, are immediately usable, -as below where we define a primary join condition on the ``Address`` -class using them:: - - class Address(Base): - __tablename__ = 'addresses' - - id = Column(Integer, primary_key=True) - email = Column(String(50)) - user_id = Column(Integer, ForeignKey('users.id')) - user = relationship(User, primaryjoin=user_id == User.id) - -In addition to the main argument for :func:`~sqlalchemy.orm.relationship`, -other arguments which depend upon the columns present on an as-yet -undefined class may also be specified as strings. These strings are -evaluated as Python expressions. The full namespace available within -this evaluation includes all classes mapped for this declarative base, -as well as the contents of the ``sqlalchemy`` package, including -expression functions like :func:`~sqlalchemy.sql.expression.desc` and -:attr:`~sqlalchemy.sql.expression.func`:: - - class User(Base): - # .... - addresses = relationship("Address", - order_by="desc(Address.email)", - primaryjoin="Address.user_id==User.id") - -For the case where more than one module contains a class of the same name, -string class names can also be specified as module-qualified paths -within any of these string expressions:: - - class User(Base): - # .... - addresses = relationship("myapp.model.address.Address", - order_by="desc(myapp.model.address.Address.email)", - primaryjoin="myapp.model.address.Address.user_id==" - "myapp.model.user.User.id") - -The qualified path can be any partial path that removes ambiguity between -the names. For example, to disambiguate between -``myapp.model.address.Address`` and ``myapp.model.lookup.Address``, -we can specify ``address.Address`` or ``lookup.Address``:: - - class User(Base): - # .... - addresses = relationship("address.Address", - order_by="desc(address.Address.email)", - primaryjoin="address.Address.user_id==" - "User.id") - -.. versionadded:: 0.8 - module-qualified paths can be used when specifying string arguments - with Declarative, in order to specify specific modules. - -Two alternatives also exist to using string-based attributes. A lambda -can also be used, which will be evaluated after all mappers have been -configured:: - - class User(Base): - # ... - addresses = relationship(lambda: Address, - order_by=lambda: desc(Address.email), - primaryjoin=lambda: Address.user_id==User.id) - -Or, the relationship can be added to the class explicitly after the classes -are available:: - - User.addresses = relationship(Address, - primaryjoin=Address.user_id==User.id) - - - -.. _declarative_many_to_many: - -Configuring Many-to-Many Relationships -====================================== - -Many-to-many relationships are also declared in the same way -with declarative as with traditional mappings. The -``secondary`` argument to -:func:`.relationship` is as usual passed a -:class:`.Table` object, which is typically declared in the -traditional way. 
The :class:`.Table` usually shares -the :class:`.MetaData` object used by the declarative base:: - - keywords = Table( - 'keywords', Base.metadata, - Column('author_id', Integer, ForeignKey('authors.id')), - Column('keyword_id', Integer, ForeignKey('keywords.id')) - ) - - class Author(Base): - __tablename__ = 'authors' - id = Column(Integer, primary_key=True) - keywords = relationship("Keyword", secondary=keywords) - -Like other :func:`~sqlalchemy.orm.relationship` arguments, a string is accepted -as well, passing the string name of the table as defined in the -``Base.metadata.tables`` collection:: - - class Author(Base): - __tablename__ = 'authors' - id = Column(Integer, primary_key=True) - keywords = relationship("Keyword", secondary="keywords") - -As with traditional mapping, its generally not a good idea to use -a :class:`.Table` as the "secondary" argument which is also mapped to -a class, unless the :func:`.relationship` is declared with ``viewonly=True``. -Otherwise, the unit-of-work system may attempt duplicate INSERT and -DELETE statements against the underlying table. - -.. _declarative_sql_expressions: - -Defining SQL Expressions -======================== - -See :ref:`mapper_sql_expressions` for examples on declaratively -mapping attributes to SQL expressions. - -.. _declarative_table_args: - -Table Configuration -=================== - -Table arguments other than the name, metadata, and mapped Column -arguments are specified using the ``__table_args__`` class attribute. -This attribute accommodates both positional as well as keyword -arguments that are normally sent to the -:class:`~sqlalchemy.schema.Table` constructor. -The attribute can be specified in one of two forms. One is as a -dictionary:: - - class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = {'mysql_engine':'InnoDB'} - -The other, a tuple, where each argument is positional -(usually constraints):: - - class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = ( - ForeignKeyConstraint(['id'], ['remote_table.id']), - UniqueConstraint('foo'), - ) - -Keyword arguments can be specified with the above form by -specifying the last argument as a dictionary:: - - class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = ( - ForeignKeyConstraint(['id'], ['remote_table.id']), - UniqueConstraint('foo'), - {'autoload':True} - ) - -Using a Hybrid Approach with __table__ -======================================= - -As an alternative to ``__tablename__``, a direct -:class:`~sqlalchemy.schema.Table` construct may be used. The -:class:`~sqlalchemy.schema.Column` objects, which in this case require -their names, will be added to the mapping just like a regular mapping -to a table:: - - class MyClass(Base): - __table__ = Table('my_table', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) - ) - -``__table__`` provides a more focused point of control for establishing -table metadata, while still getting most of the benefits of using declarative. 
-An application that uses reflection might want to load table metadata elsewhere -and pass it to declarative classes:: - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - Base.metadata.reflect(some_engine) - - class User(Base): - __table__ = metadata.tables['user'] - - class Address(Base): - __table__ = metadata.tables['address'] - -Some configuration schemes may find it more appropriate to use ``__table__``, -such as those which already take advantage of the data-driven nature of -:class:`.Table` to customize and/or automate schema definition. - -Note that when the ``__table__`` approach is used, the object is immediately -usable as a plain :class:`.Table` within the class declaration body itself, -as a Python class is only another syntactical block. Below this is illustrated -by using the ``id`` column in the ``primaryjoin`` condition of a -:func:`.relationship`:: - - class MyClass(Base): - __table__ = Table('my_table', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) - ) - - widgets = relationship(Widget, - primaryjoin=Widget.myclass_id==__table__.c.id) - -Similarly, mapped attributes which refer to ``__table__`` can be placed inline, -as below where we assign the ``name`` column to the attribute ``_name``, -generating a synonym for ``name``:: - - from sqlalchemy.ext.declarative import synonym_for - - class MyClass(Base): - __table__ = Table('my_table', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) - ) - - _name = __table__.c.name - - @synonym_for("_name") - def name(self): - return "Name: %s" % _name - -Using Reflection with Declarative -================================= - -It's easy to set up a :class:`.Table` that uses ``autoload=True`` -in conjunction with a mapped class:: - - class MyClass(Base): - __table__ = Table('mytable', Base.metadata, - autoload=True, autoload_with=some_engine) - -However, one improvement that can be made here is to not -require the :class:`.Engine` to be available when classes are -being first declared. To achieve this, use the -:class:`.DeferredReflection` mixin, which sets up mappings -only after a special ``prepare(engine)`` step is called:: - - from sqlalchemy.ext.declarative import declarative_base, DeferredReflection - - Base = declarative_base(cls=DeferredReflection) - - class Foo(Base): - __tablename__ = 'foo' - bars = relationship("Bar") - - class Bar(Base): - __tablename__ = 'bar' - - # illustrate overriding of "bar.foo_id" to have - # a foreign key constraint otherwise not - # reflected, such as when using MySQL - foo_id = Column(Integer, ForeignKey('foo.id')) - - Base.prepare(e) - -.. versionadded:: 0.8 - Added :class:`.DeferredReflection`. - -Mapper Configuration -==================== - -Declarative makes use of the :func:`~.orm.mapper` function internally -when it creates the mapping to the declared table. The options -for :func:`~.orm.mapper` are passed directly through via the -``__mapper_args__`` class attribute. As always, arguments which reference -locally mapped columns can reference them directly from within the -class declaration:: - - from datetime import datetime - - class Widget(Base): - __tablename__ = 'widgets' - - id = Column(Integer, primary_key=True) - timestamp = Column(DateTime, nullable=False) - - __mapper_args__ = { - 'version_id_col': timestamp, - 'version_id_generator': lambda v:datetime.now() - } - -.. 
_declarative_inheritance: - -Inheritance Configuration -========================= - -Declarative supports all three forms of inheritance as intuitively -as possible. The ``inherits`` mapper keyword argument is not needed -as declarative will determine this from the class itself. The various -"polymorphic" keyword arguments are specified using ``__mapper_args__``. - -Joined Table Inheritance -~~~~~~~~~~~~~~~~~~~~~~~~ - -Joined table inheritance is defined as a subclass that defines its own -table:: - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - __tablename__ = 'engineers' - __mapper_args__ = {'polymorphic_identity': 'engineer'} - id = Column(Integer, ForeignKey('people.id'), primary_key=True) - primary_language = Column(String(50)) - -Note that above, the ``Engineer.id`` attribute, since it shares the -same attribute name as the ``Person.id`` attribute, will in fact -represent the ``people.id`` and ``engineers.id`` columns together, -with the "Engineer.id" column taking precedence if queried directly. -To provide the ``Engineer`` class with an attribute that represents -only the ``engineers.id`` column, give it a different attribute name:: - - class Engineer(Person): - __tablename__ = 'engineers' - __mapper_args__ = {'polymorphic_identity': 'engineer'} - engineer_id = Column('id', Integer, ForeignKey('people.id'), - primary_key=True) - primary_language = Column(String(50)) - - -.. versionchanged:: 0.7 joined table inheritance favors the subclass - column over that of the superclass, such as querying above - for ``Engineer.id``. Prior to 0.7 this was the reverse. - -.. _declarative_single_table: - -Single Table Inheritance -~~~~~~~~~~~~~~~~~~~~~~~~ - -Single table inheritance is defined as a subclass that does not have -its own table; you just leave out the ``__table__`` and ``__tablename__`` -attributes:: - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - __mapper_args__ = {'polymorphic_identity': 'engineer'} - primary_language = Column(String(50)) - -When the above mappers are configured, the ``Person`` class is mapped -to the ``people`` table *before* the ``primary_language`` column is -defined, and this column will not be included in its own mapping. -When ``Engineer`` then defines the ``primary_language`` column, the -column is added to the ``people`` table so that it is included in the -mapping for ``Engineer`` and is also part of the table's full set of -columns. Columns which are not mapped to ``Person`` are also excluded -from any other single or joined inheriting classes using the -``exclude_properties`` mapper argument. Below, ``Manager`` will have -all the attributes of ``Person`` and ``Manager`` but *not* the -``primary_language`` attribute of ``Engineer``:: - - class Manager(Person): - __mapper_args__ = {'polymorphic_identity': 'manager'} - golf_swing = Column(String(50)) - -The attribute exclusion logic is provided by the -``exclude_properties`` mapper argument, and declarative's default -behavior can be disabled by passing an explicit ``exclude_properties`` -collection (empty or otherwise) to the ``__mapper_args__``. 
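A hedged sketch of that explicit override, reusing the single-table ``Person``/``Engineer``/``Manager`` hierarchy from above; passing a collection of our own replaces the automatically derived one::

    class Manager(Person):
        __mapper_args__ = {
            'polymorphic_identity': 'manager',
            # an explicit collection disables the automatic exclusion
            # logic; an empty list here would instead map every column,
            # including Engineer's primary_language, onto Manager
            'exclude_properties': ['primary_language'],
        }
        golf_swing = Column(String(50))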
- -Resolving Column Conflicts -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Note above that the ``primary_language`` and ``golf_swing`` columns -are "moved up" to be applied to ``Person.__table__``, as a result of their -declaration on a subclass that has no table of its own. A tricky case -comes up when two subclasses want to specify *the same* column, as below:: - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - __mapper_args__ = {'polymorphic_identity': 'engineer'} - start_date = Column(DateTime) - - class Manager(Person): - __mapper_args__ = {'polymorphic_identity': 'manager'} - start_date = Column(DateTime) - -Above, the ``start_date`` column declared on both ``Engineer`` and ``Manager`` -will result in an error:: - - sqlalchemy.exc.ArgumentError: Column 'start_date' on class - conflicts with existing - column 'people.start_date' - -In a situation like this, Declarative can't be sure -of the intent, especially if the ``start_date`` columns had, for example, -different types. A situation like this can be resolved by using -:class:`.declared_attr` to define the :class:`.Column` conditionally, taking -care to return the **existing column** via the parent ``__table__`` if it -already exists:: - - from sqlalchemy.ext.declarative import declared_attr - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - __mapper_args__ = {'polymorphic_identity': 'engineer'} - - @declared_attr - def start_date(cls): - "Start date column, if not present already." - return Person.__table__.c.get('start_date', Column(DateTime)) - - class Manager(Person): - __mapper_args__ = {'polymorphic_identity': 'manager'} - - @declared_attr - def start_date(cls): - "Start date column, if not present already." - return Person.__table__.c.get('start_date', Column(DateTime)) - -Above, when ``Manager`` is mapped, the ``start_date`` column is -already present on the ``Person`` class. Declarative lets us return -that :class:`.Column` as a result in this case, where it knows to skip -re-assigning the same column. If the mapping is mis-configured such -that the ``start_date`` column is accidentally re-assigned to a -different table (such as, if we changed ``Manager`` to be joined -inheritance without fixing ``start_date``), an error is raised which -indicates an existing :class:`.Column` is trying to be re-assigned to -a different owning :class:`.Table`. - -.. versionadded:: 0.8 :class:`.declared_attr` can be used on a non-mixin - class, and the returned :class:`.Column` or other mapped attribute - will be applied to the mapping as any other attribute. Previously, - the resulting attribute would be ignored, and also result in a warning - being emitted when a subclass was created. - -.. versionadded:: 0.8 :class:`.declared_attr`, when used either with a - mixin or non-mixin declarative class, can return an existing - :class:`.Column` already assigned to the parent :class:`.Table`, - to indicate that the re-assignment of the :class:`.Column` should be - skipped, however should still be mapped on the target class, - in order to resolve duplicate column conflicts. 
- -The same concept can be used with mixin classes (see -:ref:`declarative_mixins`):: - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class HasStartDate(object): - @declared_attr - def start_date(cls): - return cls.__table__.c.get('start_date', Column(DateTime)) - - class Engineer(HasStartDate, Person): - __mapper_args__ = {'polymorphic_identity': 'engineer'} - - class Manager(HasStartDate, Person): - __mapper_args__ = {'polymorphic_identity': 'manager'} - -The above mixin checks the local ``__table__`` attribute for the column. -Because we're using single table inheritance, we're sure that in this case, -``cls.__table__`` refers to ``People.__table__``. If we were mixing joined- -and single-table inheritance, we might want our mixin to check more carefully -if ``cls.__table__`` is really the :class:`.Table` we're looking for. - -Concrete Table Inheritance -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Concrete is defined as a subclass which has its own table and sets the -``concrete`` keyword argument to ``True``:: - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - name = Column(String(50)) - - class Engineer(Person): - __tablename__ = 'engineers' - __mapper_args__ = {'concrete':True} - id = Column(Integer, primary_key=True) - primary_language = Column(String(50)) - name = Column(String(50)) - -Usage of an abstract base class is a little less straightforward as it -requires usage of :func:`~sqlalchemy.orm.util.polymorphic_union`, -which needs to be created with the :class:`.Table` objects -before the class is built:: - - engineers = Table('engineers', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('primary_language', String(50)) - ) - managers = Table('managers', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('golf_swing', String(50)) - ) - - punion = polymorphic_union({ - 'engineer':engineers, - 'manager':managers - }, 'type', 'punion') - - class Person(Base): - __table__ = punion - __mapper_args__ = {'polymorphic_on':punion.c.type} - - class Engineer(Person): - __table__ = engineers - __mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True} - - class Manager(Person): - __table__ = managers - __mapper_args__ = {'polymorphic_identity':'manager', 'concrete':True} - -.. _declarative_concrete_helpers: - -Using the Concrete Helpers -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Helper classes provides a simpler pattern for concrete inheritance. -With these objects, the ``__declare_first__`` helper is used to configure the -"polymorphic" loader for the mapper after all subclasses have been declared. - -.. 
versionadded:: 0.7.3 - -An abstract base can be declared using the -:class:`.AbstractConcreteBase` class:: - - from sqlalchemy.ext.declarative import AbstractConcreteBase - - class Employee(AbstractConcreteBase, Base): - pass - -To have a concrete ``employee`` table, use :class:`.ConcreteBase` instead:: - - from sqlalchemy.ext.declarative import ConcreteBase - - class Employee(ConcreteBase, Base): - __tablename__ = 'employee' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'employee', - 'concrete':True} - - -Either ``Employee`` base can be used in the normal fashion:: - - class Manager(Employee): - __tablename__ = 'manager' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - manager_data = Column(String(40)) - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'concrete':True} - - class Engineer(Employee): - __tablename__ = 'engineer' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - engineer_info = Column(String(40)) - __mapper_args__ = {'polymorphic_identity':'engineer', - 'concrete':True} - - -The :class:`.AbstractConcreteBase` class is itself mapped, and can be -used as a target of relationships:: - - class Company(Base): - __tablename__ = 'company' - - id = Column(Integer, primary_key=True) - employees = relationship("Employee", - primaryjoin="Company.id == Employee.company_id") - - -.. versionchanged:: 0.9.3 Support for use of :class:`.AbstractConcreteBase` - as the target of a :func:`.relationship` has been improved. - -It can also be queried directly:: - - for employee in session.query(Employee).filter(Employee.name == 'qbert'): - print(employee) - - -.. _declarative_mixins: - -Mixin and Custom Base Classes -============================== - -A common need when using :mod:`~sqlalchemy.ext.declarative` is to -share some functionality, such as a set of common columns, some common -table options, or other mapped properties, across many -classes. The standard Python idioms for this is to have the classes -inherit from a base which includes these common features. - -When using :mod:`~sqlalchemy.ext.declarative`, this idiom is allowed -via the usage of a custom declarative base class, as well as a "mixin" class -which is inherited from in addition to the primary base. Declarative -includes several helper features to make this work in terms of how -mappings are declared. An example of some commonly mixed-in -idioms is below:: - - from sqlalchemy.ext.declarative import declared_attr - - class MyMixin(object): - - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - - __table_args__ = {'mysql_engine': 'InnoDB'} - __mapper_args__= {'always_refresh': True} - - id = Column(Integer, primary_key=True) - - class MyModel(MyMixin, Base): - name = Column(String(1000)) - -Where above, the class ``MyModel`` will contain an "id" column -as the primary key, a ``__tablename__`` attribute that derives -from the name of the class itself, as well as ``__table_args__`` -and ``__mapper_args__`` defined by the ``MyMixin`` mixin class. - -There's no fixed convention over whether ``MyMixin`` precedes -``Base`` or not. Normal Python method resolution rules apply, and -the above example would work just as well with:: - - class MyModel(Base, MyMixin): - name = Column(String(1000)) - -This works because ``Base`` here doesn't define any of the -variables that ``MyMixin`` defines, i.e. ``__tablename__``, -``__table_args__``, ``id``, etc. 
If the ``Base`` did define -an attribute of the same name, the class placed first in the -inherits list would determine which attribute is used on the -newly defined class. - -Augmenting the Base -~~~~~~~~~~~~~~~~~~~ - -In addition to using a pure mixin, most of the techniques in this -section can also be applied to the base class itself, for patterns that -should apply to all classes derived from a particular base. This is achieved -using the ``cls`` argument of the :func:`.declarative_base` function:: - - from sqlalchemy.ext.declarative import declared_attr - - class Base(object): - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - - __table_args__ = {'mysql_engine': 'InnoDB'} - - id = Column(Integer, primary_key=True) - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base(cls=Base) - - class MyModel(Base): - name = Column(String(1000)) - -Where above, ``MyModel`` and all other classes that derive from ``Base`` will -have a table name derived from the class name, an ``id`` primary key column, -as well as the "InnoDB" engine for MySQL. - -Mixing in Columns -~~~~~~~~~~~~~~~~~ - -The most basic way to specify a column on a mixin is by simple -declaration:: - - class TimestampMixin(object): - created_at = Column(DateTime, default=func.now()) - - class MyModel(TimestampMixin, Base): - __tablename__ = 'test' - - id = Column(Integer, primary_key=True) - name = Column(String(1000)) - -Where above, all declarative classes that include ``TimestampMixin`` -will also have a column ``created_at`` that applies a timestamp to -all row insertions. - -Those familiar with the SQLAlchemy expression language know that -the object identity of clause elements defines their role in a schema. -Two ``Table`` objects ``a`` and ``b`` may both have a column called -``id``, but the way these are differentiated is that ``a.c.id`` -and ``b.c.id`` are two distinct Python objects, referencing their -parent tables ``a`` and ``b`` respectively. - -In the case of the mixin column, it seems that only one -:class:`.Column` object is explicitly created, yet the ultimate -``created_at`` column above must exist as a distinct Python object -for each separate destination class. To accomplish this, the declarative -extension creates a **copy** of each :class:`.Column` object encountered on -a class that is detected as a mixin. - -This copy mechanism is limited to simple columns that have no foreign -keys, as a :class:`.ForeignKey` itself contains references to columns -which can't be properly recreated at this level. For columns that -have foreign keys, as well as for the variety of mapper-level constructs -that require destination-explicit context, the -:class:`~.declared_attr` decorator is provided so that -patterns common to many classes can be defined as callables:: - - from sqlalchemy.ext.declarative import declared_attr - - class ReferenceAddressMixin(object): - @declared_attr - def address_id(cls): - return Column(Integer, ForeignKey('address.id')) - - class User(ReferenceAddressMixin, Base): - __tablename__ = 'user' - id = Column(Integer, primary_key=True) - -Where above, the ``address_id`` class-level callable is executed at the -point at which the ``User`` class is constructed, and the declarative -extension can use the resulting :class:`.Column` object as returned by -the method without the need to copy it. - -.. versionchanged:: > 0.6.5 - Rename 0.6.5 ``sqlalchemy.util.classproperty`` - into :class:`~.declared_attr`. 
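The copy mechanism can be observed directly; a small sketch, assuming the ``Base`` from the preceding examples, showing that each destination class receives its own distinct :class:`.Column`::

    from sqlalchemy import Column, Integer, DateTime, func

    class TimestampMixin(object):
        created_at = Column(DateTime, default=func.now())

    class ModelA(TimestampMixin, Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)

    class ModelB(TimestampMixin, Base):
        __tablename__ = 'b'
        id = Column(Integer, primary_key=True)

    # the mixin's Column was copied once per mapped class
    assert ModelA.__table__.c.created_at is not ModelB.__table__.c.created_at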
- -Columns generated by :class:`~.declared_attr` can also be -referenced by ``__mapper_args__`` to a limited degree, currently -by ``polymorphic_on`` and ``version_id_col``, by specifying the -classdecorator itself into the dictionary - the declarative extension -will resolve them at class construction time:: - - class MyMixin: - @declared_attr - def type_(cls): - return Column(String(50)) - - __mapper_args__= {'polymorphic_on':type_} - - class MyModel(MyMixin, Base): - __tablename__='test' - id = Column(Integer, primary_key=True) - - - -Mixing in Relationships -~~~~~~~~~~~~~~~~~~~~~~~ - -Relationships created by :func:`~sqlalchemy.orm.relationship` are provided -with declarative mixin classes exclusively using the -:class:`.declared_attr` approach, eliminating any ambiguity -which could arise when copying a relationship and its possibly column-bound -contents. Below is an example which combines a foreign key column and a -relationship so that two classes ``Foo`` and ``Bar`` can both be configured to -reference a common target class via many-to-one:: - - class RefTargetMixin(object): - @declared_attr - def target_id(cls): - return Column('target_id', ForeignKey('target.id')) - - @declared_attr - def target(cls): - return relationship("Target") - - class Foo(RefTargetMixin, Base): - __tablename__ = 'foo' - id = Column(Integer, primary_key=True) - - class Bar(RefTargetMixin, Base): - __tablename__ = 'bar' - id = Column(Integer, primary_key=True) - - class Target(Base): - __tablename__ = 'target' - id = Column(Integer, primary_key=True) - -Using Advanced Relationship Arguments (e.g. ``primaryjoin``, etc.) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:func:`~sqlalchemy.orm.relationship` definitions which require explicit -primaryjoin, order_by etc. expressions should in all but the most -simplistic cases use **late bound** forms -for these arguments, meaning, using either the string form or a lambda. -The reason for this is that the related :class:`.Column` objects which are to -be configured using ``@declared_attr`` are not available to another -``@declared_attr`` attribute; while the methods will work and return new -:class:`.Column` objects, those are not the :class:`.Column` objects that -Declarative will be using as it calls the methods on its own, thus using -*different* :class:`.Column` objects. - -The canonical example is the primaryjoin condition that depends upon -another mixed-in column:: - - class RefTargetMixin(object): - @declared_attr - def target_id(cls): - return Column('target_id', ForeignKey('target.id')) - - @declared_attr - def target(cls): - return relationship(Target, - primaryjoin=Target.id==cls.target_id # this is *incorrect* - ) - -Mapping a class using the above mixin, we will get an error like:: - - sqlalchemy.exc.InvalidRequestError: this ForeignKey's parent column is not - yet associated with a Table. - -This is because the ``target_id`` :class:`.Column` we've called upon in our -``target()`` method is not the same :class:`.Column` that declarative is -actually going to map to our table. 
- -The condition above is resolved using a lambda:: - - class RefTargetMixin(object): - @declared_attr - def target_id(cls): - return Column('target_id', ForeignKey('target.id')) - - @declared_attr - def target(cls): - return relationship(Target, - primaryjoin=lambda: Target.id==cls.target_id - ) - -or alternatively, the string form (which ultimately generates a lambda):: - - class RefTargetMixin(object): - @declared_attr - def target_id(cls): - return Column('target_id', ForeignKey('target.id')) - - @declared_attr - def target(cls): - return relationship("Target", - primaryjoin="Target.id==%s.target_id" % cls.__name__ - ) - -Mixing in deferred(), column_property(), and other MapperProperty classes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Like :func:`~sqlalchemy.orm.relationship`, all -:class:`~sqlalchemy.orm.interfaces.MapperProperty` subclasses such as -:func:`~sqlalchemy.orm.deferred`, :func:`~sqlalchemy.orm.column_property`, -etc. ultimately involve references to columns, and therefore, when -used with declarative mixins, have the :class:`.declared_attr` -requirement so that no reliance on copying is needed:: - - class SomethingMixin(object): - - @declared_attr - def dprop(cls): - return deferred(Column(Integer)) - - class Something(SomethingMixin, Base): - __tablename__ = "something" - -Mixing in Association Proxy and Other Attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Mixins can specify user-defined attributes as well as other extension -units such as :func:`.association_proxy`. The usage of -:class:`.declared_attr` is required in those cases where the attribute must -be tailored specifically to the target subclass. An example is when -constructing multiple :func:`.association_proxy` attributes which each -target a different type of child object. Below is an -:func:`.association_proxy` / mixin example which provides a scalar list of -string values to an implementing class:: - - from sqlalchemy import Column, Integer, ForeignKey, String - from sqlalchemy.orm import relationship - from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.ext.declarative import declarative_base, declared_attr - - Base = declarative_base() - - class HasStringCollection(object): - @declared_attr - def _strings(cls): - class StringAttribute(Base): - __tablename__ = cls.string_table_name - id = Column(Integer, primary_key=True) - value = Column(String(50), nullable=False) - parent_id = Column(Integer, - ForeignKey('%s.id' % cls.__tablename__), - nullable=False) - def __init__(self, value): - self.value = value - - return relationship(StringAttribute) - - @declared_attr - def strings(cls): - return association_proxy('_strings', 'value') - - class TypeA(HasStringCollection, Base): - __tablename__ = 'type_a' - string_table_name = 'type_a_strings' - id = Column(Integer(), primary_key=True) - - class TypeB(HasStringCollection, Base): - __tablename__ = 'type_b' - string_table_name = 'type_b_strings' - id = Column(Integer(), primary_key=True) - -Above, the ``HasStringCollection`` mixin produces a :func:`.relationship` -which refers to a newly generated class called ``StringAttribute``. The -``StringAttribute`` class is generated with its own :class:`.Table` -definition which is local to the parent class making usage of the -``HasStringCollection`` mixin. It also produces an :func:`.association_proxy` -object which proxies references to the ``strings`` attribute onto the ``value`` -attribute of each ``StringAttribute`` instance. 
- -``TypeA`` or ``TypeB`` can be instantiated given the constructor -argument ``strings``, a list of strings:: - - ta = TypeA(strings=['foo', 'bar']) - tb = TypeA(strings=['bat', 'bar']) - -This list will generate a collection -of ``StringAttribute`` objects, which are persisted into a table that's -local to either the ``type_a_strings`` or ``type_b_strings`` table:: - - >>> print ta._strings - [<__main__.StringAttribute object at 0x10151cd90>, - <__main__.StringAttribute object at 0x10151ce10>] - -When constructing the :func:`.association_proxy`, the -:class:`.declared_attr` decorator must be used so that a distinct -:func:`.association_proxy` object is created for each of the ``TypeA`` -and ``TypeB`` classes. - -.. versionadded:: 0.8 :class:`.declared_attr` is usable with non-mapped - attributes, including user-defined attributes as well as - :func:`.association_proxy`. - - -Controlling table inheritance with mixins -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The ``__tablename__`` attribute in conjunction with the hierarchy of -classes involved in a declarative mixin scenario controls what type of -table inheritance, if any, -is configured by the declarative extension. - -If the ``__tablename__`` is computed by a mixin, you may need to -control which classes get the computed attribute in order to get the -type of table inheritance you require. - -For example, if you had a mixin that computes ``__tablename__`` but -where you wanted to use that mixin in a single table inheritance -hierarchy, you can explicitly specify ``__tablename__`` as ``None`` to -indicate that the class should not have a table mapped:: - - from sqlalchemy.ext.declarative import declared_attr - - class Tablename: - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - - class Person(Tablename, Base): - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - __tablename__ = None - __mapper_args__ = {'polymorphic_identity': 'engineer'} - primary_language = Column(String(50)) - -Alternatively, you can make the mixin intelligent enough to only -return a ``__tablename__`` in the event that no table is already -mapped in the inheritance hierarchy. To help with this, a -:func:`~sqlalchemy.ext.declarative.has_inherited_table` helper -function is provided that returns ``True`` if a parent class already -has a mapped table. - -As an example, here's a mixin that will only allow single table -inheritance:: - - from sqlalchemy.ext.declarative import declared_attr - from sqlalchemy.ext.declarative import has_inherited_table - - class Tablename(object): - @declared_attr - def __tablename__(cls): - if has_inherited_table(cls): - return None - return cls.__name__.lower() - - class Person(Tablename, Base): - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - primary_language = Column(String(50)) - __mapper_args__ = {'polymorphic_identity': 'engineer'} - - -Combining Table/Mapper Arguments from Multiple Mixins -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the case of ``__table_args__`` or ``__mapper_args__`` -specified with declarative mixins, you may want to combine -some parameters from several mixins with those you wish to -define on the class iteself. 
-The :class:`.declared_attr` decorator can be used
-here to create user-defined routines that combine
-arguments pulled from multiple collections::
-
-    from sqlalchemy.ext.declarative import declared_attr
-
-    class MySQLSettings(object):
-        __table_args__ = {'mysql_engine':'InnoDB'}
-
-    class MyOtherMixin(object):
-        __table_args__ = {'info':'foo'}
-
-    class MyModel(MySQLSettings, MyOtherMixin, Base):
-        __tablename__ = 'my_model'
-
-        @declared_attr
-        def __table_args__(cls):
-            args = dict()
-            args.update(MySQLSettings.__table_args__)
-            args.update(MyOtherMixin.__table_args__)
-            return args
-
-        id = Column(Integer, primary_key=True)
-
-Creating Indexes with Mixins
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To define a named, potentially multicolumn :class:`.Index` that applies to all
-tables derived from a mixin, use the "inline" form of :class:`.Index` and
-establish it as part of ``__table_args__``::
-
-    class MyMixin(object):
-        a = Column(Integer)
-        b = Column(Integer)
-
-        @declared_attr
-        def __table_args__(cls):
-            return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),)
-
-    class MyModel(MyMixin, Base):
-        __tablename__ = 'atable'
-        c = Column(Integer, primary_key=True)
-
-Special Directives
-==================
-
-``__declare_last__()``
-~~~~~~~~~~~~~~~~~~~~~~
-
-The ``__declare_last__()`` hook allows definition of
-a class level function that is automatically called by the
-:meth:`.MapperEvents.after_configured` event, which occurs after mappings are
-assumed to be completed and the 'configure' step has finished::
-
-    class MyClass(Base):
-        @classmethod
-        def __declare_last__(cls):
-            ""
-            # do something with mappings
-
-.. versionadded:: 0.7.3
-
-``__declare_first__()``
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Like ``__declare_last__()``, but is called at the beginning of mapper
-configuration via the :meth:`.MapperEvents.before_configured` event::
-
-    class MyClass(Base):
-        @classmethod
-        def __declare_first__(cls):
-            ""
-            # do something before mappings are configured
-
-.. versionadded:: 0.9.3
-
-.. _declarative_abstract:
-
-``__abstract__``
-~~~~~~~~~~~~~~~~~~~
-
-``__abstract__`` causes declarative to skip the production
-of a table or mapper for the class entirely.  A class can be added within a
-hierarchy in the same way as a mixin (see :ref:`declarative_mixins`), allowing
-subclasses to extend just from the special class::
-
-    class SomeAbstractBase(Base):
-        __abstract__ = True
-
-        def some_helpful_method(self):
-            ""
-
-        @declared_attr
-        def __mapper_args__(cls):
-            return {"helpful mapper arguments":True}
-
-    class MyMappedClass(SomeAbstractBase):
-        ""
-
-One possible use of ``__abstract__`` is to use a distinct
-:class:`.MetaData` for different bases::
-
-    Base = declarative_base()
-
-    class DefaultBase(Base):
-        __abstract__ = True
-        metadata = MetaData()
-
-    class OtherBase(Base):
-        __abstract__ = True
-        metadata = MetaData()
-
-Above, classes which inherit from ``DefaultBase`` will use one
-:class:`.MetaData` as the registry of tables, and those which inherit from
-``OtherBase`` will use a different one.  The tables themselves can then be
-created, perhaps within distinct databases::
-
-    DefaultBase.metadata.create_all(some_engine)
-    OtherBase.metadata.create_all(some_other_engine)
-
-..
versionadded:: 0.7.3 - -Class Constructor -================= - -As a convenience feature, the :func:`declarative_base` sets a default -constructor on classes which takes keyword arguments, and assigns them -to the named attributes:: - - e = Engineer(primary_language='python') - -Sessions -======== - -Note that ``declarative`` does nothing special with sessions, and is -only intended as an easier way to configure mappers and -:class:`~sqlalchemy.schema.Table` objects. A typical application -setup using :class:`~sqlalchemy.orm.scoping.scoped_session` might look like:: - - engine = create_engine('postgresql://scott:tiger@localhost/test') - Session = scoped_session(sessionmaker(autocommit=False, - autoflush=False, - bind=engine)) - Base = declarative_base() - -Mapped instances then make usage of -:class:`~sqlalchemy.orm.session.Session` in the usual way. - -""" - from .api import declarative_base, synonym_for, comparable_using, \ instrument_declarative, ConcreteBase, AbstractConcreteBase, \ DeclarativeMeta, DeferredReflection, has_inherited_table,\ @@ -1313,5 +13,6 @@ from .api import declarative_base, synonym_for, comparable_using, \ __all__ = ['declarative_base', 'synonym_for', 'has_inherited_table', 'comparable_using', 'instrument_declarative', 'declared_attr', + 'as_declarative', 'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta', 'DeferredReflection'] diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/declarative/api.py b/lib/python3.5/site-packages/sqlalchemy/ext/declarative/api.py index daf8bff..54e78ee 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/declarative/api.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/declarative/api.py @@ -1,5 +1,5 @@ # ext/declarative/api.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -7,13 +7,14 @@ """Public API functions and helpers for declarative.""" -from ...schema import Table, MetaData -from ...orm import synonym as _orm_synonym, mapper,\ +from ...schema import Table, MetaData, Column +from ...orm import synonym as _orm_synonym, \ comparable_property,\ - interfaces, properties + interfaces, properties, attributes from ...orm.util import polymorphic_union from ...orm.base import _mapper_or_none -from ...util import OrderedDict +from ...util import OrderedDict, hybridmethod, hybridproperty +from ... import util from ... import exc import weakref @@ -21,7 +22,6 @@ from .base import _as_declarative, \ _declarative_constructor,\ _DeferredMapperConfig, _add_attribute from .clsregistry import _class_resolver -from . 
import clsregistry


 def instrument_declarative(cls, registry, metadata):
@@ -157,12 +157,90 @@ class declared_attr(interfaces._MappedAttribute, property):

     """

-    def __init__(self, fget, *arg, **kw):
-        super(declared_attr, self).__init__(fget, *arg, **kw)
+    def __init__(self, fget, cascading=False):
+        super(declared_attr, self).__init__(fget)
         self.__doc__ = fget.__doc__
+        self._cascading = cascading

     def __get__(desc, self, cls):
-        return desc.fget(cls)
+        reg = cls.__dict__.get('_sa_declared_attr_reg', None)
+        if reg is None:
+            manager = attributes.manager_of_class(cls)
+            if manager is None:
+                util.warn(
+                    "Unmanaged access of declarative attribute %s from "
+                    "non-mapped class %s" %
+                    (desc.fget.__name__, cls.__name__))
+            return desc.fget(cls)
+        elif desc in reg:
+            return reg[desc]
+        else:
+            reg[desc] = obj = desc.fget(cls)
+            return obj
+
+    @hybridmethod
+    def _stateful(cls, **kw):
+        return _stateful_declared_attr(**kw)
+
+    @hybridproperty
+    def cascading(cls):
+        """Mark a :class:`.declared_attr` as cascading.
+
+        This is a special-use modifier which indicates that a column
+        or MapperProperty-based declared attribute should be configured
+        distinctly per mapped subclass, within a mapped-inheritance scenario.
+
+        Below, both MyClass as well as MySubClass will have a distinct
+        ``id`` Column object established::
+
+            class HasSomeAttribute(object):
+                @declared_attr.cascading
+                def some_id(cls):
+                    if has_inherited_table(cls):
+                        return Column(
+                            ForeignKey('myclass.id'), primary_key=True)
+                    else:
+                        return Column(Integer, primary_key=True)
+
+            class MyClass(HasSomeAttribute, Base):
+                ""
+                # ...
+
+            class MySubClass(MyClass):
+                ""
+                # ...
+
+        The behavior of the above configuration is that ``MySubClass``
+        will refer to both its own ``id`` column as well as that of
+        ``MyClass`` underneath the attribute named ``some_id``.
+
+        .. seealso::
+
+            :ref:`declarative_inheritance`
+
+            :ref:`mixin_inheritance_columns`
+
+
+        """
+        return cls._stateful(cascading=True)
+
+
+class _stateful_declared_attr(declared_attr):
+    def __init__(self, **kw):
+        self.kw = kw
+
+    def _stateful(self, **kw):
+        new_kw = self.kw.copy()
+        new_kw.update(kw)
+        return _stateful_declared_attr(**new_kw)
+
+    def __call__(self, fn):
+        return declared_attr(fn, **self.kw)


 def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
@@ -319,6 +397,15 @@ class ConcreteBase(object):
                 'polymorphic_identity':'manager',
                 'concrete':True}

+    .. seealso::
+
+        :class:`.AbstractConcreteBase`
+
+        :ref:`concrete_inheritance`
+
+        :ref:`inheritance_concrete_helpers`
+
+
     """

     @classmethod
@@ -349,9 +436,11 @@ class AbstractConcreteBase(ConcreteBase):
     ``__declare_last__()`` function, which is essentially
     a hook for the :meth:`.after_configured` event.

-    :class:`.AbstractConcreteBase` does not produce a mapped
-    table for the class itself.  Compare to :class:`.ConcreteBase`,
-    which does.
+    :class:`.AbstractConcreteBase` does produce a mapped class
+    for the base class; however, it is not persisted to any table.  It
+    is instead mapped directly to the "polymorphic" selectable
+    and is only used for selecting.  Compare to :class:`.ConcreteBase`,
+    which does create a persisted table for the base class.
Example::

@@ -365,20 +454,79 @@ class AbstractConcreteBase(ConcreteBase):
             employee_id = Column(Integer, primary_key=True)
             name = Column(String(50))
             manager_data = Column(String(40))
+
             __mapper_args__ = {
-                'polymorphic_identity':'manager',
-                'concrete':True}
+                'polymorphic_identity':'manager',
+                'concrete':True}
+
+    The abstract base class is handled by declarative in a special way;
+    at class configuration time, it behaves like a declarative mixin
+    or an ``__abstract__`` base class.  Once classes are configured
+    and mappings are produced, it then gets mapped itself, but
+    after all of its descendants.  This is a system of mapping
+    not found in any other part of SQLAlchemy.
+
+    Using this approach, we can specify columns and properties
+    that will take effect on mapped subclasses, in the same way that
+    we normally do as described in :ref:`declarative_mixins`::
+
+        class Company(Base):
+            __tablename__ = 'company'
+            id = Column(Integer, primary_key=True)
+
+        class Employee(AbstractConcreteBase, Base):
+            employee_id = Column(Integer, primary_key=True)
+
+            @declared_attr
+            def company_id(cls):
+                return Column(ForeignKey('company.id'))
+
+            @declared_attr
+            def company(cls):
+                return relationship("Company")
+
+        class Manager(Employee):
+            __tablename__ = 'manager'
+
+            name = Column(String(50))
+            manager_data = Column(String(40))
+
+            __mapper_args__ = {
+                'polymorphic_identity':'manager',
+                'concrete':True}
+
+    When we make use of our mappings, however, both ``Manager`` and
+    ``Employee`` will have an independently usable ``.company`` attribute::
+
+        session.query(Employee).filter(Employee.company.has(id=5))
+
+    .. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase`
+       have been reworked to support relationships established directly
+       on the abstract base, without any special configurational steps.
+
+    .. seealso::
+
+        :class:`.ConcreteBase`
+
+        :ref:`concrete_inheritance`
+
+        :ref:`inheritance_concrete_helpers`

     """

-    __abstract__ = True
+    __no_table__ = True

     @classmethod
     def __declare_first__(cls):
-        if hasattr(cls, '__mapper__'):
+        cls._sa_decl_prepare_nocascade()
+
+    @classmethod
+    def _sa_decl_prepare_nocascade(cls):
+        if getattr(cls, '__mapper__', None):
             return
-        clsregistry.add_class(cls.__name__, cls)
+        to_map = _DeferredMapperConfig.config_for_cls(cls)
+
         # can't rely on 'self_and_descendants' here
         # since technically an immediate subclass
         # might not be mapped, but a subclass
@@ -392,11 +540,33 @@ class AbstractConcreteBase(ConcreteBase):
             if mn is not None:
                 mappers.append(mn)
         pjoin = cls._create_polymorphic_union(mappers)
-        cls.__mapper__ = m = mapper(cls, pjoin, polymorphic_on=pjoin.c.type)
+
+        # Columns that were declared on the class are normally ignored
+        # with the "__no_table__" mapping, unless they have a different
+        # attribute key vs. col name and are in the properties argument.
+        # In that case, ensure we update the properties entry
+        # to the correct column from the pjoin target table.
+ declared_cols = set(to_map.declared_columns) + for k, v in list(to_map.properties.items()): + if v in declared_cols: + to_map.properties[k] = pjoin.c[v.key] + + to_map.local_table = pjoin + + m_args = to_map.mapper_args_fn or dict + + def mapper_args(): + args = m_args() + args['polymorphic_on'] = pjoin.c.type + return args + to_map.mapper_args_fn = mapper_args + + m = to_map.map() for scls in cls.__subclasses__(): sm = _mapper_or_none(scls) - if sm.concrete and cls in scls.__bases__: + if sm and sm.concrete and cls in scls.__bases__: sm._set_concrete_base(m) diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/declarative/base.py b/lib/python3.5/site-packages/sqlalchemy/ext/declarative/base.py index 94baeeb..59ebe37 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/declarative/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/declarative/base.py @@ -1,5 +1,5 @@ # ext/declarative/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -19,6 +19,9 @@ from ... import event from . import clsregistry import collections import weakref +from sqlalchemy.orm import instrumentation + +declared_attr = declarative_props = None def _declared_mapping_info(cls): @@ -32,322 +35,431 @@ def _declared_mapping_info(cls): return None -def _as_declarative(cls, classname, dict_): - from .api import declared_attr +def _resolve_for_abstract(cls): + if cls is object: + return None - # dict_ will be a dictproxy, which we can't write to, and we need to! - dict_ = dict(dict_) + if _get_immediate_cls_attr(cls, '__abstract__', strict=True): + for sup in cls.__bases__: + sup = _resolve_for_abstract(sup) + if sup is not None: + return sup + else: + return None + else: + return cls - column_copies = {} - potential_columns = {} - mapper_args_fn = None - table_args = inherited_table_args = None - tablename = None +def _get_immediate_cls_attr(cls, attrname, strict=False): + """return an attribute of the class that is either present directly + on the class, e.g. not on a superclass, or is from a superclass but + this superclass is a mixin, that is, not a descendant of + the declarative base. - declarative_props = (declared_attr, util.classproperty) + This is used to detect attributes that indicate something about + a mapped class independently from any mapped classes that it may + inherit from. + + """ + if not issubclass(cls, object): + return None for base in cls.__mro__: _is_declarative_inherits = hasattr(base, '_decl_class_registry') - - if '__declare_last__' in base.__dict__: - @event.listens_for(mapper, "after_configured") - def go(): - cls.__declare_last__() - if '__declare_first__' in base.__dict__: - @event.listens_for(mapper, "before_configured") - def go(): - cls.__declare_first__() - if '__abstract__' in base.__dict__ and base.__abstract__: - if (base is cls or - (base in cls.__bases__ and not _is_declarative_inherits)): - return - - class_mapped = _declared_mapping_info(base) is not None - - for name, obj in vars(base).items(): - if name == '__mapper_args__': - if not mapper_args_fn and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - # don't even invoke __mapper_args__ until - # after we've determined everything about the - # mapped table. - # make a copy of it so a class-level dictionary - # is not overwritten when we update column-based - # arguments. 
- mapper_args_fn = lambda: dict(cls.__mapper_args__) - elif name == '__tablename__': - if not tablename and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - tablename = cls.__tablename__ - elif name == '__table_args__': - if not table_args and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - table_args = cls.__table_args__ - if not isinstance(table_args, (tuple, dict, type(None))): - raise exc.ArgumentError( - "__table_args__ value must be a tuple, " - "dict, or None") - if base is not cls: - inherited_table_args = True - elif class_mapped: - if isinstance(obj, declarative_props): - util.warn("Regular (i.e. not __special__) " - "attribute '%s.%s' uses @declared_attr, " - "but owning class %s is mapped - " - "not applying to subclass %s." - % (base.__name__, name, base, cls)) - continue - elif base is not cls: - # we're a mixin. - if isinstance(obj, Column): - if getattr(cls, name) is not obj: - # if column has been overridden - # (like by the InstrumentedAttribute of the - # superclass), skip - continue - if obj.foreign_keys: - raise exc.InvalidRequestError( - "Columns with foreign keys to other columns " - "must be declared as @declared_attr callables " - "on declarative mixin classes. ") - if name not in dict_ and not ( - '__table__' in dict_ and - (obj.name or name) in dict_['__table__'].c - ) and name not in potential_columns: - potential_columns[name] = \ - column_copies[obj] = \ - obj.copy() - column_copies[obj]._creation_order = \ - obj._creation_order - elif isinstance(obj, MapperProperty): - raise exc.InvalidRequestError( - "Mapper properties (i.e. deferred," - "column_property(), relationship(), etc.) must " - "be declared as @declared_attr callables " - "on declarative mixin classes.") - elif isinstance(obj, declarative_props): - dict_[name] = ret = \ - column_copies[obj] = getattr(cls, name) - if isinstance(ret, (Column, MapperProperty)) and \ - ret.doc is None: - ret.doc = obj.__doc__ - - # apply inherited columns as we should - for k, v in potential_columns.items(): - dict_[k] = v - - if inherited_table_args and not tablename: - table_args = None - - clsregistry.add_class(classname, cls) - our_stuff = util.OrderedDict() - - for k in list(dict_): - - # TODO: improve this ? all dunders ? - if k in ('__table__', '__tablename__', '__mapper_args__'): - continue - - value = dict_[k] - if isinstance(value, declarative_props): - value = getattr(cls, k) - - elif isinstance(value, QueryableAttribute) and \ - value.class_ is not cls and \ - value.key != k: - # detect a QueryableAttribute that's already mapped being - # assigned elsewhere in userland, turn into a synonym() - value = synonym(value.key) - setattr(cls, k, value) - - if (isinstance(value, tuple) and len(value) == 1 and - isinstance(value[0], (Column, MapperProperty))): - util.warn("Ignoring declarative-like tuple value of attribute " - "%s: possibly a copy-and-paste error with a comma " - "left at the end of the line?" % k) - continue - if not isinstance(value, (Column, MapperProperty)): - if not k.startswith('__'): - dict_.pop(k) - setattr(cls, k, value) - continue - if k == 'metadata': - raise exc.InvalidRequestError( - "Attribute name 'metadata' is reserved " - "for the MetaData instance when using a " - "declarative base class." 
- ) - prop = clsregistry._deferred_relationship(cls, value) - our_stuff[k] = prop - - # set up attributes in the order they were created - our_stuff.sort(key=lambda key: our_stuff[key]._creation_order) - - # extract columns from the class dict - declared_columns = set() - name_to_prop_key = collections.defaultdict(set) - for key, c in list(our_stuff.items()): - if isinstance(c, (ColumnProperty, CompositeProperty)): - for col in c.columns: - if isinstance(col, Column) and \ - col.table is None: - _undefer_column_name(key, col) - if not isinstance(c, CompositeProperty): - name_to_prop_key[col.name].add(key) - declared_columns.add(col) - elif isinstance(c, Column): - _undefer_column_name(key, c) - name_to_prop_key[c.name].add(key) - declared_columns.add(c) - # if the column is the same name as the key, - # remove it from the explicit properties dict. - # the normal rules for assigning column-based properties - # will take over, including precedence of columns - # in multi-column ColumnProperties. - if key == c.key: - del our_stuff[key] - - for name, keys in name_to_prop_key.items(): - if len(keys) > 1: - util.warn( - "On class %r, Column object %r named directly multiple times, " - "only one will be used: %s" % - (classname, name, (", ".join(sorted(keys)))) - ) - - declared_columns = sorted( - declared_columns, key=lambda c: c._creation_order) - table = None - - if hasattr(cls, '__table_cls__'): - table_cls = util.unbound_method_to_callable(cls.__table_cls__) + if attrname in base.__dict__ and ( + base is cls or + ((base in cls.__bases__ if strict else True) + and not _is_declarative_inherits) + ): + return getattr(base, attrname) else: - table_cls = Table + return None - if '__table__' not in dict_: - if tablename is not None: - args, table_kw = (), {} - if table_args: - if isinstance(table_args, dict): - table_kw = table_args - elif isinstance(table_args, tuple): - if isinstance(table_args[-1], dict): - args, table_kw = table_args[0:-1], table_args[-1] - else: - args = table_args +def _as_declarative(cls, classname, dict_): + global declared_attr, declarative_props + if declared_attr is None: + from .api import declared_attr + declarative_props = (declared_attr, util.classproperty) - autoload = dict_.get('__autoload__') - if autoload: - table_kw['autoload'] = True + if _get_immediate_cls_attr(cls, '__abstract__', strict=True): + return - cls.__table__ = table = table_cls( - tablename, cls.metadata, - *(tuple(declared_columns) + tuple(args)), - **table_kw) - else: - table = cls.__table__ - if declared_columns: - for c in declared_columns: - if not table.c.contains_column(c): - raise exc.ArgumentError( - "Can't add additional column %r when " - "specifying __table__" % c.key - ) - - if hasattr(cls, '__mapper_cls__'): - mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__) - else: - mapper_cls = mapper - - for c in cls.__bases__: - if _declared_mapping_info(c) is not None: - inherits = c - break - else: - inherits = None - - if table is None and inherits is None: - raise exc.InvalidRequestError( - "Class %r does not have a __table__ or __tablename__ " - "specified and does not inherit from an existing " - "table-mapped class." % cls - ) - elif inherits: - inherited_mapper = _declared_mapping_info(inherits) - inherited_table = inherited_mapper.local_table - inherited_mapped_table = inherited_mapper.mapped_table - - if table is None: - # single table inheritance. 
- # ensure no table args - if table_args: - raise exc.ArgumentError( - "Can't place __table_args__ on an inherited class " - "with no table." - ) - # add any columns declared here to the inherited table. - for c in declared_columns: - if c.primary_key: - raise exc.ArgumentError( - "Can't place primary key columns on an inherited " - "class with no table." - ) - if c.name in inherited_table.c: - if inherited_table.c[c.name] is c: - continue - raise exc.ArgumentError( - "Column '%s' on class %s conflicts with " - "existing column '%s'" % - (c, cls, inherited_table.c[c.name]) - ) - inherited_table.append_column(c) - if inherited_mapped_table is not None and \ - inherited_mapped_table is not inherited_table: - inherited_mapped_table._refresh_for_new_column(c) - - defer_map = hasattr(cls, '_sa_decl_prepare') - if defer_map: - cfg_cls = _DeferredMapperConfig - else: - cfg_cls = _MapperConfig - mt = cfg_cls(mapper_cls, - cls, table, - inherits, - declared_columns, - column_copies, - our_stuff, - mapper_args_fn) - if not defer_map: - mt.map() + _MapperConfig.setup_mapping(cls, classname, dict_) class _MapperConfig(object): - mapped_table = None + @classmethod + def setup_mapping(cls, cls_, classname, dict_): + defer_map = _get_immediate_cls_attr( + cls_, '_sa_decl_prepare_nocascade', strict=True) or \ + hasattr(cls_, '_sa_decl_prepare') - def __init__(self, mapper_cls, - cls, - table, - inherits, - declared_columns, - column_copies, - properties, mapper_args_fn): - self.mapper_cls = mapper_cls - self.cls = cls - self.local_table = table - self.inherits = inherits - self.properties = properties + if defer_map: + cfg_cls = _DeferredMapperConfig + else: + cfg_cls = _MapperConfig + cfg_cls(cls_, classname, dict_) + + def __init__(self, cls_, classname, dict_): + + self.cls = cls_ + + # dict_ will be a dictproxy, which we can't write to, and we need to! + self.dict_ = dict(dict_) + self.classname = classname + self.mapped_table = None + self.properties = util.OrderedDict() + self.declared_columns = set() + self.column_copies = {} + self._setup_declared_events() + + # temporary registry. While early 1.0 versions + # set up the ClassManager here, by API contract + # we can't do that until there's a mapper. 
+ self.cls._sa_declared_attr_reg = {} + + self._scan_attributes() + + clsregistry.add_class(self.classname, self.cls) + + self._extract_mappable_attributes() + + self._extract_declared_columns() + + self._setup_table() + + self._setup_inheritance() + + self._early_mapping() + + def _early_mapping(self): + self.map() + + def _setup_declared_events(self): + if _get_immediate_cls_attr(self.cls, '__declare_last__'): + @event.listens_for(mapper, "after_configured") + def after_configured(): + self.cls.__declare_last__() + + if _get_immediate_cls_attr(self.cls, '__declare_first__'): + @event.listens_for(mapper, "before_configured") + def before_configured(): + self.cls.__declare_first__() + + def _scan_attributes(self): + cls = self.cls + dict_ = self.dict_ + column_copies = self.column_copies + mapper_args_fn = None + table_args = inherited_table_args = None + tablename = None + + for base in cls.__mro__: + class_mapped = base is not cls and \ + _declared_mapping_info(base) is not None and \ + not _get_immediate_cls_attr( + base, '_sa_decl_prepare_nocascade', strict=True) + + if not class_mapped and base is not cls: + self._produce_column_copies(base) + + for name, obj in vars(base).items(): + if name == '__mapper_args__': + if not mapper_args_fn and ( + not class_mapped or + isinstance(obj, declarative_props) + ): + # don't even invoke __mapper_args__ until + # after we've determined everything about the + # mapped table. + # make a copy of it so a class-level dictionary + # is not overwritten when we update column-based + # arguments. + mapper_args_fn = lambda: dict(cls.__mapper_args__) + elif name == '__tablename__': + if not tablename and ( + not class_mapped or + isinstance(obj, declarative_props) + ): + tablename = cls.__tablename__ + elif name == '__table_args__': + if not table_args and ( + not class_mapped or + isinstance(obj, declarative_props) + ): + table_args = cls.__table_args__ + if not isinstance( + table_args, (tuple, dict, type(None))): + raise exc.ArgumentError( + "__table_args__ value must be a tuple, " + "dict, or None") + if base is not cls: + inherited_table_args = True + elif class_mapped: + if isinstance(obj, declarative_props): + util.warn("Regular (i.e. not __special__) " + "attribute '%s.%s' uses @declared_attr, " + "but owning class %s is mapped - " + "not applying to subclass %s." + % (base.__name__, name, base, cls)) + continue + elif base is not cls: + # we're a mixin, abstract base, or something that is + # acting like that for now. + if isinstance(obj, Column): + # already copied columns to the mapped class. + continue + elif isinstance(obj, MapperProperty): + raise exc.InvalidRequestError( + "Mapper properties (i.e. deferred," + "column_property(), relationship(), etc.) 
must " + "be declared as @declared_attr callables " + "on declarative mixin classes.") + elif isinstance(obj, declarative_props): + oldclassprop = isinstance(obj, util.classproperty) + if not oldclassprop and obj._cascading: + dict_[name] = column_copies[obj] = \ + ret = obj.__get__(obj, cls) + setattr(cls, name, ret) + else: + if oldclassprop: + util.warn_deprecated( + "Use of sqlalchemy.util.classproperty on " + "declarative classes is deprecated.") + dict_[name] = column_copies[obj] = \ + ret = getattr(cls, name) + if isinstance(ret, (Column, MapperProperty)) and \ + ret.doc is None: + ret.doc = obj.__doc__ + + if inherited_table_args and not tablename: + table_args = None + + self.table_args = table_args + self.tablename = tablename self.mapper_args_fn = mapper_args_fn - self.declared_columns = declared_columns - self.column_copies = column_copies + + def _produce_column_copies(self, base): + cls = self.cls + dict_ = self.dict_ + column_copies = self.column_copies + # copy mixin columns to the mapped class + for name, obj in vars(base).items(): + if isinstance(obj, Column): + if getattr(cls, name) is not obj: + # if column has been overridden + # (like by the InstrumentedAttribute of the + # superclass), skip + continue + elif obj.foreign_keys: + raise exc.InvalidRequestError( + "Columns with foreign keys to other columns " + "must be declared as @declared_attr callables " + "on declarative mixin classes. ") + elif name not in dict_ and not ( + '__table__' in dict_ and + (obj.name or name) in dict_['__table__'].c + ): + column_copies[obj] = copy_ = obj.copy() + copy_._creation_order = obj._creation_order + setattr(cls, name, copy_) + dict_[name] = copy_ + + def _extract_mappable_attributes(self): + cls = self.cls + dict_ = self.dict_ + + our_stuff = self.properties + + for k in list(dict_): + + if k in ('__table__', '__tablename__', '__mapper_args__'): + continue + + value = dict_[k] + if isinstance(value, declarative_props): + value = getattr(cls, k) + + elif isinstance(value, QueryableAttribute) and \ + value.class_ is not cls and \ + value.key != k: + # detect a QueryableAttribute that's already mapped being + # assigned elsewhere in userland, turn into a synonym() + value = synonym(value.key) + setattr(cls, k, value) + + if (isinstance(value, tuple) and len(value) == 1 and + isinstance(value[0], (Column, MapperProperty))): + util.warn("Ignoring declarative-like tuple value of attribute " + "%s: possibly a copy-and-paste error with a comma " + "left at the end of the line?" % k) + continue + elif not isinstance(value, (Column, MapperProperty)): + # using @declared_attr for some object that + # isn't Column/MapperProperty; remove from the dict_ + # and place the evaluated value onto the class. + if not k.startswith('__'): + dict_.pop(k) + setattr(cls, k, value) + continue + # we expect to see the name 'metadata' in some valid cases; + # however at this point we see it's assigned to something trying + # to be mapped, so raise for that. + elif k == 'metadata': + raise exc.InvalidRequestError( + "Attribute name 'metadata' is reserved " + "for the MetaData instance when using a " + "declarative base class." 
+ ) + prop = clsregistry._deferred_relationship(cls, value) + our_stuff[k] = prop + + def _extract_declared_columns(self): + our_stuff = self.properties + + # set up attributes in the order they were created + our_stuff.sort(key=lambda key: our_stuff[key]._creation_order) + + # extract columns from the class dict + declared_columns = self.declared_columns + name_to_prop_key = collections.defaultdict(set) + for key, c in list(our_stuff.items()): + if isinstance(c, (ColumnProperty, CompositeProperty)): + for col in c.columns: + if isinstance(col, Column) and \ + col.table is None: + _undefer_column_name(key, col) + if not isinstance(c, CompositeProperty): + name_to_prop_key[col.name].add(key) + declared_columns.add(col) + elif isinstance(c, Column): + _undefer_column_name(key, c) + name_to_prop_key[c.name].add(key) + declared_columns.add(c) + # if the column is the same name as the key, + # remove it from the explicit properties dict. + # the normal rules for assigning column-based properties + # will take over, including precedence of columns + # in multi-column ColumnProperties. + if key == c.key: + del our_stuff[key] + + for name, keys in name_to_prop_key.items(): + if len(keys) > 1: + util.warn( + "On class %r, Column object %r named " + "directly multiple times, " + "only one will be used: %s" % + (self.classname, name, (", ".join(sorted(keys)))) + ) + + def _setup_table(self): + cls = self.cls + tablename = self.tablename + table_args = self.table_args + dict_ = self.dict_ + declared_columns = self.declared_columns + + declared_columns = self.declared_columns = sorted( + declared_columns, key=lambda c: c._creation_order) + table = None + + if hasattr(cls, '__table_cls__'): + table_cls = util.unbound_method_to_callable(cls.__table_cls__) + else: + table_cls = Table + + if '__table__' not in dict_: + if tablename is not None: + + args, table_kw = (), {} + if table_args: + if isinstance(table_args, dict): + table_kw = table_args + elif isinstance(table_args, tuple): + if isinstance(table_args[-1], dict): + args, table_kw = table_args[0:-1], table_args[-1] + else: + args = table_args + + autoload = dict_.get('__autoload__') + if autoload: + table_kw['autoload'] = True + + cls.__table__ = table = table_cls( + tablename, cls.metadata, + *(tuple(declared_columns) + tuple(args)), + **table_kw) + else: + table = cls.__table__ + if declared_columns: + for c in declared_columns: + if not table.c.contains_column(c): + raise exc.ArgumentError( + "Can't add additional column %r when " + "specifying __table__" % c.key + ) + self.local_table = table + + def _setup_inheritance(self): + table = self.local_table + cls = self.cls + table_args = self.table_args + declared_columns = self.declared_columns + for c in cls.__bases__: + c = _resolve_for_abstract(c) + if c is None: + continue + if _declared_mapping_info(c) is not None and \ + not _get_immediate_cls_attr( + c, '_sa_decl_prepare_nocascade', strict=True): + self.inherits = c + break + else: + self.inherits = None + + if table is None and self.inherits is None and \ + not _get_immediate_cls_attr(cls, '__no_table__'): + + raise exc.InvalidRequestError( + "Class %r does not have a __table__ or __tablename__ " + "specified and does not inherit from an existing " + "table-mapped class." % cls + ) + elif self.inherits: + inherited_mapper = _declared_mapping_info(self.inherits) + inherited_table = inherited_mapper.local_table + inherited_mapped_table = inherited_mapper.mapped_table + + if table is None: + # single table inheritance. 
+ # ensure no table args + if table_args: + raise exc.ArgumentError( + "Can't place __table_args__ on an inherited class " + "with no table." + ) + # add any columns declared here to the inherited table. + for c in declared_columns: + if c.primary_key: + raise exc.ArgumentError( + "Can't place primary key columns on an inherited " + "class with no table." + ) + if c.name in inherited_table.c: + if inherited_table.c[c.name] is c: + continue + raise exc.ArgumentError( + "Column '%s' on class %s conflicts with " + "existing column '%s'" % + (c, cls, inherited_table.c[c.name]) + ) + inherited_table.append_column(c) + if inherited_mapped_table is not None and \ + inherited_mapped_table is not inherited_table: + inherited_mapped_table._refresh_for_new_column(c) def _prepare_mapper_arguments(self): properties = self.properties @@ -401,20 +513,31 @@ class _MapperConfig(object): properties[k] = [col] + p.columns result_mapper_args = mapper_args.copy() result_mapper_args['properties'] = properties - return result_mapper_args + self.mapper_args = result_mapper_args def map(self): - mapper_args = self._prepare_mapper_arguments() - self.cls.__mapper__ = self.mapper_cls( + self._prepare_mapper_arguments() + if hasattr(self.cls, '__mapper_cls__'): + mapper_cls = util.unbound_method_to_callable( + self.cls.__mapper_cls__) + else: + mapper_cls = mapper + + self.cls.__mapper__ = mp_ = mapper_cls( self.cls, self.local_table, - **mapper_args + **self.mapper_args ) + del self.cls._sa_declared_attr_reg + return mp_ class _DeferredMapperConfig(_MapperConfig): _configs = util.OrderedDict() + def _early_mapping(self): + pass + @property def cls(self): return self._cls() @@ -466,7 +589,7 @@ class _DeferredMapperConfig(_MapperConfig): def map(self): self._configs.pop(self._cls, None) - super(_DeferredMapperConfig, self).map() + return super(_DeferredMapperConfig, self).map() def _add_attribute(cls, key, value): diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/declarative/clsregistry.py b/lib/python3.5/site-packages/sqlalchemy/ext/declarative/clsregistry.py index 4595b85..0d62bd2 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/declarative/clsregistry.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/declarative/clsregistry.py @@ -1,5 +1,5 @@ # ext/declarative/clsregistry.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -71,6 +71,8 @@ class _MultipleClassMarker(object): """ + __slots__ = 'on_remove', 'contents', '__weakref__' + def __init__(self, classes, on_remove=None): self.on_remove = on_remove self.contents = set([ @@ -103,7 +105,12 @@ class _MultipleClassMarker(object): self.on_remove() def add_item(self, item): - modules = set([cls().__module__ for cls in self.contents]) + # protect against class registration race condition against + # asynchronous garbage collection calling _remove_item, + # [ticket:3208] + modules = set([ + cls.__module__ for cls in + [ref() for ref in self.contents] if cls is not None]) if item.__module__ in modules: util.warn( "This declarative base already contains a class with the " @@ -122,6 +129,8 @@ class _ModuleMarker(object): """ + __slots__ = 'parent', 'name', 'contents', 'mod_ns', 'path', '__weakref__' + def __init__(self, name, parent): self.parent = parent self.name = name @@ -167,6 +176,8 @@ class _ModuleMarker(object): class _ModNS(object): + __slots__ = '__parent', + def __init__(self, parent): self.__parent = 
parent
@@ -188,6 +199,8 @@ class _ModNS(object):

 class _GetColumns(object):
+    __slots__ = 'cls',
+
     def __init__(self, cls):
         self.cls = cls
@@ -216,6 +229,8 @@ inspection._inspects(_GetColumns)(

 class _GetTable(object):
+    __slots__ = 'key', 'metadata'
+
     def __init__(self, key, metadata):
         self.key = key
         self.metadata = metadata
@@ -306,7 +321,8 @@ def _deferred_relationship(cls, prop):
             key, kwargs = prop.backref
             for attr in ('primaryjoin', 'secondaryjoin', 'secondary',
                          'foreign_keys', 'remote_side', 'order_by'):
-                if attr in kwargs and isinstance(kwargs[attr], str):
+                if attr in kwargs and isinstance(kwargs[attr],
+                                                 util.string_types):
                     kwargs[attr] = resolve_arg(kwargs[attr])
     return prop
diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/horizontal_shard.py b/lib/python3.5/site-packages/sqlalchemy/ext/horizontal_shard.py
index d311fb2..996e81f 100644
--- a/lib/python3.5/site-packages/sqlalchemy/ext/horizontal_shard.py
+++ b/lib/python3.5/site-packages/sqlalchemy/ext/horizontal_shard.py
@@ -1,5 +1,5 @@
 # ext/horizontal_shard.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
 #
 #
 # This module is part of SQLAlchemy and is released under
diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/hybrid.py b/lib/python3.5/site-packages/sqlalchemy/ext/hybrid.py
index 9f4e09e..bbf3867 100644
--- a/lib/python3.5/site-packages/sqlalchemy/ext/hybrid.py
+++ b/lib/python3.5/site-packages/sqlalchemy/ext/hybrid.py
@@ -1,5 +1,5 @@
 # ext/hybrid.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
 #
 #
 # This module is part of SQLAlchemy and is released under
@@ -45,8 +45,8 @@ as the class itself::
             return self.end - self.start

         @hybrid_method
-        def contains(self,point):
-            return (self.start <= point) & (point < self.end)
+        def contains(self, point):
+            return (self.start <= point) & (point <= self.end)

         @hybrid_method
         def intersects(self, other):
@@ -145,7 +145,7 @@ usage of the absolute value function::
         return func.abs(cls.length) / 2

 Above, the Python function ``abs()`` is used for instance-level
-operations, the SQL function ``ABS()`` is used via the :attr:`.func`
+operations, while the SQL function ``ABS()`` is used via the :data:`.func`
 object for class-level expressions::

     >>> i1.radius
@@ -634,10 +634,10 @@ from .. import util
 from ..orm import attributes, interfaces

 HYBRID_METHOD = util.symbol('HYBRID_METHOD')
-"""Symbol indicating an :class:`_InspectionAttr` that's
+"""Symbol indicating an :class:`InspectionAttr` that's
    of type :class:`.hybrid_method`.

-   Is assigned to the :attr:`._InspectionAttr.extension_type`
+   Is assigned to the :attr:`.InspectionAttr.extension_type`
    attribute.

   .. seealso::
@@ -647,10 +647,10 @@ HYBRID_METHOD = util.symbol('HYBRID_METHOD')
 """

 HYBRID_PROPERTY = util.symbol('HYBRID_PROPERTY')
-"""Symbol indicating an :class:`_InspectionAttr` that's
+"""Symbol indicating an :class:`InspectionAttr` that's
    of type :class:`.hybrid_property`.

-   Is assigned to the :attr:`._InspectionAttr.extension_type`
+   Is assigned to the :attr:`.InspectionAttr.extension_type`
    attribute.

   .. seealso::
@@ -660,7 +660,7 @@ HYBRID_PROPERTY = util.symbol('HYBRID_PROPERTY')
 """


-class hybrid_method(interfaces._InspectionAttr):
+class hybrid_method(interfaces.InspectionAttrInfo):
     """A decorator which allows definition of a Python object method
     with both instance-level and class-level behavior.
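
The dual behavior that ``hybrid_method`` provides can be sketched briefly;
this assumes the ``Interval`` class from the module docstring above, with
its ``contains()`` hybrid method::

    # instance level: evaluates as plain Python
    i1 = Interval(5, 10)
    assert i1.contains(7)

    # class level: produces a SQL expression instead
    from sqlalchemy import select
    print(select([Interval.id]).where(Interval.contains(7)))
    # renders roughly:
    # SELECT interval.id FROM interval
    # WHERE interval.start <= :start_1 AND interval."end" >= :end_1
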
@@ -703,7 +703,7 @@ class hybrid_method(interfaces._InspectionAttr): return self -class hybrid_property(interfaces._InspectionAttr): +class hybrid_property(interfaces.InspectionAttrInfo): """A decorator which allows definition of a Python descriptor with both instance-level and class-level behavior. diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/instrumentation.py b/lib/python3.5/site-packages/sqlalchemy/ext/instrumentation.py index 0241366..30a0ab7 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/instrumentation.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/instrumentation.py @@ -166,7 +166,13 @@ class ExtendedInstrumentationRegistry(InstrumentationFactory): def manager_of_class(self, cls): if cls is None: return None - return self._manager_finders.get(cls, _default_manager_getter)(cls) + try: + finder = self._manager_finders.get(cls, _default_manager_getter) + except TypeError: + # due to weakref lookup on invalid object + return None + else: + return finder(cls) def state_of(self, instance): if instance is None: @@ -392,6 +398,7 @@ def _reinstall_default_lookups(): manager_of_class=_default_manager_getter ) ) + _instrumentation_factory._extended = False def _install_lookups(lookups): diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/mutable.py b/lib/python3.5/site-packages/sqlalchemy/ext/mutable.py index 7469bcb..97f720c 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/mutable.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/mutable.py @@ -1,5 +1,5 @@ # ext/mutable.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -402,6 +402,27 @@ class MutableBase(object): msg = "Attribute '%s' does not accept objects of type %s" raise ValueError(msg % (key, type(value))) + @classmethod + def _get_listen_keys(cls, attribute): + """Given a descriptor attribute, return a ``set()`` of the attribute + keys which indicate a change in the state of this attribute. + + This is normally just ``set([attribute.key])``, but can be overridden + to provide for additional keys. E.g. a :class:`.MutableComposite` + augments this set with the attribute keys associated with the columns + that comprise the composite value. + + This collection is consulted in the case of intercepting the + :meth:`.InstanceEvents.refresh` and + :meth:`.InstanceEvents.refresh_flush` events, which pass along a list + of attribute names that have been refreshed; the list is compared + against this set to determine if action needs to be taken. + + .. versionadded:: 1.0.5 + + """ + return set([attribute.key]) + @classmethod def _listen_on_attribute(cls, attribute, coerce, parent_cls): """Establish this type as a mutation listener for the given @@ -415,6 +436,8 @@ class MutableBase(object): # rely on "propagate" here parent_cls = attribute.class_ + listen_keys = cls._get_listen_keys(attribute) + def load(state, *args): """Listen for objects loaded or refreshed. @@ -429,6 +452,10 @@ class MutableBase(object): state.dict[key] = val val._parents[state.obj()] = key + def load_attrs(state, ctx, attrs): + if not attrs or listen_keys.intersection(attrs): + load(state) + def set(target, value, oldvalue, initiator): """Listen for set/replace events on the target data member. 
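
The purpose of the ``listen_keys`` intersection above can be sketched with a
composite value; assuming a hypothetical ``MutablePoint`` subclass of
:class:`.MutableComposite` mapped under the attribute key ``start`` against
columns ``x1`` and ``y1``, the overridden ``_get_listen_keys()`` hook (added
in the next hunk) reports the composite key plus its column attribute keys::

    MutablePoint._get_listen_keys(Vertex.start)  # -> {'start', 'x1', 'y1'}

so that a flush-time refresh which names only ``x1`` or ``y1`` still
re-establishes the parent bookkeeping for the composite value.
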
@@ -463,7 +490,9 @@ class MutableBase(object):

         event.listen(parent_cls, 'load', load,
                      raw=True, propagate=True)
-        event.listen(parent_cls, 'refresh', load,
+        event.listen(parent_cls, 'refresh', load_attrs,
+                     raw=True, propagate=True)
+        event.listen(parent_cls, 'refresh_flush', load_attrs,
                      raw=True, propagate=True)
         event.listen(attribute, 'set', set,
                      raw=True, retval=True, propagate=True)
@@ -574,6 +603,10 @@ class MutableComposite(MutableBase):

     """

+    @classmethod
+    def _get_listen_keys(cls, attribute):
+        return set([attribute.key]).union(attribute.property._attribute_keys)
+
     def changed(self):
         """Subclasses should call this method whenever change events occur."""

@@ -602,6 +635,18 @@ _setup_composite_listener()

 class MutableDict(Mutable, dict):
     """A dictionary type that implements :class:`.Mutable`.

+    The :class:`.MutableDict` object implements a dictionary that will
+    emit change events to the underlying mapping when the contents of
+    the dictionary are altered, including when values are added or removed.
+
+    Note that :class:`.MutableDict` does **not** apply mutable tracking to the
+    *values themselves* inside the dictionary.  Therefore it is not a
+    sufficient solution for the use case of tracking deep changes to a
+    *recursive* dictionary structure, such as a JSON structure.  To support
+    this use case, build a subclass of :class:`.MutableDict` that provides
+    appropriate coercion to the values placed in the dictionary so that they
+    too are "mutable", and emit events up to their parent structure.
+
     .. versionadded:: 0.8

     """
@@ -621,16 +666,30 @@ class MutableDict(Mutable, dict):
         dict.__delitem__(self, key)
         self.changed()

+    def update(self, *a, **kw):
+        dict.update(self, *a, **kw)
+        self.changed()
+
+    def pop(self, *arg):
+        result = dict.pop(self, *arg)
+        self.changed()
+        return result
+
+    def popitem(self):
+        result = dict.popitem(self)
+        self.changed()
+        return result
+
     def clear(self):
         dict.clear(self)
         self.changed()

     @classmethod
     def coerce(cls, key, value):
-        """Convert plain dictionary to MutableDict."""
-        if not isinstance(value, MutableDict):
+        """Convert plain dictionary to instance of this class."""
+        if not isinstance(value, cls):
             if isinstance(value, dict):
-                return MutableDict(value)
+                return cls(value)
             return Mutable.coerce(key, value)
         else:
             return value
diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/orderinglist.py b/lib/python3.5/site-packages/sqlalchemy/ext/orderinglist.py
index 67fda44..d060a4f 100644
--- a/lib/python3.5/site-packages/sqlalchemy/ext/orderinglist.py
+++ b/lib/python3.5/site-packages/sqlalchemy/ext/orderinglist.py
@@ -1,5 +1,5 @@
 # ext/orderinglist.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
 #
 #
 # This module is part of SQLAlchemy and is released under
@@ -119,7 +119,7 @@ start numbering at 1 or some other integer, provide ``count_from=1``.

 """
-from ..orm.collections import collection
+from ..orm.collections import collection, collection_adapter
 from ..
import util __all__ = ['ordering_list'] @@ -319,7 +319,10 @@ class OrderingList(list): def remove(self, entity): super(OrderingList, self).remove(entity) - self._reorder() + + adapter = collection_adapter(self) + if adapter and adapter._referenced_by_owner: + self._reorder() def pop(self, index=-1): entity = super(OrderingList, self).pop(index) diff --git a/lib/python3.5/site-packages/sqlalchemy/ext/serializer.py b/lib/python3.5/site-packages/sqlalchemy/ext/serializer.py index bf8d67d..893f7be 100644 --- a/lib/python3.5/site-packages/sqlalchemy/ext/serializer.py +++ b/lib/python3.5/site-packages/sqlalchemy/ext/serializer.py @@ -1,5 +1,5 @@ # ext/serializer.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/inspection.py b/lib/python3.5/site-packages/sqlalchemy/inspection.py index ab9f2ae..5c16c45 100644 --- a/lib/python3.5/site-packages/sqlalchemy/inspection.py +++ b/lib/python3.5/site-packages/sqlalchemy/inspection.py @@ -1,5 +1,5 @@ # sqlalchemy/inspect.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/interfaces.py b/lib/python3.5/site-packages/sqlalchemy/interfaces.py index ae11d19..464ad9f 100644 --- a/lib/python3.5/site-packages/sqlalchemy/interfaces.py +++ b/lib/python3.5/site-packages/sqlalchemy/interfaces.py @@ -1,5 +1,5 @@ # sqlalchemy/interfaces.py -# Copyright (C) 2007-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2007-2016 the SQLAlchemy authors and contributors # # Copyright (C) 2007 Jason Kirtland jek@discorporate.us # diff --git a/lib/python3.5/site-packages/sqlalchemy/log.py b/lib/python3.5/site-packages/sqlalchemy/log.py index b3c9ae0..b23de90 100644 --- a/lib/python3.5/site-packages/sqlalchemy/log.py +++ b/lib/python3.5/site-packages/sqlalchemy/log.py @@ -1,5 +1,5 @@ # sqlalchemy/log.py -# Copyright (C) 2006-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2006-2016 the SQLAlchemy authors and contributors # # Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk # diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/__init__.py b/lib/python3.5/site-packages/sqlalchemy/orm/__init__.py index 741e79b..7425737 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/__init__.py @@ -1,5 +1,5 @@ # orm/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -149,7 +149,12 @@ def backref(name, **kwargs): 'items':relationship( SomeItem, backref=backref('parent', lazy='subquery')) + .. 
seealso:: + + :ref:`relationships_backref` + """ + return (name, kwargs) diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/attributes.py b/lib/python3.5/site-packages/sqlalchemy/orm/attributes.py index 9ffe5c8..16b3264 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/attributes.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/attributes.py @@ -1,5 +1,5 @@ # orm/attributes.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -30,9 +30,8 @@ from .base import state_str, instance_str @inspection._self_inspects class QueryableAttribute(interfaces._MappedAttribute, - interfaces._InspectionAttr, + interfaces.InspectionAttr, interfaces.PropComparator): - """Base class for :term:`descriptor` objects that intercept attribute events on behalf of a :class:`.MapperProperty` object. The actual :class:`.MapperProperty` is accessible @@ -212,7 +211,6 @@ class QueryableAttribute(interfaces._MappedAttribute, class InstrumentedAttribute(QueryableAttribute): - """Class bound instrumented attribute which adds basic :term:`descriptor` methods. @@ -250,7 +248,6 @@ def create_proxied_attribute(descriptor): # function is removed from ext/hybrid.py class Proxy(QueryableAttribute): - """Presents the :class:`.QueryableAttribute` interface as a proxy on top of a Python descriptor / :class:`.PropComparator` combination. @@ -330,7 +327,6 @@ OP_REPLACE = util.symbol("REPLACE") class Event(object): - """A token propagated throughout the course of a chain of attribute events. @@ -349,23 +345,26 @@ class Event(object): .. versionadded:: 0.9.0 - """ + :var impl: The :class:`.AttributeImpl` which is the current event + initiator. - impl = None - """The :class:`.AttributeImpl` which is the current event initiator. - """ - - op = None - """The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE` or :attr:`.OP_REPLACE`, - indicating the source operation. + :var op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE` or + :attr:`.OP_REPLACE`, indicating the source operation. """ + __slots__ = 'impl', 'op', 'parent_token' + def __init__(self, attribute_impl, op): self.impl = attribute_impl self.op = op self.parent_token = self.impl.parent_token + def __eq__(self, other): + return isinstance(other, Event) and \ + other.impl is self.impl and \ + other.op == self.op + @property def key(self): return self.impl.key @@ -375,7 +374,6 @@ class Event(object): class AttributeImpl(object): - """internal implementation for instrumented attributes.""" def __init__(self, class_, key, @@ -455,6 +453,11 @@ class AttributeImpl(object): self.expire_missing = expire_missing + __slots__ = ( + 'class_', 'key', 'callable_', 'dispatch', 'trackparent', + 'parent_token', 'send_modified_events', 'is_equal', 'expire_missing' + ) + def __str__(self): return "%s.%s" % (self.class_.__name__, self.key) @@ -524,23 +527,6 @@ class AttributeImpl(object): state.parents[id_] = False - def set_callable(self, state, callable_): - """Set a callable function for this attribute on the given object. - - This callable will be executed when the attribute is next - accessed, and is assumed to construct part of the instances - previously stored state. When its value or values are loaded, - they will be established as part of the instance's *committed - state*. While *trackparent* information will be assembled for - these instances, attribute-level event handlers will not be - fired. 
- - The callable overrides the class level callable set in the - ``InstrumentedAttribute`` constructor. - - """ - state.callables[self.key] = callable_ - def get_history(self, state, dict_, passive=PASSIVE_OFF): raise NotImplementedError() @@ -565,7 +551,11 @@ class AttributeImpl(object): def initialize(self, state, dict_): """Initialize the given state's attribute with an empty value.""" - dict_[self.key] = None + # As of 1.0, we don't actually set a value in + # dict_. This is so that the state of the object does not get + # modified without emitting the appropriate events. + + return None def get(self, state, dict_, passive=PASSIVE_OFF): @@ -584,7 +574,9 @@ class AttributeImpl(object): if not passive & CALLABLES_OK: return PASSIVE_NO_RESULT - if key in state.callables: + if key in state.expired_attributes: + value = state._load_expired(state, passive) + elif key in state.callables: callable_ = state.callables[key] value = callable_(state, passive) elif self.callable_: @@ -632,7 +624,7 @@ class AttributeImpl(object): if self.key in state.committed_state: value = state.committed_state[self.key] - if value is NO_VALUE: + if value in (NO_VALUE, NEVER_SET): return None else: return value @@ -648,7 +640,6 @@ class AttributeImpl(object): class ScalarAttributeImpl(AttributeImpl): - """represents a scalar value-holding InstrumentedAttribute.""" accepts_scalar_loader = True @@ -656,6 +647,23 @@ class ScalarAttributeImpl(AttributeImpl): supports_population = True collection = False + __slots__ = '_replace_token', '_append_token', '_remove_token' + + def __init__(self, *arg, **kw): + super(ScalarAttributeImpl, self).__init__(*arg, **kw) + self._replace_token = self._append_token = None + self._remove_token = None + + def _init_append_token(self): + self._replace_token = self._append_token = Event(self, OP_REPLACE) + return self._replace_token + + _init_append_or_replace_token = _init_append_token + + def _init_remove_token(self): + self._remove_token = Event(self, OP_REMOVE) + return self._remove_token + def delete(self, state, dict_): # TODO: catch key errors, convert to attributeerror? @@ -694,27 +702,18 @@ class ScalarAttributeImpl(AttributeImpl): state._modified_event(dict_, self, old) dict_[self.key] = value - @util.memoized_property - def _replace_token(self): - return Event(self, OP_REPLACE) - - @util.memoized_property - def _append_token(self): - return Event(self, OP_REPLACE) - - @util.memoized_property - def _remove_token(self): - return Event(self, OP_REMOVE) - def fire_replace_event(self, state, dict_, value, previous, initiator): for fn in self.dispatch.set: value = fn( - state, value, previous, initiator or self._replace_token) + state, value, previous, + initiator or self._replace_token or + self._init_append_or_replace_token()) return value def fire_remove_event(self, state, dict_, value, initiator): for fn in self.dispatch.remove: - fn(state, value, initiator or self._remove_token) + fn(state, value, + initiator or self._remove_token or self._init_remove_token()) @property def type(self): @@ -722,7 +721,6 @@ class ScalarAttributeImpl(AttributeImpl): class ScalarObjectAttributeImpl(ScalarAttributeImpl): - """represents a scalar-holding InstrumentedAttribute, where the target object is also instrumented. 
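
The change to ``initialize()`` above has a user-visible consequence which can
be sketched as follows, where ``A`` stands for any mapped class with a scalar
column attribute ``data``::

    a = A()
    assert a.data is None            # a never-set attribute still reads as None
    assert 'data' not in a.__dict__  # but as of 1.0 the dict is not mutated
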
@@ -735,9 +733,13 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl): supports_population = True collection = False + __slots__ = () + def delete(self, state, dict_): old = self.get(state, dict_) - self.fire_remove_event(state, dict_, old, self._remove_token) + self.fire_remove_event( + state, dict_, old, + self._remove_token or self._init_remove_token()) del dict_[self.key] def get_history(self, state, dict_, passive=PASSIVE_OFF): @@ -787,14 +789,7 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl): old = self.get( state, dict_, passive=PASSIVE_ONLY_PERSISTENT | NO_AUTOFLUSH) else: - # would like to call with PASSIVE_NO_FETCH ^ INIT_OK. However, - # we have a long-standing behavior that a "get()" on never set - # should implicitly set the value to None. Leaving INIT_OK - # set here means we are consistent whether or not we did a get - # first. - # see test_use_object_set_None vs. - # test_use_object_get_first_set_None in test_attributes.py - old = self.get(state, dict_, passive=PASSIVE_NO_FETCH) + old = self.get(state, dict_, passive=PASSIVE_NO_FETCH ^ INIT_OK) if check_old is not None and \ old is not PASSIVE_NO_RESULT and \ @@ -817,7 +812,8 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl): self.sethasparent(instance_state(value), state, False) for fn in self.dispatch.remove: - fn(state, value, initiator or self._remove_token) + fn(state, value, initiator or + self._remove_token or self._init_remove_token()) state._modified_event(dict_, self, value) @@ -829,7 +825,8 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl): for fn in self.dispatch.set: value = fn( - state, value, previous, initiator or self._replace_token) + state, value, previous, initiator or + self._replace_token or self._init_append_or_replace_token()) state._modified_event(dict_, self, previous) @@ -841,7 +838,6 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl): class CollectionAttributeImpl(AttributeImpl): - """A collection-holding attribute that instruments changes in membership. Only handles collections of instrumented objects. 
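The same lazy-token pattern is applied to ``CollectionAttributeImpl`` in the hunks that follow; its ``fire_append_event`` and ``fire_remove_event`` hand the tokens to collection listeners. A sketch under the assumption of a ``User.addresses`` relationship collection::

    from sqlalchemy import event

    @event.listens_for(User.addresses, 'append')
    def receive_append(target, value, initiator):
        # initiator is the OP_APPEND token, created on first use
        print("append %r -> %r" % (value, target))

    @event.listens_for(User.addresses, 'remove')
    def receive_remove(target, value, initiator):
        # initiator is the OP_REMOVE token
        print("remove %r <- %r" % (value, target))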
@@ -857,6 +853,8 @@ class CollectionAttributeImpl(AttributeImpl): supports_population = True collection = True + __slots__ = 'copy', 'collection_factory', '_append_token', '_remove_token' + def __init__(self, class_, key, callable_, dispatch, typecallable=None, trackparent=False, extension=None, copy_function=None, compare_function=None, **kwargs): @@ -873,6 +871,26 @@ class CollectionAttributeImpl(AttributeImpl): copy_function = self.__copy self.copy = copy_function self.collection_factory = typecallable + self._append_token = None + self._remove_token = None + + if getattr(self.collection_factory, "_sa_linker", None): + + @event.listens_for(self, "init_collection") + def link(target, collection, collection_adapter): + collection._sa_linker(collection_adapter) + + @event.listens_for(self, "dispose_collection") + def unlink(target, collection, collection_adapter): + collection._sa_linker(None) + + def _init_append_token(self): + self._append_token = Event(self, OP_APPEND) + return self._append_token + + def _init_remove_token(self): + self._remove_token = Event(self, OP_REMOVE) + return self._remove_token def __copy(self, item): return [y for y in collections.collection_adapter(item)] @@ -916,17 +934,11 @@ class CollectionAttributeImpl(AttributeImpl): return [(instance_state(o), o) for o in current] - @util.memoized_property - def _append_token(self): - return Event(self, OP_APPEND) - - @util.memoized_property - def _remove_token(self): - return Event(self, OP_REMOVE) - def fire_append_event(self, state, dict_, value, initiator): for fn in self.dispatch.append: - value = fn(state, value, initiator or self._append_token) + value = fn( + state, value, + initiator or self._append_token or self._init_append_token()) state._modified_event(dict_, self, NEVER_SET, True) @@ -943,7 +955,8 @@ class CollectionAttributeImpl(AttributeImpl): self.sethasparent(instance_state(value), state, False) for fn in self.dispatch.remove: - fn(state, value, initiator or self._remove_token) + fn(state, value, + initiator or self._remove_token or self._init_remove_token()) state._modified_event(dict_, self, NEVER_SET, True) @@ -966,9 +979,14 @@ class CollectionAttributeImpl(AttributeImpl): return user_data def _initialize_collection(self, state): - return state.manager.initialize_collection( + + adapter, collection = state.manager.initialize_collection( self.key, state, self.collection_factory) + self.dispatch.init_collection(state, collection, adapter) + + return adapter, collection + def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): collection = self.get_collection(state, dict_, passive=passive) if collection is PASSIVE_NO_RESULT: @@ -1037,12 +1055,14 @@ class CollectionAttributeImpl(AttributeImpl): # place a copy of "old" in state.committed_state state._modified_event(dict_, self, old, True) - old_collection = getattr(old, '_sa_adapter') + old_collection = old._sa_adapter dict_[self.key] = user_data collections.bulk_replace(new_values, old_collection, new_collection) - old_collection.unlink(old) + + del old._sa_adapter + self.dispatch.dispose_collection(state, old, old_collection) def _invalidate_collection(self, collection): adapter = getattr(collection, '_sa_adapter') @@ -1128,7 +1148,8 @@ def backref_listeners(attribute, key, uselist): impl.pop(old_state, old_dict, state.obj(), - parent_impl._append_token, + parent_impl._append_token or + parent_impl._init_append_token(), passive=PASSIVE_NO_FETCH) if child is not None: @@ -1208,7 +1229,6 @@ History = util.namedtuple("History", [ class 
History(History): - """A 3-tuple of added, unchanged and deleted values, representing the changes which have occurred on an instrumented attribute. diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/base.py b/lib/python3.5/site-packages/sqlalchemy/orm/base.py index 61630b9..7947cd7 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/base.py @@ -1,5 +1,5 @@ # orm/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -144,40 +144,50 @@ _INSTRUMENTOR = ('mapper', 'instrumentor') EXT_CONTINUE = util.symbol('EXT_CONTINUE') EXT_STOP = util.symbol('EXT_STOP') -ONETOMANY = util.symbol('ONETOMANY', - """Indicates the one-to-many direction for a :func:`.relationship`. +ONETOMANY = util.symbol( + 'ONETOMANY', + """Indicates the one-to-many direction for a :func:`.relationship`. -This symbol is typically used by the internals but may be exposed within -certain API features. + This symbol is typically used by the internals but may be exposed within + certain API features. -""") + """) -MANYTOONE = util.symbol('MANYTOONE', - """Indicates the many-to-one direction for a :func:`.relationship`. +MANYTOONE = util.symbol( + 'MANYTOONE', + """Indicates the many-to-one direction for a :func:`.relationship`. -This symbol is typically used by the internals but may be exposed within -certain API features. + This symbol is typically used by the internals but may be exposed within + certain API features. -""") + """) -MANYTOMANY = util.symbol('MANYTOMANY', - """Indicates the many-to-many direction for a :func:`.relationship`. +MANYTOMANY = util.symbol( + 'MANYTOMANY', + """Indicates the many-to-many direction for a :func:`.relationship`. -This symbol is typically used by the internals but may be exposed within -certain API features. + This symbol is typically used by the internals but may be exposed within + certain API features. -""") + """) -NOT_EXTENSION = util.symbol('NOT_EXTENSION', - """Symbol indicating an :class:`_InspectionAttr` that's - not part of sqlalchemy.ext. +NOT_EXTENSION = util.symbol( + 'NOT_EXTENSION', + """Symbol indicating an :class:`InspectionAttr` that's + not part of sqlalchemy.ext. - Is assigned to the :attr:`._InspectionAttr.extension_type` - attibute. + Is assigned to the :attr:`.InspectionAttr.extension_type` + attribute. -""") + """) -_none_set = frozenset([None]) +_never_set = frozenset([NEVER_SET]) + +_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT]) + +_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED") + +_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE") def _generative(*assertions): @@ -319,10 +329,9 @@ def _is_mapped_class(entity): insp = inspection.inspect(entity, False) return insp is not None and \ - hasattr(insp, "mapper") and \ + not insp.is_clause_element and \ ( - insp.is_mapper - or insp.is_aliased_class + insp.is_mapper or insp.is_aliased_class ) @@ -419,7 +428,7 @@ def class_mapper(class_, configure=True): return mapper -class _InspectionAttr(object): +class InspectionAttr(object): """A base class applied to all ORM objects that can be returned by the :func:`.inspect` function. @@ -433,6 +442,7 @@ class _InspectionAttr(object): here intact for forwards-compatibility. 
""" + __slots__ = () is_selectable = False """Return True if this object is an instance of :class:`.Selectable`.""" @@ -456,7 +466,7 @@ class _InspectionAttr(object): :class:`.QueryableAttribute` which handles attributes events on behalf of a :class:`.MapperProperty`. But can also be an extension type such as :class:`.AssociationProxy` or :class:`.hybrid_property`. - The :attr:`._InspectionAttr.extension_type` will refer to a constant + The :attr:`.InspectionAttr.extension_type` will refer to a constant identifying the specific subtype. .. seealso:: @@ -485,8 +495,46 @@ class _InspectionAttr(object): """ +class InspectionAttrInfo(InspectionAttr): + """Adds the ``.info`` attribute to :class:`.InspectionAttr`. + + The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo` + is that the former is compatible as a mixin for classes that specify + ``__slots__``; this is essentially an implementation artifact. + + """ + + @util.memoized_property + def info(self): + """Info dictionary associated with the object, allowing user-defined + data to be associated with this :class:`.InspectionAttr`. + + The dictionary is generated when first accessed. Alternatively, + it can be specified as a constructor argument to the + :func:`.column_property`, :func:`.relationship`, or :func:`.composite` + functions. + + .. versionadded:: 0.8 Added support for .info to all + :class:`.MapperProperty` subclasses. + + .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also + available on extension types via the + :attr:`.InspectionAttrInfo.info` attribute, so that it can apply + to a wider variety of ORM and extension constructs. + + .. seealso:: + + :attr:`.QueryableAttribute.info` + + :attr:`.SchemaItem.info` + + """ + return {} + + class _MappedAttribute(object): """Mixin for attributes which should be replaced by mapper-assigned attributes. """ + __slots__ = () diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/collections.py b/lib/python3.5/site-packages/sqlalchemy/orm/collections.py index 698677a..f3c609f 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/collections.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/collections.py @@ -1,5 +1,5 @@ # orm/collections.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -111,6 +111,7 @@ from ..sql import expression from .. import util, exc as sa_exc from . import base +from sqlalchemy.util.compat import inspect_getargspec __all__ = ['collection', 'collection_adapter', 'mapped_collection', 'column_mapped_collection', @@ -429,6 +430,10 @@ class collection(object): the instance. A single argument is passed: the collection adapter that has been linked, or None if unlinking. + .. deprecated:: 1.0.0 - the :meth:`.collection.linker` handler + is superseded by the :meth:`.AttributeEvents.init_collection` + and :meth:`.AttributeEvents.dispose_collection` handlers. + """ fn._sa_instrument_role = 'linker' return fn @@ -575,7 +580,7 @@ class CollectionAdapter(object): self._key = attr.key self._data = weakref.ref(data) self.owner_state = owner_state - self.link_to_self(data) + data._sa_adapter = self def _warn_invalidated(self): util.warn("This collection has been invalidated.") @@ -585,24 +590,20 @@ class CollectionAdapter(object): "The entity collection being adapted." 
return self._data() + @property + def _referenced_by_owner(self): + """return True if the owner state still refers to this collection. + + This will return False within a bulk replace operation, + where this collection is the one being replaced. + + """ + return self.owner_state.dict[self._key] is self._data() + @util.memoized_property def attr(self): return self.owner_state.manager[self._key].impl - def link_to_self(self, data): - """Link a collection to this adapter""" - - data._sa_adapter = self - if data._sa_linker: - data._sa_linker(self) - - def unlink(self, data): - """Unlink a collection from any adapter""" - - del data._sa_adapter - if data._sa_linker: - data._sa_linker(None) - def adapt_like_to_iterable(self, obj): """Converts collection-compatible objects to an iterable of values. @@ -861,11 +862,24 @@ def _instrument_class(cls): "Can not instrument a built-in type. Use a " "subclass, even a trivial one.") + roles, methods = _locate_roles_and_methods(cls) + + _setup_canned_roles(cls, roles, methods) + + _assert_required_roles(cls, roles, methods) + + _set_collection_attributes(cls, roles, methods) + + +def _locate_roles_and_methods(cls): + """search for _sa_instrument_role-decorated methods in + method resolution order, assign to roles. + + """ + roles = {} methods = {} - # search for _sa_instrument_role-decorated methods in - # method resolution order, assign to roles for supercls in cls.__mro__: for name, method in vars(supercls).items(): if not util.callable(method): @@ -890,14 +904,19 @@ def _instrument_class(cls): assert op in ('fire_append_event', 'fire_remove_event') after = op if before: - methods[name] = before[0], before[1], after + methods[name] = before + (after, ) elif after: methods[name] = None, None, after + return roles, methods - # see if this class has "canned" roles based on a known - # collection type (dict, set, list). Apply those roles - # as needed to the "roles" dictionary, and also - # prepare "decorator" methods + +def _setup_canned_roles(cls, roles, methods): + """see if this class has "canned" roles based on a known + collection type (dict, set, list). 
Apply those roles + as needed to the "roles" dictionary, and also + prepare "decorator" methods + + """ collection_type = util.duck_type_collection(cls) if collection_type in __interfaces: canned_roles, decorators = __interfaces[collection_type] @@ -911,8 +930,12 @@ def _instrument_class(cls): not hasattr(fn, '_sa_instrumented')): setattr(cls, method, decorator(fn)) - # ensure all roles are present, and apply implicit instrumentation if - # needed + +def _assert_required_roles(cls, roles, methods): + """ensure all roles are present, and apply implicit instrumentation if + needed + + """ if 'appender' not in roles or not hasattr(cls, roles['appender']): raise sa_exc.ArgumentError( "Type %s must elect an appender method to be " @@ -934,8 +957,12 @@ def _instrument_class(cls): "Type %s must elect an iterator method to be " "a collection class" % cls.__name__) - # apply ad-hoc instrumentation from decorators, class-level defaults - # and implicit role declarations + +def _set_collection_attributes(cls, roles, methods): + """apply ad-hoc instrumentation from decorators, class-level defaults + and implicit role declarations + + """ for method_name, (before, argument, after) in methods.items(): setattr(cls, method_name, _instrument_membership_mutator(getattr(cls, method_name), @@ -945,8 +972,7 @@ def _instrument_class(cls): setattr(cls, '_sa_%s' % role, getattr(cls, method_name)) cls._sa_adapter = None - if not hasattr(cls, '_sa_linker'): - cls._sa_linker = None + if not hasattr(cls, '_sa_converter'): cls._sa_converter = None cls._sa_instrumented = id(cls) @@ -957,7 +983,7 @@ def _instrument_membership_mutator(method, before, argument, after): adapter.""" # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))' if before: - fn_args = list(util.flatten_iterator(inspect.getargspec(method)[0])) + fn_args = list(util.flatten_iterator(inspect_getargspec(method)[0])) if isinstance(argument, int): pos_arg = argument named_arg = len(fn_args) > argument and fn_args[argument] or None @@ -1482,8 +1508,8 @@ class MappedCollection(dict): def __init__(self, keyfunc): """Create a new collection with keying provided by keyfunc. - keyfunc may be any callable any callable that takes an object and - returns an object for use as a dictionary key. + keyfunc may be any callable that takes an object and returns an object + for use as a dictionary key. 
The keyfunc will be called every time the ORM needs to add a member by value-only (such as when loading instances from the database) or diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/dependency.py b/lib/python3.5/site-packages/sqlalchemy/orm/dependency.py index c1cf66f..a3e5b12 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/dependency.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/dependency.py @@ -1,5 +1,5 @@ # orm/dependency.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -303,9 +303,9 @@ class DependencyProcessor(object): set ) - def _post_update(self, state, uowcommit, related): + def _post_update(self, state, uowcommit, related, is_m2o_delete=False): for x in related: - if x is not None: + if not is_m2o_delete or x is not None: uowcommit.issue_post_update( state, [r for l, r in self.prop.synchronize_pairs] @@ -740,7 +740,9 @@ class ManyToOneDP(DependencyProcessor): self.key, self._passive_delete_flag) if history: - self._post_update(state, uowcommit, history.sum()) + self._post_update( + state, uowcommit, history.sum(), + is_m2o_delete=True) def process_saves(self, uowcommit, states): for state in states: @@ -1119,6 +1121,7 @@ class ManyToManyDP(DependencyProcessor): if c.key in associationrow ])) result = connection.execute(statement, secondary_update) + if result.supports_sane_multi_rowcount() and \ result.rowcount != len(secondary_update): raise exc.StaleDataError( diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/deprecated_interfaces.py b/lib/python3.5/site-packages/sqlalchemy/orm/deprecated_interfaces.py index fa693c9..6477e82 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/deprecated_interfaces.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/deprecated_interfaces.py @@ -1,5 +1,5 @@ # orm/deprecated_interfaces.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -67,10 +67,6 @@ class MapperExtension(object): ( 'init_instance', 'init_failed', - 'translate_row', - 'create_instance', - 'append_result', - 'populate_instance', 'reconstruct_instance', 'before_insert', 'after_insert', @@ -156,108 +152,6 @@ class MapperExtension(object): """ return EXT_CONTINUE - def translate_row(self, mapper, context, row): - """Perform pre-processing on the given result row and return a - new row instance. - - This is called when the mapper first receives a row, before - the object identity or the instance itself has been derived - from that row. The given row may or may not be a - ``RowProxy`` object - it will always be a dictionary-like - object which contains mapped columns as keys. The - returned object should also be a dictionary-like object - which recognizes mapped columns as keys. - - If the ultimate return value is EXT_CONTINUE, the row - is not translated. - - """ - return EXT_CONTINUE - - def create_instance(self, mapper, selectcontext, row, class_): - """Receive a row when a new object instance is about to be - created from that row. - - The method can choose to create the instance itself, or it can return - EXT_CONTINUE to indicate normal object creation should take place. - - mapper - The mapper doing the operation - - selectcontext - The QueryContext generated from the Query. 
- - row - The result row from the database - - class\_ - The class we are mapping. - - return value - A new object instance, or EXT_CONTINUE - - """ - return EXT_CONTINUE - - def append_result(self, mapper, selectcontext, row, instance, - result, **flags): - """Receive an object instance before that instance is appended - to a result list. - - If this method returns EXT_CONTINUE, result appending will proceed - normally. if this method returns any other value or None, - result appending will not proceed for this instance, giving - this extension an opportunity to do the appending itself, if - desired. - - mapper - The mapper doing the operation. - - selectcontext - The QueryContext generated from the Query. - - row - The result row from the database. - - instance - The object instance to be appended to the result. - - result - List to which results are being appended. - - \**flags - extra information about the row, same as criterion in - ``create_row_processor()`` method of - :class:`~sqlalchemy.orm.interfaces.MapperProperty` - """ - - return EXT_CONTINUE - - def populate_instance(self, mapper, selectcontext, row, - instance, **flags): - """Receive an instance before that instance has - its attributes populated. - - This usually corresponds to a newly loaded instance but may - also correspond to an already-loaded instance which has - unloaded attributes to be populated. The method may be called - many times for a single instance, as multiple result rows are - used to populate eagerly loaded collections. - - If this method returns EXT_CONTINUE, instance population will - proceed normally. If any other value or None is returned, - instance population will not proceed, giving this extension an - opportunity to populate the instance itself, if desired. - - .. deprecated:: 0.5 - Most usages of this hook are obsolete. For a - generic "object has been newly created from a row" hook, use - ``reconstruct_instance()``, or the ``@orm.reconstructor`` - decorator. - - """ - return EXT_CONTINUE - def reconstruct_instance(self, mapper, instance): """Receive an object instance after it has been created via ``__new__``, and after initial attribute population has diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/descriptor_props.py b/lib/python3.5/site-packages/sqlalchemy/orm/descriptor_props.py index 5ed24b8..6c87ef9 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/descriptor_props.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/descriptor_props.py @@ -1,5 +1,5 @@ # orm/descriptor_props.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -143,6 +143,7 @@ class CompositeProperty(DescriptorProperty): class. **Deprecated.** Please see :class:`.AttributeEvents`. 
""" + super(CompositeProperty, self).__init__() self.attrs = attrs self.composite_class = class_ @@ -372,9 +373,9 @@ class CompositeProperty(DescriptorProperty): property.key, *expr) def create_row_processor(self, query, procs, labels): - def proc(row, result): + def proc(row): return self.property.composite_class( - *[proc(row, result) for proc in procs]) + *[proc(row) for proc in procs]) return proc class Comparator(PropComparator): @@ -471,6 +472,7 @@ class ConcreteInheritedProperty(DescriptorProperty): return comparator_callable def __init__(self): + super(ConcreteInheritedProperty, self).__init__() def warn(): raise AttributeError("Concrete %s does not implement " "attribute %r at the instance level. Add " @@ -496,7 +498,7 @@ class SynonymProperty(DescriptorProperty): def __init__(self, name, map_column=None, descriptor=None, comparator_factory=None, - doc=None): + doc=None, info=None): """Denote an attribute name as a synonym to a mapped property, in that the attribute will mirror the value and expression behavior of another attribute. @@ -531,6 +533,11 @@ class SynonymProperty(DescriptorProperty): conjunction with the ``descriptor`` argument in order to link a user-defined descriptor as a "wrapper" for an existing column. + :param info: Optional data dictionary which will be populated into the + :attr:`.InspectionAttr.info` attribute of this object. + + .. versionadded:: 1.0.0 + :param comparator_factory: A subclass of :class:`.PropComparator` that will provide custom comparison behavior at the SQL expression level. @@ -550,12 +557,15 @@ class SynonymProperty(DescriptorProperty): more complicated attribute-wrapping schemes than synonyms. """ + super(SynonymProperty, self).__init__() self.name = name self.map_column = map_column self.descriptor = descriptor self.comparator_factory = comparator_factory self.doc = doc or (descriptor and descriptor.__doc__) or None + if info: + self.info = info util.set_creation_order(self) @@ -608,7 +618,8 @@ class SynonymProperty(DescriptorProperty): class ComparableProperty(DescriptorProperty): """Instruments a Python property for use in query expressions.""" - def __init__(self, comparator_factory, descriptor=None, doc=None): + def __init__( + self, comparator_factory, descriptor=None, doc=None, info=None): """Provides a method of applying a :class:`.PropComparator` to any Python descriptor attribute. @@ -670,10 +681,18 @@ class ComparableProperty(DescriptorProperty): The like-named descriptor will be automatically retrieved from the mapped class if left blank in a ``properties`` declaration. + :param info: Optional data dictionary which will be populated into the + :attr:`.InspectionAttr.info` attribute of this object. + + .. 
versionadded:: 1.0.0 + """ + super(ComparableProperty, self).__init__() self.descriptor = descriptor self.comparator_factory = comparator_factory self.doc = doc or (descriptor and descriptor.__doc__) or None + if info: + self.info = info util.set_creation_order(self) def _comparator_factory(self, mapper): diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/dynamic.py b/lib/python3.5/site-packages/sqlalchemy/orm/dynamic.py index 51db1b1..88187cd 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/dynamic.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/dynamic.py @@ -1,5 +1,5 @@ # orm/dynamic.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -221,10 +221,8 @@ class AppenderMixin(object): mapper = object_mapper(instance) prop = mapper._props[self.attr.key] - self._criterion = prop.compare( - operators.eq, + self._criterion = prop._with_parent( instance, - value_is_parent=True, alias_secondary=False) if self.attr.order_by: diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/evaluator.py b/lib/python3.5/site-packages/sqlalchemy/orm/evaluator.py index efd2bfe..534e7fa 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/evaluator.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/evaluator.py @@ -1,5 +1,5 @@ # orm/evaluator.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -7,7 +7,6 @@ import operator from ..sql import operators -from .. import util class UnevaluatableError(Exception): @@ -27,7 +26,6 @@ _notimplemented_ops = set(getattr(operators, op) class EvaluatorCompiler(object): - def __init__(self, target_cls=None): self.target_cls = target_cls @@ -55,14 +53,9 @@ class EvaluatorCompiler(object): parentmapper = clause._annotations['parentmapper'] if self.target_cls and not issubclass( self.target_cls, parentmapper.class_): - util.warn( - "Can't do in-Python evaluation of criteria against " - "alternate class %s; " - "expiration of objects will not be accurate " - "and/or may fail. synchronize_session should be set to " - "False or 'fetch'. " - "This warning will be an exception " - "in 1.0." % parentmapper.class_ + raise UnevaluatableError( + "Can't evaluate criteria against alternate class %s" % + parentmapper.class_ ) key = parentmapper._columntoproperty[clause].key else: diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/events.py b/lib/python3.5/site-packages/sqlalchemy/orm/events.py index aa99673..67ce46e 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/events.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/events.py @@ -1,5 +1,5 @@ # orm/events.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -17,7 +17,8 @@ from . import mapperlib, instrumentation from .session import Session, sessionmaker from .scoping import scoped_session from .attributes import QueryableAttribute - +from .query import Query +from sqlalchemy.util.compat import inspect_getargspec class InstrumentationEvents(event.Events): """Events related to class instrumentation events. 
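The ``evaluator.py`` change above promotes what was a warning into ``UnevaluatableError``: a bulk update or delete whose criteria reference a class other than the target can no longer run under the default ``'evaluate'`` session synchronization. A hedged sketch of the fallback, assuming ``User`` and ``Address`` mappings and a working ``session``::

    # Criteria against Address cannot be evaluated in Python against
    # User instances, so 'evaluate' now raises UnevaluatableError here;
    # synchronize_session='fetch' (or False) sidesteps the evaluator.
    session.query(User).\
        filter(User.id == Address.user_id).\
        filter(Address.email == 'old@example.com').\
        update({'name': 'archived'}, synchronize_session='fetch')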
@@ -61,7 +62,8 @@ class InstrumentationEvents(event.Events): @classmethod def _listen(cls, event_key, propagate=True, **kw): target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, event_key.fn + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn def listen(target_cls, *arg): listen_cls = target() @@ -192,7 +194,8 @@ class InstanceEvents(event.Events): @classmethod def _listen(cls, event_key, raw=False, propagate=False, **kw): target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, event_key.fn + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn if not raw: def wrap(state, *arg, **kw): @@ -214,14 +217,41 @@ class InstanceEvents(event.Events): def first_init(self, manager, cls): """Called when the first instance of a particular mapping is called. + This event is called when the ``__init__`` method of a class + is called the first time for that particular class. The event + invokes before ``__init__`` actually proceeds as well as before + the :meth:`.InstanceEvents.init` event is invoked. + """ def init(self, target, args, kwargs): """Receive an instance when its constructor is called. This method is only called during a userland construction of - an object. It is not called when an object is loaded from the - database. + an object, in conjunction with the object's constructor, e.g. + its ``__init__`` method. It is not called when an object is + loaded from the database; see the :meth:`.InstanceEvents.load` + event in order to intercept a database load. + + The event is called before the actual ``__init__`` constructor + of the object is called. The ``kwargs`` dictionary may be + modified in-place in order to affect what is passed to + ``__init__``. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param args: positional arguments passed to the ``__init__`` method. + This is passed as a tuple and is currently immutable. + :param kwargs: keyword arguments passed to the ``__init__`` method. + This structure *can* be altered in place. + + .. seealso:: + + :meth:`.InstanceEvents.init_failure` + + :meth:`.InstanceEvents.load` """ @@ -230,8 +260,31 @@ class InstanceEvents(event.Events): and raised an exception. This method is only called during a userland construction of - an object. It is not called when an object is loaded from the - database. + an object, in conjunction with the object's constructor, e.g. + its ``__init__`` method. It is not called when an object is loaded + from the database. + + The event is invoked after an exception raised by the ``__init__`` + method is caught. After the event + is invoked, the original exception is re-raised outwards, so that + the construction of the object still raises an exception. The + actual exception and stack trace raised should be present in + ``sys.exc_info()``. + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param args: positional arguments that were passed to the ``__init__`` + method. + :param kwargs: keyword arguments that were passed to the ``__init__`` + method. + + .. 
seealso:: + + :meth:`.InstanceEvents.init` + + :meth:`.InstanceEvents.load` """ @@ -258,22 +311,58 @@ class InstanceEvents(event.Events): ``None`` if the load does not correspond to a :class:`.Query`, such as during :meth:`.Session.merge`. + .. seealso:: + + :meth:`.InstanceEvents.init` + + :meth:`.InstanceEvents.refresh` + """ def refresh(self, target, context, attrs): """Receive an object instance after one or more attributes have been refreshed from a query. + Contrast this to the :meth:`.InstanceEvents.load` method, which + is invoked when the object is first loaded from a query. + :param target: the mapped instance. If the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. :param context: the :class:`.QueryContext` corresponding to the current :class:`.Query` in progress. - :param attrs: iterable collection of attribute names which + :param attrs: sequence of attribute names which were populated, or None if all column-mapped, non-deferred attributes were populated. + .. seealso:: + + :meth:`.InstanceEvents.load` + + """ + + def refresh_flush(self, target, flush_context, attrs): + """Receive an object instance after one or more attributes have + been refreshed within the persistence of the object. + + This event is the same as :meth:`.InstanceEvents.refresh` except + it is invoked within the unit of work flush process, and the values + here typically come from the process of handling an INSERT or + UPDATE, such as via the RETURNING clause or from Python-side default + values. + + .. versionadded:: 1.0.5 + + :param target: the mapped instance. If + the event is configured with ``raw=True``, this will + instead be the :class:`.InstanceState` state-management + object associated with the instance. + :param flush_context: Internal :class:`.UOWTransaction` object + which handles the details of the flush. + :param attrs: sequence of attribute names which + were populated. + """ def expire(self, target, attrs): @@ -287,24 +376,12 @@ class InstanceEvents(event.Events): the event is configured with ``raw=True``, this will instead be the :class:`.InstanceState` state-management object associated with the instance. - :param attrs: iterable collection of attribute + :param attrs: sequence of attribute names which were expired, or None if all attributes were expired. """ - def resurrect(self, target): - """Receive an object instance as it is 'resurrected' from - garbage collection, which occurs when a "dirty" state falls - out of scope. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - - """ - def pickle(self, target, state_dict): """Receive an object instance when its associated state is being pickled. 
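The expanded ``InstanceEvents.init`` and ``InstanceEvents.init_failure`` docstrings above note that ``kwargs`` may be mutated in place while ``args`` may not. A minimal sketch, again assuming a hypothetical declarative ``User`` class::

    from sqlalchemy import event

    @event.listens_for(User, 'init')
    def apply_defaults(target, args, kwargs):
        # runs before User.__init__; a keyword supplied here is
        # visible to the constructor itself
        kwargs.setdefault('active', True)

    @event.listens_for(User, 'init_failure')
    def constructor_failed(target, args, kwargs):
        # called after __init__ raises; the original exception is
        # re-raised once this hook returns
        print("User() failed: args=%r kwargs=%r" % (args, kwargs))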
@@ -510,7 +587,8 @@ class MapperEvents(event.Events): def _listen( cls, event_key, raw=False, retval=False, propagate=False, **kw): target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, event_key.fn + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn if identifier in ("before_configured", "after_configured") and \ target is not mapperlib.Mapper: @@ -524,7 +602,7 @@ class MapperEvents(event.Events): meth = getattr(cls, identifier) try: target_index = \ - inspect.getargspec(meth)[0].index('target') - 1 + inspect_getargspec(meth)[0].index('target') - 1 except ValueError: target_index = None @@ -575,32 +653,67 @@ class MapperEvents(event.Events): """ def mapper_configured(self, mapper, class_): - """Called when the mapper for the class is fully configured. + """Called when a specific mapper has completed its own configuration + within the scope of the :func:`.configure_mappers` call. - This event is the latest phase of mapper construction, and - is invoked when the mapped classes are first used, so that - relationships between mappers can be resolved. When the event is - called, the mapper should be in its final state. + The :meth:`.MapperEvents.mapper_configured` event is invoked + for each mapper that is encountered when the + :func:`.orm.configure_mappers` function proceeds through the current + list of not-yet-configured mappers. + :func:`.orm.configure_mappers` is typically invoked + automatically as mappings are first used, as well as each time + new mappers have been made available and new mapper use is + detected. - While the configuration event normally occurs automatically, - it can be forced to occur ahead of time, in the case where the event - is needed before any actual mapper usage, by using the - :func:`.configure_mappers` function. + When the event is called, the mapper should be in its final + state, but **not including backrefs** that may be invoked from + other mappers; they might still be pending within the + configuration operation. Bidirectional relationships that + are instead configured via the + :paramref:`.orm.relationship.back_populates` argument + *will* be fully available, since this style of relationship does not + rely upon other possibly-not-configured mappers to know that they + exist. + For an event that is guaranteed to have **all** mappers ready + to go including backrefs that are defined only on other + mappings, use the :meth:`.MapperEvents.after_configured` + event; this event invokes only after all known mappings have been + fully configured. + + The :meth:`.MapperEvents.mapper_configured` event, unlike + :meth:`.MapperEvents.before_configured` or + :meth:`.MapperEvents.after_configured`, + is called for each mapper/class individually, and the mapper is + passed to the event itself. It also is called exactly once for + a particular mapper. The event is therefore useful for + configurational steps that benefit from being invoked just once + on a specific mapper basis, which don't require that "backref" + configurations are necessarily ready yet. :param mapper: the :class:`.Mapper` which is the target of this event. :param class\_: the mapped class. + .. seealso:: + + :meth:`.MapperEvents.before_configured` + + :meth:`.MapperEvents.after_configured` + """ # TODO: need coverage for this event def before_configured(self): """Called before a series of mappers have been configured. 
- This corresponds to the :func:`.orm.configure_mappers` call, which - note is usually called automatically as mappings are first - used. + The :meth:`.MapperEvents.before_configured` event is invoked + each time the :func:`.orm.configure_mappers` function is + invoked, before the function has done any of its work. + :func:`.orm.configure_mappers` is typically invoked + automatically as mappings are first used, as well as each time + new mappers have been made available and new mapper use is + detected. This event can **only** be applied to the :class:`.Mapper` class or :func:`.mapper` function, and not to individual mappings or @@ -612,11 +725,16 @@ class MapperEvents(event.Events): def go(): # ... + Contrast this event to :meth:`.MapperEvents.after_configured`, + which is invoked after the series of mappers has been configured, + as well as :meth:`.MapperEvents.mapper_configured`, which is invoked + on a per-mapper basis as each one is configured to the extent possible. + Theoretically this event is called once per application, but is actually called any time new mappers are to be affected by a :func:`.orm.configure_mappers` call. If new mappings are constructed after existing ones have - already been used, this event can be called again. To ensure + already been used, this event will likely be called again. To ensure that a particular event is only called once and no further, the ``once=True`` argument (new in 0.9.4) can be applied:: @@ -629,14 +747,33 @@ class MapperEvents(event.Events): .. versionadded:: 0.9.3 + + .. seealso:: + + :meth:`.MapperEvents.mapper_configured` + + :meth:`.MapperEvents.after_configured` + """ def after_configured(self): """Called after a series of mappers have been configured. - This corresponds to the :func:`.orm.configure_mappers` call, which - note is usually called automatically as mappings are first - used. + The :meth:`.MapperEvents.after_configured` event is invoked + each time the :func:`.orm.configure_mappers` function is + invoked, after the function has completed its work. + :func:`.orm.configure_mappers` is typically invoked + automatically as mappings are first used, as well as each time + new mappers have been made available and new mapper use is + detected. + + Contrast this event to the :meth:`.MapperEvents.mapper_configured` + event, which is called on a per-mapper basis while the configuration + operation proceeds; unlike that event, when this event is invoked, + all cross-configurations (e.g. backrefs) will also have been made + available for any mappers that were pending. + Also contrast to :meth:`.MapperEvents.before_configured`, + which is invoked before the series of mappers has been configured. This event can **only** be applied to the :class:`.Mapper` class or :func:`.mapper` function, and not to individual mappings or @@ -652,7 +789,7 @@ class MapperEvents(event.Events): application, but is actually called any time new mappers have been affected by a :func:`.orm.configure_mappers` call. If new mappings are constructed after existing ones have - already been used, this event can be called again. To ensure + already been used, this event will likely be called again. To ensure that a particular event is only called once and no further, the ``once=True`` argument (new in 0.9.4) can be applied:: @@ -662,144 +799,11 @@ class MapperEvents(event.Events): def go(): # ... - """ + .. seealso:: - def translate_row(self, mapper, context, row): - """Perform pre-processing on the given result row and return a - new row instance. 
+ :meth:`.MapperEvents.mapper_configured` - .. deprecated:: 0.9 the :meth:`.translate_row` event should - be considered as legacy. The row as delivered in a mapper - load operation typically requires that highly technical - details be accommodated in order to identity the correct - column keys are present in the row, rendering this particular - event hook as difficult to use and unreliable. - - This listener is typically registered with ``retval=True``. - It is called when the mapper first receives a row, before - the object identity or the instance itself has been derived - from that row. The given row may or may not be a - :class:`.RowProxy` object - it will always be a dictionary-like - object which contains mapped columns as keys. The - returned object should also be a dictionary-like object - which recognizes mapped columns as keys. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param context: the :class:`.QueryContext`, which includes - a handle to the current :class:`.Query` in progress as well - as additional state information. - :param row: the result row being handled. This may be - an actual :class:`.RowProxy` or may be a dictionary containing - :class:`.Column` objects as keys. - :return: When configured with ``retval=True``, the function - should return a dictionary-like row object, or ``EXT_CONTINUE``, - indicating the original row should be used. - - - """ - - def create_instance(self, mapper, context, row, class_): - """Receive a row when a new object instance is about to be - created from that row. - - .. deprecated:: 0.9 the :meth:`.create_instance` event should - be considered as legacy. Manipulation of the object construction - mechanics during a load should not be necessary. - - The method can choose to create the instance itself, or it can return - EXT_CONTINUE to indicate normal object creation should take place. - This listener is typically registered with ``retval=True``. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param context: the :class:`.QueryContext`, which includes - a handle to the current :class:`.Query` in progress as well - as additional state information. - :param row: the result row being handled. This may be - an actual :class:`.RowProxy` or may be a dictionary containing - :class:`.Column` objects as keys. - :param class\_: the mapped class. - :return: When configured with ``retval=True``, the return value - should be a newly created instance of the mapped class, - or ``EXT_CONTINUE`` indicating that default object construction - should take place. - - """ - - def append_result(self, mapper, context, row, target, - result, **flags): - """Receive an object instance before that instance is appended - to a result list. - - .. deprecated:: 0.9 the :meth:`.append_result` event should - be considered as legacy. It is a difficult to use method - whose original purpose is better suited by custom collection - classes. - - This is a rarely used hook which can be used to alter - the construction of a result list returned by :class:`.Query`. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param context: the :class:`.QueryContext`, which includes - a handle to the current :class:`.Query` in progress as well - as additional state information. - :param row: the result row being handled. This may be - an actual :class:`.RowProxy` or may be a dictionary containing - :class:`.Column` objects as keys. - :param target: the mapped instance being populated. 
If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param result: a list-like object where results are being - appended. - :param \**flags: Additional state information about the - current handling of the row. - :return: If this method is registered with ``retval=True``, - a return value of ``EXT_STOP`` will prevent the instance - from being appended to the given result list, whereas a - return value of ``EXT_CONTINUE`` will result in the default - behavior of appending the value to the result list. - - """ - - def populate_instance(self, mapper, context, row, - target, **flags): - """Receive an instance before that instance has - its attributes populated. - - .. deprecated:: 0.9 the :meth:`.populate_instance` event should - be considered as legacy. The mechanics of instance population - should not need modification; special "on load" rules can as always - be accommodated by the :class:`.InstanceEvents.load` event. - - This usually corresponds to a newly loaded instance but may - also correspond to an already-loaded instance which has - unloaded attributes to be populated. The method may be called - many times for a single instance, as multiple result rows are - used to populate eagerly loaded collections. - - Most usages of this hook are obsolete. For a - generic "object has been newly created from a row" hook, use - :meth:`.InstanceEvents.load`. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param context: the :class:`.QueryContext`, which includes - a handle to the current :class:`.Query` in progress as well - as additional state information. - :param row: the result row being handled. This may be - an actual :class:`.RowProxy` or may be a dictionary containing - :class:`.Column` objects as keys. - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: When configured with ``retval=True``, a return - value of ``EXT_STOP`` will bypass instance population by - the mapper. A value of ``EXT_CONTINUE`` indicates that - default instance population should take place. + :meth:`.MapperEvents.before_configured` """ @@ -822,30 +826,14 @@ class MapperEvents(event.Events): steps. .. warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** - :class:`.Connection` **only.** Handlers here should **not** make - alterations to the state of the :class:`.Session` overall, and - in general should not affect any :func:`.relationship` -mapped - attributes, as session cascade rules will not function properly, - nor is it always known if the related class has already been - handled. Operations that **are not supported in mapper - events** include: - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, - i.e. ``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, or - another method designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. 
+ Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. :param mapper: the :class:`.Mapper` which is the target of this event. @@ -859,6 +847,10 @@ class MapperEvents(event.Events): object associated with the instance. :return: No return value is supported by this event. + .. seealso:: + + :ref:`session_persistence_events` + """ def after_insert(self, mapper, connection, target): @@ -880,30 +872,14 @@ class MapperEvents(event.Events): event->persist->event steps. .. warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** - :class:`.Connection` **only.** Handlers here should **not** make - alterations to the state of the :class:`.Session` overall, and in - general should not affect any :func:`.relationship` -mapped - attributes, as session cascade rules will not function properly, - nor is it always known if the related class has already been - handled. Operations that **are not supported in mapper - events** include: - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, - i.e. ``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, - or another method designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. :param mapper: the :class:`.Mapper` which is the target of this event. @@ -917,6 +893,10 @@ class MapperEvents(event.Events): object associated with the instance. :return: No return value is supported by this event. + .. seealso:: + + :ref:`session_persistence_events` + """ def before_update(self, mapper, connection, target): @@ -957,29 +937,14 @@ class MapperEvents(event.Events): steps. .. warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** :class:`.Connection` - **only.** Handlers here should **not** make alterations to the - state of the :class:`.Session` overall, and in general should not - affect any :func:`.relationship` -mapped attributes, as - session cascade rules will not function properly, nor is it - always known if the related class has already been handled. - Operations that **are not supported in mapper events** include: - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, - i.e. 
``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, - or another method designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. :param mapper: the :class:`.Mapper` which is the target of this event. @@ -992,6 +957,11 @@ class MapperEvents(event.Events): instead be the :class:`.InstanceState` state-management object associated with the instance. :return: No return value is supported by this event. + + .. seealso:: + + :ref:`session_persistence_events` + """ def after_update(self, mapper, connection, target): @@ -1031,29 +1001,14 @@ class MapperEvents(event.Events): steps. .. warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** :class:`.Connection` - **only.** Handlers here should **not** make alterations to the - state of the :class:`.Session` overall, and in general should not - affect any :func:`.relationship` -mapped attributes, as - session cascade rules will not function properly, nor is it - always known if the related class has already been handled. - Operations that **are not supported in mapper events** include: - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, - i.e. ``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, - or another method designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. :param mapper: the :class:`.Mapper` which is the target of this event. @@ -1067,6 +1022,10 @@ class MapperEvents(event.Events): object associated with the instance. :return: No return value is supported by this event. + .. seealso:: + + :ref:`session_persistence_events` + """ def before_delete(self, mapper, connection, target): @@ -1082,29 +1041,14 @@ class MapperEvents(event.Events): once in a later step. .. 
warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** :class:`.Connection` - **only.** Handlers here should **not** make alterations to the - state of the :class:`.Session` overall, and in general should not - affect any :func:`.relationship` -mapped attributes, as - session cascade rules will not function properly, nor is it - always known if the related class has already been handled. - Operations that **are not supported in mapper events** include: - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, - i.e. ``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, - or another method designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. :param mapper: the :class:`.Mapper` which is the target of this event. @@ -1118,6 +1062,10 @@ class MapperEvents(event.Events): object associated with the instance. :return: No return value is supported by this event. + .. seealso:: + + :ref:`session_persistence_events` + """ def after_delete(self, mapper, connection, target): @@ -1133,29 +1081,14 @@ class MapperEvents(event.Events): once in a previous step. .. warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** :class:`.Connection` - **only.** Handlers here should **not** make alterations to the - state of the :class:`.Session` overall, and in general should not - affect any :func:`.relationship` -mapped attributes, as - session cascade rules will not function properly, nor is it - always known if the related class has already been handled. - Operations that **are not supported in mapper events** include: - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, - i.e. ``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, - or another method designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. + Mapper-level flush events only allow **very limited operations**, + on attributes local to the row being operated upon only, + as well as allowing any SQL to be emitted on the given + :class:`.Connection`. **Please read fully** the notes + at :ref:`session_persistence_mapper` for guidelines on using + these methods; generally, the :meth:`.SessionEvents.before_flush` + method should be preferred for general on-flush changes. 
:param mapper: the :class:`.Mapper` which is the target of this event. @@ -1169,6 +1102,10 @@ class MapperEvents(event.Events): object associated with the instance. :return: No return value is supported by this event. + .. seealso:: + + :ref:`session_persistence_events` + """ @@ -1409,6 +1346,8 @@ class SessionEvents(event.Events): :meth:`~.SessionEvents.after_flush_postexec` + :ref:`session_persistence_events` + """ def after_flush(self, session, flush_context): @@ -1429,6 +1368,8 @@ class SessionEvents(event.Events): :meth:`~.SessionEvents.after_flush_postexec` + :ref:`session_persistence_events` + """ def after_flush_postexec(self, session, flush_context): @@ -1451,6 +1392,8 @@ class SessionEvents(event.Events): :meth:`~.SessionEvents.after_flush` + :ref:`session_persistence_events` + """ def after_begin(self, session, transaction, connection): @@ -1488,6 +1431,8 @@ class SessionEvents(event.Events): :meth:`~.SessionEvents.after_attach` + :ref:`session_lifecycle_events` + """ def after_attach(self, session, instance): @@ -1510,6 +1455,8 @@ class SessionEvents(event.Events): :meth:`~.SessionEvents.before_attach` + :ref:`session_lifecycle_events` + """ @event._legacy_signature("0.9", @@ -1627,8 +1574,9 @@ class AttributeEvents(event.Events): @staticmethod def _set_dispatch(cls, dispatch_cls): - event.Events._set_dispatch(cls, dispatch_cls) + dispatch = event.Events._set_dispatch(cls, dispatch_cls) dispatch_cls._active_history = False + return dispatch @classmethod def _accept_with(cls, target): @@ -1644,7 +1592,8 @@ class AttributeEvents(event.Events): propagate=False): target, identifier, fn = \ - event_key.dispatch_target, event_key.identifier, event_key.fn + event_key.dispatch_target, event_key.identifier, \ + event_key._listen_fn if active_history: target.dispatch._active_history = True @@ -1744,3 +1693,109 @@ class AttributeEvents(event.Events): the given value, or a new effective value, should be returned. """ + + def init_collection(self, target, collection, collection_adapter): + """Receive a 'collection init' event. + + This event is triggered for a collection-based attribute, when + the initial "empty collection" is first generated for a blank + attribute, as well as for when the collection is replaced with + a new one, such as via a set event. + + E.g., given that ``User.addresses`` is a relationship-based + collection, the event is triggered here:: + + u1 = User() + u1.addresses.append(a1) # <- new collection + + and also during replace operations:: + + u1.addresses = [a2, a3] # <- new collection + + :param target: the object instance receiving the event. + If the listener is registered with ``raw=True``, this will + be the :class:`.InstanceState` object. + :param collection: the new collection. This will always be generated + from what was specified as + :paramref:`.RelationshipProperty.collection_class`, and will always + be empty. + :param collection_adapter: the :class:`.CollectionAdapter` that will + mediate internal access to the collection. + + .. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection` + and :meth:`.AttributeEvents.dispose_collection` events supersede + the :class:`.collection.linker` hook. + + """ + + def dispose_collection(self, target, collection, collection_adapter): + """Receive a 'collection dispose' event.
+ + This event is triggered for a collection-based attribute when + a collection is replaced, that is:: + + u1.addresses.append(a1) + + u1.addresses = [a2, a3] # <- old collection is disposed + + The mechanics of the event will typically include that the given + collection is empty, even if it stored objects while being replaced. + + .. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection` + and :meth:`.AttributeEvents.dispose_collection` events supersede + the :class:`.collection.linker` hook. + + """ + + +class QueryEvents(event.Events): + """Represent events within the construction of a :class:`.Query` object. + + The events here are intended to be used with an as-yet-unreleased + inspection system for :class:`.Query`. Some very basic operations + are possible now, however the inspection system is intended to allow + complex query manipulations to be automated. + + .. versionadded:: 1.0.0 + + """ + + _target_class_doc = "SomeQuery" + _dispatch_target = Query + + def before_compile(self, query): + """Receive the :class:`.Query` object before it is composed into a + core :class:`.Select` object. + + This event is intended to allow changes to the query given:: + + @event.listens_for(Query, "before_compile", retval=True) + def no_deleted(query): + for desc in query.column_descriptions: + if desc['type'] is User: + entity = desc['entity'] + query = query.filter(entity.deleted == False) + return query + + The event should normally be listened with the ``retval=True`` + parameter set, so that the modified query may be returned. + + + """ + + @classmethod + def _listen( + cls, event_key, retval=False, **kw): + fn = event_key._listen_fn + + if not retval: + def wrap(*arg, **kw): + if not retval: + query = arg[0] + fn(*arg, **kw) + return query + else: + return fn(*arg, **kw) + event_key = event_key.with_wrapper(wrap) + + event_key.base_listen(**kw) diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/exc.py b/lib/python3.5/site-packages/sqlalchemy/orm/exc.py index ff0ece4..db99322 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/exc.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/exc.py @@ -1,5 +1,5 @@ # orm/exc.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/identity.py b/lib/python3.5/site-packages/sqlalchemy/orm/identity.py index e48203d..5646732 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/identity.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/identity.py @@ -1,5 +1,5 @@ # orm/identity.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -10,18 +10,26 @@ from . import attributes from .. 
import util -class IdentityMap(dict): - +class IdentityMap(object): def __init__(self): + self._dict = {} self._modified = set() self._wr = weakref.ref(self) + def keys(self): + return self._dict.keys() + def replace(self, state): raise NotImplementedError() def add(self, state): raise NotImplementedError() + def _add_unpresent(self, state, key): + """optional inlined form of add() which can assume item isn't present + in the map""" + self.add(state) + def update(self, dict): raise NotImplementedError("IdentityMap uses add() to insert data") @@ -36,7 +44,8 @@ class IdentityMap(dict): def _manage_removed_state(self, state): del state._instance_dict - self._modified.discard(state) + if state.modified: + self._modified.discard(state) def _dirty_states(self): return self._modified @@ -60,6 +69,9 @@ class IdentityMap(dict): def setdefault(self, key, default=None): raise NotImplementedError("IdentityMap uses add() to insert data") + def __len__(self): + return len(self._dict) + def copy(self): raise NotImplementedError() @@ -72,11 +84,8 @@ class IdentityMap(dict): class WeakInstanceDict(IdentityMap): - def __init__(self): - IdentityMap.__init__(self) - def __getitem__(self, key): - state = dict.__getitem__(self, key) + state = self._dict[key] o = state.obj() if o is None: raise KeyError(key) @@ -84,8 +93,8 @@ class WeakInstanceDict(IdentityMap): def __contains__(self, key): try: - if dict.__contains__(self, key): - state = dict.__getitem__(self, key) + if key in self._dict: + state = self._dict[key] o = state.obj() else: return False @@ -95,25 +104,25 @@ class WeakInstanceDict(IdentityMap): return o is not None def contains_state(self, state): - return dict.get(self, state.key) is state + return state.key in self._dict and self._dict[state.key] is state def replace(self, state): - if dict.__contains__(self, state.key): - existing = dict.__getitem__(self, state.key) + if state.key in self._dict: + existing = self._dict[state.key] if existing is not state: self._manage_removed_state(existing) else: return - dict.__setitem__(self, state.key, state) + self._dict[state.key] = state self._manage_incoming_state(state) def add(self, state): key = state.key # inline of self.__contains__ - if dict.__contains__(self, key): + if key in self._dict: try: - existing_state = dict.__getitem__(self, key) + existing_state = self._dict[key] if existing_state is not state: o = existing_state.obj() if o is not None: @@ -125,19 +134,24 @@ class WeakInstanceDict(IdentityMap): return except KeyError: pass - dict.__setitem__(self, key, state) + self._dict[key] = state self._manage_incoming_state(state) + def _add_unpresent(self, state, key): + # inlined form of add() called by loading.py + self._dict[key] = state + state._instance_dict = self._wr + def get(self, key, default=None): - state = dict.get(self, key, default) - if state is default: + if key not in self._dict: return default + state = self._dict[key] o = state.obj() if o is None: return default return o - def _items(self): + def items(self): values = self.all_states() result = [] for state in values: @@ -146,7 +160,7 @@ class WeakInstanceDict(IdentityMap): result.append((state.key, value)) return result - def _values(self): + def values(self): values = self.all_states() result = [] for state in values: @@ -156,39 +170,80 @@ class WeakInstanceDict(IdentityMap): return result + def __iter__(self): + return iter(self.keys()) + if util.py2k: - items = _items - values = _values def iteritems(self): return iter(self.items()) def itervalues(self): return 
iter(self.values()) - else: - def items(self): - return iter(self._items()) - - def values(self): - return iter(self._values()) def all_states(self): if util.py2k: - return dict.values(self) else: - return list(dict.values(self)) + return self._dict.values() + else: + return list(self._dict.values()) + + def _fast_discard(self, state): + self._dict.pop(state.key, None) def discard(self, state): - st = dict.get(self, state.key, None) - if st is state: - dict.pop(self, state.key, None) + st = self._dict.pop(state.key, None) + if st: + assert st is state self._manage_removed_state(state) + def safe_discard(self, state): + if state.key in self._dict: + st = self._dict[state.key] + if st is state: + self._dict.pop(state.key, None) + self._manage_removed_state(state) + def prune(self): return 0 class StrongInstanceDict(IdentityMap): + """A 'strong-referencing' version of the identity map. + + .. deprecated:: this object is present in order to fulfill + the ``weak_identity_map=False`` option of the Session. + This option is present to allow compatibility with older applications, + but it is recommended that strong references to objects + be maintained by the calling application + externally to the :class:`.Session` itself, to the degree + that is needed by the application. + + """ + + if util.py2k: + def itervalues(self): + return self._dict.itervalues() + + def iteritems(self): + return self._dict.iteritems() + + def __iter__(self): + return iter(self._dict) + + def __getitem__(self, key): + return self._dict[key] + + def __contains__(self, key): + return key in self._dict + + def get(self, key, default=None): + return self._dict.get(key, default) + + def values(self): + return self._dict.values() + + def items(self): + return self._dict.items() def all_states(self): return [attributes.instance_state(o) for o in self.values()] @@ -199,36 +254,48 @@ class StrongInstanceDict(IdentityMap): def replace(self, state): - if dict.__contains__(self, state.key): - existing = dict.__getitem__(self, state.key) + if state.key in self._dict: + existing = self._dict[state.key] existing = attributes.instance_state(existing) if existing is not state: self._manage_removed_state(existing) else: return - dict.__setitem__(self, state.key, state.obj()) + self._dict[state.key] = state.obj() self._manage_incoming_state(state) def add(self, state): if state.key in self: - if attributes.instance_state( - dict.__getitem__( - self, - state.key)) is not state: + if attributes.instance_state(self._dict[state.key]) is not state: raise AssertionError('A conflicting state is already ' 'present in the identity map for key %r' % (state.key, )) else: - dict.__setitem__(self, state.key, state.obj()) + self._dict[state.key] = state.obj() self._manage_incoming_state(state) + def _add_unpresent(self, state, key): + # inlined form of add() called by loading.py + self._dict[key] = state.obj() + state._instance_dict = self._wr + + def _fast_discard(self, state): + self._dict.pop(state.key, None) + def discard(self, state): - obj = dict.get(self, state.key, None) + obj = self._dict.pop(state.key, None) if obj is not None: + self._manage_removed_state(state) + st = attributes.instance_state(obj) + assert st is state + + def safe_discard(self, state): + if state.key in self._dict: + obj = self._dict[state.key] st = attributes.instance_state(obj) if st is state: - dict.pop(self, state.key, None) + self._dict.pop(state.key, None) self._manage_removed_state(state) def prune(self): @@ -241,7 +308,7
@@ class StrongInstanceDict(IdentityMap): keepers = weakref.WeakValueDictionary() keepers.update(self) - dict.clear(self) - dict.update(self, keepers) + self._dict.clear() + self._dict.update(keepers) self.modified = bool(dirty) return ref_count - len(self) diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/instrumentation.py b/lib/python3.5/site-packages/sqlalchemy/orm/instrumentation.py index cc390a1..d41ee59 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/instrumentation.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/instrumentation.py @@ -1,5 +1,5 @@ # orm/instrumentation.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -35,13 +35,17 @@ from .. import util from . import base -class ClassManager(dict): +_memoized_key_collection = util.group_expirable_memoized_property() + +class ClassManager(dict): """tracks state information at the class level.""" MANAGER_ATTR = base.DEFAULT_MANAGER_ATTR STATE_ATTR = base.DEFAULT_STATE_ATTR + _state_setter = staticmethod(util.attrsetter(STATE_ATTR)) + deferred_scalar_loader = None original_init = object.__init__ @@ -91,6 +95,21 @@ class ClassManager(dict): def is_mapped(self): return 'mapper' in self.__dict__ + @_memoized_key_collection + def _all_key_set(self): + return frozenset(self) + + @_memoized_key_collection + def _collection_impl_keys(self): + return frozenset([ + attr.key for attr in self.values() if attr.impl.collection]) + + @_memoized_key_collection + def _scalar_loader_impls(self): + return frozenset([ + attr.impl for attr in + self.values() if attr.impl.accepts_scalar_loader]) + @util.memoized_property def mapper(self): # raises unless self.mapper has been assigned @@ -98,7 +117,7 @@ class ClassManager(dict): def _all_sqla_attributes(self, exclude=None): """return an iterator of all classbound attributes that - implement :class:`._InspectionAttr`. + implement :class:`.InspectionAttr`.
This includes :class:`.QueryableAttribute` as well as extension types such as :class:`.hybrid_property` and @@ -111,7 +130,7 @@ class ClassManager(dict): for key in set(supercls.__dict__).difference(exclude): exclude.add(key) val = supercls.__dict__[key] - if isinstance(val, interfaces._InspectionAttr): + if isinstance(val, interfaces.InspectionAttr): yield key, val def _attr_has_impl(self, key): @@ -194,6 +213,7 @@ class ClassManager(dict): else: self.local_attrs[key] = inst self.install_descriptor(key, inst) + _memoized_key_collection.expire_instance(self) self[key] = inst for cls in self.class_.__subclasses__(): @@ -222,6 +242,7 @@ class ClassManager(dict): else: del self.local_attrs[key] self.uninstall_descriptor(key) + _memoized_key_collection.expire_instance(self) del self[key] for cls in self.class_.__subclasses__(): manager = manager_of_class(cls) @@ -289,13 +310,15 @@ class ClassManager(dict): def new_instance(self, state=None): instance = self.class_.__new__(self.class_) - setattr(instance, self.STATE_ATTR, - state or self._state_constructor(instance, self)) + if state is None: + state = self._state_constructor(instance, self) + self._state_setter(instance, state) return instance def setup_instance(self, instance, state=None): - setattr(instance, self.STATE_ATTR, - state or self._state_constructor(instance, self)) + if state is None: + state = self._state_constructor(instance, self) + self._state_setter(instance, state) def teardown_instance(self, instance): delattr(instance, self.STATE_ATTR) @@ -322,7 +345,7 @@ class ClassManager(dict): _new_state_if_none(instance) else: state = self._state_constructor(instance, self) - setattr(instance, self.STATE_ATTR, state) + self._state_setter(instance, state) return state def has_state(self, instance): @@ -344,7 +367,6 @@ class ClassManager(dict): class _SerializeManager(object): - """Provide serialization of a :class:`.ClassManager`. The :class:`.InstanceState` uses ``__init__()`` on serialize @@ -379,7 +401,6 @@ class _SerializeManager(object): class InstrumentationFactory(object): - """Factory for new ClassManager instances.""" def create_manager_for_cls(self, class_): diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/interfaces.py b/lib/python3.5/site-packages/sqlalchemy/orm/interfaces.py index 9bc1c3d..2ff00ae 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/interfaces.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/interfaces.py @@ -1,5 +1,5 @@ # orm/interfaces.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -9,25 +9,28 @@ Contains various base classes used throughout the ORM. -Defines the now deprecated ORM extension classes as well -as ORM internals. +Defines some key base classes prominent within the internals, +as well as the now-deprecated ORM extension classes. Other than the deprecated extensions, this module and the -classes within should be considered mostly private. +classes within are mostly private, though some attributes +are exposed when inspecting mappings. """ from __future__ import absolute_import -from .. import exc as sa_exc, util, inspect +from .. 
import util from ..sql import operators -from collections import deque from .base import (ONETOMANY, MANYTOONE, MANYTOMANY, EXT_CONTINUE, EXT_STOP, NOT_EXTENSION) -from .base import _InspectionAttr, _MappedAttribute -from .path_registry import PathRegistry +from .base import (InspectionAttr, InspectionAttr, + InspectionAttrInfo, _MappedAttribute) import collections +from .. import inspect +# imported later +MapperExtension = SessionExtension = AttributeExtension = None __all__ = ( 'AttributeExtension', @@ -47,11 +50,8 @@ __all__ = ( ) -class MapperProperty(_MappedAttribute, _InspectionAttr): - """Manage the relationship of a ``Mapper`` to a single class - attribute, as well as that attribute as it appears on individual - instances of the class, including attribute instrumentation, - attribute access, loading behavior, and dependency calculations. +class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots): + """Represent a particular class attribute mapped by :class:`.Mapper`. The most common occurrences of :class:`.MapperProperty` are the mapped :class:`.Column`, which is represented in a mapping as @@ -62,14 +62,51 @@ class MapperProperty(_MappedAttribute, _InspectionAttr): """ + __slots__ = ( + '_configure_started', '_configure_finished', 'parent', 'key', + 'info' + ) + cascade = frozenset() """The set of 'cascade' attribute names. This collection is checked before the 'cascade_iterator' method is called. + The collection typically only applies to a RelationshipProperty. + """ is_property = True + """Part of the InspectionAttr interface; states this object is a + mapper property. + + """ + + def _memoized_attr_info(self): + """Info dictionary associated with the object, allowing user-defined + data to be associated with this :class:`.InspectionAttr`. + + The dictionary is generated when first accessed. Alternatively, + it can be specified as a constructor argument to the + :func:`.column_property`, :func:`.relationship`, or :func:`.composite` + functions. + + .. versionadded:: 0.8 Added support for .info to all + :class:`.MapperProperty` subclasses. + + .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also + available on extension types via the + :attr:`.InspectionAttrInfo.info` attribute, so that it can apply + to a wider variety of ORM and extension constructs. + + .. seealso:: + + :attr:`.QueryableAttribute.info` + + :attr:`.SchemaItem.info` + + """ + return {} def setup(self, context, entity, path, adapter, **kwargs): """Called by Query for the purposes of constructing a SQL statement. @@ -77,16 +114,15 @@ class MapperProperty(_MappedAttribute, _InspectionAttr): Each MapperProperty associated with the target mapper processes the statement referenced by the query context, adding columns and/or criterion as appropriate. - """ - pass + """ def create_row_processor(self, context, path, - mapper, row, adapter): - """Return a 3-tuple consisting of three row processing functions. + mapper, result, adapter, populators): + """Produce row processing functions and append to the given + set of populators lists. """ - return None, None, None def cascade_iterator(self, type_, state, visited_instances=None, halt_on=None): @@ -98,41 +134,44 @@ class MapperProperty(_MappedAttribute, _InspectionAttr): Note that the 'cascade' collection on this MapperProperty is checked first for the given type before cascade_iterator is called. - See PropertyLoader for the related instance implementation. + This method typically only applies to RelationshipProperty. 
+ """ return iter(()) def set_parent(self, parent, init): - self.parent = parent + """Set the parent mapper that references this MapperProperty. - def instrument_class(self, mapper): # pragma: no-coverage - raise NotImplementedError() - - @util.memoized_property - def info(self): - """Info dictionary associated with the object, allowing user-defined - data to be associated with this :class:`.MapperProperty`. - - The dictionary is generated when first accessed. Alternatively, - it can be specified as a constructor argument to the - :func:`.column_property`, :func:`.relationship`, or :func:`.composite` - functions. - - .. versionadded:: 0.8 Added support for .info to all - :class:`.MapperProperty` subclasses. - - .. seealso:: - - :attr:`.QueryableAttribute.info` - - :attr:`.SchemaItem.info` + This method is overridden by some subclasses to perform extra + setup when the mapper is first known. """ - return {} + self.parent = parent - _configure_started = False - _configure_finished = False + def instrument_class(self, mapper): + """Hook called by the Mapper to the property to initiate + instrumentation of the class attribute managed by this + MapperProperty. + + The MapperProperty here will typically call out to the + attributes module to set up an InstrumentedAttribute. + + This step is the first of two steps to set up an InstrumentedAttribute, + and is called early in the mapper setup process. + + The second step is typically the init_class_attribute step, + called from StrategizedProperty via the post_instrument_class() + hook. This step assigns additional state to the InstrumentedAttribute + (specifically the "impl") which has been determined after the + MapperProperty has determined what kind of persistence + management it needs to do (e.g. scalar, object, collection, etc). + + """ + + def __init__(self): + self._configure_started = False + self._configure_finished = False def init(self): """Called after all mappers are created to assemble @@ -179,45 +218,28 @@ class MapperProperty(_MappedAttribute, _InspectionAttr): """ - pass - def post_instrument_class(self, mapper): """Perform instrumentation adjustments that need to occur after init() has completed. + The given Mapper is the Mapper invoking the operation, which + may not be the same Mapper as self.parent in an inheritance + scenario; however, Mapper will always at least be a sub-mapper of + self.parent. + + This method is typically used by StrategizedProperty, which delegates + it to LoaderStrategy.init_class_attribute() to perform final setup + on the class-bound InstrumentedAttribute. + """ - pass - - def is_primary(self): - """Return True if this ``MapperProperty``'s mapper is the - primary mapper for its class. - - This flag is used to indicate that the ``MapperProperty`` can - define attribute instrumentation for the class at the class - level (as opposed to the individual instance level). - """ - - return not self.parent.non_primary def merge(self, session, source_state, source_dict, dest_state, dest_dict, load, _recursive): """Merge the attribute represented by this ``MapperProperty`` - from source to destination object""" + from source to destination object. - pass - - def compare(self, operator, value, **kw): - """Return a compare operation for the columns represented by - this ``MapperProperty`` to the given value, which may be a - column value or an instance. 'operator' is an operator from - the operators module, or from sql.Comparator. 
- - By default uses the PropComparator attached to this MapperProperty - under the attribute name "comparator". """ - return operator(self.comparator, value) - def __repr__(self): return '<%s at 0x%x; %s>' % ( self.__class__.__name__, @@ -225,8 +247,7 @@ class MapperProperty(_MappedAttribute, _InspectionAttr): class PropComparator(operators.ColumnOperators): - """Defines boolean, comparison, and other operators for - :class:`.MapperProperty` objects. + """Defines SQL operators for :class:`.MapperProperty` objects. SQLAlchemy allows for operators to be redefined at both the Core and ORM level. :class:`.PropComparator` @@ -313,9 +334,11 @@ class PropComparator(operators.ColumnOperators): """ + __slots__ = 'prop', 'property', '_parententity', '_adapt_to_entity' + def __init__(self, prop, parentmapper, adapt_to_entity=None): self.prop = self.property = prop - self._parentmapper = parentmapper + self._parententity = adapt_to_entity or parentmapper self._adapt_to_entity = adapt_to_entity def __clause_element__(self): @@ -328,7 +351,13 @@ class PropComparator(operators.ColumnOperators): """Return a copy of this PropComparator which will use the given :class:`.AliasedInsp` to produce corresponding expressions. """ - return self.__class__(self.prop, self._parentmapper, adapt_to_entity) + return self.__class__(self.prop, self._parententity, adapt_to_entity) + + @property + def _parentmapper(self): + """legacy; this is renamed to _parententity to be + compatible with QueryableAttribute.""" + return inspect(self._parententity).mapper @property def adapter(self): @@ -341,7 +370,7 @@ class PropComparator(operators.ColumnOperators): else: return self._adapt_to_entity._adapt_element - @util.memoized_property + @property def info(self): return self.property.info @@ -421,8 +450,17 @@ class StrategizedProperty(MapperProperty): strategies can be selected at Query time through the usage of ``StrategizedOption`` objects via the Query.options() method. + The mechanics of StrategizedProperty are used for every Query + invocation for every mapped attribute participating in that Query, + to determine first how the attribute will be rendered in SQL + and secondly how the attribute will retrieve a value from a result + row and apply it to a mapped object. The routines here are very + performance-critical. 
+ """ + __slots__ = '_strategies', 'strategy' + strategy_wildcard_key = None def _get_context_loader(self, context, path): @@ -457,7 +495,8 @@ class StrategizedProperty(MapperProperty): def _get_strategy_by_cls(self, cls): return self._get_strategy(cls._strategy_keys[0]) - def setup(self, context, entity, path, adapter, **kwargs): + def setup( + self, context, entity, path, adapter, **kwargs): loader = self._get_context_loader(context, path) if loader and loader.strategy: strat = self._get_strategy(loader.strategy) @@ -465,32 +504,38 @@ class StrategizedProperty(MapperProperty): strat = self.strategy strat.setup_query(context, entity, path, loader, adapter, **kwargs) - def create_row_processor(self, context, path, mapper, row, adapter): + def create_row_processor( + self, context, path, mapper, + result, adapter, populators): loader = self._get_context_loader(context, path) if loader and loader.strategy: strat = self._get_strategy(loader.strategy) else: strat = self.strategy - return strat.create_row_processor(context, path, loader, - mapper, row, adapter) + strat.create_row_processor( + context, path, loader, + mapper, result, adapter, populators) def do_init(self): self._strategies = {} self.strategy = self._get_strategy_by_cls(self.strategy_class) def post_instrument_class(self, mapper): - if self.is_primary() and \ + if not self.parent.non_primary and \ not mapper.class_manager._attr_has_impl(self.key): self.strategy.init_class_attribute(mapper) - _strategies = collections.defaultdict(dict) + _all_strategies = collections.defaultdict(dict) @classmethod def strategy_for(cls, **kw): def decorate(dec_cls): - dec_cls._strategy_keys = [] + # ensure each subclass of the strategy has its + # own _strategy_keys collection + if '_strategy_keys' not in dec_cls.__dict__: + dec_cls._strategy_keys = [] key = tuple(sorted(kw.items())) - cls._strategies[cls][key] = dec_cls + cls._all_strategies[cls][key] = dec_cls dec_cls._strategy_keys.append(key) return dec_cls return decorate @@ -498,8 +543,8 @@ class StrategizedProperty(MapperProperty): @classmethod def _strategy_lookup(cls, *key): for prop_cls in cls.__mro__: - if prop_cls in cls._strategies: - strategies = cls._strategies[prop_cls] + if prop_cls in cls._all_strategies: + strategies = cls._all_strategies[prop_cls] try: return strategies[key] except KeyError: @@ -512,18 +557,24 @@ class MapperOption(object): propagate_to_loaders = False """if True, indicate this option should be carried along - Query object generated by scalar or object lazy loaders. + to "secondary" Query objects produced during lazy loads + or refresh operations. + """ def process_query(self, query): - pass + """Apply a modification to the given :class:`.Query`.""" def process_query_conditionally(self, query): """same as process_query(), except that this option may not apply to the given query. - Used when secondary loaders resend existing options to a new - Query.""" + This is typically used during a lazy load or scalar refresh + operation to propagate options stated in the original Query to the + new Query being used for the load. It occurs for those options that + specify propagate_to_loaders=True. + + """ self.process_query(query) @@ -542,9 +593,9 @@ class LoaderStrategy(object): * it processes the ``QueryContext`` at statement construction time, where it can modify the SQL statement that is being produced. 
- Simple column attributes may add their represented column to the - list of selected columns, *eager loading* properties may add - ``LEFT OUTER JOIN`` clauses to the statement. + For example, simple column attributes will add their represented + column to the list of selected columns, a joined eager loader + may establish join clauses to add to the statement. * It produces "row processor" functions at result fetching time. These "row processor" functions populate a particular attribute @@ -552,6 +603,8 @@ class LoaderStrategy(object): """ + __slots__ = 'parent_property', 'is_class_level', 'parent', 'key' + def __init__(self, parent): self.parent_property = parent self.is_class_level = False @@ -562,17 +615,26 @@ class LoaderStrategy(object): pass def setup_query(self, context, entity, path, loadopt, adapter, **kwargs): - pass + """Establish column and other state for a given QueryContext. + + This method fulfills the contract specified by MapperProperty.setup(). + + StrategizedProperty delegates its setup() method + directly to this method. + + """ def create_row_processor(self, context, path, loadopt, mapper, - row, adapter): - """Return row processing functions which fulfill the contract - specified by MapperProperty.create_row_processor. + result, adapter, populators): + """Establish row processing functions for a given QueryContext. - StrategizedProperty delegates its create_row_processor method - directly to this method. """ + This method fulfills the contract specified by + MapperProperty.create_row_processor(). - return None, None, None + StrategizedProperty delegates its create_row_processor() method + directly to this method. + + """ def __str__(self): return str(self.parent_property) diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/loading.py b/lib/python3.5/site-packages/sqlalchemy/orm/loading.py index 923ab22..d3e719d 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/loading.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/loading.py @@ -1,5 +1,5 @@ # orm/loading.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -12,26 +12,27 @@ the functions here are called primarily by Query, Mapper, as well as some of the attribute loading strategies. """ - +from __future__ import absolute_import from .. import util -from . import attributes, exc as orm_exc, state as statelib -from .interfaces import EXT_CONTINUE +from . import attributes, exc as orm_exc from ..sql import util as sql_util +from . import strategy_options + from .util import _none_set, state_str +from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE from .. 
import exc as sa_exc +import collections _new_runid = util.counter() def instances(query, cursor, context): """Return an ORM result as an iterator.""" - session = query.session context.runid = _new_runid() - filter_fns = [ent.filter_fn - for ent in query._entities] + filter_fns = [ent.filter_fn for ent in query._entities] filtered = id in filter_fns single_entity = len(query._entities) == 1 and \ @@ -44,59 +45,45 @@ def instances(query, cursor, context): def filter_fn(row): return tuple(fn(x) for x, fn in zip(row, filter_fns)) - custom_rows = single_entity and \ - query._entities[0].custom_rows + try: + (process, labels) = \ + list(zip(*[ + query_entity.row_processor(query, + context, cursor) + for query_entity in query._entities + ])) - (process, labels) = \ - list(zip(*[ - query_entity.row_processor(query, - context, custom_rows) - for query_entity in query._entities - ])) + if not single_entity: + keyed_tuple = util.lightweight_named_tuple('result', labels) - while True: - context.progress = {} - context.partials = {} + while True: + context.partials = {} - if query._yield_per: - fetch = cursor.fetchmany(query._yield_per) - if not fetch: + if query._yield_per: + fetch = cursor.fetchmany(query._yield_per) + if not fetch: + break + else: + fetch = cursor.fetchall() + + if single_entity: + proc = process[0] + rows = [proc(row) for row in fetch] + else: + rows = [keyed_tuple([proc(row) for proc in process]) + for row in fetch] + + if filtered: + rows = util.unique_list(rows, filter_fn) + + for row in rows: + yield row + + if not query._yield_per: break - else: - fetch = cursor.fetchall() - - if custom_rows: - rows = [] - for row in fetch: - process[0](row, rows) - elif single_entity: - rows = [process[0](row, None) for row in fetch] - else: - rows = [util.KeyedTuple([proc(row, None) for proc in process], - labels) for row in fetch] - - if filtered: - rows = util.unique_list(rows, filter_fn) - - if context.refresh_state and query._only_load_props \ - and context.refresh_state in context.progress: - context.refresh_state._commit( - context.refresh_state.dict, query._only_load_props) - context.progress.pop(context.refresh_state) - - statelib.InstanceState._commit_all_states( - list(context.progress.items()), - session.identity_map - ) - - for state, (dict_, attrs) in context.partials.items(): - state._commit(dict_, attrs) - - for row in rows: - yield row - - if not query._yield_per: - break + except Exception as err: + cursor.close() + util.raise_from_cause(err) @util.dependencies("sqlalchemy.orm.query") @@ -126,6 +113,7 @@ def merge_result(querylib, query, iterator, load=True): if isinstance(e, querylib._MapperEntity)] result = [] keys = [ent._label_name for ent in query._entities] + keyed_tuple = util.lightweight_named_tuple('result', keys) for row in iterator: newrow = list(row) for i in mapped_entities: @@ -134,7 +122,7 @@ def merge_result(querylib, query, iterator, load=True): attributes.instance_state(newrow[i]), attributes.instance_dict(newrow[i]), load=load, _recursive={}) - result.append(util.KeyedTuple(newrow, keys)) + result.append(keyed_tuple(newrow)) return iter(result) finally: @@ -161,7 +149,7 @@ def get_from_identity(session, key, passive): # expired state will be checked soon enough, if necessary return instance try: - state(state, passive) + state._load_expired(state, passive) except orm_exc.ObjectDeletedError: session._remove_newly_deleted([state]) return None @@ -233,11 +221,56 @@ def load_on_ident(query, key, return None -def instance_processor(mapper, context, path, 
adapter, - polymorphic_from=None, - only_load_props=None, - refresh_state=None, - polymorphic_discriminator=None): +def _setup_entity_query( + context, mapper, query_entity, + path, adapter, column_collection, + with_polymorphic=None, only_load_props=None, + polymorphic_discriminator=None, **kw): + + if with_polymorphic: + poly_properties = mapper._iterate_polymorphic_properties( + with_polymorphic) + else: + poly_properties = mapper._polymorphic_properties + + quick_populators = {} + + path.set( + context.attributes, + "memoized_setups", + quick_populators) + + for value in poly_properties: + if only_load_props and \ + value.key not in only_load_props: + continue + value.setup( + context, + query_entity, + path, + adapter, + only_load_props=only_load_props, + column_collection=column_collection, + memoized_populators=quick_populators, + **kw + ) + + if polymorphic_discriminator is not None and \ + polymorphic_discriminator \ + is not mapper.polymorphic_on: + + if adapter: + pd = adapter.columns[polymorphic_discriminator] + else: + pd = polymorphic_discriminator + column_collection.append(pd) + + +def _instance_processor( + mapper, context, result, path, adapter, + only_load_props=None, refresh_state=None, + polymorphic_discriminator=None, + _polymorphic_from=None): """Produce a mapper level row processor callable which processes rows into mapped instances.""" @@ -249,288 +282,292 @@ def instance_processor(mapper, context, path, adapter, pk_cols = mapper.primary_key - if polymorphic_from or refresh_state: - polymorphic_on = None - else: - if polymorphic_discriminator is not None: - polymorphic_on = polymorphic_discriminator - else: - polymorphic_on = mapper.polymorphic_on - polymorphic_instances = util.PopulateDict( - _configure_subclass_mapper( - mapper, - context, path, adapter) - ) - - version_id_col = mapper.version_id_col - if adapter: pk_cols = [adapter.columns[c] for c in pk_cols] - if polymorphic_on is not None: - polymorphic_on = adapter.columns[polymorphic_on] - if version_id_col is not None: - version_id_col = adapter.columns[version_id_col] identity_class = mapper._identity_class - new_populators = [] - existing_populators = [] - eager_populators = [] + populators = collections.defaultdict(list) - load_path = context.query._current_path + path \ - if context.query._current_path.path \ - else path + props = mapper._prop_set + if only_load_props is not None: + props = props.intersection( + mapper._props[k] for k in only_load_props) - def populate_state(state, dict_, row, isnew, only_load_props): - if isnew: - if context.propagate_options: - state.load_options = context.propagate_options - if state.load_options: - state.load_path = load_path + quick_populators = path.get( + context.attributes, "memoized_setups", _none_set) - if not new_populators: - _populators(mapper, context, path, row, adapter, - new_populators, - existing_populators, - eager_populators - ) - - if isnew: - populators = new_populators + for prop in props: + if prop in quick_populators: + # this is an inlined path just for column-based attributes. + col = quick_populators[prop] + if col is _DEFER_FOR_STATE: + populators["new"].append( + (prop.key, prop._deferred_column_loader)) + elif col is _SET_DEFERRED_EXPIRED: + # note that in this path, we are no longer + # searching in the result to see if the column might + # be present in some unexpected way. 
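+                    # (the "expire" entries are (key, flag) tuples: as
+                    # consumed by _populate_full and _populate_partial
+                    # further below, the key is popped from the instance
+                    # dict and, when the flag is set, also added to
+                    # state.expired_attributes)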
+ populators["expire"].append((prop.key, False)) + else: + if adapter: + col = adapter.columns[col] + getter = result._getter(col) + if getter: + populators["quick"].append((prop.key, getter)) + else: + # fall back to the ColumnProperty itself, which + # will iterate through all of its columns + # to see if one fits + prop.create_row_processor( + context, path, mapper, result, adapter, populators) else: - populators = existing_populators + prop.create_row_processor( + context, path, mapper, result, adapter, populators) - if only_load_props is None: - for key, populator in populators: - populator(state, dict_, row) - elif only_load_props: - for key, populator in populators: - if key in only_load_props: - populator(state, dict_, row) + propagate_options = context.propagate_options + if propagate_options: + load_path = context.query._current_path + path \ + if context.query._current_path.path else path session_identity_map = context.session.identity_map - listeners = mapper.dispatch - - # legacy events - I'd very much like to yank these totally - translate_row = listeners.translate_row or None - create_instance = listeners.create_instance or None - populate_instance = listeners.populate_instance or None - append_result = listeners.append_result or None - #### - populate_existing = context.populate_existing or mapper.always_refresh - invoke_all_eagers = context.invoke_all_eagers + load_evt = bool(mapper.class_manager.dispatch.load) + refresh_evt = bool(mapper.class_manager.dispatch.refresh) + instance_state = attributes.instance_state + instance_dict = attributes.instance_dict + session_id = context.session.hash_key + version_check = context.version_check + runid = context.runid + + if refresh_state: + refresh_identity_key = refresh_state.key + if refresh_identity_key is None: + # super-rare condition; a refresh is being called + # on a non-instance-key instance; this is meant to only + # occur within a flush() + refresh_identity_key = \ + mapper._identity_key_from_state(refresh_state) + else: + refresh_identity_key = None if mapper.allow_partial_pks: is_not_primary_key = _none_set.issuperset else: - is_not_primary_key = _none_set.issubset + is_not_primary_key = _none_set.intersection - def _instance(row, result): - if not new_populators and invoke_all_eagers: - _populators(mapper, context, path, row, adapter, - new_populators, - existing_populators, - eager_populators - ) + def _instance(row): - if translate_row: - for fn in translate_row: - ret = fn(mapper, context, row) - if ret is not EXT_CONTINUE: - row = ret - break - - if polymorphic_on is not None: - discriminator = row[polymorphic_on] - if discriminator is not None: - _instance = polymorphic_instances[discriminator] - if _instance: - return _instance(row, result) - - # determine identity key - if refresh_state: - identitykey = refresh_state.key - if identitykey is None: - # super-rare condition; a refresh is being called - # on a non-instance-key instance; this is meant to only - # occur within a flush() - identitykey = mapper._identity_key_from_state(refresh_state) + # determine the state that we'll be populating + if refresh_identity_key: + # fixed state that we're refreshing + state = refresh_state + instance = state.obj() + dict_ = instance_dict(instance) + isnew = state.runid != runid + currentload = True + loaded_instance = False else: + # look at the row, see if that identity is in the + # session, or we have to create a new one identitykey = ( identity_class, tuple([row[column] for column in pk_cols]) ) - instance = 
session_identity_map.get(identitykey) + instance = session_identity_map.get(identitykey) - if instance is not None: - state = attributes.instance_state(instance) - dict_ = attributes.instance_dict(instance) + if instance is not None: + # existing instance + state = instance_state(instance) + dict_ = instance_dict(instance) - isnew = state.runid != context.runid - currentload = not isnew - loaded_instance = False + isnew = state.runid != runid + currentload = not isnew + loaded_instance = False - if not currentload and \ - version_id_col is not None and \ - context.version_check and \ - mapper._get_state_attr_by_column( - state, - dict_, - mapper.version_id_col) != \ - row[version_id_col]: + if version_check and not currentload: + _validate_version_id(mapper, state, dict_, row, adapter) - raise orm_exc.StaleDataError( - "Instance '%s' has version id '%s' which " - "does not match database-loaded version id '%s'." - % (state_str(state), - mapper._get_state_attr_by_column( - state, dict_, - mapper.version_id_col), - row[version_id_col])) - elif refresh_state: - # out of band refresh_state detected (i.e. its not in the - # session.identity_map) honor it anyway. this can happen - # if a _get() occurs within save_obj(), such as - # when eager_defaults is True. - state = refresh_state - instance = state.obj() - dict_ = attributes.instance_dict(instance) - isnew = state.runid != context.runid - currentload = True - loaded_instance = False - else: - # check for non-NULL values in the primary key columns, - # else no entity is returned for the row - if is_not_primary_key(identitykey[1]): - return None - - isnew = True - currentload = True - loaded_instance = True - - if create_instance: - for fn in create_instance: - instance = fn(mapper, context, - row, mapper.class_) - if instance is not EXT_CONTINUE: - manager = attributes.manager_of_class( - instance.__class__) - # TODO: if manager is None, raise a friendly error - # about returning instances of unmapped types - manager.setup_instance(instance) - break - else: - instance = mapper.class_manager.new_instance() else: + # create a new instance + + # check for non-NULL values in the primary key columns, + # else no entity is returned for the row + if is_not_primary_key(identitykey[1]): + return None + + isnew = True + currentload = True + loaded_instance = True + instance = mapper.class_manager.new_instance() - dict_ = attributes.instance_dict(instance) - state = attributes.instance_state(instance) - state.key = identitykey + dict_ = instance_dict(instance) + state = instance_state(instance) + state.key = identitykey - # attach instance to session. - state.session_id = context.session.hash_key - session_identity_map.add(state) + # attach instance to session. + state.session_id = session_id + session_identity_map._add_unpresent(state, identitykey) + # populate. this looks at whether this state is new + # for this load or was existing, and whether or not this + # row is the first row with this identity. if currentload or populate_existing: - # state is being fully loaded, so populate. - # add to the "context.progress" collection. - if isnew: - state.runid = context.runid - context.progress[state] = dict_ + # full population routines. 
Objects here are either + # just created, or we are doing a populate_existing - if populate_instance: - for fn in populate_instance: - ret = fn(mapper, context, row, state, - only_load_props=only_load_props, - instancekey=identitykey, isnew=isnew) - if ret is not EXT_CONTINUE: - break - else: - populate_state(state, dict_, row, isnew, only_load_props) - else: - populate_state(state, dict_, row, isnew, only_load_props) + if isnew and propagate_options: + state.load_options = propagate_options + state.load_path = load_path - if loaded_instance: - state.manager.dispatch.load(state, context) - elif isnew: - state.manager.dispatch.refresh(state, context, only_load_props) - - elif state in context.partials or state.unloaded or eager_populators: - # state is having a partial set of its attributes - # refreshed. Populate those attributes, - # and add to the "context.partials" collection. - if state in context.partials: - isnew = False - (d_, attrs) = context.partials[state] - else: - isnew = True - attrs = state.unloaded - context.partials[state] = (dict_, attrs) - - if populate_instance: - for fn in populate_instance: - ret = fn(mapper, context, row, state, - only_load_props=attrs, - instancekey=identitykey, isnew=isnew) - if ret is not EXT_CONTINUE: - break - else: - populate_state(state, dict_, row, isnew, attrs) - else: - populate_state(state, dict_, row, isnew, attrs) - - for key, pop in eager_populators: - if key not in state.unloaded: - pop(state, dict_, row) + _populate_full( + context, row, state, dict_, isnew, + loaded_instance, populate_existing, populators) if isnew: - state.manager.dispatch.refresh(state, context, attrs) + if loaded_instance and load_evt: + state.manager.dispatch.load(state, context) + elif refresh_evt: + state.manager.dispatch.refresh( + state, context, only_load_props) - if result is not None: - if append_result: - for fn in append_result: - if fn(mapper, context, row, state, - result, instancekey=identitykey, - isnew=isnew) is not EXT_CONTINUE: - break - else: - result.append(instance) - else: - result.append(instance) + if populate_existing or state.modified: + if refresh_state and only_load_props: + state._commit(dict_, only_load_props) + else: + state._commit_all(dict_, session_identity_map) + + else: + # partial population routines, for objects that were already + # in the Session, but a row matches them; apply eager loaders + # on existing objects, etc. + unloaded = state.unloaded + isnew = state not in context.partials + + if not isnew or unloaded or populators["eager"]: + # state is having a partial set of its attributes + # refreshed. Populate those attributes, + # and add to the "context.partials" collection. 
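+                # (within _populate_partial below, only attribute names
+                # present in the "to_load" set are populated; the "eager"
+                # populators instead apply to any attribute not in
+                # state.unloaded)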
+ + to_load = _populate_partial( + context, row, state, dict_, isnew, + unloaded, populators) + + if isnew: + if refresh_evt: + state.manager.dispatch.refresh( + state, context, to_load) + + state._commit(dict_, to_load) return instance + + if mapper.polymorphic_map and not _polymorphic_from and not refresh_state: + # if we are doing polymorphic, dispatch to a different _instance() + # method specific to the subclass mapper + _instance = _decorate_polymorphic_switch( + _instance, context, mapper, result, path, + polymorphic_discriminator, adapter) + return _instance -def _populators(mapper, context, path, row, adapter, - new_populators, existing_populators, eager_populators): - """Produce a collection of attribute level row processor - callables.""" +def _populate_full( + context, row, state, dict_, isnew, + loaded_instance, populate_existing, populators): + if isnew: + # first time we are seeing a row with this identity. + state.runid = context.runid - delayed_populators = [] - pops = (new_populators, existing_populators, delayed_populators, - eager_populators) - - for prop in mapper._props.values(): - - for i, pop in enumerate(prop.create_row_processor( - context, - path, - mapper, row, adapter)): - if pop is not None: - pops[i].append((prop.key, pop)) - - if delayed_populators: - new_populators.extend(delayed_populators) + for key, getter in populators["quick"]: + dict_[key] = getter(row) + if populate_existing: + for key, set_callable in populators["expire"]: + dict_.pop(key, None) + if set_callable: + state.expired_attributes.add(key) + else: + for key, set_callable in populators["expire"]: + if set_callable: + state.expired_attributes.add(key) + for key, populator in populators["new"]: + populator(state, dict_, row) + for key, populator in populators["delayed"]: + populator(state, dict_, row) + else: + # have already seen rows with this identity. + for key, populator in populators["existing"]: + populator(state, dict_, row) -def _configure_subclass_mapper(mapper, context, path, adapter): - """Produce a mapper level row processor callable factory for mappers - inheriting this one.""" +def _populate_partial( + context, row, state, dict_, isnew, + unloaded, populators): + if not isnew: + to_load = context.partials[state] + for key, populator in populators["existing"]: + if key in to_load: + populator(state, dict_, row) + else: + to_load = unloaded + context.partials[state] = to_load + + for key, getter in populators["quick"]: + if key in to_load: + dict_[key] = getter(row) + for key, set_callable in populators["expire"]: + if key in to_load: + dict_.pop(key, None) + if set_callable: + state.expired_attributes.add(key) + for key, populator in populators["new"]: + if key in to_load: + populator(state, dict_, row) + for key, populator in populators["delayed"]: + if key in to_load: + populator(state, dict_, row) + for key, populator in populators["eager"]: + if key not in unloaded: + populator(state, dict_, row) + + return to_load + + +def _validate_version_id(mapper, state, dict_, row, adapter): + + version_id_col = mapper.version_id_col + + if version_id_col is None: + return + + if adapter: + version_id_col = adapter.columns[version_id_col] + + if mapper._get_state_attr_by_column( + state, dict_, mapper.version_id_col) != row[version_id_col]: + raise orm_exc.StaleDataError( + "Instance '%s' has version id '%s' which " + "does not match database-loaded version id '%s'." 
+ % (state_str(state), mapper._get_state_attr_by_column( + state, dict_, mapper.version_id_col), + row[version_id_col])) + + +def _decorate_polymorphic_switch( + instance_fn, context, mapper, result, path, + polymorphic_discriminator, adapter): + if polymorphic_discriminator is not None: + polymorphic_on = polymorphic_discriminator + else: + polymorphic_on = mapper.polymorphic_on + if polymorphic_on is None: + return instance_fn + + if adapter: + polymorphic_on = adapter.columns[polymorphic_on] def configure_subclass_mapper(discriminator): try: @@ -539,16 +576,26 @@ def _configure_subclass_mapper(mapper, context, path, adapter): raise AssertionError( "No such polymorphic_identity %r is defined" % discriminator) - if sub_mapper is mapper: - return None + else: + if sub_mapper is mapper: + return None - return instance_processor( - sub_mapper, - context, - path, - adapter, - polymorphic_from=mapper) - return configure_subclass_mapper + return _instance_processor( + sub_mapper, context, result, + path, adapter, _polymorphic_from=mapper) + + polymorphic_instances = util.PopulateDict( + configure_subclass_mapper + ) + + def polymorphic_instance(row): + discriminator = row[polymorphic_on] + if discriminator is not None: + _instance = polymorphic_instances[discriminator] + if _instance: + return _instance(row) + return instance_fn(row) + return polymorphic_instance def load_scalar_attributes(mapper, state, attribute_names): @@ -567,10 +614,17 @@ def load_scalar_attributes(mapper, state, attribute_names): result = False if mapper.inherits and not mapper.concrete: + # because we are using Core to produce a select() that we + # pass to the Query, we aren't calling setup() for mapped + # attributes; in 1.0 this means deferred attrs won't get loaded + # by default statement = mapper._optimized_get_statement(state, attribute_names) if statement is not None: result = load_on_ident( - session.query(mapper).from_statement(statement), + session.query(mapper). + options( + strategy_options.Load(mapper).undefer("*") + ).from_statement(statement), None, only_load_props=attribute_names, refresh_state=state @@ -596,10 +650,11 @@ def load_scalar_attributes(mapper, state, attribute_names): if (_none_set.issubset(identity_key) and not mapper.allow_partial_pks) or \ _none_set.issuperset(identity_key): - util.warn("Instance %s to be refreshed doesn't " - "contain a full primary key - can't be refreshed " - "(and shouldn't be expired, either)." - % state_str(state)) + util.warn_limited( + "Instance %s to be refreshed doesn't " + "contain a full primary key - can't be refreshed " + "(and shouldn't be expired, either).", + state_str(state)) return result = load_on_ident( diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/mapper.py b/lib/python3.5/site-packages/sqlalchemy/orm/mapper.py index 8fa2443..97e4638 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/mapper.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/mapper.py @@ -1,5 +1,5 @@ # orm/mapper.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -25,7 +25,8 @@ from .. import sql, util, log, exc as sa_exc, event, schema, inspection from ..sql import expression, visitors, operators, util as sql_util from . import instrumentation, attributes, exc as orm_exc, loading from . import properties -from .interfaces import MapperProperty, _InspectionAttr, _MappedAttribute +from . 
import util as orm_util +from .interfaces import MapperProperty, InspectionAttr, _MappedAttribute from .base import _class_to_mapper, _state_mapper, class_mapper, \ state_str, _INSTRUMENTOR @@ -51,8 +52,7 @@ _CONFIGURE_MUTEX = util.threading.RLock() @inspection._self_inspects @log.class_logger -class Mapper(_InspectionAttr): - +class Mapper(InspectionAttr): """Define the correlation of class attributes to database table columns. @@ -426,6 +426,12 @@ class Mapper(_InspectionAttr): thus persisting the value to the ``discriminator`` column in the database. + .. warning:: + + Currently, **only one discriminator column may be set**, typically + on the base-most class in the hierarchy. "Cascading" polymorphic + columns are not yet supported. + .. seealso:: :ref:`inheritance_toplevel` @@ -968,6 +974,15 @@ class Mapper(_InspectionAttr): self._all_tables = self.inherits._all_tables if self.polymorphic_identity is not None: + if self.polymorphic_identity in self.polymorphic_map: + util.warn( + "Reassigning polymorphic association for identity %r " + "from %r to %r: Check for duplicate use of %r as " + "value for polymorphic_identity." % + (self.polymorphic_identity, + self.polymorphic_map[self.polymorphic_identity], + self, self.polymorphic_identity) + ) self.polymorphic_map[self.polymorphic_identity] = self else: @@ -1080,6 +1095,7 @@ class Mapper(_InspectionAttr): auto-session attachment logic. """ + manager = attributes.manager_of_class(self.class_) if self.non_primary: @@ -1109,6 +1125,8 @@ class Mapper(_InspectionAttr): _mapper_registry[self] = True + # note: this *must be called before instrumentation.register_class* + # to maintain the documented behavior of instrument_class self.dispatch.instrument_class(self, self.class_) if manager is None: @@ -1127,7 +1145,6 @@ class Mapper(_InspectionAttr): event.listen(manager, 'first_init', _event_on_first_init, raw=True) event.listen(manager, 'init', _event_on_init, raw=True) - event.listen(manager, 'resurrect', _event_on_resurrect, raw=True) for key, method in util.iterate_attributes(self.class_): if isinstance(method, types.FunctionType): @@ -1189,14 +1206,6 @@ class Mapper(_InspectionAttr): util.ordered_column_set(t.c).\ intersection(all_cols) - # determine cols that aren't expressed within our tables; mark these - # as "read only" properties which are refreshed upon INSERT/UPDATE - self._readonly_props = set( - self._columntoproperty[col] - for col in self._columntoproperty - if not hasattr(col, 'table') or - col.table not in self._cols_by_table) - # if explicit PK argument sent, add those columns to the # primary key mappings if self._primary_key_argument: @@ -1247,6 +1256,15 @@ class Mapper(_InspectionAttr): self.primary_key = tuple(primary_key) self._log("Identified primary key columns: %s", primary_key) + # determine cols that aren't expressed within our tables; mark these + # as "read only" properties which are refreshed upon INSERT/UPDATE + self._readonly_props = set( + self._columntoproperty[col] + for col in self._columntoproperty + if self._columntoproperty[col] not in self._identity_key_props and + (not hasattr(col, 'table') or + col.table not in self._cols_by_table)) + def _configure_properties(self): # Column and other ClauseElement objects which are mapped @@ -1452,13 +1470,11 @@ class Mapper(_InspectionAttr): if polymorphic_key in dict_ and \ dict_[polymorphic_key] not in \ mapper._acceptable_polymorphic_identities: - util.warn( + util.warn_limited( "Flushing object %s with " "incompatible polymorphic identity %r; the " - "object may 
not refresh and/or load correctly" % ( - state_str(state), - dict_[polymorphic_key] - ) + "object may not refresh and/or load correctly", + (state_str(state), dict_[polymorphic_key]) ) self._set_polymorphic_identity = _set_polymorphic_identity @@ -1489,6 +1505,10 @@ class Mapper(_InspectionAttr): return identities + @_memoized_configured_property + def _prop_set(self): + return frozenset(self._props.values()) + def _adapt_inherited_property(self, key, prop, init): if not self.concrete: self._configure_property(key, prop, init=False, setparent=False) @@ -1578,6 +1598,8 @@ class Mapper(_InspectionAttr): self, prop, )) + oldprop = self._props[key] + self._path_registry.pop(oldprop, None) self._props[key] = prop @@ -1892,6 +1914,66 @@ class Mapper(_InspectionAttr): """ + @_memoized_configured_property + def _insert_cols_as_none(self): + return dict( + ( + table, + frozenset( + col.key for col in columns + if not col.primary_key and + not col.server_default and not col.default) + ) + for table, columns in self._cols_by_table.items() + ) + + @_memoized_configured_property + def _propkey_to_col(self): + return dict( + ( + table, + dict( + (self._columntoproperty[col].key, col) + for col in columns + ) + ) + for table, columns in self._cols_by_table.items() + ) + + @_memoized_configured_property + def _pk_keys_by_table(self): + return dict( + ( + table, + frozenset([col.key for col in pks]) + ) + for table, pks in self._pks_by_table.items() + ) + + @_memoized_configured_property + def _server_default_cols(self): + return dict( + ( + table, + frozenset([ + col.key for col in columns + if col.server_default is not None]) + ) + for table, columns in self._cols_by_table.items() + ) + + @_memoized_configured_property + def _server_onupdate_default_cols(self): + return dict( + ( + table, + frozenset([ + col.key for col in columns + if col.server_onupdate is not None]) + ) + for table, columns in self._cols_by_table.items() + ) + @property def selectable(self): """The :func:`.select` construct this :class:`.Mapper` selects from @@ -1968,6 +2050,17 @@ class Mapper(_InspectionAttr): returned, including :attr:`.synonyms`, :attr:`.column_attrs`, :attr:`.relationships`, and :attr:`.composites`. + .. warning:: + + The :attr:`.Mapper.attrs` accessor namespace is an + instance of :class:`.OrderedProperties`. This is + a dictionary-like object which includes a small number of + named methods such as :meth:`.OrderedProperties.items` + and :meth:`.OrderedProperties.values`. When + accessing attributes dynamically, favor using the dict-access + scheme, e.g. ``mapper.attrs[somename]`` over + ``getattr(mapper.attrs, somename)`` to avoid name collisions. + .. seealso:: :attr:`.Mapper.all_orm_descriptors` @@ -1979,7 +2072,7 @@ @util.memoized_property def all_orm_descriptors(self): - """A namespace of all :class:`._InspectionAttr` attributes associated + """A namespace of all :class:`.InspectionAttr` attributes associated with the mapped class. These attributes are in all cases Python :term:`descriptors` @@ -1988,13 +2081,13 @@ This namespace includes attributes that are mapped to the class as well as attributes declared by extension modules. It includes any Python descriptor type that inherits from - :class:`._InspectionAttr`. This includes + :class:`.InspectionAttr`. This includes :class:`.QueryableAttribute`, as well as extension types such as :class:`.hybrid_property`, :class:`.hybrid_method` and :class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes, - the attribute :attr:`._InspectionAttr.extension_type` will refer + the attribute :attr:`.InspectionAttr.extension_type` will refer to a constant that distinguishes between different extension types. When dealing with a :class:`.QueryableAttribute`, the @@ -2003,6 +2096,17 @@ referring to the collection of mapped properties via :attr:`.Mapper.attrs`. + .. warning:: + + The :attr:`.Mapper.all_orm_descriptors` accessor namespace is an + instance of :class:`.OrderedProperties`. This is + a dictionary-like object which includes a small number of + named methods such as :meth:`.OrderedProperties.items` + and :meth:`.OrderedProperties.values`. When + accessing attributes dynamically, favor using the dict-access + scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over + ``getattr(mapper.all_orm_descriptors, somename)`` to avoid name collisions. + .. versionadded:: 0.8.0 .. seealso:: @@ -2044,6 +2148,17 @@ """Return a namespace of all :class:`.RelationshipProperty` properties maintained by this :class:`.Mapper`. + .. warning:: + + The :attr:`.Mapper.relationships` accessor namespace is an + instance of :class:`.OrderedProperties`. This is + a dictionary-like object which includes a small number of + named methods such as :meth:`.OrderedProperties.items` + and :meth:`.OrderedProperties.values`. When + accessing attributes dynamically, favor using the dict-access + scheme, e.g. ``mapper.relationships[somename]`` over + ``getattr(mapper.relationships, somename)`` to avoid name collisions. + .. seealso:: :attr:`.Mapper.attrs` - namespace of all :class:`.MapperProperty` @@ -2238,6 +2353,16 @@ class Mapper(_InspectionAttr): def primary_base_mapper(self): return self.class_manager.mapper.base_mapper + def _result_has_identity_key(self, result, adapter=None): + pk_cols = self.primary_key + if adapter: + pk_cols = [adapter.columns[c] for c in pk_cols] + for col in pk_cols: + if not result._has_key(col): + return False + else: + return True + def identity_key_from_row(self, row, adapter=None): """Return an identity-map key for use in storing/retrieving an item from the identity map. @@ -2286,7 +2411,7 @@ class Mapper(_InspectionAttr): manager = state.manager return self._identity_class, tuple([ manager[self._columntoproperty[col].key]. - impl.get(state, dict_, attributes.PASSIVE_OFF) + impl.get(state, dict_, attributes.PASSIVE_RETURN_NEVER_SET) for col in self.primary_key ]) @@ -2301,22 +2426,50 @@ class Mapper(_InspectionAttr): """ state = attributes.instance_state(instance) - return self._primary_key_from_state(state) + return self._primary_key_from_state(state, attributes.PASSIVE_OFF) - def _primary_key_from_state(self, state): + def _primary_key_from_state( + self, state, passive=attributes.PASSIVE_RETURN_NEVER_SET): dict_ = state.dict manager = state.manager return [ - manager[self._columntoproperty[col].key]. - impl.get(state, dict_, attributes.PASSIVE_OFF) - for col in self.primary_key + manager[prop.key].
+ impl.get(state, dict_, passive) + for prop in self._identity_key_props ] - def _get_state_attr_by_column(self, state, dict_, column, - passive=attributes.PASSIVE_OFF): + @_memoized_configured_property + def _identity_key_props(self): + return [self._columntoproperty[col] for col in self.primary_key] + + @_memoized_configured_property + def _all_pk_props(self): + collection = set() + for table in self.tables: + collection.update(self._pks_by_table[table]) + return collection + + @_memoized_configured_property + def _should_undefer_in_wildcard(self): + cols = set(self.primary_key) + if self.polymorphic_on is not None: + cols.add(self.polymorphic_on) + return cols + + @_memoized_configured_property + def _primary_key_propkeys(self): + return set([prop.key for prop in self._all_pk_props]) + + def _get_state_attr_by_column( + self, state, dict_, column, + passive=attributes.PASSIVE_RETURN_NEVER_SET): prop = self._columntoproperty[column] return state.manager[prop.key].impl.get(state, dict_, passive=passive) + def _set_committed_state_attr_by_column(self, state, dict_, column, value): + prop = self._columntoproperty[column] + state.manager[prop.key].impl.set_committed_value(state, dict_, value) + def _set_state_attr_by_column(self, state, dict_, column, value): prop = self._columntoproperty[column] state.manager[prop.key].impl.set(state, dict_, value, None) @@ -2324,14 +2477,12 @@ class Mapper(_InspectionAttr): def _get_committed_attr_by_column(self, obj, column): state = attributes.instance_state(obj) dict_ = attributes.instance_dict(obj) - return self._get_committed_state_attr_by_column(state, dict_, column) + return self._get_committed_state_attr_by_column( + state, dict_, column, passive=attributes.PASSIVE_OFF) def _get_committed_state_attr_by_column( - self, - state, - dict_, - column, - passive=attributes.PASSIVE_OFF): + self, state, dict_, column, + passive=attributes.PASSIVE_RETURN_NEVER_SET): prop = self._columntoproperty[column] return state.manager[prop.key].impl.\ @@ -2372,7 +2523,7 @@ class Mapper(_InspectionAttr): state, state.dict, leftcol, passive=attributes.PASSIVE_NO_INITIALIZE) - if leftval is attributes.PASSIVE_NO_RESULT or leftval is None: + if leftval in orm_util._none_set: raise ColumnsNotAvailable() binary.left = sql.bindparam(None, leftval, type_=binary.right.type) @@ -2381,8 +2532,7 @@ class Mapper(_InspectionAttr): state, state.dict, rightcol, passive=attributes.PASSIVE_NO_INITIALIZE) - if rightval is attributes.PASSIVE_NO_RESULT or \ - rightval is None: + if rightval in orm_util._none_set: raise ColumnsNotAvailable() binary.right = sql.bindparam(None, rightval, type_=binary.right.type) @@ -2419,15 +2569,24 @@ class Mapper(_InspectionAttr): for all relationships that meet the given cascade rule. :param type_: - The name of the cascade rule (i.e. save-update, delete, - etc.) + The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``, + etc.). + + .. note:: The ``"all"`` cascade is not accepted here. For a generic + object traversal function, see :ref:`faq_walk_objects`. :param state: The lead InstanceState. Child items will be processed per the relationships defined for this object's mapper. - the return value are object instances; this provides a strong - reference so that they don't fall out of scope immediately. + :return: the method yields individual object instances. + + .. seealso:: + + :ref:`unitofwork_cascades` + + :ref:`faq_walk_objects` - illustrates a generic function to + traverse all objects without relying on cascades.
""" visited_states = set() @@ -2544,7 +2703,33 @@ def configure_mappers(): have been constructed thus far. This function can be called any number of times, but in - most cases is handled internally. + most cases is invoked automatically, the first time mappings are used, + as well as whenever mappings are used and additional not-yet-configured + mappers have been constructed. + + Points at which this occur include when a mapped class is instantiated + into an instance, as well as when the :meth:`.Session.query` method + is used. + + The :func:`.configure_mappers` function provides several event hooks + that can be used to augment its functionality. These methods include: + + * :meth:`.MapperEvents.before_configured` - called once before + :func:`.configure_mappers` does any work; this can be used to establish + additional options, properties, or related mappings before the operation + proceeds. + + * :meth:`.MapperEvents.mapper_configured` - called as each indivudal + :class:`.Mapper` is configured within the process; will include all + mapper state except for backrefs set up by other mappers that are still + to be configured. + + * :meth:`.MapperEvents.after_configured` - called once after + :func:`.configure_mappers` is complete; at this stage, all + :class:`.Mapper` objects that are known to SQLAlchemy will be fully + configured. Note that the calling application may still have other + mappings that haven't been produced yet, such as if they are in modules + as yet unimported. """ @@ -2563,7 +2748,7 @@ def configure_mappers(): if not Mapper._new_mappers: return - Mapper.dispatch(Mapper).before_configured() + Mapper.dispatch._for_class(Mapper).before_configured() # initialize properties on all mappers # note that _mapper_registry is unordered, which # may randomly conceal/reveal issues related to @@ -2584,7 +2769,7 @@ def configure_mappers(): mapper._expire_memoizations() mapper.dispatch.mapper_configured( mapper, mapper.class_) - except: + except Exception: exc = sys.exc_info()[1] if not hasattr(exc, '_configure_failed'): mapper._configure_failed = exc @@ -2595,7 +2780,7 @@ def configure_mappers(): _already_compiling = False finally: _CONFIGURE_MUTEX.release() - Mapper.dispatch(Mapper).after_configured() + Mapper.dispatch._for_class(Mapper).after_configured() def reconstructor(fn): @@ -2704,20 +2889,11 @@ def _event_on_init(state, args, kwargs): instrumenting_mapper._set_polymorphic_identity(state) -def _event_on_resurrect(state): - # re-populate the primary key elements - # of the dict based on the mapping. - instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) - if instrumenting_mapper: - for col, val in zip(instrumenting_mapper.primary_key, state.key[1]): - instrumenting_mapper._set_state_attr_by_column( - state, state.dict, col, val) - - class _ColumnMapping(dict): - """Error reporting helper for mapper._columntoproperty.""" + __slots__ = 'mapper', + def __init__(self, mapper): self.mapper = mapper diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/path_registry.py b/lib/python3.5/site-packages/sqlalchemy/orm/path_registry.py index f10a125..cf18465 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/path_registry.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/path_registry.py @@ -1,5 +1,5 @@ # orm/path_registry.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -13,6 +13,9 @@ from .. import util from .. 
import exc from itertools import chain from .base import class_mapper +import logging + +log = logging.getLogger(__name__) def _unreduce_path(path): @@ -49,14 +52,19 @@ class PathRegistry(object): """ + is_token = False + is_root = False + def __eq__(self, other): return other is not None and \ self.path == other.path def set(self, attributes, key, value): + log.debug("set '%s' on path '%s' to '%s'", key, self, value) attributes[(key, self.path)] = value def setdefault(self, attributes, key, value): + log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value) attributes.setdefault((key, self.path), value) def get(self, attributes, key, value=None): @@ -148,6 +156,8 @@ class RootRegistry(PathRegistry): """ path = () has_entity = False + is_aliased_class = False + is_root = True def __getitem__(self, entity): return entity._path_registry @@ -163,6 +173,15 @@ class TokenRegistry(PathRegistry): has_entity = False + is_token = True + + def generate_for_superclasses(self): + if not self.parent.is_aliased_class and not self.parent.is_root: + for ent in self.parent.mapper.iterate_to_root(): + yield TokenRegistry(self.parent.parent[ent], self.token) + else: + yield self + def __getitem__(self, entity): raise NotImplementedError() @@ -184,6 +203,11 @@ class PropRegistry(PathRegistry): self.parent = parent self.path = parent.path + (prop,) + def __str__(self): + return " -> ".join( + str(elem) for elem in self.path + ) + @util.memoized_property def has_entity(self): return hasattr(self.prop, "mapper") diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/persistence.py b/lib/python3.5/site-packages/sqlalchemy/orm/persistence.py index f3c6959..5d69f51 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/persistence.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/persistence.py @@ -1,5 +1,5 @@ # orm/persistence.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -15,15 +15,114 @@ in unitofwork.py. """ import operator -from itertools import groupby -from .. import sql, util, exc as sa_exc, schema +from itertools import groupby, chain +from .. import sql, util, exc as sa_exc from . import attributes, sync, exc as orm_exc, evaluator -from .base import _state_mapper, state_str, _attr_as_key +from .base import state_str, _attr_as_key, _entity_descriptor from ..sql import expression +from ..sql.base import _from_objects from . 
import loading -def save_obj(base_mapper, states, uowtransaction, single=False): +def _bulk_insert( + mapper, mappings, session_transaction, isstates, return_defaults): + base_mapper = mapper.base_mapper + + cached_connections = _cached_connection_dict(base_mapper) + + if session_transaction.session.connection_callable: + raise NotImplementedError( + "connection_callable / per-instance sharding " + "not supported in bulk_insert()") + + if isstates: + if return_defaults: + states = [(state, state.dict) for state in mappings] + mappings = [dict_ for (state, dict_) in states] + else: + mappings = [state.dict for state in mappings] + else: + mappings = list(mappings) + + connection = session_transaction.connection(base_mapper) + for table, super_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(super_mapper): + continue + + records = ( + (None, state_dict, params, mapper, + connection, value_params, has_all_pks, has_all_defaults) + for + state, state_dict, params, mp, + conn, value_params, has_all_pks, + has_all_defaults in _collect_insert_commands(table, ( + (None, mapping, mapper, connection) + for mapping in mappings), + bulk=True, return_defaults=return_defaults + ) + ) + _emit_insert_statements(base_mapper, None, + cached_connections, + super_mapper, table, records, + bookkeeping=return_defaults) + + if return_defaults and isstates: + identity_cls = mapper._identity_class + identity_props = [p.key for p in mapper._identity_key_props] + for state, dict_ in states: + state.key = ( + identity_cls, + tuple([dict_[key] for key in identity_props]) + ) + + +def _bulk_update(mapper, mappings, session_transaction, + isstates, update_changed_only): + base_mapper = mapper.base_mapper + + cached_connections = _cached_connection_dict(base_mapper) + + def _changed_dict(mapper, state): + return dict( + (k, v) + for k, v in state.dict.items() if k in state.committed_state or k + in mapper._primary_key_propkeys + ) + + if isstates: + if update_changed_only: + mappings = [_changed_dict(mapper, state) for state in mappings] + else: + mappings = [state.dict for state in mappings] + else: + mappings = list(mappings) + + if session_transaction.session.connection_callable: + raise NotImplementedError( + "connection_callable / per-instance sharding " + "not supported in bulk_update()") + + connection = session_transaction.connection(base_mapper) + + for table, super_mapper in base_mapper._sorted_tables.items(): + if not mapper.isa(super_mapper): + continue + + records = _collect_update_commands(None, table, ( + (None, mapping, mapper, connection, + (mapping[mapper._version_id_prop.key] + if mapper._version_id_prop else None)) + for mapping in mappings + ), bulk=True) + + _emit_update_statements(base_mapper, None, + cached_connections, + super_mapper, table, records, + bookkeeping=False) + + +def save_obj( + base_mapper, states, uowtransaction, single=False): """Issue ``INSERT`` and/or ``UPDATE`` statements for a list of objects. 
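For orientation, the private `_bulk_insert` and `_bulk_update` helpers introduced in this hunk are the internals behind the public bulk-operations API of the 1.0 series (`Session.bulk_insert_mappings`, `Session.bulk_update_mappings`, `Session.bulk_save_objects`). A minimal usage sketch follows; the `User` model, engine URL, and sample data are illustrative assumptions, not part of this changeset:

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'          # hypothetical example model
        id = Column(Integer, primary_key=True)
        name = Column(String(50))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)

    # plain dicts take the isstates=False path of _bulk_insert; no
    # InstanceState bookkeeping or attribute events are involved
    session.bulk_insert_mappings(User, [
        {'id': 1, 'name': 'ed'},
        {'id': 2, 'name': 'wendy'},
    ])

    # primary key values must be present in each dict; the bulk=True
    # path of _collect_update_commands matches rows on them
    session.bulk_update_mappings(User, [{'id': 1, 'name': 'edward'}])
    session.commit()

Note also the guard at the top of both helpers: sessions configured with a `connection_callable` (per-instance sharding) raise `NotImplementedError` on the bulk path.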
@@ -40,32 +139,54 @@ def save_obj(base_mapper, states, uowtransaction, single=False): save_obj(base_mapper, [state], uowtransaction, single=True) return - states_to_insert, states_to_update = _organize_states_for_save( - base_mapper, - states, - uowtransaction) - + states_to_update = [] + states_to_insert = [] cached_connections = _cached_connection_dict(base_mapper) + for (state, dict_, mapper, connection, + has_identity, + row_switch, update_version_id) in _organize_states_for_save( + base_mapper, states, uowtransaction + ): + if has_identity or row_switch: + states_to_update.append( + (state, dict_, mapper, connection, update_version_id) + ) + else: + states_to_insert.append( + (state, dict_, mapper, connection) + ) + for table, mapper in base_mapper._sorted_tables.items(): - insert = _collect_insert_commands(base_mapper, uowtransaction, - table, states_to_insert) + if table not in mapper._pks_by_table: + continue + insert = _collect_insert_commands(table, states_to_insert) - update = _collect_update_commands(base_mapper, uowtransaction, - table, states_to_update) + update = _collect_update_commands( + uowtransaction, table, states_to_update) - if update: - _emit_update_statements(base_mapper, uowtransaction, - cached_connections, - mapper, table, update) + _emit_update_statements(base_mapper, uowtransaction, + cached_connections, + mapper, table, update) - if insert: - _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, - mapper, table, insert) + _emit_insert_statements(base_mapper, uowtransaction, + cached_connections, + mapper, table, insert) - _finalize_insert_update_commands(base_mapper, uowtransaction, - states_to_insert, states_to_update) + _finalize_insert_update_commands( + base_mapper, uowtransaction, + chain( + ( + (state, state_dict, mapper, connection, False) + for state, state_dict, mapper, connection in states_to_insert + ), + ( + (state, state_dict, mapper, connection, True) + for state, state_dict, mapper, connection, + update_version_id in states_to_update + ) + ) + ) def post_update(base_mapper, states, uowtransaction, post_update_cols): @@ -75,19 +196,28 @@ def post_update(base_mapper, states, uowtransaction, post_update_cols): """ cached_connections = _cached_connection_dict(base_mapper) - states_to_update = _organize_states_for_post_update( + states_to_update = list(_organize_states_for_post_update( base_mapper, - states, uowtransaction) + states, uowtransaction)) for table, mapper in base_mapper._sorted_tables.items(): + if table not in mapper._pks_by_table: + continue + + update = ( + (state, state_dict, sub_mapper, connection) + for + state, state_dict, sub_mapper, connection in states_to_update + if table in sub_mapper._pks_by_table + ) + update = _collect_post_update_commands(base_mapper, uowtransaction, - table, states_to_update, + table, update, post_update_cols) - if update: - _emit_post_update_statements(base_mapper, uowtransaction, - cached_connections, - mapper, table, update) + _emit_post_update_statements(base_mapper, uowtransaction, + cached_connections, + mapper, table, update) def delete_obj(base_mapper, states, uowtransaction): @@ -100,24 +230,26 @@ def delete_obj(base_mapper, states, uowtransaction): cached_connections = _cached_connection_dict(base_mapper) - states_to_delete = _organize_states_for_delete( + states_to_delete = list(_organize_states_for_delete( base_mapper, states, - uowtransaction) + uowtransaction)) table_to_mapper = base_mapper._sorted_tables for table in reversed(list(table_to_mapper.keys())): + mapper = 
table_to_mapper[table] + if table not in mapper._pks_by_table: + continue + delete = _collect_delete_commands(base_mapper, uowtransaction, table, states_to_delete) - mapper = table_to_mapper[table] - _emit_delete_statements(base_mapper, uowtransaction, cached_connections, mapper, table, delete) - for state, state_dict, mapper, has_identity, connection \ - in states_to_delete: + for state, state_dict, mapper, connection, \ + update_version_id in states_to_delete: mapper.dispatch.after_delete(mapper, connection, state) @@ -133,17 +265,15 @@ def _organize_states_for_save(base_mapper, states, uowtransaction): """ - states_to_insert = [] - states_to_update = [] - for state, dict_, mapper, connection in _connections_for_states( base_mapper, uowtransaction, states): has_identity = bool(state.key) + instance_key = state.key or mapper._identity_key_from_state(state) - row_switch = None + row_switch = update_version_id = None # call before_XXX extensions if not has_identity: @@ -180,18 +310,14 @@ def _organize_states_for_save(base_mapper, states, uowtransaction): uowtransaction.remove_state_actions(existing) row_switch = existing - if not has_identity and not row_switch: - states_to_insert.append( - (state, dict_, mapper, connection, - has_identity, instance_key, row_switch) - ) - else: - states_to_update.append( - (state, dict_, mapper, connection, - has_identity, instance_key, row_switch) - ) + if (has_identity or row_switch) and mapper.version_id_col is not None: + update_version_id = mapper._get_committed_state_attr_by_column( + row_switch if row_switch else state, + row_switch.dict if row_switch else dict_, + mapper.version_id_col) - return states_to_insert, states_to_update + yield (state, dict_, mapper, connection, + has_identity, row_switch, update_version_id) def _organize_states_for_post_update(base_mapper, states, @@ -204,8 +330,7 @@ def _organize_states_for_post_update(base_mapper, states, the execution per state. """ - return list(_connections_for_states(base_mapper, uowtransaction, - states)) + return _connections_for_states(base_mapper, uowtransaction, states) def _organize_states_for_delete(base_mapper, states, uowtransaction): @@ -216,72 +341,81 @@ def _organize_states_for_delete(base_mapper, states, uowtransaction): mapper, the connection to use for the execution per state. """ - states_to_delete = [] - for state, dict_, mapper, connection in _connections_for_states( base_mapper, uowtransaction, states): mapper.dispatch.before_delete(mapper, connection, state) - states_to_delete.append((state, dict_, mapper, - bool(state.key), connection)) - return states_to_delete + if mapper.version_id_col is not None: + update_version_id = \ + mapper._get_committed_state_attr_by_column( + state, dict_, + mapper.version_id_col) + else: + update_version_id = None + + yield ( + state, dict_, mapper, connection, update_version_id) -def _collect_insert_commands(base_mapper, uowtransaction, table, - states_to_insert): +def _collect_insert_commands( + table, states_to_insert, + bulk=False, return_defaults=False): """Identify sets of values to use in INSERT statements for a list of states. 
""" - insert = [] - for state, state_dict, mapper, connection, has_identity, \ - instance_key, row_switch in states_to_insert: + for state, state_dict, mapper, connection in states_to_insert: if table not in mapper._pks_by_table: continue - pks = mapper._pks_by_table[table] - params = {} value_params = {} - has_all_pks = True - has_all_defaults = True - for col in mapper._cols_by_table[table]: - if col is mapper.version_id_col and \ - mapper.version_id_generator is not False: - val = mapper.version_id_generator(None) - params[col.key] = val + propkey_to_col = mapper._propkey_to_col[table] + + for propkey in set(propkey_to_col).intersection(state_dict): + value = state_dict[propkey] + col = propkey_to_col[propkey] + if value is None: + continue + elif not bulk and isinstance(value, sql.ClauseElement): + value_params[col.key] = value else: - # pull straight from the dict for - # pending objects - prop = mapper._columntoproperty[col] - value = state_dict.get(prop.key, None) + params[col.key] = value - if value is None: - if col in pks: - has_all_pks = False - elif col.default is None and \ - col.server_default is None: - params[col.key] = value - elif col.server_default is not None and \ - mapper.base_mapper.eager_defaults: - has_all_defaults = False + if not bulk: + for colkey in mapper._insert_cols_as_none[table].\ + difference(params).difference(value_params): + params[colkey] = None - elif isinstance(value, sql.ClauseElement): - value_params[col] = value - else: - params[col.key] = value + if not bulk or return_defaults: + has_all_pks = mapper._pk_keys_by_table[table].issubset(params) - insert.append((state, state_dict, params, mapper, - connection, value_params, has_all_pks, - has_all_defaults)) - return insert + if mapper.base_mapper.eager_defaults: + has_all_defaults = mapper._server_default_cols[table].\ + issubset(params) + else: + has_all_defaults = True + else: + has_all_defaults = has_all_pks = True + + if mapper.version_id_generator is not False \ + and mapper.version_id_col is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: + params[mapper.version_id_col.key] = \ + mapper.version_id_generator(None) + + yield ( + state, state_dict, params, mapper, + connection, value_params, has_all_pks, + has_all_defaults) -def _collect_update_commands(base_mapper, uowtransaction, - table, states_to_update): +def _collect_update_commands( + uowtransaction, table, states_to_update, + bulk=False): """Identify sets of values to use in UPDATE statements for a list of states. 
@@ -293,114 +427,114 @@ def _collect_update_commands(base_mapper, uowtransaction, """ - update = [] - for state, state_dict, mapper, connection, has_identity, \ - instance_key, row_switch in states_to_update: + for state, state_dict, mapper, connection, \ + update_version_id in states_to_update: + if table not in mapper._pks_by_table: continue pks = mapper._pks_by_table[table] - params = {} value_params = {} - hasdata = hasnull = False - for col in mapper._cols_by_table[table]: - if col is mapper.version_id_col: - params[col._label] = \ - mapper._get_committed_state_attr_by_column( - row_switch or state, - row_switch and row_switch.dict - or state_dict, - col) + propkey_to_col = mapper._propkey_to_col[table] - prop = mapper._columntoproperty[col] - history = state.manager[prop.key].impl.get_history( - state, state_dict, attributes.PASSIVE_NO_INITIALIZE - ) - if history.added: - params[col.key] = history.added[0] - hasdata = True - else: - if mapper.version_id_generator is not False: - val = mapper.version_id_generator(params[col._label]) - params[col.key] = val + if bulk: + params = dict( + (propkey_to_col[propkey].key, state_dict[propkey]) + for propkey in + set(propkey_to_col).intersection(state_dict).difference( + mapper._pk_keys_by_table[table]) + ) + has_all_defaults = True + else: + params = {} + for propkey in set(propkey_to_col).intersection( + state.committed_state): + value = state_dict[propkey] + col = propkey_to_col[propkey] - # HACK: check for history, in case the - # history is only - # in a different table than the one - # where the version_id_col is. - for prop in mapper._columntoproperty.values(): - history = ( - state.manager[prop.key].impl.get_history( - state, state_dict, - attributes.PASSIVE_NO_INITIALIZE)) - if history.added: - hasdata = True + if isinstance(value, sql.ClauseElement): + value_params[col] = value + # guard against values that generate non-__nonzero__ + # objects for __eq__() + elif state.manager[propkey].impl.is_equal( + value, state.committed_state[propkey]) is not True: + params[col.key] = value + + if mapper.base_mapper.eager_defaults: + has_all_defaults = mapper._server_onupdate_default_cols[table].\ + issubset(params) else: - prop = mapper._columntoproperty[col] - history = state.manager[prop.key].impl.get_history( - state, state_dict, - attributes.PASSIVE_NO_INITIALIZE) + has_all_defaults = True + + if update_version_id is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: + + if not bulk and not (params or value_params): + # HACK: check for history in other tables, in case the + # history is only in a different table than the one + # where the version_id_col is. This logic was lost + # from 0.9 -> 1.0.0 and restored in 1.0.6. + for prop in mapper._columntoproperty.values(): + history = ( + state.manager[prop.key].impl.get_history( + state, state_dict, + attributes.PASSIVE_NO_INITIALIZE)) + if history.added: + break + else: + # no net change, break + continue + + col = mapper.version_id_col + params[col._label] = update_version_id + + if (bulk or col.key not in params) and \ + mapper.version_id_generator is not False: + val = mapper.version_id_generator(update_version_id) + params[col.key] = val + + elif not (params or value_params): + continue + + if bulk: + pk_params = dict( + (propkey_to_col[propkey]._label, state_dict.get(propkey)) + for propkey in + set(propkey_to_col). 
+ intersection(mapper._pk_keys_by_table[table]) + ) + else: + pk_params = {} + for col in pks: + propkey = mapper._columntoproperty[col].key + + history = state.manager[propkey].impl.get_history( + state, state_dict, attributes.PASSIVE_OFF) + if history.added: - if isinstance(history.added[0], - sql.ClauseElement): - value_params[col] = history.added[0] + if not history.deleted or \ + ("pk_cascaded", state, col) in \ + uowtransaction.attributes: + pk_params[col._label] = history.added[0] + params.pop(col.key, None) else: - value = history.added[0] - params[col.key] = value + # else, use the old value to locate the row + pk_params[col._label] = history.deleted[0] + params[col.key] = history.added[0] + else: + pk_params[col._label] = history.unchanged[0] + if pk_params[col._label] is None: + raise orm_exc.FlushError( + "Can't update table %s using NULL for primary " + "key value on column %s" % (table, col)) - if col in pks: - if history.deleted and \ - not row_switch: - # if passive_updates and sync detected - # this was a pk->pk sync, use the new - # value to locate the row, since the - # DB would already have set this - if ("pk_cascaded", state, col) in \ - uowtransaction.attributes: - value = history.added[0] - params[col._label] = value - else: - # use the old value to - # locate the row - value = history.deleted[0] - params[col._label] = value - hasdata = True - else: - # row switch logic can reach us here - # remove the pk from the update params - # so the update doesn't - # attempt to include the pk in the - # update statement - del params[col.key] - value = history.added[0] - params[col._label] = value - if value is None: - hasnull = True - else: - hasdata = True - elif col in pks: - value = state.manager[prop.key].impl.get( - state, state_dict) - if value is None: - hasnull = True - params[col._label] = value - - # see #3060. Need to consider an "unchanged" None - # as potentially history for now. 
- elif row_switch and history.unchanged == [None]: - params[col.key] = None - hasdata = True - if hasdata: - if hasnull: - raise orm_exc.FlushError( - "Can't update table " - "using NULL for primary " - "key value") - update.append((state, state_dict, params, mapper, - connection, value_params)) - return update + if params or value_params: + params.update(pk_params) + yield ( + state, state_dict, params, mapper, + connection, value_params, has_all_defaults) def _collect_post_update_commands(base_mapper, uowtransaction, table, @@ -410,10 +544,10 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table, """ - update = [] for state, state_dict, mapper, connection in states_to_update: - if table not in mapper._pks_by_table: - continue + + # assert table in mapper._pks_by_table + pks = mapper._pks_by_table[table] params = {} hasdata = False @@ -422,8 +556,8 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table, if col in pks: params[col._label] = \ mapper._get_state_attr_by_column( - state, - state_dict, col) + state, + state_dict, col, passive=attributes.PASSIVE_OFF) elif col in post_update_cols: prop = mapper._columntoproperty[col] @@ -435,9 +569,7 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table, params[col.key] = value hasdata = True if hasdata: - update.append((state, state_dict, params, mapper, - connection)) - return update + yield params, connection def _collect_delete_commands(base_mapper, uowtransaction, table, @@ -445,42 +577,38 @@ def _collect_delete_commands(base_mapper, uowtransaction, table, """Identify values to use in DELETE statements for a list of states to be deleted.""" - delete = util.defaultdict(list) + for state, state_dict, mapper, connection, \ + update_version_id in states_to_delete: - for state, state_dict, mapper, has_identity, connection \ - in states_to_delete: - if not has_identity or table not in mapper._pks_by_table: + if table not in mapper._pks_by_table: continue params = {} - delete[connection].append(params) for col in mapper._pks_by_table[table]: params[col.key] = \ value = \ mapper._get_committed_state_attr_by_column( - state, state_dict, col) + state, state_dict, col) if value is None: raise orm_exc.FlushError( - "Can't delete from table " + "Can't delete from table %s " "using NULL for primary " - "key value") + "key value on column %s" % (table, col)) - if mapper.version_id_col is not None and \ - table.c.contains_column(mapper.version_id_col): - params[mapper.version_id_col.key] = \ - mapper._get_committed_state_attr_by_column( - state, state_dict, - mapper.version_id_col) - return delete + if update_version_id is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: + params[mapper.version_id_col.key] = update_version_id + yield params, connection def _emit_update_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, update): + cached_connections, mapper, table, update, + bookkeeping=True): """Emit UPDATE statements corresponding to value lists collected by _collect_update_commands().""" needs_version_id = mapper.version_id_col is not None and \ - table.c.contains_column(mapper.version_id_col) + mapper.version_id_col in mapper._cols_by_table[table] def update_stmt(): clause = sql.and_() @@ -496,67 +624,136 @@ def _emit_update_statements(base_mapper, uowtransaction, type_=mapper.version_id_col.type)) stmt = table.update(clause) - if mapper.base_mapper.eager_defaults: - stmt = stmt.return_defaults() - elif mapper.version_id_col is not None: - stmt = 
stmt.return_defaults(mapper.version_id_col) - return stmt - statement = base_mapper._memo(('update', table), update_stmt) + cached_stmt = base_mapper._memo(('update', table), update_stmt) - rows = 0 - for state, state_dict, params, mapper, \ - connection, value_params in update: + for (connection, paramkeys, hasvalue, has_all_defaults), \ + records in groupby( + update, + lambda rec: ( + rec[4], # connection + set(rec[2]), # set of parameter keys + bool(rec[5]), # whether or not we have "value" parameters + rec[6] # has_all_defaults + ) + ): + rows = 0 + records = list(records) - if value_params: - c = connection.execute( - statement.values(value_params), - params) + statement = cached_stmt + + # TODO: would be super-nice to not have to determine this boolean + # inside the loop here, in the 99.9999% of the time there's only + # one connection in use + assert_singlerow = connection.dialect.supports_sane_rowcount + assert_multirow = assert_singlerow and \ + connection.dialect.supports_sane_multi_rowcount + allow_multirow = has_all_defaults and not needs_version_id + + if bookkeeping and not has_all_defaults and \ + mapper.base_mapper.eager_defaults: + statement = statement.return_defaults() + elif mapper.version_id_col is not None: + statement = statement.return_defaults(mapper.version_id_col) + + if hasvalue: + for state, state_dict, params, mapper, \ + connection, value_params, has_all_defaults in records: + c = connection.execute( + statement.values(value_params), + params) + if bookkeeping: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params) + rows += c.rowcount + check_rowcount = True else: - c = cached_connections[connection].\ - execute(statement, params) + if not allow_multirow: + check_rowcount = assert_singlerow + for state, state_dict, params, mapper, \ + connection, value_params, has_all_defaults in records: + c = cached_connections[connection].\ + execute(statement, params) - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - c, - c.context.compiled_parameters[0], - value_params) - rows += c.rowcount + # TODO: why with bookkeeping=False? + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params) + rows += c.rowcount + else: + multiparams = [rec[2] for rec in records] - if connection.dialect.supports_sane_rowcount: - if rows != len(update): - raise orm_exc.StaleDataError( - "UPDATE statement on table '%s' expected to " - "update %d row(s); %d were matched." % - (table.description, len(update), rows)) + check_rowcount = assert_multirow or ( + assert_singlerow and + len(multiparams) == 1 + ) - elif needs_version_id: - util.warn("Dialect %s does not support updated rowcount " - "- versioning cannot be verified." % - c.dialect.dialect_description, - stacklevel=12) + c = cached_connections[connection].\ + execute(statement, multiparams) + + rows += c.rowcount + + # TODO: why with bookkeeping=False? + for state, state_dict, params, mapper, \ + connection, value_params, has_all_defaults in records: + _postfetch( + mapper, + uowtransaction, + table, + state, + state_dict, + c, + c.context.compiled_parameters[0], + value_params) + + if check_rowcount: + if rows != len(records): + raise orm_exc.StaleDataError( + "UPDATE statement on table '%s' expected to " + "update %d row(s); %d were matched." 
% + (table.description, len(records), rows)) + + elif needs_version_id: + util.warn("Dialect %s does not support updated rowcount " + "- versioning cannot be verified." % + c.dialect.dialect_description) def _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, insert): + cached_connections, mapper, table, insert, + bookkeeping=True): """Emit INSERT statements corresponding to value lists collected by _collect_insert_commands().""" - statement = base_mapper._memo(('insert', table), table.insert) + cached_stmt = base_mapper._memo(('insert', table), table.insert) for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \ - records in groupby(insert, - lambda rec: (rec[4], - list(rec[2].keys()), - bool(rec[5]), - rec[6], rec[7]) - ): - if \ + records in groupby( + insert, + lambda rec: ( + rec[4], # connection + set(rec[2]), # parameter keys + bool(rec[5]), # whether we have "value" parameters + rec[6], + rec[7])): + + statement = cached_stmt + + if not bookkeeping or \ ( has_all_defaults or not base_mapper.eager_defaults @@ -569,19 +766,20 @@ def _emit_insert_statements(base_mapper, uowtransaction, c = cached_connections[connection].\ execute(statement, multiparams) - for (state, state_dict, params, mapper_rec, - conn, value_params, has_all_pks, has_all_defaults), \ - last_inserted_params in \ - zip(records, c.context.compiled_parameters): - _postfetch( - mapper_rec, - uowtransaction, - table, - state, - state_dict, - c, - last_inserted_params, - value_params) + if bookkeeping: + for (state, state_dict, params, mapper_rec, + conn, value_params, has_all_pks, has_all_defaults), \ + last_inserted_params in \ + zip(records, c.context.compiled_parameters): + _postfetch( + mapper_rec, + uowtransaction, + table, + state, + state_dict, + c, + last_inserted_params, + value_params) else: if not has_all_defaults and base_mapper.eager_defaults: @@ -609,13 +807,7 @@ def _emit_insert_statements(base_mapper, uowtransaction, mapper._pks_by_table[table]): prop = mapper_rec._columntoproperty[col] if state_dict.get(prop.key) is None: - # TODO: would rather say: - # state_dict[prop.key] = pk - mapper_rec._set_state_attr_by_column( - state, - state_dict, - col, pk) - + state_dict[prop.key] = pk _postfetch( mapper_rec, uowtransaction, @@ -648,11 +840,13 @@ def _emit_post_update_statements(base_mapper, uowtransaction, # also group them into common (connection, cols) sets # to support executemany(). 
for key, grouper in groupby( - update, lambda rec: (rec[4], list(rec[2].keys())) + update, lambda rec: ( + rec[1], # connection + set(rec[0]) # parameter keys + ) ): connection = key[0] - multiparams = [params for state, state_dict, - params, mapper, conn in grouper] + multiparams = [params for params, conn in grouper] cached_connections[connection].\ execute(statement, multiparams) @@ -663,7 +857,7 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, by _collect_delete_commands().""" need_version_id = mapper.version_id_col is not None and \ - table.c.contains_column(mapper.version_id_col) + mapper.version_id_col in mapper._cols_by_table[table] def delete_stmt(): clause = sql.and_() @@ -682,8 +876,12 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, return table.delete(clause) - for connection, del_objects in delete.items(): - statement = base_mapper._memo(('delete', table), delete_stmt) + statement = base_mapper._memo(('delete', table), delete_stmt) + for connection, recs in groupby( + delete, + lambda rec: rec[1] # connection + ): + del_objects = [params for params, connection in recs] connection = cached_connections[connection] @@ -736,15 +934,12 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, ) -def _finalize_insert_update_commands(base_mapper, uowtransaction, - states_to_insert, states_to_update): +def _finalize_insert_update_commands(base_mapper, uowtransaction, states): """finalize state on states that have been inserted or updated, including calling after_insert/after_update events. """ - for state, state_dict, mapper, connection, has_identity, \ - instance_key, row_switch in states_to_insert + \ - states_to_update: + for state, state_dict, mapper, connection, has_identity in states: if mapper._readonly_props: readonly = state.unmodified_intersection( @@ -763,9 +958,8 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, toload_now.extend(state._unloaded_non_object) elif mapper.version_id_col is not None and \ mapper.version_id_generator is False: - prop = mapper._columntoproperty[mapper.version_id_col] - if prop.key in state.unloaded: - toload_now.extend([prop.key]) + if mapper._version_id_prop.key in state.unloaded: + toload_now.extend([mapper._version_id_prop.key]) if toload_now: state.key = base_mapper._identity_key_from_state(state) @@ -782,31 +976,46 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, def _postfetch(mapper, uowtransaction, table, - state, dict_, result, params, value_params): + state, dict_, result, params, value_params, bulk=False): """Expire attributes in need of newly persisted database state, after an INSERT or UPDATE statement has proceeded for that state.""" - prefetch_cols = result.context.prefetch_cols - postfetch_cols = result.context.postfetch_cols - returning_cols = result.context.returning_cols + # TODO: bulk is never non-False, need to clean this up - if mapper.version_id_col is not None: + prefetch_cols = result.context.compiled.prefetch + postfetch_cols = result.context.compiled.postfetch + returning_cols = result.context.compiled.returning + + if mapper.version_id_col is not None and \ + mapper.version_id_col in mapper._cols_by_table[table]: prefetch_cols = list(prefetch_cols) + [mapper.version_id_col] + refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush) + if refresh_flush: + load_evt_attrs = [] + if returning_cols: row = result.context.returned_defaults if row is not None: for col in returning_cols: if 
col.primary_key: continue - mapper._set_state_attr_by_column(state, dict_, col, row[col]) + dict_[mapper._columntoproperty[col].key] = row[col] + if refresh_flush: + load_evt_attrs.append(mapper._columntoproperty[col].key) for c in prefetch_cols: if c.key in params and c in mapper._columntoproperty: - mapper._set_state_attr_by_column(state, dict_, c, params[c.key]) + dict_[mapper._columntoproperty[c].key] = params[c.key] + if refresh_flush: + load_evt_attrs.append(mapper._columntoproperty[c].key) - if postfetch_cols: + if refresh_flush and load_evt_attrs: + mapper.class_manager.dispatch.refresh_flush( + state, uowtransaction, load_evt_attrs) + + if postfetch_cols and state: state._expire_attributes(state.dict, [mapper._columntoproperty[c].key for c in postfetch_cols if c in @@ -817,10 +1026,13 @@ def _postfetch(mapper, uowtransaction, table, # TODO: this still goes a little too often. would be nice to # have definitive list of "columns that changed" here for m, equated_pairs in mapper._table_to_equated[table]: - sync.populate(state, m, state, m, - equated_pairs, - uowtransaction, - mapper.passive_updates) + if state is None: + sync.bulk_populate_inherit_keys(dict_, m, equated_pairs) + else: + sync.populate(state, m, state, m, + equated_pairs, + uowtransaction, + mapper.passive_updates) def _connections_for_states(base_mapper, uowtransaction, states): @@ -838,17 +1050,14 @@ def _connections_for_states(base_mapper, uowtransaction, states): connection_callable = \ uowtransaction.session.connection_callable else: - connection = None + connection = uowtransaction.transaction.connection(base_mapper) connection_callable = None for state in _sort_states(states): if connection_callable: connection = connection_callable(base_mapper, state.obj()) - elif not connection: - connection = uowtransaction.transaction.connection( - base_mapper) - mapper = _state_mapper(state) + mapper = state.manager.mapper yield state, state.dict, mapper, connection @@ -874,6 +1083,27 @@ class BulkUD(object): def __init__(self, query): self.query = query.enable_eagerloads(False) + self.mapper = self.query._bind_mapper() + self._validate_query_state() + + def _validate_query_state(self): + for attr, methname, notset, op in ( + ('_limit', 'limit()', None, operator.is_), + ('_offset', 'offset()', None, operator.is_), + ('_order_by', 'order_by()', False, operator.is_), + ('_group_by', 'group_by()', False, operator.is_), + ('_distinct', 'distinct()', False, operator.is_), + ( + '_from_obj', + 'join(), outerjoin(), select_from(), or from_self()', + (), operator.eq) + ): + if not op(getattr(self.query, attr), notset): + raise sa_exc.InvalidRequestError( + "Can't call Query.update() or Query.delete() " + "when %s has been called" % + (methname, ) + ) @property def session(self): @@ -898,18 +1128,34 @@ class BulkUD(object): self._do_post_synchronize() self._do_post() - def _do_pre(self): + @util.dependencies("sqlalchemy.orm.query") + def _do_pre(self, querylib): query = self.query - self.context = context = query._compile_context() - if len(context.statement.froms) != 1 or \ - not isinstance(context.statement.froms[0], schema.Table): + self.context = querylib.QueryContext(query) + if isinstance(query._entities[0], querylib._ColumnEntity): + # check for special case of query(table) + tables = set() + for ent in query._entities: + if not isinstance(ent, querylib._ColumnEntity): + tables.clear() + break + else: + tables.update(_from_objects(ent.column)) + + if len(tables) != 1: + raise sa_exc.InvalidRequestError( + "This operation 
requires only one Table or " + "entity be specified as the target." + ) + else: + self.primary_table = tables.pop() + + else: self.primary_table = query._only_entity_zero( "This operation requires only one Table or " "entity be specified as the target." ).mapper.local_table - else: - self.primary_table = context.statement.froms[0] session = query.session @@ -964,35 +1210,76 @@ class BulkFetch(BulkUD): def _do_pre_synchronize(self): query = self.query session = query.session - select_stmt = self.context.statement.with_only_columns( + context = query._compile_context() + select_stmt = context.statement.with_only_columns( self.primary_table.primary_key) self.matched_rows = session.execute( select_stmt, + mapper=self.mapper, params=query._params).fetchall() class BulkUpdate(BulkUD): """BulkUD which handles UPDATEs.""" - def __init__(self, query, values): + def __init__(self, query, values, update_kwargs): super(BulkUpdate, self).__init__(query) - self.query._no_select_modifiers("update") self.values = values + self.update_kwargs = update_kwargs @classmethod - def factory(cls, query, synchronize_session, values): + def factory(cls, query, synchronize_session, values, update_kwargs): return BulkUD._factory({ "evaluate": BulkUpdateEvaluate, "fetch": BulkUpdateFetch, False: BulkUpdate - }, synchronize_session, query, values) + }, synchronize_session, query, values, update_kwargs) + + def _resolve_string_to_expr(self, key): + if self.mapper and isinstance(key, util.string_types): + attr = _entity_descriptor(self.mapper, key) + return attr.__clause_element__() + else: + return key + + def _resolve_key_to_attrname(self, key): + if self.mapper and isinstance(key, util.string_types): + attr = _entity_descriptor(self.mapper, key) + return attr.property.key + elif isinstance(key, attributes.InstrumentedAttribute): + return key.key + elif hasattr(key, '__clause_element__'): + key = key.__clause_element__() + + if self.mapper and isinstance(key, expression.ColumnElement): + try: + attr = self.mapper._columntoproperty[key] + except orm_exc.UnmappedColumnError: + return None + else: + return attr.key + else: + raise sa_exc.InvalidRequestError( + "Invalid expression type: %r" % key) def _do_exec(self): + + values = [ + (self._resolve_string_to_expr(k), v) + for k, v in ( + self.values.items() if hasattr(self.values, 'items') + else self.values) + ] + if not self.update_kwargs.get('preserve_parameter_order', False): + values = dict(values) + update_stmt = sql.update(self.primary_table, - self.context.whereclause, self.values) + self.context.whereclause, values, + **self.update_kwargs) self.result = self.query.session.execute( - update_stmt, params=self.query._params) + update_stmt, params=self.query._params, + mapper=self.mapper) self.rowcount = self.result.rowcount def _do_post(self): @@ -1005,7 +1292,6 @@ class BulkDelete(BulkUD): def __init__(self, query): super(BulkDelete, self).__init__(query) - self.query._no_select_modifiers("delete") @classmethod def factory(cls, query, synchronize_session): @@ -1019,8 +1305,10 @@ class BulkDelete(BulkUD): delete_stmt = sql.delete(self.primary_table, self.context.whereclause) - self.result = self.query.session.execute(delete_stmt, - params=self.query._params) + self.result = self.query.session.execute( + delete_stmt, + params=self.query._params, + mapper=self.mapper) self.rowcount = self.result.rowcount def _do_post(self): @@ -1034,10 +1322,13 @@ class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate): def _additional_evaluators(self, evaluator_compiler): 
self.value_evaluators = {} - for key, value in self.values.items(): - key = _attr_as_key(key) - self.value_evaluators[key] = evaluator_compiler.process( - expression._literal_as_binds(value)) + values = (self.values.items() if hasattr(self.values, 'items') + else self.values) + for key, value in values: + key = self._resolve_key_to_attrname(key) + if key is not None: + self.value_evaluators[key] = evaluator_compiler.process( + expression._literal_as_binds(value)) def _do_post_synchronize(self): session = self.query.session diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/properties.py b/lib/python3.5/site-packages/sqlalchemy/orm/properties.py index 62ea93f..f8a3532 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/properties.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/properties.py @@ -1,5 +1,5 @@ # orm/properties.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -34,6 +34,13 @@ class ColumnProperty(StrategizedProperty): strategy_wildcard_key = 'column' + __slots__ = ( + '_orig_columns', 'columns', 'group', 'deferred', + 'instrument', 'comparator_factory', 'descriptor', 'extension', + 'active_history', 'expire_on_flush', 'info', 'doc', + 'strategy_class', '_creation_order', '_is_polymorphic_discriminator', + '_mapped_by_synonym', '_deferred_column_loader') + def __init__(self, *columns, **kwargs): """Provide a column-level property for use with a Mapper. @@ -109,6 +116,7 @@ class ColumnProperty(StrategizedProperty): **Deprecated.** Please see :class:`.AttributeEvents`. """ + super(ColumnProperty, self).__init__() self._orig_columns = [expression._labeled(c) for c in columns] self.columns = [expression._labeled(_orm_full_deannotate(c)) for c in columns] @@ -149,6 +157,12 @@ class ColumnProperty(StrategizedProperty): ("instrument", self.instrument) ) + @util.dependencies("sqlalchemy.orm.state", "sqlalchemy.orm.strategies") + def _memoized_attr__deferred_column_loader(self, state, strategies): + return state.InstanceState._instance_level_callable_processor( + self.parent.class_manager, + strategies.LoadDeferredColumns(self.key), self.key) + @property def expression(self): """Return the primary column or expression for this ColumnProperty. @@ -206,7 +220,7 @@ class ColumnProperty(StrategizedProperty): elif dest_state.has_identity and self.key not in dest_dict: dest_state._expire_attributes(dest_dict, [self.key]) - class Comparator(PropComparator): + class Comparator(util.MemoizedSlots, PropComparator): """Produce boolean, comparison, and other operators for :class:`.ColumnProperty` attributes. 
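The ``util.MemoizedSlots`` mixin used above, together with the ``_memoized_attr_<name>`` / ``_memoized_method_<name>`` naming convention seen in ``_memoized_attr__deferred_column_loader`` and in the ``Comparator`` hunk below, provides memoization for classes that declare ``__slots__`` and therefore have no per-instance ``__dict__`` for the classic ``@util.memoized_property`` decorator to write into. The following is a minimal sketch of that convention only; it assumes nothing about the real ``sqlalchemy.util.MemoizedSlots`` internals, and ``MemoizedSlotsSketch`` is a hypothetical name::

    class MemoizedSlotsSketch(object):
        """Illustrative only -- not the actual SQLAlchemy implementation."""

        __slots__ = ('info',)

        def __getattr__(self, key):
            # only reached when normal lookup fails, i.e. the slot
            # named ``key`` has not been filled in yet
            builder = getattr(self.__class__, '_memoized_attr_%s' % key, None)
            if builder is not None:
                value = builder(self)
                setattr(self, key, value)  # fill the declared slot
                return value
            return self._fallback_getattr(key)

        def _fallback_getattr(self, key):
            raise AttributeError(key)

        def _memoized_attr_info(self):
            # computed once; later accesses read the slot directly
            # and never reach __getattr__ again
            return {}

    obj = MemoizedSlotsSketch()
    obj.info['x'] = 1              # first access triggers _memoized_attr_info
    assert obj.info == {'x': 1}    # second access reads the populated slot

This is also why ``Comparator`` gains a ``_fallback_getattr`` in place of a plain ``__getattr__`` in the hunk below: the mixin owns ``__getattr__`` and delegates names it cannot memoize to ``_fallback_getattr``.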
@@ -224,24 +238,27 @@ class ColumnProperty(StrategizedProperty): :attr:`.TypeEngine.comparator_factory` """ - @util.memoized_instancemethod - def __clause_element__(self): + + __slots__ = '__clause_element__', 'info' + + def _memoized_method___clause_element__(self): if self.adapter: return self.adapter(self.prop.columns[0]) else: + # no adapter, so we aren't aliased + # assert self._parententity is self._parentmapper return self.prop.columns[0]._annotate({ - "parententity": self._parentmapper, - "parentmapper": self._parentmapper}) + "parententity": self._parententity, + "parentmapper": self._parententity}) - @util.memoized_property - def info(self): + def _memoized_attr_info(self): ce = self.__clause_element__() try: return ce.info except AttributeError: return self.prop.info - def __getattr__(self, key): + def _fallback_getattr(self, key): """proxy attribute access down to the mapped column. this allows user-defined comparison methods to be accessed. diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/query.py b/lib/python3.5/site-packages/sqlalchemy/orm/query.py index 12e11b2..335832d 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/query.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/query.py @@ -1,5 +1,5 @@ # orm/query.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -26,7 +26,7 @@ from . import ( exc as orm_exc, loading ) from .base import _entity_descriptor, _is_aliased_class, \ - _is_mapped_class, _orm_columns, _generative + _is_mapped_class, _orm_columns, _generative, InspectionAttr from .path_registry import PathRegistry from .util import ( AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased @@ -75,6 +75,7 @@ class Query(object): _having = None _distinct = False _prefixes = None + _suffixes = None _offset = None _limit = None _for_update_arg = None @@ -99,7 +100,8 @@ class Query(object): _with_options = () _with_hints = () _enable_single_crit = True - + _orm_only_adapt = True + _orm_only_from_obj_alias = True _current_path = _path_registry def __init__(self, entities, session=None): @@ -137,10 +139,7 @@ class Query(object): ) aliased_adapter = None elif ext_info.is_aliased_class: - aliased_adapter = sql_util.ColumnAdapter( - ext_info.selectable, - ext_info.mapper._equivalent_columns - ) + aliased_adapter = ext_info._adapter else: aliased_adapter = None @@ -162,7 +161,6 @@ class Query(object): for from_obj in obj: info = inspect(from_obj) - if hasattr(info, 'mapper') and \ (info.is_mapper or info.is_aliased_class): self._select_from_entity = from_obj @@ -218,7 +216,7 @@ class Query(object): def _adapt_col_list(self, cols): return [ self._adapt_clause( - expression._literal_as_text(o), + expression._literal_as_label_reference(o), True, True) for o in cols ] @@ -234,7 +232,8 @@ class Query(object): adapters = [] # do we adapt all expression elements or only those # tagged as 'ORM' constructs ? - orm_only = getattr(self, '_orm_only_adapt', orm_only) + if not self._orm_only_adapt: + orm_only = False if as_filter and self._filter_aliases: for fa in self._filter_aliases._visitor_iterator: @@ -251,7 +250,7 @@ class Query(object): # to all SQL constructs. 
adapters.append( ( - getattr(self, '_orm_only_from_obj_alias', orm_only), + orm_only if self._orm_only_from_obj_alias else False, self._from_obj_alias.replace ) ) @@ -288,8 +287,11 @@ class Query(object): return self._entities[0] def _mapper_zero(self): - return self._select_from_entity or \ - self._entity_zero().entity_zero + # TODO: self._select_from_entity is not a mapper + # so this method is misnamed + return self._select_from_entity \ + if self._select_from_entity is not None \ + else self._entity_zero().entity_zero @property def _mapper_entities(self): @@ -303,11 +305,14 @@ class Query(object): self._mapper_zero() ) - def _mapper_zero_or_none(self): - if self._primary_entity: - return self._primary_entity.mapper - else: - return None + def _bind_mapper(self): + ezero = self._mapper_zero() + if ezero is not None: + insp = inspect(ezero) + if not insp.is_clause_element: + return insp.mapper + + return None def _only_mapper_zero(self, rationale=None): if len(self._entities) > 1: @@ -396,22 +401,6 @@ class Query(object): % (meth, meth) ) - def _no_select_modifiers(self, meth): - if not self._enable_assertions: - return - for attr, methname, notset in ( - ('_limit', 'limit()', None), - ('_offset', 'offset()', None), - ('_order_by', 'order_by()', False), - ('_group_by', 'group_by()', False), - ('_distinct', 'distinct()', False), - ): - if getattr(self, attr) is not notset: - raise sa_exc.InvalidRequestError( - "Can't call Query.%s() when %s has been called" % - (meth, methname) - ) - def _get_options(self, populate_existing=None, version_check=None, only_load_props=None, @@ -595,11 +584,19 @@ class Query(object): This is used primarily when nesting the Query's statement into a subquery or other - selectable. + selectable, or when using :meth:`.Query.yield_per`. """ self._enable_eagerloads = value + def _no_yield_per(self, message): + raise sa_exc.InvalidRequestError( + "The yield_per Query option is currently not " + "compatible with %s eager loading. Please " + "specify lazyload('*') or query.enable_eagerloads(False) in " + "order to " + "proceed with query.yield_per()." % message) + @_generative() def with_labels(self): """Apply column labels to the return value of Query.statement. @@ -613,6 +610,16 @@ class Query(object): When the `Query` actually issues SQL to load rows, it always uses column labeling. + .. note:: The :meth:`.Query.with_labels` method *only* applies + the output of :attr:`.Query.statement`, and *not* to any of + the result-row invoking systems of :class:`.Query` itself, e.g. + :meth:`.Query.first`, :meth:`.Query.all`, etc. To execute + a query using :meth:`.Query.with_labels`, invoke the + :attr:`.Query.statement` using :meth:`.Session.execute`:: + + result = session.execute(query.with_labels().statement) + + """ self._with_labels = True @@ -705,29 +712,64 @@ class Query(object): def yield_per(self, count): """Yield only ``count`` rows at a time. - WARNING: use this method with caution; if the same instance is present - in more than one batch of rows, end-user changes to attributes will be - overwritten. + The purpose of this method is when fetching very large result sets + (> 10K rows), to batch results in sub-collections and yield them + out partially, so that the Python interpreter doesn't need to declare + very large areas of memory which is both time consuming and leads + to excessive memory use. The performance from fetching hundreds of + thousands of rows can often double when a suitable yield-per setting + (e.g. 
approximately 1000) is used, even with DBAPIs that buffer + rows (which are most). - In particular, it's usually impossible to use this setting with - eagerly loaded collections (i.e. any lazy='joined' or 'subquery') - since those collections will be cleared for a new load when - encountered in a subsequent result batch. In the case of 'subquery' - loading, the full result for all rows is fetched which generally - defeats the purpose of :meth:`~sqlalchemy.orm.query.Query.yield_per`. + The :meth:`.Query.yield_per` method **is not compatible with most + eager loading schemes, including subqueryload and joinedload with + collections**. For this reason, it may be helpful to disable + eager loads, either unconditionally with + :meth:`.Query.enable_eagerloads`:: - Also note that while :meth:`~sqlalchemy.orm.query.Query.yield_per` - will set the ``stream_results`` execution option to True, currently - this is only understood by - :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect which will - stream results using server side cursors instead of pre-buffer all - rows for this query. Other DBAPIs pre-buffer all rows before making - them available. + q = sess.query(Object).yield_per(100).enable_eagerloads(False) + + Or more selectively using :func:`.lazyload`; such as with + an asterisk to specify the default loader scheme:: + + q = sess.query(Object).yield_per(100).\\ + options(lazyload('*'), joinedload(Object.some_related)) + + .. warning:: + + Use this method with caution; if the same instance is + present in more than one batch of rows, end-user changes + to attributes will be overwritten. + + In particular, it's usually impossible to use this setting + with eagerly loaded collections (i.e. any lazy='joined' or + 'subquery') since those collections will be cleared for a + new load when encountered in a subsequent result batch. + In the case of 'subquery' loading, the full result for all + rows is fetched which generally defeats the purpose of + :meth:`~sqlalchemy.orm.query.Query.yield_per`. + + Also note that while + :meth:`~sqlalchemy.orm.query.Query.yield_per` will set the + ``stream_results`` execution option to True, currently + this is only understood by + :mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect + which will stream results using server side cursors + instead of pre-buffer all rows for this query. Other + DBAPIs **pre-buffer all rows** before making them + available. The memory use of raw database rows is much less + than that of an ORM-mapped object, but should still be taken into + consideration when benchmarking. + + .. seealso:: + + :meth:`.Query.enable_eagerloads` """ self._yield_per = count self._execution_options = self._execution_options.union( - {"stream_results": True}) + {"stream_results": True, + "max_row_buffer": count}) def get(self, ident): """Return an instance based on the given primary key identifier, @@ -771,7 +813,7 @@ class Query(object): foreign-key-to-primary-key criterion, will also use an operation equivalent to :meth:`~.Query.get` in order to retrieve the target value from the local identity map - before querying the database. See :doc:`/orm/loading` + before querying the database. See :doc:`/orm/loading_relationships` for further details on relationship loading. :param ident: A scalar or tuple value representing @@ -786,7 +828,9 @@ class Query(object): :return: The object instance, or ``None``. 
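For example, a minimal illustration, assuming a mapped ``User`` class with a single-column integer primary key; the ``SomeCompositeKeyed`` name is hypothetical, standing in for any mapping with a two-column primary key::

    some_user = session.query(User).get(5)

    # a composite primary key is passed as a tuple, in the order
    # in which the primary key columns are defined on the table
    some_object = session.query(SomeCompositeKeyed).get((5, 10))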
""" + return self._get_impl(ident, loading.load_on_ident) + def _get_impl(self, ident, fallback_fn): # convert composite types to individual args if hasattr(ident, '__composite_values__'): ident = ident.__composite_values__() @@ -817,7 +861,7 @@ class Query(object): return None return instance - return loading.load_on_ident(self, key) + return fallback_fn(self, key) @_generative() def correlate(self, *args): @@ -898,11 +942,13 @@ class Query(object): """ if property is None: + mapper_zero = inspect(self._mapper_zero()).mapper + mapper = object_mapper(instance) for prop in mapper.iterate_properties: if isinstance(prop, properties.RelationshipProperty) and \ - prop.mapper is self._mapper_zero(): + prop.mapper is mapper_zero: property = prop break else: @@ -940,14 +986,176 @@ class Query(object): """return a Query that selects from this Query's SELECT statement. - \*entities - optional list of entities which will replace - those being selected. + :meth:`.Query.from_self` essentially turns the SELECT statement + into a SELECT of itself. Given a query such as:: + + q = session.query(User).filter(User.name.like('e%')) + + Given the :meth:`.Query.from_self` version:: + + q = session.query(User).filter(User.name.like('e%')).from_self() + + This query renders as: + + .. sourcecode:: sql + + SELECT anon_1.user_id AS anon_1_user_id, + anon_1.user_name AS anon_1_user_name + FROM (SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE "user".name LIKE :name_1) AS anon_1 + + There are lots of cases where :meth:`.Query.from_self` may be useful. + A simple one is where above, we may want to apply a row LIMIT to + the set of user objects we query against, and then apply additional + joins against that row-limited set:: + + q = session.query(User).filter(User.name.like('e%')).\\ + limit(5).from_self().\\ + join(User.addresses).filter(Address.email.like('q%')) + + The above query joins to the ``Address`` entity but only against the + first five results of the ``User`` query: + + .. sourcecode:: sql + + SELECT anon_1.user_id AS anon_1_user_id, + anon_1.user_name AS anon_1_user_name + FROM (SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE "user".name LIKE :name_1 + LIMIT :param_1) AS anon_1 + JOIN address ON anon_1.user_id = address.user_id + WHERE address.email LIKE :email_1 + + **Automatic Aliasing** + + Another key behavior of :meth:`.Query.from_self` is that it applies + **automatic aliasing** to the entities inside the subquery, when + they are referenced on the outside. Above, if we continue to + refer to the ``User`` entity without any additional aliasing applied + to it, those references wil be in terms of the subquery:: + + q = session.query(User).filter(User.name.like('e%')).\\ + limit(5).from_self().\\ + join(User.addresses).filter(Address.email.like('q%')).\\ + order_by(User.name) + + The ORDER BY against ``User.name`` is aliased to be in terms of the + inner subquery: + + .. sourcecode:: sql + + SELECT anon_1.user_id AS anon_1_user_id, + anon_1.user_name AS anon_1_user_name + FROM (SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE "user".name LIKE :name_1 + LIMIT :param_1) AS anon_1 + JOIN address ON anon_1.user_id = address.user_id + WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name + + The automatic aliasing feature only works in a **limited** way, + for simple filters and orderings. 
More ambitious constructions + such as referring to the entity in joins should prefer to use + explicit subquery objects, typically making use of the + :meth:`.Query.subquery` method to produce an explicit subquery object. + Always test the structure of queries by viewing the SQL to ensure + a particular structure does what's expected! + + **Changing the Entities** + + :meth:`.Query.from_self` also includes the ability to modify what + columns are being queried. In our example, we want ``User.id`` + to be queried by the inner query, so that we can join to the + ``Address`` entity on the outside, but we only wanted the outer + query to return the ``Address.email`` column:: + + q = session.query(User).filter(User.name.like('e%')).\\ + limit(5).from_self(Address.email).\\ + join(User.addresses).filter(Address.email.like('q%')) + + yielding: + + .. sourcecode:: sql + + SELECT address.email AS address_email + FROM (SELECT "user".id AS user_id, "user".name AS user_name + FROM "user" + WHERE "user".name LIKE :name_1 + LIMIT :param_1) AS anon_1 + JOIN address ON anon_1.user_id = address.user_id + WHERE address.email LIKE :email_1 + + **Looking out for Inner / Outer Columns** + + Keep in mind that when referring to columns that originate from + inside the subquery, we need to ensure they are present in the + columns clause of the subquery itself; this is an ordinary aspect of + SQL. For example, if we wanted to load from a joined entity inside + the subquery using :func:`.contains_eager`, we need to add those + columns. Below illustrates a join of ``Address`` to ``User``, + then a subquery, and then we'd like :func:`.contains_eager` to access + the ``User`` columns:: + + q = session.query(Address).join(Address.user).\\ + filter(User.name.like('e%')) + + q = q.add_entity(User).from_self().\\ + options(contains_eager(Address.user)) + + We use :meth:`.Query.add_entity` above **before** we call + :meth:`.Query.from_self` so that the ``User`` columns are present + in the inner subquery, so that they are available to the + :func:`.contains_eager` modifier we are using on the outside, + producing: + + .. sourcecode:: sql + + SELECT anon_1.address_id AS anon_1_address_id, + anon_1.address_email AS anon_1_address_email, + anon_1.address_user_id AS anon_1_address_user_id, + anon_1.user_id AS anon_1_user_id, + anon_1.user_name AS anon_1_user_name + FROM ( + SELECT address.id AS address_id, + address.email AS address_email, + address.user_id AS address_user_id, + "user".id AS user_id, + "user".name AS user_name + FROM address JOIN "user" ON "user".id = address.user_id + WHERE "user".name LIKE :name_1) AS anon_1 + + If we didn't call ``add_entity(User)``, but still asked + :func:`.contains_eager` to load the ``User`` entity, it would be + forced to add the table on the outside without the correct + join criteria - note the ``anon1, "user"`` phrase at + the end: + + .. sourcecode:: sql + + -- incorrect query + SELECT anon_1.address_id AS anon_1_address_id, + anon_1.address_email AS anon_1_address_email, + anon_1.address_user_id AS anon_1_address_user_id, + "user".id AS user_id, + "user".name AS user_name + FROM ( + SELECT address.id AS address_id, + address.email AS address_email, + address.user_id AS address_user_id + FROM address JOIN "user" ON "user".id = address.user_id + WHERE "user".name LIKE :name_1) AS anon_1, "user" + + :param \*entities: optional list of entities which will replace + those being selected. 
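Following the advice above about always testing query structure, the SQL rendered for any of the examples in this section can be inspected directly; e.g., where ``q`` is one of the queries built above::

    print(q)            # renders the full SELECT statement
    print(q.statement)  # the underlying Core select() construct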
""" fromclause = self.with_labels().enable_eagerloads(False).\ - _set_enable_single_crit(False).\ statement.correlate(None) q = self._from_selectable(fromclause) + q._enable_single_crit = False + q._select_from_entity = self._mapper_zero() if entities: q._set_entities(entities) return q @@ -964,7 +1172,7 @@ class Query(object): '_limit', '_offset', '_joinpath', '_joinpoint', '_distinct', '_having', - '_prefixes', + '_prefixes', '_suffixes' ): self.__dict__.pop(attr, None) self._set_select_from([fromclause], True) @@ -1060,7 +1268,7 @@ class Query(object): Most supplied options regard changing how column- and relationship-mapped attributes are loaded. See the sections - :ref:`deferred` and :doc:`/orm/loading` for reference + :ref:`deferred` and :doc:`/orm/loading_relationships` for reference documentation. """ @@ -1106,7 +1314,8 @@ class Query(object): @_generative() def with_hint(self, selectable, text, dialect_name='*'): - """Add an indexing hint for the given entity or selectable to + """Add an indexing or other executional context + hint for the given entity or selectable to this :class:`.Query`. Functionality is passed straight through to @@ -1114,11 +1323,35 @@ class Query(object): with the addition that ``selectable`` can be a :class:`.Table`, :class:`.Alias`, or ORM entity / mapped class /etc. + + .. seealso:: + + :meth:`.Query.with_statement_hint` + """ - selectable = inspect(selectable).selectable + if selectable is not None: + selectable = inspect(selectable).selectable self._with_hints += ((selectable, text, dialect_name),) + def with_statement_hint(self, text, dialect_name='*'): + """add a statement hint to this :class:`.Select`. + + This method is similar to :meth:`.Select.with_hint` except that + it does not require an individual table, and instead applies to the + statement as a whole. + + This feature calls down into :meth:`.Select.with_statement_hint`. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.Query.with_hint` + + """ + return self.with_hint(None, text, dialect_name) + @_generative() def execution_options(self, **kwargs): """ Set non-SQL options which take effect during execution. @@ -1222,7 +1455,9 @@ class Query(object): session.query(MyClass).filter(MyClass.name == 'some name') - Multiple criteria are joined together by AND:: + Multiple criteria may be specified as comma separated; the effect + is that they will be joined together using the :func:`.and_` + function:: session.query(MyClass).\\ filter(MyClass.name == 'some name', MyClass.id > 5) @@ -1231,16 +1466,13 @@ class Query(object): WHERE clause of a select. String expressions are coerced into SQL expression constructs via the :func:`.text` construct. - .. versionchanged:: 0.7.5 - Multiple criteria joined by AND. - .. seealso:: :meth:`.Query.filter_by` - filter on keyword expressions. 
""" for criterion in list(criterion): - criterion = expression._literal_as_text(criterion) + criterion = expression._expression_literal_as_text(criterion) criterion = self._adapt_clause(criterion, True, True) @@ -1257,7 +1489,9 @@ class Query(object): session.query(MyClass).filter_by(name = 'some name') - Multiple criteria are joined together by AND:: + Multiple criteria may be specified as comma separated; the effect + is that they will be joined together using the :func:`.and_` + function:: session.query(MyClass).\\ filter_by(name = 'some name', id = 5) @@ -1339,8 +1573,7 @@ class Query(object): """ - if isinstance(criterion, util.string_types): - criterion = sql.text(criterion) + criterion = expression._expression_literal_as_text(criterion) if criterion is not None and \ not isinstance(criterion, sql.ClauseElement): @@ -1677,6 +1910,14 @@ class Query(object): anonymously aliased. Subsequent calls to :meth:`~.Query.filter` and similar will adapt the incoming criterion to the target alias, until :meth:`~.Query.reset_joinpoint` is called. + :param isouter=False: If True, the join used will be a left outer join, + just as if the :meth:`.Query.outerjoin` method were called. This + flag is here to maintain consistency with the same flag as accepted + by :meth:`.FromClause.join` and other Core constructs. + + + .. versionadded:: 1.0.0 + :param from_joinpoint=False: When using ``aliased=True``, a setting of True here will cause the join to be from the most recent joined target, rather than starting back from the original @@ -1694,13 +1935,14 @@ class Query(object): SQLAlchemy versions was the primary ORM-level joining interface. """ - aliased, from_joinpoint = kwargs.pop('aliased', False),\ - kwargs.pop('from_joinpoint', False) + aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\ + kwargs.pop('from_joinpoint', False),\ + kwargs.pop('isouter', False) if kwargs: raise TypeError("unknown arguments: %s" % - ','.join(kwargs.keys)) + ', '.join(sorted(kwargs))) return self._join(props, - outerjoin=False, create_aliases=aliased, + outerjoin=isouter, create_aliases=aliased, from_joinpoint=from_joinpoint) def outerjoin(self, *props, **kwargs): @@ -1714,7 +1956,7 @@ class Query(object): kwargs.pop('from_joinpoint', False) if kwargs: raise TypeError("unknown arguments: %s" % - ','.join(kwargs)) + ', '.join(sorted(kwargs))) return self._join(props, outerjoin=True, create_aliases=aliased, from_joinpoint=from_joinpoint) @@ -1750,7 +1992,8 @@ class Query(object): # convert to a tuple. keys = (keys,) - for arg1 in util.to_list(keys): + keylist = util.to_list(keys) + for idx, arg1 in enumerate(keylist): if isinstance(arg1, tuple): # "tuple" form of join, multiple # tuples are accepted as well. 
The simpler @@ -1772,6 +2015,11 @@ class Query(object): left_entity = prop = None + if isinstance(onclause, interfaces.PropComparator): + of_type = getattr(onclause, '_of_type', None) + else: + of_type = None + if isinstance(onclause, util.string_types): left_entity = self._joinpoint_zero() @@ -1798,8 +2046,6 @@ class Query(object): if isinstance(onclause, interfaces.PropComparator): if right_entity is None: - right_entity = onclause.property.mapper - of_type = getattr(onclause, '_of_type', None) if of_type: right_entity = of_type else: @@ -1826,6 +2072,11 @@ class Query(object): jp = self._joinpoint[edge].copy() jp['prev'] = (edge, self._joinpoint) self._update_joinpoint(jp) + + if idx == len(keylist) - 1: + util.warn( + "Pathed join target %s has already " + "been joined to; skipping" % prop) continue elif onclause is not None and right_entity is None: @@ -1881,11 +2132,9 @@ class Query(object): from_obj, r_info.selectable): overlap = True break - elif sql_util.selectables_overlap(l_info.selectable, - r_info.selectable): - overlap = True - if overlap and l_info.selectable is r_info.selectable: + if (overlap or not create_aliases) and \ + l_info.selectable is r_info.selectable: raise sa_exc.InvalidRequestError( "Can't join table/selectable '%s' to itself" % l_info.selectable) @@ -2250,6 +2499,19 @@ class Query(object): """Apply a ``DISTINCT`` to the query and return the newly resulting ``Query``. + + .. note:: + + The :meth:`.distinct` call includes logic that will automatically + add columns from the ORDER BY of the query to the columns + clause of the SELECT statement, to satisfy the common need + of the database backend that ORDER BY columns be part of the + SELECT list when DISTINCT is used. These columns *are not* + added to the list of columns actually fetched by the + :class:`.Query`, however, so would not affect results. + The columns are passed through when using the + :attr:`.Query.statement` accessor, however. + :param \*expr: optional column expressions. When present, the Postgresql dialect will render a ``DISTINCT ON (>)`` construct. @@ -2285,12 +2547,38 @@ class Query(object): .. versionadded:: 0.7.7 + .. seealso:: + + :meth:`.HasPrefixes.prefix_with` + """ if self._prefixes: self._prefixes += prefixes else: self._prefixes = prefixes + @_generative() + def suffix_with(self, *suffixes): + """Apply the suffix to the query and return the newly resulting + ``Query``. + + :param \*suffixes: optional suffixes, typically strings, + not using any commas. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :meth:`.Query.prefix_with` + + :meth:`.HasSuffixes.suffix_with` + + """ + if self._suffixes: + self._suffixes += suffixes + else: + self._suffixes = suffixes + def all(self): """Return the results represented by this ``Query`` as a list. @@ -2306,13 +2594,18 @@ class Query(object): This method bypasses all internal statement compilation, and the statement is executed without modification. - The statement argument is either a string, a ``select()`` construct, - or a ``text()`` construct, and should return the set of columns - appropriate to the entity class represented by this ``Query``. + The statement is typically either a :func:`~.expression.text` + or :func:`~.expression.select` construct, and should return the set + of columns + appropriate to the entity class represented by this :class:`.Query`. + + .. 
seealso:: + + :ref:`orm_tutorial_literal_sql` - usage examples in the + ORM tutorial """ - if isinstance(statement, util.string_types): - statement = sql.text(statement) + statement = expression._expression_literal_as_text(statement) if not isinstance(statement, (expression.TextClause, @@ -2332,7 +2625,7 @@ class Query(object): (note this may consist of multiple result rows if join-loaded collections are present). - Calling ``first()`` results in an execution of the underlying query. + Calling :meth:`.Query.first` results in an execution of the underlying query. """ if self._statement is not None: @@ -2344,26 +2637,57 @@ class Query(object): else: return None + def one_or_none(self): + """Return at most one result or raise an exception. + + Returns ``None`` if the query selects + no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` + if multiple object identities are returned, or if multiple + rows are returned for a query that returns only scalar values + as opposed to full identity-mapped entities. + + Calling :meth:`.Query.one_or_none` results in an execution of the underlying + query. + + .. versionadded:: 1.0.9 + + Added :meth:`.Query.one_or_none` + + .. seealso:: + + :meth:`.Query.first` + + :meth:`.Query.one` + + + """ + ret = list(self) + + l = len(ret) + if l == 1: + return ret[0] + elif l == 0: + return None + else: + raise orm_exc.MultipleResultsFound( + "Multiple rows were found for one_or_none()") + def one(self): """Return exactly one result or raise an exception. Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` if multiple object identities are returned, or if multiple - rows are returned for a query that does not return object - identities. + rows are returned for a query that returns only scalar values + as opposed to full identity-mapped entities. - Note that an entity query, that is, one which selects one or - more mapped classes as opposed to individual column attributes, - may ultimately represent many rows but only one row of - unique entity or entities - this is a successful result for one(). + Calling :meth:`.one` results in an execution of the underlying query. - Calling ``one()`` results in an execution of the underlying query. + .. seealso:: - .. versionchanged:: 0.6 - ``one()`` fully fetches all results instead of applying - any kind of limit, so that the "unique"-ing of entities does not - conceal multiple object identities. 
+ :meth:`.Query.first` + + :meth:`.Query.one_or_none` """ ret = list(self) @@ -2420,12 +2744,12 @@ class Query(object): def _execute_and_instances(self, querycontext): conn = self._connection_from_session( - mapper=self._mapper_zero_or_none(), + mapper=self._bind_mapper(), clause=querycontext.statement, close_with_result=True) result = conn.execute(querycontext.statement, self._params) - return loading.instances(self, result, querycontext) + return loading.instances(querycontext.query, result, querycontext) @property def column_descriptions(self): @@ -2447,30 +2771,46 @@ class Query(object): 'type':User, 'aliased':False, 'expr':User, + 'entity': User }, { 'name':'id', 'type':Integer(), 'aliased':False, 'expr':User.id, + 'entity': User }, { 'name':'user2', 'type':User, 'aliased':True, - 'expr':user_alias + 'expr':user_alias, + 'entity': user_alias } ] """ + return [ { 'name': ent._label_name, 'type': ent.type, - 'aliased': getattr(ent, 'is_aliased_class', False), - 'expr': ent.expr + 'aliased': getattr(insp_ent, 'is_aliased_class', False), + 'expr': ent.expr, + 'entity': + getattr(insp_ent, "entity", None) + if ent.entity_zero is not None + and not insp_ent.is_clause_element + else None } - for ent in self._entities + for ent, insp_ent in [ + ( + _ent, + (inspect(_ent.entity_zero) + if _ent.entity_zero is not None else None) + ) + for _ent in self._entities + ] ] def instances(self, cursor, __context=None): @@ -2522,6 +2862,7 @@ class Query(object): 'offset': self._offset, 'distinct': self._distinct, 'prefixes': self._prefixes, + 'suffixes': self._suffixes, 'group_by': self._group_by or None, 'having': self._having } @@ -2548,6 +2889,19 @@ class Query(object): SELECT 1 FROM users WHERE users.name = :name_1 ) AS anon_1 + The EXISTS construct is usually used in the WHERE clause:: + + session.query(User.id).filter(q.exists()).scalar() + + Note that some databases such as SQL Server don't allow an + EXISTS expression to be present in the columns clause of a + SELECT. To select a simple boolean value based on the exists + as a WHERE, use :func:`.literal`:: + + from sqlalchemy import literal + + session.query(literal(True)).filter(q.exists()).scalar() + .. versionadded:: 0.8.1 """ @@ -2558,7 +2912,7 @@ class Query(object): # .with_only_columns() after we have a core select() so that # we get just "SELECT 1" without any entities. return sql.exists(self.add_columns('1').with_labels(). - statement.with_only_columns(['1'])) + statement.with_only_columns([1])) def count(self): """Return a count of rows this Query would return. @@ -2605,6 +2959,18 @@ class Query(object): Deletes rows matched by this query from the database. + E.g.:: + + sess.query(User).filter(User.age == 25).\\ + delete(synchronize_session=False) + + sess.query(User).filter(User.age == 25).\\ + delete(synchronize_session='evaluate') + + .. warning:: The :meth:`.Query.delete` method is a "bulk" operation, + which bypasses ORM unit-of-work automation in favor of greater + performance. **Please read all caveats and warnings below.** + :param synchronize_session: chooses the strategy for the removal of matched objects from the session. Valid values are: @@ -2623,8 +2989,7 @@ class Query(object): ``'evaluate'`` - Evaluate the query's criteria in Python straight on the objects in the session. If evaluation of the criteria isn't - implemented, an error is raised. In that case you probably - want to use the 'fetch' strategy as a fallback. + implemented, an error is raised. 
The expression evaluator currently doesn't account for differing string collations between the database and Python. @@ -2632,29 +2997,42 @@ class Query(object): :return: the count of rows matched as returned by the database's "row count" feature. - This method has several key caveats: + .. warning:: **Additional Caveats for bulk query deletes** - * The method does **not** offer in-Python cascading of relationships - - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured - for any foreign key references which require it, otherwise the - database may emit an integrity violation if foreign key references - are being enforced. + * The method does **not** offer in-Python cascading of + relationships - it is assumed that ON DELETE CASCADE/SET + NULL/etc. is configured for any foreign key references + which require it, otherwise the database may emit an + integrity violation if foreign key references are being + enforced. - After the DELETE, dependent objects in the :class:`.Session` which - were impacted by an ON DELETE may not contain the current - state, or may have been deleted. This issue is resolved once the - :class:`.Session` is expired, - which normally occurs upon :meth:`.Session.commit` or can be forced - by using :meth:`.Session.expire_all`. Accessing an expired object - whose row has been deleted will invoke a SELECT to locate the - row; when the row is not found, an - :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. + After the DELETE, dependent objects in the + :class:`.Session` which were impacted by an ON DELETE + may not contain the current state, or may have been + deleted. This issue is resolved once the + :class:`.Session` is expired, which normally occurs upon + :meth:`.Session.commit` or can be forced by using + :meth:`.Session.expire_all`. Accessing an expired + object whose row has been deleted will invoke a SELECT + to locate the row; when the row is not found, an + :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is + raised. - * The :meth:`.MapperEvents.before_delete` and - :meth:`.MapperEvents.after_delete` - events are **not** invoked from this method. Instead, the - :meth:`.SessionEvents.after_bulk_delete` method is provided to act - upon a mass DELETE of entity rows. + * The ``'fetch'`` strategy results in an additional + SELECT statement emitted and will significantly reduce + performance. + + * The ``'evaluate'`` strategy performs a scan of + all matching objects within the :class:`.Session`; if the + contents of the :class:`.Session` are expired, such as + via a proceeding :meth:`.Session.commit` call, **this will + result in SELECT queries emitted for every matching object**. + + * The :meth:`.MapperEvents.before_delete` and + :meth:`.MapperEvents.after_delete` + events **are not invoked** from this method. Instead, the + :meth:`.SessionEvents.after_bulk_delete` method is provided to + act upon a mass DELETE of entity rows. .. seealso:: @@ -2670,16 +3048,41 @@ class Query(object): delete_op.exec_() return delete_op.rowcount - def update(self, values, synchronize_session='evaluate'): + def update(self, values, synchronize_session='evaluate', update_args=None): """Perform a bulk update query. Updates rows matched by this query in the database. - :param values: a dictionary with attributes names as keys and literal - values or sql expressions as values. 
+ E.g.:: + + sess.query(User).filter(User.age == 25).\\ + update({User.age: User.age - 10}, synchronize_session=False) + + sess.query(User).filter(User.age == 25).\\ + update({"age": User.age - 10}, synchronize_session='evaluate') + + + .. warning:: The :meth:`.Query.update` method is a "bulk" operation, + which bypasses ORM unit-of-work automation in favor of greater + performance. **Please read all caveats and warnings below.** + + + :param values: a dictionary with attributes names, or alternatively + mapped attributes or SQL expressions, as keys, and literal + values or sql expressions as values. If :ref:`parameter-ordered + mode ` is desired, the values can be + passed as a list of 2-tuples; + this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + flag is passed to the :paramref:`.Query.update.update_args` dictionary + as well. + + .. versionchanged:: 1.0.0 - string names in the values dictionary + are now resolved against the mapped entity; previously, these + strings were passed as literal column names with no mapper-level + translation. :param synchronize_session: chooses the strategy to update the - attributes on objects in the session. Valid values are: + attributes on objects in the session. Valid values are: ``False`` - don't synchronize the session. This option is the most efficient and is reliable once the session is expired, which @@ -2699,44 +3102,65 @@ class Query(object): The expression evaluator currently doesn't account for differing string collations between the database and Python. + :param update_args: Optional dictionary, if present will be passed + to the underlying :func:`.update` construct as the ``**kw`` for + the object. May be used to pass dialect-specific arguments such + as ``mysql_limit``, as well as other special arguments such as + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`. + + .. versionadded:: 1.0.0 + :return: the count of rows matched as returned by the database's - "row count" feature. + "row count" feature. - This method has several key caveats: + .. warning:: **Additional Caveats for bulk query updates** - * The method does **not** offer in-Python cascading of relationships - - it is assumed that ON UPDATE CASCADE is configured for any foreign - key references which require it, otherwise the database may emit an - integrity violation if foreign key references are being enforced. + * The method does **not** offer in-Python cascading of + relationships - it is assumed that ON UPDATE CASCADE is + configured for any foreign key references which require + it, otherwise the database may emit an integrity + violation if foreign key references are being enforced. - After the UPDATE, dependent objects in the :class:`.Session` which - were impacted by an ON UPDATE CASCADE may not contain the current - state; this issue is resolved once the :class:`.Session` is expired, - which normally occurs upon :meth:`.Session.commit` or can be forced - by using :meth:`.Session.expire_all`. + After the UPDATE, dependent objects in the + :class:`.Session` which were impacted by an ON UPDATE + CASCADE may not contain the current state; this issue is + resolved once the :class:`.Session` is expired, which + normally occurs upon :meth:`.Session.commit` or can be + forced by using :meth:`.Session.expire_all`. 
- * As of 0.8, this method will support multiple table updates, as - detailed in :ref:`multi_table_updates`, and this behavior does - extend to support updates of joined-inheritance and other multiple - table mappings. However, the **join condition of an inheritance - mapper is currently not automatically rendered**. - Care must be taken in any multiple-table update to explicitly - include the joining condition between those tables, even in mappings - where this is normally automatic. - E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of - the ``Engineer`` local table using criteria against the ``Employee`` - local table might look like:: + * The ``'fetch'`` strategy results in an additional + SELECT statement emitted and will significantly reduce + performance. - session.query(Engineer).\\ - filter(Engineer.id == Employee.id).\\ - filter(Employee.name == 'dilbert').\\ - update({"engineer_type": "programmer"}) + * The ``'evaluate'`` strategy performs a scan of + all matching objects within the :class:`.Session`; if the + contents of the :class:`.Session` are expired, such as + via a proceeding :meth:`.Session.commit` call, **this will + result in SELECT queries emitted for every matching object**. - * The :meth:`.MapperEvents.before_update` and - :meth:`.MapperEvents.after_update` - events are **not** invoked from this method. Instead, the - :meth:`.SessionEvents.after_bulk_update` method is provided to act - upon a mass UPDATE of entity rows. + * The method supports multiple table updates, as detailed + in :ref:`multi_table_updates`, and this behavior does + extend to support updates of joined-inheritance and + other multiple table mappings. However, the **join + condition of an inheritance mapper is not + automatically rendered**. Care must be taken in any + multiple-table update to explicitly include the joining + condition between those tables, even in mappings where + this is normally automatic. E.g. if a class ``Engineer`` + subclasses ``Employee``, an UPDATE of the ``Engineer`` + local table using criteria against the ``Employee`` + local table might look like:: + + session.query(Engineer).\\ + filter(Engineer.id == Employee.id).\\ + filter(Employee.name == 'dilbert').\\ + update({"engineer_type": "programmer"}) + + * The :meth:`.MapperEvents.before_update` and + :meth:`.MapperEvents.after_update` + events **are not invoked from this method**. Instead, the + :meth:`.SessionEvents.after_bulk_update` method is provided to + act upon a mass UPDATE of entity rows. .. seealso:: @@ -2746,18 +3170,19 @@ class Query(object): """ - # TODO: value keys need to be mapped to corresponding sql cols and - # instr.attr.s to string keys - # TODO: updates of manytoone relationships need to be converted to - # fk assignments - # TODO: cascades need handling. - + update_args = update_args or {} update_op = persistence.BulkUpdate.factory( - self, synchronize_session, values) + self, synchronize_session, values, update_args) update_op.exec_() return update_op.rowcount def _compile_context(self, labels=True): + if self.dispatch.before_compile: + for fn in self.dispatch.before_compile: + new_query = fn(self) + if new_query is not None: + self = new_query + context = QueryContext(self) if context.statement is not None: @@ -2778,10 +3203,8 @@ class Query(object): # "load from explicit FROMs" mode, # i.e. when select_from() or join() is used context.froms = list(context.from_clause) - else: - # "load from discrete FROMs" mode, - # i.e. 
when each _MappedEntity has its own FROM - context.froms = context.froms + # else "load from discrete FROMs" mode, + # i.e. when each _MappedEntity has its own FROM if self._enable_single_crit: self._adjust_for_single_inheritance(context) @@ -2801,6 +3224,7 @@ class Query(object): context.statement = self._compound_eager_statement(context) else: context.statement = self._simple_statement(context) + return context def _compound_eager_statement(self, context): @@ -3003,7 +3427,6 @@ class _MapperEntity(_QueryEntity): else: self._label_name = self.mapper.class_.__name__ self.path = self.entity_zero._path_registry - self.custom_rows = bool(self.mapper.dispatch.append_result) def set_with_polymorphic(self, query, cls_or_mappers, selectable, polymorphic_on): @@ -3082,7 +3505,7 @@ class _MapperEntity(_QueryEntity): return ret - def row_processor(self, query, context, custom_rows): + def row_processor(self, query, context, result): adapter = self._get_entity_clauses(query, context) if context.adapter and adapter: @@ -3099,23 +3522,21 @@ class _MapperEntity(_QueryEntity): self.mapper._equivalent_columns) if query._primary_entity is self: - _instance = loading.instance_processor( - self.mapper, - context, - self.path, - adapter, - only_load_props=query._only_load_props, - refresh_state=context.refresh_state, - polymorphic_discriminator=self._polymorphic_discriminator - ) + only_load_props = query._only_load_props + refresh_state = context.refresh_state else: - _instance = loading.instance_processor( - self.mapper, - context, - self.path, - adapter, - polymorphic_discriminator=self._polymorphic_discriminator - ) + only_load_props = refresh_state = None + + _instance = loading._instance_processor( + self.mapper, + context, + result, + self.path, + adapter, + only_load_props=only_load_props, + refresh_state=refresh_state, + polymorphic_discriminator=self._polymorphic_discriminator + ) return _instance, self._label_name @@ -3136,41 +3557,19 @@ class _MapperEntity(_QueryEntity): ) ) - if self._with_polymorphic: - poly_properties = self.mapper._iterate_polymorphic_properties( - self._with_polymorphic) - else: - poly_properties = self.mapper._polymorphic_properties - - for value in poly_properties: - if query._only_load_props and \ - value.key not in query._only_load_props: - continue - value.setup( - context, - self, - self.path, - adapter, - only_load_props=query._only_load_props, - column_collection=context.primary_columns - ) - - if self._polymorphic_discriminator is not None and \ - self._polymorphic_discriminator \ - is not self.mapper.polymorphic_on: - - if adapter: - pd = adapter.columns[self._polymorphic_discriminator] - else: - pd = self._polymorphic_discriminator - context.primary_columns.append(pd) + loading._setup_entity_query( + context, self.mapper, self, + self.path, adapter, context.primary_columns, + with_polymorphic=self._with_polymorphic, + only_load_props=query._only_load_props, + polymorphic_discriminator=self._polymorphic_discriminator) def __str__(self): return str(self.mapper) @inspection._self_inspects -class Bundle(object): +class Bundle(InspectionAttr): """A grouping of SQL expressions that are returned by a :class:`.Query` under one namespace. @@ -3193,6 +3592,12 @@ class Bundle(object): """If True, queries for a single Bundle will be returned as a single entity, rather than an element within a keyed tuple.""" + is_clause_element = False + + is_mapper = False + + is_aliased_class = False + def __init__(self, name, *exprs, **kw): """Construct a new :class:`.Bundle`. 
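A short usage sketch, assuming a mapped ``User`` class as in the query examples earlier in this file; the ``c`` accessor on the resulting :class:`.Bundle` refers to its expressions, and ``bn`` is an arbitrary variable name::

    from sqlalchemy.orm import Bundle

    bn = Bundle('mybundle', User.id, User.name)

    # each row carries the bundle's values under its name,
    # as a keyed tuple
    for row in session.query(bn).filter(bn.c.name.like('e%')):
        print(row.mybundle.id, row.mybundle.name)

Per the ``single_entity`` flag described above, constructing the bundle with ``single_entity=True`` would return the keyed tuple directly as each row, rather than nesting it under the bundle name.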
@@ -3275,9 +3680,10 @@ class Bundle(object): :ref:`bundles` - includes an example of subclassing. """ - def proc(row, result): - return util.KeyedTuple( - [proc(row, None) for proc in procs], labels) + keyed_tuple = util.lightweight_named_tuple('result', labels) + + def proc(row): + return keyed_tuple([proc(row) for proc in procs]) return proc @@ -3302,8 +3708,6 @@ class _BundleEntity(_QueryEntity): self.supports_single_entity = self.bundle.single_entity - custom_rows = False - @property def entity_zero(self): for ent in self._entities: @@ -3344,9 +3748,9 @@ class _BundleEntity(_QueryEntity): for ent in self._entities: ent.setup_context(query, context) - def row_processor(self, query, context, custom_rows): + def row_processor(self, query, context, result): procs, labels = zip( - *[ent.row_processor(query, context, custom_rows) + *[ent.row_processor(query, context, result) for ent in self._entities] ) @@ -3361,36 +3765,47 @@ class _ColumnEntity(_QueryEntity): def __init__(self, query, column, namespace=None): self.expr = column self.namespace = namespace + search_entities = True + check_column = False if isinstance(column, util.string_types): column = sql.literal_column(column) self._label_name = column.name + search_entities = False + check_column = True + _entity = None elif isinstance(column, ( attributes.QueryableAttribute, interfaces.PropComparator )): + _entity = getattr(column, '_parententity', None) + if _entity is not None: + search_entities = False self._label_name = column.key column = column._query_clause_element() - else: - self._label_name = getattr(column, 'key', None) - - if not isinstance(column, expression.ColumnElement) and \ - hasattr(column, '_select_iterable'): - for c in column._select_iterable: - if c is column: - break - _ColumnEntity(query, c, namespace=column) - else: + check_column = True + if isinstance(column, Bundle): + _BundleEntity(query, column) return - elif isinstance(column, Bundle): - _BundleEntity(query, column) - return if not isinstance(column, sql.ColumnElement): + if hasattr(column, '_select_iterable'): + # break out an object like Table into + # individual columns + for c in column._select_iterable: + if c is column: + break + _ColumnEntity(query, c, namespace=column) + else: + return + raise sa_exc.InvalidRequestError( "SQL expression, column, or mapped entity " "expected - got '%r'" % (column, ) ) + elif not check_column: + self._label_name = getattr(column, 'key', None) + search_entities = True self.type = type_ = column.type if type_.hashable: @@ -3421,22 +3836,39 @@ class _ColumnEntity(_QueryEntity): # leaking out their entities into the main select construct self.actual_froms = actual_froms = set(column._from_objects) - self.entities = util.OrderedSet( - elem._annotations['parententity'] - for elem in visitors.iterate(column, {}) - if 'parententity' in elem._annotations - and actual_froms.intersection(elem._from_objects) - ) - - if self.entities: - self.entity_zero = list(self.entities)[0] - elif self.namespace is not None: - self.entity_zero = self.namespace + if not search_entities: + self.entity_zero = _entity + if _entity: + self.entities = [_entity] + else: + self.entities = [] + self._from_entities = set(self.entities) else: - self.entity_zero = None + all_elements = [ + elem for elem in visitors.iterate(column, {}) + if 'parententity' in elem._annotations + ] + + self.entities = util.unique_list([ + elem._annotations['parententity'] + for elem in all_elements + if 'parententity' in elem._annotations + ]) + + self._from_entities = 
set([ + elem._annotations['parententity'] + for elem in all_elements + if 'parententity' in elem._annotations + and actual_froms.intersection(elem._from_objects) + ]) + if self.entities: + self.entity_zero = self.entities[0] + elif self.namespace is not None: + self.entity_zero = self.namespace + else: + self.entity_zero = None supports_single_entity = False - custom_rows = False @property def entity_zero_or_selectable(self): @@ -3456,7 +3888,9 @@ class _ColumnEntity(_QueryEntity): def setup_entity(self, ext_info, aliased_adapter): if 'selectable' not in self.__dict__: self.selectable = ext_info.selectable - self.froms.add(ext_info.selectable) + + if self.actual_froms.intersection(ext_info.selectable._from_objects): + self.froms.add(ext_info.selectable) def corresponds_to(self, entity): # TODO: just returning False here, @@ -3470,36 +3904,39 @@ class _ColumnEntity(_QueryEntity): return not _is_aliased_class(self.entity_zero) and \ entity.common_parent(self.entity_zero) - def _resolve_expr_against_query_aliases(self, query, expr, context): - return query._adapt_clause(expr, False, True) - - def row_processor(self, query, context, custom_rows): - column = self._resolve_expr_against_query_aliases( - query, self.column, context) + def row_processor(self, query, context, result): + if ('fetch_column', self) in context.attributes: + column = context.attributes[('fetch_column', self)] + else: + column = query._adapt_clause(self.column, False, True) if context.adapter: column = context.adapter.columns[column] - def proc(row, result): - return row[column] - - return proc, self._label_name + getter = result._getter(column) + return getter, self._label_name def setup_context(self, query, context): - column = self._resolve_expr_against_query_aliases( - query, self.column, context) + column = query._adapt_clause(self.column, False, True) context.froms += tuple(self.froms) context.primary_columns.append(column) + context.attributes[('fetch_column', self)] = column + def __str__(self): return str(self.column) class QueryContext(object): - multi_row_eager_loaders = False - adapter = None - froms = () - for_update = None + __slots__ = ( + 'multi_row_eager_loaders', 'adapter', 'froms', 'for_update', + 'query', 'session', 'autoflush', 'populate_existing', + 'invoke_all_eagers', 'version_check', 'refresh_state', + 'primary_columns', 'secondary_columns', 'eager_order_by', + 'eager_joins', 'create_eager_joins', 'propagate_options', + 'attributes', 'statement', 'from_clause', 'whereclause', + 'order_by', 'labels', '_for_update_arg', 'runid', 'partials' + ) def __init__(self, query): @@ -3516,8 +3953,13 @@ class QueryContext(object): self.whereclause = query._criterion self.order_by = query._order_by + self.multi_row_eager_loaders = False + self.adapter = None + self.froms = () + self.for_update = None self.query = query self.session = query.session + self.autoflush = query._autoflush self.populate_existing = query._populate_existing self.invoke_all_eagers = query._invoke_all_eagers self.version_check = query._version_check diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/relationships.py b/lib/python3.5/site-packages/sqlalchemy/orm/relationships.py index c2debda..c58dd98 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/relationships.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/relationships.py @@ -1,5 +1,5 @@ # orm/relationships.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of 
SQLAlchemy and is released under @@ -16,13 +16,14 @@ and `secondaryjoin` aspects of :func:`.relationship`. from __future__ import absolute_import from .. import sql, util, exc as sa_exc, schema, log +import weakref from .util import CascadeOptions, _orm_annotate, _orm_deannotate from . import dependency from . import attributes from ..sql.util import ( ClauseAdapter, join_condition, _shallow_annotate, visit_binary_product, - _deep_deannotate, selectables_overlap + _deep_deannotate, selectables_overlap, adapt_criterion_to_null ) from ..sql import operators, expression, visitors from .interfaces import (MANYTOMANY, MANYTOONE, ONETOMANY, @@ -112,6 +113,7 @@ class RelationshipProperty(StrategizedProperty): active_history=False, cascade_backrefs=True, load_on_pending=False, + bake_queries=True, strategy_class=None, _local_remote_pairs=None, query_class=None, info=None): @@ -193,7 +195,7 @@ class RelationshipProperty(StrategizedProperty): The :paramref:`~.relationship.secondary` keyword argument is typically applied in the case where the intermediary :class:`.Table` - is not otherwise exprssed in any direct class mapping. If the + is not otherwise expressed in any direct class mapping. If the "secondary" table is also explicitly mapped elsewhere (e.g. as in :ref:`association_pattern`), one should consider applying the :paramref:`~.relationship.viewonly` flag so that this @@ -273,6 +275,31 @@ class RelationshipProperty(StrategizedProperty): :paramref:`~.relationship.backref` - alternative form of backref specification. + :param bake_queries=True: + Use the :class:`.BakedQuery` cache to cache the construction of SQL + used in lazy loads, when the :func:`.bake_lazy_loaders` function has + first been called. Defaults to True and is intended to provide an + "opt out" flag per-relationship when the baked query cache system is + in use. + + .. warning:: + + This flag **only** has an effect when the application-wide + :func:`.bake_lazy_loaders` function has been called. It + defaults to True so is an "opt out" flag. + + Setting this flag to False when baked queries are otherwise in + use might be to reduce + ORM memory use for this :func:`.relationship`, or to work around + unresolved stability issues observed within the baked query + cache system. + + .. versionadded:: 1.0.0 + + .. seealso:: + + :ref:`baked_toplevel` + :param cascade: a comma-separated list of cascade rules which determines how Session operations should be "cascaded" from parent to child. @@ -459,22 +486,18 @@ class RelationshipProperty(StrategizedProperty): nullable, or when the reference is one-to-one or a collection that is guaranteed to have one or at least one entry. - If the joined-eager load is chained onto an existing LEFT OUTER - JOIN, ``innerjoin=True`` will be bypassed and the join will continue - to chain as LEFT OUTER JOIN so that the results don't change. As an - alternative, specify the value ``"nested"``. This will instead nest - the join on the right side, e.g. using the form "a LEFT OUTER JOIN - (b JOIN c)". - - .. versionadded:: 0.9.4 Added ``innerjoin="nested"`` option to - support nesting of eager "inner" joins. + The option supports the same "nested" and "unnested" options as + that of :paramref:`.joinedload.innerjoin`. See that flag + for details on nested / unnested behaviors. .. seealso:: + :paramref:`.joinedload.innerjoin` - the option as specified by + loader option, including detail on nesting behavior. + :ref:`what_kind_of_loading` - Discussion of some details of various loader options. 
- :paramref:`.joinedload.innerjoin` - loader option version :param join_depth: when non-``None``, an integer value indicating how many levels @@ -531,7 +554,7 @@ class RelationshipProperty(StrategizedProperty): .. seealso:: - :doc:`/orm/loading` - Full documentation on relationship loader + :doc:`/orm/loading_relationships` - Full documentation on relationship loader configuration. :ref:`dynamic_relationship` - detail on the ``dynamic`` option. @@ -597,30 +620,26 @@ class RelationshipProperty(StrategizedProperty): and examples. :param passive_updates=True: - Indicates loading and INSERT/UPDATE/DELETE behavior when the - source of a foreign key value changes (i.e. an "on update" - cascade), which are typically the primary key columns of the - source row. + Indicates the persistence behavior to take when a referenced + primary key value changes in place, meaning that the referencing + foreign key columns will also need their value changed. - When True, it is assumed that ON UPDATE CASCADE is configured on + When True, it is assumed that ``ON UPDATE CASCADE`` is configured on the foreign key in the database, and that the database will handle propagation of an UPDATE from a source column to - dependent rows. Note that with databases which enforce - referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables), - ON UPDATE CASCADE is required for this operation. The - relationship() will update the value of the attribute on related - items which are locally present in the session during a flush. + dependent rows. When False, the SQLAlchemy :func:`.relationship` + construct will attempt to emit its own UPDATE statements to + modify related targets. However, note that SQLAlchemy **cannot** + emit an UPDATE for more than one level of cascade. Also, + setting this flag to False is not compatible in the case where + the database is in fact enforcing referential integrity, unless + those constraints are explicitly "deferred", if the target backend + supports it. - When False, it is assumed that the database does not enforce - referential integrity and will not be issuing its own CASCADE - operation for an update. The relationship() will issue the - appropriate UPDATE statements to the database in response to the - change of a referenced key, and items locally present in the - session during a flush will also be refreshed. - - This flag should probably be set to False if primary key changes - are expected and the database in use doesn't support CASCADE - (i.e. SQLite, MySQL MyISAM tables). + It is highly advised that an application which is employing + mutable primary keys keeps ``passive_updates`` set to True, + and instead uses the referential integrity features of the database + itself in order to handle the change efficiently and fully. .. 
seealso:: @@ -778,6 +797,7 @@ class RelationshipProperty(StrategizedProperty): """ + super(RelationshipProperty, self).__init__() self.uselist = uselist self.argument = argument @@ -804,6 +824,7 @@ class RelationshipProperty(StrategizedProperty): self.join_depth = join_depth self.local_remote_pairs = _local_remote_pairs self.extension = extension + self.bake_queries = bake_queries self.load_on_pending = load_on_pending self.comparator_factory = comparator_factory or \ RelationshipProperty.Comparator @@ -875,13 +896,13 @@ class RelationshipProperty(StrategizedProperty): """ self.prop = prop - self._parentmapper = parentmapper + self._parententity = parentmapper self._adapt_to_entity = adapt_to_entity if of_type: self._of_type = of_type def adapt_to_entity(self, adapt_to_entity): - return self.__class__(self.property, self._parentmapper, + return self.__class__(self.property, self._parententity, adapt_to_entity=adapt_to_entity, of_type=self._of_type) @@ -933,7 +954,7 @@ class RelationshipProperty(StrategizedProperty): """ return RelationshipProperty.Comparator( self.property, - self._parentmapper, + self._parententity, adapt_to_entity=self._adapt_to_entity, of_type=cls) @@ -1224,11 +1245,15 @@ class RelationshipProperty(StrategizedProperty): state = attributes.instance_state(other) def state_bindparam(x, state, col): - o = state.obj() # strong ref + dict_ = state.dict return sql.bindparam( - x, unique=True, callable_=lambda: - self.property.mapper. - _get_committed_attr_by_column(o, col)) + x, unique=True, + callable_=self.property._get_attr_w_warn_on_none( + col, + self.property.mapper._get_state_attr_by_column, + state, dict_, col, passive=attributes.PASSIVE_OFF + ) + ) def adapt(col): if self.adapter: @@ -1243,13 +1268,14 @@ class RelationshipProperty(StrategizedProperty): adapt(x) == None) for (x, y) in self.property.local_remote_pairs]) - criterion = sql.and_(*[x == y for (x, y) in - zip( - self.property.mapper.primary_key, - self.property. - mapper. 
- primary_key_from_instance(other)) + criterion = sql.and_(*[ + x == y for (x, y) in + zip( + self.property.mapper.primary_key, + self.property.mapper.primary_key_from_instance(other) + ) ]) + return ~self._criterion_exists(criterion) def __ne__(self, other): @@ -1293,8 +1319,9 @@ class RelationshipProperty(StrategizedProperty): """ if isinstance(other, (util.NoneType, expression.Null)): if self.property.direction == MANYTOONE: - return sql.or_(*[x != None for x in - self.property._calculated_foreign_keys]) + return _orm_annotate(~self.property._optimized_compare( + None, adapt_source=self.adapter)) + else: return self._criterion_exists() elif self.property.uselist: @@ -1303,7 +1330,7 @@ class RelationshipProperty(StrategizedProperty): " to an object or collection; use " "contains() to test for membership.") else: - return self.__negated_contains_or_equals(other) + return _orm_annotate(self.__negated_contains_or_equals(other)) @util.memoized_property def property(self): @@ -1311,36 +1338,88 @@ class RelationshipProperty(StrategizedProperty): mapperlib.Mapper._configure_all() return self.prop - def compare(self, op, value, - value_is_parent=False, - alias_secondary=True): - if op == operators.eq: - if value is None: - if self.uselist: - return ~sql.exists([1], self.primaryjoin) - else: - return self._optimized_compare( - None, - value_is_parent=value_is_parent, - alias_secondary=alias_secondary) - else: - return self._optimized_compare( - value, - value_is_parent=value_is_parent, - alias_secondary=alias_secondary) - else: - return op(self.comparator, value) + def _with_parent(self, instance, alias_secondary=True): + assert instance is not None + return self._optimized_compare( + instance, value_is_parent=True, alias_secondary=alias_secondary) - def _optimized_compare(self, value, value_is_parent=False, + def _optimized_compare(self, state, value_is_parent=False, adapt_source=None, alias_secondary=True): - if value is not None: - value = attributes.instance_state(value) - return self._lazy_strategy.lazy_clause( - value, - reverse_direction=not value_is_parent, - alias_secondary=alias_secondary, - adapt_source=adapt_source) + if state is not None: + state = attributes.instance_state(state) + + reverse_direction = not value_is_parent + + if state is None: + return self._lazy_none_clause( + reverse_direction, + adapt_source=adapt_source) + + if not reverse_direction: + criterion, bind_to_col = \ + self._lazy_strategy._lazywhere, \ + self._lazy_strategy._bind_to_col + else: + criterion, bind_to_col = \ + self._lazy_strategy._rev_lazywhere, \ + self._lazy_strategy._rev_bind_to_col + + if reverse_direction: + mapper = self.mapper + else: + mapper = self.parent + + dict_ = attributes.instance_dict(state.obj()) + + def visit_bindparam(bindparam): + if bindparam._identifying_key in bind_to_col: + bindparam.callable = self._get_attr_w_warn_on_none( + bind_to_col[bindparam._identifying_key], + mapper._get_state_attr_by_column, + state, dict_, + bind_to_col[bindparam._identifying_key], + passive=attributes.PASSIVE_OFF) + + if self.secondary is not None and alias_secondary: + criterion = ClauseAdapter( + self.secondary.alias()).\ + traverse(criterion) + + criterion = visitors.cloned_traverse( + criterion, {}, {'bindparam': visit_bindparam}) + + if adapt_source: + criterion = adapt_source(criterion) + return criterion + + def _get_attr_w_warn_on_none(self, column, fn, *arg, **kw): + def _go(): + value = fn(*arg, **kw) + if value is None: + util.warn( + "Got None for value of column %s; this is unsupported 
" + "for a relationship comparison and will not " + "currently produce an IS comparison " + "(but may in a future release)" % column) + return value + return _go + + def _lazy_none_clause(self, reverse_direction=False, adapt_source=None): + if not reverse_direction: + criterion, bind_to_col = \ + self._lazy_strategy._lazywhere, \ + self._lazy_strategy._bind_to_col + else: + criterion, bind_to_col = \ + self._lazy_strategy._rev_lazywhere, \ + self._lazy_strategy._rev_bind_to_col + + criterion = adapt_criterion_to_null(criterion, bind_to_col) + + if adapt_source: + criterion = adapt_source(criterion) + return criterion def __str__(self): return str(self.parent.class_.__name__) + "." + self.key @@ -1551,6 +1630,7 @@ class RelationshipProperty(StrategizedProperty): self._check_cascade_settings(self._cascade) self._post_init() self._generate_backref() + self._join_condition._warn_for_conflicting_sync_targets() super(RelationshipProperty, self).do_init() self._lazy_strategy = self._get_strategy((("lazy", "select"),)) @@ -1637,7 +1717,7 @@ class RelationshipProperty(StrategizedProperty): """Test that this relationship is legal, warn about inheritance conflicts.""" - if not self.is_primary() and not mapperlib.class_mapper( + if self.parent.non_primary and not mapperlib.class_mapper( self.parent.class_, configure=False).has_property(self.key): raise sa_exc.ArgumentError( @@ -1723,7 +1803,7 @@ class RelationshipProperty(StrategizedProperty): """Interpret the 'backref' instruction to create a :func:`.relationship` complementary to this one.""" - if not self.is_primary(): + if self.parent.non_primary: return if self.backref is not None and not self.back_populates: if isinstance(self.backref, util.string_types): @@ -2200,7 +2280,7 @@ class JoinCondition(object): elif self._local_remote_pairs or self._remote_side: self._annotate_remote_from_args() elif self._refers_to_parent_table(): - self._annotate_selfref(lambda col: "foreign" in col._annotations) + self._annotate_selfref(lambda col: "foreign" in col._annotations, False) elif self._tables_overlap(): self._annotate_remote_with_overlap() else: @@ -2219,7 +2299,7 @@ class JoinCondition(object): self.secondaryjoin = visitors.replacement_traverse( self.secondaryjoin, {}, repl) - def _annotate_selfref(self, fn): + def _annotate_selfref(self, fn, remote_side_given): """annotate 'remote' in primaryjoin, secondaryjoin when the relationship is detected as self-referential. 
@@ -2234,7 +2314,7 @@ if fn(binary.right) and not equated: binary.right = binary.right._annotate( {"remote": True}) - else: + elif not remote_side_given: self._warn_non_column_elements() self.primaryjoin = visitors.cloned_traverse( @@ -2259,7 +2339,7 @@ class JoinCondition(object): remote_side = self._remote_side if self._refers_to_parent_table(): - self._annotate_selfref(lambda col: col in remote_side) + self._annotate_selfref(lambda col: col in remote_side, True) else: def repl(element): if element in remote_side: @@ -2280,12 +2360,21 @@ class JoinCondition(object): binary.right, binary.left = proc_left_right(binary.right, binary.left) + check_entities = self.prop is not None and \ + self.prop.mapper is not self.prop.parent + def proc_left_right(left, right): if isinstance(left, expression.ColumnClause) and \ isinstance(right, expression.ColumnClause): if self.child_selectable.c.contains_column(right) and \ self.parent_selectable.c.contains_column(left): right = right._annotate({"remote": True}) + elif check_entities and \ + right._annotations.get('parentmapper') is self.prop.mapper: + right = right._annotate({"remote": True}) + elif check_entities and \ + left._annotations.get('parentmapper') is self.prop.mapper: + left = left._annotate({"remote": True}) else: self._warn_non_column_elements() @@ -2538,6 +2627,60 @@ class JoinCondition(object): self.secondary_synchronize_pairs = \ self._deannotate_pairs(secondary_sync_pairs) + _track_overlapping_sync_targets = weakref.WeakKeyDictionary() + + def _warn_for_conflicting_sync_targets(self): + if not self.support_sync: + return + + # we would like to detect if we are synchronizing any column + # pairs in conflict with another relationship that wishes to sync + # an entirely different column to the same target. This is a + # very rare edge case so we will try to minimize the memory/overhead + # impact of this check + for from_, to_ in [ + (from_, to_) for (from_, to_) in self.synchronize_pairs + ] + [ + (from_, to_) for (from_, to_) in self.secondary_synchronize_pairs + ]: + # save ourselves a ton of memory and overhead by only + # considering columns that are subject to overlapping + # FK constraints at the core level. This condition can arise + # if multiple relationships overlap foreign() directly, but + # we're going to assume it's typically a ForeignKeyConstraint- + # level configuration that benefits from this warning. + if len(to_.foreign_keys) < 2: + continue + + if to_ not in self._track_overlapping_sync_targets: + self._track_overlapping_sync_targets[to_] = \ + weakref.WeakKeyDictionary({self.prop: from_}) + else: + other_props = [] + prop_to_from = self._track_overlapping_sync_targets[to_] + for pr, fr_ in prop_to_from.items(): + if pr.mapper in mapperlib._mapper_registry and \ + fr_ is not from_ and \ + pr not in self.prop._reverse_property: + other_props.append((pr, fr_)) + + if other_props: + util.warn( + "relationship '%s' will copy column %s to column %s, " + "which conflicts with relationship(s): %s. " + "Consider applying " + "viewonly=True to read-only relationships, or provide " + "a primaryjoin condition marking writable columns " + "with the foreign() annotation." 
% ( + self.prop, + from_, to_, + ", ".join( + "'%s' (copies %s to %s)" % (pr, fr_, to_) + for (pr, fr_) in other_props) + ) + ) + self._track_overlapping_sync_targets[to_][self.prop] = from_ + @util.memoized_property def remote_columns(self): return self._gather_join_annotations("remote") @@ -2654,27 +2797,31 @@ class JoinCondition(object): def create_lazy_clause(self, reverse_direction=False): binds = util.column_dict() - lookup = collections.defaultdict(list) equated_columns = util.column_dict() - if reverse_direction and self.secondaryjoin is None: - for l, r in self.local_remote_pairs: - lookup[r].append((r, l)) - equated_columns[l] = r - else: - # replace all "local side" columns, which is - # anything that isn't marked "remote" + has_secondary = self.secondaryjoin is not None + + if has_secondary: + lookup = collections.defaultdict(list) for l, r in self.local_remote_pairs: lookup[l].append((l, r)) equated_columns[r] = l + elif not reverse_direction: + for l, r in self.local_remote_pairs: + equated_columns[r] = l + else: + for l, r in self.local_remote_pairs: + equated_columns[l] = r def col_to_bind(col): - if (reverse_direction and col in lookup) or \ - (not reverse_direction and "local" in col._annotations): - if col in lookup: - for tobind, equated in lookup[col]: - if equated in binds: - return None + + if ( + (not reverse_direction and 'local' in col._annotations) or + reverse_direction and ( + (has_secondary and col in lookup) or + (not has_secondary and 'remote' in col._annotations) + ) + ): if col not in binds: binds[col] = sql.bindparam( None, None, type_=col.type, unique=True) diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/scoping.py b/lib/python3.5/site-packages/sqlalchemy/orm/scoping.py index 71648d1..6306514 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/scoping.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/scoping.py @@ -1,5 +1,5 @@ # orm/scoping.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -21,6 +21,12 @@ class scoped_session(object): """ + session_factory = None + """The `session_factory` provided to `__init__` is stored in this + attribute and may be accessed at a later time. This can be useful when + a new non-scoped :class:`.Session` or :class:`.Connection` to the + database is needed.""" + def __init__(self, session_factory, scopefunc=None): """Construct a new :class:`.scoped_session`. @@ -38,6 +44,7 @@ class scoped_session(object): """ self.session_factory = session_factory + if scopefunc: self.registry = ScopedRegistry(session_factory, scopefunc) else: @@ -45,12 +52,12 @@ class scoped_session(object): def __call__(self, **kw): """Return the current :class:`.Session`, creating it - using the session factory if not present. + using the :attr:`.scoped_session.session_factory` if not present. :param \**kw: Keyword arguments will be passed to the - session factory callable, if an existing :class:`.Session` - is not present. If the :class:`.Session` is present and - keyword arguments have been passed, + :attr:`.scoped_session.session_factory` callable, if an existing + :class:`.Session` is not present. If the :class:`.Session` is present + and keyword arguments have been passed, :exc:`~sqlalchemy.exc.InvalidRequestError` is raised. 
""" diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/session.py b/lib/python3.5/site-packages/sqlalchemy/orm/session.py index dd82fa0..000441f 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/session.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/session.py @@ -1,5 +1,5 @@ # orm/session.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -20,6 +20,8 @@ from .base import ( _class_to_mapper, _state_mapper, object_state, _none_set, state_str, instance_str ) +import itertools +from . import persistence from .unitofwork import UOWTransaction from . import state as statelib import sys @@ -45,7 +47,6 @@ def _state_session(state): class _SessionClassMethods(object): - """Class-level methods for :class:`.Session`, :class:`.sessionmaker`.""" @classmethod @@ -84,7 +85,6 @@ CLOSED = util.symbol('CLOSED') class SessionTransaction(object): - """A :class:`.Session`-level transaction. :class:`.SessionTransaction` is a mostly behind-the-scenes object @@ -226,10 +226,10 @@ class SessionTransaction(object): def _is_transaction_boundary(self): return self.nested or not self._parent - def connection(self, bindkey, **kwargs): + def connection(self, bindkey, execution_options=None, **kwargs): self._assert_active() bind = self.session.get_bind(bindkey, **kwargs) - return self._connection_for_bind(bind) + return self._connection_for_bind(bind, execution_options) def _begin(self, nested=False): self._assert_active() @@ -237,14 +237,21 @@ class SessionTransaction(object): self.session, self, nested=nested) def _iterate_parents(self, upto=None): - if self._parent is upto: - return (self,) - else: - if self._parent is None: + + current = self + result = () + while current: + result += (current, ) + if current._parent is upto: + break + elif current._parent is None: raise sa_exc.InvalidRequestError( "Transaction %s is not on the active transaction list" % ( upto)) - return (self,) + self._parent._iterate_parents(upto) + else: + current = current._parent + + return result def _take_snapshot(self): if not self._is_transaction_boundary: @@ -271,7 +278,7 @@ class SessionTransaction(object): del s.key for s, (oldkey, newkey) in self._key_switches.items(): - self.session.identity_map.discard(s) + self.session.identity_map.safe_discard(s) s.key = oldkey self.session.identity_map.replace(s) @@ -293,22 +300,27 @@ class SessionTransaction(object): if not self.nested and self.session.expire_on_commit: for s in self.session.identity_map.all_states(): s._expire(s.dict, self.session.identity_map._modified) - for s in self._deleted: - s.session_id = None + for s in list(self._deleted): + s._detach() self._deleted.clear() elif self.nested: self._parent._new.update(self._new) + self._parent._dirty.update(self._dirty) self._parent._deleted.update(self._deleted) self._parent._key_switches.update(self._key_switches) - def _connection_for_bind(self, bind): + def _connection_for_bind(self, bind, execution_options): self._assert_active() if bind in self._connections: + if execution_options: + util.warn( + "Connection is already established for the " + "given bind; execution_options ignored") return self._connections[bind][0] if self._parent: - conn = self._parent._connection_for_bind(bind) + conn = self._parent._connection_for_bind(bind, execution_options) if not self.nested: return conn else: @@ -321,6 +333,9 @@ class SessionTransaction(object): else: conn = 
bind.contextual_connect() + if execution_options: + conn = conn.execution_options(**execution_options) + if self.session.twophase and self._parent is None: transaction = conn.begin_twophase() elif self.nested: @@ -397,26 +412,29 @@ class SessionTransaction(object): for subtransaction in stx._iterate_parents(upto=self): subtransaction.close() + boundary = self if self._state in (ACTIVE, PREPARED): for transaction in self._iterate_parents(): if transaction._parent is None or transaction.nested: transaction._rollback_impl() transaction._state = DEACTIVE + boundary = transaction break else: transaction._state = DEACTIVE sess = self.session - if self.session._enable_transaction_accounting and \ + if sess._enable_transaction_accounting and \ not sess._is_clean(): + # if items were added, deleted, or mutated # here, we need to re-restore the snapshot util.warn( "Session's state has been changed on " "a non-active transaction - this state " "will be discarded.") - self._restore_snapshot(dirty_only=self.nested) + boundary._restore_snapshot(dirty_only=boundary.nested) self.close() if self._parent and _capture_exception: @@ -435,11 +453,13 @@ class SessionTransaction(object): self.session.dispatch.after_rollback(self.session) - def close(self): + def close(self, invalidate=False): self.session.transaction = self._parent if self._parent is None: for connection, transaction, autoclose in \ set(self._connections.values()): + if invalidate: + connection.invalidate() if autoclose: connection.close() else: @@ -473,7 +493,6 @@ class SessionTransaction(object): class Session(_SessionClassMethods): - """Manages persistence operations for ORM-mapped objects. The Session's usage paradigm is described at :doc:`/orm/session`. @@ -485,7 +504,8 @@ class Session(_SessionClassMethods): '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested', 'close', 'commit', 'connection', 'delete', 'execute', 'expire', 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', - 'is_modified', + 'is_modified', 'bulk_save_objects', 'bulk_insert_mappings', + 'bulk_update_mappings', 'merge', 'query', 'refresh', 'rollback', 'scalar') @@ -529,8 +549,8 @@ class Session(_SessionClassMethods): :meth:`~.Session.flush` call to this ``Session`` before proceeding. This is a convenience feature so that :meth:`~.Session.flush` need not be called repeatedly in order for database queries to retrieve - results. It's typical that ``autoflush`` is used in conjunction with - ``autocommit=False``. In this scenario, explicit calls to + results. It's typical that ``autoflush`` is used in conjunction + with ``autocommit=False``. In this scenario, explicit calls to :meth:`~.Session.flush` are rarely needed; you usually only need to call :meth:`~.Session.commit` (which flushes) to finalize changes. @@ -546,8 +566,8 @@ class Session(_SessionClassMethods): :class:`.Engine` or :class:`.Connection` objects. Operations which proceed relative to a particular :class:`.Mapper` will consult this dictionary for the direct :class:`.Mapper` instance as - well as the mapper's ``mapped_table`` attribute in order to locate a - connectable to use. The full resolution is described in the + well as the mapper's ``mapped_table`` attribute in order to locate + a connectable to use. The full resolution is described in the :meth:`.Session.get_bind`. Usage looks like:: @@ -594,8 +614,8 @@ class Session(_SessionClassMethods): .. 
versionadded:: 0.9.0 :param query_cls: Class which should be used to create new Query - objects, as returned by the :meth:`~.Session.query` method. Defaults - to :class:`.Query`. + objects, as returned by the :meth:`~.Session.query` method. + Defaults to :class:`.Query`. :param twophase: When ``True``, all transactions will be started as a "two phase" transaction, i.e. using the "two phase" semantics @@ -610,15 +630,26 @@ class Session(_SessionClassMethods): ``False``, objects placed in the :class:`.Session` will be strongly referenced until explicitly removed or the :class:`.Session` is closed. **Deprecated** - this option - is obsolete. + is present to allow compatibility with older applications, but + it is recommended that strong references to objects + be maintained by the calling application + externally to the :class:`.Session` itself, + to the extent that is required by the application. """ if weak_identity_map: self._identity_cls = identity.WeakInstanceDict else: - util.warn_deprecated("weak_identity_map=False is deprecated. " - "This feature is not needed.") + util.warn_deprecated( + "weak_identity_map=False is deprecated. " + "It is present to allow compatibility with older " + "applications, but " + "it is recommended that strong references to " + "objects be maintained by the calling application " + "externally to the :class:`.Session` itself, " + "to the extent that is required by the application.") + self._identity_cls = identity.StrongInstanceDict self.identity_map = self._identity_cls() @@ -644,14 +675,8 @@ class Session(_SessionClassMethods): SessionExtension._adapt_listener(self, ext) if binds is not None: - for mapperortable, bind in binds.items(): - insp = inspect(mapperortable) - if insp.is_selectable: - self.bind_table(mapperortable, bind) - elif insp.is_mapper: - self.bind_mapper(mapperortable, bind) - else: - assert False + for key, bind in binds.items(): + self._add_bind(key, bind) if not self.autocommit: self.begin() @@ -666,7 +691,7 @@ class Session(_SessionClassMethods): def info(self): """A user-modifiable dictionary. - The initial value of this dictioanry can be populated using the + The initial value of this dictionary can be populated using the ``info`` argument to the :class:`.Session` constructor or :class:`.sessionmaker` constructor or factory methods. The dictionary here is always local to this :class:`.Session` and can be modified @@ -797,6 +822,7 @@ class Session(_SessionClassMethods): def connection(self, mapper=None, clause=None, bind=None, close_with_result=False, + execution_options=None, **kw): """Return a :class:`.Connection` object corresponding to this :class:`.Session` object's transactional state. @@ -841,6 +867,18 @@ class Session(_SessionClassMethods): configured with ``autocommit=True`` and does not already have a transaction in progress. + :param execution_options: a dictionary of execution options that will + be passed to :meth:`.Connection.execution_options`, **when the + connection is first procured only**. If the connection is already + present within the :class:`.Session`, a warning is emitted and + the arguments are ignored. + + .. versionadded:: 0.9.9 + + .. 
seealso:: + + :ref:`session_transaction_isolation` + :param \**kw: Additional keyword arguments are sent to :meth:`get_bind()`, allowing additional arguments to be passed to custom @@ -851,13 +889,18 @@ class Session(_SessionClassMethods): bind = self.get_bind(mapper, clause=clause, **kw) return self._connection_for_bind(bind, - close_with_result=close_with_result) + close_with_result=close_with_result, + execution_options=execution_options) - def _connection_for_bind(self, engine, **kwargs): + def _connection_for_bind(self, engine, execution_options=None, **kw): if self.transaction is not None: - return self.transaction._connection_for_bind(engine) + return self.transaction._connection_for_bind( + engine, execution_options) else: - return engine.contextual_connect(**kwargs) + conn = engine.contextual_connect(**kw) + if execution_options: + conn = conn.execution_options(**execution_options) + return conn def execute(self, clause, params=None, mapper=None, bind=None, **kw): """Execute a SQL expression construct or string statement within @@ -1006,10 +1049,46 @@ class Session(_SessionClassMethods): not use any connection resources until they are first needed. """ + self._close_impl(invalidate=False) + + def invalidate(self): + """Close this Session, using connection invalidation. + + This is a variant of :meth:`.Session.close` that will additionally + ensure that the :meth:`.Connection.invalidate` method will be called + on all :class:`.Connection` objects. This can be called when + the database is known to be in a state where the connections are + no longer safe to be used. + + E.g.:: + + try: + sess = Session() + sess.add(User()) + sess.commit() + except gevent.Timeout: + sess.invalidate() + raise + except: + sess.rollback() + raise + + This clears all items and ends any transaction in progress. + + If this session were created with ``autocommit=False``, a new + transaction is immediately begun. Note that this new transaction does + not use any connection resources until they are first needed. + + .. versionadded:: 0.9.9 + + """ + self._close_impl(invalidate=True) + + def _close_impl(self, invalidate): self.expunge_all() if self.transaction is not None: for transaction in self.transaction._iterate_parents(): - transaction.close() + transaction.close(invalidate) def expunge_all(self): """Remove all object instances from this ``Session``. @@ -1029,40 +1108,47 @@ class Session(_SessionClassMethods): # TODO: + crystallize + document resolution order # vis. bind_mapper/bind_table + def _add_bind(self, key, bind): + try: + insp = inspect(key) + except sa_exc.NoInspectionAvailable: + if not isinstance(key, type): + raise exc.ArgumentError( + "Not acceptable bind target: %s" % + key) + else: + self.__binds[key] = bind + else: + if insp.is_selectable: + self.__binds[insp] = bind + elif insp.is_mapper: + self.__binds[insp.class_] = bind + for selectable in insp._all_tables: + self.__binds[selectable] = bind + else: + raise exc.ArgumentError( + "Not acceptable bind target: %s" % + key) + def bind_mapper(self, mapper, bind): - """Bind operations for a mapper to a Connectable. + """Associate a :class:`.Mapper` with a "bind", e.g. a :class:`.Engine` + or :class:`.Connection`. - mapper - A mapper instance or mapped class - - bind - Any Connectable: a :class:`.Engine` or :class:`.Connection`. - - All subsequent operations involving this mapper will use the given - `bind`. + The given mapper is added to a lookup used by the + :meth:`.Session.get_bind` method. 
""" - if isinstance(mapper, type): - mapper = class_mapper(mapper) - - self.__binds[mapper.base_mapper] = bind - for t in mapper._all_tables: - self.__binds[t] = bind + self._add_bind(mapper, bind) def bind_table(self, table, bind): - """Bind operations on a Table to a Connectable. + """Associate a :class:`.Table` with a "bind", e.g. a :class:`.Engine` + or :class:`.Connection`. - table - A :class:`.Table` instance - - bind - Any Connectable: a :class:`.Engine` or :class:`.Connection`. - - All subsequent operations involving this :class:`.Table` will use the - given `bind`. + The given mapper is added to a lookup used by the + :meth:`.Session.get_bind` method. """ - self.__binds[table] = bind + self._add_bind(table, bind) def get_bind(self, mapper=None, clause=None): """Return a "bind" to which this :class:`.Session` is bound. @@ -1116,6 +1202,7 @@ class Session(_SessionClassMethods): bound :class:`.MetaData`. """ + if mapper is clause is None: if self.bind: return self.bind @@ -1125,15 +1212,23 @@ class Session(_SessionClassMethods): "Connection, and no context was provided to locate " "a binding.") - c_mapper = mapper is not None and _class_to_mapper(mapper) or None + if mapper is not None: + try: + mapper = inspect(mapper) + except sa_exc.NoInspectionAvailable: + if isinstance(mapper, type): + raise exc.UnmappedClassError(mapper) + else: + raise - # manually bound? if self.__binds: - if c_mapper: - if c_mapper.base_mapper in self.__binds: - return self.__binds[c_mapper.base_mapper] - elif c_mapper.mapped_table in self.__binds: - return self.__binds[c_mapper.mapped_table] + if mapper: + for cls in mapper.class_.__mro__: + if cls in self.__binds: + return self.__binds[cls] + if clause is None: + clause = mapper.mapped_table + if clause is not None: for t in sql_util.find_tables(clause, include_crud=True): if t in self.__binds: @@ -1145,12 +1240,12 @@ class Session(_SessionClassMethods): if isinstance(clause, sql.expression.ClauseElement) and clause.bind: return clause.bind - if c_mapper and c_mapper.mapped_table.bind: - return c_mapper.mapped_table.bind + if mapper and mapper.mapped_table.bind: + return mapper.mapped_table.bind context = [] if mapper is not None: - context.append('mapper %s' % c_mapper) + context.append('mapper %s' % mapper) if clause is not None: context.append('SQL expression') @@ -1397,11 +1492,12 @@ class Session(_SessionClassMethods): self._new.pop(state) state._detach() elif self.identity_map.contains_state(state): - self.identity_map.discard(state) + self.identity_map.safe_discard(state) self._deleted.pop(state, None) state._detach() elif self.transaction: self.transaction._deleted.pop(state, None) + state._detach() def _register_newly_persistent(self, states): for state in states: @@ -1413,7 +1509,7 @@ class Session(_SessionClassMethods): instance_key = mapper._identity_key_from_state(state) - if _none_set.issubset(instance_key[1]) and \ + if _none_set.intersection(instance_key[1]) and \ not mapper.allow_partial_pks or \ _none_set.issuperset(instance_key[1]): raise exc.FlushError( @@ -1430,10 +1526,10 @@ class Session(_SessionClassMethods): if state.key is None: state.key = instance_key elif state.key != instance_key: - # primary key switch. use discard() in case another + # primary key switch. 
use safe_discard() in case another # state has already replaced this one in the identity # map (see test/orm/test_naturalpks.py ReversePKsTest) - self.identity_map.discard(state) + self.identity_map.safe_discard(state) if state in self.transaction._key_switches: orig_key = self.transaction._key_switches[state][0] else: @@ -1467,7 +1563,7 @@ class Session(_SessionClassMethods): if self._enable_transaction_accounting and self.transaction: self.transaction._deleted[state] = True - self.identity_map.discard(state) + self.identity_map.safe_discard(state) self._deleted.pop(state, None) state.deleted = True @@ -1630,6 +1726,9 @@ class Session(_SessionClassMethods): "all changes on mapped instances before merging with " "load=False.") key = mapper._identity_key_from_state(state) + key_is_persistent = attributes.NEVER_SET not in key[1] + else: + key_is_persistent = True if key in self.identity_map: merged = self.identity_map[key] @@ -1646,9 +1745,10 @@ class Session(_SessionClassMethods): self._update_impl(merged_state) new_instance = True - elif not _none_set.issubset(key[1]) or \ + elif key_is_persistent and ( + not _none_set.intersection(key[1]) or (mapper.allow_partial_pks and - not _none_set.issuperset(key[1])): + not _none_set.issuperset(key[1]))): merged = self.query(mapper.class_).get(key[1]) else: merged = None @@ -1746,7 +1846,7 @@ class Session(_SessionClassMethods): "function to send this object back to the transient state." % state_str(state) ) - self._before_attach(state) + self._before_attach(state, check_identity_map=False) self._deleted.pop(state, None) if discard_existing: self.identity_map.replace(state) @@ -1826,13 +1926,12 @@ class Session(_SessionClassMethods): self._attach(state, include_before=True) state._load_pending = True - def _before_attach(self, state): + def _before_attach(self, state, check_identity_map=True): if state.session_id != self.hash_key and \ self.dispatch.before_attach: self.dispatch.before_attach(self, state.obj()) - def _attach(self, state, include_before=False): - if state.key and \ + if check_identity_map and state.key and \ state.key in self.identity_map and \ not self.identity_map.contains_state(state): raise sa_exc.InvalidRequestError( @@ -1848,10 +1947,11 @@ class Session(_SessionClassMethods): "(this is '%s')" % (state_str(state), state.session_id, self.hash_key)) + def _attach(self, state, include_before=False): + if state.session_id != self.hash_key: - if include_before and \ - self.dispatch.before_attach: - self.dispatch.before_attach(self, state.obj()) + if include_before: + self._before_attach(state) state.session_id = self.hash_key if state.modified and state._strong_obj is None: state._strong_obj = state.obj() @@ -1898,7 +1998,7 @@ class Session(_SessionClassMethods): For ``autocommit`` Sessions with no active manual transaction, flush() will create a transaction on the fly that surrounds the entire set of - operations int the flush. + operations in the flush. :param objects: Optional; restricts the flush operation to operate only on elements that are in the given collection. @@ -2036,6 +2136,226 @@ class Session(_SessionClassMethods): with util.safe_reraise(): transaction.rollback(_capture_exception=True) + def bulk_save_objects( + self, objects, return_defaults=False, update_changed_only=True): + """Perform a bulk save of the given list of objects. 
+ + The bulk save feature allows mapped objects to be used as the + source of simple INSERT and UPDATE operations which can be more easily + grouped together into higher performing "executemany" + operations; the extraction of data from the objects is also performed + using a lower-latency process that ignores whether or not attributes + have actually been modified in the case of UPDATEs, and also ignores + SQL expressions. + + The objects as given are not added to the session and no additional + state is established on them, unless the ``return_defaults`` flag + is also set, in which case primary key attributes and server-side + default values will be populated. + + .. versionadded:: 1.0.0 + + .. warning:: + + The bulk save feature allows for a lower-latency INSERT/UPDATE + of rows at the expense of most other unit-of-work features. + Features such as object management, relationship handling, + and SQL clause support are **silently omitted** in favor of raw + INSERT/UPDATES of records. + + **Please read the list of caveats at** :ref:`bulk_operations` + **before using this method, and fully test and confirm the + functionality of all code developed using these systems.** + + :param objects: a list of mapped object instances. The mapped + objects are persisted as is, and are **not** associated with the + :class:`.Session` afterwards. + + For each object, whether the object is sent as an INSERT or an + UPDATE is dependent on the same rules used by the :class:`.Session` + in traditional operation; if the object has the + :attr:`.InstanceState.key` + attribute set, then the object is assumed to be "detached" and + will result in an UPDATE. Otherwise, an INSERT is used. + + In the case of an UPDATE, statements are grouped based on which + attributes have changed, and are thus to be the subject of each + SET clause. If ``update_changed_only`` is False, then all + attributes present within each object are applied to the UPDATE + statement, which may help in allowing the statements to be grouped + together into a larger executemany(), and will also reduce the + overhead of checking history on attributes. + + :param return_defaults: when True, rows that are missing values which + generate defaults, namely integer primary key defaults and sequences, + will be inserted **one at a time**, so that the primary key value + is available. In particular this will allow joined-inheritance + and other multi-table mappings to insert correctly without the need + to provide primary key values ahead of time; however, + :paramref:`.Session.bulk_save_objects.return_defaults` **greatly + reduces the performance gains** of the method overall. + + :param update_changed_only: when True, UPDATE statements are rendered + based on those attributes in each state that have logged changes. + When False, all attributes present are rendered into the SET clause + with the exception of primary key attributes. + + .. seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_update_mappings` + + """ + for (mapper, isupdate), states in itertools.groupby( + (attributes.instance_state(obj) for obj in objects), + lambda state: (state.mapper, state.key is not None) + ): + self._bulk_save_mappings( + mapper, states, isupdate, True, + return_defaults, update_changed_only) + + def bulk_insert_mappings(self, mapper, mappings, return_defaults=False): + """Perform a bulk insert of the given list of mapping dictionaries. 
+ + The bulk insert feature allows plain Python dictionaries to be used as + the source of simple INSERT operations which can be more easily + grouped together into higher performing "executemany" + operations. Using dictionaries, there are no "history" or session + state management features in use, reducing latency when inserting + large numbers of simple rows. + + The values within the dictionaries as given are typically passed + without modification into Core :class:`.Insert` constructs, after + organizing the values within them across the tables to which + the given mapper is mapped. + + .. versionadded:: 1.0.0 + + .. warning:: + + The bulk insert feature allows for a lower-latency INSERT + of rows at the expense of most other unit-of-work features. + Features such as object management, relationship handling, + and SQL clause support are **silently omitted** in favor of raw + INSERT of records. + + **Please read the list of caveats at** :ref:`bulk_operations` + **before using this method, and fully test and confirm the + functionality of all code developed using these systems.** + + :param mapper: a mapped class, or the actual :class:`.Mapper` object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a list of dictionaries, each one containing the state + of the mapped row to be inserted, in terms of the attribute names + on the mapped class. If the mapping refers to multiple tables, + such as a joined-inheritance mapping, each dictionary must contain + all keys to be populated into all tables. + + :param return_defaults: when True, rows that are missing values which + generate defaults, namely integer primary key defaults and sequences, + will be inserted **one at a time**, so that the primary key value + is available. In particular this will allow joined-inheritance + and other multi-table mappings to insert correctly without the need + to provide primary + key values ahead of time; however, + :paramref:`.Session.bulk_insert_mappings.return_defaults` + **greatly reduces the performance gains** of the method overall. + If the rows + to be inserted only refer to a single table, then there is no + reason this flag should be set as the returned default information + is not used. + + + .. seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_save_objects` + + :meth:`.Session.bulk_update_mappings` + + """ + self._bulk_save_mappings( + mapper, mappings, False, False, return_defaults, False) + + def bulk_update_mappings(self, mapper, mappings): + """Perform a bulk update of the given list of mapping dictionaries. + + The bulk update feature allows plain Python dictionaries to be used as + the source of simple UPDATE operations which can be more easily + grouped together into higher performing "executemany" + operations. Using dictionaries, there are no "history" or session + state management features in use, reducing latency when updating + large numbers of simple rows. + + .. versionadded:: 1.0.0 + + .. warning:: + + The bulk update feature allows for a lower-latency UPDATE + of rows at the expense of most other unit-of-work features. + Features such as object management, relationship handling, + and SQL clause support are **silently omitted** in favor of raw + UPDATES of records. 
+ + **Please read the list of caveats at** :ref:`bulk_operations` + **before using this method, and fully test and confirm the + functionality of all code developed using these systems.** + + :param mapper: a mapped class, or the actual :class:`.Mapper` object, + representing the single kind of object represented within the mapping + list. + + :param mappings: a list of dictionaries, each one containing the state + of the mapped row to be updated, in terms of the attribute names + on the mapped class. If the mapping refers to multiple tables, + such as a joined-inheritance mapping, each dictionary may contain + keys corresponding to all tables. All those keys which are present + and are not part of the primary key are applied to the SET clause + of the UPDATE statement; the primary key values, which are required, + are applied to the WHERE clause. + + + .. seealso:: + + :ref:`bulk_operations` + + :meth:`.Session.bulk_insert_mappings` + + :meth:`.Session.bulk_save_objects` + + """ + self._bulk_save_mappings(mapper, mappings, True, False, False, False) + + def _bulk_save_mappings( + self, mapper, mappings, isupdate, isstates, + return_defaults, update_changed_only): + mapper = _class_to_mapper(mapper) + self._flushing = True + + transaction = self.begin( + subtransactions=True) + try: + if isupdate: + persistence._bulk_update( + mapper, mappings, transaction, + isstates, update_changed_only) + else: + persistence._bulk_insert( + mapper, mappings, transaction, isstates, return_defaults) + transaction.commit() + + except: + with util.safe_reraise(): + transaction.rollback(_capture_exception=True) + finally: + self._flushing = False + def is_modified(self, instance, include_collections=True, passive=True): """Return ``True`` if the given instance has locally @@ -2251,7 +2571,6 @@ class Session(_SessionClassMethods): class sessionmaker(_SessionClassMethods): - """A configurable :class:`.Session` factory. The :class:`.sessionmaker` factory generates new @@ -2379,18 +2698,49 @@ class sessionmaker(_SessionClassMethods): def make_transient(instance): - """Make the given instance 'transient'. + """Alter the state of the given instance so that it is :term:`transient`. - This will remove its association with any - session and additionally will remove its "identity key", - such that it's as though the object were newly constructed, - except retaining its values. It also resets the - "deleted" flag on the state if this object - had been explicitly deleted by its session. + .. note:: - Attributes which were "expired" or deferred at the - instance level are reverted to undefined, and - will not trigger any loads. + :func:`.make_transient` is a special-case function for + advanced use cases only. + + The given mapped instance is assumed to be in the :term:`persistent` or + :term:`detached` state. The function will remove its association with any + :class:`.Session` as well as its :attr:`.InstanceState.identity`. The + effect is that the object will behave as though it were newly constructed, + except retaining any attribute / collection values that were loaded at the + time of the call. The :attr:`.InstanceState.deleted` flag is also reset + if this object had been deleted as a result of using + :meth:`.Session.delete`. + + .. warning:: + + :func:`.make_transient` does **not** "unexpire" or otherwise eagerly + load ORM-mapped attributes that are not currently loaded at the time + the function is called. 
This includes attributes which: + + * were expired via :meth:`.Session.expire` + + * were expired as the natural effect of committing a session + transaction, e.g. :meth:`.Session.commit` + + * are normally :term:`lazy loaded` but are not currently loaded + + * are "deferred" via :ref:`deferred` and are not yet loaded + + * were not present in the query which loaded this object, such as that + which is common in joined table inheritance and other scenarios. + + After :func:`.make_transient` is called, unloaded attributes such + as those above will normally resolve to the value ``None`` when + accessed, or an empty collection for a collection-oriented attribute. + As the object is transient and un-associated with any database + identity, it will no longer retrieve these values. + + .. seealso:: + + :func:`.make_transient_to_detached` """ state = attributes.instance_state(instance) @@ -2398,9 +2748,13 @@ def make_transient(instance): if s: s._expunge_state(state) - # remove expired state and - # deferred callables - state.callables.clear() + # remove expired state + state.expired_attributes.clear() + + # remove deferred callables + if state.callables: + del state.callables + if state.key: del state.key if state.deleted: @@ -2408,7 +2762,12 @@ def make_transient(instance): def make_transient_to_detached(instance): - """Make the given transient instance 'detached'. + """Make the given transient instance :term:`detached`. + + .. note:: + + :func:`.make_transient_to_detached` is a special-case function for + advanced use cases only. All attribute history on the given instance will be reset as though the instance were freshly loaded @@ -2443,16 +2802,19 @@ def make_transient_to_detached(instance): def object_session(instance): - """Return the ``Session`` to which instance belongs. + """Return the :class:`.Session` to which the given instance belongs. - If the instance is not a mapped instance, an error is raised. + This is essentially the same as the :attr:`.InstanceState.session` + accessor. See that attribute for details. """ try: - return _state_session(attributes.instance_state(instance)) + state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) + else: + return _state_session(state) _new_sessionid = util.counter() diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/state.py b/lib/python3.5/site-packages/sqlalchemy/orm/state.py index b9aff7a..c66507d 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/state.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/state.py @@ -1,5 +1,5 @@ # orm/state.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -21,7 +21,7 @@ from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \ from . import base -class InstanceState(interfaces._InspectionAttr): +class InstanceState(interfaces.InspectionAttr): """tracks state information at the instance level. The :class:`.InstanceState` is a key object used by the @@ -58,15 +58,35 @@ class InstanceState(interfaces._InspectionAttr): expired = False deleted = False _load_pending = False - is_instance = True + callables = () + """A namespace where a per-state loader callable can be associated. + + In SQLAlchemy 1.0, this is only used for lazy loaders / deferred + loaders that were set up via query option. 
+ + Previously, callables was also used to indicate expired attributes + by storing a link to the InstanceState itself in this dictionary. + This role is now handled by the expired_attributes set. + + """ + def __init__(self, obj, manager): self.class_ = obj.__class__ self.manager = manager self.obj = weakref.ref(obj, self._cleanup) - self.callables = {} self.committed_state = {} + self.expired_attributes = set() + + expired_attributes = None + """The set of keys which are 'expired' to be loaded by + the manager's deferred scalar loader, assuming no pending + changes. + + see also the ``unmodified`` collection which is intersected + against this set when a refresh operation occurs.""" + @util.memoized_property def attrs(self): @@ -146,7 +166,16 @@ class InstanceState(interfaces._InspectionAttr): @util.dependencies("sqlalchemy.orm.session") def session(self, sessionlib): """Return the owning :class:`.Session` for this instance, - or ``None`` if none available.""" + or ``None`` if none available. + + Note that the result here can in some cases be *different* + from that of ``obj in session``; an object that's been deleted + will report as not ``in session``; however, if the transaction is + still in progress, this attribute will still refer to that session. + Only when the transaction is completed does the object become + fully detached under normal circumstances. + + """ return sessionlib._state_session(self) @property @@ -165,7 +194,7 @@ class InstanceState(interfaces._InspectionAttr): Returns ``None`` if the object has no primary key identity. .. note:: - An object which is transient or pending + An object which is :term:`transient` or :term:`pending` does **not** have a mapped identity until it is flushed, even if its attributes include primary key values. @@ -220,11 +249,25 @@ class InstanceState(interfaces._InspectionAttr): del self.obj def _cleanup(self, ref): - instance_dict = self._instance_dict() - if instance_dict: - instance_dict.discard(self) + """Weakref callback cleanup. + + This callable cleans out the state when it is being garbage + collected. + + this _cleanup **assumes** that there are no strong refs to us! + Will not work otherwise! + + """ + instance_dict = self._instance_dict() + if instance_dict is not None: + instance_dict._fast_discard(self) + del self._instance_dict + + # we can't possibly be in instance_dict._modified + # b.c. this is weakref cleanup only, that set + # is strong referencing! 
+ # assert self not in instance_dict._modified - self.callables = {} self.session_id = self._strong_obj = None del self.obj @@ -251,7 +294,7 @@ class InstanceState(interfaces._InspectionAttr): return {} def _initialize_instance(*mixed, **kwargs): - self, instance, args = mixed[0], mixed[1], mixed[2:] + self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa manager = self.manager manager.dispatch.init(self, args, kwargs) @@ -259,8 +302,8 @@ class InstanceState(interfaces._InspectionAttr): try: return manager.original_init(*mixed[1:], **kwargs) except: - manager.dispatch.init_failure(self, args, kwargs) - raise + with util.safe_reraise(): + manager.dispatch.init_failure(self, args, kwargs) def get_history(self, key, passive): return self.manager[key].impl.get_history(self, self.dict, passive) @@ -279,7 +322,7 @@ class InstanceState(interfaces._InspectionAttr): (k, self.__dict__[k]) for k in ( 'committed_state', '_pending_mutations', 'modified', 'expired', 'callables', 'key', 'parents', 'load_options', - 'class_', + 'class_', 'expired_attributes' ) if k in self.__dict__ ) if self.load_path: @@ -306,7 +349,18 @@ class InstanceState(interfaces._InspectionAttr): self.parents = state_dict.get('parents', {}) self.modified = state_dict.get('modified', False) self.expired = state_dict.get('expired', False) - self.callables = state_dict.get('callables', {}) + if 'callables' in state_dict: + self.callables = state_dict['callables'] + + try: + self.expired_attributes = state_dict['expired_attributes'] + except KeyError: + self.expired_attributes = set() + # 0.9 and earlier compat + for k in list(self.callables): + if self.callables[k] is self: + self.expired_attributes.add(k) + del self.callables[k] self.__dict__.update([ (k, state_dict[k]) for k in ( @@ -320,12 +374,6 @@ class InstanceState(interfaces._InspectionAttr): state_dict['manager'](self, inst, state_dict) - def _initialize(self, key): - """Set this attribute to an empty value or collection, - based on the AttributeImpl in use.""" - - self.manager.get_impl(key).initialize(self, self.dict) - def _reset(self, dict_, key): """Remove the given attribute and any callables associated with it.""" @@ -333,71 +381,73 @@ class InstanceState(interfaces._InspectionAttr): old = dict_.pop(key, None) if old is not None and self.manager[key].impl.collection: self.manager[key].impl._invalidate_collection(old) - self.callables.pop(key, None) - - def _expire_attribute_pre_commit(self, dict_, key): - """a fast expire that can be called by column loaders during a load. - - The additional bookkeeping is finished up in commit_all(). - - Should only be called for scalar attributes. - - This method is actually called a lot with joined-table - loading, when the second table isn't present in the result. 
- - """ - dict_.pop(key, None) - self.callables[key] = self + self.expired_attributes.discard(key) + if self.callables: + self.callables.pop(key, None) @classmethod - def _row_processor(cls, manager, fn, key): + def _instance_level_callable_processor(cls, manager, fn, key): impl = manager[key].impl if impl.collection: def _set_callable(state, dict_, row): + if 'callables' not in state.__dict__: + state.callables = {} old = dict_.pop(key, None) if old is not None: impl._invalidate_collection(old) state.callables[key] = fn else: def _set_callable(state, dict_, row): + if 'callables' not in state.__dict__: + state.callables = {} state.callables[key] = fn return _set_callable def _expire(self, dict_, modified_set): self.expired = True + if self.modified: modified_set.discard(self) + self.committed_state.clear() + self.modified = False - self.modified = False self._strong_obj = None - self.committed_state.clear() + if '_pending_mutations' in self.__dict__: + del self.__dict__['_pending_mutations'] - InstanceState._pending_mutations._reset(self) + if 'parents' in self.__dict__: + del self.__dict__['parents'] - # clear out 'parents' collection. not - # entirely clear how we can best determine - # which to remove, or not. - InstanceState.parents._reset(self) + self.expired_attributes.update( + [impl.key for impl in self.manager._scalar_loader_impls + if impl.expire_missing or impl.key in dict_] + ) - for key in self.manager: - impl = self.manager[key].impl - if impl.accepts_scalar_loader and \ - (impl.expire_missing or key in dict_): - self.callables[key] = self - old = dict_.pop(key, None) - if impl.collection and old is not None: - impl._invalidate_collection(old) + if self.callables: + for k in self.expired_attributes.intersection(self.callables): + del self.callables[k] + + for k in self.manager._collection_impl_keys.intersection(dict_): + collection = dict_.pop(k) + collection._sa_adapter.invalidated = True + + for key in self.manager._all_key_set.intersection(dict_): + del dict_[key] self.manager.dispatch.expire(self, None) def _expire_attributes(self, dict_, attribute_names): pending = self.__dict__.get('_pending_mutations', None) + callables = self.callables + for key in attribute_names: impl = self.manager[key].impl if impl.accepts_scalar_loader: - self.callables[key] = self + self.expired_attributes.add(key) + if callables and key in callables: + del callables[key] old = dict_.pop(key, None) if impl.collection and old is not None: impl._invalidate_collection(old) @@ -408,7 +458,7 @@ class InstanceState(interfaces._InspectionAttr): self.manager.dispatch.expire(self, attribute_names) - def __call__(self, state, passive): + def _load_expired(self, state, passive): """__call__ allows the InstanceState to act as a deferred callable for loading expired attributes, which is also serializable (picklable). @@ -427,8 +477,7 @@ class InstanceState(interfaces._InspectionAttr): # instance state didn't have an identity, # the attributes still might be in the callables # dict. ensure they are removed. - for k in toload.intersection(self.callables): - del self.callables[k] + self.expired_attributes.clear() return ATTR_WAS_SET @@ -463,18 +512,6 @@ class InstanceState(interfaces._InspectionAttr): if self.manager[attr].impl.accepts_scalar_loader ) - @property - def expired_attributes(self): - """Return the set of keys which are 'expired' to be loaded by - the manager's deferred scalar loader, assuming no pending - changes. 
- - see also the ``unmodified`` collection which is intersected - against this set when a refresh operation occurs. - - """ - return set([k for k, v in self.callables.items() if v is self]) - def _instance_dict(self): return None @@ -497,6 +534,7 @@ class InstanceState(interfaces._InspectionAttr): if (self.session_id and self._strong_obj is None) \ or not self.modified: + self.modified = True instance_dict = self._instance_dict() if instance_dict: instance_dict._modified.add(self) @@ -517,7 +555,6 @@ class InstanceState(interfaces._InspectionAttr): self.manager[attr.key], base.state_class_str(self) )) - self.modified = True def _commit(self, dict_, keys): """Commit attributes. @@ -534,10 +571,18 @@ class InstanceState(interfaces._InspectionAttr): self.expired = False - for key in set(self.callables).\ + self.expired_attributes.difference_update( + set(keys).intersection(dict_)) + + # the per-keys commit removes object-level callables, + # while that of commit_all does not. it's not clear + # if this behavior has a clear rationale, however tests do + # ensure this is what it does. + if self.callables: + for key in set(self.callables).\ intersection(keys).\ - intersection(dict_): - del self.callables[key] + intersection(dict_): + del self.callables[key] def _commit_all(self, dict_, instance_dict=None): """commit all attributes unconditionally. @@ -548,7 +593,8 @@ class InstanceState(interfaces._InspectionAttr): - all attributes are marked as "committed" - the "strong dirty reference" is removed - the "modified" flag is set to False - - any "expired" markers/callables for attributes loaded are removed. + - any "expired" markers for scalar attributes loaded are removed. + - lazy load callables for objects / collections *stay* Attributes marked as "expired" can potentially remain "expired" after this step if a value was not populated in state.dict. @@ -558,16 +604,17 @@ class InstanceState(interfaces._InspectionAttr): @classmethod def _commit_all_states(self, iter, instance_dict=None): - """Mass version of commit_all().""" + """Mass / highly inlined version of commit_all().""" for state, dict_ in iter: - state.committed_state.clear() - InstanceState._pending_mutations._reset(state) + state_dict = state.__dict__ - callables = state.callables - for key in list(callables): - if key in dict_ and callables[key] is state: - del callables[key] + state.committed_state.clear() + + if '_pending_mutations' in state_dict: + del state_dict['_pending_mutations'] + + state.expired_attributes.difference_update(dict_) if instance_dict and state.modified: instance_dict._modified.discard(state) diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/strategies.py b/lib/python3.5/site-packages/sqlalchemy/orm/strategies.py index 33be868..7942b14 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/strategies.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/strategies.py @@ -1,5 +1,5 @@ # orm/strategies.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -22,6 +22,7 @@ from . import properties from .interfaces import ( LoaderStrategy, StrategizedProperty ) +from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE from .session import _state_session import itertools @@ -105,6 +106,8 @@ class UninstrumentedColumnLoader(LoaderStrategy): if the argument is against the with_polymorphic selectable. 
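Throughout this patch, ``create_row_processor()`` stops returning per-strategy processor tuples and instead appends ``(key, fn)`` pairs into a shared ``populators`` dictionary keyed by phase (``"quick"``, ``"new"``, ``"expire"``, and so on), which the instance processor drains once per row. A simplified model of that flow, assuming dict-like rows; a sketch only, not the actual ``loading.py`` consumer::

    populators = {"quick": [], "new": [], "expire": []}

    # a ColumnLoader-style strategy contributes a plain getter
    populators["quick"].append(("name", lambda row: row["name"]))
    # a class-level deferred column contributes an expiry marker instead
    populators["expire"].append(("bio", True))

    def populate_row(row, dict_, expired):
        for key, getter in populators["quick"]:
            dict_[key] = getter(row)
        for key, flag in populators["expire"]:
            if flag:
                expired.add(key)

    dict_, expired = {}, set()
    populate_row({"name": "ed"}, dict_, expired)
    assert dict_ == {"name": "ed"} and expired == {"bio"}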
""" + __slots__ = 'columns', + def __init__(self, parent): super(UninstrumentedColumnLoader, self).__init__(parent) self.columns = self.parent_property.columns @@ -119,8 +122,8 @@ class UninstrumentedColumnLoader(LoaderStrategy): def create_row_processor( self, context, path, loadopt, - mapper, row, adapter): - return None, None, None + mapper, result, adapter, populators): + pass @log.class_logger @@ -128,6 +131,8 @@ class UninstrumentedColumnLoader(LoaderStrategy): class ColumnLoader(LoaderStrategy): """Provide loading behavior for a :class:`.ColumnProperty`.""" + __slots__ = 'columns', 'is_composite' + def __init__(self, parent): super(ColumnLoader, self).__init__(parent) self.columns = self.parent_property.columns @@ -135,12 +140,18 @@ class ColumnLoader(LoaderStrategy): def setup_query( self, context, entity, path, loadopt, - adapter, column_collection, **kwargs): + adapter, column_collection, memoized_populators, **kwargs): + for c in self.columns: if adapter: c = adapter.columns[c] column_collection.append(c) + fetch = self.columns[0] + if adapter: + fetch = adapter.columns[fetch] + memoized_populators[self.parent_property] = fetch + def init_class_attribute(self, mapper): self.is_class_level = True coltype = self.columns[0].type @@ -157,21 +168,18 @@ class ColumnLoader(LoaderStrategy): def create_row_processor( self, context, path, - loadopt, mapper, row, adapter): - key = self.key + loadopt, mapper, result, adapter, populators): # look through list of columns represented here # to see which, if any, is present in the row. for col in self.columns: if adapter: col = adapter.columns[col] - if col is not None and col in row: - def fetch_col(state, dict_, row): - dict_[key] = row[col] - return fetch_col, None, None + getter = result._getter(col) + if getter: + populators["quick"].append((self.key, getter)) + break else: - def expire_for_non_present_col(state, dict_, row): - state._expire_attribute_pre_commit(dict_, key) - return expire_for_non_present_col, None, None + populators["expire"].append((self.key, True)) @log.class_logger @@ -179,6 +187,8 @@ class ColumnLoader(LoaderStrategy): class DeferredColumnLoader(LoaderStrategy): """Provide loading behavior for a deferred :class:`.ColumnProperty`.""" + __slots__ = 'columns', 'group' + def __init__(self, parent): super(DeferredColumnLoader, self).__init__(parent) if hasattr(self.parent_property, 'composite_class'): @@ -189,28 +199,18 @@ class DeferredColumnLoader(LoaderStrategy): def create_row_processor( self, context, path, loadopt, - mapper, row, adapter): - col = self.columns[0] - if adapter: - col = adapter.columns[col] + mapper, result, adapter, populators): - key = self.key - if col in row: - return self.parent_property._get_strategy_by_cls(ColumnLoader).\ - create_row_processor( - context, path, loadopt, mapper, row, adapter) - - elif not self.is_class_level: - set_deferred_for_local_state = InstanceState._row_processor( - mapper.class_manager, - LoadDeferredColumns(key), key) - return set_deferred_for_local_state, None, None + # this path currently does not check the result + # for the column; this is because in most cases we are + # working just with the setup_query() directive which does + # not support this, and the behavior here should be consistent. 
+ if not self.is_class_level: + set_deferred_for_local_state = \ + self.parent_property._deferred_column_loader + populators["new"].append((self.key, set_deferred_for_local_state)) else: - def reset_col_for_deferred(state, dict_, row): - # reset state on the key so that deferred callables - # fire off on next access. - state._reset(dict_, key) - return reset_col_for_deferred, None, None + populators["expire"].append((self.key, False)) def init_class_attribute(self, mapper): self.is_class_level = True @@ -223,20 +223,22 @@ class DeferredColumnLoader(LoaderStrategy): ) def setup_query( - self, context, entity, path, loadopt, adapter, - only_load_props=None, **kwargs): + self, context, entity, path, loadopt, + adapter, column_collection, memoized_populators, + only_load_props=None, **kw): if ( ( loadopt and 'undefer_pks' in loadopt.local_opts and - set(self.columns).intersection(self.parent.primary_key) + set(self.columns).intersection( + self.parent._should_undefer_in_wildcard) ) or ( loadopt and self.group and - loadopt.local_opts.get('undefer_group', False) == self.group + loadopt.local_opts.get('undefer_group_%s' % self.group, False) ) or ( @@ -245,7 +247,12 @@ class DeferredColumnLoader(LoaderStrategy): ): self.parent_property._get_strategy_by_cls(ColumnLoader).\ setup_query(context, entity, - path, loadopt, adapter, **kwargs) + path, loadopt, adapter, + column_collection, memoized_populators, **kw) + elif self.is_class_level: + memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED + else: + memoized_populators[self.parent_property] = _DEFER_FOR_STATE def _load_for_state(self, state, passive): if not state.key: @@ -305,6 +312,8 @@ class LoadDeferredColumns(object): class AbstractRelationshipLoader(LoaderStrategy): """LoaderStratgies which deal with related objects.""" + __slots__ = 'mapper', 'target', 'uselist' + def __init__(self, parent): super(AbstractRelationshipLoader, self).__init__(parent) self.mapper = self.parent_property.mapper @@ -321,6 +330,8 @@ class NoLoader(AbstractRelationshipLoader): """ + __slots__ = () + def init_class_attribute(self, mapper): self.is_class_level = True @@ -333,21 +344,29 @@ class NoLoader(AbstractRelationshipLoader): def create_row_processor( self, context, path, loadopt, mapper, - row, adapter): + result, adapter, populators): def invoke_no_load(state, dict_, row): - state._initialize(self.key) - return invoke_no_load, None, None + if self.uselist: + state.manager.get_impl(self.key).initialize(state, dict_) + else: + dict_[self.key] = None + populators["new"].append((self.key, invoke_no_load)) @log.class_logger @properties.RelationshipProperty.strategy_for(lazy=True) @properties.RelationshipProperty.strategy_for(lazy="select") -class LazyLoader(AbstractRelationshipLoader): +class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots): """Provide loading behavior for a :class:`.RelationshipProperty` with "lazy=True", that is loads when first accessed. 
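For orientation, a usage sketch of the behavior this strategy implements: ``relationship()`` defaults to ``lazy='select'``, so the related collection is SELECTed on first attribute access. The model names and the in-memory SQLite URL are assumptions for the example::

    from sqlalchemy import Column, ForeignKey, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session, relationship

    Base = declarative_base()

    class A(Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)
        bs = relationship("B", lazy="select")  # the LazyLoader strategy

    class B(Base):
        __tablename__ = 'b'
        id = Column(Integer, primary_key=True)
        a_id = Column(Integer, ForeignKey('a.id'))

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = Session(engine)
    session.add(A(bs=[B()]))
    session.commit()

    a = session.query(A).first()
    print(a.bs)  # the lazy load is emitted here, on first access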
""" + __slots__ = ( + '_lazywhere', '_rev_lazywhere', 'use_get', '_bind_to_col', + '_equated_columns', '_rev_bind_to_col', '_rev_equated_columns', + '_simple_lazy_clause') + def __init__(self, parent): super(LazyLoader, self).__init__(parent) join_condition = self.parent_property._join_condition @@ -378,7 +397,7 @@ class LazyLoader(AbstractRelationshipLoader): self._equated_columns[c] = self._equated_columns[col] self.logger.info("%s will use query.get() to " - "optimize instance loads" % self) + "optimize instance loads", self) def init_class_attribute(self, mapper): self.is_class_level = True @@ -406,78 +425,57 @@ class LazyLoader(AbstractRelationshipLoader): active_history=active_history ) - def lazy_clause( - self, state, reverse_direction=False, - alias_secondary=False, - adapt_source=None, - passive=None): + def _memoized_attr__simple_lazy_clause(self): + criterion, bind_to_col = ( + self._lazywhere, + self._bind_to_col + ) + + params = [] + + def visit_bindparam(bindparam): + bindparam.unique = False + if bindparam._identifying_key in bind_to_col: + params.append(( + bindparam.key, bind_to_col[bindparam._identifying_key], + None)) + else: + params.append((bindparam.key, None, bindparam.value)) + + criterion = visitors.cloned_traverse( + criterion, {}, {'bindparam': visit_bindparam} + ) + + return criterion, params + + def _generate_lazy_clause(self, state, passive): + criterion, param_keys = self._simple_lazy_clause + if state is None: - return self._lazy_none_clause( - reverse_direction, - adapt_source=adapt_source) + return sql_util.adapt_criterion_to_null( + criterion, [key for key, ident, value in param_keys]) - if not reverse_direction: - criterion, bind_to_col = \ - self._lazywhere, \ - self._bind_to_col - else: - criterion, bind_to_col = \ - self._rev_lazywhere, \ - self._rev_bind_to_col - - if reverse_direction: - mapper = self.parent_property.mapper - else: - mapper = self.parent_property.parent + mapper = self.parent_property.parent o = state.obj() # strong ref dict_ = attributes.instance_dict(o) - # use the "committed state" only if we're in a flush - # for this state. 
+ if passive & attributes.INIT_OK: + passive ^= attributes.INIT_OK - if passive and passive & attributes.LOAD_AGAINST_COMMITTED: - def visit_bindparam(bindparam): - if bindparam._identifying_key in bind_to_col: - bindparam.callable = \ - lambda: mapper._get_committed_state_attr_by_column( - state, dict_, - bind_to_col[bindparam._identifying_key]) - else: - def visit_bindparam(bindparam): - if bindparam._identifying_key in bind_to_col: - bindparam.callable = \ - lambda: mapper._get_state_attr_by_column( - state, dict_, - bind_to_col[bindparam._identifying_key]) + params = {} + for key, ident, value in param_keys: + if ident is not None: + if passive and passive & attributes.LOAD_AGAINST_COMMITTED: + value = mapper._get_committed_state_attr_by_column( + state, dict_, ident, passive) + else: + value = mapper._get_state_attr_by_column( + state, dict_, ident, passive) - if self.parent_property.secondary is not None and alias_secondary: - criterion = sql_util.ClauseAdapter( - self.parent_property.secondary.alias()).\ - traverse(criterion) + params[key] = value - criterion = visitors.cloned_traverse( - criterion, {}, {'bindparam': visit_bindparam}) - - if adapt_source: - criterion = adapt_source(criterion) - return criterion - - def _lazy_none_clause(self, reverse_direction=False, adapt_source=None): - if not reverse_direction: - criterion, bind_to_col = \ - self._lazywhere, \ - self._bind_to_col - else: - criterion, bind_to_col = \ - self._rev_lazywhere, \ - self._rev_bind_to_col - - criterion = sql_util.adapt_criterion_to_null(criterion, bind_to_col) - - if adapt_source: - criterion = adapt_source(criterion) - return criterion + return criterion, params def _load_for_state(self, state, passive): if not state.key and ( @@ -554,10 +552,9 @@ class LazyLoader(AbstractRelationshipLoader): @util.dependencies("sqlalchemy.orm.strategy_options") def _emit_lazyload( - self, strategy_options, session, state, - ident_key, passive): - q = session.query(self.mapper)._adapt_all_clauses() + self, strategy_options, session, state, ident_key, passive): + q = session.query(self.mapper)._adapt_all_clauses() if self.parent_property.secondary is not None: q = q.select_from(self.mapper, self.parent_property.secondary) @@ -588,17 +585,19 @@ class LazyLoader(AbstractRelationshipLoader): rev._use_get and \ not isinstance(rev.strategy, LazyLoader): q = q.options( - strategy_options.Load(rev.parent). - lazyload(rev.key)) + strategy_options.Load(rev.parent).lazyload(rev.key)) - lazy_clause = self.lazy_clause(state, passive=passive) + lazy_clause, params = self._generate_lazy_clause( + state, passive=passive) if pending: - bind_values = sql_util.bind_values(lazy_clause) - if None in bind_values: + if util.has_intersection( + orm_util._none_set, params.values()): return None + elif util.has_intersection(orm_util._never_set, params.values()): + return None - q = q.filter(lazy_clause) + q = q.filter(lazy_clause).params(params) result = q.all() if self.uselist: @@ -618,7 +617,7 @@ class LazyLoader(AbstractRelationshipLoader): def create_row_processor( self, context, path, loadopt, - mapper, row, adapter): + mapper, result, adapter, populators): key = self.key if not self.is_class_level: # we are not the primary manager for this attribute @@ -629,12 +628,12 @@ class LazyLoader(AbstractRelationshipLoader): # "lazyload" option on a "no load" # attribute - "eager" attributes always have a # class-level lazyloader installed. 
- set_lazy_callable = InstanceState._row_processor( + set_lazy_callable = InstanceState._instance_level_callable_processor( mapper.class_manager, - LoadLazyAttribute(key), key) + LoadLazyAttribute(key, self._strategy_keys[0]), key) - return set_lazy_callable, None, None - else: + populators["new"].append((self.key, set_lazy_callable)) + elif context.populate_existing or mapper.always_refresh: def reset_for_lazy_callable(state, dict_, row): # we are the primary manager for this attribute on # this class - reset its @@ -646,26 +645,29 @@ class LazyLoader(AbstractRelationshipLoader): # any existing state. state._reset(dict_, key) - return reset_for_lazy_callable, None, None + populators["new"].append((self.key, reset_for_lazy_callable)) class LoadLazyAttribute(object): """serializable loader object used by LazyLoader""" - def __init__(self, key): + def __init__(self, key, strategy_key=(('lazy', 'select'),)): self.key = key + self.strategy_key = strategy_key def __call__(self, state, passive=attributes.PASSIVE_OFF): key = self.key instance_mapper = state.manager.mapper prop = instance_mapper._props[key] - strategy = prop._strategies[LazyLoader] + strategy = prop._strategies[self.strategy_key] return strategy._load_for_state(state, passive) @properties.RelationshipProperty.strategy_for(lazy="immediate") class ImmediateLoader(AbstractRelationshipLoader): + __slots__ = () + def init_class_attribute(self, mapper): self.parent_property.\ _get_strategy_by_cls(LazyLoader).\ @@ -679,16 +681,18 @@ class ImmediateLoader(AbstractRelationshipLoader): def create_row_processor( self, context, path, loadopt, - mapper, row, adapter): + mapper, result, adapter, populators): def load_immediate(state, dict_, row): state.get_impl(self.key).get(state, dict_) - return None, None, load_immediate + populators["delayed"].append((self.key, load_immediate)) @log.class_logger @properties.RelationshipProperty.strategy_for(lazy="subquery") class SubqueryLoader(AbstractRelationshipLoader): + __slots__ = 'join_depth', + def __init__(self, parent): super(SubqueryLoader, self).__init__(parent) self.join_depth = self.parent_property.join_depth @@ -706,6 +710,8 @@ class SubqueryLoader(AbstractRelationshipLoader): if not context.query._enable_eagerloads: return + elif context.query._yield_per: + context.query._no_yield_per("subquery") path = path[self.parent_property] @@ -994,7 +1000,7 @@ class SubqueryLoader(AbstractRelationshipLoader): def create_row_processor( self, context, path, loadopt, - mapper, row, adapter): + mapper, result, adapter, populators): if not self.parent.class_manager[self.key].impl.supports_population: raise sa_exc.InvalidRequestError( "'%s' does not support object " @@ -1006,7 +1012,13 @@ class SubqueryLoader(AbstractRelationshipLoader): subq = path.get(context.attributes, 'subquery') if subq is None: - return None, None, None + return + + assert subq.session is context.session, ( + "Subquery session doesn't refer to that of " + "our context. Are there broken context caching " + "schemes being used?" 
+ ) local_cols = self.parent_property.local_columns @@ -1022,11 +1034,14 @@ class SubqueryLoader(AbstractRelationshipLoader): local_cols = [adapter.columns[c] for c in local_cols] if self.uselist: - return self._create_collection_loader(collections, local_cols) + self._create_collection_loader( + context, collections, local_cols, populators) else: - return self._create_scalar_loader(collections, local_cols) + self._create_scalar_loader( + context, collections, local_cols, populators) - def _create_collection_loader(self, collections, local_cols): + def _create_collection_loader( + self, context, collections, local_cols, populators): def load_collection_from_subq(state, dict_, row): collection = collections.get( tuple([row[col] for col in local_cols]), @@ -1035,9 +1050,12 @@ class SubqueryLoader(AbstractRelationshipLoader): state.get_impl(self.key).\ set_committed_value(state, dict_, collection) - return load_collection_from_subq, None, None, collections.loader + populators["new"].append((self.key, load_collection_from_subq)) + if context.invoke_all_eagers: + populators["eager"].append((self.key, collections.loader)) - def _create_scalar_loader(self, collections, local_cols): + def _create_scalar_loader( + self, context, collections, local_cols, populators): def load_scalar_from_subq(state, dict_, row): collection = collections.get( tuple([row[col] for col in local_cols]), @@ -1053,7 +1071,9 @@ class SubqueryLoader(AbstractRelationshipLoader): state.get_impl(self.key).\ set_committed_value(state, dict_, scalar) - return load_scalar_from_subq, None, None, collections.loader + populators["new"].append((self.key, load_scalar_from_subq)) + if context.invoke_all_eagers: + populators["eager"].append((self.key, collections.loader)) @log.class_logger @@ -1064,6 +1084,9 @@ class JoinedLoader(AbstractRelationshipLoader): using joined eager loading. """ + + __slots__ = 'join_depth', + def __init__(self, parent): super(JoinedLoader, self).__init__(parent) self.join_depth = self.parent_property.join_depth @@ -1081,6 +1104,8 @@ class JoinedLoader(AbstractRelationshipLoader): if not context.query._enable_eagerloads: return + elif context.query._yield_per and self.uselist: + context.query._no_yield_per("joined collection") path = path[self.parent_property] @@ -1123,16 +1148,12 @@ class JoinedLoader(AbstractRelationshipLoader): path = path[self.mapper] - for value in self.mapper._iterate_polymorphic_properties( - mappers=with_polymorphic): - value.setup( - context, - entity, - path, - clauses, - parentmapper=self.mapper, - column_collection=add_to_collection, - chained_from_outerjoin=chained_from_outerjoin) + loading._setup_entity_query( + context, self.mapper, entity, + path, clauses, add_to_collection, + with_polymorphic=with_polymorphic, + parentmapper=self.mapper, + chained_from_outerjoin=chained_from_outerjoin) if with_poly_info is not None and \ None in set(context.secondary_columns): @@ -1235,10 +1256,11 @@ class JoinedLoader(AbstractRelationshipLoader): clauses = orm_util.ORMAdapter( to_adapt, equivalents=self.mapper._equivalent_columns, - adapt_required=True) + adapt_required=True, allow_label_resolve=False, + anonymize_labels=True) assert clauses.aliased_class is not None - if self.parent_property.direction != interfaces.MANYTOONE: + if self.parent_property.uselist: context.multi_row_eager_loaders = True innerjoin = ( @@ -1303,8 +1325,19 @@ class JoinedLoader(AbstractRelationshipLoader): if adapter: if getattr(adapter, 'aliased_class', None): + # joining from an adapted entity. 
The adapted entity + # might be a "with_polymorphic", so resolve that to our + # specific mapper's entity before looking for our attribute + # name on it. + efm = inspect(adapter.aliased_class).\ + _entity_for_mapper( + parentmapper + if parentmapper.isa(self.parent) else self.parent) + + # look for our attribute on the adapted entity, else fall back + # to our straight property onclause = getattr( - adapter.aliased_class, self.key, + efm.entity, self.key, self.parent_property) else: onclause = getattr( @@ -1321,40 +1354,31 @@ class JoinedLoader(AbstractRelationshipLoader): assert clauses.aliased_class is not None - join_to_outer = innerjoin and isinstance(towrap, sql.Join) and \ - towrap.isouter + attach_on_outside = ( + not chained_from_outerjoin or + not innerjoin or innerjoin == 'unnested') - if chained_from_outerjoin and join_to_outer and innerjoin == 'nested': - inner = orm_util.join( - towrap.right, - clauses.aliased_class, - onclause, - isouter=False - ) - - eagerjoin = orm_util.join( - towrap.left, - inner, - towrap.onclause, - isouter=True - ) - eagerjoin._target_adapter = inner._target_adapter - else: - if chained_from_outerjoin: - innerjoin = False - eagerjoin = orm_util.join( + if attach_on_outside: + # this is the "classic" eager join case. + eagerjoin = orm_util._ORMJoin( towrap, clauses.aliased_class, onclause, - isouter=not innerjoin + isouter=not innerjoin or ( + chained_from_outerjoin and isinstance(towrap, sql.Join) + ), _left_memo=self.parent, _right_memo=self.mapper ) + else: + # all other cases are innerjoin=='nested' approach + eagerjoin = self._splice_nested_inner_join( + path, towrap, clauses, onclause) + context.eager_joins[entity_key] = eagerjoin # send a hint to the Query as to where it may "splice" this join eagerjoin.stop_on = entity.selectable - if self.parent_property.secondary is None and \ - not parentmapper: + if not parentmapper: # for parentclause that is the non-eager end of the join, # ensure all the parent cols in the primaryjoin are actually # in the @@ -1377,7 +1401,67 @@ class JoinedLoader(AbstractRelationshipLoader): ) ) - def _create_eager_adapter(self, context, row, adapter, path, loadopt): + def _splice_nested_inner_join( + self, path, join_obj, clauses, onclause, splicing=False): + + if splicing is False: + # first call is always handed a join object + # from the outside + assert isinstance(join_obj, orm_util._ORMJoin) + elif isinstance(join_obj, sql.selectable.FromGrouping): + return self._splice_nested_inner_join( + path, join_obj.element, clauses, onclause, splicing + ) + elif not isinstance(join_obj, orm_util._ORMJoin): + if path[-2] is splicing: + return orm_util._ORMJoin( + join_obj, clauses.aliased_class, + onclause, isouter=False, + _left_memo=splicing, + _right_memo=path[-1].mapper + ) + else: + # only here if splicing == True + return None + + target_join = self._splice_nested_inner_join( + path, join_obj.right, clauses, + onclause, join_obj._right_memo) + if target_join is None: + right_splice = False + target_join = self._splice_nested_inner_join( + path, join_obj.left, clauses, + onclause, join_obj._left_memo) + if target_join is None: + # should only return None when recursively called, + # e.g. splicing==True + assert splicing is not False, \ + "assertion failed attempting to produce joined eager loads" + return None + else: + right_splice = True + + if right_splice: + # for a right splice, attempt to flatten out + # a JOIN b JOIN c JOIN .. 
to avoid needless + # parenthesis nesting + if not join_obj.isouter and not target_join.isouter: + eagerjoin = join_obj._splice_into_center(target_join) + else: + eagerjoin = orm_util._ORMJoin( + join_obj.left, target_join, + join_obj.onclause, isouter=join_obj.isouter, + _left_memo=join_obj._left_memo) + else: + eagerjoin = orm_util._ORMJoin( + target_join, join_obj.right, + join_obj.onclause, isouter=join_obj.isouter, + _right_memo=join_obj._right_memo) + + eagerjoin._target_adapter = target_join._target_adapter + return eagerjoin + + def _create_eager_adapter(self, context, result, adapter, path, loadopt): user_defined_adapter = self._init_user_defined_eager_proc( loadopt, context) if loadopt else False @@ -1395,17 +1479,16 @@ class JoinedLoader(AbstractRelationshipLoader): if decorator is None: return False - try: - self.mapper.identity_key_from_row(row, decorator) + if self.mapper._result_has_identity_key(result, decorator): return decorator - except KeyError: + else: # no identity key - don't return a row # processor, will cause a degrade to lazy return False def create_row_processor( self, context, path, loadopt, mapper, - row, adapter): + result, adapter, populators): if not self.parent.class_manager[self.key].impl.supports_population: raise sa_exc.InvalidRequestError( "'%s' does not support object " @@ -1417,36 +1500,40 @@ class JoinedLoader(AbstractRelationshipLoader): eager_adapter = self._create_eager_adapter( context, - row, + result, adapter, our_path, loadopt) if eager_adapter is not False: key = self.key - _instance = loading.instance_processor( + _instance = loading._instance_processor( self.mapper, context, + result, our_path[self.mapper], eager_adapter) if not self.uselist: - return self._create_scalar_loader(context, key, _instance) + self._create_scalar_loader(context, key, _instance, populators) else: - return self._create_collection_loader(context, key, _instance) + self._create_collection_loader( + context, key, _instance, populators) else: - return self.parent_property._get_strategy_by_cls(LazyLoader).\ + self.parent_property._get_strategy_by_cls(LazyLoader).\ create_row_processor( context, path, loadopt, - mapper, row, adapter) + mapper, result, adapter, populators) - def _create_collection_loader(self, context, key, _instance): + def _create_collection_loader(self, context, key, _instance, populators): def load_collection_from_joined_new_row(state, dict_, row): collection = attributes.init_state_collection( state, dict_, key) result_list = util.UniqueAppender(collection, 'append_without_event') context.attributes[(state, key)] = result_list - _instance(row, result_list) + inst = _instance(row) + if inst is not None: + result_list.append(inst) def load_collection_from_joined_existing_row(state, dict_, row): if (state, key) in context.attributes: @@ -1462,25 +1549,30 @@ class JoinedLoader(AbstractRelationshipLoader): collection, 'append_without_event') context.attributes[(state, key)] = result_list - _instance(row, result_list) + inst = _instance(row) + if inst is not None: + result_list.append(inst) def load_collection_from_joined_exec(state, dict_, row): - _instance(row, None) + _instance(row) - return load_collection_from_joined_new_row, \ - load_collection_from_joined_existing_row, \ - None, load_collection_from_joined_exec + populators["new"].append((self.key, load_collection_from_joined_new_row)) + populators["existing"].append( + (self.key, load_collection_from_joined_existing_row)) + if context.invoke_all_eagers: + populators["eager"].append( + (self.key, 
load_collection_from_joined_exec)) - def _create_scalar_loader(self, context, key, _instance): + def _create_scalar_loader(self, context, key, _instance, populators): def load_scalar_from_joined_new_row(state, dict_, row): # set a scalar object instance directly on the parent # object, bypassing InstrumentedAttribute event handlers. - dict_[key] = _instance(row, None) + dict_[key] = _instance(row) def load_scalar_from_joined_existing_row(state, dict_, row): # call _instance on the row, even though the object has # been created, so that we further descend into properties - existing = _instance(row, None) + existing = _instance(row) if existing is not None \ and key in dict_ \ and existing is not dict_[key]: @@ -1490,11 +1582,13 @@ class JoinedLoader(AbstractRelationshipLoader): % self) def load_scalar_from_joined_exec(state, dict_, row): - _instance(row, None) + _instance(row) - return load_scalar_from_joined_new_row, \ - load_scalar_from_joined_existing_row, \ - None, load_scalar_from_joined_exec + populators["new"].append((self.key, load_scalar_from_joined_new_row)) + populators["existing"].append( + (self.key, load_scalar_from_joined_existing_row)) + if context.invoke_all_eagers: + populators["eager"].append((self.key, load_scalar_from_joined_exec)) def single_parent_validator(desc, prop): diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py b/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py index 392f7ce..141f867 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py @@ -1,5 +1,4 @@ -# orm/strategy_options.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -89,6 +88,7 @@ class Load(Generative, MapperOption): cloned.local_opts = {} return cloned + _merge_into_path = False strategy = None propagate_to_loaders = False @@ -162,11 +162,14 @@ class Load(Generative, MapperOption): ext_info = inspect(ac) path_element = ext_info.mapper + existing = path.entity_path[prop].get( + self.context, "path_with_polymorphic") if not ext_info.is_aliased_class: ac = orm_util.with_polymorphic( ext_info.mapper.base_mapper, ext_info.mapper, aliased=True, - _use_mapper_path=True) + _use_mapper_path=True, + _existing_alias=existing) path.entity_path[prop].set( self.context, "path_with_polymorphic", inspect(ac)) path = path[prop][path_element] @@ -177,6 +180,9 @@ class Load(Generative, MapperOption): path = path.entity_path return path + def __str__(self): + return "Load(strategy=%r)" % (self.strategy, ) + def _coerce_strat(self, strategy): if strategy is not None: strategy = tuple(sorted(strategy.items())) @@ -209,7 +215,15 @@ class Load(Generative, MapperOption): cloned._set_path_strategy() def _set_path_strategy(self): - if self.path.has_entity: + if self._merge_into_path: + # special helper for undefer_group + existing = self.path.get(self.context, "loader") + if existing: + existing.local_opts.update(self.local_opts) + else: + self.path.set(self.context, "loader", self) + + elif self.path.has_entity: self.path.parent.set(self.context, "loader", self) else: self.path.set(self.context, "loader", self) @@ -359,6 +373,7 @@ class _UnboundLoad(Load): return None token = start_path[0] + if isinstance(token, util.string_types): entity = self._find_entity_basestring(query, token, raiseerr) elif isinstance(token, PropComparator): @@ -402,10 
+417,27 @@ class _UnboundLoad(Load): # prioritize "first class" options over those # that were "links in the chain", e.g. "x" and "y" in # someload("x.y.z") versus someload("x") / someload("x.y") - if self._is_chain_link: - effective_path.setdefault(context, "loader", loader) + + if effective_path.is_token: + for path in effective_path.generate_for_superclasses(): + if self._merge_into_path: + # special helper for undefer_group + existing = path.get(context, "loader") + if existing: + existing.local_opts.update(self.local_opts) + else: + path.set(context, "loader", loader) + elif self._is_chain_link: + path.setdefault(context, "loader", loader) + else: + path.set(context, "loader", loader) else: - effective_path.set(context, "loader", loader) + # only supported for the undefer_group() wildcard opt + assert not self._merge_into_path + if self._is_chain_link: + effective_path.setdefault(context, "loader", loader) + else: + effective_path.set(context, "loader", loader) def _find_entity_prop_comparator(self, query, token, mapper, raiseerr): if _is_aliased_class(mapper): @@ -631,15 +663,47 @@ def joinedload(loadopt, attr, innerjoin=None): query(Order).options(joinedload(Order.user, innerjoin=True)) - If the joined-eager load is chained onto an existing LEFT OUTER JOIN, - ``innerjoin=True`` will be bypassed and the join will continue to - chain as LEFT OUTER JOIN so that the results don't change. As an - alternative, specify the value ``"nested"``. This will instead nest the - join on the right side, e.g. using the form "a LEFT OUTER JOIN - (b JOIN c)". + In order to chain multiple eager joins together where some may be + OUTER and others INNER, right-nested joins are used to link them:: - .. versionadded:: 0.9.4 Added ``innerjoin="nested"`` option to support - nesting of eager "inner" joins. + query(A).options( + joinedload(A.bs, innerjoin=False). + joinedload(B.cs, innerjoin=True) + ) + + The above query, linking A.bs via "outer" join and B.cs via "inner" join + would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When using + SQLite, this form of JOIN is translated to use full subqueries as this + syntax is otherwise not directly supported. + + The ``innerjoin`` flag can also be stated with the term ``"unnested"``. + This will prevent joins from being right-nested, and will instead + link an "innerjoin" eagerload to an "outerjoin" eagerload by bypassing + the "inner" join. Using this form as follows:: + + query(A).options( + joinedload(A.bs, innerjoin=False). + joinedload(B.cs, innerjoin="unnested") + ) + + Joins will be rendered as "a LEFT OUTER JOIN b LEFT OUTER JOIN c", so that + all of "a" is matched rather than being incorrectly limited by a "b" that + does not contain a "c". + + .. note:: The "unnested" flag does **not** affect the JOIN rendered + from a many-to-many association table, e.g. a table configured + as :paramref:`.relationship.secondary`, to the target table; for + correctness of results, these joins are always INNER and are + therefore right-nested if linked to an OUTER join. + + .. versionadded:: 0.9.4 Added support for "nesting" of eager "inner" + joins. See :ref:`feature_2976`. + + .. versionchanged:: 1.0.0 ``innerjoin=True`` now implies + ``innerjoin="nested"``, whereas in 0.9 it implied + ``innerjoin="unnested"``. In order to achieve the pre-1.0 "unnested" + inner join behavior, use the value ``innerjoin="unnested"``. + See :ref:`migration_3008`. .. 
note:: @@ -979,10 +1043,11 @@ def undefer_group(loadopt, name): :func:`.orm.undefer` """ + loadopt._merge_into_path = True return loadopt.set_column_strategy( "*", None, - {"undefer_group": name} + {"undefer_group_%s" % name: True} ) diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/sync.py b/lib/python3.5/site-packages/sqlalchemy/orm/sync.py index 2e897c4..ccca508 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/sync.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/sync.py @@ -1,5 +1,5 @@ # orm/sync.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -45,11 +45,28 @@ def populate(source, source_mapper, dest, dest_mapper, uowcommit.attributes[("pk_cascaded", dest, r)] = True +def bulk_populate_inherit_keys( + source_dict, source_mapper, synchronize_pairs): + # a simplified version of populate() used by bulk insert mode + for l, r in synchronize_pairs: + try: + prop = source_mapper._columntoproperty[l] + value = source_dict[prop.key] + except exc.UnmappedColumnError: + _raise_col_to_prop(False, source_mapper, l, source_mapper, r) + + try: + prop = source_mapper._columntoproperty[r] + source_dict[prop.key] = value + except exc.UnmappedColumnError: + _raise_col_to_prop(True, source_mapper, l, source_mapper, r) + + def clear(dest, dest_mapper, synchronize_pairs): for l, r in synchronize_pairs: if r.primary_key and \ dest_mapper._get_state_attr_by_column( - dest, dest.dict, r) is not None: + dest, dest.dict, r) not in orm_util._none_set: raise AssertionError( "Dependency rule tried to blank-out primary key " @@ -68,7 +85,7 @@ def update(source, source_mapper, dest, old_prefix, synchronize_pairs): oldvalue = source_mapper._get_committed_attr_by_column( source.obj(), l) value = source_mapper._get_state_attr_by_column( - source, source.dict, l) + source, source.dict, l, passive=attributes.PASSIVE_OFF) except exc.UnmappedColumnError: _raise_col_to_prop(False, source_mapper, l, None, r) dest[r.key] = value @@ -79,7 +96,7 @@ def populate_dict(source, source_mapper, dict_, synchronize_pairs): for l, r in synchronize_pairs: try: value = source_mapper._get_state_attr_by_column( - source, source.dict, l) + source, source.dict, l, passive=attributes.PASSIVE_OFF) except exc.UnmappedColumnError: _raise_col_to_prop(False, source_mapper, l, None, r) diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/unitofwork.py b/lib/python3.5/site-packages/sqlalchemy/orm/unitofwork.py index 71e6182..8b4ae64 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/unitofwork.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/unitofwork.py @@ -1,5 +1,5 @@ # orm/unitofwork.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -16,6 +16,7 @@ organizes them in order of dependency, and executes. from .. import util, event from ..util import topological from . import attributes, persistence, util as orm_util +import itertools def track_cascade_events(descriptor, prop): @@ -379,14 +380,19 @@ class UOWTransaction(object): execute() method has succeeded and the transaction has been committed. 
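With the ``undefer_group()`` change above, the option is recorded as ``undefer_group_<name>`` and merged into any existing loader option on the path, so several groups can be undeferred together. A usage sketch; the ``Book`` mapping here is an assumption for the example::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session, deferred, undefer_group

    Base = declarative_base()

    class Book(Base):
        __tablename__ = 'book'
        id = Column(Integer, primary_key=True)
        summary = deferred(Column(String), group='summaries')
        excerpt = deferred(Column(String), group='summaries')

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = Session(engine)
    session.add(Book(summary='s', excerpt='e'))
    session.commit()

    # both columns in the 'summaries' group load up front
    book = session.query(Book).options(undefer_group('summaries')).first()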
""" + if not self.states: + return + states = set(self.states) isdel = set( s for (s, (isdelete, listonly)) in self.states.items() if isdelete ) other = states.difference(isdel) - self.session._remove_newly_deleted(isdel) - self.session._register_newly_persistent(other) + if isdel: + self.session._remove_newly_deleted(isdel) + if other: + self.session._register_newly_persistent(other) class IterateMappersMixin(object): diff --git a/lib/python3.5/site-packages/sqlalchemy/orm/util.py b/lib/python3.5/site-packages/sqlalchemy/orm/util.py index 215de5f..42fadca 100644 --- a/lib/python3.5/site-packages/sqlalchemy/orm/util.py +++ b/lib/python3.5/site-packages/sqlalchemy/orm/util.py @@ -1,5 +1,5 @@ # orm/util.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -13,9 +13,9 @@ from . import attributes import re from .base import instance_str, state_str, state_class_str, attribute_str, \ - state_attribute_str, object_mapper, object_state, _none_set + state_attribute_str, object_mapper, object_state, _none_set, _never_set from .base import class_mapper, _class_to_mapper -from .base import _InspectionAttr +from .base import InspectionAttr from .path_registry import PathRegistry all_cascades = frozenset(("delete", "delete-orphan", "all", "merge", @@ -30,21 +30,19 @@ class CascadeOptions(frozenset): 'all', 'none', 'delete-orphan']) _allowed_cascades = all_cascades - def __new__(cls, arg): - values = set([ - c for c - in re.split('\s*,\s*', arg or "") - if c - ]) + __slots__ = ( + 'save_update', 'delete', 'refresh_expire', 'merge', + 'expunge', 'delete_orphan') + def __new__(cls, value_list): + if isinstance(value_list, util.string_types) or value_list is None: + return cls.from_string(value_list) + values = set(value_list) if values.difference(cls._allowed_cascades): raise sa_exc.ArgumentError( "Invalid cascade option(s): %s" % ", ".join([repr(x) for x in - sorted( - values.difference(cls._allowed_cascades) - )]) - ) + sorted(values.difference(cls._allowed_cascades))])) if "all" in values: values.update(cls._add_w_all_cascades) @@ -70,6 +68,15 @@ class CascadeOptions(frozenset): ",".join([x for x in sorted(self)]) ) + @classmethod + def from_string(cls, arg): + values = [ + c for c + in re.split('\s*,\s*', arg or "") + if c + ] + return cls(values) + def _validator_events( desc, key, validator, include_removes, include_backrefs): @@ -270,15 +277,14 @@ first() class ORMAdapter(sql_util.ColumnAdapter): - """Extends ColumnAdapter to accept ORM entities. - - The selectable is extracted from the given entity, - and the AliasedClass if any is referenced. + """ColumnAdapter subclass which excludes adaptation of entities from + non-matching mappers. 
""" def __init__(self, entity, equivalents=None, adapt_required=False, - chain_to=None): + chain_to=None, allow_label_resolve=True, + anonymize_labels=False): info = inspection.inspect(entity) self.mapper = info.mapper @@ -288,16 +294,18 @@ class ORMAdapter(sql_util.ColumnAdapter): self.aliased_class = entity else: self.aliased_class = None - sql_util.ColumnAdapter.__init__(self, selectable, - equivalents, chain_to, - adapt_required=adapt_required) - def replace(self, elem): + sql_util.ColumnAdapter.__init__( + self, selectable, equivalents, chain_to, + adapt_required=adapt_required, + allow_label_resolve=allow_label_resolve, + anonymize_labels=anonymize_labels, + include_fn=self._include_fn + ) + + def _include_fn(self, elem): entity = elem._annotations.get('parentmapper', None) - if not entity or entity.isa(self.mapper): - return sql_util.ColumnAdapter.replace(self, elem) - else: - return None + return not entity or entity.isa(self.mapper) class AliasedClass(object): @@ -354,6 +362,7 @@ class AliasedClass(object): if alias is None: alias = mapper._with_polymorphic_selectable.alias( name=name, flat=flat) + self._aliased_insp = AliasedInsp( self, mapper, @@ -412,7 +421,7 @@ class AliasedClass(object): id(self), self._aliased_insp._target.__name__) -class AliasedInsp(_InspectionAttr): +class AliasedInsp(InspectionAttr): """Provide an inspection interface for an :class:`.AliasedClass` object. @@ -460,9 +469,9 @@ class AliasedInsp(_InspectionAttr): self._base_alias = _base_alias or self self._use_mapper_path = _use_mapper_path - self._adapter = sql_util.ClauseAdapter( + self._adapter = sql_util.ColumnAdapter( selectable, equivalents=mapper._equivalent_columns, - adapt_on_names=adapt_on_names) + adapt_on_names=adapt_on_names, anonymize_labels=True) self._adapt_on_names = adapt_on_names self._target = mapper.class_ @@ -521,14 +530,18 @@ class AliasedInsp(_InspectionAttr): def _adapt_element(self, elem): return self._adapter.traverse(elem).\ _annotate({ - 'parententity': self.entity, + 'parententity': self, 'parentmapper': self.mapper} ) def _entity_for_mapper(self, mapper): self_poly = self.with_polymorphic_mappers if mapper in self_poly: - return getattr(self.entity, mapper.class_.__name__)._aliased_insp + if mapper is self.mapper: + return self + else: + return getattr( + self.entity, mapper.class_.__name__)._aliased_insp elif mapper.isa(self.mapper): return self else: @@ -536,8 +549,13 @@ class AliasedInsp(_InspectionAttr): mapper, self) def __repr__(self): - return '' % ( - id(self), self.class_.__name__) + if self.with_polymorphic_mappers: + with_poly = "(%s)" % ", ".join( + mp.class_.__name__ for mp in self.with_polymorphic_mappers) + else: + with_poly = "" + return '' % ( + id(self), self.class_.__name__, with_poly) inspection._inspects(AliasedClass)(lambda target: target._aliased_insp) @@ -641,7 +659,8 @@ def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False): def with_polymorphic(base, classes, selectable=False, flat=False, polymorphic_on=None, aliased=False, - innerjoin=False, _use_mapper_path=False): + innerjoin=False, _use_mapper_path=False, + _existing_alias=None): """Produce an :class:`.AliasedClass` construct which specifies columns for descendant mappers of the given base. 
@@ -706,6 +725,16 @@ def with_polymorphic(base, classes, selectable=False, only be specified if querying for one specific subtype only """ primary_mapper = _class_to_mapper(base) + if _existing_alias: + assert _existing_alias.mapper is primary_mapper + classes = util.to_set(classes) + new_classes = set([ + mp.class_ for mp in + _existing_alias.with_polymorphic_mappers]) + if classes == new_classes: + return _existing_alias + else: + classes = classes.union(new_classes) mappers, selectable = primary_mapper.\ _with_polymorphic_args(classes, selectable, innerjoin=innerjoin) @@ -751,7 +780,10 @@ class _ORMJoin(expression.Join): __visit_name__ = expression.Join.__visit_name__ - def __init__(self, left, right, onclause=None, isouter=False): + def __init__( + self, + left, right, onclause=None, isouter=False, + _left_memo=None, _right_memo=None): left_info = inspection.inspect(left) left_orm_info = getattr(left, '_joined_from_info', left_info) @@ -761,6 +793,9 @@ class _ORMJoin(expression.Join): self._joined_from_info = right_info + self._left_memo = _left_memo + self._right_memo = _right_memo + if isinstance(onclause, util.string_types): onclause = getattr(left_orm_info.entity, onclause) @@ -802,6 +837,43 @@ class _ORMJoin(expression.Join): expression.Join.__init__(self, left, right, onclause, isouter) + if not prop and getattr(right_info, 'mapper', None) \ + and right_info.mapper.single: + # if single inheritance target and we are using a manual + # or implicit ON clause, augment it the same way we'd augment the + # WHERE. + single_crit = right_info.mapper._single_table_criterion + if single_crit is not None: + if right_info.is_aliased_class: + single_crit = right_info._adapter.traverse(single_crit) + self.onclause = self.onclause & single_crit + + def _splice_into_center(self, other): + """Splice a join into the center. + + Given join(a, b) and join(b, c), return join(a, b).join(c) + + """ + leftmost = other + while isinstance(leftmost, sql.Join): + leftmost = leftmost.left + + assert self.right is leftmost + + left = _ORMJoin( + self.left, other.left, + self.onclause, isouter=self.isouter, + _left_memo=self._left_memo, + _right_memo=other._left_memo + ) + + return _ORMJoin( + left, + other.right, + other.onclause, isouter=other.isouter, + _right_memo=other._right_memo + ) + def join(self, right, onclause=None, isouter=False, join_to_left=None): return _ORMJoin(self, right, onclause, isouter) @@ -894,9 +966,7 @@ def with_parent(instance, prop): elif isinstance(prop, attributes.QueryableAttribute): prop = prop.property - return prop.compare(operators.eq, - instance, - value_is_parent=True) + return prop._with_parent(instance) def has_identity(object): diff --git a/lib/python3.5/site-packages/sqlalchemy/pool.py b/lib/python3.5/site-packages/sqlalchemy/pool.py index d26bbf3..32b4736 100644 --- a/lib/python3.5/site-packages/sqlalchemy/pool.py +++ b/lib/python3.5/site-packages/sqlalchemy/pool.py @@ -1,5 +1,5 @@ # sqlalchemy/pool.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -186,6 +186,10 @@ class Pool(log.Identified): database that supports transactions, as it will lead to deadlocks and stale state. + * ``"none"`` - same as ``None`` + + .. versionadded:: 0.9.10 + * ``False`` - same as None, this is here for backwards compatibility. 
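The ``reset_on_return`` values documented above are typically supplied through ``create_engine()`` as ``pool_reset_on_return``; with the change below, the string ``"none"`` becomes an accepted spelling of ``None``. A short example::

    from sqlalchemy import create_engine

    # equivalent ways to disable the reset-of-connection on check-in
    e1 = create_engine("sqlite://", pool_reset_on_return=None)
    e2 = create_engine("sqlite://", pool_reset_on_return="none")  # 0.9.10+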
@@ -220,7 +224,7 @@ class Pool(log.Identified): self._use_threadlocal = use_threadlocal if reset_on_return in ('rollback', True, reset_rollback): self._reset_on_return = reset_rollback - elif reset_on_return in (None, False, reset_none): + elif reset_on_return in ('none', None, False, reset_none): self._reset_on_return = reset_none elif reset_on_return in ('commit', reset_commit): self._reset_on_return = reset_commit @@ -230,6 +234,7 @@ class Pool(log.Identified): % reset_on_return) self.echo = echo + if _dispatch: self.dispatch._update(_dispatch, only_propagate=False) if _dialect: @@ -244,13 +249,46 @@ class Pool(log.Identified): for l in listeners: self.add_listener(l) + @property + def _creator(self): + return self.__dict__['_creator'] + + @_creator.setter + def _creator(self, creator): + self.__dict__['_creator'] = creator + self._invoke_creator = self._should_wrap_creator(creator) + + def _should_wrap_creator(self, creator): + """Detect if creator accepts a single argument, or is sent + as a legacy style no-arg function. + + """ + + try: + argspec = util.get_callable_argspec(self._creator, no_self=True) + except TypeError: + return lambda crec: creator() + + defaulted = argspec[3] is not None and len(argspec[3]) or 0 + positionals = len(argspec[0]) - defaulted + + # look for the exact arg signature that DefaultStrategy + # sends us + if (argspec[0], argspec[3]) == (['connection_record'], (None,)): + return creator + # or just a single positional + elif positionals == 1: + return creator + # all other cases, just wrap and assume legacy "creator" callable + # thing + else: + return lambda crec: creator() + def _close_connection(self, connection): self.logger.debug("Closing connection %r", connection) try: self._dialect.do_close(connection) - except (SystemExit, KeyboardInterrupt): - raise - except: + except Exception: self.logger.error("Exception closing connection %r", connection, exc_info=True) @@ -305,7 +343,7 @@ class Pool(log.Identified): """Return a new :class:`.Pool`, of the same class as this one and configured with identical creation arguments. - This method is used in conjunection with :meth:`dispose` + This method is used in conjunction with :meth:`dispose` to close out an entire :class:`.Pool` and create a new one in its place. @@ -425,6 +463,8 @@ class _ConnectionRecord(object): """ + _soft_invalidate_time = 0 + @util.memoized_property def info(self): """The ``.info`` dictionary associated with the DBAPI connection. @@ -441,18 +481,19 @@ class _ConnectionRecord(object): try: dbapi_connection = rec.get_connection() except: - rec.checkin() - raise - fairy = _ConnectionFairy(dbapi_connection, rec) + with util.safe_reraise(): + rec.checkin() + echo = pool._should_log_debug() + fairy = _ConnectionFairy(dbapi_connection, rec, echo) rec.fairy_ref = weakref.ref( fairy, lambda ref: _finalize_fairy and _finalize_fairy( dbapi_connection, - rec, pool, ref, pool._echo) + rec, pool, ref, echo) ) _refs.add(rec) - if pool._echo: + if echo: pool.logger.debug("Connection %r checked out from pool", dbapi_connection) return fairy @@ -472,7 +513,7 @@ class _ConnectionRecord(object): if self.connection is not None: self.__close() - def invalidate(self, e=None): + def invalidate(self, e=None, soft=False): """Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`. 
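A usage sketch of the ``soft`` flag documented here (added in 1.0.3): the DBAPI connection stays usable for the current checkout and is replaced on the next checkout, rather than being closed immediately::

    from sqlalchemy import create_engine

    engine = create_engine("sqlite://")
    conn = engine.connect()
    conn.connection.invalidate(soft=True)  # _ConnectionFairy.invalidate()
    conn.close()   # checked in; the record recycles it on next checkout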
This method is called for all connection invalidations, including @@ -480,6 +521,13 @@ class _ConnectionRecord(object): :meth:`.Connection.invalidate` methods are called, as well as when any so-called "automatic invalidation" condition occurs. + :param e: an exception object indicating a reason for the invalidation. + + :param soft: if True, the connection isn't closed; instead, this + connection will be recycled on next checkout. + + .. versionadded:: 1.0.3 + .. seealso:: :ref:`pool_connection_invalidation` @@ -488,22 +536,31 @@ class _ConnectionRecord(object): # already invalidated if self.connection is None: return - self.__pool.dispatch.invalidate(self.connection, self, e) + if soft: + self.__pool.dispatch.soft_invalidate(self.connection, self, e) + else: + self.__pool.dispatch.invalidate(self.connection, self, e) if e is not None: self.__pool.logger.info( - "Invalidate connection %r (reason: %s:%s)", + "%sInvalidate connection %r (reason: %s:%s)", + "Soft " if soft else "", self.connection, e.__class__.__name__, e) else: self.__pool.logger.info( - "Invalidate connection %r", self.connection) - self.__close() - self.connection = None + "%sInvalidate connection %r", + "Soft " if soft else "", + self.connection) + if soft: + self._soft_invalidate_time = time.time() + else: + self.__close() + self.connection = None def get_connection(self): recycle = False if self.connection is None: - self.connection = self.__connect() self.info.clear() + self.connection = self.__connect() if self.__pool.dispatch.connect: self.__pool.dispatch.connect(self.connection, self) elif self.__pool._recycle > -1 and \ @@ -519,22 +576,35 @@ class _ConnectionRecord(object): self.connection ) recycle = True + elif self._soft_invalidate_time > self.starttime: + self.__pool.logger.info( + "Connection %r invalidated due to local soft invalidation; " + + "recycling", + self.connection + ) + recycle = True if recycle: self.__close() - self.connection = self.__connect() self.info.clear() + + # ensure that if self.__connect() fails, + # we are not referring to the previous stale connection here + self.connection = None + self.connection = self.__connect() + if self.__pool.dispatch.connect: self.__pool.dispatch.connect(self.connection, self) return self.connection def __close(self): + self.finalize_callback.clear() self.__pool._close_connection(self.connection) def __connect(self): try: self.starttime = time.time() - connection = self.__pool._creator() + connection = self.__pool._invoke_creator(self) self.__pool.logger.debug("Created new connection %r", connection) return connection except Exception as e: @@ -560,19 +630,20 @@ def _finalize_fairy(connection, connection_record, connection) try: - fairy = fairy or _ConnectionFairy(connection, connection_record) + fairy = fairy or _ConnectionFairy( + connection, connection_record, echo) assert fairy.connection is connection - fairy._reset(pool, echo) + fairy._reset(pool) # Immediately close detached instances if not connection_record: pool._close_connection(connection) - except Exception as e: + except BaseException as e: pool.logger.error( "Exception during reset or similar", exc_info=True) if connection_record: connection_record.invalidate(e=e) - if isinstance(e, (SystemExit, KeyboardInterrupt)): + if not isinstance(e, Exception): raise if connection_record: @@ -603,9 +674,10 @@ class _ConnectionFairy(object): """ - def __init__(self, dbapi_connection, connection_record): + def __init__(self, dbapi_connection, connection_record, echo): self.connection = dbapi_connection 
self._connection_record = connection_record + self._echo = echo connection = None """A reference to the actual DBAPI connection being tracked.""" @@ -642,7 +714,6 @@ class _ConnectionFairy(object): fairy._pool = pool fairy._counter = 0 - fairy._echo = pool._should_log_debug() if threadconns is not None: threadconns.current = weakref.ref(fairy) @@ -666,7 +737,13 @@ class _ConnectionFairy(object): pool.logger.info( "Disconnection detected on checkout: %s", e) fairy._connection_record.invalidate(e) - fairy.connection = fairy._connection_record.get_connection() + try: + fairy.connection = \ + fairy._connection_record.get_connection() + except: + with util.safe_reraise(): + fairy._connection_record.checkin() + attempts -= 1 pool.logger.info("Reconnection attempts exhausted on checkout") @@ -684,11 +761,11 @@ class _ConnectionFairy(object): _close = _checkin - def _reset(self, pool, echo): + def _reset(self, pool): if pool.dispatch.reset: pool.dispatch.reset(self, self._connection_record) if pool._reset_on_return is reset_rollback: - if echo: + if self._echo: pool.logger.debug("Connection %s rollback-on-return%s", self.connection, ", via agent" @@ -698,7 +775,7 @@ class _ConnectionFairy(object): else: pool._dialect.do_rollback(self) elif pool._reset_on_return is reset_commit: - if echo: + if self._echo: pool.logger.debug("Connection %s commit-on-return%s", self.connection, ", via agent" @@ -734,7 +811,7 @@ class _ConnectionFairy(object): """ return self._connection_record.info - def invalidate(self, e=None): + def invalidate(self, e=None, soft=False): """Mark this connection as invalidated. This method can be called directly, and is also called as a result @@ -743,6 +820,13 @@ class _ConnectionFairy(object): further use by the pool. The invalidation mechanism proceeds via the :meth:`._ConnectionRecord.invalidate` internal method. + :param e: an exception object indicating a reason for the invalidation. + + :param soft: if True, the connection isn't closed; instead, this + connection will be recycled on next checkout. + + .. versionadded:: 1.0.3 + .. seealso:: :ref:`pool_connection_invalidation` @@ -753,9 +837,10 @@ class _ConnectionFairy(object): util.warn("Can't invalidate an already-closed connection.") return if self._connection_record: - self._connection_record.invalidate(e=e) - self.connection = None - self._checkin() + self._connection_record.invalidate(e=e, soft=soft) + if not soft: + self.connection = None + self._checkin() def cursor(self, *args, **kwargs): """Return a new DBAPI cursor for the underlying connection. @@ -804,6 +889,19 @@ class SingletonThreadPool(Pool): Maintains one connection per each thread, never moving a connection to a thread other than the one which it was created in. + .. warning:: the :class:`.SingletonThreadPool` will call ``.close()`` + on arbitrary connections that exist beyond the size setting of + ``pool_size``, e.g. if more unique **thread identities** + than what ``pool_size`` states are used. This cleanup is + non-deterministic and not sensitive to whether or not the connections + linked to those thread identities are currently in use. + + :class:`.SingletonThreadPool` may be improved in a future release, + however in its current status it is generally used only for test + scenarios using a SQLite ``:memory:`` database and is not recommended + for production use. 
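Given the warning above, a sketch of the typical test-scenario configuration; note that ``pool_size`` here bounds distinct thread identities, not concurrent use::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import SingletonThreadPool

    engine = create_engine(
        "sqlite:///:memory:",
        poolclass=SingletonThreadPool,
        pool_size=5,  # connections beyond this may be close()d arbitrarily
    )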
+ + Options are the same as those of :class:`.Pool`, as well as: :param pool_size: The number of threads in which to maintain connections @@ -840,9 +938,7 @@ class SingletonThreadPool(Pool): for conn in self._all_conns: try: conn.close() - except (SystemExit, KeyboardInterrupt): - raise - except: + except Exception: # pysqlite won't even let you close a conn from a thread # that didn't create it pass @@ -919,9 +1015,9 @@ class QueuePool(Pool): on returning a connection. Defaults to 30. :param \**kw: Other keyword arguments including - :paramref:`.Pool.recycle`, :paramref:`.Pool.echo`, - :paramref:`.Pool.reset_on_return` and others are passed to the - :class:`.Pool` constructor. + :paramref:`.Pool.recycle`, :paramref:`.Pool.echo`, + :paramref:`.Pool.reset_on_return` and others are passed to the + :class:`.Pool` constructor. """ Pool.__init__(self, creator, **kw) @@ -960,8 +1056,8 @@ class QueuePool(Pool): try: return self._create_connection() except: - self._dec_overflow() - raise + with util.safe_reraise(): + self._dec_overflow() else: return self._do_get() diff --git a/lib/python3.5/site-packages/sqlalchemy/processors.py b/lib/python3.5/site-packages/sqlalchemy/processors.py index 3794b01..b57e674 100644 --- a/lib/python3.5/site-packages/sqlalchemy/processors.py +++ b/lib/python3.5/site-packages/sqlalchemy/processors.py @@ -1,5 +1,5 @@ # sqlalchemy/processors.py -# Copyright (C) 2010-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2016 the SQLAlchemy authors and contributors # # Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com # diff --git a/lib/python3.5/site-packages/sqlalchemy/schema.py b/lib/python3.5/site-packages/sqlalchemy/schema.py index 4b6ad19..5b703f7 100644 --- a/lib/python3.5/site-packages/sqlalchemy/schema.py +++ b/lib/python3.5/site-packages/sqlalchemy/schema.py @@ -1,5 +1,5 @@ # schema.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -35,6 +35,7 @@ from .sql.schema import ( UniqueConstraint, _get_table_key, ColumnCollectionConstraint, + ColumnCollectionMixin ) @@ -58,5 +59,7 @@ from .sql.ddl import ( DDLBase, DDLElement, _CreateDropBase, - _DDLCompiles + _DDLCompiles, + sort_tables, + sort_tables_and_constraints ) diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/__init__.py b/lib/python3.5/site-packages/sqlalchemy/sql/__init__.py index 4d01385..eb305a8 100644 --- a/lib/python3.5/site-packages/sqlalchemy/sql/__init__.py +++ b/lib/python3.5/site-packages/sqlalchemy/sql/__init__.py @@ -1,5 +1,5 @@ # sql/__init__.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -38,6 +38,7 @@ from .expression import ( false, False_, func, + funcfilter, insert, intersect, intersect_all, diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/annotation.py b/lib/python3.5/site-packages/sqlalchemy/sql/annotation.py index 02f5c3c..6ad25ab 100644 --- a/lib/python3.5/site-packages/sqlalchemy/sql/annotation.py +++ b/lib/python3.5/site-packages/sqlalchemy/sql/annotation.py @@ -1,5 +1,5 @@ # sql/annotation.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -46,6 +46,7 @@ class Annotated(object): self.__dict__ = 
element.__dict__.copy() self.__element = element self._annotations = values + self._hash = hash(element) def _annotate(self, values): _values = self._annotations.copy() @@ -87,7 +88,7 @@ class Annotated(object): return self.__class__(clone, self._annotations) def __hash__(self): - return hash(self.__element) + return self._hash def __eq__(self, other): if isinstance(self.__element, operators.ColumnOperators): diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/base.py b/lib/python3.5/site-packages/sqlalchemy/sql/base.py index 5358d95..cf7dcfd 100644 --- a/lib/python3.5/site-packages/sqlalchemy/sql/base.py +++ b/lib/python3.5/site-packages/sqlalchemy/sql/base.py @@ -1,5 +1,5 @@ # sql/base.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -449,10 +449,13 @@ class ColumnCollection(util.OrderedProperties): """ - def __init__(self): + __slots__ = '_all_columns' + + def __init__(self, *columns): super(ColumnCollection, self).__init__() - self.__dict__['_all_col_set'] = util.column_set() - self.__dict__['_all_columns'] = [] + object.__setattr__(self, '_all_columns', []) + for c in columns: + self.add(c) def __str__(self): return repr([str(c) for c in self]) @@ -478,14 +481,11 @@ class ColumnCollection(util.OrderedProperties): other = self[column.name] if other.name == other.key: remove_col = other - self._all_col_set.remove(other) del self._data[other.key] if column.key in self._data: remove_col = self._data[column.key] - self._all_col_set.remove(remove_col) - self._all_col_set.add(column) self._data[column.key] = column if remove_col is not None: self._all_columns[:] = [column if c is remove_col @@ -530,7 +530,6 @@ class ColumnCollection(util.OrderedProperties): # in a _make_proxy operation util.memoized_property.reset(value, "proxy_set") - self._all_col_set.add(value) self._all_columns.append(value) self._data[key] = value @@ -539,22 +538,20 @@ class ColumnCollection(util.OrderedProperties): def remove(self, column): del self._data[column.key] - self._all_col_set.remove(column) self._all_columns[:] = [ c for c in self._all_columns if c is not column] def update(self, iter): cols = list(iter) + all_col_set = set(self._all_columns) self._all_columns.extend( - c for label, c in cols if c not in self._all_col_set) - self._all_col_set.update(c for label, c in cols) + c for label, c in cols if c not in all_col_set) self._data.update((label, c) for label, c in cols) def extend(self, iter): cols = list(iter) - self._all_columns.extend(c for c in cols if c not in - self._all_col_set) - self._all_col_set.update(cols) + all_col_set = set(self._all_columns) + self._all_columns.extend(c for c in cols if c not in all_col_set) self._data.update((c.key, c) for c in cols) __hash__ = None @@ -574,28 +571,24 @@ class ColumnCollection(util.OrderedProperties): return util.OrderedProperties.__contains__(self, other) def __getstate__(self): - return {'_data': self.__dict__['_data'], - '_all_columns': self.__dict__['_all_columns']} + return {'_data': self._data, + '_all_columns': self._all_columns} def __setstate__(self, state): - self.__dict__['_data'] = state['_data'] - self.__dict__['_all_columns'] = state['_all_columns'] - self.__dict__['_all_col_set'] = util.column_set(state['_all_columns']) + object.__setattr__(self, '_data', state['_data']) + object.__setattr__(self, '_all_columns', state['_all_columns']) def contains_column(self, col): - # this has to be 
done via set() membership - return col in self._all_col_set + return col in set(self._all_columns) def as_immutable(self): - return ImmutableColumnCollection( - self._data, self._all_col_set, self._all_columns) + return ImmutableColumnCollection(self._data, self._all_columns) class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection): - def __init__(self, data, colset, all_columns): + def __init__(self, data, all_columns): util.ImmutableProperties.__init__(self, data) - self.__dict__['_all_col_set'] = colset - self.__dict__['_all_columns'] = all_columns + object.__setattr__(self, '_all_columns', all_columns) extend = remove = util.ImmutableProperties._immutable diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py b/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py index ca03b62..722beb1 100644 --- a/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py +++ b/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py @@ -1,5 +1,5 @@ # sql/compiler.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -23,13 +23,12 @@ To generate user-defined SQL strings, see """ +import contextlib import re -from . import schema, sqltypes, operators, functions, \ - util as sql_util, visitors, elements, selectable, base +from . import schema, sqltypes, operators, functions, visitors, \ + elements, selectable, crud from .. import util, exc -import decimal import itertools -import operator RESERVED_WORDS = set([ 'all', 'analyse', 'analyze', 'and', 'any', 'array', @@ -54,7 +53,7 @@ LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I) ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in range(0, 10)]).union(['$']) BIND_PARAMS = re.compile(r'(? 
self.label_length: + if len(anonname) > self.label_length - 6: counter = self.truncated_names.get(ident_class, 1) truncname = anonname[0:max(self.label_length - 6, 0)] + \ "_" + hex(counter)[2:] @@ -1118,7 +1198,7 @@ class SQLCompiler(Compiled): if cte._cte_alias is not None: orig_cte = cte._cte_alias if orig_cte not in self.ctes: - self.visit_cte(orig_cte) + self.visit_cte(orig_cte, **kwargs) cte_alias_name = cte._cte_alias.name if isinstance(cte_alias_name, elements._truncated_label): cte_alias_name = self._truncated_identifier( @@ -1153,12 +1233,16 @@ class SQLCompiler(Compiled): self, asfrom=True, **kwargs ) + if cte._suffixes: + text += " " + self._generate_prefixes( + cte, cte._suffixes, **kwargs) + self.ctes[cte] = text if asfrom: if cte_alias_name: text = self.preparer.format_alias(cte, cte_alias_name) - text += " AS " + cte_name + text += self.get_render_as_alias_suffix(cte_name) else: return self.preparer.format_alias(cte, cte_name) return text @@ -1177,8 +1261,8 @@ class SQLCompiler(Compiled): elif asfrom: ret = alias.original._compiler_dispatch(self, asfrom=True, **kwargs) + \ - " AS " + \ - self.preparer.format_alias(alias, alias_name) + self.get_render_as_alias_suffix( + self.preparer.format_alias(alias, alias_name)) if fromhints and alias in fromhints: ret = self.format_from_hint_text(ret, alias, @@ -1188,19 +1272,11 @@ class SQLCompiler(Compiled): else: return alias.original._compiler_dispatch(self, **kwargs) - def _add_to_result_map(self, keyname, name, objects, type_): - if not self.dialect.case_sensitive: - keyname = keyname.lower() + def get_render_as_alias_suffix(self, alias_name_text): + return " AS " + alias_name_text - if keyname in self.result_map: - # conflicting keyname, just double up the list - # of objects. this will cause an "ambiguous name" - # error if an attempt is made by the result set to - # access. - e_name, e_obj, e_type = self.result_map[keyname] - self.result_map[keyname] = e_name, e_obj + objects, e_type - else: - self.result_map[keyname] = name, objects, type_ + def _add_to_result_map(self, keyname, name, objects, type_): + self._result_columns.append((keyname, name, objects, type_)) def _label_select_column(self, select, column, populate_result_map, @@ -1251,10 +1327,17 @@ class SQLCompiler(Compiled): result_expr = _CompileLabel(col_expr, elements._as_truncated(column.name), alt_names=(column.key,)) - elif not isinstance(column, - (elements.UnaryExpression, elements.TextClause)) \ - and (not hasattr(column, 'name') or - isinstance(column, functions.Function)): + elif ( + not isinstance(column, elements.TextClause) and + ( + not isinstance(column, elements.UnaryExpression) or + column.wraps_column_expression + ) and + ( + not hasattr(column, 'name') or + isinstance(column, functions.Function) + ) + ): result_expr = _CompileLabel(col_expr, column.anon_label) elif col_expr is not column: # TODO: are we sure "column" has a .name and .key here ? 
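The WITH-clause rendering handled by ``visit_cte`` above is driven from ordinary Core constructs; the new ``_suffixes`` hook it consults is populated by the generative ``suffix_with()`` method. A small sketch with made-up table and column names:

    from sqlalchemy.sql import column, select, table

    orders = table("orders", column("region"), column("amount"))

    regional = select([orders.c.region, orders.c.amount]).cte("regional")
    stmt = select([regional.c.region]).where(regional.c.amount > 100)

    # Renders approximately:
    #   WITH regional AS
    #   (SELECT orders.region, orders.amount FROM orders)
    #   SELECT regional.region FROM regional
    #   WHERE regional.amount > :amount_1
    print(stmt)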
@@ -1289,6 +1372,9 @@ class SQLCompiler(Compiled): def get_crud_hint_text(self, table, text): return None + def get_statement_hint_text(self, hint_texts): + return " ".join(hint_texts) + def _transform_select_for_nested_joins(self, select): """Rewrite any "a JOIN (b JOIN c)" expression as "a JOIN (select * from b JOIN c) AS anon", to support @@ -1387,12 +1473,13 @@ class SQLCompiler(Compiled): (inner_col[c._key_label], c) for c in select.inner_columns ) - for key, (name, objs, typ) in list(self.result_map.items()): - objs = tuple([d.get(col, col) for col in objs]) - self.result_map[key] = (name, objs, typ) + + self._result_columns = [ + (key, name, tuple([d.get(col, col) for col in objs]), typ) + for key, name, objs, typ in self._result_columns + ] _default_stack_entry = util.immutabledict([ - ('iswrapper', False), ('correlate_froms', frozenset()), ('asfrom_froms', frozenset()) ]) @@ -1420,10 +1507,10 @@ class SQLCompiler(Compiled): return froms def visit_select(self, select, asfrom=False, parens=True, - iswrapper=False, fromhints=None, + fromhints=None, compound_index=0, - force_result_map=False, nested_join_translation=False, + select_wraps_for=None, **kwargs): needs_nested_translation = \ @@ -1437,21 +1524,25 @@ class SQLCompiler(Compiled): select) text = self.visit_select( transformed_select, asfrom=asfrom, parens=parens, - iswrapper=iswrapper, fromhints=fromhints, + fromhints=fromhints, compound_index=compound_index, - force_result_map=force_result_map, nested_join_translation=True, **kwargs ) toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] - populate_result_map = force_result_map or ( - compound_index == 0 and ( - toplevel or - entry['iswrapper'] - ) - ) + populate_result_map = toplevel or \ + ( + compound_index == 0 and entry.get( + 'need_result_map_for_compound', False) + ) or entry.get('need_result_map_for_nested', False) + + # this was first proposed as part of #3372; however, it is not + # reached in current tests and could possibly be an assertion + # instead. + if not populate_result_map and 'add_to_result_map' in kwargs: + del kwargs['add_to_result_map'] if needs_nested_translation: if populate_result_map: @@ -1459,6 +1550,114 @@ class SQLCompiler(Compiled): select, transformed_select) return text + froms = self._setup_select_stack(select, entry, asfrom) + + column_clause_args = kwargs.copy() + column_clause_args.update({ + 'within_label_clause': False, + 'within_columns_clause': False + }) + + text = "SELECT " # we're off to a good start ! + + if select._hints: + hint_text, byfrom = self._setup_select_hints(select) + if hint_text: + text += hint_text + " " + else: + byfrom = None + + if select._prefixes: + text += self._generate_prefixes( + select, select._prefixes, **kwargs) + + text += self.get_select_precolumns(select, **kwargs) + + # the actual list of columns to print in the SELECT column list. 
+ inner_columns = [ + c for c in [ + self._label_select_column( + select, + column, + populate_result_map, asfrom, + column_clause_args, + name=name) + for name, column in select._columns_plus_names + ] + if c is not None + ] + + if populate_result_map and select_wraps_for is not None: + # if this select is a compiler-generated wrapper, + # rewrite the targeted columns in the result map + wrapped_inner_columns = set(select_wraps_for.inner_columns) + translate = dict( + (outer, inner.pop()) for outer, inner in [ + ( + outer, + outer.proxy_set.intersection(wrapped_inner_columns)) + for outer in select.inner_columns + ] if inner + ) + self._result_columns = [ + (key, name, tuple(translate.get(o, o) for o in obj), type_) + for key, name, obj, type_ in self._result_columns + ] + + text = self._compose_select_body( + text, select, inner_columns, froms, byfrom, kwargs) + + if select._statement_hints: + per_dialect = [ + ht for (dialect_name, ht) + in select._statement_hints + if dialect_name in ('*', self.dialect.name) + ] + if per_dialect: + text += " " + self.get_statement_hint_text(per_dialect) + + if self.ctes and self._is_toplevel_select(select): + text = self._render_cte_clause() + text + + if select._suffixes: + text += " " + self._generate_prefixes( + select, select._suffixes, **kwargs) + + self.stack.pop(-1) + + if asfrom and parens: + return "(" + text + ")" + else: + return text + + def _is_toplevel_select(self, select): + """Return True if the stack is placed at the given select, and + is also the outermost SELECT, meaning there is either no stack + before this one, or the enclosing stack is a topmost INSERT. + + """ + return ( + self.stack[-1]['selectable'] is select and + ( + len(self.stack) == 1 or self.isinsert and len(self.stack) == 2 + and self.statement is self.stack[0]['selectable'] + ) + ) + + def _setup_select_hints(self, select): + byfrom = dict([ + (from_, hinttext % { + 'name': from_._compiler_dispatch( + self, ashint=True) + }) + for (from_, dialect), hinttext in + select._hints.items() + if dialect in ('*', self.dialect.name) + ]) + hint_text = self.get_select_hint_text(byfrom) + return hint_text, byfrom + + def _setup_select_stack(self, select, entry, asfrom): correlate_froms = entry['correlate_froms'] asfrom_froms = entry['asfrom_froms'] @@ -1477,52 +1676,14 @@ class SQLCompiler(Compiled): new_entry = { 'asfrom_froms': new_correlate_froms, - 'iswrapper': iswrapper, - 'correlate_froms': all_correlate_froms + 'correlate_froms': all_correlate_froms, + 'selectable': select, } self.stack.append(new_entry) + return froms - column_clause_args = kwargs.copy() - column_clause_args.update({ - 'within_label_clause': False, - 'within_columns_clause': False - }) - - text = "SELECT " # we're off to a good start ! - - if select._hints: - byfrom = dict([ - (from_, hinttext % { - 'name': from_._compiler_dispatch( - self, ashint=True) - }) - for (from_, dialect), hinttext in - select._hints.items() - if dialect in ('*', self.dialect.name) - ]) - hint_text = self.get_select_hint_text(byfrom) - if hint_text: - text += hint_text + " " - - if select._prefixes: - text += self._generate_prefixes( - select, select._prefixes, **kwargs) - - text += self.get_select_precolumns(select) - - # the actual list of columns to print in the SELECT column list. 
- inner_columns = [ - c for c in [ - self._label_select_column(select, - column, - populate_result_map, asfrom, - column_clause_args, - name=name) - for name, column in select._columns_plus_names - ] - if c is not None - ] - + def _compose_select_body( + self, text, select, inner_columns, froms, byfrom, kwargs): text += ', '.join(inner_columns) if froms: @@ -1557,30 +1718,16 @@ class SQLCompiler(Compiled): text += " \nHAVING " + t if select._order_by_clause.clauses: - if self.dialect.supports_simple_order_by_label: - order_by_select = select - else: - order_by_select = None + text += self.order_by_clause(select, **kwargs) - text += self.order_by_clause( - select, order_by_select=order_by_select, **kwargs) - - if select._limit is not None or select._offset is not None: - text += self.limit_clause(select) + if (select._limit_clause is not None or + select._offset_clause is not None): + text += self.limit_clause(select, **kwargs) if select._for_update_arg is not None: - text += self.for_update_clause(select) + text += self.for_update_clause(select, **kwargs) - if self.ctes and \ - compound_index == 0 and toplevel: - text = self._render_cte_clause() + text - - self.stack.pop(-1) - - if asfrom and parens: - return "(" + text + ")" - else: - return text + return text def _generate_prefixes(self, stmt, prefixes, **kw): clause = " ".join( @@ -1612,7 +1759,7 @@ class SQLCompiler(Compiled): else: return "WITH" - def get_select_precolumns(self, select): + def get_select_precolumns(self, select, **kw): """Called when building a ``SELECT`` statement, position is just before column list. @@ -1626,7 +1773,7 @@ class SQLCompiler(Compiled): else: return "" - def for_update_clause(self, select): + def for_update_clause(self, select, **kw): return " FOR UPDATE" def returning_clause(self, stmt, returning_cols): @@ -1634,20 +1781,20 @@ class SQLCompiler(Compiled): "RETURNING is not supported by this " "dialect's statement compiler.") - def limit_clause(self, select): + def limit_clause(self, select, **kw): text = "" - if select._limit is not None: - text += "\n LIMIT " + self.process(elements.literal(select._limit)) - if select._offset is not None: - if select._limit is None: + if select._limit_clause is not None: + text += "\n LIMIT " + self.process(select._limit_clause, **kw) + if select._offset_clause is not None: + if select._limit_clause is None: text += "\n LIMIT -1" - text += " OFFSET " + self.process(elements.literal(select._offset)) + text += " OFFSET " + self.process(select._offset_clause, **kw) return text def visit_table(self, table, asfrom=False, iscrud=False, ashint=False, - fromhints=None, **kwargs): + fromhints=None, use_schema=True, **kwargs): if asfrom or ashint: - if getattr(table, "schema", None): + if use_schema and getattr(table, "schema", None): ret = self.preparer.quote_schema(table.schema) + \ "." 
+ self.preparer.quote(table.name) else: @@ -1669,10 +1816,15 @@ class SQLCompiler(Compiled): ) def visit_insert(self, insert_stmt, **kw): - self.isinsert = True - colparams = self._get_colparams(insert_stmt, **kw) + self.stack.append( + {'correlate_froms': set(), + "asfrom_froms": set(), + "selectable": insert_stmt}) - if not colparams and \ + self.isinsert = True + crud_params = crud._get_crud_params(self, insert_stmt, **kw) + + if not crud_params and \ not self.dialect.supports_default_values and \ not self.dialect.supports_empty_insert: raise exc.CompileError("The '%s' dialect with current database " @@ -1687,9 +1839,9 @@ class SQLCompiler(Compiled): "version settings does not support " "in-place multirow inserts." % self.dialect.name) - colparams_single = colparams[0] + crud_params_single = crud_params[0] else: - colparams_single = colparams + crud_params_single = crud_params preparer = self.preparer supports_default_values = self.dialect.supports_default_values @@ -1720,9 +1872,9 @@ class SQLCompiler(Compiled): text += table_text - if colparams_single or not supports_default_values: + if crud_params_single or not supports_default_values: text += " (%s)" % ', '.join([preparer.format_column(c[0]) - for c in colparams_single]) + for c in crud_params_single]) if self.returning or insert_stmt._returning: self.returning = self.returning or insert_stmt._returning @@ -1733,25 +1885,27 @@ class SQLCompiler(Compiled): text += " " + returning_clause if insert_stmt.select is not None: - text += " %s" % self.process(insert_stmt.select, **kw) - elif not colparams and supports_default_values: + text += " %s" % self.process(self._insert_from_select, **kw) + elif not crud_params and supports_default_values: text += " DEFAULT VALUES" elif insert_stmt._has_multi_parameters: text += " VALUES %s" % ( ", ".join( "(%s)" % ( - ', '.join(c[1] for c in colparam_set) + ', '.join(c[1] for c in crud_param_set) ) - for colparam_set in colparams + for crud_param_set in crud_params ) ) else: text += " VALUES (%s)" % \ - ', '.join([c[1] for c in colparams]) + ', '.join([c[1] for c in crud_params]) if self.returning and not self.returning_precedes_values: text += " " + returning_clause + self.stack.pop(-1) + return text def update_limit_clause(self, update_stmt): @@ -1787,8 +1941,8 @@ class SQLCompiler(Compiled): def visit_update(self, update_stmt, **kw): self.stack.append( {'correlate_froms': set([update_stmt.table]), - "iswrapper": False, - "asfrom_froms": set([update_stmt.table])}) + "asfrom_froms": set([update_stmt.table]), + "selectable": update_stmt}) self.isupdate = True @@ -1803,7 +1957,7 @@ class SQLCompiler(Compiled): table_text = self.update_tables_clause(update_stmt, update_stmt.table, extra_froms, **kw) - colparams = self._get_colparams(update_stmt, **kw) + crud_params = crud._get_crud_params(self, update_stmt, **kw) if update_stmt._hints: dialect_hints = dict([ @@ -1830,7 +1984,7 @@ class SQLCompiler(Compiled): text += ', '.join( c[0]._compiler_dispatch(self, include_table=include_table) + - '=' + c[1] for c in colparams + '=' + c[1] for c in crud_params ) if self.returning or update_stmt._returning: @@ -1850,7 +2004,7 @@ class SQLCompiler(Compiled): text += " " + extra_from_text if update_stmt._whereclause is not None: - t = self.process(update_stmt._whereclause) + t = self.process(update_stmt._whereclause, **kw) if t: text += " WHERE " + t @@ -1866,384 +2020,14 @@ class SQLCompiler(Compiled): return text - def _create_crud_bind_param(self, col, value, required=False, name=None): - if name is None: - 
name = col.key - bindparam = elements.BindParameter(name, value, - type_=col.type, required=required) - bindparam._is_crud = True - return bindparam._compiler_dispatch(self) - @util.memoized_property def _key_getters_for_crud_column(self): - if self.isupdate and self.statement._extra_froms: - # when extra tables are present, refer to the columns - # in those extra tables as table-qualified, including in - # dictionaries and when rendering bind param names. - # the "main" table of the statement remains unqualified, - # allowing the most compatibility with a non-multi-table - # statement. - _et = set(self.statement._extra_froms) - - def _column_as_key(key): - str_key = elements._column_as_key(key) - if hasattr(key, 'table') and key.table in _et: - return (key.table.name, str_key) - else: - return str_key - - def _getattr_col_key(col): - if col.table in _et: - return (col.table.name, col.key) - else: - return col.key - - def _col_bind_name(col): - if col.table in _et: - return "%s_%s" % (col.table.name, col.key) - else: - return col.key - - else: - _column_as_key = elements._column_as_key - _getattr_col_key = _col_bind_name = operator.attrgetter("key") - - return _column_as_key, _getattr_col_key, _col_bind_name - - def _get_colparams(self, stmt, **kw): - """create a set of tuples representing column/string pairs for use - in an INSERT or UPDATE statement. - - Also generates the Compiled object's postfetch, prefetch, and - returning column collections, used for default handling and ultimately - populating the ResultProxy's prefetch_cols() and postfetch_cols() - collections. - - """ - - self.postfetch = [] - self.prefetch = [] - self.returning = [] - - # no parameters in the statement, no parameters in the - # compiled params - return binds for all columns - if self.column_keys is None and stmt.parameters is None: - return [ - (c, self._create_crud_bind_param(c, - None, required=True)) - for c in stmt.table.columns - ] - - if stmt._has_multi_parameters: - stmt_parameters = stmt.parameters[0] - else: - stmt_parameters = stmt.parameters - - # getters - these are normally just column.key, - # but in the case of mysql multi-table update, the rules for - # .key must conditionally take tablename into account - _column_as_key, _getattr_col_key, _col_bind_name = \ - self._key_getters_for_crud_column - - # if we have statement parameters - set defaults in the - # compiled params - if self.column_keys is None: - parameters = {} - else: - parameters = dict((_column_as_key(key), REQUIRED) - for key in self.column_keys - if not stmt_parameters or - key not in stmt_parameters) - - # create a list of column assignment clauses as tuples - values = [] - - if stmt_parameters is not None: - for k, v in stmt_parameters.items(): - colkey = _column_as_key(k) - if colkey is not None: - parameters.setdefault(colkey, v) - else: - # a non-Column expression on the left side; - # add it to values() in an "as-is" state, - # coercing right side to bound param - if elements._is_literal(v): - v = self.process( - elements.BindParameter(None, v, type_=k.type), - **kw) - else: - v = self.process(v.self_group(), **kw) - - values.append((k, v)) - - need_pks = self.isinsert and \ - not self.inline and \ - not stmt._returning - - implicit_returning = need_pks and \ - self.dialect.implicit_returning and \ - stmt.table.implicit_returning - - if self.isinsert: - implicit_return_defaults = (implicit_returning and - stmt._return_defaults) - elif self.isupdate: - implicit_return_defaults = (self.dialect.implicit_returning and - 
stmt.table.implicit_returning and - stmt._return_defaults) - else: - implicit_return_defaults = False - - if implicit_return_defaults: - if stmt._return_defaults is True: - implicit_return_defaults = set(stmt.table.c) - else: - implicit_return_defaults = set(stmt._return_defaults) - - postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid - - check_columns = {} - - # special logic that only occurs for multi-table UPDATE - # statements - if self.isupdate and stmt._extra_froms and stmt_parameters: - normalized_params = dict( - (elements._clause_element_as_expr(c), param) - for c, param in stmt_parameters.items() - ) - affected_tables = set() - for t in stmt._extra_froms: - for c in t.c: - if c in normalized_params: - affected_tables.add(t) - check_columns[_getattr_col_key(c)] = c - value = normalized_params[c] - if elements._is_literal(value): - value = self._create_crud_bind_param( - c, value, required=value is REQUIRED, - name=_col_bind_name(c)) - else: - self.postfetch.append(c) - value = self.process(value.self_group(), **kw) - values.append((c, value)) - # determine tables which are actually - # to be updated - process onupdate and - # server_onupdate for these - for t in affected_tables: - for c in t.c: - if c in normalized_params: - continue - elif (c.onupdate is not None and not - c.onupdate.is_sequence): - if c.onupdate.is_clause_element: - values.append( - (c, self.process( - c.onupdate.arg.self_group(), - **kw) - ) - ) - self.postfetch.append(c) - else: - values.append( - (c, self._create_crud_bind_param( - c, None, name=_col_bind_name(c) - ) - ) - ) - self.prefetch.append(c) - elif c.server_onupdate is not None: - self.postfetch.append(c) - - if self.isinsert and stmt.select_names: - # for an insert from select, we can only use names that - # are given, so only select for those names. 
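The ``stmt.select_names`` branch removed here (and its replacement, ``_scan_insert_from_select_cols`` in the new ``crud.py`` later in this diff) services the public ``Insert.from_select()`` API. A brief sketch with illustrative tables:

    from sqlalchemy.sql import column, select, table

    src = table("src", column("x"))
    dst = table("dst", column("x"))

    ins = dst.insert().from_select(["x"], select([src.c.x]))
    # Renders approximately:
    #   INSERT INTO dst (x) SELECT src.x FROM src
    print(ins)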
- cols = (stmt.table.c[_column_as_key(name)] - for name in stmt.select_names) - else: - # iterate through all table columns to maintain - # ordering, even for those cols that aren't included - cols = stmt.table.columns - - for c in cols: - col_key = _getattr_col_key(c) - if col_key in parameters and col_key not in check_columns: - value = parameters.pop(col_key) - if elements._is_literal(value): - value = self._create_crud_bind_param( - c, value, required=value is REQUIRED, - name=_col_bind_name(c) - if not stmt._has_multi_parameters - else "%s_0" % _col_bind_name(c) - ) - else: - if isinstance(value, elements.BindParameter) and \ - value.type._isnull: - value = value._clone() - value.type = c.type - - if c.primary_key and implicit_returning: - self.returning.append(c) - value = self.process(value.self_group(), **kw) - elif implicit_return_defaults and \ - c in implicit_return_defaults: - self.returning.append(c) - value = self.process(value.self_group(), **kw) - else: - self.postfetch.append(c) - value = self.process(value.self_group(), **kw) - values.append((c, value)) - - elif self.isinsert: - if c.primary_key and \ - need_pks and \ - ( - implicit_returning or - not postfetch_lastrowid or - c is not stmt.table._autoincrement_column - ): - - if implicit_returning: - if c.default is not None: - if c.default.is_sequence: - if self.dialect.supports_sequences and \ - (not c.default.optional or - not self.dialect.sequences_optional): - proc = self.process(c.default, **kw) - values.append((c, proc)) - self.returning.append(c) - elif c.default.is_clause_element: - values.append( - (c, self.process( - c.default.arg.self_group(), **kw)) - ) - self.returning.append(c) - else: - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - self.prefetch.append(c) - else: - self.returning.append(c) - else: - if ( - (c.default is not None and - (not c.default.is_sequence or - self.dialect.supports_sequences)) or - c is stmt.table._autoincrement_column and - (self.dialect.supports_sequences or - self.dialect. 
- preexecute_autoincrement_sequences) - ): - - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - - self.prefetch.append(c) - - elif c.default is not None: - if c.default.is_sequence: - if self.dialect.supports_sequences and \ - (not c.default.optional or - not self.dialect.sequences_optional): - proc = self.process(c.default, **kw) - values.append((c, proc)) - if implicit_return_defaults and \ - c in implicit_return_defaults: - self.returning.append(c) - elif not c.primary_key: - self.postfetch.append(c) - elif c.default.is_clause_element: - values.append( - (c, self.process( - c.default.arg.self_group(), **kw)) - ) - - if implicit_return_defaults and \ - c in implicit_return_defaults: - self.returning.append(c) - elif not c.primary_key: - # don't add primary key column to postfetch - self.postfetch.append(c) - else: - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - self.prefetch.append(c) - elif c.server_default is not None: - if implicit_return_defaults and \ - c in implicit_return_defaults: - self.returning.append(c) - elif not c.primary_key: - self.postfetch.append(c) - elif implicit_return_defaults and \ - c in implicit_return_defaults: - self.returning.append(c) - - elif self.isupdate: - if c.onupdate is not None and not c.onupdate.is_sequence: - if c.onupdate.is_clause_element: - values.append( - (c, self.process( - c.onupdate.arg.self_group(), **kw)) - ) - if implicit_return_defaults and \ - c in implicit_return_defaults: - self.returning.append(c) - else: - self.postfetch.append(c) - else: - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - self.prefetch.append(c) - elif c.server_onupdate is not None: - if implicit_return_defaults and \ - c in implicit_return_defaults: - self.returning.append(c) - else: - self.postfetch.append(c) - elif implicit_return_defaults and \ - c in implicit_return_defaults: - self.returning.append(c) - - if parameters and stmt_parameters: - check = set(parameters).intersection( - _column_as_key(k) for k in stmt.parameters - ).difference(check_columns) - if check: - raise exc.CompileError( - "Unconsumed column names: %s" % - (", ".join("%s" % c for c in check)) - ) - - if stmt._has_multi_parameters: - values_0 = values - values = [values] - - values.extend( - [ - ( - c, - (self._create_crud_bind_param( - c, row[c.key], - name="%s_%d" % (c.key, i + 1) - ) if elements._is_literal(row[c.key]) - else self.process( - row[c.key].self_group(), **kw)) - if c.key in row else param - ) - for (c, param) in values_0 - ] - for i, row in enumerate(stmt.parameters[1:]) - ) - - return values + return crud._key_getters_for_crud_column(self) def visit_delete(self, delete_stmt, **kw): self.stack.append({'correlate_froms': set([delete_stmt.table]), - "iswrapper": False, - "asfrom_froms": set([delete_stmt.table])}) + "asfrom_froms": set([delete_stmt.table]), + "selectable": delete_stmt}) self.isdelete = True text = "DELETE " @@ -2283,7 +2067,7 @@ class SQLCompiler(Compiled): delete_stmt, delete_stmt._returning) if delete_stmt._whereclause is not None: - t = delete_stmt._whereclause._compiler_dispatch(self) + t = delete_stmt._whereclause._compiler_dispatch(self, **kw) if t: text += " WHERE " + t @@ -2386,9 +2170,11 @@ class DDLCompiler(Compiled): (table.description, column.name, ce.args[0]) )) - const = self.create_table_constraints(table) + const = self.create_table_constraints( + table, _include_foreign_key_constraints= + create.include_foreign_key_constraints) if const: - text += ", \n\t" + const + text += separator + "\t" + 
const text += "\n)%s\n\n" % self.post_create_table(table) return text @@ -2410,7 +2196,9 @@ class DDLCompiler(Compiled): return text - def create_table_constraints(self, table): + def create_table_constraints( + self, table, + _include_foreign_key_constraints=None): # On some DB order is significant: visit PK first, then the # other constraints (engine.ReflectionTest.testbasic failed on FB2) @@ -2418,20 +2206,28 @@ class DDLCompiler(Compiled): if table.primary_key: constraints.append(table.primary_key) - constraints.extend([c for c in table._sorted_constraints - if c is not table.primary_key]) + all_fkcs = table.foreign_key_constraints + if _include_foreign_key_constraints is not None: + omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints) + else: + omit_fkcs = set() - return ", \n\t".join(p for p in - (self.process(constraint) - for constraint in constraints - if ( - constraint._create_rule is None or - constraint._create_rule(self)) - and ( - not self.dialect.supports_alter or - not getattr(constraint, 'use_alter', False) - )) if p is not None - ) + constraints.extend([c for c in table._sorted_constraints + if c is not table.primary_key and + c not in omit_fkcs]) + + return ", \n\t".join( + p for p in + (self.process(constraint) + for constraint in constraints + if ( + constraint._create_rule is None or + constraint._create_rule(self)) + and ( + not self.dialect.supports_alter or + not getattr(constraint, 'use_alter', False) + )) if p is not None + ) def visit_drop_table(self, drop): return "\nDROP TABLE " + self.preparer.format_table(drop.element) @@ -2506,6 +2302,16 @@ class DDLCompiler(Compiled): text += " INCREMENT BY %d" % create.element.increment if create.element.start is not None: text += " START WITH %d" % create.element.start + if create.element.minvalue is not None: + text += " MINVALUE %d" % create.element.minvalue + if create.element.maxvalue is not None: + text += " MAXVALUE %d" % create.element.maxvalue + if create.element.nominvalue is not None: + text += " NO MINVALUE" + if create.element.nomaxvalue is not None: + text += " NO MAXVALUE" + if create.element.cycle is not None: + text += " CYCLE" return text def visit_drop_sequence(self, drop): @@ -2513,15 +2319,26 @@ class DDLCompiler(Compiled): self.preparer.format_sequence(drop.element) def visit_drop_constraint(self, drop): + constraint = drop.element + if constraint.name is not None: + formatted_name = self.preparer.format_constraint(constraint) + else: + formatted_name = None + + if formatted_name is None: + raise exc.CompileError( + "Can't emit DROP CONSTRAINT for constraint %r; " + "it has no name" % drop.element) return "ALTER TABLE %s DROP CONSTRAINT %s%s" % ( self.preparer.format_table(drop.element.table), - self.preparer.format_constraint(drop.element), + formatted_name, drop.cascade and " CASCADE" or "" ) def get_column_specification(self, column, **kwargs): colspec = self.preparer.format_column(column) + " " + \ - self.dialect.type_compiler.process(column.type) + self.dialect.type_compiler.process( + column.type, type_expression=column) default = self.get_column_default_string(column) if default is not None: colspec += " DEFAULT " + default @@ -2538,7 +2355,8 @@ class DDLCompiler(Compiled): if isinstance(column.server_default.arg, util.string_types): return "'%s'" % column.server_default.arg else: - return self.sql_compiler.process(column.server_default.arg) + return self.sql_compiler.process( + column.server_default.arg, literal_binds=True) else: return None @@ -2585,14 +2403,14 @@ class 
DDLCompiler(Compiled): formatted_name = self.preparer.format_constraint(constraint) if formatted_name is not None: text += "CONSTRAINT %s " % formatted_name - remote_table = list(constraint._elements.values())[0].column.table + remote_table = list(constraint.elements)[0].column.table text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( ', '.join(preparer.quote(f.parent.name) - for f in constraint._elements.values()), + for f in constraint.elements), self.define_constraint_remote_table( constraint, remote_table, preparer), ', '.join(preparer.quote(f.column.name) - for f in constraint._elements.values()) + for f in constraint.elements) ) text += self.define_constraint_match(constraint) text += self.define_constraint_cascades(constraint) @@ -2645,13 +2463,13 @@ class DDLCompiler(Compiled): class GenericTypeCompiler(TypeCompiler): - def visit_FLOAT(self, type_): + def visit_FLOAT(self, type_, **kw): return "FLOAT" - def visit_REAL(self, type_): + def visit_REAL(self, type_, **kw): return "REAL" - def visit_NUMERIC(self, type_): + def visit_NUMERIC(self, type_, **kw): if type_.precision is None: return "NUMERIC" elif type_.scale is None: @@ -2662,7 +2480,7 @@ class GenericTypeCompiler(TypeCompiler): {'precision': type_.precision, 'scale': type_.scale} - def visit_DECIMAL(self, type_): + def visit_DECIMAL(self, type_, **kw): if type_.precision is None: return "DECIMAL" elif type_.scale is None: @@ -2673,31 +2491,31 @@ class GenericTypeCompiler(TypeCompiler): {'precision': type_.precision, 'scale': type_.scale} - def visit_INTEGER(self, type_): + def visit_INTEGER(self, type_, **kw): return "INTEGER" - def visit_SMALLINT(self, type_): + def visit_SMALLINT(self, type_, **kw): return "SMALLINT" - def visit_BIGINT(self, type_): + def visit_BIGINT(self, type_, **kw): return "BIGINT" - def visit_TIMESTAMP(self, type_): + def visit_TIMESTAMP(self, type_, **kw): return 'TIMESTAMP' - def visit_DATETIME(self, type_): + def visit_DATETIME(self, type_, **kw): return "DATETIME" - def visit_DATE(self, type_): + def visit_DATE(self, type_, **kw): return "DATE" - def visit_TIME(self, type_): + def visit_TIME(self, type_, **kw): return "TIME" - def visit_CLOB(self, type_): + def visit_CLOB(self, type_, **kw): return "CLOB" - def visit_NCLOB(self, type_): + def visit_NCLOB(self, type_, **kw): return "NCLOB" def _render_string_type(self, type_, name): @@ -2709,91 +2527,91 @@ class GenericTypeCompiler(TypeCompiler): text += ' COLLATE "%s"' % type_.collation return text - def visit_CHAR(self, type_): + def visit_CHAR(self, type_, **kw): return self._render_string_type(type_, "CHAR") - def visit_NCHAR(self, type_): + def visit_NCHAR(self, type_, **kw): return self._render_string_type(type_, "NCHAR") - def visit_VARCHAR(self, type_): + def visit_VARCHAR(self, type_, **kw): return self._render_string_type(type_, "VARCHAR") - def visit_NVARCHAR(self, type_): + def visit_NVARCHAR(self, type_, **kw): return self._render_string_type(type_, "NVARCHAR") - def visit_TEXT(self, type_): + def visit_TEXT(self, type_, **kw): return self._render_string_type(type_, "TEXT") - def visit_BLOB(self, type_): + def visit_BLOB(self, type_, **kw): return "BLOB" - def visit_BINARY(self, type_): + def visit_BINARY(self, type_, **kw): return "BINARY" + (type_.length and "(%d)" % type_.length or "") - def visit_VARBINARY(self, type_): + def visit_VARBINARY(self, type_, **kw): return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") - def visit_BOOLEAN(self, type_): + def visit_BOOLEAN(self, type_, **kw): return "BOOLEAN" - def 
visit_large_binary(self, type_): - return self.visit_BLOB(type_) + def visit_large_binary(self, type_, **kw): + return self.visit_BLOB(type_, **kw) - def visit_boolean(self, type_): - return self.visit_BOOLEAN(type_) + def visit_boolean(self, type_, **kw): + return self.visit_BOOLEAN(type_, **kw) - def visit_time(self, type_): - return self.visit_TIME(type_) + def visit_time(self, type_, **kw): + return self.visit_TIME(type_, **kw) - def visit_datetime(self, type_): - return self.visit_DATETIME(type_) + def visit_datetime(self, type_, **kw): + return self.visit_DATETIME(type_, **kw) - def visit_date(self, type_): - return self.visit_DATE(type_) + def visit_date(self, type_, **kw): + return self.visit_DATE(type_, **kw) - def visit_big_integer(self, type_): - return self.visit_BIGINT(type_) + def visit_big_integer(self, type_, **kw): + return self.visit_BIGINT(type_, **kw) - def visit_small_integer(self, type_): - return self.visit_SMALLINT(type_) + def visit_small_integer(self, type_, **kw): + return self.visit_SMALLINT(type_, **kw) - def visit_integer(self, type_): - return self.visit_INTEGER(type_) + def visit_integer(self, type_, **kw): + return self.visit_INTEGER(type_, **kw) - def visit_real(self, type_): - return self.visit_REAL(type_) + def visit_real(self, type_, **kw): + return self.visit_REAL(type_, **kw) - def visit_float(self, type_): - return self.visit_FLOAT(type_) + def visit_float(self, type_, **kw): + return self.visit_FLOAT(type_, **kw) - def visit_numeric(self, type_): - return self.visit_NUMERIC(type_) + def visit_numeric(self, type_, **kw): + return self.visit_NUMERIC(type_, **kw) - def visit_string(self, type_): - return self.visit_VARCHAR(type_) + def visit_string(self, type_, **kw): + return self.visit_VARCHAR(type_, **kw) - def visit_unicode(self, type_): - return self.visit_VARCHAR(type_) + def visit_unicode(self, type_, **kw): + return self.visit_VARCHAR(type_, **kw) - def visit_text(self, type_): - return self.visit_TEXT(type_) + def visit_text(self, type_, **kw): + return self.visit_TEXT(type_, **kw) - def visit_unicode_text(self, type_): - return self.visit_TEXT(type_) + def visit_unicode_text(self, type_, **kw): + return self.visit_TEXT(type_, **kw) - def visit_enum(self, type_): - return self.visit_VARCHAR(type_) + def visit_enum(self, type_, **kw): + return self.visit_VARCHAR(type_, **kw) - def visit_null(self, type_): + def visit_null(self, type_, **kw): raise exc.CompileError("Can't generate DDL for %r; " "did you forget to specify a " "type on this Column?" % type_) - def visit_type_decorator(self, type_): - return self.process(type_.type_engine(self.dialect)) + def visit_type_decorator(self, type_, **kw): + return self.process(type_.type_engine(self.dialect), **kw) - def visit_user_defined(self, type_): - return type_.get_col_spec() + def visit_user_defined(self, type_, **kw): + return type_.get_col_spec(**kw) class IdentifierPreparer(object): diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/crud.py b/lib/python3.5/site-packages/sqlalchemy/sql/crud.py new file mode 100644 index 0000000..273cc7e --- /dev/null +++ b/lib/python3.5/site-packages/sqlalchemy/sql/crud.py @@ -0,0 +1,571 @@ +# sql/crud.py +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Functions used by compiler.py to determine the parameters rendered +within INSERT and UPDATE statements. + +""" +from .. import util +from .. 
import exc +from . import elements +import operator + +REQUIRED = util.symbol('REQUIRED', """ +Placeholder for the value within a :class:`.BindParameter` +which is required to be present when the statement is passed +to :meth:`.Connection.execute`. + +This symbol is typically used when a :func:`.expression.insert` +or :func:`.expression.update` statement is compiled without parameter +values present. + +""") + + +def _get_crud_params(compiler, stmt, **kw): + """create a set of tuples representing column/string pairs for use + in an INSERT or UPDATE statement. + + Also generates the Compiled object's postfetch, prefetch, and + returning column collections, used for default handling and ultimately + populating the ResultProxy's prefetch_cols() and postfetch_cols() + collections. + + """ + + compiler.postfetch = [] + compiler.prefetch = [] + compiler.returning = [] + + # no parameters in the statement, no parameters in the + # compiled params - return binds for all columns + if compiler.column_keys is None and stmt.parameters is None: + return [ + (c, _create_bind_param( + compiler, c, None, required=True)) + for c in stmt.table.columns + ] + + if stmt._has_multi_parameters: + stmt_parameters = stmt.parameters[0] + else: + stmt_parameters = stmt.parameters + + # getters - these are normally just column.key, + # but in the case of mysql multi-table update, the rules for + # .key must conditionally take tablename into account + _column_as_key, _getattr_col_key, _col_bind_name = \ + _key_getters_for_crud_column(compiler) + + # if we have statement parameters - set defaults in the + # compiled params + if compiler.column_keys is None: + parameters = {} + else: + parameters = dict((_column_as_key(key), REQUIRED) + for key in compiler.column_keys + if not stmt_parameters or + key not in stmt_parameters) + + # create a list of column assignment clauses as tuples + values = [] + + if stmt_parameters is not None: + _get_stmt_parameters_params( + compiler, + parameters, stmt_parameters, _column_as_key, values, kw) + + check_columns = {} + + # special logic that only occurs for multi-table UPDATE + # statements + if compiler.isupdate and stmt._extra_froms and stmt_parameters: + _get_multitable_params( + compiler, stmt, stmt_parameters, check_columns, + _col_bind_name, _getattr_col_key, values, kw) + + if compiler.isinsert and stmt.select_names: + _scan_insert_from_select_cols( + compiler, stmt, parameters, + _getattr_col_key, _column_as_key, + _col_bind_name, check_columns, values, kw) + else: + _scan_cols( + compiler, stmt, parameters, + _getattr_col_key, _column_as_key, + _col_bind_name, check_columns, values, kw) + + if parameters and stmt_parameters: + check = set(parameters).intersection( + _column_as_key(k) for k in stmt.parameters + ).difference(check_columns) + if check: + raise exc.CompileError( + "Unconsumed column names: %s" % + (", ".join("%s" % c for c in check)) + ) + + if stmt._has_multi_parameters: + values = _extend_values_for_multiparams(compiler, stmt, values, kw) + + return values + + +def _create_bind_param( + compiler, col, value, process=True, + required=False, name=None, **kw): + if name is None: + name = col.key + bindparam = elements.BindParameter( + name, value, type_=col.type, required=required) + bindparam._is_crud = True + if process: + bindparam = bindparam._compiler_dispatch(compiler, **kw) + return bindparam + + +def _key_getters_for_crud_column(compiler): + if compiler.isupdate and compiler.statement._extra_froms: + # when extra tables are present, refer to the columns 
+ # in those extra tables as table-qualified, including in + # dictionaries and when rendering bind param names. + # the "main" table of the statement remains unqualified, + # allowing the most compatibility with a non-multi-table + # statement. + _et = set(compiler.statement._extra_froms) + + def _column_as_key(key): + str_key = elements._column_as_key(key) + if hasattr(key, 'table') and key.table in _et: + return (key.table.name, str_key) + else: + return str_key + + def _getattr_col_key(col): + if col.table in _et: + return (col.table.name, col.key) + else: + return col.key + + def _col_bind_name(col): + if col.table in _et: + return "%s_%s" % (col.table.name, col.key) + else: + return col.key + + else: + _column_as_key = elements._column_as_key + _getattr_col_key = _col_bind_name = operator.attrgetter("key") + + return _column_as_key, _getattr_col_key, _col_bind_name + + +def _scan_insert_from_select_cols( + compiler, stmt, parameters, _getattr_col_key, + _column_as_key, _col_bind_name, check_columns, values, kw): + + need_pks, implicit_returning, \ + implicit_return_defaults, postfetch_lastrowid = \ + _get_returning_modifiers(compiler, stmt) + + cols = [stmt.table.c[_column_as_key(name)] + for name in stmt.select_names] + + compiler._insert_from_select = stmt.select + + add_select_cols = [] + if stmt.include_insert_from_select_defaults: + col_set = set(cols) + for col in stmt.table.columns: + if col not in col_set and col.default: + cols.append(col) + + for c in cols: + col_key = _getattr_col_key(c) + if col_key in parameters and col_key not in check_columns: + parameters.pop(col_key) + values.append((c, None)) + else: + _append_param_insert_select_hasdefault( + compiler, stmt, c, add_select_cols, kw) + + if add_select_cols: + values.extend(add_select_cols) + compiler._insert_from_select = compiler._insert_from_select._generate() + compiler._insert_from_select._raw_columns = \ + tuple(compiler._insert_from_select._raw_columns) + tuple( + expr for col, expr in add_select_cols) + + +def _scan_cols( + compiler, stmt, parameters, _getattr_col_key, + _column_as_key, _col_bind_name, check_columns, values, kw): + + need_pks, implicit_returning, \ + implicit_return_defaults, postfetch_lastrowid = \ + _get_returning_modifiers(compiler, stmt) + + if stmt._parameter_ordering: + parameter_ordering = [ + _column_as_key(key) for key in stmt._parameter_ordering + ] + ordered_keys = set(parameter_ordering) + cols = [ + stmt.table.c[key] for key in parameter_ordering + ] + [ + c for c in stmt.table.c if c.key not in ordered_keys + ] + else: + cols = stmt.table.columns + + for c in cols: + col_key = _getattr_col_key(c) + if col_key in parameters and col_key not in check_columns: + + _append_param_parameter( + compiler, stmt, c, col_key, parameters, _col_bind_name, + implicit_returning, implicit_return_defaults, values, kw) + + elif compiler.isinsert: + if c.primary_key and \ + need_pks and \ + ( + implicit_returning or + not postfetch_lastrowid or + c is not stmt.table._autoincrement_column + ): + + if implicit_returning: + _append_param_insert_pk_returning( + compiler, stmt, c, values, kw) + else: + _append_param_insert_pk(compiler, stmt, c, values, kw) + + elif c.default is not None: + + _append_param_insert_hasdefault( + compiler, stmt, c, implicit_return_defaults, + values, kw) + + elif c.server_default is not None: + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + elif not c.primary_key: + compiler.postfetch.append(c) + elif 
implicit_return_defaults and \
+                c in implicit_return_defaults:
+            compiler.returning.append(c)
+
+        elif compiler.isupdate:
+            _append_param_update(
+                compiler, stmt, c, implicit_return_defaults, values, kw)
+
+
+def _append_param_parameter(
+        compiler, stmt, c, col_key, parameters, _col_bind_name,
+        implicit_returning, implicit_return_defaults, values, kw):
+    value = parameters.pop(col_key)
+    if elements._is_literal(value):
+        value = _create_bind_param(
+            compiler, c, value, required=value is REQUIRED,
+            name=_col_bind_name(c)
+            if not stmt._has_multi_parameters
+            else "%s_0" % _col_bind_name(c),
+            **kw
+        )
+    else:
+        if isinstance(value, elements.BindParameter) and \
+                value.type._isnull:
+            value = value._clone()
+            value.type = c.type
+
+        if c.primary_key and implicit_returning:
+            compiler.returning.append(c)
+            value = compiler.process(value.self_group(), **kw)
+        elif implicit_return_defaults and \
+                c in implicit_return_defaults:
+            compiler.returning.append(c)
+            value = compiler.process(value.self_group(), **kw)
+        else:
+            compiler.postfetch.append(c)
+            value = compiler.process(value.self_group(), **kw)
+    values.append((c, value))
+
+
+def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
+    if c.default is not None:
+        if c.default.is_sequence:
+            if compiler.dialect.supports_sequences and \
+                    (not c.default.optional or
+                     not compiler.dialect.sequences_optional):
+                proc = compiler.process(c.default, **kw)
+                values.append((c, proc))
+            compiler.returning.append(c)
+        elif c.default.is_clause_element:
+            values.append(
+                (c, compiler.process(
+                    c.default.arg.self_group(), **kw))
+            )
+            compiler.returning.append(c)
+        else:
+            values.append(
+                (c, _create_prefetch_bind_param(compiler, c))
+            )
+
+    else:
+        compiler.returning.append(c)
+
+
+def _create_prefetch_bind_param(compiler, c, process=True, name=None):
+    param = _create_bind_param(compiler, c, None, process=process, name=name)
+    compiler.prefetch.append(c)
+    return param
+
+
+class _multiparam_column(elements.ColumnElement):
+    def __init__(self, original, index):
+        self.key = "%s_%d" % (original.key, index + 1)
+        self.original = original
+        self.default = original.default
+        self.type = original.type
+
+    def __eq__(self, other):
+        return isinstance(other, _multiparam_column) and \
+            other.key == self.key and \
+            other.original == self.original
+
+
+def _process_multiparam_default_bind(compiler, c, index, kw):
+
+    if not c.default:
+        raise exc.CompileError(
+            "INSERT value for column %s is explicitly rendered as a bound "
+            "parameter in the VALUES clause; "
+            "a Python-side value or SQL expression is required" % c)
+    elif c.default.is_clause_element:
+        return compiler.process(c.default.arg.self_group(), **kw)
+    else:
+        col = _multiparam_column(c, index)
+        return _create_prefetch_bind_param(compiler, col)
+
+
+def _append_param_insert_pk(compiler, stmt, c, values, kw):
+    if (
+        (c.default is not None and
+         (not c.default.is_sequence or
+          compiler.dialect.supports_sequences)) or
+        c is stmt.table._autoincrement_column and
+        (compiler.dialect.supports_sequences or
+         compiler.dialect.
+ preexecute_autoincrement_sequences) + ): + values.append( + (c, _create_prefetch_bind_param(compiler, c)) + ) + + +def _append_param_insert_hasdefault( + compiler, stmt, c, implicit_return_defaults, values, kw): + + if c.default.is_sequence: + if compiler.dialect.supports_sequences and \ + (not c.default.optional or + not compiler.dialect.sequences_optional): + proc = compiler.process(c.default, **kw) + values.append((c, proc)) + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + elif not c.primary_key: + compiler.postfetch.append(c) + elif c.default.is_clause_element: + proc = compiler.process(c.default.arg.self_group(), **kw) + values.append((c, proc)) + + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + elif not c.primary_key: + # don't add primary key column to postfetch + compiler.postfetch.append(c) + else: + values.append( + (c, _create_prefetch_bind_param(compiler, c)) + ) + + +def _append_param_insert_select_hasdefault( + compiler, stmt, c, values, kw): + + if c.default.is_sequence: + if compiler.dialect.supports_sequences and \ + (not c.default.optional or + not compiler.dialect.sequences_optional): + proc = c.default + values.append((c, proc)) + elif c.default.is_clause_element: + proc = c.default.arg.self_group() + values.append((c, proc)) + else: + values.append( + (c, _create_prefetch_bind_param(compiler, c, process=False)) + ) + + +def _append_param_update( + compiler, stmt, c, implicit_return_defaults, values, kw): + + if c.onupdate is not None and not c.onupdate.is_sequence: + if c.onupdate.is_clause_element: + values.append( + (c, compiler.process( + c.onupdate.arg.self_group(), **kw)) + ) + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + else: + compiler.postfetch.append(c) + else: + values.append( + (c, _create_prefetch_bind_param(compiler, c)) + ) + elif c.server_onupdate is not None: + if implicit_return_defaults and \ + c in implicit_return_defaults: + compiler.returning.append(c) + else: + compiler.postfetch.append(c) + elif implicit_return_defaults and \ + stmt._return_defaults is not True and \ + c in implicit_return_defaults: + compiler.returning.append(c) + + +def _get_multitable_params( + compiler, stmt, stmt_parameters, check_columns, + _col_bind_name, _getattr_col_key, values, kw): + + normalized_params = dict( + (elements._clause_element_as_expr(c), param) + for c, param in stmt_parameters.items() + ) + affected_tables = set() + for t in stmt._extra_froms: + for c in t.c: + if c in normalized_params: + affected_tables.add(t) + check_columns[_getattr_col_key(c)] = c + value = normalized_params[c] + if elements._is_literal(value): + value = _create_bind_param( + compiler, c, value, required=value is REQUIRED, + name=_col_bind_name(c)) + else: + compiler.postfetch.append(c) + value = compiler.process(value.self_group(), **kw) + values.append((c, value)) + # determine tables which are actually to be updated - process onupdate + # and server_onupdate for these + for t in affected_tables: + for c in t.c: + if c in normalized_params: + continue + elif (c.onupdate is not None and not + c.onupdate.is_sequence): + if c.onupdate.is_clause_element: + values.append( + (c, compiler.process( + c.onupdate.arg.self_group(), + **kw) + ) + ) + compiler.postfetch.append(c) + else: + values.append( + (c, _create_prefetch_bind_param( + compiler, c, name=_col_bind_name(c))) + ) + elif c.server_onupdate is not None: + 
compiler.postfetch.append(c) + + +def _extend_values_for_multiparams(compiler, stmt, values, kw): + values_0 = values + values = [values] + + values.extend( + [ + ( + c, + (_create_bind_param( + compiler, c, row[c.key], + name="%s_%d" % (c.key, i + 1) + ) if elements._is_literal(row[c.key]) + else compiler.process( + row[c.key].self_group(), **kw)) + if c.key in row else + _process_multiparam_default_bind(compiler, c, i, kw) + ) + for (c, param) in values_0 + ] + for i, row in enumerate(stmt.parameters[1:]) + ) + return values + + +def _get_stmt_parameters_params( + compiler, parameters, stmt_parameters, _column_as_key, values, kw): + for k, v in stmt_parameters.items(): + colkey = _column_as_key(k) + if colkey is not None: + parameters.setdefault(colkey, v) + else: + # a non-Column expression on the left side; + # add it to values() in an "as-is" state, + # coercing right side to bound param + if elements._is_literal(v): + v = compiler.process( + elements.BindParameter(None, v, type_=k.type), + **kw) + else: + v = compiler.process(v.self_group(), **kw) + + values.append((k, v)) + + +def _get_returning_modifiers(compiler, stmt): + need_pks = compiler.isinsert and \ + not compiler.inline and \ + not stmt._returning and \ + not stmt._has_multi_parameters + + implicit_returning = need_pks and \ + compiler.dialect.implicit_returning and \ + stmt.table.implicit_returning + + if compiler.isinsert: + implicit_return_defaults = (implicit_returning and + stmt._return_defaults) + elif compiler.isupdate: + implicit_return_defaults = (compiler.dialect.implicit_returning and + stmt.table.implicit_returning and + stmt._return_defaults) + else: + implicit_return_defaults = False + + if implicit_return_defaults: + if stmt._return_defaults is True: + implicit_return_defaults = set(stmt.table.c) + else: + implicit_return_defaults = set(stmt._return_defaults) + + postfetch_lastrowid = need_pks and compiler.dialect.postfetch_lastrowid + + return need_pks, implicit_returning, \ + implicit_return_defaults, postfetch_lastrowid diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py b/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py index 1f2c448..1cb9eeb 100644 --- a/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py +++ b/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py @@ -1,5 +1,5 @@ # sql/ddl.py -# Copyright (C) 2009-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -12,7 +12,6 @@ to invoke them for a create/drop call. from .. import util from .elements import ClauseElement -from .visitors import traverse from .base import Executable, _generative, SchemaVisitor, _bind_or_error from ..util import topological from .. import event @@ -370,7 +369,7 @@ class DDL(DDLElement): :class:`.DDLEvents` - :mod:`sqlalchemy.event` + :ref:`event_toplevel` """ @@ -464,19 +463,28 @@ class CreateTable(_CreateDropBase): __visit_name__ = "create_table" - def __init__(self, element, on=None, bind=None): + def __init__( + self, element, on=None, bind=None, + include_foreign_key_constraints=None): """Create a :class:`.CreateTable` construct. :param element: a :class:`.Table` that's the subject of the CREATE :param on: See the description for 'on' in :class:`.DDL`. :param bind: See the description for 'bind' in :class:`.DDL`. 
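
# --- Illustrative sketch, not part of the diff: a minimal usage example
# for the ``include_foreign_key_constraints`` parameter documented
# immediately below.  Table and constraint names here are hypothetical.
from sqlalchemy import MetaData, Table, Column, Integer, ForeignKeyConstraint
from sqlalchemy.schema import CreateTable

metadata = MetaData()
parent = Table('parent', metadata, Column('id', Integer, primary_key=True))
child = Table(
    'child', metadata,
    Column('id', Integer, primary_key=True),
    Column('parent_id', Integer),
    ForeignKeyConstraint(
        ['parent_id'], ['parent.id'], name='fk_child_parent'))

# Passing an empty sequence renders CREATE TABLE with no inline foreign
# key constraints; they could then be emitted afterwards via ALTER
# (AddConstraint), which is what the SchemaGenerator changes below do.
print(CreateTable(child, include_foreign_key_constraints=[]))
# --- end sketch
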
+ :param include_foreign_key_constraints: optional sequence of + :class:`.ForeignKeyConstraint` objects that will be included + inline within the CREATE construct; if omitted, all foreign key + constraints that do not specify use_alter=True are included. + + .. versionadded:: 1.0.0 """ super(CreateTable, self).__init__(element, on=on, bind=bind) self.columns = [CreateColumn(column) for column in element.columns ] + self.include_foreign_key_constraints = include_foreign_key_constraints class _DropView(_CreateDropBase): @@ -696,48 +704,80 @@ class SchemaGenerator(DDLBase): tables = self.tables else: tables = list(metadata.tables.values()) - collection = [t for t in sort_tables(tables) - if self._can_create_table(t)] + + collection = sort_tables_and_constraints( + [t for t in tables if self._can_create_table(t)]) + seq_coll = [s for s in metadata._sequences.values() if s.column is None and self._can_create_sequence(s)] + event_collection = [ + t for (t, fks) in collection if t is not None + ] metadata.dispatch.before_create(metadata, self.connection, - tables=collection, + tables=event_collection, checkfirst=self.checkfirst, _ddl_runner=self) for seq in seq_coll: self.traverse_single(seq, create_ok=True) - for table in collection: - self.traverse_single(table, create_ok=True) + for table, fkcs in collection: + if table is not None: + self.traverse_single( + table, create_ok=True, + include_foreign_key_constraints=fkcs, + _is_metadata_operation=True) + else: + for fkc in fkcs: + self.traverse_single(fkc) metadata.dispatch.after_create(metadata, self.connection, - tables=collection, + tables=event_collection, checkfirst=self.checkfirst, _ddl_runner=self) - def visit_table(self, table, create_ok=False): + def visit_table( + self, table, create_ok=False, + include_foreign_key_constraints=None, + _is_metadata_operation=False): if not create_ok and not self._can_create_table(table): return - table.dispatch.before_create(table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self) + table.dispatch.before_create( + table, self.connection, + checkfirst=self.checkfirst, + _ddl_runner=self, + _is_metadata_operation=_is_metadata_operation) for column in table.columns: if column.default is not None: self.traverse_single(column.default) - self.connection.execute(CreateTable(table)) + if not self.dialect.supports_alter: + # e.g., don't omit any foreign key constraints + include_foreign_key_constraints = None + + self.connection.execute( + CreateTable( + table, + include_foreign_key_constraints=include_foreign_key_constraints + )) if hasattr(table, 'indexes'): for index in table.indexes: self.traverse_single(index) - table.dispatch.after_create(table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self) + table.dispatch.after_create( + table, self.connection, + checkfirst=self.checkfirst, + _ddl_runner=self, + _is_metadata_operation=_is_metadata_operation) + + def visit_foreign_key_constraint(self, constraint): + if not self.dialect.supports_alter: + return + self.connection.execute(AddConstraint(constraint)) def visit_sequence(self, sequence, create_ok=False): if not create_ok and not self._can_create_sequence(sequence): @@ -765,11 +805,51 @@ class SchemaDropper(DDLBase): else: tables = list(metadata.tables.values()) - collection = [ - t - for t in reversed(sort_tables(tables)) - if self._can_drop_table(t) - ] + try: + unsorted_tables = [t for t in tables if self._can_drop_table(t)] + collection = list(reversed( + sort_tables_and_constraints( + unsorted_tables, + filter_fn=lambda 
constraint: False + if not self.dialect.supports_alter + or constraint.name is None + else None + ) + )) + except exc.CircularDependencyError as err2: + if not self.dialect.supports_alter: + util.warn( + "Can't sort tables for DROP; an " + "unresolvable foreign key " + "dependency exists between tables: %s, and backend does " + "not support ALTER. To restore at least a partial sort, " + "apply use_alter=True to ForeignKey and " + "ForeignKeyConstraint " + "objects involved in the cycle to mark these as known " + "cycles that will be ignored." + % ( + ", ".join(sorted([t.fullname for t in err2.cycles])) + ) + ) + collection = [(t, ()) for t in unsorted_tables] + else: + util.raise_from_cause( + exc.CircularDependencyError( + err2.args[0], + err2.cycles, err2.edges, + msg="Can't sort tables for DROP; an " + "unresolvable foreign key " + "dependency exists between tables: %s. Please ensure " + "that the ForeignKey and ForeignKeyConstraint objects " + "involved in the cycle have " + "names so that they can be dropped using " + "DROP CONSTRAINT." + % ( + ", ".join(sorted([t.fullname for t in err2.cycles])) + ) + + ) + ) seq_coll = [ s @@ -777,18 +857,27 @@ class SchemaDropper(DDLBase): if s.column is None and self._can_drop_sequence(s) ] + event_collection = [ + t for (t, fks) in collection if t is not None + ] + metadata.dispatch.before_drop( - metadata, self.connection, tables=collection, + metadata, self.connection, tables=event_collection, checkfirst=self.checkfirst, _ddl_runner=self) - for table in collection: - self.traverse_single(table, drop_ok=True) + for table, fkcs in collection: + if table is not None: + self.traverse_single( + table, drop_ok=True, _is_metadata_operation=True) + else: + for fkc in fkcs: + self.traverse_single(fkc) for seq in seq_coll: self.traverse_single(seq, drop_ok=True) metadata.dispatch.after_drop( - metadata, self.connection, tables=collection, + metadata, self.connection, tables=event_collection, checkfirst=self.checkfirst, _ddl_runner=self) def _can_drop_table(self, table): @@ -812,13 +901,15 @@ class SchemaDropper(DDLBase): def visit_index(self, index): self.connection.execute(DropIndex(index)) - def visit_table(self, table, drop_ok=False): + def visit_table(self, table, drop_ok=False, _is_metadata_operation=False): if not drop_ok and not self._can_drop_table(table): return - table.dispatch.before_drop(table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self) + table.dispatch.before_drop( + table, self.connection, + checkfirst=self.checkfirst, + _ddl_runner=self, + _is_metadata_operation=_is_metadata_operation) for column in table.columns: if column.default is not None: @@ -826,9 +917,16 @@ class SchemaDropper(DDLBase): self.connection.execute(DropTable(table)) - table.dispatch.after_drop(table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self) + table.dispatch.after_drop( + table, self.connection, + checkfirst=self.checkfirst, + _ddl_runner=self, + _is_metadata_operation=_is_metadata_operation) + + def visit_foreign_key_constraint(self, constraint): + if not self.dialect.supports_alter: + return + self.connection.execute(DropConstraint(constraint)) def visit_sequence(self, sequence, drop_ok=False): if not drop_ok and not self._can_drop_sequence(sequence): @@ -837,32 +935,161 @@ class SchemaDropper(DDLBase): def sort_tables(tables, skip_fn=None, extra_dependencies=None): - """sort a collection of Table objects in order of - their foreign-key dependency.""" + """sort a collection of :class:`.Table` objects based on 
dependency.
+
+    This is a dependency-ordered sort which will emit :class:`.Table`
+    objects such that they will follow their dependent :class:`.Table`
+    objects.
+    Tables are dependent on one another based on the presence of
+    :class:`.ForeignKeyConstraint` objects as well as explicit dependencies
+    added by :meth:`.Table.add_is_dependent_on`.
+
+    .. warning::
+
+        The :func:`.sort_tables` function cannot by itself accommodate
+        automatic resolution of dependency cycles between tables, which
+        are usually caused by mutually dependent foreign key constraints.
+        To resolve these cycles, either the
+        :paramref:`.ForeignKeyConstraint.use_alter` parameter may be applied
+        to those constraints, or the
+        :func:`.sql.sort_tables_and_constraints` function may be used,
+        which will break out foreign key constraints involved in cycles
+        separately.
+
+    :param tables: a sequence of :class:`.Table` objects.
+
+    :param skip_fn: optional callable which will be passed a
+     :class:`.ForeignKey` object; if it returns True, this
+     constraint will not be considered as a dependency.  Note this is
+     **different** from the same parameter in
+     :func:`.sort_tables_and_constraints`, which is
+     instead passed the owning :class:`.ForeignKeyConstraint` object.
+
+    :param extra_dependencies: a sequence of 2-tuples of tables which will
+     also be considered as dependent on each other.
+
+    .. seealso::
+
+        :func:`.sort_tables_and_constraints`
+
+        :meth:`.MetaData.sorted_tables` - uses this function to sort
+
+
+    """
+
+    if skip_fn is not None:
+        def _skip_fn(fkc):
+            for fk in fkc.elements:
+                if skip_fn(fk):
+                    return True
+            else:
+                return None
+    else:
+        _skip_fn = None
+
+    return [
+        t for (t, fkcs) in
+        sort_tables_and_constraints(
+            tables, filter_fn=_skip_fn,
+            extra_dependencies=extra_dependencies)
+        if t is not None
+    ]
+
+
+def sort_tables_and_constraints(
+        tables, filter_fn=None, extra_dependencies=None):
+    """sort a collection of :class:`.Table` / :class:`.ForeignKeyConstraint`
+    objects.
+
+    This is a dependency-ordered sort which will emit tuples of
+    ``(Table, [ForeignKeyConstraint, ...])`` such that each
+    :class:`.Table` follows its dependent :class:`.Table` objects.
+    Remaining :class:`.ForeignKeyConstraint` objects that are separate due to
+    dependency rules not satisfied by the sort are emitted afterwards
+    as ``(None, [ForeignKeyConstraint ...])``.
+
+    Tables are dependent on one another based on the presence of
+    :class:`.ForeignKeyConstraint` objects, explicit dependencies
+    added by :meth:`.Table.add_is_dependent_on`, as well as dependencies
+    stated here using the :paramref:`~.sort_tables_and_constraints.filter_fn`
+    and/or :paramref:`~.sort_tables_and_constraints.extra_dependencies`
+    parameters.
+
+    :param tables: a sequence of :class:`.Table` objects.
+
+    :param filter_fn: optional callable which will be passed a
+     :class:`.ForeignKeyConstraint` object, and returns a value based on
+     whether this constraint should definitely be included or excluded as
+     an inline constraint, or neither.  If it returns False, the constraint
+     will definitely be included as a dependency that cannot be subject
+     to ALTER; if True, it will **only** be included as an ALTER result at
+     the end.  Returning None means the constraint is included in the
+     table-based result unless it is detected as part of a dependency cycle.
+
+    :param extra_dependencies: a sequence of 2-tuples of tables which will
+     also be considered as dependent on each other.
+
+    .. versionadded:: 1.0.0
+
+    ..
seealso:: + + :func:`.sort_tables` + + + """ + + fixed_dependencies = set() + mutable_dependencies = set() - tables = list(tables) - tuples = [] if extra_dependencies is not None: - tuples.extend(extra_dependencies) - - def visit_foreign_key(fkey): - if fkey.use_alter: - return - elif skip_fn and skip_fn(fkey): - return - parent_table = fkey.column.table - if parent_table in tables: - child_table = fkey.parent.table - if parent_table is not child_table: - tuples.append((parent_table, child_table)) + fixed_dependencies.update(extra_dependencies) + remaining_fkcs = set() for table in tables: - traverse(table, - {'schema_visitor': True}, - {'foreign_key': visit_foreign_key}) + for fkc in table.foreign_key_constraints: + if fkc.use_alter is True: + remaining_fkcs.add(fkc) + continue - tuples.extend( - [parent, table] for parent in table._extra_dependencies + if filter_fn: + filtered = filter_fn(fkc) + + if filtered is True: + remaining_fkcs.add(fkc) + continue + + dependent_on = fkc.referred_table + if dependent_on is not table: + mutable_dependencies.add((dependent_on, table)) + + fixed_dependencies.update( + (parent, table) for parent in table._extra_dependencies ) - return list(topological.sort(tuples, tables)) + try: + candidate_sort = list( + topological.sort( + fixed_dependencies.union(mutable_dependencies), tables, + deterministic_order=True + ) + ) + except exc.CircularDependencyError as err: + for edge in err.edges: + if edge in mutable_dependencies: + table = edge[1] + can_remove = [ + fkc for fkc in table.foreign_key_constraints + if filter_fn is None or filter_fn(fkc) is not False] + remaining_fkcs.update(can_remove) + for fkc in can_remove: + dependent_on = fkc.referred_table + if dependent_on is not table: + mutable_dependencies.discard((dependent_on, table)) + candidate_sort = list( + topological.sort( + fixed_dependencies.union(mutable_dependencies), tables, + deterministic_order=True + ) + ) + + return [ + (table, table.foreign_key_constraints.difference(remaining_fkcs)) + for table in candidate_sort + ] + [(None, list(remaining_fkcs))] diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/default_comparator.py b/lib/python3.5/site-packages/sqlalchemy/sql/default_comparator.py index 4f53e29..d180dbc 100644 --- a/lib/python3.5/site-packages/sqlalchemy/sql/default_comparator.py +++ b/lib/python3.5/site-packages/sqlalchemy/sql/default_comparator.py @@ -1,5 +1,5 @@ # sql/default_comparator.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -9,8 +9,8 @@ """ from .. import exc, util -from . import operators from . import type_api +from . import operators from .elements import BindParameter, True_, False_, BinaryExpression, \ Null, _const_expr, _clause_element_as_expr, \ ClauseList, ColumnElement, TextClause, UnaryExpression, \ @@ -18,294 +18,271 @@ from .elements import BindParameter, True_, False_, BinaryExpression, \ from .selectable import SelectBase, Alias, Selectable, ScalarSelect -class _DefaultColumnComparator(operators.ColumnOperators): - """Defines comparison and math operations. +def _boolean_compare(expr, op, obj, negate=None, reverse=False, + _python_is_types=(util.NoneType, bool), + result_type = None, + **kwargs): - See :class:`.ColumnOperators` and :class:`.Operators` for descriptions - of all operations. 
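
# --- Illustrative sketch, not part of the diff: the return contract of
# sort_tables_and_constraints() from the ddl.py hunk above, shown on two
# hypothetical, mutually-dependent tables.  Constraints that had to be
# broken out of the dependency cycle arrive in a trailing (None, [...])
# entry, suitable for emission via ALTER.
from sqlalchemy import MetaData, Table, Column, Integer, ForeignKeyConstraint
from sqlalchemy.schema import sort_tables_and_constraints

m = MetaData()
a = Table('a', m, Column('id', Integer, primary_key=True),
          Column('b_id', Integer),
          ForeignKeyConstraint(['b_id'], ['b.id'], name='fk_a_b'))
b = Table('b', m, Column('id', Integer, primary_key=True),
          Column('a_id', Integer),
          ForeignKeyConstraint(['a_id'], ['a.id'], name='fk_b_a'))

for table, fkcs in sort_tables_and_constraints([a, b]):
    print(table, sorted(fkc.name for fkc in fkcs))
# expected shape: (a, [...]), (b, [...]), then (None, [<the constraints
# removed from the cycle>]) -- here both FKs participate in the cycle.
# --- end sketch
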
+ if result_type is None: + result_type = type_api.BOOLEANTYPE - """ + if isinstance(obj, _python_is_types + (Null, True_, False_)): - @util.memoized_property - def type(self): - return self.expr.type - - def operate(self, op, *other, **kwargs): - o = self.operators[op.__name__] - return o[0](self, self.expr, op, *(other + o[1:]), **kwargs) - - def reverse_operate(self, op, other, **kwargs): - o = self.operators[op.__name__] - return o[0](self, self.expr, op, other, - reverse=True, *o[1:], **kwargs) - - def _adapt_expression(self, op, other_comparator): - """evaluate the return type of , - and apply any adaptations to the given operator. - - This method determines the type of a resulting binary expression - given two source types and an operator. For example, two - :class:`.Column` objects, both of the type :class:`.Integer`, will - produce a :class:`.BinaryExpression` that also has the type - :class:`.Integer` when compared via the addition (``+``) operator. - However, using the addition operator with an :class:`.Integer` - and a :class:`.Date` object will produce a :class:`.Date`, assuming - "days delta" behavior by the database (in reality, most databases - other than Postgresql don't accept this particular operation). - - The method returns a tuple of the form , . - The resulting operator and type will be those applied to the - resulting :class:`.BinaryExpression` as the final operator and the - right-hand side of the expression. - - Note that only a subset of operators make usage of - :meth:`._adapt_expression`, - including math operators and user-defined operators, but not - boolean comparison or special SQL keywords like MATCH or BETWEEN. - - """ - return op, other_comparator.type - - def _boolean_compare(self, expr, op, obj, negate=None, reverse=False, - _python_is_types=(util.NoneType, bool), - **kwargs): - - if isinstance(obj, _python_is_types + (Null, True_, False_)): - - # allow x ==/!= True/False to be treated as a literal. - # this comes out to "== / != true/false" or "1/0" if those - # constants aren't supported and works on all platforms - if op in (operators.eq, operators.ne) and \ - isinstance(obj, (bool, True_, False_)): - return BinaryExpression(expr, - _literal_as_text(obj), - op, - type_=type_api.BOOLEANTYPE, - negate=negate, modifiers=kwargs) - else: - # all other None/True/False uses IS, IS NOT - if op in (operators.eq, operators.is_): - return BinaryExpression(expr, _const_expr(obj), - operators.is_, - negate=operators.isnot) - elif op in (operators.ne, operators.isnot): - return BinaryExpression(expr, _const_expr(obj), - operators.isnot, - negate=operators.is_) - else: - raise exc.ArgumentError( - "Only '=', '!=', 'is_()', 'isnot()' operators can " - "be used with None/True/False") - else: - obj = self._check_literal(expr, op, obj) - - if reverse: - return BinaryExpression(obj, - expr, - op, - type_=type_api.BOOLEANTYPE, - negate=negate, modifiers=kwargs) - else: + # allow x ==/!= True/False to be treated as a literal. 
+ # this comes out to "== / != true/false" or "1/0" if those + # constants aren't supported and works on all platforms + if op in (operators.eq, operators.ne) and \ + isinstance(obj, (bool, True_, False_)): return BinaryExpression(expr, - obj, + _literal_as_text(obj), op, - type_=type_api.BOOLEANTYPE, + type_=result_type, negate=negate, modifiers=kwargs) - - def _binary_operate(self, expr, op, obj, reverse=False, result_type=None, - **kw): - obj = self._check_literal(expr, op, obj) - - if reverse: - left, right = obj, expr else: - left, right = expr, obj - - if result_type is None: - op, result_type = left.comparator._adapt_expression( - op, right.comparator) - - return BinaryExpression(left, right, op, type_=result_type) - - def _conjunction_operate(self, expr, op, other, **kw): - if op is operators.and_: - return and_(expr, other) - elif op is operators.or_: - return or_(expr, other) - else: - raise NotImplementedError() - - def _scalar(self, expr, op, fn, **kw): - return fn(expr) - - def _in_impl(self, expr, op, seq_or_selectable, negate_op, **kw): - seq_or_selectable = _clause_element_as_expr(seq_or_selectable) - - if isinstance(seq_or_selectable, ScalarSelect): - return self._boolean_compare(expr, op, seq_or_selectable, - negate=negate_op) - elif isinstance(seq_or_selectable, SelectBase): - - # TODO: if we ever want to support (x, y, z) IN (select x, - # y, z from table), we would need a multi-column version of - # as_scalar() to produce a multi- column selectable that - # does not export itself as a FROM clause - - return self._boolean_compare( - expr, op, seq_or_selectable.as_scalar(), - negate=negate_op, **kw) - elif isinstance(seq_or_selectable, (Selectable, TextClause)): - return self._boolean_compare(expr, op, seq_or_selectable, - negate=negate_op, **kw) - elif isinstance(seq_or_selectable, ClauseElement): - raise exc.InvalidRequestError( - 'in_() accepts' - ' either a list of expressions ' - 'or a selectable: %r' % seq_or_selectable) - - # Handle non selectable arguments as sequences - args = [] - for o in seq_or_selectable: - if not _is_literal(o): - if not isinstance(o, operators.ColumnOperators): - raise exc.InvalidRequestError( - 'in_() accepts' - ' either a list of expressions ' - 'or a selectable: %r' % o) - elif o is None: - o = Null() + # all other None/True/False uses IS, IS NOT + if op in (operators.eq, operators.is_): + return BinaryExpression(expr, _const_expr(obj), + operators.is_, + negate=operators.isnot) + elif op in (operators.ne, operators.isnot): + return BinaryExpression(expr, _const_expr(obj), + operators.isnot, + negate=operators.is_) else: - o = expr._bind_param(op, o) - args.append(o) - if len(args) == 0: + raise exc.ArgumentError( + "Only '=', '!=', 'is_()', 'isnot()' operators can " + "be used with None/True/False") + else: + obj = _check_literal(expr, op, obj) - # Special case handling for empty IN's, behave like - # comparison against zero row selectable. We use != to - # build the contradiction as it handles NULL values - # appropriately, i.e. "not (x IN ())" should not return NULL - # values for x. + if reverse: + return BinaryExpression(obj, + expr, + op, + type_=result_type, + negate=negate, modifiers=kwargs) + else: + return BinaryExpression(expr, + obj, + op, + type_=result_type, + negate=negate, modifiers=kwargs) - util.warn('The IN-predicate on "%s" was invoked with an ' - 'empty sequence. This results in a ' - 'contradiction, which nonetheless can be ' - 'expensive to evaluate. Consider alternative ' - 'strategies for improved performance.' 
% expr) - if op is operators.in_op: - return expr != expr - else: - return expr == expr - return self._boolean_compare(expr, op, - ClauseList(*args).self_group(against=op), - negate=negate_op) +def _binary_operate(expr, op, obj, reverse=False, result_type=None, + **kw): + obj = _check_literal(expr, op, obj) - def _unsupported_impl(self, expr, op, *arg, **kw): - raise NotImplementedError("Operator '%s' is not supported on " - "this expression" % op.__name__) + if reverse: + left, right = obj, expr + else: + left, right = expr, obj - def _inv_impl(self, expr, op, **kw): - """See :meth:`.ColumnOperators.__inv__`.""" - if hasattr(expr, 'negation_clause'): - return expr.negation_clause + if result_type is None: + op, result_type = left.comparator._adapt_expression( + op, right.comparator) + + return BinaryExpression( + left, right, op, type_=result_type, modifiers=kw) + + +def _conjunction_operate(expr, op, other, **kw): + if op is operators.and_: + return and_(expr, other) + elif op is operators.or_: + return or_(expr, other) + else: + raise NotImplementedError() + + +def _scalar(expr, op, fn, **kw): + return fn(expr) + + +def _in_impl(expr, op, seq_or_selectable, negate_op, **kw): + seq_or_selectable = _clause_element_as_expr(seq_or_selectable) + + if isinstance(seq_or_selectable, ScalarSelect): + return _boolean_compare(expr, op, seq_or_selectable, + negate=negate_op) + elif isinstance(seq_or_selectable, SelectBase): + + # TODO: if we ever want to support (x, y, z) IN (select x, + # y, z from table), we would need a multi-column version of + # as_scalar() to produce a multi- column selectable that + # does not export itself as a FROM clause + + return _boolean_compare( + expr, op, seq_or_selectable.as_scalar(), + negate=negate_op, **kw) + elif isinstance(seq_or_selectable, (Selectable, TextClause)): + return _boolean_compare(expr, op, seq_or_selectable, + negate=negate_op, **kw) + elif isinstance(seq_or_selectable, ClauseElement): + raise exc.InvalidRequestError( + 'in_() accepts' + ' either a list of expressions ' + 'or a selectable: %r' % seq_or_selectable) + + # Handle non selectable arguments as sequences + args = [] + for o in seq_or_selectable: + if not _is_literal(o): + if not isinstance(o, operators.ColumnOperators): + raise exc.InvalidRequestError( + 'in_() accepts' + ' either a list of expressions ' + 'or a selectable: %r' % o) + elif o is None: + o = Null() else: - return expr._negate() + o = expr._bind_param(op, o) + args.append(o) + if len(args) == 0: - def _neg_impl(self, expr, op, **kw): - """See :meth:`.ColumnOperators.__neg__`.""" - return UnaryExpression(expr, operator=operators.neg) + # Special case handling for empty IN's, behave like + # comparison against zero row selectable. We use != to + # build the contradiction as it handles NULL values + # appropriately, i.e. "not (x IN ())" should not return NULL + # values for x. 
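
# --- Illustrative sketch, not part of the diff: the two behaviors the
# comments above describe, using a free-standing column() expression.
from sqlalchemy.sql import column

x = column('x')
print(x == None)   # takes the is_/isnot path: renders "x IS NULL"
print(x != False)  # renders "x != false", or "x != 0" without native booleans
print(x.in_([]))   # empty IN: emits the warning, then renders "x != x"
# --- end sketch
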
- def _match_impl(self, expr, op, other, **kw): - """See :meth:`.ColumnOperators.match`.""" - return self._boolean_compare( - expr, operators.match_op, - self._check_literal( - expr, operators.match_op, other), - **kw) - - def _distinct_impl(self, expr, op, **kw): - """See :meth:`.ColumnOperators.distinct`.""" - return UnaryExpression(expr, operator=operators.distinct_op, - type_=expr.type) - - def _between_impl(self, expr, op, cleft, cright, **kw): - """See :meth:`.ColumnOperators.between`.""" - return BinaryExpression( - expr, - ClauseList( - self._check_literal(expr, operators.and_, cleft), - self._check_literal(expr, operators.and_, cright), - operator=operators.and_, - group=False, group_contents=False), - op, - negate=operators.notbetween_op - if op is operators.between_op - else operators.between_op, - modifiers=kw) - - def _collate_impl(self, expr, op, other, **kw): - return collate(expr, other) - - # a mapping of operators with the method they use, along with - # their negated operator for comparison operators - operators = { - "and_": (_conjunction_operate,), - "or_": (_conjunction_operate,), - "inv": (_inv_impl,), - "add": (_binary_operate,), - "mul": (_binary_operate,), - "sub": (_binary_operate,), - "div": (_binary_operate,), - "mod": (_binary_operate,), - "truediv": (_binary_operate,), - "custom_op": (_binary_operate,), - "concat_op": (_binary_operate,), - "lt": (_boolean_compare, operators.ge), - "le": (_boolean_compare, operators.gt), - "ne": (_boolean_compare, operators.eq), - "gt": (_boolean_compare, operators.le), - "ge": (_boolean_compare, operators.lt), - "eq": (_boolean_compare, operators.ne), - "like_op": (_boolean_compare, operators.notlike_op), - "ilike_op": (_boolean_compare, operators.notilike_op), - "notlike_op": (_boolean_compare, operators.like_op), - "notilike_op": (_boolean_compare, operators.ilike_op), - "contains_op": (_boolean_compare, operators.notcontains_op), - "startswith_op": (_boolean_compare, operators.notstartswith_op), - "endswith_op": (_boolean_compare, operators.notendswith_op), - "desc_op": (_scalar, UnaryExpression._create_desc), - "asc_op": (_scalar, UnaryExpression._create_asc), - "nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst), - "nullslast_op": (_scalar, UnaryExpression._create_nullslast), - "in_op": (_in_impl, operators.notin_op), - "notin_op": (_in_impl, operators.in_op), - "is_": (_boolean_compare, operators.is_), - "isnot": (_boolean_compare, operators.isnot), - "collate": (_collate_impl,), - "match_op": (_match_impl,), - "distinct_op": (_distinct_impl,), - "between_op": (_between_impl, ), - "notbetween_op": (_between_impl, ), - "neg": (_neg_impl,), - "getitem": (_unsupported_impl,), - "lshift": (_unsupported_impl,), - "rshift": (_unsupported_impl,), - } - - def _check_literal(self, expr, operator, other): - if isinstance(other, (ColumnElement, TextClause)): - if isinstance(other, BindParameter) and \ - other.type._isnull: - other = other._clone() - other.type = expr.type - return other - elif hasattr(other, '__clause_element__'): - other = other.__clause_element__() - elif isinstance(other, type_api.TypeEngine.Comparator): - other = other.expr - - if isinstance(other, (SelectBase, Alias)): - return other.as_scalar() - elif not isinstance(other, (ColumnElement, TextClause)): - return expr._bind_param(operator, other) + util.warn('The IN-predicate on "%s" was invoked with an ' + 'empty sequence. This results in a ' + 'contradiction, which nonetheless can be ' + 'expensive to evaluate. 
Consider alternative ' + 'strategies for improved performance.' % expr) + if op is operators.in_op: + return expr != expr else: - return other + return expr == expr + + return _boolean_compare(expr, op, + ClauseList(*args).self_group(against=op), + negate=negate_op) + + +def _unsupported_impl(expr, op, *arg, **kw): + raise NotImplementedError("Operator '%s' is not supported on " + "this expression" % op.__name__) + + +def _inv_impl(expr, op, **kw): + """See :meth:`.ColumnOperators.__inv__`.""" + if hasattr(expr, 'negation_clause'): + return expr.negation_clause + else: + return expr._negate() + + +def _neg_impl(expr, op, **kw): + """See :meth:`.ColumnOperators.__neg__`.""" + return UnaryExpression(expr, operator=operators.neg) + + +def _match_impl(expr, op, other, **kw): + """See :meth:`.ColumnOperators.match`.""" + + return _boolean_compare( + expr, operators.match_op, + _check_literal( + expr, operators.match_op, other), + result_type=type_api.MATCHTYPE, + negate=operators.notmatch_op + if op is operators.match_op else operators.match_op, + **kw + ) + + +def _distinct_impl(expr, op, **kw): + """See :meth:`.ColumnOperators.distinct`.""" + return UnaryExpression(expr, operator=operators.distinct_op, + type_=expr.type) + + +def _between_impl(expr, op, cleft, cright, **kw): + """See :meth:`.ColumnOperators.between`.""" + return BinaryExpression( + expr, + ClauseList( + _check_literal(expr, operators.and_, cleft), + _check_literal(expr, operators.and_, cright), + operator=operators.and_, + group=False, group_contents=False), + op, + negate=operators.notbetween_op + if op is operators.between_op + else operators.between_op, + modifiers=kw) + + +def _collate_impl(expr, op, other, **kw): + return collate(expr, other) + +# a mapping of operators with the method they use, along with +# their negated operator for comparison operators +operator_lookup = { + "and_": (_conjunction_operate,), + "or_": (_conjunction_operate,), + "inv": (_inv_impl,), + "add": (_binary_operate,), + "mul": (_binary_operate,), + "sub": (_binary_operate,), + "div": (_binary_operate,), + "mod": (_binary_operate,), + "truediv": (_binary_operate,), + "custom_op": (_binary_operate,), + "concat_op": (_binary_operate,), + "lt": (_boolean_compare, operators.ge), + "le": (_boolean_compare, operators.gt), + "ne": (_boolean_compare, operators.eq), + "gt": (_boolean_compare, operators.le), + "ge": (_boolean_compare, operators.lt), + "eq": (_boolean_compare, operators.ne), + "like_op": (_boolean_compare, operators.notlike_op), + "ilike_op": (_boolean_compare, operators.notilike_op), + "notlike_op": (_boolean_compare, operators.like_op), + "notilike_op": (_boolean_compare, operators.ilike_op), + "contains_op": (_boolean_compare, operators.notcontains_op), + "startswith_op": (_boolean_compare, operators.notstartswith_op), + "endswith_op": (_boolean_compare, operators.notendswith_op), + "desc_op": (_scalar, UnaryExpression._create_desc), + "asc_op": (_scalar, UnaryExpression._create_asc), + "nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst), + "nullslast_op": (_scalar, UnaryExpression._create_nullslast), + "in_op": (_in_impl, operators.notin_op), + "notin_op": (_in_impl, operators.in_op), + "is_": (_boolean_compare, operators.is_), + "isnot": (_boolean_compare, operators.isnot), + "collate": (_collate_impl,), + "match_op": (_match_impl,), + "notmatch_op": (_match_impl,), + "distinct_op": (_distinct_impl,), + "between_op": (_between_impl, ), + "notbetween_op": (_between_impl, ), + "neg": (_neg_impl,), + "getitem": 
(_unsupported_impl,), + "lshift": (_unsupported_impl,), + "rshift": (_unsupported_impl,), + "contains": (_unsupported_impl,), +} + + +def _check_literal(expr, operator, other): + if isinstance(other, (ColumnElement, TextClause)): + if isinstance(other, BindParameter) and \ + other.type._isnull: + other = other._clone() + other.type = expr.type + return other + elif hasattr(other, '__clause_element__'): + other = other.__clause_element__() + elif isinstance(other, type_api.TypeEngine.Comparator): + other = other.expr + + if isinstance(other, (SelectBase, Alias)): + return other.as_scalar() + elif not isinstance(other, (ColumnElement, TextClause)): + return expr._bind_param(operator, other) + else: + return other + diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/dml.py b/lib/python3.5/site-packages/sqlalchemy/sql/dml.py index f7e033d..7b506f9 100644 --- a/lib/python3.5/site-packages/sqlalchemy/sql/dml.py +++ b/lib/python3.5/site-packages/sqlalchemy/sql/dml.py @@ -1,5 +1,5 @@ # sql/dml.py -# Copyright (C) 2009-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -10,7 +10,8 @@ Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`. """ from .base import Executable, _generative, _from_objects, DialectKWArgs -from .elements import ClauseElement, _literal_as_text, Null, and_, _clone +from .elements import ClauseElement, _literal_as_text, Null, and_, _clone, \ + _column_as_key from .selectable import _interpret_as_from, _interpret_as_select, HasPrefixes from .. import util from .. import exc @@ -26,6 +27,7 @@ class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement): _execution_options = \ Executable._execution_options.union({'autocommit': True}) _hints = util.immutabledict() + _parameter_ordering = None _prefixes = () def _process_colparams(self, parameters): @@ -38,6 +40,16 @@ class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement): else: return p + if self._preserve_parameter_order and parameters is not None: + if not isinstance(parameters, list) or \ + (parameters and not isinstance(parameters[0], tuple)): + raise ValueError( + "When preserve_parameter_order is True, " + "values() only accepts a list of 2-tuples") + self._parameter_ordering = [key for key, value in parameters] + + return dict(parameters), False + if (isinstance(parameters, (list, tuple)) and parameters and isinstance(parameters[0], (list, tuple, dict))): @@ -177,6 +189,7 @@ class ValuesBase(UpdateBase): _supports_multi_parameters = False _has_multi_parameters = False + _preserve_parameter_order = False select = None def __init__(self, table, values, prefixes): @@ -213,23 +226,32 @@ class ValuesBase(UpdateBase): users.update().where(users.c.id==5).values(name="some name") - :param \*args: Alternatively, a dictionary, tuple or list - of dictionaries or tuples can be passed as a single positional - argument in order to form the VALUES or - SET clause of the statement. The single dictionary form - works the same as the kwargs form:: + :param \*args: As an alternative to passing key/value parameters, + a dictionary, tuple, or list of dictionaries or tuples can be passed + as a single positional argument in order to form the VALUES or + SET clause of the statement. The forms that are accepted vary + based on whether this is an :class:`.Insert` or an :class:`.Update` + construct. 
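
# --- Sketch of an assumption, not shown in this diff: how the new
# module-level operator_lookup table above is consumed.  This mirrors the
# dispatch the removed _DefaultColumnComparator.operate() performed with
# its class-level "operators" dict; the consumer below is a hypothetical
# stand-in for the type Comparator.
def operate(comparator, op, *other, **kwargs):
    # e.g. op.__name__ == "eq" maps to (_boolean_compare, operators.ne);
    # trailing tuple members supply the negation operator argument.
    impl = operator_lookup[op.__name__]
    return impl[0](comparator.expr, op, *(other + impl[1:]), **kwargs)
# --- end sketch
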
+ + For either an :class:`.Insert` or :class:`.Update` construct, a + single dictionary can be passed, which works the same as that of + the kwargs form:: users.insert().values({"name": "some name"}) - If a tuple is passed, the tuple should contain the same number - of columns as the target :class:`.Table`:: + users.update().values({"name": "some new name"}) + + Also for either form but more typically for the :class:`.Insert` + construct, a tuple that contains an entry for every column in the + table is also accepted:: users.insert().values((5, "some name")) - The :class:`.Insert` construct also supports multiply-rendered VALUES - construct, for those backends which support this SQL syntax - (SQLite, Postgresql, MySQL). This mode is indicated by passing a - list of one or more dictionaries/tuples:: + The :class:`.Insert` construct also supports being passed a list + of dictionaries or full-table-tuples, which on the server will + render the less common SQL syntax of "multiple values" - this + syntax is supported on backends such as SQLite, Postgresql, MySQL, + but not necessarily others:: users.insert().values([ {"name": "some name"}, @@ -237,38 +259,61 @@ class ValuesBase(UpdateBase): {"name": "yet another name"}, ]) - In the case of an :class:`.Update` - construct, only the single dictionary/tuple form is accepted, - else an exception is raised. It is also an exception case to - attempt to mix the single-/multiple- value styles together, - either through multiple :meth:`.ValuesBase.values` calls - or by sending a list + kwargs at the same time. + The above form would render a multiple VALUES statement similar to:: - .. note:: + INSERT INTO users (name) VALUES + (:name_1), + (:name_2), + (:name_3) - Passing a multiple values list is *not* the same - as passing a multiple values list to the - :meth:`.Connection.execute` method. Passing a list of parameter - sets to :meth:`.ValuesBase.values` produces a construct of this - form:: + It is essential to note that **passing multiple values is + NOT the same as using traditional executemany() form**. The above + syntax is a **special** syntax not typically used. To emit an + INSERT statement against multiple rows, the normal method is + to pass a multiple values list to the :meth:`.Connection.execute` + method, which is supported by all database backends and is generally + more efficient for a very large number of parameters. - INSERT INTO table (col1, col2, col3) VALUES - (col1_0, col2_0, col3_0), - (col1_1, col2_1, col3_1), - ... + .. seealso:: - whereas a multiple list passed to :meth:`.Connection.execute` - has the effect of using the DBAPI - `executemany() `_ - method, which provides a high-performance system of invoking - a single-row INSERT statement many times against a series - of parameter sets. The "executemany" style is supported by - all database backends, as it does not depend on a special SQL - syntax. + :ref:`execute_multiple` - an introduction to + the traditional Core method of multiple parameter set + invocation for INSERTs and other statements. - .. versionadded:: 0.8 - Support for multiple-VALUES INSERT statements. + .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES + clause, even a list of length one, + implies that the :paramref:`.Insert.inline` flag is set to + True, indicating that the statement will not attempt to fetch + the "last inserted primary key" or other defaults. The + statement deals with an arbitrary number of rows, so the + :attr:`.ResultProxy.inserted_primary_key` accessor does not + apply. 
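
# --- Illustrative contrast, not part of the diff, for the distinction
# drawn above between a multiple-VALUES clause and executemany.
from sqlalchemy import MetaData, Table, Column, Integer, String

users = Table('users', MetaData(),
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))

# 1) multiple-VALUES clause, rendered into the SQL string itself:
print(users.insert().values([{"name": "a"}, {"name": "b"}]))
# roughly: INSERT INTO users (name) VALUES (:name_0), (:name_1)

# 2) traditional executemany: one INSERT statement reused for many
#    parameter sets (assuming "conn" is a Connection), generally the
#    more efficient route for large batches:
# conn.execute(users.insert(), [{"name": "a"}, {"name": "b"}])
# --- end sketch
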
+ .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports + columns with Python side default values and callables in the + same way as that of an "executemany" style of invocation; the + callable is invoked for each row. See :ref:`bug_3288` + for other details. + + The :class:`.Update` construct supports a special form which is a + list of 2-tuples, which when provided must be passed in conjunction + with the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + parameter. + This form causes the UPDATE statement to render the SET clauses + using the order of parameters given to :meth:`.Update.values`, rather + than the ordering of columns given in the :class:`.Table`. + + .. versionadded:: 1.0.10 - added support for parameter-ordered + UPDATE statements via the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + flag. + + .. seealso:: + + :ref:`updates_order_parameters` - full example of the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` + flag .. seealso:: @@ -358,7 +403,7 @@ class ValuesBase(UpdateBase): SELECT, multi-valued VALUES clause), :meth:`.ValuesBase.return_defaults` is intended only for an "ORM-style" single-row INSERT/UPDATE statement. The row returned - by the statement is also consumed implcitly when + by the statement is also consumed implicitly when :meth:`.ValuesBase.return_defaults` is used. By contrast, :meth:`.UpdateBase.returning` leaves the RETURNING result-set intact with a collection of any number of rows. @@ -380,7 +425,7 @@ class ValuesBase(UpdateBase): :func:`.mapper`. :param cols: optional list of column key names or :class:`.Column` - objects. If omitted, all column expressions evaulated on the server + objects. If omitted, all column expressions evaluated on the server are added to the returning list. .. versionadded:: 0.9.0 @@ -434,8 +479,13 @@ class Insert(ValuesBase): dynamically render the VALUES clause at execution time based on the parameters passed to :meth:`.Connection.execute`. - :param inline: if True, SQL defaults will be compiled 'inline' into - the statement and not pre-executed. + :param inline: if True, no attempt will be made to retrieve the + SQL-generated default values to be provided within the statement; + in particular, + this allows SQL expressions to be rendered 'inline' within the + statement without the need to pre-execute them beforehand; for + backends that support "returning", this turns off the "implicit + returning" feature for the statement. If both `values` and compile-time bind parameters are present, the compile-time bind parameters override the information specified @@ -463,6 +513,7 @@ class Insert(ValuesBase): ValuesBase.__init__(self, table, values, prefixes) self._bind = bind self.select = self.select_names = None + self.include_insert_from_select_defaults = False self.inline = inline self._returning = returning self._validate_dialect_kwargs(dialect_kw) @@ -475,7 +526,7 @@ class Insert(ValuesBase): return () @_generative - def from_select(self, names, select): + def from_select(self, names, select, include_defaults=True): """Return a new :class:`.Insert` construct which represents an ``INSERT...FROM SELECT`` statement. @@ -494,25 +545,28 @@ class Insert(ValuesBase): is not checked before passing along to the database, the database would normally raise an exception if these column lists don't correspond. 
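
# --- Usage sketch for from_select(), adapted from the example carried by
# the removed docstring lines just below; table1/table2 are hypothetical.
from sqlalchemy import MetaData, Table, Column, Integer, select

md = MetaData()
table1 = Table('table1', md, Column('a', Integer), Column('b', Integer),
               Column('c', Integer))
table2 = Table('table2', md, Column('a', Integer), Column('b', Integer))

sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
ins = table2.insert().from_select(['a', 'b'], sel)
print(ins)
# INSERT INTO table2 (a, b) SELECT table1.a, table1.b
# FROM table1 WHERE table1.c > :c_1
# --- end sketch
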
+ :param include_defaults: if True, non-server default values and + SQL expressions as specified on :class:`.Column` objects + (as documented in :ref:`metadata_defaults_toplevel`) not + otherwise specified in the list of names will be rendered + into the INSERT and SELECT statements, so that these values are also + included in the data to be inserted. - .. note:: + .. note:: A Python-side default that uses a Python callable function + will only be invoked **once** for the whole statement, and **not + per row**. - Depending on backend, it may be necessary for the :class:`.Insert` - statement to be constructed using the ``inline=True`` flag; this - flag will prevent the implicit usage of ``RETURNING`` when the - ``INSERT`` statement is rendered, which isn't supported on a - backend such as Oracle in conjunction with an ``INSERT..SELECT`` - combination:: + .. versionadded:: 1.0.0 - :meth:`.Insert.from_select` now renders + Python-side and SQL expression column defaults into the + SELECT statement for columns otherwise not included in the + list of column names. - sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5) - ins = table2.insert(inline=True).from_select(['a', 'b'], sel) - - .. note:: - - A SELECT..INSERT construct in SQL has no VALUES clause. Therefore - :class:`.Column` objects which utilize Python-side defaults - (e.g. as described at :ref:`metadata_defaults_toplevel`) - will **not** take effect when using :meth:`.Insert.from_select`. + .. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT + implies that the :paramref:`.insert.inline` flag is set to + True, indicating that the statement will not attempt to fetch + the "last inserted primary key" or other defaults. The statement + deals with an arbitrary number of rows, so the + :attr:`.ResultProxy.inserted_primary_key` accessor does not apply. .. versionadded:: 0.8.3 @@ -522,9 +576,12 @@ class Insert(ValuesBase): "This construct already inserts value expressions") self.parameters, self._has_multi_parameters = \ - self._process_colparams(dict((n, Null()) for n in names)) + self._process_colparams( + dict((_column_as_key(n), Null()) for n in names)) self.select_names = names + self.inline = True + self.include_insert_from_select_defaults = include_defaults self.select = _interpret_as_select(select) def _copy_internals(self, clone=_clone, **kw): @@ -552,6 +609,7 @@ class Update(ValuesBase): prefixes=None, returning=None, return_defaults=False, + preserve_parameter_order=False, **dialect_kw): """Construct an :class:`.Update` object. @@ -614,6 +672,19 @@ class Update(ValuesBase): be available in the dictionary returned from :meth:`.ResultProxy.last_updated_params`. + :param preserve_parameter_order: if True, the update statement is + expected to receive parameters **only** via the :meth:`.Update.values` + method, and they must be passed as a Python ``list`` of 2-tuples. + The rendered UPDATE statement will emit the SET clause for each + referenced column maintaining this order. + + .. versionadded:: 1.0.10 + + .. seealso:: + + :ref:`updates_order_parameters` - full example of the + :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag + If both ``values`` and compile-time bind parameters are present, the compile-time bind parameters override the information specified within ``values`` on a per-key basis. 
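
# --- Usage sketch, not part of the diff, for the preserve_parameter_order
# flag documented above; "t" is a hypothetical table.
from sqlalchemy import MetaData, Table, Column, Integer

t = Table('t', MetaData(), Column('x', Integer), Column('y', Integer))

stmt = t.update(preserve_parameter_order=True).values(
    [(t.c.y, 20), (t.c.x, t.c.y + 10)])
print(stmt)
# roughly: UPDATE t SET y=:y, x=y + :y_1  -- SET clauses in the given order
# --- end sketch
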
@@ -655,6 +726,7 @@ class Update(ValuesBase): """ + self._preserve_parameter_order = preserve_parameter_order ValuesBase.__init__(self, table, values, prefixes) self._bind = bind self._returning = returning @@ -728,10 +800,10 @@ class Delete(UpdateBase): :meth:`~.TableClause.delete` method on :class:`~.schema.Table`. - :param table: The table to be updated. + :param table: The table to delete rows from. :param whereclause: A :class:`.ClauseElement` describing the ``WHERE`` - condition of the ``UPDATE`` statement. Note that the + condition of the ``DELETE`` statement. Note that the :meth:`~Delete.where()` generative method may be used instead. .. seealso:: diff --git a/lib/python3.5/site-packages/sqlalchemy/sql/elements.py b/lib/python3.5/site-packages/sqlalchemy/sql/elements.py index 6114460..67957a1 100644 --- a/lib/python3.5/site-packages/sqlalchemy/sql/elements.py +++ b/lib/python3.5/site-packages/sqlalchemy/sql/elements.py @@ -1,5 +1,5 @@ # sql/elements.py -# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under @@ -19,7 +19,8 @@ from .visitors import Visitable, cloned_traverse, traverse from .annotation import Annotated import itertools from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG -from .base import _generative, Generative +from .base import _generative +import numbers import re import operator @@ -227,6 +228,7 @@ class ClauseElement(Visitable): is_selectable = False is_clause_element = True + description = None _order_by_label_element = None _is_from_container = False @@ -539,7 +541,7 @@ class ClauseElement(Visitable): __nonzero__ = __bool__ def __repr__(self): - friendly = getattr(self, 'description', None) + friendly = self.description if friendly is None: return object.__repr__(self) else: @@ -624,8 +626,73 @@ class ColumnElement(operators.ColumnOperators, ClauseElement): __visit_name__ = 'column' primary_key = False foreign_keys = [] + _label = None - _key_label = key = None + """The named label that can be used to target + this column in a result set. + + This label is almost always the label used when + rendering AS