From 4f40925785a0db114baa042f512279ef44326d26 Mon Sep 17 00:00:00 2001
From: Alex Gaynor
Date: Sat, 21 Nov 2009 07:03:40 +0000
Subject: [PATCH] [soc2009/multidb] Split SQL construction into two separate
 classes, the Query class which stores data about a query being constructed,
 and a Compiler class which generates SQL.

git-svn-id: http://code.djangoproject.com/svn/django/branches/soc2009/multidb@11759 bcc190cf-cafb-0310-a4f2-bffc1f526a37
---
 TODO                                          |  43 +
 django/conf/locale/he/LC_MESSAGES/django.mo   | Bin 72623 -> 72636 bytes
 django/conf/locale/pl/LC_MESSAGES/django.mo   | Bin 65771 -> 66081 bytes
 django/contrib/gis/db/models/sql/query.py     |   4 +-
 django/contrib/gis/db/models/sql/where.py     |   4 +-
 django/contrib/gis/tests/relatedapp/tests.py  |   7 +-
 django/core/management/commands/syncdb.py     |  51 +-
 django/db/backends/__init__.py                |  15 +-
 django/db/backends/creation.py                |   2 +-
 django/db/backends/oracle/base.py             |  60 +-
 django/db/backends/oracle/compiler.py         |  66 ++
 django/db/backends/oracle/query.py            | 140 ---
 django/db/backends/postgresql/operations.py   |   1 +
 django/db/models/aggregates.py                |   3 -
 django/db/models/fields/__init__.py           |   4 +-
 django/db/models/fields/related.py            |   7 +-
 django/db/models/query.py                     |  74 +-
 django/db/models/sql/compiler.py              | 902 ++++++++++++++++++
 django/db/models/sql/query.py                 | 743 +--------------
 django/db/models/sql/subqueries.py            | 258 +----
 django/db/models/sql/where.py                 |   5 +
 django/db/utils.py                            |   3 +-
 django/forms/models.py                        |   4 +-
 docs/ref/databases.txt                        |   1 +
 docs/ref/django-admin.txt                     |   8 +
 docs/ref/settings.txt                         |   5 +-
 tests/modeltests/delete/models.py             |   4 +-
 tests/modeltests/many_to_one/models.py        |   2 +-
 tests/modeltests/proxy_models/models.py       |   5 +-
 .../aggregation_regress/models.py             |   5 +-
 tests/regressiontests/backends/tests.py       |   4 +-
 .../model_inheritance_regress/models.py       |   3 +-
 .../multiple_database/tests.py                |   2 -
 tests/regressiontests/queries/models.py       |  15 +-
 34 files changed, 1231 insertions(+), 1219 deletions(-)
 create mode 100644 TODO
 create mode 100644 django/db/backends/oracle/compiler.py
 delete mode 100644 django/db/backends/oracle/query.py
 create mode 100644 django/db/models/sql/compiler.py

diff --git a/TODO b/TODO
new file mode 100644
index 0000000000..c2118db473
--- /dev/null
+++ b/TODO
@@ -0,0 +1,43 @@
+Django Multiple Database TODO List
+==================================
+
+Required for v1.2
+~~~~~~~~~~~~~~~~~
+
+ * Finalize the sql.Query internals
+   * Clean up the use of db.backend.query_class()
+   * Verify it still works with GeoDjango
+ * Resolve internal uses of multidb interface
+   * Update database backend for session store to use Multidb
+   * Check default Site creation behavior
+ * Resolve the public facing UI issues around using multi-db
+   * Should we take the opportunity to modify DB backends to use fully qualified paths?
+   * Meta.using? Is it still required/desirable?
+ * syncdb
+   * Add --exclude/--include argument? (not sure this approach will work due to flush)
+   * Flush - which models are flushed?
+   * Fixture loading over multiple DBs
+ * Testing infrastructure
+   * Most tests don't need multidb. Some absolutely require it, but only to prove you
+     can write to a different db. Second DB could be a SQLite temp file. Need to have
+     test infrastructure to allow creation of the temp database.
+ * Cleanup of new API entry points
+   * validate() on a field
+     * name/purpose clash with Honza?
+     * any overlap with existing methods?
+   * Accessing _using in BaseModelFormSet.
+
+Optional for v1.2
+~~~~~~~~~~~~~~~~~
+
+These are the next layer of UI. We can deliver for v1.2 without these if necessary.
+
+ * Technique for determining using() at runtime (by callback?)
+ * Sticky models
+ * Related objects
+   * saving and deleting
+   * default or an option
+ * Sample docs for how to do:
+   * master/slave
+   * Sharding
+ * Test protection against cross-database joins.
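As a rough sketch of the workflow the TODO above is aiming at, the snippet below builds the schema and loads fixtures on a second, explicitly named database and then reads from it with QuerySet.using(). The 'legacy' alias and the Author model are assumptions for illustration; only the --database option on syncdb, the database argument to loaddata, and QuerySet.using() come from this branch::

    from django.core.management import call_command

    from myapp.models import Author  # hypothetical model

    # Create tables and load initial data on a named connection
    # ('legacy' is an assumed alias from the project's settings).
    call_command('syncdb', database='legacy', interactive=False)
    call_command('loaddata', 'initial_data', database='legacy')

    # Route a read to that same connection at runtime.
    authors = Author.objects.using('legacy').all()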

diff --git a/django/conf/locale/he/LC_MESSAGES/django.mo b/django/conf/locale/he/LC_MESSAGES/django.mo
index 8d8e328203ec7649df88ecafb612bc59b30141b4..9a3e69c5c1116f9ff9e506bb1c1124b8cbb23d9f 100644
GIT binary patch
[binary delta data omitted]

[binary patch for django/conf/locale/pl/LC_MESSAGES/django.mo (Bin 65771 -> 66081 bytes) omitted]
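The code changes that follow implement the split described in the commit message: sql.Query now only holds the state of a query, while SQL generation happens in a compiler obtained from it for a particular connection. A minimal sketch of the new flow, assuming a hypothetical Book model and the 'default' connection alias::

    from django.db import connections
    from django.db.models.sql import Query

    from myapp.models import Book  # hypothetical model

    query = Query(Book)                             # stores query state; no connection attached
    compiler = query.get_compiler(using='default')  # binds the query to one database
    sql, params = compiler.as_sql()                 # SQL generation now lives on the compiler

    # Backends hand out their compiler classes by name; compiler_module points at
    # django.db.models.sql.compiler by default and at a backend-specific module
    # (e.g. django.db.backends.oracle.compiler) when the backend overrides it.
    compiler_cls = connections['default'].ops.compiler('SQLCompiler')

In ordinary use the ORM performs these steps itself whenever a QuerySet is evaluated; the sketch only makes the new division of labour visible.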
zz91h=pOd(ox=IS#waA#Ew$0 z>pZD5?b~7q&LBR4R9BUDwZgX4wZJv>U4)-g)|HR8?tc6+nF_xAUB8nmk^hkfvBdS& zkRJOHdrkfsq0dRviCrS5>j4IPa?W22zoz^XsUNWf)c+Lf8}fPR^F3o+C%>BfKGO13 z&hzKmN&49YzBR#gf${WcarQHYk18o-*dr5g6sSYXS z`iJ^@lw$}y^WyyH5ok!dOK>MvBfWbyV6bnA=_kn-qz}ojr|k-?Wes=HCYe|qmbKV? z>dz9(PplK^6!EI4>os*PNekRGs6a+nZPH{?8)EZG?^}Et_94ENn6B!SYcbGTQaj2Y z;|S_9lJs4yYbxnuQYvEi72vu_zsZz4(!MkKsiaJM_+y__?^;iONmBXNS(3tA`?#-D z`h=vbD&2O_trq2Q@~6pvV|9^O-txtXr(A1^WhE^kHU=}$??dv9QySo(q>|KKz)aLV zpv^hT#8!_}cY zg0#ol*20IRqSSp!d_1Wok2;3qaB)^G@4mg;)Fp{ndl<#4B+US~3 z{3gzzypOtMQdeSGupmiSF>AZn##oMNtWNd)$^V~ErtfG_(>l!~A4Vhntkbp2!S%%I z^$*%hNEL1H64d9SE`oAKnwmvB=B6fnLt1}a&ToIPZPV4~r^t5T_=8xVs=q%hr z<&UJ1#B*9_AM(D|<~*^UmOpIu@0-6{T=TrOyf1YV=pRGURhal4C+k{Cox3-Gq#@~V zKpRQ9NS7$5T-)jRI;DVrGC(?Fl}Q!J?SLl{F^MdKs}m`I8ty`)^o7B`aoOxkS23Z!M!rzH)d-iPu!Qe(=xej%35@&oCU za!n)C+T#DcfBJ?fK!?Am{0dW3&V?!0bjoAsw3yg3(n*rW2;z@l$#14U)*5dh-_`QX zsQ;JxI4j?xJd|=)+UR2cykZ2_k#zONZ>T(q2k{?LlIlpUNX5u{V`uD&k*I4s?LHv& zXM&xWNSZ@@2yNOB)Abdx-JTquia5oa`9HBh3fah9u@2W!E9^qLOM3Te#$a`*Z%L{` z{x~V+Do_3v6R&2ckj>Z+Jc%KfV$v4Lgm~tJnPB$#j#Ec?d zl9Z43nMo&!m!=$T?FU%9U+^OJDOU}~9Rm->+>`uXyg_-e78iq5)vQ7%X-O+FG=5nn^v z>#24AI01JVXD(?lbxly$ucX`LTNBgONuU4mWJ=g1UR1`D&XB(LG~o+{d?5M9bZSg0 zLh43pL~Jb{Ch3}rLrDV}>l)=An3=v`d20FbPkANfJysr0`BUBhJal?Rnn%SB8nmOL zJt>sfDbhGnAm!$y|1R=z#FyeWQW6zkVN=vky7P>qt33G*w9{4C;lDp05%1&q{YRtk z375hElCC*8%gMTaX0WC-ZfkWVExwd^BVwsYL)4yfIof?meH8ic$XCU7Sd>(flycR# zynD1|Ceg4iX(|IPz=E{QVRe@&r(CTqUkpo7UzT))KDsiIZ(;TM$v?FG08F614Cb=7 zo(>#b?be?E)k#LeAJb(dsS^1H)PF|49$m&#o=bTRX$5IF@$tk)lh;+2d|%QI;`^PD z=gQxkv67)4Q6f6{YLj=w)*JyQMF{BhBOBB$kF;YZf7G=uFX|BaP@V9*eft{&74 zOimq9(6uupUs7O)Qb~t8bn)+>_;E~pV$bNP#F%bj$wfPM@X6M@ zH<94p3Eg7)MkV%4h!6gxe@y?F^-lf{>!@tl&f00eAVCz<*Qc?Em^KY@-NHFrAZ#Mrdq0`tLv5~ja|PbeTS&H zgdPb4qT&aICD-1t-6v0zgt(Yqu~9Zpbg!Plu?bp!RNujal3#5u?ByHWEumXfe9x%l z>|3IIl5TGcN*=R)WI%GEBl*3PLyzC{Eglp9zkPz^d&UGOL=TRM?cXaVI6f*kDmFT% zPhwPbul=)=b6;5OllY0W``2mj diff --git a/django/contrib/gis/db/models/sql/query.py b/django/contrib/gis/db/models/sql/query.py index 1691637c1e..4b34c20878 100644 --- a/django/contrib/gis/db/models/sql/query.py +++ b/django/contrib/gis/db/models/sql/query.py @@ -98,7 +98,7 @@ class GeoQuery(sql.Query): aliases.add(r) col_aliases.add(col[1]) else: - result.append(col.as_sql(quote_func=qn)) + result.append(col.as_sql(qn=qn)) if hasattr(col, 'alias'): aliases.add(col.alias) @@ -112,7 +112,7 @@ class GeoQuery(sql.Query): result.extend([ '%s%s' % ( - self.get_extra_select_format(alias) % aggregate.as_sql(quote_func=qn), + self.get_extra_select_format(alias) % aggregate.as_sql(qn=qn, connection=self.connection), alias is not None and ' AS %s' % alias or '' ) for alias, aggregate in self.aggregate_select.items() diff --git a/django/contrib/gis/db/models/sql/where.py b/django/contrib/gis/db/models/sql/where.py index 105cbfbec5..0b8101300c 100644 --- a/django/contrib/gis/db/models/sql/where.py +++ b/django/contrib/gis/db/models/sql/where.py @@ -78,7 +78,7 @@ class GeoWhereNode(WhereNode): annotation = GeoAnnotation(field, value, where) return super(WhereNode, self).add(((obj.alias, col, field.db_type()), lookup_type, annotation, params), connector) - def make_atom(self, child, qn): + def make_atom(self, child, qn, connection): obj, lookup_type, value_annot, params = child if isinstance(value_annot, GeoAnnotation): @@ -94,7 +94,7 @@ class GeoWhereNode(WhereNode): else: # If not a GeometryField, call the `make_atom` from the # base class. 
- return super(GeoWhereNode, self).make_atom(child, qn) + return super(GeoWhereNode, self).make_atom(child, qn, connection) @classmethod def _check_geo_field(cls, opts, lookup): diff --git a/django/contrib/gis/tests/relatedapp/tests.py b/django/contrib/gis/tests/relatedapp/tests.py index 0e905274d4..2bdf29304d 100644 --- a/django/contrib/gis/tests/relatedapp/tests.py +++ b/django/contrib/gis/tests/relatedapp/tests.py @@ -279,11 +279,11 @@ class RelatedGeoModelTest(unittest.TestCase): def test14_collect(self): "Testing the `collect` GeoQuerySet method and `Collect` aggregate." # Reference query: - # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN - # "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id") + # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN + # "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id") # WHERE "relatedapp_city"."state" = 'TX'; ref_geom = fromstr('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)') - + c1 = City.objects.filter(state='TX').collect(field_name='location__point') c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect'] @@ -293,6 +293,7 @@ class RelatedGeoModelTest(unittest.TestCase): self.assertEqual(4, len(coll)) self.assertEqual(ref_geom, coll) + # TODO: Related tests for KML, GML, and distance lookups. def suite(): diff --git a/django/core/management/commands/syncdb.py b/django/core/management/commands/syncdb.py index f4b358887d..b82753cbf6 100644 --- a/django/core/management/commands/syncdb.py +++ b/django/core/management/commands/syncdb.py @@ -5,21 +5,17 @@ from django.conf import settings from django.core.management.base import NoArgsCommand from django.core.management.color import no_style from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal -from django.db import connections, transaction, models +from django.db import connections, transaction, models, DEFAULT_DB_ALIAS from django.utils.importlib import import_module -try: - set -except NameError: - from sets import Set as set # Python 2.3 fallback class Command(NoArgsCommand): option_list = NoArgsCommand.option_list + ( make_option('--noinput', action='store_false', dest='interactive', default=True, help='Tells Django to NOT prompt the user for input of any kind.'), make_option('--database', action='store', dest='database', - default='', help='Nominates a database to sync. Defaults to the ' - '"default" database.'), + default=DEFAULT_DB_ALIAS, help='Nominates a database to sync. ' + 'Defaults to the "default" database.'), ) help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created." @@ -30,8 +26,6 @@ class Command(NoArgsCommand): show_traceback = options.get('traceback', False) self.style = no_style() - - connection = connections[options["database"]] # Import the 'management' module within each installed app, to register # dispatcher events. @@ -52,6 +46,8 @@ class Command(NoArgsCommand): if not msg.startswith('No module named') or 'management' not in msg: raise + db = options['database'] + connection = connections[db] cursor = connection.cursor() # Get a list of already installed *models* so that references work right. 
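The hunk above and the ones below tie every database operation in syncdb to an explicit alias: the connection is looked up via connections[db] and the transaction helpers all receive using=db. A condensed sketch of that pattern (the helper name and the statement list are hypothetical)::

    from django.db import connections, transaction, DEFAULT_DB_ALIAS

    def run_statements(statements, using=DEFAULT_DB_ALIAS):
        # Hypothetical helper: the cursor and the commit/rollback calls are all
        # bound to the same explicitly named database alias.
        connection = connections[using]
        cursor = connection.cursor()
        try:
            for sql in statements:
                cursor.execute(sql)
        except Exception:
            transaction.rollback_unless_managed(using=using)
            raise
        else:
            transaction.commit_unless_managed(using=using)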
@@ -88,11 +84,11 @@ class Command(NoArgsCommand): tables.append(connection.introspection.table_name_converter(model._meta.db_table)) - transaction.commit_unless_managed() + transaction.commit_unless_managed(using=db) # Send the post_syncdb signal, so individual apps can do whatever they need # to do at this point. - emit_post_sync_signal(created_models, verbosity, interactive) + emit_post_sync_signal(created_models, verbosity, interactive, db) # The connection may have been closed by a syncdb handler. cursor = connection.cursor() @@ -103,7 +99,7 @@ class Command(NoArgsCommand): app_name = app.__name__.split('.')[-2] for model in models.get_models(app): if model in created_models: - custom_sql = custom_sql_for_model(model, self.style) + custom_sql = custom_sql_for_model(model, self.style, connection) if custom_sql: if verbosity >= 1: print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name) @@ -116,9 +112,9 @@ class Command(NoArgsCommand): if show_traceback: import traceback traceback.print_exc() - transaction.rollback_unless_managed() + transaction.rollback_unless_managed(using=db) else: - transaction.commit_unless_managed() + transaction.commit_unless_managed(using=db) else: if verbosity >= 2: print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name) @@ -137,28 +133,9 @@ class Command(NoArgsCommand): except Exception, e: sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \ (app_name, model._meta.object_name, e)) - transaction.rollback_unless_managed() + transaction.rollback_unless_managed(using=db) else: - if verbosity >= 2: - print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name) - # Install SQL indicies for all newly created models - for app in models.get_apps(): - app_name = app.__name__.split('.')[-2] - for model in models.get_models(app): - if model in created_models: - index_sql = connection.creation.sql_indexes_for_model(model, self.style) - if index_sql: - if verbosity >= 1: - print "Installing index for %s.%s model" % (app_name, model._meta.object_name) - try: - for sql in index_sql: - cursor.execute(sql) - except Exception, e: - sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \ - (app_name, model._meta.object_name, e)) - transaction.rollback_unless_managed(using=db) - else: - transaction.commit_unless_managed(using=db) + transaction.commit_unless_managed(using=db) - from django.core.management import call_command - call_command('loaddata', 'initial_data', verbosity=verbosity, database=db) + from django.core.management import call_command + call_command('loaddata', 'initial_data', verbosity=verbosity, database=db) diff --git a/django/db/backends/__init__.py b/django/db/backends/__init__.py index 4aa04da321..f71016b5ca 100644 --- a/django/db/backends/__init__.py +++ b/django/db/backends/__init__.py @@ -18,12 +18,14 @@ except ImportError: from django.db.backends import util from django.utils import datetime_safe +from django.utils.importlib import import_module class BaseDatabaseWrapper(local): """ Represents a database connection. """ ops = None + def __init__(self, settings_dict): # `settings_dict` should be a dictionary containing keys such as # DATABASE_NAME, DATABASE_USER, etc. It's called `settings_dict` @@ -114,8 +116,9 @@ class BaseDatabaseOperations(object): a backend performs ordering or calculates the ID of a recently-inserted row. 
""" + compiler_module = "django.db.models.sql.compiler" + def __init__(self): - # this cache is used for backends that provide custom Queyr classes self._cache = {} def autoinc_sql(self, table, column): @@ -280,15 +283,17 @@ class BaseDatabaseOperations(object): """ pass - def query_class(self, DefaultQueryClass, subclass=None): + def compiler(self, compiler_name): """ Given the default Query class, returns a custom Query class to use for this backend. Returns the Query class unmodified if the backend doesn't need a custom Query clsas. """ - if subclass is not None: - return subclass - return DefaultQueryClass + if compiler_name not in self._cache: + self._cache[compiler_name] = getattr( + import_module(self.compiler_module), compiler_name + ) + return self._cache[compiler_name] def quote_name(self, name): """ diff --git a/django/db/backends/creation.py b/django/db/backends/creation.py index 5327c6c2df..2608f153b3 100644 --- a/django/db/backends/creation.py +++ b/django/db/backends/creation.py @@ -316,7 +316,7 @@ class BaseDatabaseCreation(object): output.append(ds) return output - def create_test_db(self, verbosity=1, autoclobber=False, alias=''): + def create_test_db(self, verbosity=1, autoclobber=False, alias=None): """ Creates a test database, prompting the user for confirmation if the database already exists. Returns the name of the test database created. diff --git a/django/db/backends/oracle/base.py b/django/db/backends/oracle/base.py index 29e0ff04a3..e9b952138d 100644 --- a/django/db/backends/oracle/base.py +++ b/django/db/backends/oracle/base.py @@ -26,7 +26,6 @@ except ImportError, e: from django.db.backends import * from django.db.backends.signals import connection_created -from django.db.backends.oracle import query from django.db.backends.oracle.client import DatabaseClient from django.db.backends.oracle.creation import DatabaseCreation from django.db.backends.oracle.introspection import DatabaseIntrospection @@ -47,13 +46,13 @@ else: class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () needs_datetime_string_cast = False - uses_custom_query_class = True interprets_empty_strings_as_nulls = True uses_savepoints = True can_return_id_from_insert = True class DatabaseOperations(BaseDatabaseOperations): + compiler_module = "django.db.backends.oracle.compiler" def autoinc_sql(self, table, column): # To simulate auto-incrementing primary keys in Oracle, we have to @@ -102,6 +101,54 @@ WHEN (new.%(col_name)s IS NULL) sql = "TRUNC(%s, '%s')" % (field_name, lookup_type) return sql + def convert_values(self, value, field): + if isinstance(value, Database.LOB): + value = value.read() + if field and field.get_internal_type() == 'TextField': + value = force_unicode(value) + + # Oracle stores empty strings as null. We need to undo this in + # order to adhere to the Django convention of using the empty + # string instead of null, but only if the field accepts the + # empty string. 
+ if value is None and field and field.empty_strings_allowed: + value = u'' + # Convert 1 or 0 to True or False + elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'): + value = bool(value) + # Force floats to the correct type + elif value is not None and field and field.get_internal_type() == 'FloatField': + value = float(value) + # Convert floats to decimals + elif value is not None and field and field.get_internal_type() == 'DecimalField': + value = util.typecast_decimal(field.format_number(value)) + # cx_Oracle always returns datetime.datetime objects for + # DATE and TIMESTAMP columns, but Django wants to see a + # python datetime.date, .time, or .datetime. We use the type + # of the Field to determine which to cast to, but it's not + # always available. + # As a workaround, we cast to date if all the time-related + # values are 0, or to time if the date is 1/1/1900. + # This could be cleaned a bit by adding a method to the Field + # classes to normalize values from the database (the to_python + # method is used for validation and isn't what we want here). + elif isinstance(value, Database.Timestamp): + # In Python 2.3, the cx_Oracle driver returns its own + # Timestamp object that we must convert to a datetime class. + if not isinstance(value, datetime.datetime): + value = datetime.datetime(value.year, value.month, + value.day, value.hour, value.minute, value.second, + value.fsecond) + if field and field.get_internal_type() == 'DateTimeField': + pass + elif field and field.get_internal_type() == 'DateField': + value = value.date() + elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1): + value = value.time() + elif value.hour == value.minute == value.second == value.microsecond == 0: + value = value.date() + return value + def datetime_cast_sql(self): return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" @@ -141,15 +188,6 @@ WHEN (new.%(col_name)s IS NULL) return u'' return force_unicode(value.read()) - def query_class(self, DefaultQueryClass, subclass=None): - if (DefaultQueryClass, subclass) in self._cache: - return self._cache[DefaultQueryClass, subclass] - Query = query.query_class(DefaultQueryClass, Database) - if subclass is not None: - Query = type('Query', (subclass, Query), {}) - self._cache[DefaultQueryClass, subclass] = Query - return Query - def quote_name(self, name): # SQL92 requires delimited (quoted) names to be case-sensitive. When # not quoted, Oracle has case-insensitive behavior for identifiers, but diff --git a/django/db/backends/oracle/compiler.py b/django/db/backends/oracle/compiler.py new file mode 100644 index 0000000000..cc1541ff3f --- /dev/null +++ b/django/db/backends/oracle/compiler.py @@ -0,0 +1,66 @@ +from django.db.models.sql import compiler + + +class SQLCompiler(compiler.SQLCompiler): + def resolve_columns(self, row, fields=()): + # If this query has limit/offset information, then we expect the + # first column to be an extra "_RN" column that we need to throw + # away. 
+ if self.query.high_mark is not None or self.query.low_mark: + rn_offset = 1 + else: + rn_offset = 0 + index_start = rn_offset + len(self.query.extra_select.keys()) + values = [self.query.convert_values(v, None, connection=self.connection) + for v in row[rn_offset:index_start]] + for value, field in map(None, row[index_start:], fields): + values.append(self.query.convert_values(value, field, connection=self.connection)) + return tuple(values) + + def as_sql(self, with_limits=True, with_col_aliases=False): + """ + Creates the SQL for this query. Returns the SQL string and list + of parameters. This is overriden from the original Query class + to handle the additional SQL Oracle requires to emulate LIMIT + and OFFSET. + + If 'with_limits' is False, any limit/offset information is not + included in the query. + """ + + # The `do_offset` flag indicates whether we need to construct + # the SQL needed to use limit/offset with Oracle. + do_offset = with_limits and (self.query.high_mark is not None + or self.query.low_mark) + if not do_offset: + sql, params = super(SQLCompiler, self).as_sql(with_limits=False, + with_col_aliases=with_col_aliases) + else: + sql, params = super(SQLCompiler, self).as_sql(with_limits=False, + with_col_aliases=True) + + # Wrap the base query in an outer SELECT * with boundaries on + # the "_RN" column. This is the canonical way to emulate LIMIT + # and OFFSET on Oracle. + high_where = '' + if self.query.high_mark is not None: + high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,) + sql = 'SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM (%s) "_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark) + + return sql, params + + +class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler): + pass + +class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler): + pass + +class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler): + pass + +class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler): + pass + +class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler): + pass diff --git a/django/db/backends/oracle/query.py b/django/db/backends/oracle/query.py deleted file mode 100644 index f3d37b9c67..0000000000 --- a/django/db/backends/oracle/query.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -Custom Query class for Oracle. -Derives from: django.db.models.sql.query.Query -""" - -import datetime - -from django.db.backends import util -from django.utils.encoding import force_unicode - -def query_class(QueryClass, Database): - """ - Returns a custom django.db.models.sql.query.Query subclass that is - appropriate for Oracle. - - The 'Database' module (cx_Oracle) is passed in here so that all the setup - required to import it only needs to be done by the calling module. - """ - class OracleQuery(QueryClass): - def __reduce__(self): - """ - Enable pickling for this class (normal pickling handling doesn't - work as Python can only pickle module-level classes by default). - """ - if hasattr(QueryClass, '__getstate__'): - assert hasattr(QueryClass, '__setstate__') - data = self.__getstate__() - else: - data = self.__dict__ - return (unpickle_query_class, (QueryClass,), data) - - def resolve_columns(self, row, fields=()): - # If this query has limit/offset information, then we expect the - # first column to be an extra "_RN" column that we need to throw - # away. 
- if self.high_mark is not None or self.low_mark: - rn_offset = 1 - else: - rn_offset = 0 - index_start = rn_offset + len(self.extra_select.keys()) - values = [self.convert_values(v, None) - for v in row[rn_offset:index_start]] - for value, field in map(None, row[index_start:], fields): - values.append(self.convert_values(value, field)) - return tuple(values) - - def convert_values(self, value, field): - if isinstance(value, Database.LOB): - value = value.read() - if field and field.get_internal_type() == 'TextField': - value = force_unicode(value) - - # Oracle stores empty strings as null. We need to undo this in - # order to adhere to the Django convention of using the empty - # string instead of null, but only if the field accepts the - # empty string. - if value is None and field and field.empty_strings_allowed: - value = u'' - # Convert 1 or 0 to True or False - elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'): - value = bool(value) - # Force floats to the correct type - elif value is not None and field and field.get_internal_type() == 'FloatField': - value = float(value) - # Convert floats to decimals - elif value is not None and field and field.get_internal_type() == 'DecimalField': - value = util.typecast_decimal(field.format_number(value)) - # cx_Oracle always returns datetime.datetime objects for - # DATE and TIMESTAMP columns, but Django wants to see a - # python datetime.date, .time, or .datetime. We use the type - # of the Field to determine which to cast to, but it's not - # always available. - # As a workaround, we cast to date if all the time-related - # values are 0, or to time if the date is 1/1/1900. - # This could be cleaned a bit by adding a method to the Field - # classes to normalize values from the database (the to_python - # method is used for validation and isn't what we want here). - elif isinstance(value, Database.Timestamp): - # In Python 2.3, the cx_Oracle driver returns its own - # Timestamp object that we must convert to a datetime class. - if not isinstance(value, datetime.datetime): - value = datetime.datetime(value.year, value.month, - value.day, value.hour, value.minute, value.second, - value.fsecond) - if field and field.get_internal_type() == 'DateTimeField': - pass - elif field and field.get_internal_type() == 'DateField': - value = value.date() - elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1): - value = value.time() - elif value.hour == value.minute == value.second == value.microsecond == 0: - value = value.date() - return value - - def as_sql(self, with_limits=True, with_col_aliases=False): - """ - Creates the SQL for this query. Returns the SQL string and list - of parameters. This is overriden from the original Query class - to handle the additional SQL Oracle requires to emulate LIMIT - and OFFSET. - - If 'with_limits' is False, any limit/offset information is not - included in the query. - """ - - # The `do_offset` flag indicates whether we need to construct - # the SQL needed to use limit/offset with Oracle. - do_offset = with_limits and (self.high_mark is not None - or self.low_mark) - if not do_offset: - sql, params = super(OracleQuery, self).as_sql(with_limits=False, - with_col_aliases=with_col_aliases) - else: - sql, params = super(OracleQuery, self).as_sql(with_limits=False, - with_col_aliases=True) - - # Wrap the base query in an outer SELECT * with boundaries on - # the "_RN" column. 
This is the canonical way to emulate LIMIT - # and OFFSET on Oracle. - high_where = '' - if self.high_mark is not None: - high_where = 'WHERE ROWNUM <= %d' % (self.high_mark,) - sql = 'SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM (%s) "_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.low_mark) - - return sql, params - - return OracleQuery - -def unpickle_query_class(QueryClass): - """ - Utility function, called by Python's unpickling machinery, that handles - unpickling of Oracle Query subclasses. - """ - # XXX: Would be nice to not have any dependency on cx_Oracle here. Since - # modules can't be pickled, we need a way to know to load the right module. - import cx_Oracle - - klass = query_class(QueryClass, cx_Oracle) - return klass.__new__(klass) -unpickle_query_class.__safe_for_unpickling__ = True diff --git a/django/db/backends/postgresql/operations.py b/django/db/backends/postgresql/operations.py index e2bc992e49..f51743646d 100644 --- a/django/db/backends/postgresql/operations.py +++ b/django/db/backends/postgresql/operations.py @@ -7,6 +7,7 @@ from django.db.backends import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): def __init__(self, connection): + super(DatabaseOperations, self).__init__() self._postgres_version = None self.connection = connection diff --git a/django/db/models/aggregates.py b/django/db/models/aggregates.py index ce8829c593..a2349cf5c6 100644 --- a/django/db/models/aggregates.py +++ b/django/db/models/aggregates.py @@ -43,9 +43,6 @@ class Aggregate(object): """ klass = getattr(query.aggregates_module, self.name) aggregate = klass(col, source=source, is_summary=is_summary, **self.extra) - # Validate that the backend has a fully supported, correct - # implementation of this aggregate - query.connection.ops.check_aggregate_support(aggregate) query.aggregates[alias] = aggregate class Avg(Aggregate): diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py index d4b3499d0c..8acd04d19c 100644 --- a/django/db/models/fields/__init__.py +++ b/django/db/models/fields/__init__.py @@ -201,9 +201,9 @@ class Field(object): if hasattr(value, 'relabel_aliases'): return value if hasattr(value, 'as_sql'): - sql, params = value.as_sql(connection) + sql, params = value.as_sql() else: - sql, params = value._as_sql(connection) + sql, params = value._as_sql(connection=connection) return QueryWrapper(('(%s)' % sql), params) diff --git a/django/db/models/fields/related.py b/django/db/models/fields/related.py index ce2813481a..c574762a27 100644 --- a/django/db/models/fields/related.py +++ b/django/db/models/fields/related.py @@ -145,15 +145,18 @@ class RelatedField(object): v = v[0] return v + if hasattr(value, 'get_compiler'): + value = value.get_compiler(connection=connection) + if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'): # If the value has a relabel_aliases method, it will need to # be invoked before the final SQL is evaluated if hasattr(value, 'relabel_aliases'): return value if hasattr(value, 'as_sql'): - sql, params = value.as_sql(connection) + sql, params = value.as_sql() else: - sql, params = value._as_sql(connection) + sql, params = value._as_sql(connection=connection) return QueryWrapper(('(%s)' % sql), params) # FIXME: lt and gt are explicitally allowed to make diff --git a/django/db/models/query.py b/django/db/models/query.py index 9ef9a93284..2da101113b 100644 --- a/django/db/models/query.py +++ b/django/db/models/query.py @@ -34,12 +34,11 @@ class QuerySet(object): using = None using = using 
or DEFAULT_DB_ALIAS connection = connections[using] - self.query = query or connection.ops.query_class(sql.Query)(self.model, connection) + self.query = query or sql.Query(self.model) self._result_cache = None self._iter = None self._sticky_filter = False - self._using = (query and - connections.alias_for_connection(self.query.connection) or using) + self._using = using ######################## # PYTHON MAGIC METHODS # @@ -237,8 +236,9 @@ class QuerySet(object): else: init_list.append(field.attname) model_cls = deferred_class_factory(self.model, skip) - - for row in self.query.results_iter(): + + compiler = self.query.get_compiler(using=self._using) + for row in compiler.results_iter(): if fill_cache: obj, _ = get_cached_row(self.model, row, index_start, max_depth, @@ -279,7 +279,7 @@ class QuerySet(object): query.add_aggregate(aggregate_expr, self.model, alias, is_summary=True) - return query.get_aggregation() + return query.get_aggregation(using=self._using) def count(self): """ @@ -292,7 +292,7 @@ class QuerySet(object): if self._result_cache is not None and not self._iter: return len(self._result_cache) - return self.query.get_count() + return self.query.get_count(using=self._using) def get(self, *args, **kwargs): """ @@ -420,7 +420,7 @@ class QuerySet(object): else: forced_managed = False try: - rows = query.execute_sql(None) + rows = query.get_compiler(self._using).execute_sql(None) if forced_managed: transaction.commit(using=self._using) else: @@ -444,12 +444,12 @@ class QuerySet(object): query = self.query.clone(sql.UpdateQuery) query.add_update_fields(values) self._result_cache = None - return query.execute_sql(None) + return query.get_compiler(self._using).execute_sql(None) _update.alters_data = True def exists(self): if self._result_cache is None: - return self.query.has_results() + return self.query.has_results(using=self._using) return bool(self._result_cache) ################################################## @@ -662,16 +662,6 @@ class QuerySet(object): """ clone = self._clone() clone._using = alias - connection = connections[alias] - clone.query.set_connection(connection) - cls = clone.query.get_query_class() - if cls is sql.Query: - subclass = None - else: - subclass = cls - clone.query.__class__ = connection.ops.query_class( - sql.Query, subclass - ) return clone ################################### @@ -757,8 +747,8 @@ class QuerySet(object): Returns the internal query's SQL and parameters (as a tuple). 
""" obj = self.values("pk") - if connection == obj.query.connection: - return obj.query.as_nested_sql() + if connection == connections[obj._using]: + return obj.query.get_compiler(connection=connection).as_nested_sql() raise ValueError("Can't do subqueries with queries on different DBs.") def _validate(self): @@ -789,7 +779,7 @@ class ValuesQuerySet(QuerySet): names = extra_names + field_names + aggregate_names - for row in self.query.results_iter(): + for row in self.query.get_compiler(self._using).results_iter(): yield dict(zip(names, row)) def _setup_query(self): @@ -886,8 +876,8 @@ class ValuesQuerySet(QuerySet): % self.__class__.__name__) obj = self._clone() - if connection == obj.query.connection: - return obj.query.as_nested_sql() + if connection == connections[obj._using]: + return obj.query.get_compiler(connection=connection).as_nested_sql() raise ValueError("Can't do subqueries with queries on different DBs.") def _validate(self): @@ -904,10 +894,10 @@ class ValuesQuerySet(QuerySet): class ValuesListQuerySet(ValuesQuerySet): def iterator(self): if self.flat and len(self._fields) == 1: - for row in self.query.results_iter(): + for row in self.query.get_compiler(self._using).results_iter(): yield row[0] elif not self.query.extra_select and not self.query.aggregate_select: - for row in self.query.results_iter(): + for row in self.query.get_compiler(self._using).results_iter(): yield tuple(row) else: # When extra(select=...) or an annotation is involved, the extra @@ -926,7 +916,7 @@ class ValuesListQuerySet(ValuesQuerySet): else: fields = names - for row in self.query.results_iter(): + for row in self.query.get_compiler(self._using).results_iter(): data = dict(zip(names, row)) yield tuple([data[f] for f in fields]) @@ -938,7 +928,7 @@ class ValuesListQuerySet(ValuesQuerySet): class DateQuerySet(QuerySet): def iterator(self): - return self.query.results_iter() + return self.query.get_compiler(self._using).results_iter() def _setup_query(self): """ @@ -948,10 +938,7 @@ class DateQuerySet(QuerySet): instance. """ self.query.clear_deferred_loading() - self.query = self.query.clone( - klass=self.query.connection.ops.query_class(sql.Query, sql.DateQuery), - setup=True - ) + self.query = self.query.clone(klass=sql.DateQuery, setup=True) self.query.select = [] field = self.model._meta.get_field(self._field_name, many_to_many=False) assert isinstance(field, DateField), "%r isn't a DateField." \ @@ -1089,19 +1076,18 @@ def delete_objects(seen_objs, using): signals.pre_delete.send(sender=cls, instance=instance) pk_list = [pk for pk,instance in items] - del_query = connection.ops.query_class(sql.Query, sql.DeleteQuery)(cls, connection) - del_query.delete_batch_related(pk_list) + del_query = sql.DeleteQuery(cls) + del_query.delete_batch_related(pk_list, using=using) - update_query = connection.ops.query_class(sql.Query, sql.UpdateQuery)(cls, connection) + update_query = sql.UpdateQuery(cls) for field, model in cls._meta.get_fields_with_model(): if (field.rel and field.null and field.rel.to in seen_objs and filter(lambda f: f.column == field.rel.get_related_field().column, field.rel.to._meta.fields)): if model: - connection.ops.query_class(sql.Query, sql.UpdateQuery)(model, connection).clear_related(field, - pk_list) + sql.UpdateQuery(model).clear_related(field, pk_list, using=using) else: - update_query.clear_related(field, pk_list) + update_query.clear_related(field, pk_list, using=using) # Now delete the actual data. 
for cls in ordered_classes: @@ -1109,8 +1095,8 @@ def delete_objects(seen_objs, using): items.reverse() pk_list = [pk for pk,instance in items] - del_query = connection.ops.query_class(sql.Query, sql.DeleteQuery)(cls, connection) - del_query.delete_batch(pk_list) + del_query = sql.DeleteQuery(cls) + del_query.delete_batch(pk_list, using=using) # Last cleanup; set NULLs where there once was a reference to the # object, NULL the primary key of the found objects, and perform @@ -1139,7 +1125,7 @@ def insert_query(model, values, return_id=False, raw_values=False, using=None): the InsertQuery class and is how Model.save() is implemented. It is not part of the public API. """ - connection = connections[using] - query = connection.ops.query_class(sql.Query, sql.InsertQuery)(model, connection) + query = sql.InsertQuery(model) query.insert_values(values, raw_values) - return query.execute_sql(return_id) + compiler = query.get_compiler(using=using) + return compiler.execute_sql(return_id) diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py new file mode 100644 index 0000000000..21ae779000 --- /dev/null +++ b/django/db/models/sql/compiler.py @@ -0,0 +1,902 @@ +from django.core.exceptions import FieldError +from django.db import connections +from django.db.backends.util import truncate_name +from django.db.models.sql.constants import * +from django.db.models.sql.datastructures import EmptyResultSet +from django.db.models.sql.expressions import SQLEvaluator +from django.db.models.sql.query import get_proxied_model, get_order_dir, \ + select_related_descend, Query + +class SQLCompiler(object): + def __init__(self, query, connection, using): + self.query = query + self.connection = connection + self.using = using + self.quote_cache = {} + + # Check that the compiler will be able to execute the query + for alias, aggregate in self.query.aggregate_select.items(): + self.connection.ops.check_aggregate_support(aggregate) + + def pre_sql_setup(self): + """ + Does any necessary class setup immediately prior to producing SQL. This + is for things that can't necessarily be done in __init__ because we + might not have all the pieces in place at that time. + """ + if not self.query.tables: + self.query.join((None, self.query.model._meta.db_table, None, None)) + if (not self.query.select and self.query.default_cols and not + self.query.included_inherited_models): + self.query.setup_inherited_models() + if self.query.select_related and not self.query.related_select_cols: + self.fill_related_selections() + + def quote_name_unless_alias(self, name): + """ + A wrapper around connection.ops.quote_name that doesn't quote aliases + for table names. This avoids problems with some SQL dialects that treat + quoted strings specially (e.g. PostgreSQL). + """ + if name in self.quote_cache: + return self.quote_cache[name] + if ((name in self.query.alias_map and name not in self.query.table_map) or + name in self.query.extra_select): + self.quote_cache[name] = name + return name + r = self.connection.ops.quote_name(name) + self.quote_cache[name] = r + return r + + def as_sql(self, with_limits=True, with_col_aliases=False): + """ + Creates the SQL for this query. Returns the SQL string and list of + parameters. + + If 'with_limits' is False, any limit/offset information is not included + in the query. 
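
        A rough usage sketch (the model and the 'default' alias are only
        illustrative):

            compiler = Book.objects.all().query.get_compiler(using='default')
            sql, params = compiler.as_sql()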
+ """ + self.pre_sql_setup() + out_cols = self.get_columns(with_col_aliases) + ordering, ordering_group_by = self.get_ordering() + + # This must come after 'select' and 'ordering' -- see docstring of + # get_from_clause() for details. + from_, f_params = self.get_from_clause() + + qn = self.quote_name_unless_alias + + where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection) + having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection) + params = [] + for val in self.query.extra_select.itervalues(): + params.extend(val[1]) + + result = ['SELECT'] + if self.query.distinct: + result.append('DISTINCT') + result.append(', '.join(out_cols + self.query.ordering_aliases)) + + result.append('FROM') + result.extend(from_) + params.extend(f_params) + + if where: + result.append('WHERE %s' % where) + params.extend(w_params) + if self.query.extra_where: + if not where: + result.append('WHERE') + else: + result.append('AND') + result.append(' AND '.join(self.query.extra_where)) + + grouping, gb_params = self.get_grouping() + if grouping: + if ordering: + # If the backend can't group by PK (i.e., any database + # other than MySQL), then any fields mentioned in the + # ordering clause needs to be in the group by clause. + if not self.connection.features.allows_group_by_pk: + for col, col_params in ordering_group_by: + if col not in grouping: + grouping.append(str(col)) + gb_params.extend(col_params) + else: + ordering = self.connection.ops.force_no_ordering() + result.append('GROUP BY %s' % ', '.join(grouping)) + params.extend(gb_params) + + if having: + result.append('HAVING %s' % having) + params.extend(h_params) + + if ordering: + result.append('ORDER BY %s' % ', '.join(ordering)) + + if with_limits: + if self.query.high_mark is not None: + result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark)) + if self.query.low_mark: + if self.query.high_mark is None: + val = self.connection.ops.no_limit_value() + if val: + result.append('LIMIT %d' % val) + result.append('OFFSET %d' % self.query.low_mark) + + params.extend(self.query.extra_params) + return ' '.join(result), tuple(params) + + def as_nested_sql(self): + """ + Perform the same functionality as the as_sql() method, returning an + SQL string and parameters. However, the alias prefixes are bumped + beforehand (in a copy -- the current query isn't changed) and any + ordering is removed. + + Used when nesting this query inside another. + """ + obj = self.query.clone() + obj.clear_ordering(True) + obj.bump_prefix() + return obj.get_compiler(connection=self.connection).as_sql() + + def get_columns(self, with_aliases=False): + """ + Returns the list of columns to use in the select statement. If no + columns have been specified, returns all columns relating to fields in + the model. + + If 'with_aliases' is true, any column names that are duplicated + (without the table names) are given unique aliases. This is needed in + some cases to avoid ambiguity with nested queries. 
+ """ + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()] + aliases = set(self.query.extra_select.keys()) + if with_aliases: + col_aliases = aliases.copy() + else: + col_aliases = set() + if self.query.select: + only_load = self.deferred_to_columns() + for col in self.query.select: + if isinstance(col, (list, tuple)): + alias, column = col + table = self.query.alias_map[alias][TABLE_NAME] + if table in only_load and col not in only_load[table]: + continue + r = '%s.%s' % (qn(alias), qn(column)) + if with_aliases: + if col[1] in col_aliases: + c_alias = 'Col%d' % len(col_aliases) + result.append('%s AS %s' % (r, c_alias)) + aliases.add(c_alias) + col_aliases.add(c_alias) + else: + result.append('%s AS %s' % (r, qn2(col[1]))) + aliases.add(r) + col_aliases.add(col[1]) + else: + result.append(r) + aliases.add(r) + col_aliases.add(col[1]) + else: + result.append(col.as_sql(qn, self.connection)) + + if hasattr(col, 'alias'): + aliases.add(col.alias) + col_aliases.add(col.alias) + + elif self.query.default_cols: + cols, new_aliases = self.get_default_columns(with_aliases, + col_aliases) + result.extend(cols) + aliases.update(new_aliases) + + max_name_length = self.connection.ops.max_name_length() + result.extend([ + '%s%s' % ( + aggregate.as_sql(qn, self.connection), + alias is not None + and ' AS %s' % qn(truncate_name(alias, max_name_length)) + or '' + ) + for alias, aggregate in self.query.aggregate_select.items() + ]) + + for table, col in self.query.related_select_cols: + r = '%s.%s' % (qn(table), qn(col)) + if with_aliases and col in col_aliases: + c_alias = 'Col%d' % len(col_aliases) + result.append('%s AS %s' % (r, c_alias)) + aliases.add(c_alias) + col_aliases.add(c_alias) + else: + result.append(r) + aliases.add(r) + col_aliases.add(col) + + self._select_aliases = aliases + return result + + def get_default_columns(self, with_aliases=False, col_aliases=None, + start_alias=None, opts=None, as_pairs=False): + """ + Computes the default columns for selecting every field in the base + model. Will sometimes be called to pull in related models (e.g. via + select_related), in which case "opts" and "start_alias" will be given + to provide a starting point for the traversal. + + Returns a list of strings, quoted appropriately for use in SQL + directly, as well as a set of aliases used in the select statement (if + 'as_pairs' is True, returns a list of (alias, col_name) pairs instead + of strings as the first component and None as the second component). + """ + result = [] + if opts is None: + opts = self.query.model._meta + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + aliases = set() + only_load = self.deferred_to_columns() + # Skip all proxy to the root proxied model + proxied_model = get_proxied_model(opts) + + if start_alias: + seen = {None: start_alias} + for field, model in opts.get_fields_with_model(): + if start_alias: + try: + alias = seen[model] + except KeyError: + if model is proxied_model: + alias = start_alias + else: + link_field = opts.get_ancestor_link(model) + alias = self.query.join((start_alias, model._meta.db_table, + link_field.column, model._meta.pk.column)) + seen[model] = alias + else: + # If we're starting from the base model of the queryset, the + # aliases will have already been set up in pre_sql_setup(), so + # we can save time here. 
+ alias = self.query.included_inherited_models[model] + table = self.query.alias_map[alias][TABLE_NAME] + if table in only_load and field.column not in only_load[table]: + continue + if as_pairs: + result.append((alias, field.column)) + aliases.add(alias) + continue + if with_aliases and field.column in col_aliases: + c_alias = 'Col%d' % len(col_aliases) + result.append('%s.%s AS %s' % (qn(alias), + qn2(field.column), c_alias)) + col_aliases.add(c_alias) + aliases.add(c_alias) + else: + r = '%s.%s' % (qn(alias), qn2(field.column)) + result.append(r) + aliases.add(r) + if with_aliases: + col_aliases.add(field.column) + return result, aliases + + def get_ordering(self): + """ + Returns a tuple containing a list representing the SQL elements in the + "order by" clause, and the list of SQL elements that need to be added + to the GROUP BY clause as a result of the ordering. + + Also sets the ordering_aliases attribute on this instance to a list of + extra aliases needed in the select. + + Determining the ordering SQL can change the tables we need to include, + so this should be run *before* get_from_clause(). + """ + if self.query.extra_order_by: + ordering = self.query.extra_order_by + elif not self.query.default_ordering: + ordering = self.query.order_by + else: + ordering = self.query.order_by or self.query.model._meta.ordering + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + distinct = self.query.distinct + select_aliases = self._select_aliases + result = [] + group_by = [] + ordering_aliases = [] + if self.query.standard_ordering: + asc, desc = ORDER_DIR['ASC'] + else: + asc, desc = ORDER_DIR['DESC'] + + # It's possible, due to model inheritance, that normal usage might try + # to include the same field more than once in the ordering. We track + # the table/column pairs we use and discard any after the first use. + processed_pairs = set() + + for field in ordering: + if field == '?': + result.append(self.connection.ops.random_function_sql()) + continue + if isinstance(field, int): + if field < 0: + order = desc + field = -field + else: + order = asc + result.append('%s %s' % (field, order)) + group_by.append((field, [])) + continue + col, order = get_order_dir(field, asc) + if col in self.query.aggregate_select: + result.append('%s %s' % (col, order)) + continue + if '.' in field: + # This came in through an extra(order_by=...) addition. Pass it + # on verbatim. + table, col = col.split('.', 1) + if (table, col) not in processed_pairs: + elt = '%s.%s' % (qn(table), col) + processed_pairs.add((table, col)) + if not distinct or elt in select_aliases: + result.append('%s %s' % (elt, order)) + group_by.append((elt, [])) + elif get_order_dir(field)[0] not in self.query.extra_select: + # 'col' is of the form 'field' or 'field1__field2' or + # '-field1__field2__field', etc. 
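                # For example (illustrative), ordering on 'author__name' is
                # resolved by find_ordering_name(), which sets up the required
                # join and orders on the related table's column.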
+ for table, col, order in self.find_ordering_name(field, + self.query.model._meta, default_order=asc): + if (table, col) not in processed_pairs: + elt = '%s.%s' % (qn(table), qn2(col)) + processed_pairs.add((table, col)) + if distinct and elt not in select_aliases: + ordering_aliases.append(elt) + result.append('%s %s' % (elt, order)) + group_by.append((elt, [])) + else: + elt = qn2(col) + if distinct and col not in select_aliases: + ordering_aliases.append(elt) + result.append('%s %s' % (elt, order)) + group_by.append(self.query.extra_select[col]) + self.query.ordering_aliases = ordering_aliases + return result, group_by + + def find_ordering_name(self, name, opts, alias=None, default_order='ASC', + already_seen=None): + """ + Returns the table alias (the name might be ambiguous, the alias will + not be) and column name for ordering by the given 'name' parameter. + The 'name' is of the form 'field1__field2__...__fieldN'. + """ + name, order = get_order_dir(name, default_order) + pieces = name.split(LOOKUP_SEP) + if not alias: + alias = self.query.get_initial_alias() + field, target, opts, joins, last, extra = self.query.setup_joins(pieces, + opts, alias, False) + alias = joins[-1] + col = target.column + if not field.rel: + # To avoid inadvertent trimming of a necessary alias, use the + # refcount to show that we are referencing a non-relation field on + # the model. + self.query.ref_alias(alias) + + # Must use left outer joins for nullable fields and their relations. + self.query.promote_alias_chain(joins, + self.query.alias_map[joins[0]][JOIN_TYPE] == self.query.LOUTER) + + # If we get to this point and the field is a relation to another model, + # append the default ordering for that model. + if field.rel and len(joins) > 1 and opts.ordering: + # Firstly, avoid infinite loops. + if not already_seen: + already_seen = set() + join_tuple = tuple([self.query.alias_map[j][TABLE_NAME] for j in joins]) + if join_tuple in already_seen: + raise FieldError('Infinite loop caused by ordering.') + already_seen.add(join_tuple) + + results = [] + for item in opts.ordering: + results.extend(self.find_ordering_name(item, opts, alias, + order, already_seen)) + return results + + if alias: + # We have to do the same "final join" optimisation as in + # add_filter, since the final column might not otherwise be part of + # the select set (so we can't order on it). + while 1: + join = self.query.alias_map[alias] + if col != join[RHS_JOIN_COL]: + break + self.query.unref_alias(alias) + alias = join[LHS_ALIAS] + col = join[LHS_JOIN_COL] + return [(alias, col, order)] + + def get_from_clause(self): + """ + Returns a list of strings that are joined together to go after the + "FROM" part of the query, as well as a list any extra parameters that + need to be included. Sub-classes, can override this to create a + from-clause via a "select". + + This should only be called after any SQL construction methods that + might change the tables we need. This means the select columns and + ordering must be done first. + """ + result = [] + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + first = True + for alias in self.query.tables: + if not self.query.alias_refcount[alias]: + continue + try: + name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias] + except KeyError: + # Extra tables can end up in self.tables, but not in the + # alias_map if they aren't in a join. That's OK. We skip them. 
+ continue + alias_str = (alias != name and ' %s' % alias or '') + if join_type and not first: + result.append('%s %s%s ON (%s.%s = %s.%s)' + % (join_type, qn(name), alias_str, qn(lhs), + qn2(lhs_col), qn(alias), qn2(col))) + else: + connector = not first and ', ' or '' + result.append('%s%s%s' % (connector, qn(name), alias_str)) + first = False + for t in self.query.extra_tables: + alias, unused = self.query.table_alias(t) + # Only add the alias if it's not already present (the table_alias() + # calls increments the refcount, so an alias refcount of one means + # this is the only reference. + if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: + connector = not first and ', ' or '' + result.append('%s%s' % (connector, qn(alias))) + first = False + return result, [] + + def get_grouping(self): + """ + Returns a tuple representing the SQL elements in the "group by" clause. + """ + qn = self.quote_name_unless_alias + result, params = [], [] + if self.query.group_by is not None: + if len(self.query.model._meta.fields) == len(self.query.select) and \ + self.connection.features.allows_group_by_pk: + self.query.group_by = [(self.query.model._meta.db_table, self.query.model._meta.pk.column)] + + group_by = self.query.group_by or [] + + extra_selects = [] + for extra_select, extra_params in self.query.extra_select.itervalues(): + extra_selects.append(extra_select) + params.extend(extra_params) + for col in group_by + self.query.related_select_cols + extra_selects: + if isinstance(col, (list, tuple)): + result.append('%s.%s' % (qn(col[0]), qn(col[1]))) + elif hasattr(col, 'as_sql'): + result.append(col.as_sql(qn)) + else: + result.append(str(col)) + return result, params + + def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1, + used=None, requested=None, restricted=None, nullable=None, + dupe_set=None, avoid_set=None): + """ + Fill in the information needed for a select_related query. The current + depth is measured as the number of connections away from the root model + (for example, cur_depth=1 means we are looking at models with direct + connections to the root model). + """ + if not restricted and self.query.max_depth and cur_depth > self.query.max_depth: + # We've recursed far enough; bail out. + return + + if not opts: + opts = self.query.get_meta() + root_alias = self.query.get_initial_alias() + self.query.related_select_cols = [] + self.query.related_select_fields = [] + if not used: + used = set() + if dupe_set is None: + dupe_set = set() + if avoid_set is None: + avoid_set = set() + orig_dupe_set = dupe_set + + # Setup for the case when only particular related fields should be + # included in the related selection. + if requested is None and restricted is not False: + if isinstance(self.query.select_related, dict): + requested = self.query.select_related + restricted = True + else: + restricted = False + + for f, model in opts.get_fields_with_model(): + if not select_related_descend(f, restricted, requested): + continue + # The "avoid" set is aliases we want to avoid just for this + # particular branch of the recursion. They aren't permanently + # forbidden from reuse in the related selection tables (which is + # what "used" specifies). 
+ avoid = avoid_set.copy() + dupe_set = orig_dupe_set.copy() + table = f.rel.to._meta.db_table + if nullable or f.null: + promote = True + else: + promote = False + if model: + int_opts = opts + alias = root_alias + alias_chain = [] + for int_model in opts.get_base_chain(model): + # Proxy model have elements in base chain + # with no parents, assign the new options + # object and skip to the next base in that + # case + if not int_opts.parents[int_model]: + int_opts = int_model._meta + continue + lhs_col = int_opts.parents[int_model].column + dedupe = lhs_col in opts.duplicate_targets + if dedupe: + avoid.update(self.query.dupe_avoidance.get(id(opts), lhs_col), + ()) + dupe_set.add((opts, lhs_col)) + int_opts = int_model._meta + alias = self.query.join((alias, int_opts.db_table, lhs_col, + int_opts.pk.column), exclusions=used, + promote=promote) + alias_chain.append(alias) + for (dupe_opts, dupe_col) in dupe_set: + self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias) + if self.query.alias_map[root_alias][JOIN_TYPE] == self.query.LOUTER: + self.query.promote_alias_chain(alias_chain, True) + else: + alias = root_alias + + dedupe = f.column in opts.duplicate_targets + if dupe_set or dedupe: + avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ())) + if dedupe: + dupe_set.add((opts, f.column)) + + alias = self.query.join((alias, table, f.column, + f.rel.get_related_field().column), + exclusions=used.union(avoid), promote=promote) + used.add(alias) + columns, aliases = self.get_default_columns(start_alias=alias, + opts=f.rel.to._meta, as_pairs=True) + self.query.related_select_cols.extend(columns) + if self.query.alias_map[alias][JOIN_TYPE] == self.query.LOUTER: + self.query.promote_alias_chain(aliases, True) + self.query.related_select_fields.extend(f.rel.to._meta.fields) + if restricted: + next = requested.get(f.name, {}) + else: + next = False + if f.null is not None: + new_nullable = f.null + else: + new_nullable = None + for dupe_opts, dupe_col in dupe_set: + self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias) + self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1, + used, next, restricted, new_nullable, dupe_set, avoid) + + def deferred_to_columns(self): + """ + Converts the self.deferred_loading data structure to mapping of table + names to sets of column names which are to be loaded. Returns the + dictionary. + """ + columns = {} + self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb) + return columns + + def results_iter(self): + """ + Returns an iterator over the results from executing this query. + """ + resolve_columns = hasattr(self, 'resolve_columns') + fields = None + for rows in self.execute_sql(MULTI): + for row in rows: + if resolve_columns: + if fields is None: + # We only set this up here because + # related_select_fields isn't populated until + # execute_sql() has been called. 
+ if self.query.select_fields: + fields = self.query.select_fields + self.query.related_select_fields + else: + fields = self.query.model._meta.fields + row = self.resolve_columns(row, fields) + + if self.query.aggregate_select: + aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select) + aggregate_end = aggregate_start + len(self.query.aggregate_select) + row = tuple(row[:aggregate_start]) + tuple([ + self.query.resolve_aggregate(value, aggregate, self.connection) + for (alias, aggregate), value + in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end]) + ]) + tuple(row[aggregate_end:]) + + yield row + + def execute_sql(self, result_type=MULTI): + """ + Run the query against the database and returns the result(s). The + return value is a single data item if result_type is SINGLE, or an + iterator over the results if the result_type is MULTI. + + result_type is either MULTI (use fetchmany() to retrieve all rows), + SINGLE (only retrieve a single row), or None. In this last case, the + cursor is returned if any query is executed, since it's used by + subclasses such as InsertQuery). It's possible, however, that no query + is needed, as the filters describe an empty set. In that case, None is + returned, to avoid any unnecessary database interaction. + """ + try: + sql, params = self.as_sql() + if not sql: + raise EmptyResultSet + except EmptyResultSet: + if result_type == MULTI: + return empty_iter() + else: + return + + cursor = self.connection.cursor() + cursor.execute(sql, params) + + if not result_type: + return cursor + if result_type == SINGLE: + if self.query.ordering_aliases: + return cursor.fetchone()[:-len(self.query.ordering_aliases)] + return cursor.fetchone() + + # The MULTI case. + if self.query.ordering_aliases: + result = order_modified_iter(cursor, len(self.query.ordering_aliases), + self.connection.features.empty_fetchmany_value) + else: + result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), + self.connection.features.empty_fetchmany_value) + if not self.connection.features.can_use_chunked_reads: + # If we are using non-chunked reads, we return the same data + # structure as normally, but ensure it is all read into memory + # before going any further. + return list(result) + return result + + +class SQLInsertCompiler(SQLCompiler): + def as_sql(self): + # We don't need quote_name_unless_alias() here, since these are all + # going to be column names (so we can avoid the extra overhead). 
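        # The statement built below has the rough shape (table and column
        # names are illustrative):
        #     INSERT INTO "app_book" ("title", "author_id") VALUES (%s, %s)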
+ qn = self.connection.ops.quote_name + opts = self.query.model._meta + result = ['INSERT INTO %s' % qn(opts.db_table)] + result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns])) + result.append('VALUES (%s)' % ', '.join(self.query.values)) + params = self.query.params + if self.query.return_id and self.connection.features.can_return_id_from_insert: + col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) + r_fmt, r_params = self.connection.ops.return_insert_id() + result.append(r_fmt % col) + params = params + r_params + return ' '.join(result), params + + + def execute_sql(self, return_id=False): + self.query.return_id = return_id + cursor = super(SQLInsertCompiler, self).execute_sql(None) + if not (return_id and cursor): + return + if self.connection.features.can_return_id_from_insert: + return self.connection.ops.fetch_returned_insert_id(cursor) + return self.connection.ops.last_insert_id(cursor, + self.query.model._meta.db_table, self.query.model._meta.pk.column) + + +class SQLDeleteCompiler(SQLCompiler): + def as_sql(self): + """ + Creates the SQL for this query. Returns the SQL string and list of + parameters. + """ + assert len(self.query.tables) == 1, \ + "Can only delete from one table at a time." + qn = self.quote_name_unless_alias + result = ['DELETE FROM %s' % qn(self.query.tables[0])] + where, params = self.query.where.as_sql(qn=qn, connection=self.connection) + result.append('WHERE %s' % where) + return ' '.join(result), tuple(params) + +class SQLUpdateCompiler(SQLCompiler): + def as_sql(self): + """ + Creates the SQL for this query. Returns the SQL string and list of + parameters. + """ + from django.db.models.base import Model + + self.pre_sql_setup() + if not self.query.values: + return '', () + table = self.query.tables[0] + qn = self.quote_name_unless_alias + result = ['UPDATE %s' % qn(table)] + result.append('SET') + values, update_params = [], [] + for field, model, val in self.query.values: + if hasattr(val, 'prepare_database_save'): + val = val.prepare_database_save(field) + else: + val = field.get_db_prep_save(val, connection=self.connection) + + # Getting the placeholder for the field. + if hasattr(field, 'get_placeholder'): + placeholder = field.get_placeholder(val) + else: + placeholder = '%s' + + if hasattr(val, 'evaluate'): + val = SQLEvaluator(val, self.query, allow_joins=False) + name = field.column + if hasattr(val, 'as_sql'): + sql, params = val.as_sql(qn, self.connection) + values.append('%s = %s' % (qn(name), sql)) + update_params.extend(params) + elif val is not None: + values.append('%s = %s' % (qn(name), placeholder)) + update_params.append(val) + else: + values.append('%s = NULL' % qn(name)) + if not values: + return '', () + result.append(', '.join(values)) + where, params = self.query.where.as_sql(qn=qn, connection=self.connection) + if where: + result.append('WHERE %s' % where) + return ' '.join(result), tuple(update_params + params) + + def execute_sql(self, result_type): + """ + Execute the specified update. Returns the number of rows affected by + the primary update query. The "primary update query" is the first + non-empty query that is executed. Row counts for any subsequent, + related queries are not available. 
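
        Any related updates reported by query.get_related_updates() are run
        afterwards, each through its own compiler for the same database alias.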
+ """ + cursor = super(SQLUpdateCompiler, self).execute_sql(result_type) + rows = cursor and cursor.rowcount or 0 + is_empty = cursor is None + del cursor + for query in self.query.get_related_updates(): + aux_rows = query.get_compiler(self.using).execute_sql(result_type) + if is_empty: + rows = aux_rows + is_empty = False + return rows + + def pre_sql_setup(self): + """ + If the update depends on results from other tables, we need to do some + munging of the "where" conditions to match the format required for + (portable) SQL updates. That is done here. + + Further, if we are going to be running multiple updates, we pull out + the id values to update at this point so that they don't change as a + result of the progressive updates. + """ + self.query.select_related = False + self.query.clear_ordering(True) + super(SQLUpdateCompiler, self).pre_sql_setup() + count = self.query.count_active_tables() + if not self.query.related_updates and count == 1: + return + + # We need to use a sub-select in the where clause to filter on things + # from other tables. + query = self.query.clone(klass=Query) + query.bump_prefix() + query.extra = {} + query.select = [] + query.add_fields([query.model._meta.pk.name]) + must_pre_select = count > 1 and not self.connection.features.update_can_self_select + + # Now we adjust the current query: reset the where clause and get rid + # of all the tables we don't need (since they're in the sub-select). + self.query.where = self.query.where_class() + if self.query.related_updates or must_pre_select: + # Either we're using the idents in multiple update queries (so + # don't want them to change), or the db backend doesn't support + # selecting from the updating table (e.g. MySQL). + idents = [] + for rows in query.get_compiler(self.using).execute_sql(MULTI): + idents.extend([r[0] for r in rows]) + self.query.add_filter(('pk__in', idents)) + self.query.related_ids = idents + else: + # The fast path. Filters and updates in one query. + self.query.add_filter(('pk__in', query.get_compiler(self.using))) + for alias in self.query.tables[1:]: + self.query.alias_refcount[alias] = 0 + +class SQLAggregateCompiler(SQLCompiler): + def as_sql(self, qn=None): + """ + Creates the SQL for this query. Returns the SQL string and list of + parameters. + """ + if qn is None: + qn = self.quote_name_unless_alias + sql = ('SELECT %s FROM (%s) subquery' % ( + ', '.join([ + aggregate.as_sql(qn, self.connection) + for aggregate in self.query.aggregate_select.values() + ]), + self.query.subquery) + ) + params = self.query.sub_params + return (sql, params) + +class SQLDateCompiler(SQLCompiler): + def results_iter(self): + """ + Returns an iterator over the results from executing this query. + """ + resolve_columns = hasattr(self, 'resolve_columns') + if resolve_columns: + from django.db.models.fields import DateTimeField + fields = [DateTimeField()] + else: + from django.db.backends.util import typecast_timestamp + needs_string_cast = self.connection.features.needs_datetime_string_cast + + offset = len(self.query.extra_select) + for rows in self.execute_sql(MULTI): + for row in rows: + date = row[offset] + if resolve_columns: + date = self.resolve_columns(row, fields)[offset] + elif needs_string_cast: + date = typecast_timestamp(str(date)) + yield date + + +def empty_iter(): + """ + Returns an iterator containing no results. + """ + yield iter([]).next() + + +def order_modified_iter(cursor, trim, sentinel): + """ + Yields blocks of rows from a cursor. 
We use this iterator in the special + case when extra output columns have been added to support ordering + requirements. We must trim those extra columns before anything else can use + the results, since they're only needed to make the SQL valid. + """ + for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), + sentinel): + yield [r[:-trim] for r in rows] diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py index 123fa41dc1..b36193197e 100644 --- a/django/db/models/sql/query.py +++ b/django/db/models/sql/query.py @@ -11,17 +11,16 @@ from copy import deepcopy from django.utils.tree import Node from django.utils.datastructures import SortedDict from django.utils.encoding import force_unicode -from django.db.backends.util import truncate_name -from django.db import connection, connections +from django.db import connection, connections, DEFAULT_DB_ALIAS from django.db.models import signals from django.db.models.fields import FieldDoesNotExist from django.db.models.query_utils import select_related_descend from django.db.models.sql import aggregates as base_aggregates_module +from django.db.models.sql.constants import * +from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin from django.db.models.sql.expressions import SQLEvaluator from django.db.models.sql.where import WhereNode, Constraint, EverythingNode, AND, OR from django.core.exceptions import FieldError -from datastructures import EmptyResultSet, Empty, MultiJoin -from constants import * __all__ = ['Query'] @@ -38,9 +37,10 @@ class Query(object): query_terms = QUERY_TERMS aggregates_module = base_aggregates_module - def __init__(self, model, connection, where=WhereNode): + compiler = 'SQLCompiler' + + def __init__(self, model, where=WhereNode): self.model = model - self.connection = connection self.alias_refcount = {} self.alias_map = {} # Maps alias to join information self.table_map = {} # Maps table names to list of aliases. @@ -104,7 +104,7 @@ class Query(object): Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ - sql, params = self.as_sql() + sql, params = self.get_compiler(DEFAULT_DB_ALIAS).as_sql() return sql % params def __deepcopy__(self, memo): @@ -119,8 +119,6 @@ class Query(object): obj_dict = self.__dict__.copy() obj_dict['related_select_fields'] = [] obj_dict['related_select_cols'] = [] - del obj_dict['connection'] - obj_dict['connection_settings'] = self.connection.settings_dict # Fields can't be pickled, so if a field list has been # specified, we pickle the list of field names instead. @@ -142,11 +140,13 @@ class Query(object): ] self.__dict__.update(obj_dict) - self.connection = connections[connections.alias_for_settings( - obj_dict['connection_settings'])] - def get_query_class(self): - return Query + def get_compiler(self, using=None, connection=None): + if using is None and connection is None: + raise ValueError("Need either using or connection") + if using: + connection = connections[using] + return connection.ops.compiler(self.compiler)(self, connection, using) def get_meta(self): """ @@ -156,22 +156,6 @@ class Query(object): """ return self.model._meta - def quote_name_unless_alias(self, name): - """ - A wrapper around connection.ops.quote_name that doesn't quote aliases - for table names. This avoids problems with some SQL dialects that treat - quoted strings specially (e.g. PostgreSQL). 
- """ - if name in self.quote_cache: - return self.quote_cache[name] - if ((name in self.alias_map and name not in self.table_map) or - name in self.extra_select): - self.quote_cache[name] = name - return name - r = self.connection.ops.quote_name(name) - self.quote_cache[name] = r - return r - def clone(self, klass=None, **kwargs): """ Creates a copy of the current instance. The 'kwargs' parameter can be @@ -180,7 +164,6 @@ class Query(object): obj = Empty() obj.__class__ = klass or self.__class__ obj.model = self.model - obj.connection = self.connection obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.table_map = self.table_map.copy() @@ -243,16 +226,16 @@ class Query(object): obj._setup_query() return obj - def convert_values(self, value, field): + def convert_values(self, value, field, connection): """Convert the database-returned value into a type that is consistent across database backends. By default, this defers to the underlying backend operations, but it can be overridden by Query classes for specific backends. """ - return self.connection.ops.convert_values(value, field) + return connection.ops.convert_values(value, field) - def resolve_aggregate(self, value, aggregate): + def resolve_aggregate(self, value, aggregate, connection): """Resolve the value of aggregates returned by the database to consistent (and reasonable) types. @@ -272,39 +255,9 @@ class Query(object): return float(value) else: # Return value depends on the type of the field being processed. - return self.convert_values(value, aggregate.field) + return self.convert_values(value, aggregate.field, connection) - def results_iter(self): - """ - Returns an iterator over the results from executing this query. - """ - resolve_columns = hasattr(self, 'resolve_columns') - fields = None - for rows in self.execute_sql(MULTI): - for row in rows: - if resolve_columns: - if fields is None: - # We only set this up here because - # related_select_fields isn't populated until - # execute_sql() has been called. - if self.select_fields: - fields = self.select_fields + self.related_select_fields - else: - fields = self.model._meta.fields - row = self.resolve_columns(row, fields) - - if self.aggregate_select: - aggregate_start = len(self.extra_select.keys()) + len(self.select) - aggregate_end = aggregate_start + len(self.aggregate_select) - row = tuple(row[:aggregate_start]) + tuple([ - self.resolve_aggregate(value, aggregate) - for (alias, aggregate), value - in zip(self.aggregate_select.items(), row[aggregate_start:aggregate_end]) - ]) + tuple(row[aggregate_end:]) - - yield row - - def get_aggregation(self): + def get_aggregation(self, using): """ Returns the dictionary with the values of the existing aggregations. """ @@ -316,7 +269,7 @@ class Query(object): # over the subquery instead. 
if self.group_by is not None: from subqueries import AggregateQuery - query = self.connection.ops.query_class(Query, AggregateQuery)(self.model, self.connection) + query = AggregateQuery(self.model) obj = self.clone() @@ -327,7 +280,7 @@ class Query(object): query.aggregate_select[alias] = aggregate del obj.aggregate_select[alias] - query.add_subquery(obj) + query.add_subquery(obj, using) else: query = self self.select = [] @@ -341,17 +294,17 @@ class Query(object): query.related_select_cols = [] query.related_select_fields = [] - result = query.execute_sql(SINGLE) + result = query.get_compiler(using).execute_sql(SINGLE) if result is None: result = [None for q in query.aggregate_select.items()] return dict([ - (alias, self.resolve_aggregate(val, aggregate)) + (alias, self.resolve_aggregate(val, aggregate, connection=connections[using])) for (alias, aggregate), val in zip(query.aggregate_select.items(), result) ]) - def get_count(self): + def get_count(self, using): """ Performs a COUNT() query using the current filter constraints. """ @@ -365,11 +318,11 @@ class Query(object): subquery.clear_ordering(True) subquery.clear_limits() - obj = self.connection.ops.query_class(Query, AggregateQuery)(obj.model, obj.connection) - obj.add_subquery(subquery) + obj = AggregateQuery(obj.model) + obj.add_subquery(subquery, using=using) obj.add_count_column() - number = obj.get_aggregation()[None] + number = obj.get_aggregation(using=using)[None] # Apply offset and limit constraints manually, since using LIMIT/OFFSET # in SQL (in variants that provide them) doesn't change the COUNT @@ -380,7 +333,7 @@ class Query(object): return number - def has_results(self): + def has_results(self, using): q = self.clone() q.add_extra({'a': 1}, None, None, None, None, None) q.add_fields(()) @@ -388,99 +341,8 @@ class Query(object): q.set_aggregate_mask(()) q.clear_ordering() q.set_limits(high=1) - return bool(q.execute_sql(SINGLE)) - - def as_sql(self, with_limits=True, with_col_aliases=False): - """ - Creates the SQL for this query. Returns the SQL string and list of - parameters. - - If 'with_limits' is False, any limit/offset information is not included - in the query. - """ - self.pre_sql_setup() - out_cols = self.get_columns(with_col_aliases) - ordering, ordering_group_by = self.get_ordering() - - # This must come after 'select' and 'ordering' -- see docstring of - # get_from_clause() for details. - from_, f_params = self.get_from_clause() - - qn = self.quote_name_unless_alias - where, w_params = self.where.as_sql(qn=qn, connection=self.connection) - having, h_params = self.having.as_sql(qn=qn, connection=self.connection) - params = [] - for val in self.extra_select.itervalues(): - params.extend(val[1]) - - result = ['SELECT'] - if self.distinct: - result.append('DISTINCT') - result.append(', '.join(out_cols + self.ordering_aliases)) - - result.append('FROM') - result.extend(from_) - params.extend(f_params) - - if where: - result.append('WHERE %s' % where) - params.extend(w_params) - if self.extra_where: - if not where: - result.append('WHERE') - else: - result.append('AND') - result.append(' AND '.join(self.extra_where)) - - grouping, gb_params = self.get_grouping() - if grouping: - if ordering: - # If the backend can't group by PK (i.e., any database - # other than MySQL), then any fields mentioned in the - # ordering clause needs to be in the group by clause. 
- if not self.connection.features.allows_group_by_pk: - for col, col_params in ordering_group_by: - if col not in grouping: - grouping.append(str(col)) - gb_params.extend(col_params) - else: - ordering = self.connection.ops.force_no_ordering() - result.append('GROUP BY %s' % ', '.join(grouping)) - params.extend(gb_params) - - if having: - result.append('HAVING %s' % having) - params.extend(h_params) - - if ordering: - result.append('ORDER BY %s' % ', '.join(ordering)) - - if with_limits: - if self.high_mark is not None: - result.append('LIMIT %d' % (self.high_mark - self.low_mark)) - if self.low_mark: - if self.high_mark is None: - val = self.connection.ops.no_limit_value() - if val: - result.append('LIMIT %d' % val) - result.append('OFFSET %d' % self.low_mark) - - params.extend(self.extra_params) - return ' '.join(result), tuple(params) - - def as_nested_sql(self): - """ - Perform the same functionality as the as_sql() method, returning an - SQL string and parameters. However, the alias prefixes are bumped - beforehand (in a copy -- the current query isn't changed) and any - ordering is removed. - - Used when nesting this query inside another. - """ - obj = self.clone() - obj.clear_ordering(True) - obj.bump_prefix() - return obj.as_sql() + compiler = q.get_compiler(using=using) + return bool(compiler.execute_sql(SINGLE)) def combine(self, rhs, connector): """ @@ -580,20 +442,6 @@ class Query(object): self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by - def pre_sql_setup(self): - """ - Does any necessary class setup immediately prior to producing SQL. This - is for things that can't necessarily be done in __init__ because we - might not have all the pieces in place at that time. - """ - if not self.tables: - self.join((None, self.model._meta.db_table, None, None)) - if (not self.select and self.default_cols and not - self.included_inherited_models): - self.setup_inherited_models() - if self.select_related and not self.related_select_cols: - self.fill_related_selections() - def deferred_to_data(self, target, callback): """ Converts the self.deferred_loading data structure to an alternate data @@ -672,15 +520,6 @@ class Query(object): for model, values in seen.iteritems(): callback(target, model, values) - def deferred_to_columns(self): - """ - Converts the self.deferred_loading data structure to mapping of table - names to sets of column names which are to be loaded. Returns the - dictionary. - """ - columns = {} - self.deferred_to_data(columns, self.deferred_to_columns_cb) - return columns def deferred_to_columns_cb(self, target, model, fields): """ @@ -693,352 +532,6 @@ class Query(object): for field in fields: target[table].add(field.column) - def get_columns(self, with_aliases=False): - """ - Returns the list of columns to use in the select statement. If no - columns have been specified, returns all columns relating to fields in - the model. - - If 'with_aliases' is true, any column names that are duplicated - (without the table names) are given unique aliases. This is needed in - some cases to avoid ambiguity with nested queries. 
- """ - qn = self.quote_name_unless_alias - qn2 = self.connection.ops.quote_name - result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.extra_select.iteritems()] - aliases = set(self.extra_select.keys()) - if with_aliases: - col_aliases = aliases.copy() - else: - col_aliases = set() - if self.select: - only_load = self.deferred_to_columns() - for col in self.select: - if isinstance(col, (list, tuple)): - alias, column = col - table = self.alias_map[alias][TABLE_NAME] - if table in only_load and col not in only_load[table]: - continue - r = '%s.%s' % (qn(alias), qn(column)) - if with_aliases: - if col[1] in col_aliases: - c_alias = 'Col%d' % len(col_aliases) - result.append('%s AS %s' % (r, c_alias)) - aliases.add(c_alias) - col_aliases.add(c_alias) - else: - result.append('%s AS %s' % (r, qn2(col[1]))) - aliases.add(r) - col_aliases.add(col[1]) - else: - result.append(r) - aliases.add(r) - col_aliases.add(col[1]) - else: - result.append(col.as_sql(qn, self.connection)) - - if hasattr(col, 'alias'): - aliases.add(col.alias) - col_aliases.add(col.alias) - - elif self.default_cols: - cols, new_aliases = self.get_default_columns(with_aliases, - col_aliases) - result.extend(cols) - aliases.update(new_aliases) - - result.extend([ - '%s%s' % ( - aggregate.as_sql(qn, self.connection), - alias is not None and ' AS %s' % qn(alias) or '' - ) - for alias, aggregate in self.aggregate_select.items() - ]) - - for table, col in self.related_select_cols: - r = '%s.%s' % (qn(table), qn(col)) - if with_aliases and col in col_aliases: - c_alias = 'Col%d' % len(col_aliases) - result.append('%s AS %s' % (r, c_alias)) - aliases.add(c_alias) - col_aliases.add(c_alias) - else: - result.append(r) - aliases.add(r) - col_aliases.add(col) - - self._select_aliases = aliases - return result - - def get_default_columns(self, with_aliases=False, col_aliases=None, - start_alias=None, opts=None, as_pairs=False): - """ - Computes the default columns for selecting every field in the base - model. Will sometimes be called to pull in related models (e.g. via - select_related), in which case "opts" and "start_alias" will be given - to provide a starting point for the traversal. - - Returns a list of strings, quoted appropriately for use in SQL - directly, as well as a set of aliases used in the select statement (if - 'as_pairs' is True, returns a list of (alias, col_name) pairs instead - of strings as the first component and None as the second component). - """ - result = [] - if opts is None: - opts = self.model._meta - qn = self.quote_name_unless_alias - qn2 = self.connection.ops.quote_name - aliases = set() - only_load = self.deferred_to_columns() - # Skip all proxy to the root proxied model - proxied_model = get_proxied_model(opts) - - if start_alias: - seen = {None: start_alias} - for field, model in opts.get_fields_with_model(): - if start_alias: - try: - alias = seen[model] - except KeyError: - if model is proxied_model: - alias = start_alias - else: - link_field = opts.get_ancestor_link(model) - alias = self.join((start_alias, model._meta.db_table, - link_field.column, model._meta.pk.column)) - seen[model] = alias - else: - # If we're starting from the base model of the queryset, the - # aliases will have already been set up in pre_sql_setup(), so - # we can save time here. 
- alias = self.included_inherited_models[model] - table = self.alias_map[alias][TABLE_NAME] - if table in only_load and field.column not in only_load[table]: - continue - if as_pairs: - result.append((alias, field.column)) - aliases.add(alias) - continue - if with_aliases and field.column in col_aliases: - c_alias = 'Col%d' % len(col_aliases) - result.append('%s.%s AS %s' % (qn(alias), - qn2(field.column), c_alias)) - col_aliases.add(c_alias) - aliases.add(c_alias) - else: - r = '%s.%s' % (qn(alias), qn2(field.column)) - result.append(r) - aliases.add(r) - if with_aliases: - col_aliases.add(field.column) - return result, aliases - - def get_from_clause(self): - """ - Returns a list of strings that are joined together to go after the - "FROM" part of the query, as well as a list any extra parameters that - need to be included. Sub-classes, can override this to create a - from-clause via a "select". - - This should only be called after any SQL construction methods that - might change the tables we need. This means the select columns and - ordering must be done first. - """ - result = [] - qn = self.quote_name_unless_alias - qn2 = self.connection.ops.quote_name - first = True - for alias in self.tables: - if not self.alias_refcount[alias]: - continue - try: - name, alias, join_type, lhs, lhs_col, col, nullable = self.alias_map[alias] - except KeyError: - # Extra tables can end up in self.tables, but not in the - # alias_map if they aren't in a join. That's OK. We skip them. - continue - alias_str = (alias != name and ' %s' % alias or '') - if join_type and not first: - result.append('%s %s%s ON (%s.%s = %s.%s)' - % (join_type, qn(name), alias_str, qn(lhs), - qn2(lhs_col), qn(alias), qn2(col))) - else: - connector = not first and ', ' or '' - result.append('%s%s%s' % (connector, qn(name), alias_str)) - first = False - for t in self.extra_tables: - alias, unused = self.table_alias(t) - # Only add the alias if it's not already present (the table_alias() - # calls increments the refcount, so an alias refcount of one means - # this is the only reference. - if alias not in self.alias_map or self.alias_refcount[alias] == 1: - connector = not first and ', ' or '' - result.append('%s%s' % (connector, qn(alias))) - first = False - return result, [] - - def get_grouping(self): - """ - Returns a tuple representing the SQL elements in the "group by" clause. - """ - qn = self.quote_name_unless_alias - result, params = [], [] - if self.group_by is not None: - if len(self.model._meta.fields) == len(self.group_by) and \ - self.connection.features.allows_group_by_pk: - self.group_by = [(self.model._meta.db_table, self.model._meta.pk.column)] - group_by = self.group_by or [] - - extra_selects = [] - for extra_select, extra_params in self.extra_select.itervalues(): - extra_selects.append(extra_select) - params.extend(extra_params) - for col in group_by + self.related_select_cols + extra_selects: - if isinstance(col, (list, tuple)): - result.append('%s.%s' % (qn(col[0]), qn(col[1]))) - elif hasattr(col, 'as_sql'): - result.append(col.as_sql(qn)) - else: - result.append(str(col)) - return result, params - - def get_ordering(self): - """ - Returns a tuple containing a list representing the SQL elements in the - "order by" clause, and the list of SQL elements that need to be added - to the GROUP BY clause as a result of the ordering. - - Also sets the ordering_aliases attribute on this instance to a list of - extra aliases needed in the select. 
- - Determining the ordering SQL can change the tables we need to include, - so this should be run *before* get_from_clause(). - """ - if self.extra_order_by: - ordering = self.extra_order_by - elif not self.default_ordering: - ordering = self.order_by - else: - ordering = self.order_by or self.model._meta.ordering - qn = self.quote_name_unless_alias - qn2 = self.connection.ops.quote_name - distinct = self.distinct - select_aliases = self._select_aliases - result = [] - group_by = [] - ordering_aliases = [] - if self.standard_ordering: - asc, desc = ORDER_DIR['ASC'] - else: - asc, desc = ORDER_DIR['DESC'] - - # It's possible, due to model inheritance, that normal usage might try - # to include the same field more than once in the ordering. We track - # the table/column pairs we use and discard any after the first use. - processed_pairs = set() - - for field in ordering: - if field == '?': - result.append(self.connection.ops.random_function_sql()) - continue - if isinstance(field, int): - if field < 0: - order = desc - field = -field - else: - order = asc - result.append('%s %s' % (field, order)) - group_by.append((field, [])) - continue - col, order = get_order_dir(field, asc) - if col in self.aggregate_select: - result.append('%s %s' % (col, order)) - continue - if '.' in field: - # This came in through an extra(order_by=...) addition. Pass it - # on verbatim. - table, col = col.split('.', 1) - if (table, col) not in processed_pairs: - elt = '%s.%s' % (qn(table), col) - processed_pairs.add((table, col)) - if not distinct or elt in select_aliases: - result.append('%s %s' % (elt, order)) - group_by.append((elt, [])) - elif get_order_dir(field)[0] not in self.extra_select: - # 'col' is of the form 'field' or 'field1__field2' or - # '-field1__field2__field', etc. - for table, col, order in self.find_ordering_name(field, - self.model._meta, default_order=asc): - if (table, col) not in processed_pairs: - elt = '%s.%s' % (qn(table), qn2(col)) - processed_pairs.add((table, col)) - if distinct and elt not in select_aliases: - ordering_aliases.append(elt) - result.append('%s %s' % (elt, order)) - group_by.append((elt, [])) - else: - elt = qn2(col) - if distinct and col not in select_aliases: - ordering_aliases.append(elt) - result.append('%s %s' % (elt, order)) - group_by.append(self.extra_select[col]) - self.ordering_aliases = ordering_aliases - return result, group_by - - def find_ordering_name(self, name, opts, alias=None, default_order='ASC', - already_seen=None): - """ - Returns the table alias (the name might be ambiguous, the alias will - not be) and column name for ordering by the given 'name' parameter. - The 'name' is of the form 'field1__field2__...__fieldN'. - """ - name, order = get_order_dir(name, default_order) - pieces = name.split(LOOKUP_SEP) - if not alias: - alias = self.get_initial_alias() - field, target, opts, joins, last, extra = self.setup_joins(pieces, - opts, alias, False) - alias = joins[-1] - col = target.column - if not field.rel: - # To avoid inadvertent trimming of a necessary alias, use the - # refcount to show that we are referencing a non-relation field on - # the model. - self.ref_alias(alias) - - # Must use left outer joins for nullable fields and their relations. - self.promote_alias_chain(joins, - self.alias_map[joins[0]][JOIN_TYPE] == self.LOUTER) - - # If we get to this point and the field is a relation to another model, - # append the default ordering for that model. - if field.rel and len(joins) > 1 and opts.ordering: - # Firstly, avoid infinite loops. 
- if not already_seen: - already_seen = set() - join_tuple = tuple([self.alias_map[j][TABLE_NAME] for j in joins]) - if join_tuple in already_seen: - raise FieldError('Infinite loop caused by ordering.') - already_seen.add(join_tuple) - - results = [] - for item in opts.ordering: - results.extend(self.find_ordering_name(item, opts, alias, - order, already_seen)) - return results - - if alias: - # We have to do the same "final join" optimisation as in - # add_filter, since the final column might not otherwise be part of - # the select set (so we can't order on it). - while 1: - join = self.alias_map[alias] - if col != join[RHS_JOIN_COL]: - break - self.unref_alias(alias) - alias = join[LHS_ALIAS] - col = join[LHS_JOIN_COL] - return [(alias, col, order)] def table_alias(self, table_name, create=False): """ @@ -1342,113 +835,6 @@ class Query(object): self.unref_alias(alias) self.included_inherited_models = {} - def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1, - used=None, requested=None, restricted=None, nullable=None, - dupe_set=None, avoid_set=None): - """ - Fill in the information needed for a select_related query. The current - depth is measured as the number of connections away from the root model - (for example, cur_depth=1 means we are looking at models with direct - connections to the root model). - """ - if not restricted and self.max_depth and cur_depth > self.max_depth: - # We've recursed far enough; bail out. - return - - if not opts: - opts = self.get_meta() - root_alias = self.get_initial_alias() - self.related_select_cols = [] - self.related_select_fields = [] - if not used: - used = set() - if dupe_set is None: - dupe_set = set() - if avoid_set is None: - avoid_set = set() - orig_dupe_set = dupe_set - - # Setup for the case when only particular related fields should be - # included in the related selection. - if requested is None and restricted is not False: - if isinstance(self.select_related, dict): - requested = self.select_related - restricted = True - else: - restricted = False - - for f, model in opts.get_fields_with_model(): - if not select_related_descend(f, restricted, requested): - continue - # The "avoid" set is aliases we want to avoid just for this - # particular branch of the recursion. They aren't permanently - # forbidden from reuse in the related selection tables (which is - # what "used" specifies). 
- avoid = avoid_set.copy() - dupe_set = orig_dupe_set.copy() - table = f.rel.to._meta.db_table - if nullable or f.null: - promote = True - else: - promote = False - if model: - int_opts = opts - alias = root_alias - alias_chain = [] - for int_model in opts.get_base_chain(model): - # Proxy model have elements in base chain - # with no parents, assign the new options - # object and skip to the next base in that - # case - if not int_opts.parents[int_model]: - int_opts = int_model._meta - continue - lhs_col = int_opts.parents[int_model].column - dedupe = lhs_col in opts.duplicate_targets - if dedupe: - avoid.update(self.dupe_avoidance.get(id(opts), lhs_col), - ()) - dupe_set.add((opts, lhs_col)) - int_opts = int_model._meta - alias = self.join((alias, int_opts.db_table, lhs_col, - int_opts.pk.column), exclusions=used, - promote=promote) - alias_chain.append(alias) - for (dupe_opts, dupe_col) in dupe_set: - self.update_dupe_avoidance(dupe_opts, dupe_col, alias) - if self.alias_map[root_alias][JOIN_TYPE] == self.LOUTER: - self.promote_alias_chain(alias_chain, True) - else: - alias = root_alias - - dedupe = f.column in opts.duplicate_targets - if dupe_set or dedupe: - avoid.update(self.dupe_avoidance.get((id(opts), f.column), ())) - if dedupe: - dupe_set.add((opts, f.column)) - - alias = self.join((alias, table, f.column, - f.rel.get_related_field().column), - exclusions=used.union(avoid), promote=promote) - used.add(alias) - columns, aliases = self.get_default_columns(start_alias=alias, - opts=f.rel.to._meta, as_pairs=True) - self.related_select_cols.extend(columns) - if self.alias_map[alias][JOIN_TYPE] == self.LOUTER: - self.promote_alias_chain(aliases, True) - self.related_select_fields.extend(f.rel.to._meta.fields) - if restricted: - next = requested.get(f.name, {}) - else: - next = False - if f.null is not None: - new_nullable = f.null - else: - new_nullable = None - for dupe_opts, dupe_col in dupe_set: - self.update_dupe_avoidance(dupe_opts, dupe_col, alias) - self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1, - used, next, restricted, new_nullable, dupe_set, avoid) def add_aggregate(self, aggregate, model, alias, is_summary): """ @@ -1497,7 +883,6 @@ class Query(object): col = field_name # Add the aggregate to the query - alias = truncate_name(alias, self.connection.ops.max_name_length()) aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary) def add_filter(self, filter_expr, connector=AND, negate=False, trim=False, @@ -1548,10 +933,6 @@ class Query(object): raise ValueError("Cannot use None as a query value") lookup_type = 'isnull' value = True - elif (value == '' and lookup_type == 'exact' and - self.connection.features.interprets_empty_strings_as_nulls): - lookup_type = 'isnull' - value = True elif callable(value): value = value() elif hasattr(value, 'evaluate'): @@ -1969,7 +1350,7 @@ class Query(object): original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. """ - query = self.connection.ops.query_class(Query)(self.model, self.connection) + query = Query(self.model) query.add_filter(filter_expr, can_reuse=can_reuse) query.bump_prefix() query.clear_ordering(True) @@ -2347,54 +1728,6 @@ class Query(object): self.select = [(select_alias, select_col)] self.remove_inherited_models() - def set_connection(self, connection): - self.connection = connection - - def execute_sql(self, result_type=MULTI): - """ - Run the query against the database and returns the result(s). 
The - return value is a single data item if result_type is SINGLE, or an - iterator over the results if the result_type is MULTI. - - result_type is either MULTI (use fetchmany() to retrieve all rows), - SINGLE (only retrieve a single row), or None. In this last case, the - cursor is returned if any query is executed, since it's used by - subclasses such as InsertQuery). It's possible, however, that no query - is needed, as the filters describe an empty set. In that case, None is - returned, to avoid any unnecessary database interaction. - """ - try: - sql, params = self.as_sql() - if not sql: - raise EmptyResultSet - except EmptyResultSet: - if result_type == MULTI: - return empty_iter() - else: - return - cursor = self.connection.cursor() - cursor.execute(sql, params) - - if not result_type: - return cursor - if result_type == SINGLE: - if self.ordering_aliases: - return cursor.fetchone()[:-len(self.ordering_aliases)] - return cursor.fetchone() - - # The MULTI case. - if self.ordering_aliases: - result = order_modified_iter(cursor, len(self.ordering_aliases), - self.connection.features.empty_fetchmany_value) - else: - result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), - self.connection.features.empty_fetchmany_value) - if not self.connection.features.can_use_chunked_reads: - # If we are using non-chunked reads, we return the same data - # structure as normally, but ensure it is all read into memory - # before going any further. - return list(result) - return result def get_order_dir(field, default='ASC'): """ @@ -2409,22 +1742,6 @@ def get_order_dir(field, default='ASC'): return field[1:], dirn[1] return field, dirn[0] -def empty_iter(): - """ - Returns an iterator containing no results. - """ - yield iter([]).next() - -def order_modified_iter(cursor, trim, sentinel): - """ - Yields blocks of rows from a cursor. We use this iterator in the special - case when extra output columns have been added to support ordering - requirements. We must trim those extra columns before anything else can use - the results, since they're only needed to make the SQL valid. - """ - for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), - sentinel): - yield [r[:-trim] for r in rows] def setup_join_cache(sender, **kwargs): """ diff --git a/django/db/models/sql/subqueries.py b/django/db/models/sql/subqueries.py index 02dec73bd4..be4689820f 100644 --- a/django/db/models/sql/subqueries.py +++ b/django/db/models/sql/subqueries.py @@ -3,6 +3,7 @@ Query subclasses which provide extra functionality beyond simple data retrieval. """ from django.core.exceptions import FieldError +from django.db import connections from django.db.models.sql.constants import * from django.db.models.sql.datastructures import Date from django.db.models.sql.expressions import SQLEvaluator @@ -17,28 +18,15 @@ class DeleteQuery(Query): Delete queries are done through this class, since they are more constrained than general queries. """ - def get_query_class(self): - return DeleteQuery - def as_sql(self): - """ - Creates the SQL for this query. Returns the SQL string and list of - parameters. - """ - assert len(self.tables) == 1, \ - "Can only delete from one table at a time." 
- qn = self.quote_name_unless_alias - result = ['DELETE FROM %s' % qn(self.tables[0])] - where, params = self.where.as_sql(qn=qn, connection=self.connection) - result.append('WHERE %s' % where) - return ' '.join(result), tuple(params) + compiler = 'SQLDeleteCompiler' - def do_query(self, table, where): + def do_query(self, table, where, using): self.tables = [table] self.where = where - self.execute_sql(None) + self.get_compiler(using).execute_sql(None) - def delete_batch_related(self, pk_list): + def delete_batch_related(self, pk_list, using): """ Set up and execute delete queries for all the objects related to the primary key values in pk_list. To delete the objects themselves, use @@ -58,7 +46,7 @@ class DeleteQuery(Query): 'in', pk_list[offset : offset+GET_ITERATOR_CHUNK_SIZE]), AND) - self.do_query(related.field.m2m_db_table(), where) + self.do_query(related.field.m2m_db_table(), where, using=using) for f in cls._meta.many_to_many: w1 = self.where_class() @@ -74,9 +62,9 @@ class DeleteQuery(Query): AND) if w1: where.add(w1, AND) - self.do_query(f.m2m_db_table(), where) + self.do_query(f.m2m_db_table(), where, using=using) - def delete_batch(self, pk_list): + def delete_batch(self, pk_list, using): """ Set up and execute delete queries for all the objects in pk_list. This should be called after delete_batch_related(), if necessary. @@ -89,19 +77,19 @@ class DeleteQuery(Query): field = self.model._meta.pk where.add((Constraint(None, field.column, field), 'in', pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]), AND) - self.do_query(self.model._meta.db_table, where) + self.do_query(self.model._meta.db_table, where, using=using) class UpdateQuery(Query): """ Represents an "update" SQL query. """ + + compiler = 'SQLUpdateCompiler' + def __init__(self, *args, **kwargs): super(UpdateQuery, self).__init__(*args, **kwargs) self._setup_query() - def get_query_class(self): - return UpdateQuery - def _setup_query(self): """ Runs on initialization and after cloning. Any attributes that would @@ -117,98 +105,8 @@ class UpdateQuery(Query): return super(UpdateQuery, self).clone(klass, related_updates=self.related_updates.copy(), **kwargs) - def execute_sql(self, result_type=None): - """ - Execute the specified update. Returns the number of rows affected by - the primary update query. The "primary update query" is the first - non-empty query that is executed. Row counts for any subsequent, - related queries are not available. - """ - cursor = super(UpdateQuery, self).execute_sql(result_type) - rows = cursor and cursor.rowcount or 0 - is_empty = cursor is None - del cursor - for query in self.get_related_updates(): - aux_rows = query.execute_sql(result_type) - if is_empty: - rows = aux_rows - is_empty = False - return rows - def as_sql(self): - """ - Creates the SQL for this query. Returns the SQL string and list of - parameters. 
- """ - self.pre_sql_setup() - if not self.values: - return '', () - table = self.tables[0] - qn = self.quote_name_unless_alias - result = ['UPDATE %s' % qn(table)] - result.append('SET') - values, update_params = [], [] - for name, val, placeholder in self.values: - if hasattr(val, 'as_sql'): - sql, params = val.as_sql(qn, self.connection) - values.append('%s = %s' % (qn(name), sql)) - update_params.extend(params) - elif val is not None: - values.append('%s = %s' % (qn(name), placeholder)) - update_params.append(val) - else: - values.append('%s = NULL' % qn(name)) - result.append(', '.join(values)) - where, params = self.where.as_sql(qn=qn, connection=self.connection) - if where: - result.append('WHERE %s' % where) - return ' '.join(result), tuple(update_params + params) - - def pre_sql_setup(self): - """ - If the update depends on results from other tables, we need to do some - munging of the "where" conditions to match the format required for - (portable) SQL updates. That is done here. - - Further, if we are going to be running multiple updates, we pull out - the id values to update at this point so that they don't change as a - result of the progressive updates. - """ - self.select_related = False - self.clear_ordering(True) - super(UpdateQuery, self).pre_sql_setup() - count = self.count_active_tables() - if not self.related_updates and count == 1: - return - - # We need to use a sub-select in the where clause to filter on things - # from other tables. - query = self.clone(klass=Query) - query.bump_prefix() - query.extra = {} - query.select = [] - query.add_fields([query.model._meta.pk.name]) - must_pre_select = count > 1 and not self.connection.features.update_can_self_select - - # Now we adjust the current query: reset the where clause and get rid - # of all the tables we don't need (since they're in the sub-select). - self.where = self.where_class() - if self.related_updates or must_pre_select: - # Either we're using the idents in multiple update queries (so - # don't want them to change), or the db backend doesn't support - # selecting from the updating table (e.g. MySQL). - idents = [] - for rows in query.execute_sql(MULTI): - idents.extend([r[0] for r in rows]) - self.add_filter(('pk__in', idents)) - self.related_ids = idents - else: - # The fast path. Filters and updates in one query. - self.add_filter(('pk__in', query)) - for alias in self.tables[1:]: - self.alias_refcount[alias] = 0 - - def clear_related(self, related_field, pk_list): + def clear_related(self, related_field, pk_list, using): """ Set up and execute an update query that clears related entries for the keys in pk_list. @@ -221,8 +119,8 @@ class UpdateQuery(Query): self.where.add((Constraint(None, f.column, f), 'in', pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]), AND) - self.values = [(related_field.column, None, '%s')] - self.execute_sql(None) + self.values = [(related_field, None, None)] + self.get_compiler(using).execute_sql(None) def add_update_values(self, values): """ @@ -235,6 +133,9 @@ class UpdateQuery(Query): field, model, direct, m2m = self.model._meta.get_field_by_name(name) if not direct or m2m: raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field) + if model: + self.add_related_update(model, field, val) + continue values_seq.append((field, model, val)) return self.add_update_fields(values_seq) @@ -244,36 +145,18 @@ class UpdateQuery(Query): Used by add_update_values() as well as the "fast" update path when saving models. 
""" - from django.db.models.base import Model - for field, model, val in values_seq: - if hasattr(val, 'prepare_database_save'): - val = val.prepare_database_save(field) - else: - val = field.get_db_prep_save(val, connection=self.connection) + self.values.extend(values_seq) - # Getting the placeholder for the field. - if hasattr(field, 'get_placeholder'): - placeholder = field.get_placeholder(val) - else: - placeholder = '%s' - - if hasattr(val, 'evaluate'): - val = SQLEvaluator(val, self, allow_joins=False) - if model: - self.add_related_update(model, field.column, val, placeholder) - else: - self.values.append((field.column, val, placeholder)) - - def add_related_update(self, model, column, value, placeholder): + def add_related_update(self, model, field, value): """ Adds (name, value) to an update query for an ancestor model. Updates are coalesced so that we only run one update query per ancestor. """ try: - self.related_updates[model].append((column, value, placeholder)) + self.related_updates[model].append((field, None, value)) except KeyError: - self.related_updates[model] = [(column, value, placeholder)] + self.related_updates[model] = [(field, None, value)] def get_related_updates(self): """ @@ -285,7 +168,7 @@ class UpdateQuery(Query): return [] result = [] for model, values in self.related_updates.iteritems(): - query = self.connection.ops.query_class(Query, UpdateQuery)(model, self.connection) + query = UpdateQuery(model) query.values = values if self.related_ids: query.add_filter(('pk__in', self.related_ids)) @@ -293,6 +176,8 @@ class UpdateQuery(Query): return result class InsertQuery(Query): + compiler = 'SQLInsertCompiler' + def __init__(self, *args, **kwargs): super(InsertQuery, self).__init__(*args, **kwargs) self.columns = [] @@ -300,41 +185,12 @@ class InsertQuery(Query): self.params = () self.return_id = False - def get_query_class(self): - return InsertQuery - def clone(self, klass=None, **kwargs): extras = {'columns': self.columns[:], 'values': self.values[:], 'params': self.params, 'return_id': self.return_id} extras.update(kwargs) return super(InsertQuery, self).clone(klass, **extras) - def as_sql(self): - # We don't need quote_name_unless_alias() here, since these are all - # going to be column names (so we can avoid the extra overhead). - qn = self.connection.ops.quote_name - opts = self.model._meta - result = ['INSERT INTO %s' % qn(opts.db_table)] - result.append('(%s)' % ', '.join([qn(c) for c in self.columns])) - result.append('VALUES (%s)' % ', '.join(self.values)) - params = self.params - if self.return_id and self.connection.features.can_return_id_from_insert: - col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) - r_fmt, r_params = self.connection.ops.return_insert_id() - result.append(r_fmt % col) - params = params + r_params - return ' '.join(result), params - - def execute_sql(self, return_id=False): - self.return_id = return_id - cursor = super(InsertQuery, self).execute_sql(None) - if not (return_id and cursor): - return - if self.connection.features.can_return_id_from_insert: - return self.connection.ops.fetch_returned_insert_id(cursor) - return self.connection.ops.last_insert_id(cursor, - self.model._meta.db_table, self.model._meta.pk.column) - def insert_values(self, insert_values, raw_values=False): """ Set up the insert query from the 'insert_values' dictionary. The @@ -368,47 +224,8 @@ class DateQuery(Query): date field. This requires some special handling when converting the results back to Python objects, so we put it in a separate class. 
""" - def __getstate__(self): - """ - Special DateQuery-specific pickle handling. - """ - for elt in self.select: - if isinstance(elt, Date): - # Eliminate a method reference that can't be pickled. The - # __setstate__ method restores this. - elt.date_sql_func = None - return super(DateQuery, self).__getstate__() - def __setstate__(self, obj_dict): - super(DateQuery, self).__setstate__(obj_dict) - for elt in self.select: - if isinstance(elt, Date): - self.date_sql_func = self.connection.ops.date_trunc_sql - - def get_query_class(self): - return DateQuery - - def results_iter(self): - """ - Returns an iterator over the results from executing this query. - """ - resolve_columns = hasattr(self, 'resolve_columns') - if resolve_columns: - from django.db.models.fields import DateTimeField - fields = [DateTimeField()] - else: - from django.db.backends.util import typecast_timestamp - needs_string_cast = self.connection.features.needs_datetime_string_cast - - offset = len(self.extra_select) - for rows in self.execute_sql(MULTI): - for row in rows: - date = row[offset] - if resolve_columns: - date = self.resolve_columns(row, fields)[offset] - elif needs_string_cast: - date = typecast_timestamp(str(date)) - yield date + compiler = 'SQLDateCompiler' def add_date_select(self, field, lookup_type, order='ASC'): """ @@ -430,25 +247,8 @@ class AggregateQuery(Query): An AggregateQuery takes another query as a parameter to the FROM clause and only selects the elements in the provided list. """ - def get_query_class(self): - return AggregateQuery - def add_subquery(self, query): - self.subquery, self.sub_params = query.as_sql(with_col_aliases=True) + compiler = 'SQLAggregateCompiler' - def as_sql(self, qn=None): - """ - Creates the SQL for this query. Returns the SQL string and list of - parameters. - """ - if qn is None: - qn = self.quote_name_unless_alias - sql = ('SELECT %s FROM (%s) subquery' % ( - ', '.join([ - aggregate.as_sql(qn, self.connection) - for aggregate in self.aggregate_select.values() - ]), - self.subquery) - ) - params = self.sub_params - return (sql, params) + def add_subquery(self, query, using): + self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True) diff --git a/django/db/models/sql/where.py b/django/db/models/sql/where.py index 825b0ff7e6..6db819d915 100644 --- a/django/db/models/sql/where.py +++ b/django/db/models/sql/where.py @@ -162,6 +162,11 @@ class WhereNode(tree.Node): else: extra = '' + if (len(params) == 1 and params[0] == '' and lookup_type == 'exact' + and connection.features.interprets_empty_strings_as_nulls): + lookup_type = 'isnull' + value_annot = True + if lookup_type in connection.operators: format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),) return (format % (field_sql, diff --git a/django/db/utils.py b/django/db/utils.py index 2917dbee96..c9effde28d 100644 --- a/django/db/utils.py +++ b/django/db/utils.py @@ -10,6 +10,7 @@ def load_backend(backend_name): # backends that ships with Django, so look there first. return import_module('.base', 'django.db.backends.%s' % backend_name) except ImportError, e: + raise # If the import failed, we might be looking for a database backend # distributed external to Django. So we'll try that next. try: @@ -17,7 +18,7 @@ def load_backend(backend_name): except ImportError, e_user: # The database backend wasn't found. Display a helpful error message # listing all possible (built-in) database backends. 
- backend_dir = os.path.join(__path__[0], 'backends') + backend_dir = os.path.join(os.path.dirname(__file__), 'backends') try: available_backends = [f for f in os.listdir(backend_dir) if os.path.isdir(os.path.join(backend_dir, f)) diff --git a/django/forms/models.py b/django/forms/models.py index 98b63b39d5..31f598596e 100644 --- a/django/forms/models.py +++ b/django/forms/models.py @@ -3,6 +3,7 @@ Helper functions for creating Form classes from Django models and database field objects. """ +from django.db import connections from django.utils.encoding import smart_unicode, force_unicode from django.utils.datastructures import SortedDict from django.utils.text import get_text_list, capfirst @@ -471,8 +472,7 @@ class BaseModelFormSet(BaseFormSet): pk = self.data[pk_key] pk_field = self.model._meta.pk pk = pk_field.get_db_prep_lookup('exact', pk, - connection=self.get_queryset().query.connection) - pk = pk_field.get_db_prep_lookup('exact', pk) + connection=connections[self.get_queryset()._using]) if isinstance(pk, list): pk = pk[0] kwargs['instance'] = self._existing_object(pk) diff --git a/docs/ref/databases.txt b/docs/ref/databases.txt index ec47fbb1a9..c4052a50b5 100644 --- a/docs/ref/databases.txt +++ b/docs/ref/databases.txt @@ -256,6 +256,7 @@ Here's a sample configuration which uses a MySQL option file:: } } + # my.cnf [client] database = DATABASE_NAME diff --git a/docs/ref/django-admin.txt b/docs/ref/django-admin.txt index 7901a16857..71f54b4276 100644 --- a/docs/ref/django-admin.txt +++ b/docs/ref/django-admin.txt @@ -765,6 +765,14 @@ with an appropriate extension (e.g. ``json`` or ``xml``). See the documentation for ``loaddata`` for details on the specification of fixture data files. +--database +~~~~~~~~~~ + +The alias for the database install the tables for. By default uses the +``'default'`` alias. + +--noinput +~~~~~~~~~ The :djadminopt:`--noinput` option may be provided to suppress all user prompts. diff --git a/docs/ref/settings.txt b/docs/ref/settings.txt index df09f2183d..c91198d75b 100644 --- a/docs/ref/settings.txt +++ b/docs/ref/settings.txt @@ -144,8 +144,6 @@ Default: ``600`` The default number of seconds to cache a page when the caching middleware or ``cache_page()`` decorator is used. -.. setting:: DATABASES - .. setting:: CSRF_COOKIE_NAME CSRF_COOKIE_NAME @@ -192,6 +190,9 @@ end users) indicating the reason the request was rejected. See :ref:`ref-contrib-csrf`. + +.. setting:: DATABASES + DATABASES --------- diff --git a/tests/modeltests/delete/models.py b/tests/modeltests/delete/models.py index 58b296d8d2..943a327403 100644 --- a/tests/modeltests/delete/models.py +++ b/tests/modeltests/delete/models.py @@ -171,9 +171,9 @@ True # temporarily replace the UpdateQuery class to verify that E.f is actually nulled out first >>> import django.db.models.sql >>> class LoggingUpdateQuery(django.db.models.sql.UpdateQuery): -... def clear_related(self, related_field, pk_list): +... def clear_related(self, related_field, pk_list, using): ... print "CLEARING FIELD",related_field.name -... return super(LoggingUpdateQuery, self).clear_related(related_field, pk_list) +... 
return super(LoggingUpdateQuery, self).clear_related(related_field, pk_list, using) >>> original_class = django.db.models.sql.UpdateQuery >>> django.db.models.sql.UpdateQuery = LoggingUpdateQuery >>> e1.delete() diff --git a/tests/modeltests/many_to_one/models.py b/tests/modeltests/many_to_one/models.py index 8093e73013..85c237aadd 100644 --- a/tests/modeltests/many_to_one/models.py +++ b/tests/modeltests/many_to_one/models.py @@ -157,7 +157,7 @@ False # The underlying query only makes one join when a related table is referenced twice. >>> queryset = Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith') ->>> sql = queryset.query.as_sql()[0] +>>> sql = queryset.query.get_compiler(queryset._using).as_sql()[0] >>> sql.count('INNER JOIN') 1 diff --git a/tests/modeltests/proxy_models/models.py b/tests/modeltests/proxy_models/models.py index e38266fb70..44fee9ee6c 100644 --- a/tests/modeltests/proxy_models/models.py +++ b/tests/modeltests/proxy_models/models.py @@ -166,12 +166,13 @@ class ProxyImprovement(Improvement): __test__ = {'API_TESTS' : """ # The MyPerson model should be generating the same database queries as the # Person model (when the same manager is used in each case). ->>> MyPerson.other.all().query.as_sql() == Person.objects.order_by("name").query.as_sql() +>>> from django.db import DEFAULT_DB_ALIAS +>>> MyPerson.other.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql() == Person.objects.order_by("name").query.get_compiler(DEFAULT_DB_ALIAS).as_sql() True # The StatusPerson models should have its own table (it's using ORM-level # inheritance). ->>> StatusPerson.objects.all().query.as_sql() == Person.objects.all().query.as_sql() +>>> StatusPerson.objects.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql() == Person.objects.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql() False # Creating a Person makes them accessible through the MyPerson proxy. diff --git a/tests/regressiontests/aggregation_regress/models.py b/tests/regressiontests/aggregation_regress/models.py index d561106546..157de4c538 100644 --- a/tests/regressiontests/aggregation_regress/models.py +++ b/tests/regressiontests/aggregation_regress/models.py @@ -250,10 +250,10 @@ FieldError: Cannot resolve keyword 'foo' into field. Choices are: authors, conta >>> out = pickle.dumps(qs) # Then check that the round trip works. 
->>> query = qs.query.as_sql()[0] +>>> query = qs.query.get_compiler(qs._using).as_sql()[0] >>> select_fields = qs.query.select_fields >>> query2 = pickle.loads(pickle.dumps(qs)) ->>> query2.query.as_sql()[0] == query +>>> query2.query.get_compiler(query2._using).as_sql()[0] == query True >>> query2.query.select_fields = select_fields @@ -380,5 +380,4 @@ if run_stddev_tests(): >>> Book.objects.aggregate(Variance('price', sample=True)) {'price__variance': 700.53...} - """ diff --git a/tests/regressiontests/backends/tests.py b/tests/regressiontests/backends/tests.py index 9f4ea3be77..c6ed025a90 100644 --- a/tests/regressiontests/backends/tests.py +++ b/tests/regressiontests/backends/tests.py @@ -18,13 +18,13 @@ class Callproc(unittest.TestCase): return True else: return True - + class LongString(unittest.TestCase): def test_long_string(self): # If the backend is Oracle, test that we can save a text longer # than 4000 chars and read it properly - if settings.DATABASE_ENGINE == 'oracle': + if settings.DATABASES[DEFAULT_DB_ALIAS]['DATABASE_ENGINE'] == 'oracle': c = connection.cursor() c.execute('CREATE TABLE ltext ("TEXT" NCLOB)') long_str = ''.join([unicode(x) for x in xrange(4000)]) diff --git a/tests/regressiontests/model_inheritance_regress/models.py b/tests/regressiontests/model_inheritance_regress/models.py index 6a804a97c1..79494077d9 100644 --- a/tests/regressiontests/model_inheritance_regress/models.py +++ b/tests/regressiontests/model_inheritance_regress/models.py @@ -314,7 +314,8 @@ DoesNotExist: ArticleWithAuthor matching query does not exist. # likely to ocurr naturally with model inheritance, so we check it here). # Regression test for #9390. This necessarily pokes at the SQL string for the # query, since the duplicate problems are only apparent at that late stage. ->>> sql = ArticleWithAuthor.objects.order_by('pub_date', 'pk').query.as_sql()[0] +>>> qs = ArticleWithAuthor.objects.order_by('pub_date', 'pk') +>>> sql = qs.query.get_compiler(qs._using).as_sql()[0] >>> fragment = sql[sql.find('ORDER BY'):] >>> pos = fragment.find('pub_date') >>> fragment.find('pub_date', pos + 1) == -1 diff --git a/tests/regressiontests/multiple_database/tests.py b/tests/regressiontests/multiple_database/tests.py index e7876e67c9..bc1be2cf94 100644 --- a/tests/regressiontests/multiple_database/tests.py +++ b/tests/regressiontests/multiple_database/tests.py @@ -71,8 +71,6 @@ class PickleQuerySetTestCase(TestCase): def test_pickling(self): for db in connections: qs = Book.objects.all() - self.assertEqual(qs.query.connection, - pickle.loads(pickle.dumps(qs)).query.connection) self.assertEqual(qs._using, pickle.loads(pickle.dumps(qs))._using) diff --git a/tests/regressiontests/queries/models.py b/tests/regressiontests/queries/models.py index 4df15d5b4b..214f3a6763 100644 --- a/tests/regressiontests/queries/models.py +++ b/tests/regressiontests/queries/models.py @@ -822,8 +822,8 @@ We can do slicing beyond what is currently in the result cache, too. Bug #7045 -- extra tables used to crash SQL construction on the second use. >>> qs = Ranking.objects.extra(tables=['django_site']) ->>> s = qs.query.as_sql() ->>> s = qs.query.as_sql() # test passes if this doesn't raise an exception. +>>> s = qs.query.get_compiler(qs._using).as_sql() +>>> s = qs.query.get_compiler(qs._using).as_sql() # test passes if this doesn't raise an exception. Bug #7098 -- Make sure semi-deprecated ordering by related models syntax still works. @@ -912,9 +912,9 @@ We should also be able to pickle things that use select_related(). 
The only tricky thing here is to ensure that we do the related selections properly after unpickling. >>> qs = Item.objects.select_related() ->>> query = qs.query.as_sql()[0] +>>> query = qs.query.get_compiler(qs._using).as_sql()[0] >>> query2 = pickle.loads(pickle.dumps(qs.query)) ->>> query2.as_sql()[0] == query +>>> query2.get_compiler(qs._using).as_sql()[0] == query True Check pickling of deferred-loading querysets @@ -1051,7 +1051,7 @@ sufficient that this query runs without error. Calling order_by() with no parameters removes any existing ordering on the model. But it should still be possible to add new ordering after that. >>> qs = Author.objects.order_by().order_by('name') ->>> 'ORDER BY' in qs.query.as_sql()[0] +>>> 'ORDER BY' in qs.query.get_compiler(qs._using).as_sql()[0] True Incorrect SQL was being generated for certain types of exclude() queries that @@ -1085,7 +1085,8 @@ performance problems on backends like MySQL. Nested queries should not evaluate the inner query as part of constructing the SQL (so we should see a nested query here, indicated by two "SELECT" calls). ->>> Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy")).query.as_sql()[0].count('SELECT') +>>> qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy")) +>>> qs.query.get_compiler(qs._using).as_sql()[0].count('SELECT') 2 Bug #10181 -- Avoid raising an EmptyResultSet if an inner query is provably @@ -1235,7 +1236,7 @@ portion in MySQL to prevent unnecessary sorting. >>> query = Tag.objects.values_list('parent_id', flat=True).order_by().query >>> query.group_by = ['parent_id'] ->>> sql = query.as_sql()[0] +>>> sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0] >>> fragment = "ORDER BY " >>> pos = sql.find(fragment) >>> sql.find(fragment, pos + 1) == -1
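
A usage sketch of the split this patch introduces, based on the subqueries.py changes and the test updates above: a Query no longer renders or executes its own SQL; callers ask it for a backend-specific compiler keyed by a database alias. The helper name below, and the fallback to a queryset's ``_using`` attribute as the default alias, are illustrative assumptions, and the import assumes a settings module configured for this branch.

    # Usage sketch only; mirrors the new API exercised by the tests in this patch.
    # Assumes DJANGO_SETTINGS_MODULE points at a project configured for this branch.
    from django.db import DEFAULT_DB_ALIAS

    def queryset_sql(qs, using=None):
        """Return the (sql, params) pair a queryset would execute on 'using'."""
        # Previously callers wrote qs.query.as_sql(); now the Query only holds
        # state, and a backend-specific compiler obtained per database alias
        # builds the SQL string and its parameters.
        alias = using or getattr(qs, '_using', DEFAULT_DB_ALIAS)
        compiler = qs.query.get_compiler(alias)
        return compiler.as_sql()

    # Write queries follow the same shape: for example, DeleteQuery.do_query()
    # above now calls self.get_compiler(using).execute_sql(None) rather than
    # executing against a connection stored on the query itself.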