From 4318cc103d8bab17549b32a3643793befa655d9e Mon Sep 17 00:00:00 2001
From: gongwenxin
Date: Tue, 20 May 2025 16:17:10 +0800
Subject: [PATCH] Add LLMService and optional LLM-based request body generation

---
 ddms_compliance_suite/llm_utils/llm_service.py | 226 +++++++++++++++++++++
 ddms_compliance_suite/test_orchestrator.py     |  78 ++++++++-
 2 files changed, 301 insertions(+), 3 deletions(-)
 create mode 100644 ddms_compliance_suite/llm_utils/llm_service.py
zMTavGJq9T$1C`+xol}`OLP@Cl;v*d7q=J;`fD)+G5k49sp)WC_c5$Q9YoiXQx>mE?-SMX zG9vM&nGG_3^xUt9Bx*LWg$oH4As|JB5dKA^s9plV3_zb8Lw`g(AtfcG6pa~RnT!;1 zUvg4TC`bhn2PJW&h*Z`KP&xXqR}e~QuWFSPs#!%%&6YZTZ*ZQ4N`)RC|G=T(r$@#I z-|>HTA=v->U~hY%_q_k(xAhBwx=>$H`}D*|Z;W66Z2bDWf4_Mgv~T?SJAv-&!Pjs3 zPaY0@Ffj4{xqzp`|L*b0FOLWN5B<`9m}5I}`jY?HiOF|8{{Cx|?H`R_JLW&}`ef%N z|B;*iOV_rwkhTiM81;4YWbpRs;Q8bJOCL|XGZ1C~O4edA?ju`S*hM`>usm*;Q7+)vm#_2dm2;IU&;o&p(FzyyitBD{isKMQsG1A5@XbK;6 zh=1rIgkerVi~8dpw-9m>X$X-UYYHLbFa>&}Q=1SIk~0FJj_`J%V}^U0kYUbv2r2Li z`w201r(yA-CcO7ft2rwQ%Ls@N`T)aVH>)@)5?EWZET^@;QCJGa-&d zRIHM4VGfYcuR+p|-AlwYX+I~4IVJb;w=gV15$N5XfUgKTagDU-uVkuZG#>nNK!VoESNbii`kKs}g7CuH4tQ^cg9 zF7yz1U&Qd+#6UBPxwc6h5zez2np;c=2~nRo3{2Xjj)xg#c zR={Q?BGhL@ZE=s0NAsBR#Z4dIbf?1+*{qrnuAK3JZ0OXV@xTZZ&cf!`gsgGK8qcWWa2lfa$}XQ4<6Q{Y&P zb`(8INn~=CnsqjE*n_?$)y1}2GE1w1D1SNDnjO=}Y}5u4kOhFwEPAvL z2n-aqCC6|^r^bwj(7Tht(iXTiL^hF=4zPpBJu8mN2H}G2w&YinTeJE10o+j*W&!;3 z+EO0FKhK>4JoDX}>H8SRH|5n7jxW5UVF(yi80}(BOT&Fd-%ZP--{CKiB)2W~F_ILz zQ_&m}PxKXqT|XS-)TXCFfI z;L$H9-nbI-98Pvje0Du>{OI_N&!W!4@uU9!o6)fyy9RUPKXX0M-4D&dgMbN}*(E=j zGt%+z-u8cS`0qDA%I1eLKG;4!_$Bz8(C6gYkHK^Gzts!l1a}q~PaZijaqCjxLy+YP znq)5e1Ax$fymS0|A9R=VNYHI4elXGwlF?xq1+IrnE7*B&IQv z;iHoQ7-;(i5TJ|~6gW+szXUS?MT;XvJR#y65o$Y!Z)U)8e#rjgNBkXM@skj8!azef z-9RJ#;&9;OGojkgE(gE)JmC3o;>LSW80hQ?eEmW2!l!|cI}oL(J9ztj|LKpn+HGX) zpj^^dIAqoa0YN22)*I0XttJOKiJ;P^ivm_+u@QiTStS57fMZ4zZMIlk8I`0B0W?6t zF)Gd>;sTejpj_dESUom=s|Ugj{7?X{2flqf*#7~q2O+87x)?b2HV6ciFpJ|iU}4Wg zlaO(7D;zl4G5+0YsEz3eIiZ|d#%LgNPWi4LH7e8;+|%G?bsWF(`S{@5F@uQJ?ZBC1fe$>PLGqGaR!$)z5x6X~Ij69h(P0mzXwi%W8e8b0}-tV zT=4`B_4At)8jC)*1?VkARzOL4y`TI9HnDTSHW!FMO#+t>19P;VfzB)blc%8niCd>8 zZk(BXzdP99U#{N)wl5uFW2^_lsQ5qytOyDQd8pNA8_j$N=;XF2hckW^? z68K)>c3A~q0b`?B6>p_R{@`VkxBznK6b>0IwHXNx_Y<56 zvM_DDDY2^Xml4KiV$;F1hIlD>+r~&m*ya`7(lnbKFm*D1nV|EydJ9A*jC(1#eP9*f znTNkM4dKn**kYng%@C&`xoaYrE+b4Rdco?kI7y?x?2N-ZlI7zSSz%^8FA|o5Hr7J` zAav*CdIIz;6ss(G3@w^%`c-iJ+p(#IRk(aNemA3ZG^4_sQ8C=gWK@h~yx^%B%Y1S) zbEP+PrDxl?ZvMIS?)09T!Gz%ypKhCH^Uun}lQqX`n2hBEuQAIjhaAJ3hgUJW=Y7f- zreuPIt@xA>Q>9NSu{=A@*jpK!YYM|&!Pg1km9t*JD6{UUv&S?!Okp)sV`QpdVhT)N zjfv6ZOi8fx?A~UtX4RN3i&?bBt64h*A(MDd6>v+>JeSp-H8wAIEOX&l))V*S;>J{0)8(e2H0GJ@jH%wYyn$KP$mlFSWzz!*rqy@0ooee9^{XzbdXU^q zUKO+b+0pHLyxaFMq?Or!(3jTgsTxxzk1F%L$~>R4;O@NKb6dK%oZjZy5*c(!pQGP( z(KT4i)IP(!MEXkWnUV%Z*XUDPKzz;oQMKNy*7xXsQWp)V1})yQs^J80S@meyHgDNB zruGG2nPIHBeDEo6@x~#Ox43Fp=Pj-qEq=~h{2cSVX{2~Bqj};^Qt{6VaxUo3>zJYn zX8nskqiNLG=ruNuEUA-Tbwq`5V3Y8;8sz`J21q z#`4yT=56%mp?u!vF2z_zesA`V8H)ze2dN(yR}3{Vb$fo?v}dfKVyK9zH88t&duw+y z1uu42-7To-s+tnXQq~E6E-LLWzgRxdI8wBR(Jma*E*jOY^lDc!t7?4OEkA2B&&74e zG5MQ@USl>r$2>!1W>{c=bRl}*nGN!hHX>4I$ zb~2QcY242=9AFL{Wb#@^wJt{Mno0*2zdq1m^YTz*kUZRY7qi>M?Apuhbu#;3Wt^{p z2If}?{t3~3P8(60irP-;P_th_nKar_b+%WXJ*r;fRWAW{^5^l$+OHWB4<2B2m7q7C zno;F*jPkiD8P~TI)94WK?kI8PFg`>xx@zWG10vqVD0kgeC;d7lMN|*z2C&1!mATa- z><}cwq`;hSz8z{i80a8W;X?QPyUS@cTFslzgvILdo~@GB&Rge*ZhJWR)07 ze!zuL`-5nHO_}hA1?e@I@K2Z$zW=05uPGA#sYndpg}6)p@&VFT++uaog~F{C3_RzC zdhQ9VDU6?S?ZirrM#64}dspt7=_#~#iX0HcrBDo=zi~ppSz*QXa7;KTV1`jB!bG`I zf?`oImrdv_q)>ulDs(#}Z(#QXLb2?gEJgeh-XomC(7TuB-$TOtTz+aPR`TRndgj=C z{k^z7xH$Q4W_Hh0-S#Ol%7CS1=J%MpU!0P1Ss9k1osx5(3M@5aifaLTh9a>ew}b9l z-*NC%iC4bhfeMlE!Xc4RAecK^jIO153jviQ&{at5i}iU6(K!?Pw37Qt?y5SJ58l+=diAluM-P;9G%jxsq#F_y)nFU;776h5W&o8(4l&5dh+x z4-)dYID#Q}Vt({OTic=ImtG*Z$+z5y0ktDFU;L z?$B?32%gg7va&M&P4Mu)LA}p4W1+smiJeXSP%UDx7-acvJpzX#)-^+_RETq^(TQT0 zGuLY&a!|OSO5yaia~8~)Nc|k7NGtMLft{u}^esgfOn_#o-O!$uz)pxV=;|b9>w6y6 z$->bjey5b8vdHyKNi4QU*B`7LAbAaUdByL*2;cL+LE&!bxg9DFZU`-AxJs+HG_c|g z8`tfuWTm8SzlE~fs9lJQGS=Q%6_;CAQMb_u_jeRJW!Pl&gBQ?Z^fMRk!U+8~h6D55 
zj?E^7AtQl6H2RSS^(+#_jFNhkyn+%El33c%&l=eDN17XLcK%{&#$oVKwhHz!_yy2v z>V04at{_I&fe$++#Buz-1j7}7!Q%dc#s3A9{|(FcV)?&d>c3%mf5n#Em8W~sM&;>V zd3x815qWOA0t8ppf9ta5?n;XQB(olkZ_DopK2=k<#ND5$eW7Y3-Gx zqBO54t*hZ&b9eJt*3N$jaq)9FoF~bto#m&>J6E4t&15edNm~A6(oUv)XM4?9a%KCL zyV9(lRbFZ7m@NKe{IPf@t<@)U{bxpAuXX4}Uq&4x(%uy*j#eM4W;7K((K-$))p)fv zWAjU<1lW8^FeS!xTk!jsP$ywTDfbkZOow=JH-JkA37cs@Feabas0Kuz|RWsVEpOjAn1cA#$POK$9+0r>B z8JkDU5JUF=6yqlueK$PvX1;Sx@a#gl) II2(QcAAhR|zW@LL literal 0 HcmV?d00001 diff --git a/ddms_compliance_suite/llm_utils/llm_service.py b/ddms_compliance_suite/llm_utils/llm_service.py new file mode 100644 index 0000000..812d8b6 --- /dev/null +++ b/ddms_compliance_suite/llm_utils/llm_service.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- +# ddms_compliance_suite/llm_utils/llm_service.py +import os +import json +import logging +import re +from typing import Optional, Dict, Any, List + +import requests +from pydantic import BaseModel, Field +from pydantic.json_schema import models_json_schema + +logger = logging.getLogger(__name__) + +# --- Pydantic 模型示例 (用于测试和生成Schema) --- +class SampleUserAddress(BaseModel): + street: str = Field(..., description="街道地址") + city: str = Field(..., description="城市") + zip_code: str = Field(..., description="邮政编码", pattern=r"^[0-9]{5,6}$") + +class SampleUserProfile(BaseModel): + user_id: int = Field(..., description="用户ID", ge=1) + username: str = Field(..., description="用户名", min_length=3, max_length=50) + email: Optional[str] = Field(None, description="用户邮箱", pattern=r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$") + is_active: bool = Field(True, description="账户是否激活") + address: Optional[SampleUserAddress] = Field(None, description="用户地址") + hobbies: Optional[list[str]] = Field(None, description="兴趣爱好列表") + +# --- LLM 服务类 --- +class LLMService: + """ + 封装与大语言模型 (LLM) API的交互,用于智能参数生成和验证。 + 目前针对通义千问兼容OpenAI的模式进行实现。 + """ + def __init__(self, api_key: str, base_url: str, model_name: str = "qwen-plus"): + """ + 初始化LLM服务。 + + Args: + api_key: 大模型服务的API Key。 + base_url: 大模型服务的兼容OpenAI的基础URL。 + model_name: 要使用的具体模型名称。 + """ + if not api_key: + raise ValueError("API Key不能为空") + if not base_url: + raise ValueError("Base URL不能为空") + + self.api_key = api_key + self.base_url = base_url.rstrip('/') + self.model_name = model_name + self.headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + } + logger.info(f"LLMService initialized for model '{self.model_name}' at {self.base_url}") + + def _execute_chat_completion_request( + self, + messages: List[Dict[str, str]], + max_tokens: int = 1024, + temperature: float = 0.7, + # TODO: Consider adding a parameter like response_format_type: Optional[str] = None + # if the LLM API supports forcing JSON output (e.g., { "type": "json_object" }) + ) -> Optional[str]: + """ + 执行对LLM聊天补全端点的通用请求。 + + Args: + messages: 发送给LLM的消息列表 (例如, [{'role': 'system', 'content': '...'}, {'role': 'user', ...}]). 
+
+# --- LLM service class ---
+class LLMService:
+    """
+    Wraps interaction with a large language model (LLM) API for intelligent parameter generation and validation.
+    Currently implemented against Tongyi Qianwen's (Qwen) OpenAI-compatible mode.
+    """
+    def __init__(self, api_key: str, base_url: str, model_name: str = "qwen-plus"):
+        """
+        Initialize the LLM service.
+
+        Args:
+            api_key: API key for the LLM service.
+            base_url: OpenAI-compatible base URL of the LLM service.
+            model_name: Name of the specific model to use.
+        """
+        if not api_key:
+            raise ValueError("API key must not be empty")
+        if not base_url:
+            raise ValueError("Base URL must not be empty")
+
+        self.api_key = api_key
+        self.base_url = base_url.rstrip('/')
+        self.model_name = model_name
+        self.headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
+        logger.info(f"LLMService initialized for model '{self.model_name}' at {self.base_url}")
+
+    def _execute_chat_completion_request(
+        self,
+        messages: List[Dict[str, str]],
+        max_tokens: int = 1024,
+        temperature: float = 0.7,
+        # TODO: Consider adding a parameter like response_format_type: Optional[str] = None
+        # if the LLM API supports forcing JSON output (e.g., { "type": "json_object" })
+    ) -> Optional[str]:
+        """
+        Execute a generic request against the LLM chat-completions endpoint.
+
+        Args:
+            messages: List of messages to send to the LLM (e.g., [{'role': 'system', 'content': '...'}, {'role': 'user', ...}]).
+            max_tokens: Maximum number of tokens the LLM may generate.
+            temperature: Randomness of the LLM's generation.
+
+        Returns:
+            The raw text content returned by the LLM assistant, or None on error or if no valid content was returned.
+        """
+        payload = {
+            "model": self.model_name,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "temperature": temperature,
+        }
+        # if response_format_type:
+        #     payload["response_format"] = {"type": response_format_type}
+
+        logger.debug(f"LLM API Request Payload:\n{json.dumps(payload, indent=2, ensure_ascii=False)}")
+
+        try:
+            response = requests.post(f"{self.base_url}/chat/completions", headers=self.headers, json=payload, timeout=60)
+            response.raise_for_status()  # Raise if the HTTP status code indicates an error
+
+            response_data = response.json()
+            logger.debug(f"LLM API Response:\n{json.dumps(response_data, indent=2, ensure_ascii=False)}")
+
+            if response_data.get("choices") and len(response_data["choices"]) > 0 and response_data["choices"][0].get("message"):
+                assistant_message = response_data["choices"][0]["message"]
+                assistant_response_content = assistant_message.get("content")
+                if assistant_response_content:
+                    return assistant_response_content
+                else:
+                    logger.warning("Message content in the LLM response is empty.")
+            else:
+                logger.warning(f"LLM response has an unexpected format or empty choices: {response_data}")
+
+        except requests.exceptions.RequestException as e_req:
+            logger.error(f"Network error while calling the LLM API: {e_req}", exc_info=True)
+        except Exception as e:
+            logger.error(f"Unexpected error during the LLM chat completion request: {e}", exc_info=True)
+
+        return None
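The `choices[0]["message"]["content"]` navigation above assumes the OpenAI-compatible response layout, which DashScope's compatible mode mirrors. A trimmed illustration of the shape being parsed (field values are made up, not a captured response):

```python
# Illustrative OpenAI-compatible chat completion response.
response_data = {
    "id": "chatcmpl-...",
    "model": "qwen-plus",
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": '{"street": "..."}'},
            "finish_reason": "stop",
        }
    ],
    "usage": {"prompt_tokens": 210, "completion_tokens": 55, "total_tokens": 265},
}
# What _execute_chat_completion_request ultimately returns:
content = response_data["choices"][0]["message"]["content"]
```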
+
+    def generate_parameters_from_schema(
+        self,
+        pydantic_model_class: type[BaseModel],
+        prompt_instructions: Optional[str] = None,
+        max_tokens: int = 1024,
+        temperature: float = 0.7
+    ) -> Optional[Dict[str, Any]]:
+        """
+        Generate a JSON Schema from the given Pydantic model class, then call the LLM to produce a parameter dict conforming to that schema.
+        """
+        try:
+            # 1. Generate a JSON Schema from the Pydantic model
+            model_schema = pydantic_model_class.model_json_schema(ref_template='{model}')
+            main_model_name = pydantic_model_class.__name__
+            schema_str = json.dumps(model_schema, indent=2, ensure_ascii=False)
+            logger.debug(f"Generated JSON Schema for '{main_model_name}':\n{schema_str}")
+
+            # 2. Build the prompt
+            system_prompt = (
+                "You are an API test data generation assistant. Based on the JSON Schema and the extra instructions provided by the user, "
+                "generate a JSON object that conforms to that schema. Make sure your output is strictly a JSON object, "
+                "with no additional explanations, comments, or Markdown markup."
+            )
+            user_prompt_content = f"Please generate a valid JSON object instance for the following JSON Schema:\n\n```json\n{schema_str}\n```\n"
+            if prompt_instructions:
+                user_prompt_content += f"\nPlease also follow these additional instructions:\n{prompt_instructions}"
+            messages = [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_prompt_content}
+            ]
+
+            # 3. Call the generic LLM request method to get the raw response
+            assistant_response_content = self._execute_chat_completion_request(
+                messages=messages,
+                max_tokens=max_tokens,
+                temperature=temperature
+            )
+
+            # 4. Parse the response
+            if assistant_response_content:
+                # Try to extract the JSON part from the returned content
+                json_match = re.search(r'```json\n(.*?)\n```', assistant_response_content, re.DOTALL)
+                if json_match:
+                    json_str = json_match.group(1)
+                else:
+                    first_brace = assistant_response_content.find('{')
+                    last_brace = assistant_response_content.rfind('}')
+                    if first_brace != -1 and last_brace != -1 and last_brace > first_brace:
+                        json_str = assistant_response_content[first_brace : last_brace + 1]
+                    else:
+                        json_str = assistant_response_content
+
+                try:
+                    generated_params = json.loads(json_str)
+                    pydantic_model_class.model_validate(generated_params)
+                    logger.info(f"Successfully generated and validated parameters for '{main_model_name}' from the LLM.")
+                    return generated_params
+                except json.JSONDecodeError as e_json:
+                    logger.error(f"Could not parse the LLM response as JSON: {e_json}\nRaw response fragment: '{json_str[:500]}'")
+                except ValidationError as e_val:
+                    logger.error(f"The LLM-generated parameters failed Pydantic model validation: {e_val}\nGenerated parameters: {json_str}")
+            else:
+                logger.warning("The response content obtained from the LLM is empty, or the request failed.")
+
+        except Exception as e:
+            logger.error(f"Unexpected error during LLM parameter generation: {e}", exc_info=True)
+
+        return None
+
+# --- Example usage (for in-module testing) ---
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.DEBUG,
+                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+    DASH_API_KEY = os.environ.get("DASHSCOPE_API_KEY") or "sk-YOUR_DASHSCOPE_API_KEY"
+    DASH_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
+
+    if "YOUR_DASHSCOPE_API_KEY" in DASH_API_KEY:
+        logger.warning("Replace DASH_API_KEY with a valid API key, or set the DASHSCOPE_API_KEY environment variable.")
+
+    llm_service_instance = LLMService(api_key=DASH_API_KEY, base_url=DASH_BASE_URL)
+
+    logger.info("\n--- Testing SampleUserProfile parameter generation ---")
+    generated_profile = llm_service_instance.generate_parameters_from_schema(
+        pydantic_model_class=SampleUserProfile,
+        prompt_instructions="Generate a profile for an inactive user whose username contains \"test_user\", whose city is Shanghai, and who has at least one hobby."
+    )
+
+    if generated_profile:
+        logger.info(f"Successfully generated UserProfile:\n{json.dumps(generated_profile, indent=2, ensure_ascii=False)}")
+        try:
+            SampleUserProfile.model_validate(generated_profile)
+            logger.info("The generated UserProfile passed Pydantic validation.")
+        except Exception as e:
+            logger.error(f"The generated UserProfile failed Pydantic validation: {e}")
+    else:
+        logger.warning("Failed to generate a UserProfile.")
+
+    logger.info("\n--- Testing SampleUserAddress parameter generation ---")
+    generated_address = llm_service_instance.generate_parameters_from_schema(
+        pydantic_model_class=SampleUserAddress,
+        prompt_instructions="Generate an address in Beijing's Chaoyang district, with a postal code starting with 1000."
+    )
+    if generated_address:
+        logger.info(f"Successfully generated UserAddress:\n{json.dumps(generated_address, indent=2, ensure_ascii=False)}")
+    else:
+        logger.warning("Failed to generate a UserAddress.")
+
+    logger.info("\n--- Testing SampleUserProfile with no extra instructions ---")
+    generated_profile_no_instr = llm_service_instance.generate_parameters_from_schema(
+        pydantic_model_class=SampleUserProfile
+    )
+    if generated_profile_no_instr:
+        logger.info(f"Successfully generated UserProfile (no instructions):\n{json.dumps(generated_profile_no_instr, indent=2, ensure_ascii=False)}")
+    else:
+        logger.warning("Failed to generate a UserProfile (no instructions).")
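The regex/brace-scanning fallback in step 4 above exists because the model is only asked, via the prompt, to emit bare JSON. The TODO in `_execute_chat_completion_request` points at the stronger option: OpenAI-style JSON mode, which is a single extra payload field. Whether DashScope's compatible mode honors it for a given Qwen model is an assumption to verify, so here is a hedged sketch rather than a wired-in change:

```python
# Hypothetical payload extension for _execute_chat_completion_request: forward an
# optional response_format so the model is forced to emit raw JSON. Support on the
# DashScope compatible endpoint should be confirmed per model before relying on it.
payload = {
    "model": "qwen-plus",
    "messages": [{"role": "user", "content": "..."}],
    "max_tokens": 1024,
    "temperature": 0.7,
    "response_format": {"type": "json_object"},  # OpenAI JSON mode
}
```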

diff --git a/ddms_compliance_suite/test_orchestrator.py b/ddms_compliance_suite/test_orchestrator.py
index 428c34e..d1f4d45 100644
--- a/ddms_compliance_suite/test_orchestrator.py
+++ b/ddms_compliance_suite/test_orchestrator.py
@@ -16,6 +16,12 @@ from .api_caller.caller import APICaller, APIRequest, APIResponse
 from .json_schema_validator.validator import JSONSchemaValidator
 from .test_framework_core import ValidationResult, TestSeverity, APIRequestContext, APIResponseContext, BaseAPITestCase
 from .test_case_registry import TestCaseRegistry
+# Try to import LLMService; failure is tolerated because the LLM features are optional
+try:
+    from .llm_utils.llm_service import LLMService
+except ImportError:
+    LLMService = None
+    logging.getLogger(__name__).info("LLMService not found; LLM-related features will be unavailable.")
 
 class ExecutedTestCaseResult:
     """Stores the result of a single APITestCase after execution against its applicable endpoints."""
@@ -298,7 +304,11 @@ class APITestOrchestrator:
     """API test orchestrator"""
     def __init__(self,
                  base_url: str,
-                 custom_test_cases_dir: Optional[str] = None # Path to the directory of custom test cases
+                 custom_test_cases_dir: Optional[str] = None, # Path to the directory of custom test cases
+                 llm_api_key: Optional[str] = None,
+                 llm_base_url: Optional[str] = None,
+                 llm_model_name: Optional[str] = None,
+                 use_llm_for_request_body: bool = False
                  ):
         """
         Initialize the API test orchestrator
@@ -306,6 +316,10 @@
         Args:
             base_url: Base URL of the API
             custom_test_cases_dir: Directory containing custom APITestCase classes. If None, no custom test cases are loaded.
+            llm_api_key: API key for the LLM service.
+            llm_base_url: OpenAI-compatible base URL of the LLM service.
+            llm_model_name: Name of the specific model to use.
+            use_llm_for_request_body: Whether to use the LLM to generate request bodies; defaults to False.
         """
         self.base_url = base_url.rstrip('/')
         self.logger = logging.getLogger(__name__)
@@ -324,9 +338,39 @@
                 self.logger.info(f"TestCaseRegistry initialized; discovered {len(self.test_case_registry.get_all_test_case_classes())} test case classes.")
             except Exception as e:
                 self.logger.error(f"Failed to initialize TestCaseRegistry: {e}", exc_info=True)
-                # Even if registry initialization fails, the orchestrator remains usable for legacy logic (if kept) or for running no custom tests
         else:
             self.logger.info("No custom_test_cases_dir provided; custom APITestCase classes will not be loaded.")
+
+        # Initialize the LLM service (if configured)
+        self.llm_service: Optional[LLMService] = None
+        self.use_llm_for_request_body = use_llm_for_request_body
+
+        if LLMService is None:  # Check whether the import succeeded
+            self.logger.warning("The LLMService class could not be imported; LLM-related features are fully disabled.")
+            self.use_llm_for_request_body = False  # Force-disable
+        elif self.use_llm_for_request_body:  # Only attempt initialization when requested and the class was imported
+            if llm_api_key and llm_base_url and llm_model_name:
+                try:
+                    self.llm_service = LLMService(
+                        api_key=llm_api_key,
+                        base_url=llm_base_url,
+                        model_name=llm_model_name
+                    )
+                    self.logger.info(f"LLMService initialized successfully with model {llm_model_name}. Will attempt to generate request bodies with the LLM.")
+                except ValueError as ve:  # LLMService init may raise ValueError for bad args
+                    self.logger.error(f"LLMService initialization failed (invalid arguments): {ve}. Falling back to non-LLM request body generation.")
+                    self.llm_service = None
+                    self.use_llm_for_request_body = False  # Initialization failed; disable LLM usage
+                except Exception as e:
+                    self.logger.error(f"Unexpected error while initializing LLMService: {e}. Falling back to non-LLM request body generation.", exc_info=True)
+                    self.llm_service = None
+                    self.use_llm_for_request_body = False  # Initialization failed; disable LLM usage
+            else:
+                self.logger.warning("LLM request body generation was requested, but the LLM configuration (api_key, base_url, model_name) is incomplete. Falling back to non-LLM request body generation.")
+                self.use_llm_for_request_body = False  # Incomplete configuration; disable LLM usage
+        else:
+            self.logger.info("Configured not to use the LLM for request body generation.")
+
 
     def _execute_single_test_case(
         self,
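With that, wiring the feature up is a constructor call; everything degrades to the rule-based generator when the configuration is incomplete or `LLMService` failed to import. A usage sketch (the target host, base URL, and model name are illustrative):

```python
import os
from ddms_compliance_suite.test_orchestrator import APITestOrchestrator

orchestrator = APITestOrchestrator(
    base_url="http://localhost:8080",  # system under test (illustrative)
    custom_test_cases_dir=None,
    llm_api_key=os.environ.get("DASHSCOPE_API_KEY"),
    llm_base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    llm_model_name="qwen-plus",
    use_llm_for_request_body=True,
)
# If any llm_* value is missing, __init__ logs a warning and silently resets
# use_llm_for_request_body to False, so runs never hard-fail on LLM config.
```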
@@ -636,7 +680,35 @@
         # --- Generate request body data ---
         body_data: Optional[Any] = None
         if body_schema_dict:
-            body_data = self._generate_data_from_schema(body_schema_dict)
+            generated_by_llm = False
+            if self.use_llm_for_request_body and self.llm_service:
+                self.logger.debug(f"Attempting to use the LLM to generate a request body for endpoint {endpoint_spec.method} {endpoint_spec.path}.")
+                try:
+                    # TODO: Dynamically create a Pydantic model (the core of step 2)
+                    # DynamicPydanticModel = self._create_pydantic_model_from_schema(body_schema_dict, "DynamicRequestBodyModel")
+                    # if DynamicPydanticModel:
+                    #     # TODO: Consider extracting some prompt_instructions from endpoint_spec
+                    #     llm_generated_body = self.llm_service.generate_parameters_from_schema(
+                    #         pydantic_model_class=DynamicPydanticModel,
+                    #         prompt_instructions=f"Generate a request body for API endpoint {endpoint_spec.title or endpoint_spec.path}."
+                    #     )
+                    #     if llm_generated_body is not None:
+                    #         body_data = llm_generated_body
+                    #         generated_by_llm = True
+                    #         self.logger.info(f"The LLM successfully generated a request body for {endpoint_spec.method} {endpoint_spec.path}.")
+                    #     else:
+                    #         self.logger.warning(f"The LLM could not generate a request body for {endpoint_spec.method} {endpoint_spec.path}; falling back to the default method.")
+                    # else:
+                    #     self.logger.warning("Could not dynamically create a Pydantic model from the schema for LLM request body generation; falling back.")
+                    self.logger.info("LLM request body generation is not fully implemented yet (_create_pydantic_model_from_schema). Falling back for now.")  # Temporary log
+                    pass  # Placeholder until _create_pydantic_model_from_schema is done
+                except Exception as e:
+                    self.logger.error(f"Error while using the LLM to generate a request body: {e}. Falling back to the default method.", exc_info=True)
+
+            if not generated_by_llm:  # If the LLM was not used or LLM generation failed
+                if self.use_llm_for_request_body and self.llm_service:  # Only log this fallback after the LLM was actually attempted
+                    self.logger.debug(f"LLM request body generation failed or is disabled; falling back to rule-based generation for {endpoint_spec.method} {endpoint_spec.path}.")
+                body_data = self._generate_data_from_schema(body_schema_dict)
 
         return {
             "path_params": path_params_data,
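The patch deliberately leaves `_create_pydantic_model_from_schema` unimplemented; the commented-out block shows exactly how it would be consumed. A minimal sketch of what that helper might look like, built on `pydantic.create_model`: this is a hypothetical implementation handling only flat object schemas, with nested objects, `$ref` resolution, and `anyOf`/`oneOf` combinators left as TODOs.

```python
from typing import Any, Optional, Type
from pydantic import BaseModel, Field, create_model

_JSON_TO_PY = {
    "string": str,
    "integer": int,
    "number": float,
    "boolean": bool,
    "array": list,
    "object": dict,
}

def create_pydantic_model_from_schema(schema: dict, model_name: str) -> Optional[Type[BaseModel]]:
    """Build a Pydantic model from a flat JSON Schema object (sketch only)."""
    if schema.get("type") != "object" or "properties" not in schema:
        return None
    required = set(schema.get("required", []))
    fields: dict[str, Any] = {}
    for name, prop in schema["properties"].items():
        py_type = _JSON_TO_PY.get(prop.get("type"), Any)
        if name in required:
            fields[name] = (py_type, Field(..., description=prop.get("description")))
        else:
            fields[name] = (Optional[py_type], Field(None, description=prop.get("description")))
    return create_model(model_name, **fields)
```

As a method it would drop in as `self._create_pydantic_model_from_schema`, letting the commented-out branch above replace the temporary fallback log.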