From 8156ceb3c72c263c5ce8de1c1e6661c1af9dde2c Mon Sep 17 00:00:00 2001
From: Farouk Adeleke
Date: Mon, 20 Oct 2025 08:26:59 -0400
Subject: [PATCH] Complete Migration

---
 __init__.py                                   |   0
 agent.py                                      | 172 ----------------
 agent/__pycache__/__init__.cpython-310.pyc    | Bin 171 -> 0 bytes
 agent/__pycache__/agent.cpython-310.pyc       | Bin 2915 -> 0 bytes
 agent/__pycache__/constants.cpython-310.pyc   | Bin 2398 -> 0 bytes
 agent/__pycache__/helpers.cpython-310.pyc     | Bin 2457 -> 0 bytes
 .../__pycache__/hill_climbing.cpython-310.pyc | Bin 2427 -> 0 bytes
 agent/__pycache__/index.cpython-310.pyc       | Bin 2919 -> 0 bytes
 agent/__pycache__/models.cpython-310.pyc      | Bin 3272 -> 0 bytes
 agent/__pycache__/modules.cpython-310.pyc     | Bin 5191 -> 0 bytes
 agent/__pycache__/utils.cpython-310.pyc       | Bin 5554 -> 0 bytes
 constants.py                                  |  75 -------
 helpers.py                                    |  85 --------
 hill_climbing.py                              | 119 -----------
 models.py                                     |  60 ------
 modules.py                                    | 128 ------------
 pyproject.toml                                |  11 +-
 utils.py                                      | 192 ------------------
 18 files changed, 1 insertion(+), 841 deletions(-)
 create mode 100644 __init__.py
 delete mode 100644 agent.py
 delete mode 100644 agent/__pycache__/__init__.cpython-310.pyc
 delete mode 100644 agent/__pycache__/agent.cpython-310.pyc
 delete mode 100644 agent/__pycache__/constants.cpython-310.pyc
 delete mode 100644 agent/__pycache__/helpers.cpython-310.pyc
 delete mode 100644 agent/__pycache__/hill_climbing.cpython-310.pyc
 delete mode 100644 agent/__pycache__/index.cpython-310.pyc
 delete mode 100644 agent/__pycache__/models.cpython-310.pyc
 delete mode 100644 agent/__pycache__/modules.cpython-310.pyc
 delete mode 100644 agent/__pycache__/utils.cpython-310.pyc
 delete mode 100644 constants.py
 delete mode 100644 helpers.py
 delete mode 100644 hill_climbing.py
 delete mode 100644 models.py
 delete mode 100644 modules.py
 delete mode 100644 utils.py

diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/agent.py b/agent.py
deleted file mode 100644
index 4d48c4b..0000000
--- a/agent.py
+++ /dev/null
@@ -1,172 +0,0 @@
-from modaic import PrecompiledAgent, PrecompiledConfig
-from modules import TweetGeneratorModule, TweetEvaluatorModule
-from models import EvaluationResult
-from hill_climbing import HillClimbingOptimizer
-from typing import Optional, List, Dict, Any
-from utils import get_dspy_lm
-from constants import DEFAULT_CATEGORIES, DEFAULT_ITERATIONS, DEFAULT_PATIENCE
-
-
-class TweetOptimizerConfig(PrecompiledConfig):
-    lm: str = "openrouter/google/gemini-2.5-flash"
-    eval_lm: str = "openrouter/openai/gpt-5"
-    categories: List[str] = DEFAULT_CATEGORIES
-    max_iterations: int = DEFAULT_ITERATIONS
-    patience: int = DEFAULT_PATIENCE
-
-
-class TweetOptimizerAgent(PrecompiledAgent):
-    config: TweetOptimizerConfig
-
-    def __init__(self, config: TweetOptimizerConfig):
-        super().__init__(config)
-        self.tweet_generator = TweetGeneratorModule()
-        self.tweet_evaluator = TweetEvaluatorModule()
-
-        # set up optimizer
-        self.optimizer = HillClimbingOptimizer(
-            generator=self.tweet_generator,
-            evaluator=self.tweet_evaluator,
-            categories=config.categories,
-            max_iterations=config.max_iterations,
-            patience=config.patience
-        )
-
-        self.lm = config.lm
-        self.eval_lm = config.eval_lm
-
-        # initialize DSPy with the specified model
-        self.tweet_generator.set_lm(get_dspy_lm(config.lm))
-        self.tweet_evaluator.set_lm(get_dspy_lm(config.eval_lm))
-
-    def forward(
-        self,
-        input_text: str,
-        current_tweet: str = "",
-        previous_evaluation: Optional[EvaluationResult] = None,
-    ) -> str:
-        """Generate a single optimized tweet (single iteration)."""
-        tweet = self.tweet_generator(input_text, current_tweet, previous_evaluation)
-        return tweet
-
-    def optimize(
-        self,
-        input_text: str,
-        iterations: Optional[int] = None,
-        patience: Optional[int] = None
-    ) -> Dict[str, Any]:
-        """Run full optimization process like the CLI."""
-        max_iterations = iterations or self.config.max_iterations
-        patience_limit = patience or self.config.patience
-
-        results = {
-            'initial_text': input_text,
-            'final_tweet': '',
-            'best_score': 0.0,
-            'iterations_run': 0,
-            'early_stopped': False,
-            'scores_history': [],
-            'improvement_count': 0
-        }
-
-        best_tweet = ""
-        best_score = 0.0
-
-        for iteration, (current_tweet, scores, is_improvement, patience_counter, _, _) in enumerate(
-            self.optimizer.optimize(input_text)
-        ):
-            iteration_num = iteration + 1
-            results['iterations_run'] = iteration_num
-            results['scores_history'].append(scores)
-
-            if is_improvement:
-                best_tweet = current_tweet
-                best_score = sum(scores.category_scores) / len(scores.category_scores)
-                results['improvement_count'] += 1
-
-            # check for early stopping
-            if patience_counter >= patience_limit:
-                results['early_stopped'] = True
-                break
-
-            # stop at max iterations
-            if iteration_num >= max_iterations:
-                break
-
-        results.update({
-            'final_tweet': best_tweet,
-            'best_score': best_score
-        })
-
-        return results
-
-    def evaluate_tweet(
-        self,
-        tweet_text: str,
-        original_text: str = "",
-        current_best_tweet: str = ""
-    ) -> EvaluationResult:
-        """Evaluate a tweet using the configured categories."""
-        return self.tweet_evaluator(tweet_text, self.config.categories, original_text, current_best_tweet)
-
-
-if __name__ == "__main__":
-    # create agent with default config
-    config = TweetOptimizerConfig()
-    tweet_optimizer = TweetOptimizerAgent(config)
-    """
-    import os
-
-    # set up test environment (replace with real API key for actual usage)
-    if not os.getenv("OPENROUTER_API_KEY"):
-        raise ValueError("OPENROUTER_API_KEY environment variable is not set")
-
-
-
-    # single tweet generation
-    print("=== Single Tweet Generation ===")
-    try:
-        single_tweet = tweet_optimizer(
-            input_text="Anthropic added a new OSS model on HuggingFace.",
-            current_tweet="",
-            previous_evaluation=None,
-        )
-        print(f"Generated tweet: {single_tweet}")
-    except Exception as e:
-        print(f"Error in single generation: {e}")
-
-    # full optimization process
-    print("\n=== Full Optimization Process ===")
-    try:
-        results = tweet_optimizer.optimize(
-            input_text="Anthropic added a new OSS model on HuggingFace.",
-            iterations=10,  # Reduced for testing
-            patience=8
-        )
-        print(f"Initial text: {results['initial_text']}")
-        print(f"Final tweet: {results['final_tweet']}")
-        print(f"Best score: {results['best_score']:.2f}")
-        print(f"Iterations run: {results['iterations_run']}")
-        print(f"Improvements found: {results['improvement_count']}")
-        print(f"Early stopped: {results['early_stopped']}")
-    except Exception as e:
-        print(f"Error in optimization: {e}")
-    """
-    # push to hub
-    print("\n=== Push to Hub ===")
-    try:
-        tweet_optimizer.push_to_hub(
-            "farouk1/tweet-optimizer-v2",
-            commit_message="Complete Migration",
-            with_code=True
-        )
-        print("Successfully pushed to hub!")
-    except Exception as e:
-        print(f"Error pushing to hub: {e}")
-    """
-    print("\n=== Agent Configuration ===")
-    print(f"Model: {config.lm}")
-    print(f"Categories: {config.categories}")
-    print(f"Max iterations: {config.max_iterations}")
-    print(f"Patience: {config.patience}")
-    """
\ No newline at end of file
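As context for the largest removal, a minimal sketch of how the deleted root-level agent was driven, mirroring the `__main__` demo above. This is a sketch under the pre-migration layout this patch deletes; it assumes `OPENROUTER_API_KEY` is set and that modaic's `PrecompiledAgent` dispatches calls to `forward`, as that demo does:

    # Hypothetical pre-migration usage; these imports resolve only before this patch.
    from agent import TweetOptimizerAgent, TweetOptimizerConfig  # root agent.py; shadowed once an agent/ package exists

    config = TweetOptimizerConfig()          # defaults: gemini-2.5-flash generator, gpt-5 evaluator
    optimizer = TweetOptimizerAgent(config)

    # One generation step, no feedback loop.
    tweet = optimizer(input_text="Anthropic added a new OSS model on HuggingFace.")

    # Full hill-climbing run with early stopping; returns the results dict built in optimize().
    results = optimizer.optimize(
        input_text="Anthropic added a new OSS model on HuggingFace.",
        iterations=10,
        patience=5,
    )
    print(results["final_tweet"], results["best_score"])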
diff --git a/agent/__pycache__/__init__.cpython-310.pyc b/agent/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 992044a3f79d05b6054fd4e71ab99e0c7ac4b1bc..0000000000000000000000000000000000000000
Binary files a/agent/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/agent/__pycache__/agent.cpython-310.pyc b/agent/__pycache__/agent.cpython-310.pyc
deleted file mode 100644
index 39d31ee6282e70a25bc6afb2c96123352a2cb23c..0000000000000000000000000000000000000000
Binary files a/agent/__pycache__/agent.cpython-310.pyc and /dev/null differ
diff --git a/agent/__pycache__/constants.cpython-310.pyc b/agent/__pycache__/constants.cpython-310.pyc
deleted file mode 100644
index 8b7a0a1e74202847f3b5a4b0ed3b658ed2b0e998..0000000000000000000000000000000000000000
Binary files a/agent/__pycache__/constants.cpython-310.pyc and /dev/null differ
diff --git a/agent/__pycache__/helpers.cpython-310.pyc b/agent/__pycache__/helpers.cpython-310.pyc
deleted file mode 100644
Binary files a/agent/__pycache__/helpers.cpython-310.pyc and /dev/null differ
diff --git a/agent/__pycache__/hill_climbing.cpython-310.pyc b/agent/__pycache__/hill_climbing.cpython-310.pyc
deleted file mode 100644
Binary files a/agent/__pycache__/hill_climbing.cpython-310.pyc and /dev/null differ
diff --git a/agent/__pycache__/index.cpython-310.pyc b/agent/__pycache__/index.cpython-310.pyc
deleted file mode 100644
Binary files a/agent/__pycache__/index.cpython-310.pyc and /dev/null differ
diff --git a/agent/__pycache__/models.cpython-310.pyc b/agent/__pycache__/models.cpython-310.pyc
deleted file mode 100644
index 372dac3866a02fac645705989c0237ad1c637ed0..0000000000000000000000000000000000000000
Binary files a/agent/__pycache__/models.cpython-310.pyc and /dev/null differ
diff --git a/agent/__pycache__/modules.cpython-310.pyc b/agent/__pycache__/modules.cpython-310.pyc
deleted file mode 100644
Binary files a/agent/__pycache__/modules.cpython-310.pyc and /dev/null differ
diff --git a/agent/__pycache__/utils.cpython-310.pyc b/agent/__pycache__/utils.cpython-310.pyc
deleted file mode 100644
index b003ec864791b1f4d62ec9012271c19025e59f54..0000000000000000000000000000000000000000
Binary files a/agent/__pycache__/utils.cpython-310.pyc and /dev/null differ
zE#-FG{12H#n0#m^;s1$VCjE@c-82d4(gPuz$&7tt&KQ8VDA@=14cBf6qX)Ook3 zEAHZ!l(>zi3LJTn>RI)c4kEob4VsHgz6KH<*_xCR)6g-V+*Iirp((7aP-&UO zZHU;S|0IYiX;P7#CWPo39VNkv{Fnr-wQs|E=o)9=22^j=#k<5oBRa>jMi#tvg8h_D l%@lGYqxQ7@0-&2bX8w8MXdzR`9CM~}Q{z+lLhj7SKLIRy4L<+? diff --git a/constants.py b/constants.py deleted file mode 100644 index 1eb4de6..0000000 --- a/constants.py +++ /dev/null @@ -1,75 +0,0 @@ -from typing import Dict, List - -# tweet configuration -TWEET_MAX_LENGTH = 280 -TWEET_TRUNCATION_SUFFIX = "..." -TWEET_TRUNCATION_LENGTH = TWEET_MAX_LENGTH - len(TWEET_TRUNCATION_SUFFIX) - -# score configuration -MIN_SCORE = 1 -MAX_SCORE = 9 -DEFAULT_SCORE = 5 - -# file paths -CATEGORIES_FILE = "categories.json" -SETTINGS_FILE = "settings.json" -HISTORY_FILE = "input_history.json" - -# history configuration -MAX_HISTORY_ITEMS = 50 # maximum number of historical inputs to store - -# model configuration -DEFAULT_MODEL = "openrouter/anthropic/claude-sonnet-4.5" - -AVAILABLE_MODELS: Dict[str, str] = { - "Claude Sonnet 4.5": "openrouter/anthropic/claude-sonnet-4.5", - "Opus 4.1": "openrouter/anthropic/claude-opus-4.1", - "Gemini 2.5 Flash": "openrouter/google/gemini-2.5-flash", - "Gemini 2.5 Flash Lite": "openrouter/google/gemini-2.5-flash-lite", - "Gemini 2.5 Pro": "openrouter/google/gemini-2.5-pro", - "GPT-5": "openrouter/openai/gpt-5" -} - -# openrouter API configuration -OPENROUTER_API_BASE = "https://openrouter.ai/api/v1" -OPENROUTER_MAX_TOKENS = 4096 -OPENROUTER_TEMPERATURE = 0.7 - -# optimization defaults -DEFAULT_ITERATIONS = 10 -DEFAULT_PATIENCE = 5 -DEFAULT_USE_CACHE = True - -# default evaluation categories -DEFAULT_CATEGORIES: List[str] = [ - "Engagement potential - how likely users are to like, retweet, or reply", - "Clarity and readability - how easy the tweet is to understand", - "Emotional impact - how well the tweet evokes feelings or reactions", - "Relevance to target audience - how well it resonates with intended readers" -] - -# error messages -ERROR_PARSING = "Default evaluation due to parsing error" -ERROR_VALIDATION = "Default evaluation due to validation error" -ERROR_GENERATION = "Tweet generation failed" -ERROR_EVALUATION = "Tweet evaluation failed" -ERROR_DSPy_INIT = "DSPy initialization failed" -ERROR_NO_API_KEY = "OPENROUTER_API_KEY environment variable is required" -ERROR_SAVE_CATEGORIES = "Failed to save categories" -ERROR_LOAD_CATEGORIES = "Failed to load categories" -ERROR_SAVE_SETTINGS = "Failed to save settings" -ERROR_LOAD_SETTINGS = "Failed to load settings" -ERROR_SAVE_HISTORY = "Failed to save input history" -ERROR_LOAD_HISTORY = "Failed to load input history" - -# cache configuration -CACHE_ENABLE_MEMORY = True -CACHE_ENABLE_DISK = True - -# iteration display -ITERATION_SLEEP_TIME = 0.1 # seconds - -# truncation display -CATEGORY_DISPLAY_MAX_LENGTH = 30 -CATEGORY_DISPLAY_TRUNCATION = "..." -CATEGORY_IMPROVEMENT_MAX_LENGTH = 50 diff --git a/helpers.py b/helpers.py deleted file mode 100644 index 5be03ed..0000000 --- a/helpers.py +++ /dev/null @@ -1,85 +0,0 @@ -from typing import Optional, Dict, Any -from models import EvaluationResult -from constants import MAX_SCORE - - -def format_evaluation_for_generator(evaluation: Optional[EvaluationResult]) -> str: - """ - Format an evaluation result as text for the generator module. 
diff --git a/helpers.py b/helpers.py
deleted file mode 100644
index 5be03ed..0000000
--- a/helpers.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from typing import Optional, Dict, Any
-from models import EvaluationResult
-from constants import MAX_SCORE
-
-
-def format_evaluation_for_generator(evaluation: Optional[EvaluationResult]) -> str:
-    """
-    Format an evaluation result as text for the generator module.
-
-    Args:
-        evaluation: The evaluation result to format
-
-    Returns:
-        Formatted string with category-by-category reasoning and scores
-    """
-    if not evaluation or not evaluation.evaluations:
-        return ""
-
-    eval_lines = []
-    for eval in evaluation.evaluations:
-        eval_lines.append(f"{eval.category} (Score: {eval.score}/{MAX_SCORE}): {eval.reasoning}")
-
-    return "\n".join(eval_lines)
-
-
-def build_settings_dict(
-    selected_model: str,
-    iterations: int,
-    patience: int,
-    use_cache: bool
-) -> Dict[str, Any]:
-    """
-    Build a settings dictionary for saving.
-
-    Args:
-        selected_model: The selected model name
-        iterations: Number of optimization iterations
-        patience: Patience threshold for early stopping
-        use_cache: Whether to use DSPy cache
-
-    Returns:
-        Dictionary containing all settings
-    """
-    return {
-        "selected_model": selected_model,
-        "iterations": iterations,
-        "patience": patience,
-        "use_cache": use_cache
-    }
-
-
-def truncate_tweet(tweet: str, max_length: int, suffix: str = "...") -> str:
-    """
-    Truncate a tweet to the maximum length with a suffix.
-
-    Args:
-        tweet: The tweet text to truncate
-        max_length: Maximum allowed length
-        suffix: Suffix to add when truncating (default: "...")
-
-    Returns:
-        Truncated tweet text
-    """
-    tweet = tweet.strip()
-    if len(tweet) <= max_length:
-        return tweet
-
-    truncation_point = max_length - len(suffix)
-    return tweet[:truncation_point] + suffix
-
-
-def truncate_category_display(category: str, max_length: int = 30) -> str:
-    """
-    Truncate a category name for display purposes.
-
-    Args:
-        category: The category name
-        max_length: Maximum display length (default: 30)
-
-    Returns:
-        Truncated category name with "..." if needed
-    """
-    if len(category) <= max_length:
-        return category
-    return category[:max_length] + "..."
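A quick check of `truncate_tweet`'s arithmetic against the constants defined earlier (280-character limit, three-character suffix); the expected values follow directly from the deleted code:

    # Sketch under the pre-migration layout: helpers.py importable from the repo root.
    from helpers import truncate_tweet

    short = truncate_tweet("x" * 300, 280, "...")
    assert len(short) == 280      # 277 kept characters + the 3-character suffix
    assert short.endswith("...")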
diff --git a/hill_climbing.py b/hill_climbing.py
deleted file mode 100644
index 0b5a1e5..0000000
--- a/hill_climbing.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from typing import List, Iterator, Tuple, Dict
-from models import EvaluationResult
-from agent.modules import TweetGeneratorModule, TweetEvaluatorModule
-from helpers import format_evaluation_for_generator
-
-class HillClimbingOptimizer:
-    """Hill climbing optimizer for tweet improvement."""
-
-    def __init__(
-        self,
-        generator: TweetGeneratorModule,
-        evaluator: TweetEvaluatorModule,
-        categories: List[str],
-        max_iterations: int = 10,
-        patience: int = 5
-    ):
-        self.generator = generator
-        self.evaluator = evaluator
-        self.categories = categories
-        self.max_iterations = max_iterations
-        self.patience = patience
-
-    def optimize(self, initial_text: str) -> Iterator[Tuple[str, EvaluationResult, bool, int, Dict[str, str], Dict[str, str]]]:
-        """
-        Optimize tweet using hill climbing algorithm.
-
-        Yields:
-            Tuple of (current_tweet, evaluation_result, is_improvement, patience_counter, generator_inputs, evaluator_inputs)
-        """
-        # Generate initial tweet
-        generator_inputs = {
-            "input_text": initial_text,
-            "current_tweet": "",
-            "previous_evaluation": ""
-        }
-        current_tweet = self.generator(
-            input_text=initial_text,
-            current_tweet="",
-            previous_evaluation=None
-        )
-
-        evaluator_inputs = {
-            "original_text": initial_text,
-            "current_best_tweet": "",
-            "tweet_text": current_tweet
-        }
-        current_score = self.evaluator(
-            tweet_text=current_tweet,
-            categories=self.categories,
-            original_text=initial_text,
-            current_best_tweet=""
-        )
-
-        best_tweet = current_tweet
-        best_score = current_score
-        patience_counter = 0
-
-        yield (current_tweet, current_score, True, patience_counter, generator_inputs, evaluator_inputs)
-
-        for iteration in range(1, self.max_iterations):
-            # Generate improved tweet with previous evaluation as feedback
-            try:
-                # Format evaluation for display in generator inputs
-                eval_text = format_evaluation_for_generator(best_score)
-
-                generator_inputs = {
-                    "input_text": initial_text,
-                    "current_tweet": best_tweet,
-                    "previous_evaluation": eval_text
-                }
-
-                candidate_tweet = self.generator(
-                    input_text=initial_text,
-                    current_tweet=best_tweet,
-                    previous_evaluation=best_score
-                )
-
-                # Evaluate candidate
-                evaluator_inputs = {
-                    "original_text": initial_text,
-                    "current_best_tweet": best_tweet,
-                    "tweet_text": candidate_tweet
-                }
-                candidate_score = self.evaluator(
-                    tweet_text=candidate_tweet,
-                    categories=self.categories,
-                    original_text=initial_text,
-                    current_best_tweet=best_tweet
-                )
-
-                # Check if candidate is better (hill climbing condition)
-                is_improvement = candidate_score > best_score
-
-                if is_improvement:
-                    best_tweet = candidate_tweet
-                    best_score = candidate_score
-                    patience_counter = 0
-                    yield (candidate_tweet, candidate_score, True, patience_counter, generator_inputs, evaluator_inputs)
-                else:
-                    patience_counter += 1
-                    yield (best_tweet, candidate_score, False, patience_counter, generator_inputs, evaluator_inputs)
-
-                # Early stopping if no improvement for 'patience' iterations
-                if patience_counter >= self.patience:
-                    break
-
-            except Exception as e:
-                # If generation fails, yield current best
-                patience_counter += 1
-                evaluator_inputs = {
-                    "original_text": initial_text,
-                    "current_best_tweet": best_tweet,
-                    "tweet_text": best_tweet
-                }
-                yield (best_tweet, best_score, False, patience_counter, generator_inputs, evaluator_inputs)
-
-                if patience_counter >= self.patience:
-                    break
-
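`HillClimbingOptimizer.optimize` is a generator that yields a 6-tuple per iteration rather than returning one final answer, which is what lets agent.py's `optimize()` loop above stream progress. A minimal consumer sketch follows; it assumes DSPy has already been configured with an LM (e.g. via `initialize_dspy` in utils.py further down). Note one inconsistency worth flagging for the migration: the deleted file itself imports `from agent.modules import ...`, presupposing the `agent/` package layout, while its sibling files import plain `modules`.

    # Hypothetical standalone driver for the deleted optimizer; pre-migration layout assumed.
    from hill_climbing import HillClimbingOptimizer
    from modules import TweetGeneratorModule, TweetEvaluatorModule
    from constants import DEFAULT_CATEGORIES

    opt = HillClimbingOptimizer(
        generator=TweetGeneratorModule(),
        evaluator=TweetEvaluatorModule(),
        categories=DEFAULT_CATEGORIES,
        max_iterations=10,
        patience=5,
    )
    for tweet, score, improved, patience_counter, gen_in, eval_in in opt.optimize("Launch day!"):
        if improved:
            print(f"new best ({score.average_score():.1f}/9): {tweet}")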
diff --git a/models.py b/models.py
deleted file mode 100644
index 08dd15e..0000000
--- a/models.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from pydantic import BaseModel, Field, validator
-from typing import List
-from constants import MIN_SCORE, MAX_SCORE
-
-class CategoryEvaluation(BaseModel):
-    """Pydantic model for a single category evaluation with reasoning."""
-
-    category: str = Field(description="The evaluation category name")
-    reasoning: str = Field(description="Explanation for the score")
-    score: int = Field(
-        description=f"Score for this category ({MIN_SCORE}-{MAX_SCORE})",
-        ge=MIN_SCORE,
-        le=MAX_SCORE
-    )
-
-    @validator('score')
-    def validate_score(cls, score):
-        """Ensure score is within the valid range."""
-        if not isinstance(score, int) or score < MIN_SCORE or score > MAX_SCORE:
-            raise ValueError(f"Score {score} must be an integer between {MIN_SCORE} and {MAX_SCORE}")
-        return score
-
-class EvaluationResult(BaseModel):
-    """Pydantic model for tweet evaluation results."""
-
-    evaluations: List[CategoryEvaluation] = Field(
-        description="List of category evaluations with reasoning and scores"
-    )
-
-    @validator('evaluations')
-    def validate_evaluations(cls, evals):
-        """Ensure we have at least one evaluation."""
-        if not evals or len(evals) < 1:
-            raise ValueError("Must have at least one category evaluation")
-        return evals
-
-    @property
-    def category_scores(self) -> List[int]:
-        """Get list of scores for backwards compatibility."""
-        return [eval.score for eval in self.evaluations]
-
-    def total_score(self) -> float:
-        """Calculate the total score across all categories."""
-        return sum(eval.score for eval in self.evaluations)
-
-    def average_score(self) -> float:
-        """Calculate the average score across all categories."""
-        return self.total_score() / len(self.evaluations)
-
-    def __gt__(self, other):
-        """Compare evaluation results based on total score."""
-        if not isinstance(other, EvaluationResult):
-            return NotImplemented
-        return self.total_score() > other.total_score()
-
-    def __eq__(self, other):
-        """Check equality based on total score."""
-        if not isinstance(other, EvaluationResult):
-            return NotImplemented
-        return self.total_score() == other.total_score()
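The rich comparisons on `EvaluationResult` are what make the hill climber's `candidate_score > best_score` test work on whole evaluation objects; a small illustration grounded in the model code above:

    # Sketch under the pre-migration layout: models.py importable from the repo root.
    from models import EvaluationResult, CategoryEvaluation

    a = EvaluationResult(evaluations=[CategoryEvaluation(category="Clarity", reasoning="crisp", score=7)])
    b = EvaluationResult(evaluations=[CategoryEvaluation(category="Clarity", reasoning="rambling", score=4)])
    assert a > b                      # __gt__ compares total_score(): 7 > 4
    assert a.average_score() == 7.0   # single category, so average == score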
- ) - -class TweetGeneratorModule(dspy.Module): - """DSPy module for generating and improving tweets.""" - - def __init__(self): - super().__init__() - self.generate = dspy.ChainOfThought(TweetGenerator) - - def forward(self, input_text: str, current_tweet: str = "", previous_evaluation: Optional[EvaluationResult] = None) -> str: - """Generate or improve a tweet.""" - try: - # Format previous evaluation as text - eval_text = format_evaluation_for_generator(previous_evaluation) - - result = self.generate( - input_text=input_text, - current_tweet=current_tweet, - previous_evaluation=eval_text - ) - - # Ensure tweet doesn't exceed character limit - tweet = truncate_tweet(result.improved_tweet, TWEET_MAX_LENGTH, TWEET_TRUNCATION_SUFFIX) - - return tweet - except Exception as e: - raise Exception(f"{ERROR_GENERATION}: {str(e)}") - -class TweetEvaluatorModule(dspy.Module): - """DSPy module for evaluating tweets across custom categories.""" - - def __init__(self): - super().__init__() - self.evaluate = dspy.ChainOfThought(TweetEvaluator) - - def forward(self, tweet_text: str, categories: List[str], original_text: str = "", current_best_tweet: str = "") -> EvaluationResult: - """Evaluate a tweet across specified categories.""" - try: - # Join categories into comma-separated string - categories_str = ", ".join(categories) - - result = self.evaluate( - original_text=original_text, - current_best_tweet=current_best_tweet, - tweet_text=tweet_text, - categories=categories_str - ) - - # Extract and validate evaluations - evaluations = result.evaluations - - # Ensure we have the right number of evaluations - if len(evaluations) != len(categories): - # Create default evaluations if mismatch - evaluations = [ - CategoryEvaluation( - category=cat, - reasoning=ERROR_PARSING, - score=DEFAULT_SCORE - ) for cat in categories - ] - else: - # Validate each evaluation - validated_evals = [] - for i, eval in enumerate(evaluations): - try: - # Ensure score is valid - score = max(MIN_SCORE, min(MAX_SCORE, int(eval.score))) - validated_evals.append(CategoryEvaluation( - category=categories[i] if i < len(categories) else eval.category, - reasoning=eval.reasoning if eval.reasoning else "No reasoning provided", - score=score - )) - except (ValueError, TypeError, AttributeError): - validated_evals.append(CategoryEvaluation( - category=categories[i] if i < len(categories) else "Unknown", - reasoning=ERROR_VALIDATION, - score=DEFAULT_SCORE - )) - evaluations = validated_evals - - # Create validated result - validated_result = EvaluationResult(evaluations=evaluations) - - return validated_result - except Exception as e: - # Return default evaluations on error - default_evals = [ - CategoryEvaluation( - category=cat, - reasoning=f"{ERROR_EVALUATION}: {str(e)}", - score=DEFAULT_SCORE - ) for cat in categories - ] - return EvaluationResult(evaluations=default_evals) diff --git a/pyproject.toml b/pyproject.toml index e523742..5109fe9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,16 +3,7 @@ name = "tweet-optimizer-v2" version = "0.1.0" description = "CLI tool for optimizing tweets using DSPy and hill-climbing algorithm" requires-python = ">=3.11" -dependencies = [ - "dspy>=3.0.3", - "dspy-ai>=3.0.3", - "modaic>=0.1.1", - "pandas>=2.3.3", - "pydantic>=2.12.2", - "pytest>=8.4.2", - "pytest-mock>=3.15.1", - "requests>=2.32.5", -] +dependencies = ["dspy>=3.0.3", "dspy-ai>=3.0.3", "modaic>=0.3.0", "pandas>=2.3.3", "pydantic>=2.12.2", "pytest>=8.4.2", "pytest-mock>=3.15.1", "requests>=2.32.5"] [project.scripts] tweet-optimizer 
= "cli:main" diff --git a/utils.py b/utils.py deleted file mode 100644 index c99aea2..0000000 --- a/utils.py +++ /dev/null @@ -1,192 +0,0 @@ -import json -import os -import dspy -from typing import List, Dict, Any -from constants import ( - CATEGORIES_FILE, - SETTINGS_FILE, - HISTORY_FILE, - DEFAULT_CATEGORIES, - DEFAULT_MODEL, - DEFAULT_ITERATIONS, - DEFAULT_PATIENCE, - DEFAULT_USE_CACHE, - MAX_HISTORY_ITEMS, - OPENROUTER_API_BASE, - OPENROUTER_MAX_TOKENS, - OPENROUTER_TEMPERATURE, - ERROR_NO_API_KEY, - ERROR_SAVE_CATEGORIES, - ERROR_LOAD_CATEGORIES, - ERROR_SAVE_SETTINGS, - ERROR_LOAD_SETTINGS, - ERROR_SAVE_HISTORY, - ERROR_LOAD_HISTORY, - ERROR_DSPy_INIT, - TWEET_MAX_LENGTH -) - -def save_categories(categories: List[str]) -> None: - """Save categories to JSON file.""" - try: - with open(CATEGORIES_FILE, 'w') as f: - json.dump(categories, f, indent=2) - except Exception as e: - print(f"{ERROR_SAVE_CATEGORIES}: {str(e)}") - -def load_categories() -> List[str]: - """Load categories from JSON file.""" - try: - if os.path.exists(CATEGORIES_FILE): - with open(CATEGORIES_FILE, 'r') as f: - categories = json.load(f) - return categories if isinstance(categories, list) else [] - else: - save_categories(DEFAULT_CATEGORIES) - return DEFAULT_CATEGORIES - except Exception as e: - print(f"{ERROR_LOAD_CATEGORIES}: {str(e)}") - return [] - -def get_dspy_lm(model_name: str): - """Get a DSPy LM instance for the specified model (cached per model).""" - try: - openrouter_key = os.getenv("OPENROUTER_API_KEY") - if not openrouter_key: - raise ValueError(ERROR_NO_API_KEY) - - max_tokens = 16000 if "openai/gpt-5" in model_name else OPENROUTER_MAX_TOKENS - temperature = 1.0 if "openai/gpt-5" in model_name else OPENROUTER_TEMPERATURE - - lm = dspy.LM( - model=model_name, - api_key=openrouter_key, - api_base=OPENROUTER_API_BASE, - max_tokens=max_tokens, - temperature=temperature - ) - return lm - except Exception as e: - raise Exception(f"Failed to create LM: {str(e)}") - -def initialize_dspy(model_name: str = DEFAULT_MODEL, use_cache: bool = DEFAULT_USE_CACHE) -> bool: - """Initialize DSPy with OpenRouter and selected model.""" - # Configure cache settings - try: - dspy.configure_cache( - enable_memory_cache=use_cache, - enable_disk_cache=use_cache - ) - except Exception: - # Cache configuration might fail in some environments, continue anyway - pass - - # Only configure DSPy once globally - if not hasattr(dspy, '_replit_configured'): - try: - # Get the LM for the default model - default_lm = get_dspy_lm(model_name) - dspy.configure(lm=default_lm) - dspy._replit_configured = True # type: ignore - except Exception as e: - raise Exception(f"{ERROR_DSPy_INIT}: {str(e)}") - - return True - -def format_tweet_for_display(tweet: str) -> str: - """Format tweet text for better display.""" - return tweet.strip() - -def calculate_tweet_length(tweet: str) -> int: - """Calculate tweet length.""" - return len(tweet.strip()) - -def is_valid_tweet(tweet: str) -> bool: - """Check if tweet is valid (not empty and within character limit).""" - cleaned_tweet = tweet.strip() - return bool(cleaned_tweet) and len(cleaned_tweet) <= TWEET_MAX_LENGTH - -def save_settings(settings: Dict[str, Any]) -> None: - """Save settings to JSON file.""" - try: - with open(SETTINGS_FILE, 'w') as f: - json.dump(settings, f, indent=2) - except Exception as e: - print(f"{ERROR_SAVE_SETTINGS}: {str(e)}") - -def load_settings() -> Dict[str, Any]: - """Load settings from JSON file.""" - try: - if os.path.exists(SETTINGS_FILE): - with open(SETTINGS_FILE, 'r') 
-def load_settings() -> Dict[str, Any]:
-    """Load settings from JSON file."""
-    try:
-        if os.path.exists(SETTINGS_FILE):
-            with open(SETTINGS_FILE, 'r') as f:
-                settings = json.load(f)
-                return settings if isinstance(settings, dict) else get_default_settings()
-        else:
-            # Return default settings if file doesn't exist
-            default_settings = get_default_settings()
-            save_settings(default_settings)
-            return default_settings
-    except Exception as e:
-        print(f"{ERROR_LOAD_SETTINGS}: {str(e)}")
-        return get_default_settings()
-
-def get_default_settings() -> Dict[str, Any]:
-    """Get default settings."""
-    return {
-        "selected_model": DEFAULT_MODEL,
-        "iterations": DEFAULT_ITERATIONS,
-        "patience": DEFAULT_PATIENCE,
-        "use_cache": DEFAULT_USE_CACHE
-    }
-
-def save_input_history(history: List[str]) -> None:
-    """Save input history to JSON file."""
-    try:
-        with open(HISTORY_FILE, 'w') as f:
-            json.dump(history, f, indent=2)
-    except Exception as e:
-        print(f"{ERROR_SAVE_HISTORY}: {str(e)}")
-
-def load_input_history() -> List[str]:
-    """Load input history from JSON file."""
-    try:
-        if os.path.exists(HISTORY_FILE):
-            with open(HISTORY_FILE, 'r') as f:
-                history = json.load(f)
-                return history if isinstance(history, list) else []
-        else:
-            return []
-    except Exception as e:
-        print(f"{ERROR_LOAD_HISTORY}: {str(e)}")
-        return []
-
-def add_to_input_history(history: List[str], new_input: str) -> List[str]:
-    """
-    Add a new input to history, maintaining max size and avoiding duplicates.
-
-    Args:
-        history: Current history list
-        new_input: New input text to add
-
-    Returns:
-        Updated history list with new input at the beginning
-    """
-    # Strip whitespace from input
-    new_input = new_input.strip()
-
-    # Don't add empty strings
-    if not new_input:
-        return history
-
-    # Remove duplicate if it exists
-    if new_input in history:
-        history.remove(new_input)
-
-    # Add to beginning of list
-    updated_history = [new_input] + history
-
-    # Trim to max size
-    if len(updated_history) > MAX_HISTORY_ITEMS:
-        updated_history = updated_history[:MAX_HISTORY_ITEMS]
-
-    return updated_history
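For completeness, the removed persistence helpers composed like this (a sketch under the pre-migration layout: it writes input_history.json to the working directory and assumes OPENROUTER_API_KEY is set for the LM setup):

    # Hypothetical pre-migration usage of the deleted utils.py.
    import utils

    utils.initialize_dspy("openrouter/google/gemini-2.5-flash", use_cache=True)
    history = utils.load_input_history()
    history = utils.add_to_input_history(history, "Anthropic added a new OSS model on HuggingFace.")
    utils.save_input_history(history)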