From 8af7fd829a1b4cd3529a059264a8c8f82fa99759 Mon Sep 17 00:00:00 2001 From: Farouk Adeleke Date: Sat, 24 Jan 2026 15:41:31 -0800 Subject: [PATCH] Fix config override bug by recreating LMs after load_state --- config.json | 2 +- nanocode.py | 22 ++++++++++++---------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/config.json b/config.json index 3a011d0..875a216 100644 --- a/config.json +++ b/config.json @@ -6,6 +6,6 @@ "api_base": "https://openrouter.ai/api/v1", "max_tokens": 50000, "max_output_chars": 100000, - "verbose": false, + "verbose": true, "track_usage": true } \ No newline at end of file diff --git a/nanocode.py b/nanocode.py index 50f7d6c..550eb52 100644 --- a/nanocode.py +++ b/nanocode.py @@ -170,8 +170,8 @@ def run_bash(cmd: str) -> str: class RLMReasoningCallback(BaseCallback): def on_module_end(self, call_id, outputs, exception): if outputs and hasattr(outputs, "reasoning") and hasattr(outputs, "code"): - print(f"{DIM}⏺ [REASONING STEP] {outputs.reasoning}\n{RESET}") - print(f"{DIM}⏺ [CODE] {outputs.code}\n{RESET}") + print(f"{DIM}⏺ [REASONING STEP]\n{outputs.reasoning}\n{RESET}") + print(f"{DIM}⏺ [CODE]\n```\n{outputs.code}\n```\n{RESET}") # -- Program --- @@ -193,7 +193,7 @@ class RLMCodingConfig(PrecompiledConfig): api_base: str = "https://openrouter.ai/api/v1" max_tokens: int = 50000 max_output_chars: int = 100000 - verbose: bool = False + verbose: bool = True track_usage: bool = True @@ -261,22 +261,22 @@ class RLMCodingProgram(PrecompiledProgram): def wrapped_q(prompt): # wrap query print( - f"{DIM}⏺ [LLM QUERY]: {prompt[:100]}...{RESET}\n" + f"{DIM}⏺ [LLM QUERY]:\n{prompt[:100]}...{RESET}\n" if len(prompt) > 100 - else f"{DIM}⏺ [LLM QUERY]: {prompt}{RESET}\n" + else f"{DIM}⏺ [LLM QUERY]:\n{prompt}{RESET}\n" ) res = orig_q(prompt) print( - f"{DIM}⏺ [LLM QUERY RESULT]: {str(res)[:200]}...{RESET}\n" + f"{DIM}⏺ [LLM QUERY RESULT]:\n{str(res)[:200]}...{RESET}\n" if len(str(res)) > 200 - else f"{DIM}⏺ [LLM QUERY RESULT]: 
{res}{RESET}\n" + else f"{DIM}⏺ [LLM QUERY RESULT]:\n{res}{RESET}\n" ) return res def wrapped_b(prompts): # wrap batched query - print(f"{DIM}⏺ [LLM QUERY BATCHED]: {len(prompts)} prompts{RESET}\n") + print(f"{DIM}⏺ [LLM QUERY BATCHED]:\n{len(prompts)} prompts{RESET}\n") res = orig_b(prompts) - print(f"{DIM}⏺ [LLM QUERY BATCHED]: {len(res)} results{RESET}\n") + print(f"{DIM}⏺ [LLM QUERY BATCHED]:\n{len(res)} results{RESET}\n") return res tools["llm_query"] = wrapped_q @@ -318,7 +318,7 @@ class RLMCodingProgram(PrecompiledProgram): tools=self.tools, max_output_chars=self.config.max_output_chars, max_iterations=self.config.max_iters, - verbose=self.config.verbose, + verbose=False, # We add our own verbose logging ) new_instance.set_lm(self.lm) self.agent = new_instance @@ -358,6 +358,7 @@ class RLMCodingProgram(PrecompiledProgram): if __name__ == "__main__": agent = RLMCodingProgram(RLMCodingConfig()) + #agent(task="explicitly call llm_query(who is the ceo of apple?) to get the answer to 'who is the ceo of apple?'") branches = ["main", "dev", "prod"] for branch in branches: agent.push_to_hub( @@ -366,3 +367,4 @@ if __name__ == "__main__": branch=branch, ) +