(no commit message)

This commit is contained in:
2026-01-18 22:37:06 -08:00
parent 2945d17b91
commit 192a744884
12 changed files with 599 additions and 0 deletions

View File

@@ -1,2 +1,71 @@
# nanocode
Minimal Claude Code alternative. Single Python file, zero dependencies, ~250 lines.
Built using Claude Code, then used to build itself.
![screenshot](screenshot.png)
## Features
- Full agentic loop with tool use
- Tools: `read`, `write`, `edit`, `glob`, `grep`, `bash`
- Conversation history
- Colored terminal output
## Usage
```bash
export ANTHROPIC_API_KEY="your-key"
python nanocode.py
```
### OpenRouter
Use [OpenRouter](https://openrouter.ai) to access any model:
```bash
export OPENROUTER_API_KEY="your-key"
python nanocode.py
```
To use a different model:
```bash
export OPENROUTER_API_KEY="your-key"
export MODEL="openai/gpt-5.2"
python nanocode.py
```
## Commands
- `/c` - Clear conversation
- `/q` or `exit` - Quit
## Tools
| Tool | Description |
|------|-------------|
| `read` | Read file with line numbers, offset/limit |
| `write` | Write content to file |
| `edit` | Replace string in file (must be unique) |
| `glob` | Find files by pattern, sorted by mtime |
| `grep` | Search files for regex |
| `bash` | Run shell command |
## Example
```
────────────────────────────────────────
what files are here?
────────────────────────────────────────
⏺ Glob(**/*.py)
⎿ nanocode.py
⏺ There's one Python file: nanocode.py
```
## License
MIT

4
auto_classes.json Normal file
View File

@@ -0,0 +1,4 @@
{
"AutoConfig": "nanocode.AgentConfig",
"AutoProgram": "nanocode.AgentProgram"
}

7
config.json Normal file
View File

@@ -0,0 +1,7 @@
{
"model": null,
"max_iters": 15,
"lm": "openrouter/anthropic/claude-3.5-sonnet",
"api_base": "https://openrouter.ai/api/v1",
"max_tokens": 8192
}

151
nanocode.py Normal file
View File

@@ -0,0 +1,151 @@
#!/usr/bin/env python3
"""nanocode-dspy - minimal claude code alternative using DSPy ReAct"""
import os
from modaic import PrecompiledProgram, PrecompiledConfig
import dspy
from dspy.utils.callback import BaseCallback
from utils import (
RESET, BOLD, DIM, BLUE, CYAN, GREEN, RED, MAGENTA,
separator, render_markdown,
read_file, write_file, edit_file, glob_files, grep_files, run_bash,
AVAILABLE_MODELS, select_model,
)
# --- DSPy Signature ---
class CodingAssistant(dspy.Signature):
    """You are a concise coding assistant. Help the user with their coding task by using the available tools to read, write, edit files, search the codebase, and run commands."""

    # NOTE: the docstring above doubles as the signature's instruction prompt
    # sent to the LM (see the compiled instructions in program.json) — editing
    # it changes agent behavior.
    task: str = dspy.InputField(desc="The user's coding task or question")
    answer: str = dspy.OutputField(desc="Your response to the user after completing the task")
    affected_files: list[str] = dspy.OutputField(desc="List of files that were written or modified during the task")
# ReAct agent with tools
# Tool functions exposed to the ReAct agent; each function's docstring becomes
# the tool description shown to the LM (mirrored in program.json).
tools = [read_file, write_file, edit_file, glob_files, grep_files, run_bash]
class ToolLoggingCallback(BaseCallback):
    """Callback that logs tool calls as they happen."""

    def on_tool_start(self, call_id, instance, inputs):
        """Log when a tool starts executing."""
        # Fall back to str() for tool objects that expose no .name attribute.
        tool_name = instance.name if hasattr(instance, 'name') else str(instance)
        # Format args nicely — each value's repr is truncated to 50 chars so
        # long file contents don't flood the terminal.
        args_str = ", ".join(f"{k}={repr(v)[:50]}" for k, v in inputs.items())
        print(f" {MAGENTA}{tool_name}({args_str}){RESET}", flush=True)

    def on_tool_end(self, call_id, outputs, exception):
        """Log when a tool finishes executing."""
        # Only failures are reported here; successful results are echoed by
        # the tools themselves (e.g. run_bash streams its output).
        if exception:
            print(f" {RED}Error: {exception}{RESET}", flush=True)

    def on_module_end(self, call_id, outputs, exception):
        """Log when the finish tool is called (ReAct completion)."""
        # Check if this is a ReAct prediction with tool_calls
        if outputs and 'tool_calls' in outputs:
            for call in outputs['tool_calls']:
                args_str = ", ".join(f"{k}={repr(v)[:50]}" for k, v in call.args.items())
                if call.name == 'finish':
                    # 'finish' is ReAct's sentinel tool marking task completion.
                    print(f" {GREEN}⏺ finish{RESET}", flush=True)
                else:
                    print(f" {MAGENTA}{call.name}({args_str}){RESET}", flush=True)
class AgentConfig(PrecompiledConfig):
    """Serializable agent configuration (fields mirror config.json)."""

    # Maximum ReAct thought/tool iterations per task.
    max_iters: int = 15
    # LiteLLM-style model identifier, routed through OpenRouter.
    lm: str = "openrouter/anthropic/claude-3.5-sonnet"  # Default fallback
    # OpenAI-compatible endpoint used by dspy.LM.
    api_base: str = "https://openrouter.ai/api/v1"
    # Completion token cap passed to dspy.LM.
    max_tokens: int = 8192
class AgentProgram(PrecompiledProgram):
    """Precompiled wrapper around a dspy.ReAct coding agent."""

    config: AgentConfig

    def __init__(self, config: AgentConfig, **kwargs):
        self.config = config
        super().__init__(config, **kwargs)
        # Configure logging callback globally — all dspy modules created after
        # this point will report tool calls through ToolLoggingCallback.
        dspy.settings.configure(callbacks=[ToolLoggingCallback()])
        agent = dspy.ReAct(CodingAssistant, tools=tools, max_iters=self.config.max_iters)
        lm = dspy.LM(self.config.lm, api_base=self.config.api_base, max_tokens=self.config.max_tokens)
        agent.set_lm(lm)
        self.agent = agent

    def forward(self, task: str) -> dspy.Prediction:
        # Returns the ReAct prediction object (callers read .answer and
        # .affected_files) — the previous `-> str` annotation was inaccurate.
        assert task, "Task cannot be empty"
        return self.agent(task=task)
# --- Main ---
def main():
    """Run the interactive REPL: pick a model, build the agent, loop on tasks."""
    # MODEL env var overrides the interactive picker. select_model() also
    # honors MODEL itself, so this check is just a fast path.
    model = os.getenv("MODEL")
    if model is None:
        model = select_model()
    # Add openrouter/ prefix if not already present
    if not model.startswith("openrouter/"):
        model = f"openrouter/{model}"
    config = AgentConfig()
    config.lm = model
    agent = AgentProgram(config)
    print(f"{BOLD}nanocode-dspy{RESET} | {DIM}{agent.config.lm} | {os.getcwd()}{RESET}\n")
    # Conversation history for context
    history = []
    while True:
        try:
            print(separator())
            user_input = input(f"{BOLD}{BLUE}{RESET} ").strip()
            print(separator())
            if not user_input:
                continue
            # REPL commands: /q or exit quits, /c clears history.
            if user_input in ("/q", "exit"):
                break
            if user_input == "/c":
                history = []
                print(f"{GREEN}⏺ Cleared conversation{RESET}")
                continue
            # Build context from history — the ReAct agent is stateless, so
            # prior exchanges are replayed inside the task string.
            context = f"Working directory: {os.getcwd()}\n"
            if history:
                context += "\nPrevious conversation:\n"
                for h in history[-5:]:  # Keep last 5 exchanges
                    context += f"User: {h['user']}\nAssistant: {h['assistant']}\n\n"
            task = f"{context}\nCurrent task: {user_input}"
            print(f"\n{CYAN}{RESET} Thinking...", flush=True)
            # Run the ReAct agent
            result = agent(task=task)
            # Display the answer
            print(f"\n{CYAN}{RESET} {render_markdown(result.answer)}")
            # Save to history
            history.append({"user": user_input, "assistant": result.answer})
            print()
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C / Ctrl-D exits the REPL cleanly.
            break
        except Exception as err:
            # Full traceback for debugging, then a short one-line error.
            import traceback
            traceback.print_exc()
            print(f"{RED}⏺ Error: {err}{RESET}")


if __name__ == "__main__":
    main()

93
program.json Normal file
View File

@@ -0,0 +1,93 @@
{
"agent.react": {
"traces": [],
"train": [],
"demos": [],
"signature": {
"instructions": "You are a concise coding assistant. Help the user with their coding task by using the available tools to read, write, edit files, search the codebase, and run commands.\n\nYou are an Agent. In each episode, you will be given the fields `task` as input. And you can see your past trajectory so far.\nYour goal is to use one or more of the supplied tools to collect any necessary information for producing `answer`, `affected_files`.\n\nTo do this, you will interleave next_thought, next_tool_name, and next_tool_args in each turn, and also when finishing the task.\nAfter each tool call, you receive a resulting observation, which gets appended to your trajectory.\n\nWhen writing next_thought, you may reason about the current situation and plan for future steps.\nWhen selecting the next_tool_name and its next_tool_args, the tool must be one of:\n\n(1) read_file, whose description is <desc>Read file contents with line numbers. Args: path: Path to the file to read offset: Line number to start from (0-indexed) limit: Maximum number of lines to read Returns: File contents with line numbers </desc>. It takes arguments {'path': {'type': 'string'}, 'offset': {'type': 'integer', 'default': 0}, 'limit': {'type': 'integer', 'default': None}}.\n(2) write_file, whose description is <desc>Write content to a file. Args: path: Path to the file to write content: Content to write to the file Returns: 'ok' on success </desc>. It takes arguments {'path': {'type': 'string'}, 'content': {'type': 'string'}}.\n(3) edit_file, whose description is <desc>Replace text in a file. Args: path: Path to the file to edit old: Text to find and replace new: Replacement text replace_all: If True, replace all occurrences; otherwise old must be unique Returns: 'ok' on success, error message on failure </desc>. 
It takes arguments {'path': {'type': 'string'}, 'old': {'type': 'string'}, 'new': {'type': 'string'}, 'replace_all': {'type': 'boolean', 'default': False}}.\n(4) glob_files, whose description is <desc>Find files matching a glob pattern, sorted by modification time. Args: pattern: Glob pattern to match (e.g., '**/*.py') path: Base directory to search in Returns: Newline-separated list of matching files </desc>. It takes arguments {'pattern': {'type': 'string'}, 'path': {'type': 'string', 'default': '.'}}.\n(5) grep_files, whose description is <desc>Search files for a regex pattern. Args: pattern: Regular expression pattern to search for path: Base directory to search in Returns: Matching lines in format 'filepath:line_num:content' </desc>. It takes arguments {'pattern': {'type': 'string'}, 'path': {'type': 'string', 'default': '.'}}.\n(6) run_bash, whose description is <desc>Run a shell command and return output. Args: cmd: Shell command to execute Returns: Command output (stdout and stderr combined) </desc>. It takes arguments {'cmd': {'type': 'string'}}.\n(7) finish, whose description is <desc>Marks the task as complete. That is, signals that all information for producing the outputs, i.e. `answer`, `affected_files`, are now available to be extracted.</desc>. It takes arguments {}.\nWhen providing `next_tool_args`, the value inside the field must be in JSON format",
"fields": [
{
"prefix": "Task:",
"description": "The user's coding task or question"
},
{
"prefix": "Trajectory:",
"description": "${trajectory}"
},
{
"prefix": "Next Thought:",
"description": "${next_thought}"
},
{
"prefix": "Next Tool Name:",
"description": "${next_tool_name}"
},
{
"prefix": "Next Tool Args:",
"description": "${next_tool_args}"
}
]
},
"lm": {
"model": "openrouter/anthropic/claude-3.5-sonnet",
"model_type": "chat",
"cache": true,
"num_retries": 3,
"finetuning_model": null,
"launch_kwargs": {},
"train_kwargs": {},
"temperature": null,
"max_tokens": 8192,
"api_base": "https://openrouter.ai/api/v1"
}
},
"agent.extract.predict": {
"traces": [],
"train": [],
"demos": [],
"signature": {
"instructions": "You are a concise coding assistant. Help the user with their coding task by using the available tools to read, write, edit files, search the codebase, and run commands.",
"fields": [
{
"prefix": "Task:",
"description": "The user's coding task or question"
},
{
"prefix": "Trajectory:",
"description": "${trajectory}"
},
{
"prefix": "Reasoning: Let's think step by step in order to",
"description": "${reasoning}"
},
{
"prefix": "Answer:",
"description": "Your response to the user after completing the task"
},
{
"prefix": "Affected Files:",
"description": "List of files that were written or modified during the task"
}
]
},
"lm": {
"model": "openrouter/anthropic/claude-3.5-sonnet",
"model_type": "chat",
"cache": true,
"num_retries": 3,
"finetuning_model": null,
"launch_kwargs": {},
"train_kwargs": {},
"temperature": null,
"max_tokens": 8192,
"api_base": "https://openrouter.ai/api/v1"
}
},
"metadata": {
"dependency_versions": {
"python": "3.13",
"dspy": "3.1.0",
"cloudpickle": "3.1"
}
}
}

4
push.py Normal file
View File

@@ -0,0 +1,4 @@
from nanocode import AgentProgram, AgentConfig

# One-shot publishing script: build the agent with default config and upload
# it (with_code=True bundles the source) — presumably to the Modaic hub; the
# push_to_hub method comes from PrecompiledProgram. TODO confirm destination.
agent = AgentProgram(AgentConfig())
agent.push_to_hub("farouk1/nanocode", with_code=True)

7
pyproject.toml Normal file
View File

@@ -0,0 +1,7 @@
[project]
name = "nanocode"
version = "0.1.0"
description = "Minimal Claude Code alternative built on DSPy ReAct"
readme = "README.md"
requires-python = ">=3.13"
dependencies = ["dspy>=3.1.0", "modaic>=0.10.0"]

50
utils/__init__.py Normal file
View File

@@ -0,0 +1,50 @@
"""Utility modules for nanocode."""
from .display import (
RESET,
BOLD,
DIM,
BLUE,
CYAN,
GREEN,
YELLOW,
RED,
MAGENTA,
separator,
render_markdown,
)
from .file_ops import (
read_file,
write_file,
edit_file,
glob_files,
grep_files,
)
from .shell_ops import run_bash
from .model_selection import AVAILABLE_MODELS, select_model
__all__ = [
# Display
"RESET",
"BOLD",
"DIM",
"BLUE",
"CYAN",
"GREEN",
"YELLOW",
"RED",
"MAGENTA",
"separator",
"render_markdown",
# File operations
"read_file",
"write_file",
"edit_file",
"glob_files",
"grep_files",
# Shell operations
"run_bash",
# Model selection
"AVAILABLE_MODELS",
"select_model",
]

25
utils/display.py Normal file
View File

@@ -0,0 +1,25 @@
"""Display and UI utilities for nanocode."""
import os
import re
# ANSI colors
RESET = "\033[0m"
BOLD = "\033[1m"
DIM = "\033[2m"
BLUE = "\033[34m"
CYAN = "\033[36m"
GREEN = "\033[32m"
YELLOW = "\033[33m"
RED = "\033[31m"
MAGENTA = "\033[35m"
def separator():
    """Return a dim horizontal rule sized to the terminal (capped at 80 cols).

    Falls back to 80 columns when stdout is not attached to a terminal
    (pipes, CI, tests), where os.get_terminal_size() raises OSError.
    """
    # Bug fix: the previous body multiplied the EMPTY string ('' * n), which
    # always yields "" — the rule character had been lost. The README's own
    # example output shows the intended '─' box-drawing character.
    try:
        width = min(os.get_terminal_size().columns, 80)
    except OSError:  # no controlling terminal
        width = 80
    return f"{DIM}{'─' * width}{RESET}"
def render_markdown(text):
    """Convert basic markdown bold syntax to ANSI bold."""
    # Non-greedy match so adjacent **a** **b** spans are styled separately.
    bold_pattern = r"\*\*(.+?)\*\*"
    return re.sub(bold_pattern, f"{BOLD}\\1{RESET}", text)

104
utils/file_ops.py Normal file
View File

@@ -0,0 +1,104 @@
"""File operation utilities for nanocode."""
import glob as globlib
import os
import re
def read_file(path: str, offset: int = 0, limit: int = None) -> str:
"""Read file contents with line numbers.
Args:
path: Path to the file to read
offset: Line number to start from (0-indexed)
limit: Maximum number of lines to read
Returns:
File contents with line numbers
"""
lines = open(path).readlines()
if limit is None:
limit = len(lines)
selected = lines[offset : offset + limit]
return "".join(f"{offset + idx + 1:4}| {line}" for idx, line in enumerate(selected))
def write_file(path: str, content: str) -> str:
    """Write content to a file, creating or truncating it.

    Args:
        path: Path to the file to write
        content: Content to write to the file

    Returns:
        'ok' on success
    """
    with open(path, "w") as handle:
        handle.write(content)
    return "ok"
def edit_file(path: str, old: str, new: str, replace_all: bool = False) -> str:
    """Replace text in a file.

    Args:
        path: Path to the file to edit
        old: Text to find and replace
        new: Replacement text
        replace_all: If True, replace all occurrences; otherwise old must be unique

    Returns:
        'ok' on success, error message on failure
    """
    # Context manager closes the read handle (previously left to GC).
    with open(path) as f:
        text = f.read()
    if old not in text:
        return "error: old_string not found"
    count = text.count(old)
    if not replace_all and count > 1:
        return f"error: old_string appears {count} times, must be unique (use replace_all=True)"
    # str.replace count: -1 replaces every occurrence, 1 only the first.
    replacement = text.replace(old, new, -1 if replace_all else 1)
    with open(path, "w") as f:
        f.write(replacement)
    return "ok"
def glob_files(pattern: str, path: str = ".") -> str:
    """Find files matching a glob pattern, sorted by modification time.

    Args:
        pattern: Glob pattern to match (e.g., '**/*.py')
        path: Base directory to search in

    Returns:
        Newline-separated list of matching files, newest first
    """
    search = (path + "/" + pattern).replace("//", "/")

    def sort_key(entry):
        # Directories (and entries that vanish mid-scan) sort last with mtime 0.
        return os.path.getmtime(entry) if os.path.isfile(entry) else 0

    matches = sorted(globlib.glob(search, recursive=True), key=sort_key, reverse=True)
    return "\n".join(matches) or "no files found"
def grep_files(pattern: str, path: str = ".") -> str:
    """Search files for a regex pattern.

    Args:
        pattern: Regular expression pattern to search for
        path: Base directory to search in

    Returns:
        Matching lines in format 'filepath:line_num:content', capped at 50
    """
    regex = re.compile(pattern)
    hits = []
    for filepath in globlib.glob(path + "/**", recursive=True):
        try:
            # Context manager closes each file (previously leaked handles).
            # The broad except intentionally skips directories, binary files,
            # and anything unreadable.
            with open(filepath) as f:
                for line_num, line in enumerate(f, 1):
                    if regex.search(line):
                        hits.append(f"{filepath}:{line_num}:{line.rstrip()}")
        except Exception:
            pass
        if len(hits) >= 50:
            # Cap reached — stop scanning further files instead of reading
            # the whole tree and discarding the excess.
            break
    return "\n".join(hits[:50]) or "no matches found"

51
utils/model_selection.py Normal file
View File

@@ -0,0 +1,51 @@
"""Model selection utilities for nanocode."""
import os
from .display import RESET, BOLD, DIM, BLUE, GREEN, RED
# Available OpenRouter models
# Menu key -> (display name, OpenRouter model id). Keys must stay distinct
# from the 'c' custom-entry option handled in select_model().
AVAILABLE_MODELS = {
    "1": ("Claude 3.5 Sonnet", "anthropic/claude-3.5-sonnet"),
    "2": ("Claude 3.5 Haiku", "anthropic/claude-3.5-haiku"),
    "3": ("GPT-4o", "openai/gpt-4o"),
    "4": ("GPT-4o mini", "openai/gpt-4o-mini"),
    "5": ("Gemini Pro 1.5", "google/gemini-pro-1.5"),
    "6": ("Llama 3.1 405B", "meta-llama/llama-3.1-405b-instruct"),
    "7": ("DeepSeek V3", "deepseek/deepseek-chat"),
    "8": ("Qwen 2.5 72B", "qwen/qwen-2.5-72b-instruct"),
}
def select_model():
    """Interactive model selection or use environment variable."""
    # The MODEL environment variable short-circuits the interactive menu.
    env_model = os.getenv("MODEL")
    if env_model:
        print(f"{GREEN}⏺ Using model from environment: {env_model}{RESET}")
        return env_model
    # Show the numbered menu plus a custom-entry escape hatch.
    print(f"\n{BOLD}Select a model:{RESET}")
    for key, (name, model_id) in AVAILABLE_MODELS.items():
        print(f" {BLUE}{key}{RESET}. {name} ({DIM}{model_id}{RESET})")
    print(f" {BLUE}c{RESET}. Custom model (enter manually)")
    while True:
        try:
            choice = input(f"\n{BOLD}{BLUE}{RESET} Enter choice (1-8 or c): ").strip().lower()
            if choice in AVAILABLE_MODELS:
                name, model_id = AVAILABLE_MODELS[choice]
                print(f"{GREEN}⏺ Selected: {name}{RESET}")
                return model_id
            if choice == "c":
                custom_model = input(f"{BOLD}{BLUE}{RESET} Enter model ID (e.g., openai/gpt-4): ").strip()
                if custom_model:
                    print(f"{GREEN}⏺ Selected custom model: {custom_model}{RESET}")
                    return custom_model
                print(f"{RED}⏺ Invalid model ID{RESET}")
                continue
            print(f"{RED}⏺ Invalid choice. Please enter 1-8 or c{RESET}")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C / Ctrl-D during either prompt aborts the whole program.
            print(f"\n{RED}⏺ Model selection cancelled{RESET}")
            exit(1)

34
utils/shell_ops.py Normal file
View File

@@ -0,0 +1,34 @@
"""Shell operation utilities for nanocode."""
import subprocess
from .display import DIM, RESET
def run_bash(cmd: str) -> str:
    """Run a shell command and return output.

    Args:
        cmd: Shell command to execute

    Returns:
        Command output (stdout and stderr combined)
    """
    process = subprocess.Popen(
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    captured = []
    try:
        # Stream output live so the user sees long-running commands progress.
        while True:
            chunk = process.stdout.readline()
            if chunk:
                print(f" {DIM}{chunk.rstrip()}{RESET}", flush=True)
                captured.append(chunk)
            elif process.poll() is not None:
                break
        # NOTE(review): the 30s timeout only engages after stdout hits EOF —
        # a hung command that prints nothing can still block readline()
        # indefinitely. TODO confirm whether that is acceptable here.
        process.wait(timeout=30)
    except subprocess.TimeoutExpired:
        process.kill()
        captured.append("\n(timed out after 30s)")
    return "".join(captured).strip() or "(empty output)"