Complete Migration
README.md
@@ -9,7 +9,6 @@ A composable DSPy agent that optimizes tweets using a reflective generate-evalua
- **Customizable Categories**: Define evaluation criteria (engagement, clarity, tone, etc.)
- **Multiple Usage Modes**: Single generation, full optimization, or standalone evaluation
- **Structured Evaluation**: 1-9 scoring with detailed reasoning per category
- **CLI Compatibility**: Same functionality as the original CLI tool
- **Easy Configuration**: Flexible model, iteration, and patience settings

## Installation
@@ -48,22 +47,10 @@ from tweet_optimizer_agent import TweetOptimizerAgent, TweetOptimizerConfig

# Create agent with default settings
config = TweetOptimizerConfig()
agent = TweetOptimizerAgent(config)
tweet_optimizer = TweetOptimizerAgent(config)

# Single tweet generation
tweet = agent(
    input_text="Create a tweet about HuggingFace transformers",
    current_tweet="",
    previous_evaluation=None
)
print(f"Generated: {tweet}")
```

### Full Optimization Process

```python
# Run complete optimization (like CLI)
results = agent.optimize(
# run complete optimization
results = tweet_optimizer(
    input_text="Create a tweet about HuggingFace transformers",
    iterations=10,
    patience=5
@@ -75,6 +62,12 @@ print(f"Score: {results['best_score']:.2f}")
print(f"Iterations: {results['iterations_run']}")
```

### Full Optimization Process

```python

```

### Custom Configuration

```python
@@ -94,20 +87,6 @@ config = TweetOptimizerConfig(
agent = TweetOptimizerAgent(config)
```
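
The body of the custom configuration example is collapsed between the hunks above. As a rough sketch only: the keyword names below (`lm`, `eval_lm`, `categories`, `max_iterations`, `patience`) are assumptions inferred from the attributes read off `config` in the `__init__` hunk later in this diff, and the values are placeholders.

```python
from tweet_optimizer_agent import TweetOptimizerAgent, TweetOptimizerConfig

# Hedged sketch: keyword names are inferred from config attributes in the __init__ hunk below.
config = TweetOptimizerConfig(
    lm="Claude Sonnet 4.5",        # generation model (assumed keyword)
    eval_lm="Claude Sonnet 4.5",   # evaluation model (assumed keyword)
    categories=["Engagement", "Clarity", "Tone"],  # evaluation criteria
    max_iterations=15,             # cap on optimization iterations
    patience=8,                    # stop after this many non-improving iterations
)
agent = TweetOptimizerAgent(config)
```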

### Tweet Evaluation

```python
# Evaluate a specific tweet
evaluation = agent.evaluate_tweet(
    tweet_text="Excited to share our new AI model!",
    original_text="We released a new AI model",
    current_best_tweet=""
)

for eval in evaluation.evaluations:
    print(f"{eval.category}: {eval.score}/9 - {eval.reasoning}")
```

### Deploy to Modaic Hub

```python
@@ -122,38 +101,27 @@ agent.push_to_hub(
### Load from Hub

```python
# Load a pre-trained agent from Modaic Hub
agent = TweetOptimizerAgent.from_precompiled("your-username/tweet-optimizer")
# Load this pre-trained agent from Modaic Hub
from modaic import AutoAgent

tweet_optimizer = AutoAgent.from_precompiled("farouk1/tweet-optimizer-v2")

# Use immediately
optimized = agent("Your tweet content here")
results = tweet_optimizer(
    input_text="Your tweet content here",
    iterations=20,
    patience=7
)

print(f"Original: {results['initial_text']}")
print(f"Optimized: {results['final_tweet']}")
print(f"Score: {results['best_score']:.2f}")
print(f"Iterations: {results['iterations_run']}")
```

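Only the opening `agent.push_to_hub(` of the deploy example survives in the hunk header above; its arguments are collapsed. A hedged sketch of a deploy-then-reload round trip, assuming `push_to_hub` takes a hub repo id the same way `from_precompiled` does (the repo id and the single-argument form are assumptions, not the confirmed API):

```python
from modaic import AutoAgent

# Hedged sketch: push_to_hub appears in this diff, but its arguments are collapsed.
# A single repo-id argument is an assumption that mirrors from_precompiled().
agent.push_to_hub("your-username/tweet-optimizer")

# Reload the published agent and run an optimization pass.
tweet_optimizer = AutoAgent.from_precompiled("your-username/tweet-optimizer")
results = tweet_optimizer(
    input_text="Announcing our new release",
    iterations=10,
    patience=5
)
print(results["final_tweet"])
```
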
## CLI Tool
### Configuration Options

The original CLI functionality is still available:

```bash
# Basic usage
python cli.py "Create a tweet about AI breakthroughs"

# With custom settings
python cli.py "Product launch announcement" \
    --model "Claude Sonnet 4.5" \
    --iterations 15 \
    --patience 8 \
    --categories "Excitement" "Clarity" "Call-to-action"

# List available models
python cli.py --list-models

# Quiet mode (output only final tweet)
python cli.py "Content here" --quiet
```

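Since the README advertises CLI compatibility, the custom-settings CLI call above should correspond roughly to the following Python call. This is a hedged sketch: mapping `--model` to an `lm` keyword (and `--categories` to `categories`) is an assumption based on the `config.lm` and `config.categories` attributes visible in the `__init__` hunk below.

```python
from tweet_optimizer_agent import TweetOptimizerAgent, TweetOptimizerConfig

# Hedged sketch: rough Python equivalent of the CLI example above.
config = TweetOptimizerConfig(
    lm="Claude Sonnet 4.5",                                  # --model (assumed keyword)
    categories=["Excitement", "Clarity", "Call-to-action"],  # --categories (assumed keyword)
)
tweet_optimizer = TweetOptimizerAgent(config)
results = tweet_optimizer(
    input_text="Product launch announcement",
    iterations=15,   # --iterations
    patience=8       # --patience
)
print(results["final_tweet"])  # roughly what --quiet prints in the CLI
```
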
## Configuration Options

### TweetOptimizerConfig
#### TweetOptimizerConfig

| Parameter | Type | Default | Description |
|------------------|-----------|----------------------------------------|---------------------------------------------|
@@ -202,7 +170,6 @@ This Modaic agent implementation is based on the original DSPy Tweet Optimizer b

- Core DSPy modules (TweetGeneratorModule, TweetEvaluatorModule)
- Hill-climbing optimization algorithm
- CLI interface and utilities
- Comprehensive testing framework

**Original Author**: Tom Doerr ([@tom-doerr](https://github.com/tom-doerr))
@@ -213,5 +180,4 @@ This Modaic agent implementation is based on the original DSPy Tweet Optimizer b
- Packaged as a Modaic PrecompiledAgent
- Added hub deployment functionality
- Enhanced configuration options
- Maintained CLI compatibility
- Extended usage examples
Binary file not shown.
@@ -37,6 +37,9 @@ class TweetOptimizerAgent(PrecompiledAgent):

        self.lm = config.lm
        self.eval_lm = config.eval_lm
        self.categories = config.categories
        self.max_iterations = config.max_iterations
        self.patience = config.patience

        # initialize DSPy with the specified model
        self.tweet_generator.set_lm(get_dspy_lm(config.lm))
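
The hunk above stores separate generator (`config.lm`) and evaluator (`config.eval_lm`) models but only shows the generator being wired up; the evaluator line is presumably collapsed. For orientation, a minimal DSPy sketch of giving two modules different language models; the signatures and module names here are stand-ins, not the repository's actual `TweetGeneratorModule`/`TweetEvaluatorModule` classes.

```python
import dspy

# Two language models: one for generation, a (possibly different) one for evaluation.
gen_lm = dspy.LM("openai/gpt-4o-mini")
eval_lm = dspy.LM("openai/gpt-4o")

# Stand-in modules; the repository uses TweetGeneratorModule / TweetEvaluatorModule instead.
generator = dspy.Predict("topic -> tweet")
evaluator = dspy.Predict("tweet -> feedback")

# Each DSPy module can be pinned to its own LM, which is what set_lm does in the hunk above.
generator.set_lm(gen_lm)
evaluator.set_lm(eval_lm)
```
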
@@ -49,8 +52,8 @@ class TweetOptimizerAgent(PrecompiledAgent):

        patience: Optional[int] = None
    ) -> str:
        """Run full optimization process."""
        max_iterations = iterations or self.config.max_iterations
        patience_limit = patience or self.config.patience
        max_iterations = iterations or self.max_iterations
        patience_limit = patience or self.patience

        results = {
            'initial_text': input_text,
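
The page cuts off inside `optimize()` while the results dict is being built. For orientation only, a minimal sketch of the hill-climbing loop with patience-based early stopping that the README describes; `generate_candidate` and `score_tweet` are hypothetical stand-ins, and only the result keys printed elsewhere in the README are taken from the source.

```python
def hill_climb(input_text: str, max_iterations: int, patience_limit: int) -> dict:
    """Hedged sketch of hill climbing with patience; the helpers are hypothetical."""
    best_tweet = generate_candidate(input_text, current_tweet="")   # hypothetical helper
    best_score = score_tweet(best_tweet, input_text)                # hypothetical helper
    no_improvement = 0
    iterations_run = 0

    for _ in range(max_iterations):
        iterations_run += 1
        candidate = generate_candidate(input_text, current_tweet=best_tweet)
        score = score_tweet(candidate, input_text)
        if score > best_score:                  # keep strict improvements only
            best_tweet, best_score = candidate, score
            no_improvement = 0
        else:
            no_improvement += 1
        if no_improvement >= patience_limit:    # stop after `patience` stale rounds
            break

    return {
        "initial_text": input_text,
        "final_tweet": best_tweet,
        "best_score": best_score,
        "iterations_run": iterations_run,
    }
```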