diff --git a/agent/__pycache__/__init__.cpython-310.pyc b/agent/__pycache__/__init__.cpython-310.pyc
index 260d9c7..992044a 100644
Binary files a/agent/__pycache__/__init__.cpython-310.pyc and b/agent/__pycache__/__init__.cpython-310.pyc differ
diff --git a/agent/__pycache__/agent.cpython-310.pyc b/agent/__pycache__/agent.cpython-310.pyc
index 251de63..39d31ee 100644
Binary files a/agent/__pycache__/agent.cpython-310.pyc and b/agent/__pycache__/agent.cpython-310.pyc differ
diff --git a/agent/__pycache__/modules.cpython-310.pyc b/agent/__pycache__/modules.cpython-310.pyc
index 04a7d29..30b5443 100644
Binary files a/agent/__pycache__/modules.cpython-310.pyc and b/agent/__pycache__/modules.cpython-310.pyc differ
diff --git a/agent/constants.py b/agent/constants.py
index 1eb4de6..aab7ba5 100644
--- a/agent/constants.py
+++ b/agent/constants.py
@@ -27,7 +27,7 @@ AVAILABLE_MODELS: Dict[str, str] = {
     "Gemini 2.5 Flash": "openrouter/google/gemini-2.5-flash",
     "Gemini 2.5 Flash Lite": "openrouter/google/gemini-2.5-flash-lite",
     "Gemini 2.5 Pro": "openrouter/google/gemini-2.5-pro",
-    "GPT-5": "openrouter/openai/gpt-5"
+    "GPT-5": "openrouter/openai/gpt-5",
 }
 
 # openrouter API configuration
@@ -45,7 +45,7 @@ DEFAULT_CATEGORIES: List[str] = [
     "Engagement potential - how likely users are to like, retweet, or reply",
     "Clarity and readability - how easy the tweet is to understand",
     "Emotional impact - how well the tweet evokes feelings or reactions",
-    "Relevance to target audience - how well it resonates with intended readers"
+    "Relevance to target audience - how well it resonates with intended readers",
 ]
 
 # error messages
diff --git a/agent/helpers.py b/agent/helpers.py
index 90dd1ff..f986363 100644
--- a/agent/helpers.py
+++ b/agent/helpers.py
@@ -6,38 +6,37 @@ from .constants import MAX_SCORE
 def format_evaluation_for_generator(evaluation: Optional[EvaluationResult]) -> str:
     """
     Format an evaluation result as text for the generator module.
-    
+
     Args:
         evaluation: The evaluation result to format
-    
+
     Returns:
         Formatted string with category-by-category reasoning and scores
     """
     if not evaluation or not evaluation.evaluations:
         return ""
-    
+
     eval_lines = []
     for eval in evaluation.evaluations:
-        eval_lines.append(f"{eval.category} (Score: {eval.score}/{MAX_SCORE}): {eval.reasoning}")
-    
+        eval_lines.append(
+            f"{eval.category} (Score: {eval.score}/{MAX_SCORE}): {eval.reasoning}"
+        )
+
     return "\n".join(eval_lines)
 
 
 def build_settings_dict(
-    selected_model: str,
-    iterations: int,
-    patience: int,
-    use_cache: bool
+    selected_model: str, iterations: int, patience: int, use_cache: bool
 ) -> Dict[str, Any]:
     """
     Build a settings dictionary for saving.
-    
+
     Args:
         selected_model: The selected model name
         iterations: Number of optimization iterations
         patience: Patience threshold for early stopping
         use_cache: Whether to use DSPy cache
-    
+
     Returns:
         Dictionary containing all settings
     """
@@ -45,26 +44,26 @@ def build_settings_dict(
         "selected_model": selected_model,
         "iterations": iterations,
         "patience": patience,
-        "use_cache": use_cache
+        "use_cache": use_cache,
     }
 
 
 def truncate_tweet(tweet: str, max_length: int, suffix: str = "...") -> str:
     """
     Truncate a tweet to the maximum length with a suffix.
-    
+
     Args:
         tweet: The tweet text to truncate
        max_length: Maximum allowed length
         suffix: Suffix to add when truncating (default: "...")
-    
+
     Returns:
         Truncated tweet text
     """
     tweet = tweet.strip()
     if len(tweet) <= max_length:
         return tweet
-    
+
     truncation_point = max_length - len(suffix)
     return tweet[:truncation_point] + suffix
 
 
@@ -72,11 +71,11 @@ def truncate_tweet(tweet: str, max_length: int, suffix: str = "...") -> str:
 def truncate_category_display(category: str, max_length: int = 30) -> str:
     """
     Truncate a category name for display purposes.
-    
+
    Args:
         category: The category name
         max_length: Maximum display length (default: 30)
-    
+
     Returns:
         Truncated category name with "..." if needed
     """
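Note (illustrative only, not part of the diff): a minimal sketch of how the reformatted helpers are used, assuming the agent package is importable; the 280-character limit here is an example value, not necessarily TWEET_MAX_LENGTH.

    from agent.helpers import build_settings_dict, truncate_tweet

    # build_settings_dict packages the UI settings for persistence
    settings = build_settings_dict("Gemini 2.5 Flash", iterations=10, patience=5, use_cache=True)
    print(settings["selected_model"])  # "Gemini 2.5 Flash"

    # truncate_tweet trims over-long text and appends the suffix
    long_text = "x" * 300
    tweet = truncate_tweet(long_text, max_length=280)  # default suffix "..."
    print(len(tweet))  # 280: 277 characters of text plus "..."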
diff --git a/agent/hill_climbing.py b/agent/hill_climbing.py
index e4ee9a8..bfaf112 100644
--- a/agent/hill_climbing.py
+++ b/agent/hill_climbing.py
@@ -3,27 +3,32 @@ from .models import EvaluationResult
 from .modules import TweetGeneratorModule, TweetEvaluatorModule
 from .helpers import format_evaluation_for_generator
 
+
 class HillClimbingOptimizer:
     """Hill climbing optimizer for tweet improvement."""
-    
+
     def __init__(
         self,
         generator: TweetGeneratorModule,
         evaluator: TweetEvaluatorModule,
         categories: List[str],
         max_iterations: int = 10,
-        patience: int = 5
+        patience: int = 5,
     ):
         self.generator = generator
         self.evaluator = evaluator
         self.categories = categories
         self.max_iterations = max_iterations
         self.patience = patience
-    
-    def optimize(self, initial_text: str) -> Iterator[Tuple[str, EvaluationResult, bool, int, Dict[str, str], Dict[str, str]]]:
+
+    def optimize(
+        self, initial_text: str
+    ) -> Iterator[
+        Tuple[str, EvaluationResult, bool, int, Dict[str, str], Dict[str, str]]
+    ]:
         """
         Optimize tweet using hill climbing algorithm.
-        
+
         Yields:
             Tuple of (current_tweet, evaluation_result, is_improvement, patience_counter, generator_inputs, evaluator_inputs)
         """
@@ -31,89 +36,114 @@ class HillClimbingOptimizer:
         generator_inputs = {
             "input_text": initial_text,
             "current_tweet": "",
-            "previous_evaluation": ""
+            "previous_evaluation": "",
         }
         current_tweet = self.generator(
-            input_text=initial_text,
-            current_tweet="",
-            previous_evaluation=None
+            input_text=initial_text, current_tweet="", previous_evaluation=None
         )
-        
+
         evaluator_inputs = {
             "original_text": initial_text,
             "current_best_tweet": "",
-            "tweet_text": current_tweet
+            "tweet_text": current_tweet,
         }
         current_score = self.evaluator(
             tweet_text=current_tweet,
             categories=self.categories,
             original_text=initial_text,
-            current_best_tweet=""
+            current_best_tweet="",
         )
-        
+
         best_tweet = current_tweet
         best_score = current_score
         patience_counter = 0
-        
-        yield (current_tweet, current_score, True, patience_counter, generator_inputs, evaluator_inputs)
-        
+
+        yield (
+            current_tweet,
+            current_score,
+            True,
+            patience_counter,
+            generator_inputs,
+            evaluator_inputs,
+        )
+
         for iteration in range(1, self.max_iterations):
             # Generate improved tweet with previous evaluation as feedback
             try:
                 # Format evaluation for display in generator inputs
                 eval_text = format_evaluation_for_generator(best_score)
-                
+
                 generator_inputs = {
                     "input_text": initial_text,
                     "current_tweet": best_tweet,
-                    "previous_evaluation": eval_text
+                    "previous_evaluation": eval_text,
                 }
-                
+
                 candidate_tweet = self.generator(
                     input_text=initial_text,
                     current_tweet=best_tweet,
-                    previous_evaluation=best_score
+                    previous_evaluation=best_score,
                 )
-                
+
                 # Evaluate candidate
                 evaluator_inputs = {
                     "original_text": initial_text,
                     "current_best_tweet": best_tweet,
-                    "tweet_text": candidate_tweet
+                    "tweet_text": candidate_tweet,
                 }
                 candidate_score = self.evaluator(
                     tweet_text=candidate_tweet,
                     categories=self.categories,
                     original_text=initial_text,
-                    current_best_tweet=best_tweet
+                    current_best_tweet=best_tweet,
                 )
-                
+
                 # Check if candidate is better (hill climbing condition)
                 is_improvement = candidate_score > best_score
-                
+
                 if is_improvement:
                     best_tweet = candidate_tweet
                     best_score = candidate_score
                     patience_counter = 0
-                    yield (candidate_tweet, candidate_score, True, patience_counter, generator_inputs, evaluator_inputs)
+                    yield (
+                        candidate_tweet,
+                        candidate_score,
+                        True,
+                        patience_counter,
+                        generator_inputs,
+                        evaluator_inputs,
+                    )
                 else:
                     patience_counter += 1
-                    yield (best_tweet, candidate_score, False, patience_counter, generator_inputs, evaluator_inputs)
-                
+                    yield (
+                        best_tweet,
+                        candidate_score,
+                        False,
+                        patience_counter,
+                        generator_inputs,
+                        evaluator_inputs,
+                    )
+
                 # Early stopping if no improvement for 'patience' iterations
                 if patience_counter >= self.patience:
                     break
-                
+
             except Exception as e:
                 # If generation fails, yield current best
                 patience_counter += 1
                 evaluator_inputs = {
                     "original_text": initial_text,
                     "current_best_tweet": best_tweet,
-                    "tweet_text": best_tweet
+                    "tweet_text": best_tweet,
                 }
-                yield (best_tweet, best_score, False, patience_counter, generator_inputs, evaluator_inputs)
-                
+                yield (
+                    best_tweet,
+                    best_score,
+                    False,
+                    patience_counter,
+                    generator_inputs,
+                    evaluator_inputs,
+                )
+
                 if patience_counter >= self.patience:
                     break
-
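Note (illustrative only, not part of the diff): the accept/patience logic in optimize() reduces to the standalone sketch below; generate and score are hypothetical stand-ins for the DSPy generator and evaluator modules, so it runs without an LM.

    def hill_climb(seed, generate, score, max_iterations=10, patience=5):
        # first draft, then iterate
        best = generate(seed, None)
        best_score = score(best)
        stale = 0
        for _ in range(1, max_iterations):
            candidate = generate(seed, best)
            candidate_score = score(candidate)
            if candidate_score > best_score:   # accept only strict improvements
                best, best_score, stale = candidate, candidate_score, 0
            else:
                stale += 1                     # otherwise burn patience
            if stale >= patience:              # early stop, as in optimize()
                break
        return best, best_score

    # toy stand-ins: each "improvement" appends a character, score is length
    best, score = hill_climb("draft", lambda seed, cur: (cur or seed) + "!", len)
    print(best, score)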
diff --git a/agent/index.py b/agent/index.py
new file mode 100644
index 0000000..5fe2607
--- /dev/null
+++ b/agent/index.py
@@ -0,0 +1,105 @@
+from modaic import PrecompiledAgent, PrecompiledConfig
+from .modules import TweetGeneratorModule, TweetEvaluatorModule
+from .models import EvaluationResult
+from .hill_climbing import HillClimbingOptimizer
+from typing import Optional, List
+from .utils import get_dspy_lm
+from .constants import DEFAULT_CATEGORIES, DEFAULT_ITERATIONS, DEFAULT_PATIENCE
+
+
+class TweetOptimizerConfig(PrecompiledConfig):
+    lm: str = "openrouter/google/gemini-2.5-flash"
+    eval_lm: str = "openrouter/openai/gpt-5"
+    categories: List[str] = DEFAULT_CATEGORIES
+    max_iterations: int = DEFAULT_ITERATIONS
+    patience: int = DEFAULT_PATIENCE
+
+
+class TweetOptimizerAgent(PrecompiledAgent):
+    config: TweetOptimizerConfig
+
+    current_tweet: str = ""
+    previous_evaluation: Optional[EvaluationResult] = None
+
+    def __init__(self, config: TweetOptimizerConfig, **kwargs):
+        super().__init__(config, **kwargs)
+        self.tweet_generator = TweetGeneratorModule()
+        self.tweet_evaluator = TweetEvaluatorModule()
+
+        # set up optimizer
+        self.optimizer = HillClimbingOptimizer(
+            generator=self.tweet_generator,
+            evaluator=self.tweet_evaluator,
+            categories=config.categories,
+            max_iterations=config.max_iterations,
+            patience=config.patience,
+        )
+
+        self.lm = config.lm
+        self.eval_lm = config.eval_lm
+        self.categories = config.categories
+        self.max_iterations = config.max_iterations
+        self.patience = config.patience
+
+        # initialize DSPy with the specified model
+        self.tweet_generator.set_lm(get_dspy_lm(config.lm))
+        self.tweet_evaluator.set_lm(get_dspy_lm(config.eval_lm))
+
+    def forward(
+        self,
+        input_text: str,
+        iterations: Optional[int] = None,
+        patience: Optional[int] = None,
+    ) -> dict:
+        """Run full optimization process and return the results dictionary."""
+        max_iterations = iterations or self.max_iterations
+        patience_limit = patience or self.patience
+
+        results = {
+            "initial_text": input_text,
+            "final_tweet": "",
+            "best_score": 0.0,
+            "iterations_run": 0,
+            "early_stopped": False,
+            "scores_history": [],
+            "improvement_count": 0,
+        }
+
+        best_tweet = ""
+        best_score = 0.0
+
+        for iteration, (
+            current_tweet,
+            scores,
+            is_improvement,
+            patience_counter,
+            _,
+            _,
+        ) in enumerate(self.optimizer.optimize(input_text)):
+            iteration_num = iteration + 1
+            results["iterations_run"] = iteration_num
+            results["scores_history"].append(scores)
+
+            if is_improvement:
+                best_tweet = current_tweet
+                best_score = sum(scores.category_scores) / len(scores.category_scores)
+                results["improvement_count"] += 1
+
+            # check for early stopping
+            if patience_counter >= patience_limit:
+                results["early_stopped"] = True
+                break
+
+            # stop at max iterations
+            if iteration_num >= max_iterations:
+                break
+
+        results.update({"final_tweet": best_tweet, "best_score": best_score})
+
+        self.reset()
+
+        return results
+
+    def reset(self):
+        self.current_tweet = ""
+        self.previous_evaluation = None
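Note (illustrative only, not part of the diff): a sketch of calling the new agent and reading the results dict that forward() returns. It assumes modaic is installed, OPENROUTER_API_KEY is set, and that the config can be constructed from its declared defaults; the iteration/patience overrides mirror the call in main.py.

    from agent.index import TweetOptimizerAgent, TweetOptimizerConfig

    optimizer = TweetOptimizerAgent(TweetOptimizerConfig())
    results = optimizer(
        input_text="Anthropic added a new OSS model on HuggingFace.",
        iterations=3,
        patience=2,
    )

    # forward() returns the results dict built above, not a plain string
    print(results["final_tweet"])
    print(results["best_score"], results["iterations_run"], results["early_stopped"])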
"early_stopped": False, + "scores_history": [], + "improvement_count": 0, + } + + best_tweet = "" + best_score = 0.0 + + for iteration, ( + current_tweet, + scores, + is_improvement, + patience_counter, + _, + _, + ) in enumerate(self.optimizer.optimize(input_text)): + iteration_num = iteration + 1 + results["iterations_run"] = iteration_num + results["scores_history"].append(scores) + + if is_improvement: + best_tweet = current_tweet + best_score = sum(scores.category_scores) / len(scores.category_scores) + results["improvement_count"] += 1 + + # check for early stopping + if patience_counter >= patience_limit: + results["early_stopped"] = True + break + + # stop at max iterations + if iteration_num >= max_iterations: + break + + results.update({"final_tweet": best_tweet, "best_score": best_score}) + + self.reset() + + return results + + def reset(self): + self.current_tweet = "" + self.previous_evaluation = None diff --git a/agent/models.py b/agent/models.py index 198c53f..81b5308 100644 --- a/agent/models.py +++ b/agent/models.py @@ -2,57 +2,61 @@ from pydantic import BaseModel, Field, validator from typing import List from .constants import MIN_SCORE, MAX_SCORE + class CategoryEvaluation(BaseModel): """Pydantic model for a single category evaluation with reasoning.""" - + category: str = Field(description="The evaluation category name") reasoning: str = Field(description="Explanation for the score") score: int = Field( - description=f"Score for this category ({MIN_SCORE}-{MAX_SCORE})", - ge=MIN_SCORE, - le=MAX_SCORE + description=f"Score for this category ({MIN_SCORE}-{MAX_SCORE})", + ge=MIN_SCORE, + le=MAX_SCORE, ) - - @validator('score') + + @validator("score") def validate_score(cls, score): """Ensure score is within the valid range.""" if not isinstance(score, int) or score < MIN_SCORE or score > MAX_SCORE: - raise ValueError(f"Score {score} must be an integer between {MIN_SCORE} and {MAX_SCORE}") + raise ValueError( + f"Score {score} must be an integer between {MIN_SCORE} and {MAX_SCORE}" + ) return score + class EvaluationResult(BaseModel): """Pydantic model for tweet evaluation results.""" - + evaluations: List[CategoryEvaluation] = Field( description="List of category evaluations with reasoning and scores" ) - - @validator('evaluations') + + @validator("evaluations") def validate_evaluations(cls, evals): """Ensure we have at least one evaluation.""" if not evals or len(evals) < 1: raise ValueError("Must have at least one category evaluation") return evals - + @property def category_scores(self) -> List[int]: """Get list of scores for backwards compatibility.""" return [eval.score for eval in self.evaluations] - + def total_score(self) -> float: """Calculate the total score across all categories.""" return sum(eval.score for eval in self.evaluations) - + def average_score(self) -> float: """Calculate the average score across all categories.""" return self.total_score() / len(self.evaluations) - + def __gt__(self, other): """Compare evaluation results based on total score.""" if not isinstance(other, EvaluationResult): return NotImplemented return self.total_score() > other.total_score() - + def __eq__(self, other): """Check equality based on total score.""" if not isinstance(other, EvaluationResult): diff --git a/agent/modules.py b/agent/modules.py index f4d034e..e9d021e 100644 --- a/agent/modules.py +++ b/agent/modules.py @@ -1,7 +1,7 @@ import dspy from typing import List, Optional -from models import EvaluationResult, CategoryEvaluation -from constants import ( +from .models 
diff --git a/agent/modules.py b/agent/modules.py
index f4d034e..e9d021e 100644
--- a/agent/modules.py
+++ b/agent/modules.py
@@ -1,7 +1,7 @@
 import dspy
 from typing import List, Optional
-from models import EvaluationResult, CategoryEvaluation
-from constants import (
+from .models import EvaluationResult, CategoryEvaluation
+from .constants import (
     TWEET_MAX_LENGTH,
     TWEET_TRUNCATION_SUFFIX,
     DEFAULT_SCORE,
@@ -10,87 +10,115 @@ from constants import (
     ERROR_GENERATION,
     ERROR_EVALUATION,
     MIN_SCORE,
-    MAX_SCORE
+    MAX_SCORE,
 )
-from helpers import format_evaluation_for_generator, truncate_tweet
+from .helpers import format_evaluation_for_generator, truncate_tweet
 
+
 class TweetGenerator(dspy.Signature):
     """Generate or improve a tweet based on input text and detailed evaluation feedback with reasoning."""
-    
+
     input_text: str = dspy.InputField(desc="Original text or current tweet to improve")
-    current_tweet: str = dspy.InputField(desc="Current best tweet version (empty for first generation)")
-    previous_evaluation: str = dspy.InputField(desc="Previous evaluation with category-by-category reasoning and scores (empty for first generation)")
-    improved_tweet: str = dspy.OutputField(desc=f"Generated or improved tweet text (max {TWEET_MAX_LENGTH} characters)")
+    current_tweet: str = dspy.InputField(
+        desc="Current best tweet version (empty for first generation)"
+    )
+    previous_evaluation: str = dspy.InputField(
+        desc="Previous evaluation with category-by-category reasoning and scores (empty for first generation)"
+    )
+    improved_tweet: str = dspy.OutputField(
+        desc=f"Generated or improved tweet text (max {TWEET_MAX_LENGTH} characters)"
+    )
+
 
 class TweetEvaluator(dspy.Signature):
     """Evaluate a tweet across multiple custom categories. For each category, provide detailed reasoning explaining the score, then assign a score. Ensure the tweet maintains the same meaning as the original text."""
-    
-    original_text: str = dspy.InputField(desc="Original input text that started the optimization")
-    current_best_tweet: str = dspy.InputField(desc="Current best tweet version for comparison (empty for first evaluation)")
+
+    original_text: str = dspy.InputField(
+        desc="Original input text that started the optimization"
+    )
+    current_best_tweet: str = dspy.InputField(
+        desc="Current best tweet version for comparison (empty for first evaluation)"
+    )
     tweet_text: str = dspy.InputField(desc="Tweet text to evaluate")
-    categories: str = dspy.InputField(desc="Comma-separated list of evaluation category descriptions")
+    categories: str = dspy.InputField(
+        desc="Comma-separated list of evaluation category descriptions"
+    )
     evaluations: List[CategoryEvaluation] = dspy.OutputField(
         desc=f"List of evaluations with category name, detailed reasoning, and score ({MIN_SCORE}-{MAX_SCORE}) for each category. Ensure the tweet conveys the same meaning as the original text."
     )
 
+
 class TweetGeneratorModule(dspy.Module):
     """DSPy module for generating and improving tweets."""
-    
+
     def __init__(self):
         super().__init__()
         self.generate = dspy.ChainOfThought(TweetGenerator)
-    
-    def forward(self, input_text: str, current_tweet: str = "", previous_evaluation: Optional[EvaluationResult] = None) -> str:
+
+    def forward(
+        self,
+        input_text: str,
+        current_tweet: str = "",
+        previous_evaluation: Optional[EvaluationResult] = None,
+    ) -> str:
         """Generate or improve a tweet."""
         try:
             # Format previous evaluation as text
             eval_text = format_evaluation_for_generator(previous_evaluation)
-            
+
             result = self.generate(
                 input_text=input_text,
                 current_tweet=current_tweet,
-                previous_evaluation=eval_text
+                previous_evaluation=eval_text,
             )
-            
+
             # Ensure tweet doesn't exceed character limit
-            tweet = truncate_tweet(result.improved_tweet, TWEET_MAX_LENGTH, TWEET_TRUNCATION_SUFFIX)
-            
+            tweet = truncate_tweet(
+                result.improved_tweet, TWEET_MAX_LENGTH, TWEET_TRUNCATION_SUFFIX
+            )
+
             return tweet
         except Exception as e:
             raise Exception(f"{ERROR_GENERATION}: {str(e)}")
 
+
 class TweetEvaluatorModule(dspy.Module):
     """DSPy module for evaluating tweets across custom categories."""
-    
+
     def __init__(self):
         super().__init__()
         self.evaluate = dspy.ChainOfThought(TweetEvaluator)
-    
-    def forward(self, tweet_text: str, categories: List[str], original_text: str = "", current_best_tweet: str = "") -> EvaluationResult:
+
+    def forward(
+        self,
+        tweet_text: str,
+        categories: List[str],
+        original_text: str = "",
+        current_best_tweet: str = "",
+    ) -> EvaluationResult:
         """Evaluate a tweet across specified categories."""
         try:
             # Join categories into comma-separated string
             categories_str = ", ".join(categories)
-            
+
             result = self.evaluate(
                 original_text=original_text,
                 current_best_tweet=current_best_tweet,
                 tweet_text=tweet_text,
-                categories=categories_str
+                categories=categories_str,
             )
-            
+
             # Extract and validate evaluations
             evaluations = result.evaluations
-            
+
             # Ensure we have the right number of evaluations
             if len(evaluations) != len(categories):
                 # Create default evaluations if mismatch
                 evaluations = [
                     CategoryEvaluation(
-                        category=cat,
-                        reasoning=ERROR_PARSING,
-                        score=DEFAULT_SCORE
-                    ) for cat in categories
+                        category=cat, reasoning=ERROR_PARSING, score=DEFAULT_SCORE
+                    )
+                    for cat in categories
                 ]
             else:
                 # Validate each evaluation
@@ -99,22 +127,32 @@ class TweetEvaluatorModule(dspy.Module):
                     try:
                         # Ensure score is valid
                         score = max(MIN_SCORE, min(MAX_SCORE, int(eval.score)))
-                        validated_evals.append(CategoryEvaluation(
-                            category=categories[i] if i < len(categories) else eval.category,
-                            reasoning=eval.reasoning if eval.reasoning else "No reasoning provided",
-                            score=score
-                        ))
+                        validated_evals.append(
+                            CategoryEvaluation(
+                                category=categories[i]
+                                if i < len(categories)
+                                else eval.category,
+                                reasoning=eval.reasoning
+                                if eval.reasoning
+                                else "No reasoning provided",
+                                score=score,
+                            )
+                        )
                     except (ValueError, TypeError, AttributeError):
-                        validated_evals.append(CategoryEvaluation(
-                            category=categories[i] if i < len(categories) else "Unknown",
-                            reasoning=ERROR_VALIDATION,
-                            score=DEFAULT_SCORE
-                        ))
+                        validated_evals.append(
+                            CategoryEvaluation(
+                                category=categories[i]
+                                if i < len(categories)
+                                else "Unknown",
+                                reasoning=ERROR_VALIDATION,
+                                score=DEFAULT_SCORE,
+                            )
+                        )
                 evaluations = validated_evals
-            
+
             # Create validated result
             validated_result = EvaluationResult(evaluations=evaluations)
-            
+
             return validated_result
         except Exception as e:
             # Return default evaluations on error
@@ -122,7 +160,8 @@ class TweetEvaluatorModule(dspy.Module):
                 CategoryEvaluation(
                     category=cat,
                     reasoning=f"{ERROR_EVALUATION}: {str(e)}",
-                    score=DEFAULT_SCORE
-                ) for cat in categories
+                    score=DEFAULT_SCORE,
+                )
+                for cat in categories
             ]
             return EvaluationResult(evaluations=default_evals)
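Note (illustrative only, not part of the diff): wiring the two modules directly, mirroring what TweetOptimizerAgent.__init__ does. It assumes dspy is configured via get_dspy_lm and that an OpenRouter API key is available.

    from agent.modules import TweetGeneratorModule, TweetEvaluatorModule
    from agent.utils import get_dspy_lm
    from agent.constants import DEFAULT_CATEGORIES

    generator = TweetGeneratorModule()
    evaluator = TweetEvaluatorModule()
    generator.set_lm(get_dspy_lm("openrouter/google/gemini-2.5-flash"))
    evaluator.set_lm(get_dspy_lm("openrouter/openai/gpt-5"))

    source = "Anthropic added a new OSS model on HuggingFace."
    draft = generator(input_text=source)                       # returns a tweet string
    evaluation = evaluator(tweet_text=draft,
                           categories=DEFAULT_CATEGORIES,
                           original_text=source)                # returns an EvaluationResult
    print(draft)
    print(evaluation.total_score())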
diff --git a/agent/utils.py b/agent/utils.py
index 7ef0f4c..b1d5497 100644
--- a/agent/utils.py
+++ b/agent/utils.py
@@ -23,22 +23,24 @@
     ERROR_SAVE_HISTORY,
     ERROR_LOAD_HISTORY,
     ERROR_DSPy_INIT,
-    TWEET_MAX_LENGTH
+    TWEET_MAX_LENGTH,
 )
 
+
 def save_categories(categories: List[str]) -> None:
     """Save categories to JSON file."""
     try:
-        with open(CATEGORIES_FILE, 'w') as f:
+        with open(CATEGORIES_FILE, "w") as f:
             json.dump(categories, f, indent=2)
     except Exception as e:
         print(f"{ERROR_SAVE_CATEGORIES}: {str(e)}")
 
+
 def load_categories() -> List[str]:
     """Load categories from JSON file."""
     try:
         if os.path.exists(CATEGORIES_FILE):
-            with open(CATEGORIES_FILE, 'r') as f:
+            with open(CATEGORIES_FILE, "r") as f:
                 categories = json.load(f)
                 return categories if isinstance(categories, list) else []
         else:
@@ -48,6 +50,7 @@ def load_categories() -> List[str]:
         print(f"{ERROR_LOAD_CATEGORIES}: {str(e)}")
         return []
 
+
 def get_dspy_lm(model_name: str):
     """Get a DSPy LM instance for the specified model (cached per model)."""
     try:
@@ -57,32 +60,32 @@ def get_dspy_lm(model_name: str):
         max_tokens = 16000 if "openai/gpt-5" in model_name else OPENROUTER_MAX_TOKENS
         temperature = 1.0 if "openai/gpt-5" in model_name else OPENROUTER_TEMPERATURE
-        
+
         lm = dspy.LM(
             model=model_name,
             api_key=openrouter_key,
             api_base=OPENROUTER_API_BASE,
             max_tokens=max_tokens,
-            temperature=temperature
+            temperature=temperature,
         )
         return lm
     except Exception as e:
         raise Exception(f"Failed to create LM: {str(e)}")
 
-def initialize_dspy(model_name: str = DEFAULT_MODEL, use_cache: bool = DEFAULT_USE_CACHE) -> bool:
+
+def initialize_dspy(
+    model_name: str = DEFAULT_MODEL, use_cache: bool = DEFAULT_USE_CACHE
+) -> bool:
     """Initialize DSPy with OpenRouter and selected model."""
     # Configure cache settings
     try:
-        dspy.configure_cache(
-            enable_memory_cache=use_cache,
-            enable_disk_cache=use_cache
-        )
+        dspy.configure_cache(enable_memory_cache=use_cache, enable_disk_cache=use_cache)
     except Exception:
         # Cache configuration might fail in some environments, continue anyway
         pass
-    
+
     # Only configure DSPy once globally
-    if not hasattr(dspy, '_replit_configured'):
+    if not hasattr(dspy, "_replit_configured"):
         try:
             # Get the LM for the default model
             default_lm = get_dspy_lm(model_name)
@@ -90,37 +93,44 @@ def initialize_dspy(model_name: str = DEFAULT_MODEL, use_cache: bool = DEFAULT_U
             dspy._replit_configured = True  # type: ignore
         except Exception as e:
             raise Exception(f"{ERROR_DSPy_INIT}: {str(e)}")
-    
+
     return True
 
+
 def format_tweet_for_display(tweet: str) -> str:
     """Format tweet text for better display."""
     return tweet.strip()
 
+
 def calculate_tweet_length(tweet: str) -> int:
     """Calculate tweet length."""
     return len(tweet.strip())
 
+
 def is_valid_tweet(tweet: str) -> bool:
     """Check if tweet is valid (not empty and within character limit)."""
     cleaned_tweet = tweet.strip()
     return bool(cleaned_tweet) and len(cleaned_tweet) <= TWEET_MAX_LENGTH
 
+
 def save_settings(settings: Dict[str, Any]) -> None:
     """Save settings to JSON file."""
     try:
-        with open(SETTINGS_FILE, 'w') as f:
+        with open(SETTINGS_FILE, "w") as f:
             json.dump(settings, f, indent=2)
     except Exception as e:
         print(f"{ERROR_SAVE_SETTINGS}: {str(e)}")
 
+
 def load_settings() -> Dict[str, Any]:
     """Load settings from JSON file."""
     try:
         if os.path.exists(SETTINGS_FILE):
-            with open(SETTINGS_FILE, 'r') as f:
+            with open(SETTINGS_FILE, "r") as f:
                 settings = json.load(f)
-                return settings if isinstance(settings, dict) else get_default_settings()
+                return (
+                    settings if isinstance(settings, dict) else get_default_settings()
+                )
         else:
             # Return default settings if file doesn't exist
             default_settings = get_default_settings()
@@ -130,28 +140,31 @@ def load_settings() -> Dict[str, Any]:
         print(f"{ERROR_LOAD_SETTINGS}: {str(e)}")
         return get_default_settings()
 
+
 def get_default_settings() -> Dict[str, Any]:
     """Get default settings."""
     return {
         "selected_model": DEFAULT_MODEL,
         "iterations": DEFAULT_ITERATIONS,
         "patience": DEFAULT_PATIENCE,
-        "use_cache": DEFAULT_USE_CACHE
+        "use_cache": DEFAULT_USE_CACHE,
     }
 
+
 def save_input_history(history: List[str]) -> None:
     """Save input history to JSON file."""
     try:
-        with open(HISTORY_FILE, 'w') as f:
+        with open(HISTORY_FILE, "w") as f:
             json.dump(history, f, indent=2)
     except Exception as e:
         print(f"{ERROR_SAVE_HISTORY}: {str(e)}")
 
+
 def load_input_history() -> List[str]:
     """Load input history from JSON file."""
     try:
         if os.path.exists(HISTORY_FILE):
-            with open(HISTORY_FILE, 'r') as f:
+            with open(HISTORY_FILE, "r") as f:
                 history = json.load(f)
                 return history if isinstance(history, list) else []
         else:
@@ -160,33 +173,34 @@ def load_input_history() -> List[str]:
         print(f"{ERROR_LOAD_HISTORY}: {str(e)}")
         return []
 
+
 def add_to_input_history(history: List[str], new_input: str) -> List[str]:
     """
     Add a new input to history, maintaining max size and avoiding duplicates.
-    
+
     Args:
         history: Current history list
         new_input: New input text to add
-    
+
     Returns:
         Updated history list with new input at the beginning
     """
     # Strip whitespace from input
     new_input = new_input.strip()
-    
+
     # Don't add empty strings
     if not new_input:
         return history
-    
+
     # Remove duplicate if it exists
     if new_input in history:
         history.remove(new_input)
-    
+
     # Add to beginning of list
     updated_history = [new_input] + history
-    
+
     # Trim to max size
     if len(updated_history) > MAX_HISTORY_ITEMS:
         updated_history = updated_history[:MAX_HISTORY_ITEMS]
-    
+
     return updated_history
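Note (illustrative only, not part of the diff): the history helper's dedupe-and-trim behaviour, assuming MAX_HISTORY_ITEMS is larger than the three entries used here.

    from agent.utils import add_to_input_history

    history = ["older idea", "first draft"]
    history = add_to_input_history(history, "first draft")   # moved to the front, not duplicated
    history = add_to_input_history(history, "  new idea  ")  # stripped before insertion
    print(history)  # ['new idea', 'first draft', 'older idea']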
file.""" try: if os.path.exists(SETTINGS_FILE): - with open(SETTINGS_FILE, 'r') as f: + with open(SETTINGS_FILE, "r") as f: settings = json.load(f) - return settings if isinstance(settings, dict) else get_default_settings() + return ( + settings if isinstance(settings, dict) else get_default_settings() + ) else: # Return default settings if file doesn't exist default_settings = get_default_settings() @@ -130,28 +140,31 @@ def load_settings() -> Dict[str, Any]: print(f"{ERROR_LOAD_SETTINGS}: {str(e)}") return get_default_settings() + def get_default_settings() -> Dict[str, Any]: """Get default settings.""" return { "selected_model": DEFAULT_MODEL, "iterations": DEFAULT_ITERATIONS, "patience": DEFAULT_PATIENCE, - "use_cache": DEFAULT_USE_CACHE + "use_cache": DEFAULT_USE_CACHE, } + def save_input_history(history: List[str]) -> None: """Save input history to JSON file.""" try: - with open(HISTORY_FILE, 'w') as f: + with open(HISTORY_FILE, "w") as f: json.dump(history, f, indent=2) except Exception as e: print(f"{ERROR_SAVE_HISTORY}: {str(e)}") + def load_input_history() -> List[str]: """Load input history from JSON file.""" try: if os.path.exists(HISTORY_FILE): - with open(HISTORY_FILE, 'r') as f: + with open(HISTORY_FILE, "r") as f: history = json.load(f) return history if isinstance(history, list) else [] else: @@ -160,33 +173,34 @@ def load_input_history() -> List[str]: print(f"{ERROR_LOAD_HISTORY}: {str(e)}") return [] + def add_to_input_history(history: List[str], new_input: str) -> List[str]: """ Add a new input to history, maintaining max size and avoiding duplicates. - + Args: history: Current history list new_input: New input text to add - + Returns: Updated history list with new input at the beginning """ # Strip whitespace from input new_input = new_input.strip() - + # Don't add empty strings if not new_input: return history - + # Remove duplicate if it exists if new_input in history: history.remove(new_input) - + # Add to beginning of list updated_history = [new_input] + history - + # Trim to max size if len(updated_history) > MAX_HISTORY_ITEMS: updated_history = updated_history[:MAX_HISTORY_ITEMS] - + return updated_history diff --git a/auto_classes.json b/auto_classes.json index 1662aca..dde6163 100644 --- a/auto_classes.json +++ b/auto_classes.json @@ -1,4 +1,4 @@ { - "AutoConfig": "agent.agent.TweetOptimizerConfig", - "AutoAgent": "agent.agent.TweetOptimizerAgent" + "AutoConfig": "agent.index.TweetOptimizerConfig", + "AutoAgent": "agent.index.TweetOptimizerAgent" } \ No newline at end of file diff --git a/main.py b/main.py index f2aa568..90b10eb 100644 --- a/main.py +++ b/main.py @@ -1,4 +1,5 @@ -from agent.agent import TweetOptimizerAgent, TweetOptimizerConfig +from agent.index import TweetOptimizerAgent, TweetOptimizerConfig + def main(): # create agent with default config @@ -16,7 +17,7 @@ def main(): results = tweet_optimizer( input_text="Anthropic added a new OSS model on HuggingFace.", iterations=10, # Reduced for testing - patience=8 + patience=8, ) print(f"Initial text: {results['initial_text']}") print(f"Final tweet: {results['final_tweet']}") @@ -31,19 +32,20 @@ def main(): print("\n=== Push to Hub ===") try: tweet_optimizer.push_to_hub( - "farouk1/tweet-optimizer-v2", - commit_message="Complete Migration", - with_code=True - ) + "farouk1/tweet-optimizer-v2", + commit_message="Complete Migration", + with_code=True, + ) print("Successfully pushed to hub!") except Exception as e: print(f"Error pushing to hub: {e}") - + print("\n=== Agent Configuration ===") 
print(f"Model: {config.lm}") print(f"Categories: {config.categories}") print(f"Max iterations: {config.max_iterations}") print(f"Patience: {config.patience}") + if __name__ == "__main__": main()