{
  "model": "gpt-4o",
  "scenario": "gpqa",
  "optimizer": "MIPROv2",
  "api_base": null,
  "prompt_model": "gpt-4o",
  "prompt_api_base": null,
  "max_tokens": null,
  "temperature": null,
  "max_bootstrapped_demos": 3,
  "max_labeled_demos": 3,
  "num_threads": 16,
  "val_size": null
}