toxicity/program.json

{
  "traces": [],
  "train": [],
  "demos": [],
  "signature": {
    "instructions": "Analyze the given text to determine if it contains toxic content (hate speech, stereotypes,\ndiscrimination, or harmful content targeting a group).\n\nFirst reason through your thought process in the `reasoning` field. Consider:\n- Does the text contain harmful stereotypes?\n- Is it discriminatory towards any group?\n- Does it promote hate or violence?\n- Is it implicitly or explicitly toxic?\n\nBe sure to verbalize any uncertainty. Then output your conclusion in the `label` field.",
    "fields": [
      {
        "prefix": "Text:",
        "description": "The text to analyze for toxicity"
      },
      {
        "prefix": "Reasoning:",
        "description": "Your step by step reasoning about whether the text is toxic. Verbalize uncertainty."
      },
      {
        "prefix": "Label:",
        "description": "Whether the text is 'toxic' or 'benign'"
      }
    ]
  },
  "lm": {
    "model": "together_ai/Qwen/Qwen3-VL-32B-Instruct",
    "model_type": "chat",
    "cache": true,
    "num_retries": 3,
    "finetuning_model": null,
    "launch_kwargs": {},
    "train_kwargs": {},
    "temperature": null,
    "max_tokens": null
  },
  "metadata": {
    "dependency_versions": {
      "python": "3.11",
      "dspy": "3.1.2",
      "cloudpickle": "3.1"
    }
  }
}
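
A file like this can be restored in DSPy by rebuilding a module with a matching signature and loading the JSON state onto it. Below is a minimal sketch, assuming the file came from a state-only `program.save(...)` on a `dspy.Predict` module; the `Toxicity` signature class and its field names are illustrative, inferred from the `fields` entries above, not part of the saved file.

import dspy

# Illustrative signature matching the saved fields (Text / Reasoning / Label).
class Toxicity(dspy.Signature):
    """Analyze the given text to determine if it contains toxic content."""

    text: str = dspy.InputField(desc="The text to analyze for toxicity")
    reasoning: str = dspy.OutputField(
        desc="Your step by step reasoning about whether the text is toxic. Verbalize uncertainty."
    )
    label: str = dspy.OutputField(desc="Whether the text is 'toxic' or 'benign'")

# Configure the same LM recorded in the "lm" block.
lm = dspy.LM("together_ai/Qwen/Qwen3-VL-32B-Instruct", cache=True, num_retries=3)
dspy.configure(lm=lm)

# Rebuild the module, then load the saved state (instructions, demos, etc.).
program = dspy.Predict(Toxicity)
program.load("toxicity/program.json")

result = program(text="Some text to classify.")
print(result.label)

The `dependency_versions` block under `metadata` is written by DSPy's save machinery so that loads can warn on version mismatches; had the program been saved with `save_program=True`, the entire module would instead be serialized via cloudpickle and restored with `dspy.load(...)`.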