toxicity/config.json
{
  "model": null,
  "signature": {
    "description": "Analyze the given text to determine if it contains toxic content (hate speech, stereotypes,\ndiscrimination, or harmful content targeting a group).\n\nFirst reason through your thought process in the `reasoning` field. Consider:\n- Does the text contain harmful stereotypes?\n- Is it discriminatory towards any group?\n- Does it promote hate or violence?\n- Is it implicitly or explicitly toxic?\n\nBe sure to verbalize any uncertainty. Then output your conclusion in the `label` field.",
    "properties": {
      "text": {
        "__dspy_field_type": "input",
        "desc": "The text to analyze for toxicity",
        "prefix": "Text:",
        "title": "Text",
        "type": "string"
      },
      "reasoning": {
        "__dspy_field_type": "output",
        "desc": "Your step by step reasoning about whether the text is toxic. Verbalize uncertainty.",
        "prefix": "Reasoning:",
        "title": "Reasoning",
        "type": "string"
      },
      "label": {
        "__dspy_field_type": "output",
        "desc": "Whether the text is 'toxic' or 'benign'",
        "enum": [
          "toxic",
          "benign"
        ],
        "prefix": "Label:",
        "title": "Label",
        "type": "string"
      }
    },
    "required": [
      "text",
      "reasoning",
      "label"
    ],
    "title": "Toxicity",
    "type": "object"
  }
}
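For reference, here is a minimal sketch of the same signature written directly in Python with DSPy, rather than loaded from this JSON. The class name, field names, field descriptions, and instructions mirror the config above; the model string and the example input are placeholders, not part of the config.

```python
from typing import Literal

import dspy


class Toxicity(dspy.Signature):
    """Analyze the given text to determine if it contains toxic content (hate speech, stereotypes,
    discrimination, or harmful content targeting a group).

    First reason through your thought process in the `reasoning` field. Consider:
    - Does the text contain harmful stereotypes?
    - Is it discriminatory towards any group?
    - Does it promote hate or violence?
    - Is it implicitly or explicitly toxic?

    Be sure to verbalize any uncertainty. Then output your conclusion in the `label` field.
    """

    text: str = dspy.InputField(desc="The text to analyze for toxicity")
    reasoning: str = dspy.OutputField(
        desc="Your step by step reasoning about whether the text is toxic. Verbalize uncertainty."
    )
    # The enum in the config maps naturally onto a Literal type annotation.
    label: Literal["toxic", "benign"] = dspy.OutputField(
        desc="Whether the text is 'toxic' or 'benign'"
    )


# Placeholder model string; swap in whatever LM you actually use.
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))

classify = dspy.Predict(Toxicity)
result = classify(text="Example input to screen.")
print(result.reasoning)
print(result.label)
```

Plain `dspy.Predict` is enough here because the signature already declares its own `reasoning` output field; wrapping it in `dspy.ChainOfThought` would inject a second, redundant reasoning field.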