fix: guard against malformed LLM findings in conversation validation
Filter out non-dict entries from user_findings and handle a non-dict result to prevent 'str' object has no attribute 'setdefault' errors.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -514,7 +514,9 @@ class LLMClient:
|
||||
@staticmethod
|
||||
def _validate_conversation_result(result: dict) -> dict:
|
||||
"""Validate and normalize conversation analysis result."""
|
||||
findings = result.get("user_findings", [])
|
||||
if not isinstance(result, dict):
|
||||
return {"user_findings": [], "conversation_summary": ""}
|
||||
findings = [f for f in result.get("user_findings", []) if isinstance(f, dict)]
|
||||
for finding in findings:
|
||||
finding.setdefault("username", "unknown")
|
||||
score = float(finding.get("toxicity_score", 0.0))
|
||||
|
||||
Reference in New Issue
Block a user