Add 120s timeout to image analysis streaming

The vision model request was hanging indefinitely, freezing the bot.
The streaming loop had no timeout, so if the model never returned
chunks, the bot would wait forever. The request now times out after 2 minutes.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
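
The mechanism behind the fix is asyncio.wait_for: it cancels the awaited
coroutine once the deadline passes and raises asyncio.TimeoutError in the
caller. A minimal illustration (the coroutine name and timings here are
made-up stand-ins, not part of the bot's code):

    import asyncio

    async def stalls_forever():
        # Stands in for a stream that never yields another chunk.
        await asyncio.sleep(10_000)

    async def main():
        try:
            await asyncio.wait_for(stalls_forever(), timeout=0.1)
        except asyncio.TimeoutError:
            print("gave up after 0.1s; the stalled coroutine was cancelled")

    asyncio.run(main())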
2026-02-23 10:37:37 -05:00
parent e1dea84d08
commit 86aacfb84f

@@ -373,6 +373,7 @@ class LLMClient:
         async with self._semaphore:
             try:
+                async def _stream_image():
                     stream = await self._client.chat.completions.create(
                         model=self.model,
                         messages=[
@@ -394,10 +395,17 @@ class LLMClient:
                                 notified = True
                             chunks.append(delta.content)
-                content = "".join(chunks).strip()
+                    return "".join(chunks).strip()
+
+                content = await asyncio.wait_for(_stream_image(), timeout=120)
                 elapsed = int((time.monotonic() - t0) * 1000)
                 self._log_llm("image", elapsed, bool(content), req_json, content or None)
                 return content if content else None
+            except asyncio.TimeoutError:
+                elapsed = int((time.monotonic() - t0) * 1000)
+                logger.error("LLM image analysis timed out after %ds", elapsed // 1000)
+                self._log_llm("image", elapsed, False, req_json, error="Timeout")
+                return None
             except Exception as e:
                 elapsed = int((time.monotonic() - t0) * 1000)
                 logger.error("LLM image analysis error: %s", e)
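
For reference, a self-contained sketch of the resulting pattern: the streaming
loop lives in an inner coroutine and the caller bounds it with asyncio.wait_for.
The fake stream, names, and timings below are illustrative stand-ins for the
real OpenAI-style client, not the bot's actual code.

    import asyncio
    import logging
    import time

    logger = logging.getLogger(__name__)

    async def fake_stream(chunks, delay):
        # Stand-in for the provider's streaming response.
        for chunk in chunks:
            await asyncio.sleep(delay)
            yield chunk

    async def analyze_image(timeout: float = 120.0) -> str | None:
        t0 = time.monotonic()

        async def _stream_image() -> str:
            parts = []
            # The real code iterates the chat.completions stream here.
            async for delta in fake_stream(["a ", "cat ", "on ", "a ", "mat"], 0.01):
                parts.append(delta)
            return "".join(parts).strip()

        try:
            # wait_for cancels _stream_image() once the deadline passes,
            # so a stalled stream can no longer block the caller forever.
            content = await asyncio.wait_for(_stream_image(), timeout=timeout)
            return content or None
        except asyncio.TimeoutError:
            elapsed = int((time.monotonic() - t0) * 1000)
            logger.error("image analysis timed out after %ds", elapsed // 1000)
            return None

    if __name__ == "__main__":
        print(asyncio.run(analyze_image()))              # "a cat on a mat"
        print(asyncio.run(analyze_image(timeout=0.02)))  # None (timed out)

One note on the shape of the change: when wait_for hits the deadline, the
cancellation is raised at the pending await inside the streaming loop, and the
new except asyncio.TimeoutError branch returns None, so the surrounding
async with self._semaphore: block in the real method exits and releases its
slot instead of holding it indefinitely.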