Add LLM request/response logging to database
Log every LLM call (analysis, chat, image, raw_analyze) to a new LlmLog table with request type, model, token counts, duration, success/failure, and truncated request/response payloads. Enables debugging prompt issues and tracking usage.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
bot.py | 10 additions, 10 deletions (1 file changed)
@@ -65,15 +65,18 @@ class BCSBot(commands.Bot):
         self.config = config

+        # Database (initialized async in setup_hook)
+        self.db = Database()
+
         # LLM clients (OpenAI-compatible — works with llama.cpp, Ollama, or OpenAI)
         llm_base_url = os.getenv("LLM_BASE_URL", "http://athena.lan:11434")
         llm_model = os.getenv("LLM_MODEL", "Qwen3-VL-32B-Thinking-Q8_0")
         llm_api_key = os.getenv("LLM_API_KEY", "not-needed")
-        self.llm = LLMClient(llm_base_url, llm_model, llm_api_key)
+        self.llm = LLMClient(llm_base_url, llm_model, llm_api_key, db=self.db)

         # Heavy/escalation model for re-analysis, chat, and manual commands
         llm_heavy_model = os.getenv("LLM_ESCALATION_MODEL", llm_model)
-        self.llm_heavy = LLMClient(llm_base_url, llm_heavy_model, llm_api_key)
+        self.llm_heavy = LLMClient(llm_base_url, llm_heavy_model, llm_api_key, db=self.db)

         # Drama tracker
         sentiment = config.get("sentiment", {})
@@ -84,9 +87,6 @@ class BCSBot(commands.Bot):
             offense_reset_minutes=timeouts.get("offense_reset_minutes", 120),
         )

-        # Database (initialized async in setup_hook)
-        self.db = Database()
-
     async def setup_hook(self):
         # Initialize database and hydrate DramaTracker
         db_ok = await self.db.init()
||||
Reference in New Issue
Block a user