fix: use escalation model and fallback to permanent memories in migration

- Use LLM_ESCALATION_* env vars for better profile generation
- Fall back to joining permanent memories if profile_update is null

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-26 13:14:38 -05:00
parent efe7f901c2
commit b918ba51a8

View File

@@ -23,10 +23,11 @@ async def main():
         print("Database not available.")
         return
+    # Use escalation model for better profile generation
     llm = LLMClient(
-        base_url=os.getenv("LLM_BASE_URL", ""),
-        model=os.getenv("LLM_MODEL", "gpt-4o-mini"),
-        api_key=os.getenv("LLM_API_KEY", "not-needed"),
+        base_url=os.getenv("LLM_ESCALATION_BASE_URL", os.getenv("LLM_BASE_URL", "")),
+        model=os.getenv("LLM_ESCALATION_MODEL", os.getenv("LLM_MODEL", "gpt-4o-mini")),
+        api_key=os.getenv("LLM_ESCALATION_API_KEY", os.getenv("LLM_API_KEY", "not-needed")),
     )
     states = await db.load_all_user_states()
@@ -52,8 +53,18 @@ async def main():
             current_profile="",
         )
-        if result and result.get("profile_update"):
-            profile = result["profile_update"]
+        if not result:
+            print(f"  LLM returned no result, keeping existing notes.")
+            continue
+        # Use profile_update if provided, otherwise build from permanent memories
+        profile = result.get("profile_update")
+        if not profile:
+            permanent = [m["memory"] for m in result.get("memories", []) if m.get("expiration") == "permanent"]
+            if permanent:
+                profile = " ".join(permanent)
+        if profile:
             print(f"  New: {profile[:200]}")
             await db.save_user_state(
                 user_id=state["user_id"],