fix: use escalation model and fall back to permanent memories in migration
- Use LLM_ESCALATION_* env vars for better profile generation - Fall back to joining permanent memories if profile_update is null Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -23,10 +23,11 @@ async def main():
|
||||
print("Database not available.")
|
||||
return
|
||||
|
||||
# Use escalation model for better profile generation
|
||||
llm = LLMClient(
|
||||
base_url=os.getenv("LLM_BASE_URL", ""),
|
||||
model=os.getenv("LLM_MODEL", "gpt-4o-mini"),
|
||||
api_key=os.getenv("LLM_API_KEY", "not-needed"),
|
||||
base_url=os.getenv("LLM_ESCALATION_BASE_URL", os.getenv("LLM_BASE_URL", "")),
|
||||
model=os.getenv("LLM_ESCALATION_MODEL", os.getenv("LLM_MODEL", "gpt-4o-mini")),
|
||||
api_key=os.getenv("LLM_ESCALATION_API_KEY", os.getenv("LLM_API_KEY", "not-needed")),
|
||||
)
|
||||
|
||||
states = await db.load_all_user_states()
|
||||
@@ -52,8 +53,18 @@ async def main():
|
||||
current_profile="",
|
||||
)
|
||||
|
||||
if result and result.get("profile_update"):
|
||||
profile = result["profile_update"]
|
||||
if not result:
|
||||
print(f" LLM returned no result, keeping existing notes.")
|
||||
continue
|
||||
|
||||
# Use profile_update if provided, otherwise build from permanent memories
|
||||
profile = result.get("profile_update")
|
||||
if not profile:
|
||||
permanent = [m["memory"] for m in result.get("memories", []) if m.get("expiration") == "permanent"]
|
||||
if permanent:
|
||||
profile = " ".join(permanent)
|
||||
|
||||
if profile:
|
||||
print(f" New: {profile[:200]}")
|
||||
await db.save_user_state(
|
||||
user_id=state["user_id"],
|
||||
|
||||
Reference in New Issue
Block a user