feat: inject persistent memory context into chat responses

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-26 12:56:02 -05:00
parent 89fabd85da
commit d61e85d928

View File

@@ -3,6 +3,7 @@ import logging
import random
import re
from collections import deque
from datetime import datetime, timezone
from pathlib import Path
import discord
@@ -25,6 +26,52 @@ def _load_prompt(filename: str) -> str:
return _prompt_cache[filename]
# Allowlist of lowercase words treated as conversation "topics" when they
# appear in a message; spans games plus common life subjects. Matched
# set-wise against message tokens in _extract_topic_keywords.
_TOPIC_KEYWORDS = {
"gta", "warzone", "cod", "battlefield", "fortnite", "apex", "valorant",
"minecraft", "roblox", "league", "dota", "overwatch", "destiny", "halo",
"work", "job", "school", "college", "girlfriend", "boyfriend", "wife",
"husband", "dog", "cat", "pet", "car", "music", "movie", "food",
}
# Channel names too generic to carry topic meaning; such channels are not
# added as keywords themselves.
_GENERIC_CHANNELS = {"general", "off-topic", "memes"}
def _extract_topic_keywords(text: str, channel_name: str) -> list[str]:
    """Extract up to five topic keywords from message text and channel name.

    Fixes vs. the naive version:
    - Tokenizes on word-ish characters so trailing punctuation ("gta!",
      "work,") no longer prevents a keyword match.
    - Returns a sorted list so the 5-item cap is deterministic; slicing a
      raw set varies run-to-run under string hash randomization, which made
      topic-memory lookups non-reproducible.
    """
    # Word-ish tokens only; apostrophes/hyphens kept for words like "off-topic".
    tokens = set(re.findall(r"[a-z0-9'\-]+", text.lower()))
    words = tokens & _TOPIC_KEYWORDS
    channel = channel_name.lower()
    # A specific channel name (e.g. "gta-chat") is itself a topic signal;
    # generic channels like "general" add no information.
    if channel not in _GENERIC_CHANNELS:
        words.add(channel)
    return sorted(words)[:5]
def _format_relative_time(dt: datetime) -> str:
"""Return a human-readable relative time string."""
now = datetime.now(timezone.utc)
# Ensure dt is timezone-aware
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
delta = now - dt
seconds = int(delta.total_seconds())
if seconds < 60:
return "just now"
minutes = seconds // 60
if minutes < 60:
return f"{minutes}m ago"
hours = minutes // 60
if hours < 24:
return f"{hours}h ago"
days = hours // 24
if days == 1:
return "yesterday"
if days < 7:
return f"{days} days ago"
weeks = days // 7
if weeks < 5:
return f"{weeks}w ago"
months = days // 30
return f"{months}mo ago"
class ChatCog(commands.Cog):
def __init__(self, bot: commands.Bot):
    # Shared bot instance; later methods read bot.db and bot.drama_tracker.
    self.bot = bot
@@ -39,6 +86,43 @@ class ChatCog(commands.Cog):
prompt_file = mode_config.get("prompt_file", "chat_personality.txt")
return _load_prompt(prompt_file)
async def _build_memory_context(self, user_id: int, message_text: str, channel_name: str) -> str:
    """Build a layered memory context block for the chat prompt.

    Layers, each emitted only when non-empty:
      1. "Profile:"  — persistent user notes from the drama tracker.
      2. "Recent:"   — the user's 5 most recent stored memories.
      3. "Relevant:" — up to 5 topic-matched memories, deduplicated
         against the Recent layer by exact memory text.

    Args:
        user_id: Discord user id the memories belong to.
        message_text: Current message, mined for topic keywords.
        channel_name: Channel name, also mined for topic keywords.

    Returns:
        A "[What you know about this person:]" block, or "" when no
        layer produced anything.
    """

    def _render(memories) -> str:
        # Shared "memory text (relative age)" formatting for layers 2 and 3.
        return " | ".join(
            f"{mem['memory']} ({_format_relative_time(mem['created_at'])})"
            for mem in memories
        )

    lines = []

    # Layer 1: Profile (always checked).
    profile = self.bot.drama_tracker.get_user_notes(user_id)
    if profile:
        lines.append(f"Profile: {profile}")

    # Layer 2: Recent memories (last 5).
    recent_memories = await self.bot.db.get_recent_memories(user_id, limit=5)
    if recent_memories:
        lines.append("Recent: " + _render(recent_memories))

    # Layer 3: Topic-matched memories, deduplicated against layer 2.
    keywords = _extract_topic_keywords(message_text, channel_name)
    if keywords:
        topic_memories = await self.bot.db.get_memories_by_topics(user_id, keywords, limit=5)
        recent_texts = {mem["memory"] for mem in recent_memories} if recent_memories else set()
        unique_topic = [mem for mem in topic_memories if mem["memory"] not in recent_texts]
        if unique_topic:
            lines.append("Relevant: " + _render(unique_topic))

    if not lines:
        return ""
    return "[What you know about this person:]\n" + "\n".join(lines)
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.author.bot:
@@ -176,11 +260,13 @@ class ChatCog(commands.Cog):
context_parts.append(f"{user_data.offense_count} offense(s)")
score_context = f"[Server context: {message.author.display_name}{', '.join(context_parts)}]"
# Gather user notes and recent messages for richer context
# Gather memory context and recent messages for richer context
extra_context = ""
user_notes = self.bot.drama_tracker.get_user_notes(message.author.id)
if user_notes:
extra_context += f"[Notes about {message.author.display_name}: {user_notes}]\n"
memory_context = await self._build_memory_context(
message.author.id, content, message.channel.name,
)
if memory_context:
extra_context += memory_context + "\n"
# Include mention scan findings if available
if scan_summary: