feat: add relevance-gated proactive replies
Replace random-only proactive reply logic with LLM relevance check. The bot now evaluates recent conversation context and user memory before deciding to jump in, then applies reply_chance as a second gate. Bump reply_chance values higher since the relevance filter prevents most irrelevant replies. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
52
cogs/chat.py
52
cogs/chat.py
@@ -226,16 +226,58 @@ class ChatCog(commands.Cog):
|
||||
ch_id = message.channel.id
|
||||
self._messages_since_reply[ch_id] = self._messages_since_reply.get(ch_id, 0) + 1
|
||||
cooldown = self.bot.config.get("modes", {}).get("proactive_cooldown_messages", 5)
|
||||
reply_chance = mode_config.get("reply_chance", 0.0)
|
||||
|
||||
if (
|
||||
self._messages_since_reply[ch_id] >= cooldown
|
||||
and reply_chance > 0
|
||||
and random.random() < reply_chance
|
||||
and message.content and message.content.strip()
|
||||
):
|
||||
should_reply = True
|
||||
is_proactive = True
|
||||
# Gather recent messages for relevance check
|
||||
recent_for_check = []
|
||||
try:
|
||||
async for msg in message.channel.history(limit=5, before=message):
|
||||
if msg.content and msg.content.strip() and not msg.author.bot:
|
||||
recent_for_check.append(
|
||||
f"{msg.author.display_name}: {msg.content[:200]}"
|
||||
)
|
||||
except discord.HTTPException:
|
||||
pass
|
||||
recent_for_check.reverse()
|
||||
recent_for_check.append(
|
||||
f"{message.author.display_name}: {message.content[:200]}"
|
||||
)
|
||||
|
||||
# Build memory context for users in recent messages
|
||||
memory_parts = []
|
||||
seen_users = set()
|
||||
for line in recent_for_check:
|
||||
name = line.split(":")[0]
|
||||
if name not in seen_users and message.guild:
|
||||
seen_users.add(name)
|
||||
member = discord.utils.find(
|
||||
lambda m, n=name: m.display_name == n,
|
||||
message.guild.members,
|
||||
)
|
||||
if member:
|
||||
profile = self.bot.drama_tracker.get_user_notes(member.id)
|
||||
if profile:
|
||||
memory_parts.append(f"{name}: {profile}")
|
||||
|
||||
memory_ctx = "\n".join(memory_parts) if memory_parts else ""
|
||||
|
||||
is_relevant = await self.bot.llm.check_reply_relevance(
|
||||
recent_for_check, memory_ctx,
|
||||
)
|
||||
|
||||
if is_relevant:
|
||||
reply_chance = mode_config.get("reply_chance", 0.0)
|
||||
if reply_chance > 0 and random.random() < reply_chance:
|
||||
should_reply = True
|
||||
is_proactive = True
|
||||
else:
|
||||
# Not relevant — partially reset cooldown so we check again sooner
|
||||
self._messages_since_reply[ch_id] = max(
|
||||
0, self._messages_since_reply[ch_id] - 3
|
||||
)
|
||||
|
||||
if not should_reply:
|
||||
return
|
||||
|
||||
10
config.yaml
10
config.yaml
@@ -83,7 +83,7 @@ modes:
|
||||
description: "Friendly chat participant"
|
||||
prompt_file: "personalities/chat_chatty.txt"
|
||||
proactive_replies: true
|
||||
reply_chance: 0.10
|
||||
reply_chance: 0.40
|
||||
moderation: relaxed
|
||||
relaxed_thresholds:
|
||||
warning_threshold: 0.80
|
||||
@@ -96,7 +96,7 @@ modes:
|
||||
description: "Savage roast mode"
|
||||
prompt_file: "personalities/chat_roast.txt"
|
||||
proactive_replies: true
|
||||
reply_chance: 0.20
|
||||
reply_chance: 0.60
|
||||
moderation: relaxed
|
||||
relaxed_thresholds:
|
||||
warning_threshold: 0.85
|
||||
@@ -109,7 +109,7 @@ modes:
|
||||
description: "Your biggest fan"
|
||||
prompt_file: "personalities/chat_hype.txt"
|
||||
proactive_replies: true
|
||||
reply_chance: 0.15
|
||||
reply_chance: 0.50
|
||||
moderation: relaxed
|
||||
relaxed_thresholds:
|
||||
warning_threshold: 0.80
|
||||
@@ -122,7 +122,7 @@ modes:
|
||||
description: "Had a few too many"
|
||||
prompt_file: "personalities/chat_drunk.txt"
|
||||
proactive_replies: true
|
||||
reply_chance: 0.20
|
||||
reply_chance: 0.60
|
||||
moderation: relaxed
|
||||
relaxed_thresholds:
|
||||
warning_threshold: 0.85
|
||||
@@ -135,7 +135,7 @@ modes:
|
||||
description: "Insufferable grammar nerd mode"
|
||||
prompt_file: "personalities/chat_english_teacher.txt"
|
||||
proactive_replies: true
|
||||
reply_chance: 0.20
|
||||
reply_chance: 0.60
|
||||
moderation: relaxed
|
||||
relaxed_thresholds:
|
||||
warning_threshold: 0.85
|
||||
|
||||
@@ -804,6 +804,63 @@ class LLMClient:
|
||||
self._log_llm("pick_reaction", elapsed, False, message_text[:200], error=str(e))
|
||||
return None
|
||||
|
||||
async def check_reply_relevance(
    self, recent_messages: list[str], memory_context: str = "",
) -> bool:
    """Ask the LLM whether jumping into the current conversation feels natural.

    The recent transcript (last five lines at most) plus optional per-user
    memory context is sent to the model with a YES/NO instruction; any
    failure is logged and treated as "not relevant".

    Returns True when the model signals the chat is worth replying to.
    """
    system_prompt = (
        "You're a regular member of a Discord gaming server. You're reading chat and deciding "
        "whether you'd naturally want to jump in and say something.\n\n"
        "Say YES if:\n"
        "- Someone said something you'd have a strong reaction to\n"
        "- You know something relevant about these people (see memory context)\n"
        "- Someone is wrong or has a hot take you'd want to respond to\n"
        "- The conversation is funny or interesting enough to comment on\n"
        "- Someone mentioned something you have an opinion on\n\n"
        "Say NO if:\n"
        "- It's mundane/boring small talk\n"
        "- You'd have nothing interesting to add\n"
        "- People are just chatting normally and don't need interruption\n\n"
        "Reply with EXACTLY one word: YES or NO."
    )

    # Assemble the user message: optional memory block, then the transcript.
    transcript = "\n".join(recent_messages[-5:])
    sections = []
    if memory_context:
        sections.append(f"{memory_context}\n\n")
    sections.append(f"Recent chat:\n{transcript}")
    payload = "".join(sections)

    started = time.monotonic()

    async with self._semaphore:
        try:
            # Some backends reject a temperature override; only send it
            # when the client is known to support it.
            extra_kwargs = {"temperature": 0.3} if self._supports_temperature else {}
            response = await self._client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": payload[:1000]},
                ],
                max_completion_tokens=16,
                **extra_kwargs,
            )
            elapsed_ms = int((time.monotonic() - started) * 1000)
            verdict = (response.choices[0].message.content or "").strip().lower()
            # Lenient parse: treat any answer containing "yes" as a go-ahead.
            wants_reply = "yes" in verdict
            self._log_llm(
                "check_relevance", elapsed_ms, True,
                payload[:300], verdict,
            )
            logger.debug("Relevance check: %s", verdict)
            return wants_reply
        except Exception as e:
            # Boundary fallback: log the failure and stay quiet rather than crash.
            elapsed_ms = int((time.monotonic() - started) * 1000)
            logger.error("Relevance check error: %s", e)
            self._log_llm("check_relevance", elapsed_ms, False, payload[:300], error=str(e))
            return False
|
||||
|
||||
async def extract_memories(
|
||||
self,
|
||||
conversation: list[dict[str, str]],
|
||||
|
||||
Reference in New Issue
Block a user