Improve LLM context with full timestamped channel history

Send the last ~8 messages from all users (not just other users) as a multi-line chat log with relative timestamps, so the LLM can better understand conversation flow and escalation patterns.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -657,23 +657,54 @@ class SentimentCog(commands.Cog):
|
||||
ch_id = message.channel.id
|
||||
if ch_id not in self._channel_history:
|
||||
max_ctx = self.bot.config.get("sentiment", {}).get(
|
||||
"context_messages", 3
|
||||
"context_messages", 8
|
||||
)
|
||||
self._channel_history[ch_id] = deque(maxlen=max_ctx + 1)
|
||||
self._channel_history[ch_id] = deque(maxlen=max_ctx)
|
||||
self._channel_history[ch_id].append(
|
||||
(message.author.display_name, message.content)
|
||||
(message.author.display_name, message.content, datetime.now(timezone.utc))
|
||||
)
|
||||
|
||||
def _get_context(self, message: discord.Message) -> str:
    """Build a timestamped chat log from recent channel messages.

    Excludes messages currently buffered (debounced) for this
    user+channel pair — those appear in the TARGET MESSAGE section
    of the prompt instead, so including them here would double-count.

    Returns a newline-separated log of ``[~Ns ago] name: content``
    lines, or the literal string ``"(no prior context)"`` when there
    is nothing to show.
    """
    ch_id = message.channel.id
    history = self._channel_history.get(ch_id, deque())
    if not history:
        return "(no prior context)"

    now = datetime.now(timezone.utc)

    # Messages still sitting in the current debounce batch for this
    # user+channel must be skipped — they are the TARGET MESSAGE.
    batch_key = (ch_id, message.author.id)
    batch_msgs = self._message_buffer.get(batch_key, [])
    # (author, content) pairs give O(1) membership checks below.
    # NOTE(review): matching on (name, content) also drops an identical
    # message sent by a same-named user outside the batch — acceptable
    # for context purposes, but matching on message IDs would be exact.
    batch_set = {(m.author.display_name, m.content) for m in batch_msgs}

    lines = []
    for name, content, ts in history:
        if (name, content) in batch_set:
            continue
        rel = self._format_relative_time(now - ts)
        lines.append(f"[{rel}] {name}: {content}")

    # Every history entry may belong to the current batch; fall back
    # to the same sentinel the empty-history path returns.
    if not lines:
        return "(no prior context)"
    return "\n".join(lines)
|
||||
|
||||
@staticmethod
def _format_relative_time(delta: timedelta) -> str:
    """Render a timedelta as a coarse relative-time label.

    Granularity steps from seconds to minutes to hours, always
    truncating (floor division) rather than rounding up, e.g.
    119 seconds -> "~1m ago".
    """
    secs = int(delta.total_seconds())
    if secs >= 3600:
        return f"~{secs // 3600}h ago"
    if secs >= 60:
        return f"~{secs // 60}m ago"
    return f"~{secs}s ago"
|
||||
|
||||
async def _log_analysis(
|
||||
self, message: discord.Message, score: float, drama_score: float,
|
||||
|
||||
@@ -14,10 +14,10 @@ sentiment:
|
||||
mute_threshold: 0.75
|
||||
spike_warning_threshold: 0.5 # Single message score that triggers instant warning
|
||||
spike_mute_threshold: 0.8 # Single message score that triggers instant mute
|
||||
context_messages: 3 # Number of previous messages to include as context
|
||||
context_messages: 8 # Number of previous messages to include as context
|
||||
rolling_window_size: 10 # Number of messages to track per user
|
||||
rolling_window_minutes: 15 # Time window for tracking
|
||||
batch_window_seconds: 3 # Wait this long for more messages before analyzing (debounce)
|
||||
batch_window_seconds: 10 # Wait this long for more messages before analyzing (debounce)
|
||||
escalation_threshold: 0.25 # Triage toxicity score that triggers re-analysis with heavy model
|
||||
|
||||
game_channels:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
You are a Discord chat moderator AI for a gaming server. You will be given a TARGET MESSAGE to analyze, plus recent channel context for background.
|
||||
You are a Discord chat moderator AI for a gaming server. You will be given a TARGET MESSAGE to analyze, plus recent channel messages for background.
|
||||
|
||||
CRITICAL: Only score the TARGET MESSAGE. The context is ONLY for understanding tone and conversation flow. Do NOT score the context messages — they belong to other users and are already being analyzed separately.
|
||||
CRITICAL: Only score the TARGET MESSAGE. The context section contains recent messages from ALL users in the channel (including the target user's own prior messages) — it is ONLY for understanding tone, conversation flow, and escalation patterns. Do NOT score the context messages — they are already being analyzed separately.
|
||||
|
||||
CONTEXT — This is a friend group who use crude nicknames (e.g. "tits" is someone's nickname). A nickname alone is NOT toxic. However, you must still flag genuinely aggressive language.
|
||||
|
||||
|
||||
@@ -114,7 +114,7 @@ class LLMClient:
|
||||
self, message: str, context: str = "", user_notes: str = "",
|
||||
channel_context: str = "",
|
||||
) -> dict | None:
|
||||
user_content = f"=== CONTEXT (other users' recent messages, for background only) ===\n{context}\n\n"
|
||||
user_content = f"=== RECENT CHANNEL MESSAGES (for background context only) ===\n{context}\n\n"
|
||||
if user_notes:
|
||||
user_content += f"=== NOTES ABOUT THIS USER (from prior analysis) ===\n{user_notes}\n\n"
|
||||
if channel_context:
|
||||
@@ -302,7 +302,7 @@ class LLMClient:
|
||||
channel_context: str = "",
|
||||
) -> tuple[str, dict | None]:
|
||||
"""Return the raw LLM response string AND parsed result for /bcs-test (single LLM call)."""
|
||||
user_content = f"=== CONTEXT (other users' recent messages, for background only) ===\n{context}\n\n"
|
||||
user_content = f"=== RECENT CHANNEL MESSAGES (for background context only) ===\n{context}\n\n"
|
||||
if user_notes:
|
||||
user_content += f"=== NOTES ABOUT THIS USER (from prior analysis) ===\n{user_notes}\n\n"
|
||||
if channel_context:
|
||||
|
||||
Reference in New Issue
Block a user