Fix repetitive roast responses with anti-repetition mechanisms
Add frequency_penalty (0.8) and presence_penalty (0.6) to LLM chat
calls to discourage repeated tokens. Inject the bot's last 5 responses
into the system prompt so the model knows what to avoid. Strengthen
the roast prompt with explicit anti-repetition rules and remove example
lines the model was copying verbatim ("Real ___ energy", etc.).
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
cogs/chat.py — 13 lines changed
@@ -188,10 +188,17 @@ class ChatCog(commands.Cog):
|
||||
|
||||
active_prompt = self._get_active_prompt()
|
||||
|
||||
# Collect recent bot replies so the LLM can avoid repeating itself
|
||||
recent_bot_replies = [
|
||||
m["content"][:150] for m in self._chat_history[ch_id]
|
||||
if m["role"] == "assistant"
|
||||
][-5:]
|
||||
|
||||
response = await self.bot.llm.chat(
|
||||
list(self._chat_history[ch_id]),
|
||||
active_prompt,
|
||||
on_first_token=start_typing,
|
||||
recent_bot_replies=recent_bot_replies,
|
||||
)
|
||||
|
||||
if typing_ctx:
|
||||
@@ -302,9 +309,15 @@ class ChatCog(commands.Cog):
|
||||
self._chat_history[ch_id].append({"role": "user", "content": context})
|
||||
active_prompt = self._get_active_prompt()
|
||||
|
||||
recent_bot_replies = [
|
||||
m["content"][:150] for m in self._chat_history[ch_id]
|
||||
if m["role"] == "assistant"
|
||||
][-5:]
|
||||
|
||||
response = await self.bot.llm.chat(
|
||||
list(self._chat_history[ch_id]),
|
||||
active_prompt,
|
||||
recent_bot_replies=recent_bot_replies,
|
||||
)
|
||||
|
||||
# Strip leaked metadata
|
||||
|
||||
Reference in New Issue
Block a user