feat: add pick_reaction method to LLMClient
Lightweight LLM call that picks a contextual emoji reaction for a Discord message. Uses temperature 0.9 for variety and a 16-token completion cap, and validates that the response is a short emoji token, returning None otherwise.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -743,6 +743,61 @@ class LLMClient:
|
||||
self._log_llm("classify_intent", elapsed, False, message_text[:200], error=str(e))
|
||||
return "chat"
|
||||
|
||||
async def pick_reaction(self, message_text: str, channel_name: str) -> str | None:
    """Pick a contextual emoji reaction for a Discord message.

    Makes a lightweight LLM call with a small completion budget (16 tokens,
    since the expected reply is a single emoji or the literal word NONE) and,
    when the model supports it, temperature 0.9 for variety.

    Args:
        message_text: The Discord message to (maybe) react to.
        channel_name: Name of the channel the message was posted in,
            passed to the model as context.

    Returns:
        An emoji string to react with, or None if no reaction is
        appropriate or the API call failed.
    """
    prompt = (
        "You are a lurker in a Discord gaming server. "
        "Given a message and its channel, decide if it deserves a reaction emoji.\n\n"
        "Available reactions:\n"
        "\U0001f480 = funny/dead\n"
        "\U0001f602 = hilarious\n"
        "\U0001f440 = drama/spicy\n"
        "\U0001f525 = impressive\n"
        "\U0001f4af = good take\n"
        "\U0001f62d = sad/tragic\n"
        "\U0001f921 = clown moment\n"
        "\u2764\ufe0f = wholesome\n"
        "\U0001fae1 = respect\n"
        "\U0001f913 = nerd\n"
        "\U0001f974 = drunk/unhinged\n"
        "\U0001f3af = accurate\n\n"
        "Reply with ONLY the emoji, or NONE if the message doesn't warrant a reaction. "
        "Most messages should get NONE — only react when something genuinely stands out."
    )
    t0 = time.monotonic()

    async with self._semaphore:
        try:
            # Only pass temperature when the model accepts it; some reject
            # the parameter outright.
            temp_kwargs = {"temperature": 0.9} if self._supports_temperature else {}
            response = await self._client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": prompt},
                    {"role": "user", "content": f"[#{channel_name}] {message_text}"},
                ],
                **temp_kwargs,
                max_completion_tokens=16,
            )
            elapsed = int((time.monotonic() - t0) * 1000)
            raw = (response.choices[0].message.content or "").strip()
            # Keep only the first whitespace-separated token (split once,
            # not twice).
            parts = raw.split()
            token = parts[0] if parts else ""

            # Reject empty output, an explicit NONE, or anything too long to
            # plausibly be one emoji. NOTE(review): the 7-char cap presumably
            # leaves room for multi-codepoint emoji (variation selectors /
            # ZWJ sequences) — confirm against the reaction set above.
            if not token or token.lower() == "none" or len(token) > 7:
                self._log_llm("pick_reaction", elapsed, True, message_text[:200], "NONE")
                return None

            self._log_llm("pick_reaction", elapsed, True, message_text[:200], token)
            logger.debug("Picked reaction %s for: %s", token, message_text[:80])
            return token
        except Exception as e:
            # Best-effort feature at an API boundary: log the failure and
            # report "no reaction" rather than propagate to the caller.
            elapsed = int((time.monotonic() - t0) * 1000)
            logger.error("Reaction pick error: %s", e)
            self._log_llm("pick_reaction", elapsed, False, message_text[:200], error=str(e))
            return None
|
||||
|
||||
async def extract_memories(
|
||||
self,
|
||||
conversation: list[dict[str, str]],
|
||||
|
||||
Reference in New Issue
Block a user