Improve help command, model attribution, and model config
Lint / Shell (shellcheck) (push) Successful in 10s
Lint / JS (eslint) (push) Successful in 8s
Lint / Python (ruff) (push) Successful in 4s
Lint / Python deps (pip-audit) (push) Successful in 1m25s
Lint / Secret scan (gitleaks) (push) Successful in 5s

help: grouped into AI / Games / Random / Server categories with Option B
purple header; descriptions auto-pulled from the command registry.

Model attribution: added _MODEL_DISPLAY map so 'via lotusllm' becomes
'via Llama 3.2 1B', 'via gemma3:latest' becomes 'via Gemma 3 4B', etc.

Config: OLLAMA_MODEL switched from lotusllm to llama3.2:latest; added
BALL_MODEL (sadiq-bd/llama3.2-1b-uncensored) as a dedicated config var
for the 8ball so it stays on the uncensored model without affecting fortune.

Descriptions: fortune -> AI-generated fortune cookie; ask -> Ask LotusBot;
health -> Bot health & stats (admin only).

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-20 19:27:14 -04:00
parent bb5307c06b
commit 43903af22e
2 changed files with 54 additions and 17 deletions
+52 -16
View File
@@ -15,13 +15,37 @@ from utils import send_text, send_html, send_reaction, sanitize_input
from wordle import handle_wordle
from config import (
MAX_DICE_SIDES, MAX_DICE_COUNT, BOT_PREFIX, ADMIN_USERS,
OLLAMA_URL, OLLAMA_MODEL, ASK_MODEL, COOLDOWN_SECONDS,
OLLAMA_URL, OLLAMA_MODEL, BALL_MODEL, ASK_MODEL, COOLDOWN_SECONDS,
MINECRAFT_RCON_HOST, MINECRAFT_RCON_PORT, MINECRAFT_RCON_PASSWORD,
RCON_TIMEOUT, MIN_USERNAME_LENGTH, MAX_USERNAME_LENGTH,
)
logger = logging.getLogger("matrixbot")
# Human-readable display names for Ollama model tags
_MODEL_DISPLAY = {
"lotusllm": "Llama 3.2 1B",
"lotusllm:latest": "Llama 3.2 1B",
"lotusllmben:latest": "Llama 2 7B",
"sadiq-bd/llama3.2-1b-uncensored:latest": "Llama 3.2 1B",
"llama3.2:latest": "Llama 3.2 3B",
"llama3.2:1b": "Llama 3.2 1B",
"llama3.3:latest": "Llama 3.3 70B",
"gemma3:latest": "Gemma 3 4B",
"gemma3:1b": "Gemma 3 1B",
"huihui_ai/gemma3-abliterated:1b": "Gemma 3 1B",
"phi4-mini:latest": "Phi-4 Mini",
"deepseek-r1:latest": "DeepSeek R1",
"codellama:latest": "Code Llama 7B",
"dolphin-phi:latest": "Dolphin Phi",
}
def _model_label(tag: str) -> str:
"""Return a friendly display name for an Ollama model tag."""
return _MODEL_DISPLAY.get(tag, tag)
# Registry: name -> (handler, description)
COMMANDS = {}
@@ -85,15 +109,27 @@ def check_cooldown(sender: str, cmd_name: str, seconds: int = COOLDOWN_SECONDS)
@command("help", "Show all available commands")
async def cmd_help(client: AsyncClient, room_id: str, sender: str, args: str):
    """Send the command list, grouped by category, as plain text + HTML.

    Descriptions come straight from the COMMANDS registry so the help
    output can never drift from the actual @command registrations.
    Category entries that are not registered are silently skipped.
    """
    # Display order and grouping for the help output.
    categories = [
        ("🤖 AI", ["ask", "fortune", "8ball"]),
        ("🎮 Games", ["wordle", "trivia", "rps", "poll"]),
        ("🎲 Random", ["flip", "roll", "random", "champion", "agent"]),
        ("🖥️ Server", ["minecraft", "ping", "health"]),
    ]
    plain_lines = ["LotusBot Commands"]
    html_parts = ['<font color="#a855f7"><strong>🌸 LotusBot — Commands</strong></font>']
    for cat_name, cmd_names in categories:
        plain_lines.append(f"\n{cat_name}")
        html_parts.append(f"<br><strong>{cat_name}</strong><ul>")
        for name in cmd_names:
            if name in COMMANDS:
                _, desc = COMMANDS[name]
                # Fix: plain text previously ran name and description
                # together with no separator; mirror the HTML " — ".
                plain_lines.append(f"  {BOT_PREFIX}{name} — {desc}")
                html_parts.append(f"<li><strong>{BOT_PREFIX}{name}</strong> — {desc}</li>")
        html_parts.append("</ul>")
    await send_html(client, room_id, "\n".join(plain_lines), "".join(html_parts))
@command("ping", "Check bot latency")
@@ -296,7 +332,7 @@ async def cmd_8ball(client: AsyncClient, room_id: str, sender: str, args: str):
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.post(
f"{OLLAMA_URL}/api/generate",
json={"model": "sadiq-bd/llama3.2-1b-uncensored:latest", "prompt": prompt, "stream": False},
json={"model": BALL_MODEL, "prompt": prompt, "stream": False},
) as response:
data = await response.json()
raw = _normalize_caps(data.get("response", "").strip())
@@ -312,7 +348,7 @@ async def cmd_8ball(client: AsyncClient, room_id: str, sender: str, args: str):
html = (
f'<font color="{_answer_color}"><strong>🎱 {answer}</strong></font><br>'
f'<sup><em>{args}</em></sup><br>'
f'<sup><em>via {OLLAMA_MODEL}</em></sup>'
f'<sup><em>via {_model_label(BALL_MODEL)}</em></sup>'
)
await send_html(client, room_id, plain, html)
return
@@ -424,7 +460,7 @@ _FORTUNE_FALLBACKS = [
]
@command("fortune", "Get a fortune cookie message")
@command("fortune", "AI-generated fortune cookie")
async def cmd_fortune(client: AsyncClient, room_id: str, sender: str, args: str):
fortune = None
try:
@@ -464,7 +500,7 @@ async def cmd_fortune(client: AsyncClient, room_id: str, sender: str, args: str)
html = (
f'<font color="#14b8a6"><strong>🥠 Fortune Cookie</strong></font><br>'
f'<blockquote><em>{fortune}</em></blockquote>'
+ (f'<sup><em>via {OLLAMA_MODEL}</em></sup>' if from_llm else "")
+ (f'<sup><em>via {_model_label(OLLAMA_MODEL)}</em></sup>' if from_llm else "")
)
await send_html(client, room_id, plain, html)
@@ -768,7 +804,7 @@ async def cmd_trivia(client: AsyncClient, room_id: str, sender: str, args: str):
f'<em>{question["q"]}</em><br>'
f'<ul>{options_html}</ul>'
f'React with A/B/C/D — answer revealed in 30s!'
+ (f'<br><sup><em>via {ASK_MODEL}</em></sup>' if from_llm else "")
+ (f'<br><sup><em>via {_model_label(ASK_MODEL)}</em></sup>' if from_llm else "")
)
resp = await send_html(client, room_id, plain, html)
@@ -792,7 +828,7 @@ async def cmd_trivia(client: AsyncClient, room_id: str, sender: str, args: str):
# ==================== INTEGRATIONS ====================
@command("ask", "Ask Lotus LLM a question (2min cooldown)")
@command("ask", "Ask LotusBot a question (2min cooldown)")
async def cmd_ask(client: AsyncClient, room_id: str, sender: str, args: str):
if not args:
await send_text(client, room_id, f"Usage: {BOT_PREFIX}ask <question>")
@@ -845,7 +881,7 @@ async def cmd_ask(client: AsyncClient, room_id: str, sender: str, args: str):
f'<font color="#a855f7"><strong>🤖 LotusBot</strong></font><br>'
f'<em>Q: {question}</em><br>'
f'<blockquote>{full_response}</blockquote>'
f'<sup><em>via {ASK_MODEL}</em></sup>'
f'<sup><em>via {_model_label(ASK_MODEL)}</em></sup>'
)
await send_html(client, room_id, plain, html)
except asyncio.TimeoutError:
@@ -907,7 +943,7 @@ async def cmd_minecraft(client: AsyncClient, room_id: str, sender: str, args: st
# ==================== ADMIN COMMANDS ====================
@command("health", "Bot status and health (admin only)")
@command("health", "Bot health & stats (admin only)")
async def cmd_health(client: AsyncClient, room_id: str, sender: str, args: str):
if sender not in ADMIN_USERS:
await send_text(client, room_id, "You don't have permission to use this command.")
+2 -1
View File
@@ -18,7 +18,8 @@ LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
# Integrations
OLLAMA_URL = os.getenv("OLLAMA_URL", "http://10.10.10.157:11434")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "lotusllm")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3.2:latest")
BALL_MODEL = os.getenv("BALL_MODEL", "sadiq-bd/llama3.2-1b-uncensored:latest")
ASK_MODEL = os.getenv("ASK_MODEL", "gemma3:latest")
MINECRAFT_RCON_HOST = os.getenv("MINECRAFT_RCON_HOST", "10.10.10.67")
MINECRAFT_RCON_PORT = int(os.getenv("MINECRAFT_RCON_PORT", "25575"))