diff --git a/matrixbot/commands.py b/matrixbot/commands.py
index de72c40..f012d0c 100644
--- a/matrixbot/commands.py
+++ b/matrixbot/commands.py
@@ -1174,12 +1174,48 @@ async def cmd_trivia(client: AsyncClient, room_id: str, sender: str, args: str):
 
 # ==================== INTEGRATIONS ====================
 
-@command("ask", "Ask LotusBot a question (2min cooldown)")
+# Short aliases users can pass with --model / -m
+_ASK_MODEL_ALIASES: dict[str, str] = {
+    "phi4": "phi4-mini:latest",
+    "phi4-mini": "phi4-mini:latest",
+    "llama": "llama3.2:latest",
+    "llama3": "llama3.2:latest",
+    "llama3-1b": "llama3.2:1b",
+    "gemma": "gemma3:latest",
+    "gemma-1b": "gemma3:1b",
+    "deepseek": "deepseek-r1:latest",
+    "codellama": "codellama:latest",
+    "qwen": "qwen2.5:latest",
+    "dolphin": "dolphin-phi:latest",
+    "creative": "huihui_ai/llama3.2-abliterate:3b",
+    "abliterated": "huihui_ai/llama3.2-abliterate:3b",
+    "uncensored": "llama2-uncensored:latest",
+    "llama2": "llama2-uncensored:latest",
+}
+
+
+@command("ask", "Ask LotusBot a question — optionally pick a model with --model (2min cooldown)")
 async def cmd_ask(client: AsyncClient, room_id: str, sender: str, args: str):
     if not args:
-        await send_text(client, room_id, f"Usage: {BOT_PREFIX}ask <question>")
+        aliases = ", ".join(sorted(_ASK_MODEL_ALIASES))
+        await send_text(client, room_id,
+                        f"Usage: {BOT_PREFIX}ask [--model <name>] <question>\nModels: {aliases}")
         return
 
+    # Parse optional --model / -m flag
+    model = ASK_MODEL
+    model_flag = re.match(r"^(?:--model|-m)\s+(\S+)\s+(.*)", args, re.DOTALL)
+    if model_flag:
+        alias = model_flag.group(1).lower()
+        args = model_flag.group(2).strip()
+        resolved = _ASK_MODEL_ALIASES.get(alias)
+        if not resolved:
+            aliases = ", ".join(sorted(_ASK_MODEL_ALIASES))
+            await send_text(client, room_id,
+                            f"Unknown model '{alias}'. Available: {aliases}")
+            return
+        model = resolved
+
     remaining = check_cooldown(sender, "ask")
     if remaining:
         await send_text(client, room_id, f"Command on cooldown. Try again in {remaining}s.")
@@ -1190,7 +1226,7 @@ async def cmd_ask(client: AsyncClient, room_id: str, sender: str, args: str):
         await send_text(client, room_id, "Please provide a valid question.")
         return
 
-    await send_text(client, room_id, "Thinking...")
+    await send_text(client, room_id, f"Thinking... (via {_model_label(model)})")
 
     try:
         timeout = aiohttp.ClientTimeout(total=120)
@@ -1198,7 +1234,7 @@ async def cmd_ask(client: AsyncClient, room_id: str, sender: str, args: str):
             async with session.post(
                 f"{OLLAMA_URL}/api/chat",
                 json={
-                    "model": ASK_MODEL,
+                    "model": model,
                     "stream": False,
                     "messages": [
                         {
@@ -1227,7 +1263,7 @@ async def cmd_ask(client: AsyncClient, room_id: str, sender: str, args: str):
             f'🤖 LotusBot<br>'
             f'Q: {question}<br>'
             f'<br>{full_response}<br>'
-            f'via {_model_label(ASK_MODEL)}'
+            f'via {_model_label(model)}'
         )
         await send_html(client, room_id, plain, html)
     except asyncio.TimeoutError: