diff --git a/.claude-settings b/.claude-settings
new file mode 160000
index 0000000..eb757cf
--- /dev/null
+++ b/.claude-settings
@@ -0,0 +1 @@
+Subproject commit eb757cf5237304fc668d42285291ba9ba328ef76
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..c579171
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,64 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+.venv/
+venv/
+ENV/
+env/
+*.egg-info/
+dist/
+build/
+
+# Environment files with secrets
+.env
+.env.*
+!.env.example
+docker/*.env
+!docker/*.env.example
+
+# State and session files
+sessions_state.json
+user_settings.json
+state/
+*.db
+*.sqlite
+
+# Sandboxes and temporary files
+*sandbox*/
+*.log
+*.tmp
+
+# Git
+.git/
+.gitignore
+.gitattributes
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# Documentation (not needed in image)
+README.md
+LICENSE
+docs/
+
+# Testing
+test_*.py
+.pytest_cache/
+.coverage
+htmlcov/
+
+# CI/CD
+.github/
+.gitlab-ci.yml
+
+# Docker
+docker-compose.yml
+Dockerfile
+.dockerignore
diff --git a/.env.example b/.env.example
index bc30678..876b4d8 100644
--- a/.env.example
+++ b/.env.example
@@ -8,10 +8,24 @@ TELEGRAM_BOT_TOKEN=your_telegram_bot_token_here
TELEGRAM_DEFAULT_CHAT_ID=0
# =============================================================================
-# REQUIRED - ElevenLabs API
+# VOICE PROVIDERS - Use ElevenLabs OR OpenAI (or both)
+# At least one must be set. ElevenLabs takes priority if both are set.
+# Override with TTS_PROVIDER=openai or STT_PROVIDER=openai
# =============================================================================
+
+# --- ElevenLabs (optional if OpenAI key is set) ---
ELEVENLABS_API_KEY=your_elevenlabs_api_key_here
+# --- OpenAI (optional if ElevenLabs key is set) ---
+# OPENAI_API_KEY=sk-...
+
+# =============================================================================
+# VOICE PROVIDER SELECTION (Optional - auto-detected from keys if not set)
+# =============================================================================
+# TTS_PROVIDER=elevenlabs # "elevenlabs" or "openai"
+# STT_PROVIDER=elevenlabs # "elevenlabs" or "openai"
+# STT_LANGUAGE= # e.g. "en", "pl" — empty = auto-detect
+
# =============================================================================
# PERSONA CONFIGURATION
# =============================================================================
@@ -22,12 +36,22 @@ PERSONA_NAME=V
# Leave empty for default minimal prompt
SYSTEM_PROMPT_FILE=prompts/v.md
-# ElevenLabs voice ID
+# --- ElevenLabs voice ---
# George: JBFqnCBsd6RMkjVDRZzb (default)
# Daniel: onwK4e9ZLuTAKqWW03F9
# Charlie: IKne3meq5aSn9XLyUdCD
ELEVENLABS_VOICE_ID=JBFqnCBsd6RMkjVDRZzb
+# --- OpenAI voice ---
+# Voices: alloy, ash, ballad, cedar, coral (default), echo, fable,
+# juniper, marin, onyx, nova, sage, shimmer, verse
+# OPENAI_VOICE_ID=coral
+# OPENAI_TTS_MODEL=gpt-4o-mini-tts # or: tts-1, tts-1-hd
+# OPENAI_STT_MODEL=whisper-1 # or: gpt-4o-mini-transcribe, gpt-4o-transcribe
+
+# Speaking style prompt (only for gpt-4o-mini-tts)
+# OPENAI_VOICE_INSTRUCTIONS=Speak as a calm, direct advisor. Clear and natural pace.
+
# =============================================================================
# TOPIC FILTERING (Optional)
# =============================================================================
@@ -39,10 +63,11 @@ TELEGRAM_TOPIC_ID=
# DIRECTORIES
# =============================================================================
# Directory for Claude to read files from
-CLAUDE_WORKING_DIR=/home/dev
+CLAUDE_WORKING_DIR=/home/youruser
# Sandbox directory - Claude can write/execute here
-CLAUDE_SANDBOX_DIR=/home/dev/claude-voice-sandbox
+CLAUDE_SANDBOX_DIR=/home/youruser/claude-voice-sandbox
+CLAUDE_SETTINGS_FILE=settings.json
# Optional: Settings file for Claude permissions (restricts Edit/Write to sandbox)
# CLAUDE_SETTINGS_FILE=/path/to/settings.json
diff --git a/.gitignore b/.gitignore
index b6989f5..67b5ed1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,6 +18,7 @@ bot.log
# State files (user-specific, not for repo)
sessions_state.json
user_settings.json
+settings.json
# Test artifacts
.coverage
@@ -34,3 +35,5 @@ htmlcov/
.vscode/
*.swp
*.swo
+credentials.json
+.superpowers/
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..fc8698c
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule ".claude-settings"]
+ path = .claude-settings
+ url = git@github.com:ToruAI/toru-claude-settings.git
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index da4b01a..7473ae8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,8 +6,8 @@ Thanks for your interest in contributing! This document outlines how to get star
1. Clone the repository:
```bash
-git clone https://github.com/toruai/claude-voice-assistant.git
-cd claude-voice-assistant
+git clone https://github.com/toruai/toris-voice.git
+cd toris-voice
```
2. Create a virtual environment:
@@ -78,7 +78,7 @@ pytest test_bot.py -v
## Project Structure
```
-claude-voice-assistant/
+toris-voice/
├── bot.py # Main bot code
├── test_bot.py # Test suite
├── prompts/ # Persona prompt files
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..d0b6a84
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,70 @@
+# Claude Voice Assistant - Production Docker Image
+# Multi-stage build for efficient image size
+
+# ============================================================================
+# Stage 1: Base with Node.js and Python
+# ============================================================================
+FROM node:20-slim AS base
+
+# Install Python and system dependencies
+# Note: Debian bookworm has Python 3.11, which is compatible
+RUN apt-get update && apt-get install -y \
+ python3 \
+ python3-venv \
+ python3-pip \
+ curl procps \
+ && rm -rf /var/lib/apt/lists/*
+
+# Create non-root user (uid 1000 required by Claude CLI)
+# Delete existing node user first (it has UID 1000)
+RUN userdel -r node && \
+ useradd -m -u 1000 -s /bin/bash claude && \
+ mkdir -p /home/claude/.claude && \
+ chown -R claude:claude /home/claude
+
+# ============================================================================
+# Stage 2: Application Setup
+# ============================================================================
+FROM base AS app
+
+# Install Claude Code CLI globally
+RUN npm install -g @anthropic-ai/claude-code
+
+# Switch to non-root user
+USER claude
+WORKDIR /home/claude/app
+
+# Copy requirements first for better caching
+COPY --chown=claude:claude requirements.txt .
+
+# Create virtual environment and install dependencies
+RUN python3 -m venv .venv && \
+ .venv/bin/pip install --no-cache-dir --upgrade pip && \
+ .venv/bin/pip install --no-cache-dir -r requirements.txt
+
+# Copy application code
+COPY --chown=claude:claude bot.py .
+COPY --chown=claude:claude prompts/ ./prompts/
+
+# Copy Claude settings (agents, skills, config from toru-claude-settings submodule)
+COPY --chown=claude:claude .claude-settings/ /home/claude/.claude/
+
+# Create necessary directories
+RUN mkdir -p /home/claude/sandbox /home/claude/state
+
+# ============================================================================
+# Runtime Configuration
+# ============================================================================
+
+# Health check - verify bot can start
+HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
+ CMD pgrep -f "python.*bot.py" || exit 1
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+ CLAUDE_WORKING_DIR=/home/claude/app \
+ CLAUDE_SANDBOX_DIR=/home/claude/sandbox \
+ PATH="/home/claude/app/.venv/bin:$PATH"
+
+# Default command
+CMD ["python", "bot.py"]
diff --git a/README.md b/README.md
index 2132aef..54d4606 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
-# Claude Voice Assistant
+# TORIS Claude Voice Assistant
[](LICENSE)
-[](https://www.python.org/downloads/)
+[](https://www.python.org/downloads/)
**A voice-first interface to Claude's full agentic capabilities.** Not another chatbot wrapper.
@@ -68,34 +68,120 @@ Telegram Voice Message
## Prerequisites
-- **Python 3.12+**
- **Telegram Bot** - Create one via [@BotFather](https://t.me/botfather)
- **ElevenLabs account** - API key from [elevenlabs.io](https://elevenlabs.io)
-- **Claude Code** - Install via `npm install -g @anthropic-ai/claude-code`
+- **Claude Access** - Choose one authentication method (see below)
-## Quick Start
+For Docker deployment:
+- **Docker** and **Docker Compose**
-1. Clone and setup:
+For non-Docker deployment:
+- **Python 3.11+**
+- **Node.js 20+** (for Claude Code CLI)
+
+### Claude Authentication
+
+Choose ONE of these methods:
+
+| Method | Best For | Setup |
+|--------|----------|-------|
+| **API Key** | Docker, CI/CD, teams | Set `ANTHROPIC_API_KEY` from [console.anthropic.com](https://console.anthropic.com) |
+| **Subscription** | Personal use, Pro/Max/Teams plans | Run `claude /login` once, mount credentials |
+
+**API Key Method:**
+- Uses pre-paid API credits
+- Set `ANTHROPIC_API_KEY` in your env file
+- Works immediately in Docker
+
+**Subscription Method (Pro/Max/Teams):**
+- Uses your Claude subscription
+- No API key needed
+- For Docker: login on host first, then mount credentials
+
+## Deployment Options
+
+This project supports two deployment modes:
+
+### Option 1: Docker (Recommended for Production)
+
+Best for production deployment with automatic restarts and isolation.
+
+**Quick Start:**
+```bash
+# Clone the repository
+git clone --recurse-submodules https://github.com/toruai/toris-claude-voice-assistant.git
+cd toris-claude-voice-assistant
+
+# Configure
+cp docker/toris.env.example docker/toris.env
+# Edit docker/toris.env with your settings
+
+# Choose authentication method:
+# Option A: Add ANTHROPIC_API_KEY to docker/toris.env
+# Option B: Login with subscription, then uncomment credentials mount in docker-compose.yml
+# claude /login # Run on host first
+
+# Start
+docker-compose up -d
+
+# View logs
+docker-compose logs -f toris
+
+# Stop
+docker-compose down
+```
+
+**Benefits:**
+- Isolated sandbox for file operations
+- Automatic restarts on failure
+- No Python/Node installation needed
+- Persistent state across restarts
+- Toru agents and skills pre-installed (Garry, Bob, Sentinel, Scout, etc.)
+
+**Directory Structure:**
+```
+toris-claude-voice-assistant/
+├── Dockerfile
+├── docker-compose.yml
+├── docker/
+│ └── toris.env # Your config (copied from toris.env.example)
+└── prompts/ # Persona prompts
+```
+
+See [Docker Deployment Guide](#docker-deployment-guide) for details.
+
+### Option 2: Non-Docker (Systemd)
+
+Best for development or single-persona deployments on Linux.
+
+**Quick Start:**
```bash
-git clone https://github.com/toruai/claude-voice-assistant.git
-cd claude-voice-assistant
+# Clone and setup
+git clone --recurse-submodules https://github.com/toruai/toris-claude-voice-assistant.git
+cd toris-claude-voice-assistant
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
-```
-2. Configure:
-```bash
+# Install Claude Code CLI
+npm install -g @anthropic-ai/claude-code
+
+# Install Toru agents and skills (optional but recommended)
+cd .claude-settings && ./install.sh && cd ..
+
+# Configure
cp .env.example .env
# Edit .env with your values
-```
-3. Run:
-```bash
+# Run
python bot.py
```
-4. Send a voice message to your Telegram bot.
+The agents install adds 7 specialized AI agents (Garry, Bob, Sentinel, Scout, etc.) and 14 skills like `/dev-cycle` and `/scout`. See [toru-claude-agents](https://github.com/ToruAI/toru-claude-agents) for details.
+
+See [Systemd Deployment Guide](#systemd-deployment-guide) for production setup.
+
+---
## Configuration
@@ -115,8 +201,8 @@ python bot.py
| `SYSTEM_PROMPT_FILE` | - | Path to persona prompt file |
| `ELEVENLABS_VOICE_ID` | `JBFqnCBsd6RMkjVDRZzb` | ElevenLabs voice (George) |
| `TELEGRAM_TOPIC_ID` | - | Filter to specific forum topic |
-| `CLAUDE_WORKING_DIR` | `/home/dev` | Directory Claude can read from |
-| `CLAUDE_SANDBOX_DIR` | `/home/dev/claude-voice-sandbox` | Directory Claude can write to |
+| `CLAUDE_WORKING_DIR` | `/home/youruser` | Directory Claude can read from |
+| `CLAUDE_SANDBOX_DIR` | `/home/youruser/claude-voice-sandbox` | Directory Claude can write to |
| `MAX_VOICE_RESPONSE_CHARS` | `500` | Max characters for TTS |
## User Settings
@@ -136,35 +222,167 @@ Run multiple AI personalities from the same codebase. Each gets its own:
- Sandbox directory
- Topic filter (for group chats)
-### Directory Structure
+### Docker Multi-Persona
-```
-/home/dev/voice-agents/
-├── v.env # V persona config
-├── tc.env # TC persona config
-└── sandboxes/
- ├── v/ # V's isolated sandbox
- └── tc/ # TC's isolated sandbox
+Duplicate the service in `docker-compose.yml` with different env files:
+
+```yaml
+services:
+ toris:
+ env_file: docker/toris.env
+ volumes:
+ - toris-state:/home/claude/state
+ - toris-sandbox:/home/claude/sandbox
+
+ assistant2:
+ env_file: docker/assistant2.env
+ volumes:
+ - assistant2-state:/home/claude/state
+ - assistant2-sandbox:/home/claude/sandbox
```
-### Example Persona Prompt
+### Persona Prompt
-See `prompts/v.md` for a full example. Key elements:
+See `prompts/toris.md` for the default TORIS persona. Key elements:
```markdown
-You are V, a brilliant and slightly cynical voice assistant.
+# TORIS - Your Second Brain
-## Your capabilities:
-- You can READ files from anywhere in {read_dir}
-- You can WRITE and EXECUTE only in {sandbox_dir}
-- You have WebSearch for current information
+You are TORIS, a voice-powered thinking partner built on Claude.
-## CRITICAL - Voice output rules:
+## Your Capabilities
+- READ files from {read_dir}
+- WRITE and EXECUTE in {sandbox_dir}
+- Web search, research, note-taking via MEGG
+
+## CRITICAL - Voice Output Rules
- NO markdown formatting
- Speak in natural flowing sentences
```
-### Systemd Service
+---
+
+## Docker Deployment Guide
+
+### Building and Running
+
+```bash
+# Build the image
+docker-compose build
+
+# Start
+docker-compose up -d
+
+# View logs
+docker-compose logs -f toris
+
+# Restart
+docker-compose restart toris
+
+# Stop
+docker-compose down
+
+# Stop and remove volumes (WARNING: deletes session history)
+docker-compose down -v
+```
+
+### Configuration
+
+Copy and edit the example environment file:
+
+```bash
+cp docker/toris.env.example docker/toris.env
+```
+
+**Key environment variables:**
+- `TELEGRAM_BOT_TOKEN` - Bot token from @BotFather
+- `TELEGRAM_DEFAULT_CHAT_ID` - Your Telegram chat ID (security)
+- `ELEVENLABS_API_KEY` - ElevenLabs API key
+- `ANTHROPIC_API_KEY` - Anthropic API key (optional if using subscription)
+- `ELEVENLABS_VOICE_ID` - Voice selection
+- `PERSONA_NAME` - Display name in logs
+- `SYSTEM_PROMPT_FILE` - Path to persona prompt
+- `MAX_VOICE_RESPONSE_CHARS` - Max TTS characters (default: 500)
+
+### Authentication
+
+**Option 1: API Key** (recommended)
+```bash
+# Add to docker/toris.env
+ANTHROPIC_API_KEY=sk-ant-...
+```
+
+**Option 2: Claude Subscription**
+```bash
+# 1. Login on host machine
+claude /login
+
+# 2. Uncomment in docker-compose.yml volumes:
+- ~/.claude/.credentials.json:/home/claude/.claude/.credentials.json:ro
+```
+
+### Data Persistence
+
+Docker volumes store persistent data:
+
+| Volume | Contents | Location |
+|--------|----------|----------|
+| `toris-state` | Session history & settings | `/home/claude/state` |
+| `toris-sandbox` | File operations sandbox | `/home/claude/sandbox` |
+| `toris-claude-config` | Claude credentials & settings | `/home/claude/.claude` |
+
+**Backup state:**
+```bash
+# Export session data
+docker cp toris-claude-voice-assistant:/home/claude/state ./backup-state
+
+# Import session data
+docker cp ./backup-state/. toris-claude-voice-assistant:/home/claude/state
+docker-compose restart toris
+```
+
+### Health Checks
+
+Docker monitors bot health automatically. Check status:
+
+```bash
+# Container health
+docker-compose ps
+
+# If unhealthy, check logs
+docker-compose logs -f toris
+```
+
+---
+
+## Systemd Deployment Guide
+
+For non-Docker production deployments on Linux.
+
+### Setup
+
+```bash
+# Create deployment directory
+mkdir -p /opt/toris-claude-voice-assistant
+cd /opt/toris-claude-voice-assistant
+
+# Clone and install
+git clone --recurse-submodules https://github.com/toruai/toris-claude-voice-assistant.git .
+python3.11 -m venv .venv
+.venv/bin/pip install -r requirements.txt
+
+# Install Claude Code globally
+npm install -g @anthropic-ai/claude-code
+
+# Create config directory
+mkdir -p /etc/claude-voice
+cp .env.example /etc/claude-voice/v.env
+# Edit /etc/claude-voice/v.env with your values
+```
+
+### Service File
+
+Create `/etc/systemd/system/claude-voice-v.service`:
```ini
[Unit]
@@ -173,16 +391,67 @@ After=network.target
[Service]
Type=simple
-User=dev
-WorkingDirectory=/path/to/claude-voice-assistant
-EnvironmentFile=/home/dev/voice-agents/v.env
-ExecStart=/path/to/claude-voice-assistant/.venv/bin/python bot.py
+User=claude
+Group=claude
+WorkingDirectory=/opt/toris-claude-voice-assistant
+EnvironmentFile=/etc/claude-voice/v.env
+ExecStart=/opt/toris-claude-voice-assistant/.venv/bin/python bot.py
Restart=always
+RestartSec=10
+
+# Security
+NoNewPrivileges=true
+PrivateTmp=true
+ProtectSystem=strict
+ProtectHome=read-only
+ReadWritePaths=/var/lib/claude-voice/v-sandbox /var/lib/claude-voice/v-state
[Install]
WantedBy=multi-user.target
```
+### Create User and Directories
+
+```bash
+# Create service user
+useradd -r -s /bin/false claude
+
+# Create state and sandbox directories
+mkdir -p /var/lib/claude-voice/{v-state,v-sandbox}
+chown -R claude:claude /var/lib/claude-voice
+
+# Set sandbox path in env file
+echo "CLAUDE_SANDBOX_DIR=/var/lib/claude-voice/v-sandbox" >> /etc/claude-voice/v.env
+```
+
+### Manage Service
+
+```bash
+# Enable and start
+systemctl daemon-reload
+systemctl enable claude-voice-v
+systemctl start claude-voice-v
+
+# Check status
+systemctl status claude-voice-v
+
+# View logs
+journalctl -u claude-voice-v -f
+
+# Restart
+systemctl restart claude-voice-v
+```
+
+### Multiple Personas with Systemd
+
+Create separate service files and env files:
+- `/etc/systemd/system/claude-voice-v.service` + `/etc/claude-voice/v.env`
+- `/etc/systemd/system/claude-voice-tc.service` + `/etc/claude-voice/tc.env`
+
+Each persona needs its own sandbox and state directories.
+
+---
+
## Bot Commands
| Command | Description |
diff --git a/bot.py b/bot.py
index fc0cf11..b6ecea5 100644
--- a/bot.py
+++ b/bot.py
@@ -6,6 +6,7 @@
import os
import subprocess
+import shutil
import json
import asyncio
import logging
@@ -13,7 +14,8 @@
from io import BytesIO
from pathlib import Path
from dotenv import load_dotenv
-from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup
+from telegram import Update, InlineKeyboardButton, InlineKeyboardMarkup, ForceReply, BotCommand
+from telegram.constants import ChatAction
from telegram.ext import (
ApplicationBuilder,
CommandHandler,
@@ -23,6 +25,7 @@
filters,
)
from elevenlabs.client import ElevenLabs
+from openai import OpenAI as OpenAIClient
# Claude Agent SDK (official SDK for Claude Code)
from claude_agent_sdk import (
@@ -35,7 +38,6 @@
ResultMessage,
TextBlock,
ToolUseBlock,
- ToolResultBlock,
PermissionResultAllow,
PermissionResultDeny,
)
@@ -43,11 +45,57 @@
load_dotenv()
+def resolve_provider(explicit_env: str) -> str:
+ """Resolve voice provider: explicit > elevenlabs (if key) > openai (if key) > none."""
+ explicit = os.getenv(explicit_env, "").lower()
+ if explicit in ("openai", "elevenlabs"):
+ return explicit
+ if os.getenv("ELEVENLABS_API_KEY"):
+ return "elevenlabs"
+ if os.getenv("OPENAI_API_KEY"):
+ return "openai"
+ return "none"
+
+
+def check_claude_auth() -> tuple[bool, str]:
+ """Check if Claude authentication is configured.
+
+ Returns:
+ (is_authenticated, auth_method) - auth_method is 'api_key', 'oauth', 'saved_token', or 'none'
+ """
+ # Method 1: API Key
+ if os.getenv("ANTHROPIC_API_KEY"):
+ return True, "api_key"
+
+ # Method 2: Saved OAuth token (from /setup)
+ if os.getenv("CLAUDE_CODE_OAUTH_TOKEN"):
+ return True, "saved_token"
+
+ # Method 3: OAuth credentials file
+ credentials_path = Path.home() / ".claude" / ".credentials.json"
+ if credentials_path.exists():
+ try:
+ import time
+ creds = json.loads(credentials_path.read_text())
+ oauth = creds.get("claudeAiOauth", {})
+ if oauth.get("accessToken"):
+ # Check if not expired (with 5 min buffer)
+ expires_at = oauth.get("expiresAt", 0)
+ if expires_at > (time.time() * 1000 + 300000):
+ return True, "oauth"
+ # Expired but has refresh token - Claude SDK will handle refresh
+ if oauth.get("refreshToken"):
+ return True, "oauth"
+ except (json.JSONDecodeError, KeyError):
+ pass
+
+ return False, "none"
+
+
def validate_environment():
"""Validate required environment variables on startup."""
required = {
"TELEGRAM_BOT_TOKEN": "Telegram bot token from @BotFather",
- "ELEVENLABS_API_KEY": "ElevenLabs API key from elevenlabs.io",
}
missing = []
@@ -61,6 +109,11 @@ def validate_environment():
print("\nCopy .env.example to .env and fill in the values.")
exit(1)
+ # Require at least one voice provider key
+ if not os.getenv("ELEVENLABS_API_KEY") and not os.getenv("OPENAI_API_KEY"):
+ print("WARNING: No voice provider key set (ELEVENLABS_API_KEY or OPENAI_API_KEY)")
+ print(" Voice features will be disabled until a key is configured via /setup")
+
# Validate TELEGRAM_DEFAULT_CHAT_ID is a valid integer
chat_id = os.getenv("TELEGRAM_DEFAULT_CHAT_ID", "0")
try:
@@ -73,6 +126,16 @@ def validate_environment():
print("WARNING: TELEGRAM_DEFAULT_CHAT_ID is 0 - bot will accept all messages")
print(" Set this to your chat ID to restrict access")
+ # Check Claude authentication (don't exit - can be configured via Telegram)
+ is_auth, auth_method = check_claude_auth()
+ if not is_auth:
+ print("WARNING: Claude authentication not configured - bot will start but Claude won't work")
+ print(" Use /setup in Telegram to configure, or set ANTHROPIC_API_KEY in env")
+ else:
+ print(f"Claude auth: {auth_method}")
+
+ return is_auth, auth_method
+
# Setup logging with configurable level
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
@@ -86,9 +149,13 @@ def validate_environment():
TELEGRAM_BOT_TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")
ALLOWED_CHAT_ID = int(os.getenv("TELEGRAM_DEFAULT_CHAT_ID", "0"))
+# Admin user IDs (comma-separated) - required for /setup, /claude_token, etc.
+# If empty, falls back to chat-ID check only (backward compat)
+_admin_ids_raw = os.getenv("TELEGRAM_ADMIN_USER_IDS", "")
+ADMIN_USER_IDS = set(int(uid.strip()) for uid in _admin_ids_raw.split(",") if uid.strip()) if _admin_ids_raw.strip() else set()
TOPIC_ID = os.getenv("TELEGRAM_TOPIC_ID") # Empty = all topics, set = only this topic
-CLAUDE_WORKING_DIR = os.getenv("CLAUDE_WORKING_DIR", "/home/dev")
-SANDBOX_DIR = os.getenv("CLAUDE_SANDBOX_DIR", "/home/dev/claude-voice-sandbox")
+CLAUDE_WORKING_DIR = os.getenv("CLAUDE_WORKING_DIR", os.path.expanduser("~"))
+SANDBOX_DIR = os.getenv("CLAUDE_SANDBOX_DIR", os.path.join(os.path.expanduser("~"), "claude-voice-sandbox"))
MAX_VOICE_CHARS = int(os.getenv("MAX_VOICE_RESPONSE_CHARS", "500"))
# Persona config
@@ -96,10 +163,24 @@ def validate_environment():
SYSTEM_PROMPT_FILE = os.getenv("SYSTEM_PROMPT_FILE", "")
ELEVENLABS_VOICE_ID = os.getenv("ELEVENLABS_VOICE_ID", "JBFqnCBsd6RMkjVDRZzb") # Default: George
CLAUDE_SETTINGS_FILE = os.getenv("CLAUDE_SETTINGS_FILE", "") # Optional settings.json for permissions
+if CLAUDE_SETTINGS_FILE and not os.path.isabs(CLAUDE_SETTINGS_FILE):
+ CLAUDE_SETTINGS_FILE = str(Path(__file__).parent / CLAUDE_SETTINGS_FILE)
+
+# Voice provider selection (resolved at startup)
+TTS_PROVIDER = resolve_provider("TTS_PROVIDER") # "elevenlabs", "openai", or "none"
+STT_PROVIDER = resolve_provider("STT_PROVIDER") # "elevenlabs", "openai", or "none"
+
+# OpenAI voice config
+OPENAI_VOICE_ID = os.getenv("OPENAI_VOICE_ID", "coral")
+OPENAI_TTS_MODEL = os.getenv("OPENAI_TTS_MODEL", "gpt-4o-mini-tts")
+OPENAI_STT_MODEL = os.getenv("OPENAI_STT_MODEL", "whisper-1")
+OPENAI_VOICE_INSTRUCTIONS = os.getenv("OPENAI_VOICE_INSTRUCTIONS", "")
+
+# STT language (applies to both providers; empty = auto-detect)
+STT_LANGUAGE = os.getenv("STT_LANGUAGE", "")
-def debug(msg: str):
- """Print debug message with timestamp."""
- print(f"[{datetime.now().strftime('%H:%M:%S')}] {msg}")
+# OpenAI client (None if no key configured)
+openai_client = OpenAIClient(api_key=os.getenv("OPENAI_API_KEY")) if os.getenv("OPENAI_API_KEY") else None
def load_system_prompt() -> str:
"""Load system prompt from file or use default."""
@@ -113,10 +194,10 @@ def load_system_prompt() -> str:
# Replace placeholders
content = content.replace("{sandbox_dir}", SANDBOX_DIR)
content = content.replace("{read_dir}", CLAUDE_WORKING_DIR)
- debug(f"Loaded system prompt from {prompt_path} ({len(content)} chars)")
+ logger.debug(f"Loaded system prompt from {prompt_path} ({len(content)} chars)")
return content
else:
- debug(f"WARNING: System prompt file not found: {prompt_path}")
+ logger.debug(f"WARNING: System prompt file not found: {prompt_path}")
# Fallback default prompt
return f"""You are a voice assistant. You're talking to the user.
@@ -144,18 +225,32 @@ def should_handle_message(message_thread_id: int | None) -> bool:
try:
allowed_topic = int(TOPIC_ID)
except (ValueError, TypeError):
- debug(f"WARNING: Invalid TOPIC_ID '{TOPIC_ID}', handling all messages")
+ logger.debug(f"WARNING: Invalid TOPIC_ID '{TOPIC_ID}', handling all messages")
return True
# Check if message is in the allowed topic
if message_thread_id is None:
# Message not in any topic (general chat) - don't handle if we have a specific topic
- debug(f"Message not in a topic, but we're filtering for topic {allowed_topic}")
+ logger.debug(f"Message not in a topic, but we're filtering for topic {allowed_topic}")
return False
return message_thread_id == allowed_topic
+def _is_authorized(update) -> bool:
+ """Check if the chat is authorized to use this bot."""
+ return ALLOWED_CHAT_ID == 0 or update.effective_chat.id == ALLOWED_CHAT_ID
+
+
+def _is_admin(update) -> bool:
+ """Check if user is allowed to run admin commands (token setup, etc.)."""
+ if not _is_authorized(update):
+ return False
+ if not ADMIN_USER_IDS:
+ return True # Backward compat: no admin list = anyone in authorized chat
+ return update.effective_user.id in ADMIN_USER_IDS
+
+
# Base system prompt (loaded once at startup)
BASE_SYSTEM_PROMPT = load_system_prompt()
@@ -197,6 +292,7 @@ def build_dynamic_prompt(user_settings: dict = None) -> str:
RATE_LIMIT_SECONDS = 2 # Minimum seconds between messages per user
RATE_LIMIT_PER_MINUTE = 10 # Max messages per minute per user
user_rate_limits = {} # {user_id: {"last_message": timestamp, "minute_count": int, "minute_start": timestamp}}
+rate_limits = user_rate_limits # Public alias for testing
def check_rate_limit(user_id: int) -> tuple[bool, str]:
@@ -243,6 +339,9 @@ def check_rate_limit(user_id: int) -> tuple[bool, str]:
# Pending tool approvals: {approval_id: {"event": asyncio.Event, "approved": bool, "tool_name": str, "input": dict}}
pending_approvals = {}
+# Cancellation events per user — set by /cancel to interrupt active call_claude
+cancel_events: dict[int, asyncio.Event] = {}
+
# State files for persistence
STATE_FILE = Path(__file__).parent / "sessions_state.json"
SETTINGS_FILE = Path(__file__).parent / "user_settings.json"
@@ -252,8 +351,13 @@ def load_state():
"""Load session state from file."""
global user_sessions
if STATE_FILE.exists():
- with open(STATE_FILE) as f:
- user_sessions = json.load(f)
+ try:
+ with open(STATE_FILE) as f:
+ user_sessions = json.load(f)
+ logger.debug(f"Loaded state: {len(user_sessions)} users")
+ except (json.JSONDecodeError, IOError) as e:
+ logger.warning(f"Could not load state file, starting fresh: {e}")
+ user_sessions = {}
def save_state():
@@ -266,8 +370,13 @@ def load_settings():
"""Load user settings from file."""
global user_settings
if SETTINGS_FILE.exists():
- with open(SETTINGS_FILE) as f:
- user_settings = json.load(f)
+ try:
+ with open(SETTINGS_FILE) as f:
+ user_settings = json.load(f)
+ logger.debug(f"Loaded settings: {len(user_settings)} users")
+ except (json.JSONDecodeError, IOError) as e:
+ logger.warning(f"Could not load settings file, starting fresh: {e}")
+ user_settings = {}
def save_settings():
@@ -276,6 +385,101 @@ def save_settings():
json.dump(user_settings, f, indent=2)
+# Credentials file for user-provided API keys
+CREDENTIALS_FILE = Path(__file__).parent / "credentials.json"
+
+
+def load_credentials() -> dict:
+ """Load saved credentials from file."""
+ if CREDENTIALS_FILE.exists():
+ try:
+ with open(CREDENTIALS_FILE) as f:
+ return json.load(f)
+ except (json.JSONDecodeError, IOError):
+ pass
+ return {}
+
+
+def save_credentials(creds: dict):
+ """Save credentials to file with secure permissions."""
+ with open(CREDENTIALS_FILE, "w") as f:
+ json.dump(creds, f, indent=2)
+ # Restrict file permissions (owner read/write only)
+ CREDENTIALS_FILE.chmod(0o600)
+
+
+def apply_saved_credentials():
+ """Apply saved credentials on startup."""
+ global elevenlabs, ELEVENLABS_API_KEY, openai_client, TTS_PROVIDER, STT_PROVIDER
+ creds = load_credentials()
+
+ if creds.get("claude_token"):
+ os.environ["CLAUDE_CODE_OAUTH_TOKEN"] = creds["claude_token"]
+ logger.debug("Applied saved Claude token")
+
+ if creds.get("elevenlabs_key"):
+ ELEVENLABS_API_KEY = creds["elevenlabs_key"]
+ os.environ["ELEVENLABS_API_KEY"] = creds["elevenlabs_key"]
+ elevenlabs = ElevenLabs(api_key=ELEVENLABS_API_KEY)
+ logger.debug("Applied saved ElevenLabs key")
+
+ if creds.get("openai_key"):
+ os.environ["OPENAI_API_KEY"] = creds["openai_key"]
+ openai_client = OpenAIClient(api_key=creds["openai_key"])
+ logger.debug("Applied saved OpenAI key")
+
+ # Re-resolve providers after credentials are loaded
+ TTS_PROVIDER = resolve_provider("TTS_PROVIDER")
+ STT_PROVIDER = resolve_provider("STT_PROVIDER")
+
+
+def get_mcp_status(settings_file: str) -> list[str]:
+ """Return list of status lines for MCP servers in settings file.
+
+ Pure function — no I/O side effects beyond reading the settings file.
+ """
+ if not settings_file:
+ return ["MCP: CLAUDE_SETTINGS_FILE not configured"]
+
+ settings_path = Path(settings_file)
+ if not settings_path.is_absolute():
+ settings_path = Path(__file__).parent / settings_file
+
+ if not settings_path.exists():
+ return [f"MCP config: settings file not found ({settings_file})"]
+
+ try:
+ settings_data = json.loads(settings_path.read_text())
+ except (json.JSONDecodeError, IOError) as e:
+ return [f"MCP config: ERROR reading settings - {e}"]
+
+ mcp_servers = settings_data.get("mcpServers", {})
+ if not mcp_servers:
+ return ["MCP Servers: none configured"]
+
+ lines = ["MCP Servers:"]
+ for name, config in mcp_servers.items():
+ cmd = config.get("command", "")
+ if cmd and shutil.which(cmd):
+ lines.append(f" {name}: OK ({cmd})")
+ elif cmd:
+ lines.append(f" {name}: MISSING ({cmd} not found in PATH)")
+ else:
+ lines.append(f" {name}: misconfigured (no command)")
+ return lines
+
+
+def load_mcp_servers() -> dict:
+ """Read mcpServers from CLAUDE_SETTINGS_FILE for use in ClaudeAgentOptions.mcp_servers."""
+ if not CLAUDE_SETTINGS_FILE:
+ return {}
+ try:
+ data = json.loads(Path(CLAUDE_SETTINGS_FILE).read_text())
+ return data.get("mcpServers", {})
+ except (json.JSONDecodeError, IOError, OSError):
+ return {}
+
+
def get_user_state(user_id: int) -> dict:
"""Get or create user state."""
user_id_str = str(user_id)
@@ -292,65 +496,131 @@ def get_user_settings(user_id: int) -> dict:
"audio_enabled": True,
"voice_speed": VOICE_SETTINGS["speed"],
"mode": "go_all", # "go_all" or "approve"
- "watch_enabled": False, # Stream tool calls to Telegram
+ "watch_mode": "off", # "off" | "live" | "debug"
}
else:
- # Ensure new settings exist for existing users
- if "mode" not in user_settings[user_id_str]:
- user_settings[user_id_str]["mode"] = "go_all"
- if "watch_enabled" not in user_settings[user_id_str]:
- user_settings[user_id_str]["watch_enabled"] = False
+ s = user_settings[user_id_str]
+ if "mode" not in s:
+ s["mode"] = "go_all"
+ # Migrate watch_enabled / show_activity → watch_mode
+ if "watch_mode" not in s:
+ if s.pop("watch_enabled", False):
+ s["watch_mode"] = "live"
+ elif s.pop("show_activity", False):
+ s["watch_mode"] = "debug"
+ else:
+ s["watch_mode"] = "off"
+ s.pop("watch_enabled", None)
+ s.pop("show_activity", None)
return user_settings[user_id_str]
-async def transcribe_voice(voice_bytes: bytes) -> str:
async def _transcribe_elevenlabs(voice_bytes: bytes) -> str:
    """Transcribe voice using ElevenLabs Scribe.

    Runs the blocking SDK call in a worker thread; errors are logged and
    re-raised so the caller (transcribe_voice) can format the failure.
    """
    try:
        transcription = await asyncio.to_thread(
            elevenlabs.speech_to_text.convert,
            file=BytesIO(voice_bytes),
            model_id="scribe_v1",
            # Empty STT_LANGUAGE means auto-detect (pass None).
            language_code=STT_LANGUAGE or None,
        )
        return transcription.text
    except Exception as e:
        logger.error(f"ElevenLabs STT error: {e}")
        raise
-async def text_to_speech(text: str, speed: float = None) -> BytesIO:
- """Convert text to speech using ElevenLabs Turbo v2.5 with expressive voice settings."""
async def _transcribe_openai(voice_bytes: bytes) -> str:
    """Transcribe voice using OpenAI Whisper.

    The blocking client call runs in a worker thread. Errors are logged and
    re-raised for the router (transcribe_voice) to handle.
    """
    try:
        request_args = {
            "model": OPENAI_STT_MODEL,
            # (filename, fileobj, content-type) tuple as expected by the SDK.
            "file": ("voice.ogg", BytesIO(voice_bytes), "audio/ogg"),
        }
        if STT_LANGUAGE:
            request_args["language"] = STT_LANGUAGE
        result = await asyncio.to_thread(
            openai_client.audio.transcriptions.create, **request_args
        )
        return result.text
    except Exception as e:
        logger.error(f"OpenAI STT error: {e}")
        raise
+
+
async def transcribe_voice(voice_bytes: bytes) -> str:
    """Transcribe voice — routes to the active STT provider.

    Never raises: provider errors are folded into a bracketed error string
    so the message handler can surface them to the user directly.
    """
    try:
        if STT_PROVIDER == "elevenlabs":
            return await _transcribe_elevenlabs(voice_bytes)
        elif STT_PROVIDER == "openai":
            return await _transcribe_openai(voice_bytes)
        else:
            return "[Transcription error: no STT provider configured]"
    except Exception as e:
        return f"[Transcription error: {e}]"
+
- audio = elevenlabs.text_to_speech.convert(
async def _tts_elevenlabs(text: str, speed: float = None) -> BytesIO:
    """Convert text to speech using ElevenLabs Flash v2.5.

    The blocking SDK streaming call is wrapped in a thread; the resulting
    MP3 bytes are collected into a rewound BytesIO buffer.
    """
    def _render() -> BytesIO:
        params = dict(
            text=text,
            voice_id=ELEVENLABS_VOICE_ID,
            model_id="eleven_flash_v2_5",
            output_format="mp3_44100_128",
        )
        if speed is not None:
            # Only override voice settings when a custom speed was requested.
            params["voice_settings"] = {"speed": speed}
        stream = elevenlabs.text_to_speech.convert(**params)
        out = BytesIO()
        for chunk in stream:
            if isinstance(chunk, bytes):
                out.write(chunk)
        out.seek(0)
        return out

    return await asyncio.to_thread(_render)
+
+
async def _tts_openai(text: str, speed: float = None) -> BytesIO:
    """Convert text to speech using OpenAI TTS.

    Streams audio bytes in 4 KiB chunks into a rewound BytesIO buffer;
    the blocking client call runs in a worker thread.
    """
    def _render() -> BytesIO:
        params = {"model": OPENAI_TTS_MODEL, "voice": OPENAI_VOICE_ID, "input": text}
        if OPENAI_VOICE_INSTRUCTIONS:
            params["instructions"] = OPENAI_VOICE_INSTRUCTIONS
        if speed is not None:
            params["speed"] = speed
        response = openai_client.audio.speech.create(**params)
        out = BytesIO()
        for piece in response.iter_bytes(chunk_size=4096):
            out.write(piece)
        out.seek(0)
        return out

    return await asyncio.to_thread(_render)
+
+
async def text_to_speech(text: str, speed: float = None) -> BytesIO:
    """Convert text to speech — routes to the active TTS provider.

    Returns None (instead of raising) when no provider is configured or the
    provider call fails, so callers can silently skip the voice reply.
    """
    try:
        if TTS_PROVIDER == "elevenlabs":
            return await _tts_elevenlabs(text, speed)
        elif TTS_PROVIDER == "openai":
            return await _tts_openai(text, speed)
        logger.debug("TTS skipped: no provider configured")
        return None
    except Exception as e:
        logger.error(f"TTS error: {e}")
        return None
async def send_long_message(update: Update, first_msg, text: str, chunk_size: int = 4000):
- """Split long text into multiple Telegram messages."""
+ """Split long text into multiple Telegram messages.
+
+ If first_msg is None, all chunks are sent as new reply messages.
+ """
if len(text) <= chunk_size:
- await first_msg.edit_text(text)
+ if first_msg is None:
+ await update.message.reply_text(text)
+ else:
+ await first_msg.edit_text(text)
return
# Split into chunks
@@ -369,12 +639,20 @@ async def send_long_message(update: Update, first_msg, text: str, chunk_size: in
chunks.append(remaining[:break_point])
remaining = remaining[break_point:].lstrip()
- # Send first chunk as edit, rest as new messages
- await first_msg.edit_text(chunks[0] + f"\n\n[1/{len(chunks)}]")
+ # Send first chunk as edit (or new reply if first_msg is None), rest as new messages
+ if first_msg is None:
+ await update.message.reply_text(chunks[0] + f"\n\n[1/{len(chunks)}]")
+ else:
+ await first_msg.edit_text(chunks[0] + f"\n\n[1/{len(chunks)}]")
for i, chunk in enumerate(chunks[1:], 2):
await update.message.reply_text(chunk + f"\n\n[{i}/{len(chunks)}]")
- debug(f"Sent {len(chunks)} message chunks")
+ logger.debug(f"Sent {len(chunks)} message chunks")
+
+
async def finalize_response(update: Update, processing_msg, response: str):
    """Replace processing_msg with the final response (or send as new message if no processing_msg).

    Thin convenience wrapper over send_long_message, which handles chunking
    for Telegram's message-length limit and the processing_msg-is-None case.
    """
    await send_long_message(update, processing_msg, response)
def load_megg_context() -> str:
@@ -388,13 +666,13 @@ def load_megg_context() -> str:
cwd=CLAUDE_WORKING_DIR
)
if result.returncode == 0:
- debug(f"Loaded megg context: {len(result.stdout)} chars")
+ logger.debug(f"Loaded megg context: {len(result.stdout)} chars")
return result.stdout
else:
- debug(f"Megg context failed: {result.stderr[:50]}")
+ logger.debug(f"Megg context failed: {result.stderr[:50]}")
return ""
except Exception as e:
- debug(f"Megg error: {e}")
+ logger.debug(f"Megg error: {e}")
return ""
@@ -415,6 +693,7 @@ async def call_claude(
user_settings: dict = None,
update: Update = None,
context: ContextTypes.DEFAULT_TYPE = None,
+ processing_msg=None,
) -> tuple[str, str, dict]:
"""
Call Claude Code SDK and return (response, session_id, metadata).
@@ -424,7 +703,7 @@ async def call_claude(
If mode == "approve", waits for user approval before each tool.
"""
settings = user_settings or {}
- watch_enabled = settings.get("watch_enabled", False)
+ watch_mode = settings.get("watch_mode", "off") # "off" | "live" | "debug"
mode = settings.get("mode", "go_all")
# Ensure sandbox exists
@@ -436,14 +715,14 @@ async def call_claude(
megg_ctx = load_megg_context()
if megg_ctx:
full_prompt = f"\n{megg_ctx}\n\n\n{prompt}"
- debug("Prepended megg context to prompt")
+ logger.debug("Prepended megg context to prompt")
# Build dynamic system prompt
dynamic_persona = build_dynamic_prompt(user_settings)
- debug(f"Calling Claude SDK: prompt={len(prompt)} chars, continue={continue_last}, session={session_id[:8] if session_id else 'new'}...")
- debug(f"Mode: {mode}, Watch: {watch_enabled}")
- debug(f"Working dir: {SANDBOX_DIR} (sandbox)")
+ logger.debug(f"Calling Claude SDK: prompt={len(prompt)} chars, continue={continue_last}, session={session_id[:8] if session_id else 'new'}...")
+ logger.debug(f"Mode: {mode}, Watch: {watch_mode}")
+ logger.debug(f"Working dir: {SANDBOX_DIR} (sandbox)")
# Track tool approvals for this call
approval_event = None
@@ -453,14 +732,14 @@ async def can_use_tool(tool_name: str, tool_input: dict, ctx) -> PermissionResul
"""Callback for tool approval in approve mode."""
nonlocal approval_event, current_approval_id
- debug(f">>> can_use_tool CALLED: {tool_name}")
+ logger.debug(f">>> can_use_tool CALLED: {tool_name}")
if mode != "approve":
- debug(f">>> Mode is {mode}, auto-allowing")
+ logger.debug(f">>> Mode is {mode}, auto-allowing")
return PermissionResultAllow()
if update is None:
- debug(f"No update context for approval, allowing {tool_name}")
+ logger.debug(f"No update context for approval, allowing {tool_name}")
return PermissionResultAllow()
# Generate unique approval ID
@@ -489,33 +768,33 @@ async def can_use_tool(tool_name: str, tool_input: dict, ctx) -> PermissionResul
message_text = f"Tool Request:\n{format_tool_call(tool_name, tool_input)}"
await update.message.reply_text(message_text, reply_markup=reply_markup, parse_mode="Markdown")
- debug(f">>> Waiting for approval: {current_approval_id} ({tool_name}) - pending_approvals keys: {list(pending_approvals.keys())}")
+ logger.debug(f">>> Waiting for approval: {current_approval_id} ({tool_name}) - pending_approvals keys: {list(pending_approvals.keys())}")
# Wait for user response (with timeout)
try:
- debug(f">>> Starting event.wait() for {current_approval_id}")
+ logger.debug(f">>> Starting event.wait() for {current_approval_id}")
await asyncio.wait_for(approval_event.wait(), timeout=300) # 5 min timeout
- debug(f">>> Event.wait() completed for {current_approval_id}")
+ logger.debug(f">>> Event.wait() completed for {current_approval_id}")
except asyncio.TimeoutError:
- debug(f">>> Approval timeout for {current_approval_id}")
+ logger.debug(f">>> Approval timeout for {current_approval_id}")
del pending_approvals[current_approval_id]
return PermissionResultDeny(message="Approval timed out")
# Check result
- debug(f">>> Checking result for {current_approval_id}")
+ logger.debug(f">>> Checking result for {current_approval_id}")
approval_data = pending_approvals.pop(current_approval_id, {})
if approval_data.get("approved"):
- debug(f">>> Tool approved: {tool_name}")
+ logger.debug(f">>> Tool approved: {tool_name}")
return PermissionResultAllow()
else:
- debug(f">>> Tool rejected: {tool_name}")
+ logger.debug(f">>> Tool rejected: {tool_name}")
return PermissionResultDeny(message="User rejected tool")
# Build SDK options
# In approve mode: don't pre-allow tools - let can_use_tool callback handle each one
# In go_all mode: pre-allow all tools for no prompts
if mode == "approve":
- debug(f">>> APPROVE MODE: Setting up can_use_tool callback")
+ logger.debug(f">>> APPROVE MODE: Setting up can_use_tool callback")
options = ClaudeAgentOptions(
system_prompt=dynamic_persona,
cwd=SANDBOX_DIR,
@@ -523,15 +802,19 @@ async def can_use_tool(tool_name: str, tool_input: dict, ctx) -> PermissionResul
permission_mode="default",
add_dirs=[CLAUDE_WORKING_DIR],
)
- debug(f">>> Options: can_use_tool={options.can_use_tool is not None}, permission_mode={options.permission_mode}")
+ if CLAUDE_SETTINGS_FILE:
+ options.settings = CLAUDE_SETTINGS_FILE
+ logger.debug(f">>> Options: can_use_tool={options.can_use_tool is not None}, permission_mode={options.permission_mode}")
else:
- debug(f">>> GO_ALL MODE: Pre-allowing all tools")
+ logger.debug(f">>> GO_ALL MODE: Pre-allowing all tools")
options = ClaudeAgentOptions(
system_prompt=dynamic_persona,
- allowed_tools=["Read", "Grep", "Glob", "WebSearch", "WebFetch", "Task", "Bash", "Edit", "Write", "Skill"],
+ allowed_tools=["Read", "Grep", "Glob", "WebSearch", "WebFetch", "Task", "Bash", "Edit", "Write", "Skill", "RemoteTrigger"],
cwd=SANDBOX_DIR,
add_dirs=[CLAUDE_WORKING_DIR],
)
+ if CLAUDE_SETTINGS_FILE:
+ options.settings = CLAUDE_SETTINGS_FILE
# Handle session continuation
if continue_last:
@@ -543,45 +826,66 @@ async def can_use_tool(tool_name: str, tool_input: dict, ctx) -> PermissionResul
new_session_id = session_id
metadata = {}
tool_count = 0
+ tool_log: list[str] = [] # Running list of tool names used
+
+ # debug_msg created lazily on first tool use (debug mode only)
+ debug_msg = None
+
+ # Set up cancellation tracking for this user
+ user_id_for_cancel = update.effective_user.id if update else None
+ if user_id_for_cancel is not None:
+ if user_id_for_cancel not in cancel_events:
+ cancel_events[user_id_for_cancel] = asyncio.Event()
+ cancel_events[user_id_for_cancel].clear() # Reset at start of each call
try:
- debug(f">>> Starting ClaudeSDKClient with prompt: {len(full_prompt)} chars")
+ logger.debug(f">>> Starting ClaudeSDKClient with prompt: {len(full_prompt)} chars")
async with ClaudeSDKClient(options=options) as client:
await client.query(full_prompt)
async for message in client.receive_response():
+ # Check for user cancellation
+ if user_id_for_cancel is not None and cancel_events.get(user_id_for_cancel, asyncio.Event()).is_set():
+ logger.debug(f"Call cancelled by user {user_id_for_cancel}")
+ result_text = (result_text + "\n\n[Cancelled]").strip()
+ break
+
# Handle different message types
- debug(f">>> SDK message type: {type(message).__name__}")
+ logger.debug(f">>> SDK message type: {type(message).__name__}")
if isinstance(message, AssistantMessage):
for block in message.content:
if isinstance(block, TextBlock):
result_text += block.text
elif isinstance(block, ToolUseBlock):
tool_count += 1
- if watch_enabled and update:
- # Stream tool call to Telegram with details
- tool_input = block.input or {}
- # Extract key info based on tool type
- if block.name == "Bash" and "command" in tool_input:
- cmd = tool_input["command"]
- detail = cmd[:80] + "..." if len(cmd) > 80 else cmd
- elif block.name == "Read" and "file_path" in tool_input:
- detail = tool_input["file_path"]
- elif block.name == "Edit" and "file_path" in tool_input:
- detail = tool_input["file_path"]
- elif block.name == "Write" and "file_path" in tool_input:
- detail = tool_input["file_path"]
- elif block.name == "Grep" and "pattern" in tool_input:
- detail = f"/{tool_input['pattern']}/"
- elif block.name == "Glob" and "pattern" in tool_input:
- detail = tool_input["pattern"]
- else:
- detail = None
-
- tool_msg = f"{block.name}: {detail}" if detail else f"Using: {block.name}"
+ # Build label for this tool
+ tool_input = block.input or {}
+ if block.name == "Bash" and "command" in tool_input:
+ label = f"Bash: {tool_input['command']}"
+ elif block.name in ("Read", "Edit", "Write") and "file_path" in tool_input:
+ label = f"{block.name}: {tool_input['file_path']}"
+ elif block.name == "Grep" and "pattern" in tool_input:
+ label = f"Grep: /{tool_input['pattern']}/"
+ elif block.name == "Glob" and "pattern" in tool_input:
+ label = f"Glob: {tool_input['pattern']}"
+ elif block.name.startswith("mcp__"):
+ label = block.name.replace("mcp__", "")
+ else:
+ label = block.name
+ tool_log.append(f"⚙ {label}")
+
+ if watch_mode == "live" and processing_msg is not None:
try:
- await update.message.reply_text(tool_msg)
- except Exception as e:
- debug(f"Failed to send watch message: {e}")
+ await processing_msg.edit_text("Toris thinking...\n" + "\n".join(tool_log))
+ except Exception:
+ pass
+ elif watch_mode == "debug" and update:
+ try:
+ if debug_msg is None:
+ debug_msg = await update.message.reply_text("🔧 Tools:\n" + "\n".join(tool_log))
+ else:
+ await debug_msg.edit_text("🔧 Tools:\n" + "\n".join(tool_log))
+ except Exception:
+ pass
elif isinstance(message, ResultMessage):
# Extract final result and metadata
@@ -596,14 +900,201 @@ async def can_use_tool(tool_name: str, tool_input: dict, ctx) -> PermissionResul
if hasattr(message, "duration_ms"):
metadata["duration_ms"] = message.duration_ms
- debug(f"Claude SDK responded: {len(result_text)} chars, {tool_count} tools used")
+ logger.debug(f"Claude SDK responded: {len(result_text)} chars, {tool_count} tools used")
+ metadata["tool_log"] = tool_log
return result_text, new_session_id, metadata
except Exception as e:
- debug(f"Claude SDK error: {e}")
+ logger.error(f"Claude SDK error: {e}")
return f"Error calling Claude: {e}", session_id, {}
+# ============ Helpers ============
+
async def typing_loop(update: Update, context: ContextTypes.DEFAULT_TYPE, stop_event: asyncio.Event):
    """Send typing indicator every 4s until stop_event is set (Telegram typing expires after 5s)."""
    while True:
        if stop_event.is_set():
            break
        try:
            await context.bot.send_chat_action(
                chat_id=update.effective_chat.id,
                action=ChatAction.TYPING,
            )
        except Exception:
            # Best effort: a failed chat action must not kill the loop.
            pass
        try:
            # Pause up to 4s, but wake immediately when stop_event fires.
            await asyncio.wait_for(stop_event.wait(), timeout=4.0)
        except asyncio.TimeoutError:
            continue
+
+
+# ============ Automations Helpers ============
+
def cron_to_human(expr: str) -> str:
    """Convert a 5-field cron expression to a Polish human-readable string.

    Only plain daily/weekly schedules are translated; anything with
    day-of-month/month restrictions, ranges, or step values falls back to
    the raw expression.

    Fix over the previous version: non-numeric minute/hour fields (e.g.
    "* 9 * * *" or "*/5 10 * * *") used to be rendered as bogus times like
    "Codziennie 9:*"; they now fall back to the raw expression.
    """
    parts = expr.split()
    if len(parts) != 5:
        return expr
    minute, hour, dom, month, dow = parts
    # Day-of-month / month restrictions cannot be summarized as HH:MM.
    if dom != "*" or month != "*":
        return expr
    if hour == "*":
        # "every minute 0" == top of every hour.
        return "Co godzinę" if minute == "0" else expr
    if not (minute.isdigit() and hour.isdigit()):
        # Ranges/steps/wildcards in minute or hour can't be shown as a time.
        return expr
    hm = f"{int(hour):02d}:{int(minute):02d}"
    if dow == "*":
        return f"Codziennie {hm}"
    if dow == "1-5":
        return f"Pn-Pt {hm}"
    # Cron allows both 0 and 7 for Sunday.
    day_names = {"0": "Nd", "1": "Pn", "2": "Wt", "3": "Śr", "4": "Cz", "5": "Pt", "6": "Sb", "7": "Nd"}
    return f"{day_names[dow]} {hm}" if dow in day_names else expr
+
+
async def run_remote_trigger_list() -> list[dict]:
    """Fetch all scheduled triggers via `claude -p`. Returns list of trigger dicts.

    Best effort: any failure (non-zero exit, timeout, unparseable output)
    yields an empty list after a warning log.
    """
    prompt = (
        "List all my scheduled remote triggers using RemoteTrigger tool with action='list'. "
        "Return ONLY a JSON array where each item has: id (string), name (string), "
        "cron_expression (string), enabled (boolean). No other text."
    )
    cmd = ["claude", "-p", prompt, "--allowedTools", "RemoteTrigger", "--output-format", "json"]
    try:
        proc = await asyncio.to_thread(
            subprocess.run, cmd, capture_output=True, text=True, timeout=30
        )
        if proc.returncode != 0:
            logger.warning(f"run_remote_trigger_list failed: {proc.stderr[:200]}")
            return []
        payload = json.loads(proc.stdout).get("result", "[]").strip()
        # Models sometimes wrap the JSON array in a markdown code fence.
        if payload.startswith("```"):
            payload = "\n".join(payload.split("\n")[1:])
        payload = payload.rstrip("`").strip()
        return json.loads(payload)
    except Exception as e:
        logger.warning(f"run_remote_trigger_list exception: {e}")
        return []
+
+
async def run_remote_trigger_run(trigger_id: str) -> bool:
    """Trigger a scheduled task to run immediately via `claude -p`.

    Returns True on a zero exit code, False on failure or any exception.
    """
    prompt = f"Run the scheduled remote trigger with ID '{trigger_id}' immediately using RemoteTrigger tool with action='run'."
    cmd = ["claude", "-p", prompt, "--allowedTools", "RemoteTrigger", "--output-format", "json"]
    try:
        proc = await asyncio.to_thread(
            subprocess.run, cmd, capture_output=True, text=True, timeout=30
        )
    except Exception as e:
        logger.warning(f"run_remote_trigger_run exception: {e}")
        return False
    return proc.returncode == 0
+
+
async def run_remote_trigger_toggle(trigger_id: str, enable: bool) -> bool:
    """Enable or disable a scheduled trigger via `claude -p`.

    Returns True on a zero exit code, False on failure or any exception.
    """
    state = "enabled" if enable else "disabled"
    prompt = (
        f"Update the scheduled remote trigger with ID '{trigger_id}' using RemoteTrigger tool "
        f"with action='update'. Set enabled={str(enable).lower()}. "
        f"The trigger should be {state} after this call."
    )
    cmd = ["claude", "-p", prompt, "--allowedTools", "RemoteTrigger", "--output-format", "json"]
    try:
        proc = await asyncio.to_thread(
            subprocess.run, cmd, capture_output=True, text=True, timeout=30
        )
    except Exception as e:
        logger.warning(f"run_remote_trigger_toggle exception: {e}")
        return False
    return proc.returncode == 0
+
+
def build_automations_list(triggers: list[dict]) -> tuple[str, InlineKeyboardMarkup]:
    """Build the automations list view: text summary + inline keyboard.

    Empty trigger list yields a "create your first automation" prompt;
    otherwise one keyboard row per trigger (open card / run / toggle)
    plus a footer row (new automation / refresh).
    """
    if not triggers:
        markup = InlineKeyboardMarkup(
            [[InlineKeyboardButton("+ Stwórz pierwszą automację", callback_data="auto_new")]]
        )
        return "🤖 Automacje\n\nNie masz jeszcze żadnych automacji.", markup

    active_count = sum(1 for trig in triggers if trig.get("enabled", True))
    summary = "\n".join(
        f"{'●' if trig.get('enabled', True) else '○'} {trig['name']}" for trig in triggers
    )
    text = f"🤖 Twoje automacje ({len(triggers)}) · {active_count} aktywnych\n\n{summary}"

    rows = []
    for trig in triggers:
        tid = trig["id"]
        enabled = trig.get("enabled", True)
        rows.append([
            InlineKeyboardButton(
                f"{'●' if enabled else '○'} {trig['name']}", callback_data=f"auto_card_{tid}"
            ),
            InlineKeyboardButton("▶ Run", callback_data=f"auto_run_{tid}"),
            InlineKeyboardButton(
                "⏸" if enabled else "▶",
                callback_data=f"auto_toggle_off_{tid}" if enabled else f"auto_toggle_on_{tid}",
            ),
        ])
    rows.append([
        InlineKeyboardButton("+ Nowa automacja", callback_data="auto_new"),
        InlineKeyboardButton("🔄", callback_data="auto_refresh"),
    ])
    return text, InlineKeyboardMarkup(rows)
+
+
def build_automation_card(trigger: dict, style: str = "full") -> tuple[str, InlineKeyboardMarkup]:
    """Build the card view for a single trigger.

    style="compact" renders a one-line status card; anything else renders
    the full card with schedule and status sections. Deletion links out to
    the Claude web UI (no delete callback).
    """
    tid = trigger["id"]
    name = trigger["name"]
    enabled = trigger.get("enabled", True)
    schedule_human = cron_to_human(trigger.get("cron_expression", ""))
    status_icon, status_text = ("●", "Aktywna") if enabled else ("○", "Wstrzymana")
    toggle_label = "⏸ Pause" if enabled else "▶ Resume"
    toggle_cb = f"auto_toggle_off_{tid}" if enabled else f"auto_toggle_on_{tid}"

    if style == "compact":
        text = f"🤖 {name}\n{status_icon} {status_text} · {schedule_human}"
        keyboard = [
            [
                InlineKeyboardButton("▶ Run now", callback_data=f"auto_run_{tid}"),
                InlineKeyboardButton(toggle_label, callback_data=toggle_cb),
                InlineKeyboardButton("✎ Edit", callback_data=f"auto_edit_{tid}"),
                InlineKeyboardButton("✕", url="https://claude.ai/code/scheduled"),
            ],
            [InlineKeyboardButton("← Wróć", callback_data="auto_list")],
        ]
    else:  # full card
        text = f"🤖 {name}\n\nHARMONOGRAM\n{schedule_human}\n\nSTATUS\n{status_icon} {status_text}"
        keyboard = [
            [
                InlineKeyboardButton("▶ Run now", callback_data=f"auto_run_{tid}"),
                InlineKeyboardButton(toggle_label, callback_data=toggle_cb),
            ],
            [
                InlineKeyboardButton("✎ Edit prompt", callback_data=f"auto_edit_{tid}"),
                InlineKeyboardButton("✕ Usuń →", url="https://claude.ai/code/scheduled"),
            ],
            [InlineKeyboardButton("← Wróć do listy", callback_data="auto_list")],
        ]

    return text, InlineKeyboardMarkup(keyboard)
+
+
# ============ Command Handlers ============
async def cmd_start(update: Update, context: ContextTypes.DEFAULT_TYPE):
@@ -611,14 +1102,14 @@ async def cmd_start(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not should_handle_message(update.message.message_thread_id):
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
await update.message.reply_text(
"Claude Voice Assistant\n\n"
"Send me a voice message and I'll process it with Claude.\n\n"
"Commands:\n"
+ "/setup - Configure API credentials\n"
"/new [name] - Start new session\n"
"/continue - Resume last session\n"
"/sessions - List all sessions\n"
@@ -633,9 +1124,8 @@ async def cmd_new(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not should_handle_message(update.message.message_thread_id):
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
user_id = update.effective_user.id
state = get_user_state(user_id)
@@ -651,14 +1141,74 @@ async def cmd_new(update: Update, context: ContextTypes.DEFAULT_TYPE):
save_state()
async def cmd_cancel(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /cancel command — interrupt the user's active Claude request.

    Sets the per-user cancel event that call_claude polls between SDK
    messages; replies with a status message either way.
    """
    if not should_handle_message(update.message.message_thread_id):
        return
    if not _is_authorized(update):
        return

    event = cancel_events.get(update.effective_user.id)
    # An already-set or missing event means nothing is in flight.
    if event is None or event.is_set():
        await update.message.reply_text("No active request to cancel.")
        return
    event.set()
    await update.message.reply_text("Cancelling...")
+
+
async def cmd_compact(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /compact command — summarize the current session and start fresh.

    Asks Claude for a full conversation summary, stores it as
    state["compact_summary"] to be prepended to the next message, and
    clears the current session id.
    """
    if not should_handle_message(update.message.message_thread_id):
        return
    if not _is_authorized(update):
        return

    user_id = update.effective_user.id
    state = get_user_state(user_id)
    settings = get_user_settings(user_id)

    if not state.get("current_session"):
        await update.message.reply_text("No active session to compact. Start a conversation first.")
        return

    processing_msg = await update.message.reply_text("Compacting session...")

    try:
        summary, _, _ = await call_claude(
            "Summarize this entire conversation concisely but completely. Include: key topics, decisions, important files/code mentioned, and any ongoing work. Preserve all context needed to continue seamlessly.",
            session_id=state["current_session"],
            continue_last=True,
            include_megg=False,
            user_settings=settings,
            update=update,
            context=context,
        )

        # Stash the summary for the next message, then drop the session.
        state["compact_summary"] = summary
        state["current_session"] = None
        save_state()

        preview = summary if len(summary) <= 400 else summary[:400] + "..."
        await processing_msg.edit_text(
            f"Session compacted. Summary:\n\n{preview}\n\nSend your next message to continue with this context."
        )
    except Exception as e:
        logger.error(f"Error in cmd_compact: {e}")
        await processing_msg.edit_text(f"Error compacting session: {e}")
+
+
async def cmd_continue(update: Update, context: ContextTypes.DEFAULT_TYPE):
"""Handle /continue command - resume last session."""
if not should_handle_message(update.message.message_thread_id):
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
user_id = update.effective_user.id
state = get_user_state(user_id)
@@ -674,9 +1224,8 @@ async def cmd_sessions(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not should_handle_message(update.message.message_thread_id):
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
user_id = update.effective_user.id
state = get_user_state(user_id)
@@ -698,9 +1247,8 @@ async def cmd_switch(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not should_handle_message(update.message.message_thread_id):
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
if not context.args:
await update.message.reply_text("Usage: /switch ")
@@ -728,11 +1276,10 @@ async def cmd_status(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not should_handle_message(update.message.message_thread_id):
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
- debug(f"STATUS command from user {update.effective_user.id}")
+ logger.debug(f"STATUS command from user {update.effective_user.id}")
user_id = update.effective_user.id
state = get_user_state(user_id)
@@ -750,26 +1297,42 @@ async def cmd_health(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not should_handle_message(update.message.message_thread_id):
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
- debug(f"HEALTH command from user {update.effective_user.id}, chat {update.effective_chat.id}, topic {update.message.message_thread_id}")
+ logger.debug(f"HEALTH command from user {update.effective_user.id}, chat {update.effective_chat.id}, topic {update.message.message_thread_id}")
status = []
status.append("=== Health Check ===\n")
- # Check ElevenLabs
- try:
- test_audio = elevenlabs.text_to_speech.convert(
- text="test",
- voice_id=ELEVENLABS_VOICE_ID,
- model_id="eleven_turbo_v2_5",
- )
- size = sum(len(c) for c in test_audio if isinstance(c, bytes))
- status.append(f"ElevenLabs TTS: OK ({size} bytes, turbo_v2_5)")
- except Exception as e:
- status.append(f"ElevenLabs TTS: FAILED - {e}")
+ # TTS provider check
+ status.append(f"TTS Provider: {TTS_PROVIDER}")
+ if TTS_PROVIDER == "elevenlabs":
+ try:
+ test_audio = elevenlabs.text_to_speech.convert(
+ text="test",
+ voice_id=ELEVENLABS_VOICE_ID,
+ model_id="eleven_turbo_v2_5",
+ )
+ size = sum(len(c) for c in test_audio if isinstance(c, bytes))
+ status.append(f"ElevenLabs TTS: OK ({size} bytes, turbo_v2_5, voice={ELEVENLABS_VOICE_ID[:8]}...)")
+ except Exception as e:
+ status.append(f"ElevenLabs TTS: FAILED - {e}")
+ elif TTS_PROVIDER == "openai":
+ try:
+ test_audio = openai_client.audio.speech.create(
+ model=OPENAI_TTS_MODEL,
+ voice=OPENAI_VOICE_ID,
+ input="test",
+ )
+ size = len(b"".join(test_audio.iter_bytes()))
+ status.append(f"OpenAI TTS: OK ({size} bytes, {OPENAI_TTS_MODEL}, voice={OPENAI_VOICE_ID})")
+ except Exception as e:
+ status.append(f"OpenAI TTS: FAILED - {e}")
+ else:
+ status.append("TTS: No provider configured")
+
+ status.append(f"STT Provider: {STT_PROVIDER}")
# Check Claude
try:
@@ -778,7 +1341,7 @@ async def cmd_health(update: Update, context: ContextTypes.DEFAULT_TYPE):
capture_output=True,
text=True,
timeout=30,
- cwd="/home/dev"
+ cwd=CLAUDE_WORKING_DIR
)
if result.returncode == 0:
status.append("Claude Code: OK")
@@ -793,6 +1356,9 @@ async def cmd_health(update: Update, context: ContextTypes.DEFAULT_TYPE):
status.append(f"\nSessions: {len(state['sessions'])}")
status.append(f"Current: {state['current_session'][:8] if state['current_session'] else 'None'}...")
+ # MCP servers status
+ status.extend(get_mcp_status(CLAUDE_SETTINGS_FILE))
+
# Sandbox info
status.append(f"\nSandbox: {SANDBOX_DIR}")
status.append(f"Sandbox exists: {Path(SANDBOX_DIR).exists()}")
@@ -810,9 +1376,8 @@ async def cmd_settings(update: Update, context: ContextTypes.DEFAULT_TYPE):
if not should_handle_message(update.message.message_thread_id):
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
user_id = update.effective_user.id
settings = get_user_settings(user_id)
@@ -822,21 +1387,24 @@ async def cmd_settings(update: Update, context: ContextTypes.DEFAULT_TYPE):
speed = settings["voice_speed"]
mode = settings.get("mode", "go_all")
mode_display = "Go All" if mode == "go_all" else "Approve"
- watch_status = "ON" if settings.get("watch_enabled", False) else "OFF"
+ watch_mode_val = settings.get("watch_mode", "off").upper()
+ card_style = settings.get("automation_card_style", "full")
+ card_style_display = "Pełna" if card_style == "full" else "Kompakt"
message = (
f"Settings:\n\n"
f"Mode: {mode_display}\n"
- f"Watch: {watch_status}\n"
+ f"Watch: {watch_mode_val}\n"
f"Audio: {audio_status}\n"
- f"Voice Speed: {speed}x"
+ f"Voice Speed: {speed}x\n"
+ f"Auto karta: {card_style_display}"
)
# Build inline keyboard
keyboard = [
[
InlineKeyboardButton(f"Mode: {mode_display}", callback_data="setting_mode_toggle"),
- InlineKeyboardButton(f"Watch: {watch_status}", callback_data="setting_watch_toggle"),
+ InlineKeyboardButton(f"Watch: {watch_mode_val}", callback_data="setting_watch_cycle"),
],
[InlineKeyboardButton(f"Audio: {audio_status}", callback_data="setting_audio_toggle")],
[
@@ -846,16 +1414,326 @@ async def cmd_settings(update: Update, context: ContextTypes.DEFAULT_TYPE):
InlineKeyboardButton("1.1x", callback_data="setting_speed_1.1"),
InlineKeyboardButton("1.2x", callback_data="setting_speed_1.2"),
],
+ [InlineKeyboardButton(f"Auto karta: {card_style_display}", callback_data="setting_card_style_toggle")],
]
reply_markup = InlineKeyboardMarkup(keyboard)
await update.message.reply_text(message, reply_markup=reply_markup)
async def cmd_automations(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /automations command — show the scheduled-tasks list.

    Sends a placeholder message right away, fetches the trigger list from
    the remote scheduler, then edits the placeholder into the rendered
    list. If the edit fails (e.g. Telegram rejects it), the list is sent
    as a fresh message so the user still gets a response.
    """
    if not should_handle_message(update.message.message_thread_id):
        return
    if not _is_authorized(update):
        return

    placeholder = await update.message.reply_text("⏳ Ładuję automacje...")

    trigger_rows = await run_remote_trigger_list()
    body, keyboard = build_automations_list(trigger_rows)

    try:
        await placeholder.edit_text(body, reply_markup=keyboard)
    except Exception as exc:
        logger.warning(f"cmd_automations edit error: {exc}")
        await update.message.reply_text(body, reply_markup=keyboard)
+
+
async def handle_automations_callback(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle all auto_* callback button taps.

    Dispatches on the callback data prefix:
      auto_list / auto_refresh  — re-render the automations list
      auto_card_{id}            — open one automation's detail card
      auto_run_{id}             — trigger an immediate run
      auto_toggle_on_/off_{id}  — enable/disable, then refresh the card
      auto_new                  — prompt the user to describe a new automation
      auto_edit_{id}            — prompt the user to describe an edit

    Unauthorized taps are answered (to stop the client spinner) and ignored.
    """
    query = update.callback_query

    if not _is_authorized(update):
        await query.answer()
        return

    data = query.data
    user_id = update.effective_user.id
    settings = get_user_settings(user_id)
    # Card rendering style ("full"/"compact") is a per-user setting.
    card_style = settings.get("automation_card_style", "full")

    # ── Back to list ──────────────────────────────────────────
    if data in ("auto_list", "auto_refresh"):
        await query.answer()
        # Show an interim "loading" state before the remote fetch.
        await query.edit_message_text("⏳ Ładuję automacje...")
        triggers = await run_remote_trigger_list()
        text, markup = build_automations_list(triggers)
        try:
            await query.edit_message_text(text, reply_markup=markup)
        except Exception as e:
            # Typically "message is not modified" — safe to just log.
            logger.warning(f"auto_list edit error: {e}")

    # ── Open card ─────────────────────────────────────────────
    elif data.startswith("auto_card_"):
        await query.answer()
        trigger_id = data[len("auto_card_"):]
        await query.edit_message_text("⏳...")
        # The remote API has no single-trigger lookup here; fetch the full
        # list and pick the matching entry.
        triggers = await run_remote_trigger_list()
        trigger = next((t for t in triggers if t["id"] == trigger_id), None)
        if trigger is None:
            await query.edit_message_text("❌ Nie znaleziono automacji.")
            return
        text, markup = build_automation_card(trigger, style=card_style)
        try:
            await query.edit_message_text(text, reply_markup=markup)
        except Exception as e:
            logger.warning(f"auto_card edit error: {e}")

    # ── Run now ───────────────────────────────────────────────
    elif data.startswith("auto_run_"):
        trigger_id = data[len("auto_run_"):]
        # Answer with toast text so the user sees immediate feedback.
        await query.answer("▶ Uruchamiam...")
        ok = await run_remote_trigger_run(trigger_id)
        status = "✓ Uruchomiono!" if ok else "❌ Błąd uruchamiania"
        try:
            # Append the status to the existing card, keeping its buttons.
            await query.edit_message_text(query.message.text + f"\n\n{status}", reply_markup=query.message.reply_markup)
        except Exception:
            pass

    # ── Toggle enable/disable ─────────────────────────────────
    elif data.startswith("auto_toggle_"):
        # format: auto_toggle_off_{id} or auto_toggle_on_{id}
        rest = data[len("auto_toggle_"):]
        enable = rest.startswith("on_")
        trigger_id = rest[len("on_"):] if enable else rest[len("off_"):]
        await query.answer()
        ok = await run_remote_trigger_toggle(trigger_id, enable=enable)
        if ok:
            # Refresh card
            triggers = await run_remote_trigger_list()
            trigger = next((t for t in triggers if t["id"] == trigger_id), None)
            if trigger:
                text, markup = build_automation_card(trigger, style=card_style)
                await query.edit_message_text(text, reply_markup=markup)
            else:
                # Trigger disappeared after toggle — show list instead
                triggers2 = await run_remote_trigger_list()
                text2, markup2 = build_automations_list(triggers2)
                await query.edit_message_text(text2, reply_markup=markup2)
        else:
            try:
                await query.edit_message_text("❌ Błąd zmiany stanu automacji.", reply_markup=query.message.reply_markup)
            except Exception:
                pass

    # ── New automation ────────────────────────────────────────
    elif data == "auto_new":
        await query.answer()
        # Conversational flow: the user's next message describes the
        # automation and is handled by the normal text/voice handlers.
        await query.edit_message_text(
            '💬 Opisz automację głosem lub tekstem.\n\n'
            'Np. „stwórz daily standup o 8 rano sprawdzający PR-y na GitHubie"'
        )

    # ── Edit prompt (conversational) ──────────────────────────
    elif data.startswith("auto_edit_"):
        await query.answer()
        # NOTE(review): trigger_id is extracted but not used or stored, so
        # the follow-up message has no record of WHICH automation is being
        # edited — presumably the conversational handler relies on context;
        # verify against the downstream flow.
        trigger_id = data[len("auto_edit_"):]
        await query.edit_message_text(
            '✎ Co chcesz zmienić w tej automacji?\n\n'
            'Opisz głosem lub tekstem — np. „zmień godzinę na 9 rano" albo „dodaj sprawdzanie CI"'
        )
+
+
+# ============ Token Configuration Commands ============
+
async def cmd_setup(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /setup command - show API credentials status.

    Admin-only. Reports which credentials are available — either saved via
    the /claude_token, /elevenlabs_key, /openai_key commands or present as
    environment variables — and which TTS/STT providers are active.
    Key values themselves are never echoed back.
    """
    if not should_handle_message(update.message.message_thread_id):
        return

    if not _is_admin(update):
        return

    creds = load_credentials()

    # Check what's configured (saved creds or env vars)
    claude_set = bool(creds.get("claude_token") or os.getenv("ANTHROPIC_API_KEY"))
    elevenlabs_set = bool(creds.get("elevenlabs_key") or os.getenv("ELEVENLABS_API_KEY"))
    openai_set = bool(creds.get("openai_key") or os.getenv("OPENAI_API_KEY"))

    claude_status = "✓ Set" if claude_set else "✗ Not set"
    elevenlabs_status = "✓ Set" if elevenlabs_set else "✗ Not set (optional)"
    openai_status = "✓ Set" if openai_set else "✗ Not set (optional)"

    # NOTE(review): legacy Telegram "Markdown" parse mode uses single
    # asterisks for bold — confirm that "**...**" renders as intended here.
    await update.message.reply_text(
        f"**API Credentials**\n\n"
        f"Claude: {claude_status}\n"
        f"ElevenLabs: {elevenlabs_status}\n"
        f"OpenAI: {openai_status}\n\n"
        f"**Active providers:**\n"
        f"TTS: `{TTS_PROVIDER}`"
        + (f" ({OPENAI_TTS_MODEL} / {OPENAI_VOICE_ID})" if TTS_PROVIDER == "openai" else f" ({ELEVENLABS_VOICE_ID[:8]}...)" if TTS_PROVIDER == "elevenlabs" else "") + "\n"
        f"STT: `{STT_PROVIDER}`"
        + (f" ({OPENAI_STT_MODEL})" if STT_PROVIDER == "openai" else " (scribe_v1)" if STT_PROVIDER == "elevenlabs" else "") + "\n\n"
        f"**To configure:**\n"
        # Fixed: restore the argument placeholders that were missing from
        # the usage lines (they read "`/claude_token `" with no argument).
        f"`/claude_token <token>` - Set Anthropic API key\n"
        f"`/elevenlabs_key <key>` - Set ElevenLabs key\n"
        f"`/openai_key <key>` - Set OpenAI key\n\n"
        f"_Messages with keys are deleted immediately for security._",
        parse_mode="Markdown"
    )
+
+
async def cmd_claude_token(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /claude_token command - set Claude OAuth token.

    Admin-only. The incoming message is deleted immediately because it
    contains the secret. The token is validated by prefix, persisted via
    save_credentials(), and applied to the current process environment so
    it takes effect without a restart.
    """
    if not should_handle_message(update.message.message_thread_id):
        return

    if not _is_admin(update):
        return

    # Delete the message immediately (contains sensitive token)
    thread_id = update.message.message_thread_id
    try:
        await update.message.delete()
    except Exception as e:
        # Bot may lack delete rights in this chat; proceed anyway.
        logger.debug(f"Could not delete token message: {e}")

    # Get token from args
    if not context.args:
        # Fixed: restore the <token> placeholder missing from the usage text.
        await update.effective_chat.send_message(
            "Usage: `/claude_token <token>`\n\n"
            "Get token by running `claude setup-token` in your terminal.",
            message_thread_id=thread_id,
            parse_mode="Markdown"
        )
        return

    token = " ".join(context.args).strip()

    if not token.startswith("sk-ant-"):
        await update.effective_chat.send_message(
            "❌ Invalid token format. Token should start with `sk-ant-`",
            message_thread_id=thread_id,
            parse_mode="Markdown"
        )
        return

    # Save token
    creds = load_credentials()
    creds["claude_token"] = token
    save_credentials(creds)

    # Apply immediately
    os.environ["CLAUDE_CODE_OAUTH_TOKEN"] = token

    await update.effective_chat.send_message(
        "✓ Claude token saved and applied!",
        message_thread_id=thread_id
    )
+
+
async def cmd_elevenlabs_key(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /elevenlabs_key command - set ElevenLabs API key.

    Admin-only. Deletes the incoming message (it contains the secret),
    applies a minimal length sanity check, persists the key, and rebuilds
    the module-level ElevenLabs client so the key takes effect immediately.
    """
    global elevenlabs, ELEVENLABS_API_KEY

    if not should_handle_message(update.message.message_thread_id):
        return

    if not _is_admin(update):
        return

    # Delete the message immediately (contains sensitive key)
    thread_id = update.message.message_thread_id
    try:
        await update.message.delete()
    except Exception as e:
        # Bot may lack delete rights in this chat; proceed anyway.
        logger.debug(f"Could not delete key message: {e}")

    # Get key from args
    if not context.args:
        # Fixed: restore the <key> placeholder missing from the usage text.
        await update.effective_chat.send_message(
            "Usage: `/elevenlabs_key <key>`\n\n"
            "Get key from elevenlabs.io/app/settings/api-keys",
            message_thread_id=thread_id,
            parse_mode="Markdown"
        )
        return

    key = " ".join(context.args).strip()

    if len(key) < 20:
        await update.effective_chat.send_message(
            "❌ Invalid key format. Key seems too short.",
            message_thread_id=thread_id
        )
        return

    # Save key
    creds = load_credentials()
    creds["elevenlabs_key"] = key
    save_credentials(creds)

    # Apply immediately
    ELEVENLABS_API_KEY = key
    elevenlabs = ElevenLabs(api_key=key)

    await update.effective_chat.send_message(
        "✓ ElevenLabs API key saved and applied!",
        message_thread_id=thread_id
    )
+
+
async def cmd_openai_key(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle /openai_key command - set OpenAI API key.

    Admin-only. Deletes the incoming message (it contains the secret),
    validates the key prefix, persists the key, rebuilds the OpenAI client,
    and re-resolves the active TTS/STT providers since an OpenAI key may
    newly enable them.
    """
    global openai_client, TTS_PROVIDER, STT_PROVIDER

    if not should_handle_message(update.message.message_thread_id):
        return

    if not _is_admin(update):
        return

    # Delete the message immediately (contains sensitive key)
    thread_id = update.message.message_thread_id
    try:
        await update.message.delete()
    except Exception as e:
        # Bot may lack delete rights in this chat; proceed anyway.
        logger.debug(f"Could not delete key message: {e}")

    if not context.args:
        # Fixed: restore the <key> placeholder missing from the usage text.
        await update.effective_chat.send_message(
            "Usage: `/openai_key <key>`\n\n"
            "Get key from platform.openai.com/api-keys",
            message_thread_id=thread_id,
            parse_mode="Markdown"
        )
        return

    key = " ".join(context.args).strip()

    if not key.startswith("sk-"):
        await update.effective_chat.send_message(
            "❌ Invalid key format. OpenAI keys start with `sk-`",
            message_thread_id=thread_id,
            parse_mode="Markdown"
        )
        return

    # Save key
    creds = load_credentials()
    creds["openai_key"] = key
    save_credentials(creds)

    # Apply immediately
    os.environ["OPENAI_API_KEY"] = key
    openai_client = OpenAIClient(api_key=key)
    # An OpenAI key can change which providers are usable — re-resolve.
    TTS_PROVIDER = resolve_provider("TTS_PROVIDER")
    STT_PROVIDER = resolve_provider("STT_PROVIDER")

    await update.effective_chat.send_message(
        f"✓ OpenAI API key saved and applied!\n"
        f"TTS: `{TTS_PROVIDER}` | STT: `{STT_PROVIDER}`",
        message_thread_id=thread_id,
        parse_mode="Markdown"
    )
+
+
async def handle_settings_callback(update: Update, context: ContextTypes.DEFAULT_TYPE):
"""Handle settings button callbacks."""
query = update.callback_query
- debug(f"SETTINGS CALLBACK received: {query.data} from user {update.effective_user.id}")
+ logger.debug(f"SETTINGS CALLBACK received: {query.data} from user {update.effective_user.id}")
user_id = update.effective_user.id
settings = get_user_settings(user_id)
@@ -864,18 +1742,19 @@ async def handle_settings_callback(update: Update, context: ContextTypes.DEFAULT
if callback_data == "setting_audio_toggle":
settings["audio_enabled"] = not settings["audio_enabled"]
save_settings()
- debug(f"Audio toggled to: {settings['audio_enabled']}")
+ logger.debug(f"Audio toggled to: {settings['audio_enabled']}")
elif callback_data == "setting_mode_toggle":
current_mode = settings.get("mode", "go_all")
settings["mode"] = "approve" if current_mode == "go_all" else "go_all"
save_settings()
- debug(f"Mode toggled to: {settings['mode']}")
+ logger.debug(f"Mode toggled to: {settings['mode']}")
- elif callback_data == "setting_watch_toggle":
- settings["watch_enabled"] = not settings.get("watch_enabled", False)
+ elif callback_data == "setting_watch_cycle":
+ cycle = {"off": "live", "live": "debug", "debug": "off"}
+ settings["watch_mode"] = cycle.get(settings.get("watch_mode", "off"), "off")
save_settings()
- debug(f"Watch toggled to: {settings['watch_enabled']}")
+ logger.debug(f"Watch mode cycled to: {settings['watch_mode']}")
elif callback_data.startswith("setting_speed_"):
try:
@@ -889,21 +1768,29 @@ async def handle_settings_callback(update: Update, context: ContextTypes.DEFAULT
settings["voice_speed"] = speed
save_settings()
- debug(f"Speed set to: {speed}")
+ logger.debug(f"Speed set to: {speed}")
+
+ elif callback_data == "setting_card_style_toggle":
+ current = settings.get("automation_card_style", "full")
+ settings["automation_card_style"] = "compact" if current == "full" else "full"
+ save_settings()
+ logger.debug(f"Card style toggled to: {settings['automation_card_style']}")
# Build updated settings menu
audio_status = "ON" if settings["audio_enabled"] else "OFF"
speed = settings["voice_speed"]
mode = settings.get("mode", "go_all")
mode_display = "Go All" if mode == "go_all" else "Approve"
- watch_status = "ON" if settings.get("watch_enabled", False) else "OFF"
+ watch_mode_val = settings.get("watch_mode", "off").upper()
+ card_style = settings.get("automation_card_style", "full")
+ card_style_display = "Pełna" if card_style == "full" else "Kompakt"
- message = f"Settings:\n\nMode: {mode_display}\nWatch: {watch_status}\nAudio: {audio_status}\nVoice Speed: {speed}x"
+ message = f"Settings:\n\nMode: {mode_display}\nWatch: {watch_mode_val}\nAudio: {audio_status}\nVoice Speed: {speed}x\nAuto karta: {card_style_display}"
keyboard = [
[
InlineKeyboardButton(f"Mode: {mode_display}", callback_data="setting_mode_toggle"),
- InlineKeyboardButton(f"Watch: {watch_status}", callback_data="setting_watch_toggle"),
+ InlineKeyboardButton(f"Watch: {watch_mode_val}", callback_data="setting_watch_cycle"),
],
[InlineKeyboardButton(f"Audio: {audio_status}", callback_data="setting_audio_toggle")],
[
@@ -913,13 +1800,14 @@ async def handle_settings_callback(update: Update, context: ContextTypes.DEFAULT
InlineKeyboardButton("1.1x", callback_data="setting_speed_1.1"),
InlineKeyboardButton("1.2x", callback_data="setting_speed_1.2"),
],
+ [InlineKeyboardButton(f"Auto karta: {card_style_display}", callback_data="setting_card_style_toggle")],
]
reply_markup = InlineKeyboardMarkup(keyboard)
try:
await query.edit_message_text(message, reply_markup=reply_markup)
except Exception as e:
- debug(f"Error updating settings menu: {e}")
+ logger.debug(f"Error updating settings menu: {e}")
await query.answer()
@@ -929,14 +1817,14 @@ async def handle_approval_callback(update: Update, context: ContextTypes.DEFAULT
query = update.callback_query
callback_data = query.data
- debug(f">>> APPROVAL CALLBACK received: {callback_data}")
+ logger.debug(f">>> APPROVAL CALLBACK received: {callback_data}")
# Answer the callback immediately to prevent Telegram timeout
await query.answer()
if callback_data.startswith("approve_"):
approval_id = callback_data.replace("approve_", "")
- debug(f">>> Looking for approval_id: {approval_id} in {list(pending_approvals.keys())}")
+ logger.debug(f">>> Looking for approval_id: {approval_id} in {list(pending_approvals.keys())}")
if approval_id in pending_approvals:
# Verify that the user clicking is the one who requested
if update.effective_user.id != pending_approvals[approval_id].get("user_id"):
@@ -945,17 +1833,17 @@ async def handle_approval_callback(update: Update, context: ContextTypes.DEFAULT
tool_name = pending_approvals[approval_id]["tool_name"]
pending_approvals[approval_id]["approved"] = True
- debug(f">>> Setting event for {approval_id}")
+ logger.debug(f">>> Setting event for {approval_id}")
pending_approvals[approval_id]["event"].set()
- debug(f">>> Event set, updating message")
+ logger.debug(f">>> Event set, updating message")
await query.edit_message_text(f"✓ Approved: {tool_name}")
else:
- debug(f">>> Approval {approval_id} not found (expired)")
+ logger.debug(f">>> Approval {approval_id} not found (expired)")
await query.edit_message_text("Approval expired")
elif callback_data.startswith("reject_"):
approval_id = callback_data.replace("reject_", "")
- debug(f">>> Looking for approval_id: {approval_id} in {list(pending_approvals.keys())}")
+ logger.debug(f">>> Looking for approval_id: {approval_id} in {list(pending_approvals.keys())}")
if approval_id in pending_approvals:
# Verify that the user clicking is the one who requested
if update.effective_user.id != pending_approvals[approval_id].get("user_id"):
@@ -964,12 +1852,12 @@ async def handle_approval_callback(update: Update, context: ContextTypes.DEFAULT
tool_name = pending_approvals[approval_id]["tool_name"]
pending_approvals[approval_id]["approved"] = False
- debug(f">>> Setting event for {approval_id} (reject)")
+ logger.debug(f">>> Setting event for {approval_id} (reject)")
pending_approvals[approval_id]["event"].set()
- debug(f">>> Event set, updating message")
+ logger.debug(f">>> Event set, updating message")
await query.edit_message_text(f"✗ Rejected: {tool_name}")
else:
- debug(f">>> Approval {approval_id} not found (expired)")
+ logger.debug(f">>> Approval {approval_id} not found (expired)")
await query.edit_message_text("Approval expired")
@@ -981,16 +1869,15 @@ async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
if update.effective_user.is_bot is True:
return
- debug(f"VOICE received from user {update.effective_user.id}, chat {update.effective_chat.id}, topic {update.message.message_thread_id}")
+ logger.debug(f"VOICE received from user {update.effective_user.id}, chat {update.effective_chat.id}, topic {update.message.message_thread_id}")
# Topic filtering - ignore messages not in our topic
if not should_handle_message(update.message.message_thread_id):
- debug(f"Ignoring voice message - not in our topic (configured: {TOPIC_ID})")
+ logger.debug(f"Ignoring voice message - not in our topic (configured: {TOPIC_ID})")
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
user_id = update.effective_user.id
@@ -1003,9 +1890,12 @@ async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
state = get_user_state(user_id)
settings = get_user_settings(user_id)
- # Acknowledge receipt
+ # Typing indicator first — signals immediately that bot is alive
+ await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)
+ typing_stop = asyncio.Event()
+ asyncio.ensure_future(typing_loop(update, context, typing_stop))
processing_msg = await update.message.reply_text("Processing voice message...")
- debug("Sent processing acknowledgement")
+ logger.debug("Sent processing acknowledgement")
try:
# Download voice
@@ -1020,8 +1910,14 @@ async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
await processing_msg.edit_text(text)
return
+ # Prepend compact summary if pending from /compact
+ compact_summary = state.pop("compact_summary", None)
+ if compact_summary:
+ text = f"\n{compact_summary}\n\n\n{text}"
+ save_state()
+
# Show what was heard
- await processing_msg.edit_text(f"Heard: {text[:100]}{'...' if len(text) > 100 else ''}\n\nAsking Claude...")
+ await processing_msg.edit_text(f"Heard: {text[:100]}{'...' if len(text) > 100 else ''}\n\nToris thinking...")
# Call Claude with user settings
continue_last = state["current_session"] is not None
@@ -1032,6 +1928,7 @@ async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
user_settings=settings,
update=update,
context=context,
+ processing_msg=processing_msg,
)
# Update session state
@@ -1042,17 +1939,21 @@ async def handle_voice(update: Update, context: ContextTypes.DEFAULT_TYPE):
save_state()
# Send text response (split if too long)
- await send_long_message(update, processing_msg, response)
+ tool_log = metadata.get("tool_log", [])
+ await finalize_response(update, processing_msg, response)
# Generate and send voice response if audio enabled
if settings["audio_enabled"]:
- audio = await text_to_speech(response, speed=settings["voice_speed"])
+ tts_text = response[:MAX_VOICE_CHARS] if len(response) > MAX_VOICE_CHARS else response
+ audio = await text_to_speech(tts_text, speed=settings["voice_speed"])
if audio:
await update.message.reply_voice(voice=audio)
except Exception as e:
- debug(f"Error in handle_voice: {e}")
+ logger.error(f"Error in handle_voice: {e}")
await processing_msg.edit_text(f"Error: {e}")
+ finally:
+ typing_stop.set()
async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
@@ -1061,16 +1962,15 @@ async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
if update.effective_user.is_bot is True:
return
- debug(f"TEXT received: '{update.message.text[:50]}' from user {update.effective_user.id}, chat {update.effective_chat.id}, topic {update.message.message_thread_id}")
+ logger.debug(f"TEXT received: '{update.message.text[:50]}' from user {update.effective_user.id}, chat {update.effective_chat.id}, topic {update.message.message_thread_id}")
# Topic filtering - ignore messages not in our topic
if not should_handle_message(update.message.message_thread_id):
- debug(f"Ignoring text message - not in our topic (configured: {TOPIC_ID})")
+ logger.debug(f"Ignoring text message - not in our topic (configured: {TOPIC_ID})")
return
- # Chat ID authentication
- if ALLOWED_CHAT_ID != 0 and update.effective_chat.id != ALLOWED_CHAT_ID:
- return # Silently ignore unauthorized chats
+ if not _is_authorized(update):
+ return
user_id = update.effective_user.id
@@ -1084,8 +1984,16 @@ async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
settings = get_user_settings(user_id)
text = update.message.text
- processing_msg = await update.message.reply_text("Asking Claude...")
- debug("Sent processing acknowledgement")
+ await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)
+ typing_stop = asyncio.Event()
+ asyncio.ensure_future(typing_loop(update, context, typing_stop))
+ processing_msg = await update.message.reply_text("Toris thinking...")
+
+ # Prepend compact summary if pending from /compact
+ compact_summary = state.pop("compact_summary", None)
+ if compact_summary:
+ text = f"\n{compact_summary}\n\n\n{text}"
+ save_state()
try:
continue_last = state["current_session"] is not None
@@ -1096,6 +2004,7 @@ async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
user_settings=settings,
update=update,
context=context,
+ processing_msg=processing_msg,
)
if new_session_id and new_session_id != state["current_session"]:
@@ -1105,21 +2014,118 @@ async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE):
save_state()
# Send text response (split if too long)
- await send_long_message(update, processing_msg, response)
+ tool_log = metadata.get("tool_log", [])
+ await finalize_response(update, processing_msg, response)
# Send voice response if audio enabled
if settings["audio_enabled"]:
- audio = await text_to_speech(response, speed=settings["voice_speed"])
+ tts_text = response[:MAX_VOICE_CHARS] if len(response) > MAX_VOICE_CHARS else response
+ audio = await text_to_speech(tts_text, speed=settings["voice_speed"])
+ if audio:
+ await update.message.reply_voice(voice=audio)
+
+ except Exception as e:
+ logger.error(f"Error in handle_text: {e}")
+ await processing_msg.edit_text(f"Error: {e}")
+ finally:
+ typing_stop.set()
+
+
+# ============ Photo Handler ============
+
async def handle_photo(update: Update, context: ContextTypes.DEFAULT_TYPE):
    """Handle incoming photo messages — save to sandbox and let Claude view them.

    Downloads the highest-resolution variant to the sandbox, builds a prompt
    pointing Claude at the saved file (including any caption), runs the usual
    Claude round-trip, and replies with text plus optional TTS audio.
    Rate-limited and restricted to the authorized chat/topic.
    """
    if update.effective_user.is_bot is True:
        return

    logger.debug(f"PHOTO received from user {update.effective_user.id}, chat {update.effective_chat.id}")

    if not should_handle_message(update.message.message_thread_id):
        logger.debug(f"Ignoring photo - not in our topic")
        return

    if not _is_authorized(update):
        return

    user_id = update.effective_user.id

    allowed, rate_msg = check_rate_limit(user_id)
    if not allowed:
        await update.message.reply_text(rate_msg)
        return

    state = get_user_state(user_id)
    settings = get_user_settings(user_id)

    # Typing indicator first — signals immediately that the bot is alive.
    await context.bot.send_chat_action(chat_id=update.effective_chat.id, action=ChatAction.TYPING)
    typing_stop = asyncio.Event()
    asyncio.ensure_future(typing_loop(update, context, typing_stop))
    processing_msg = await update.message.reply_text("Processing photo...")

    try:
        # Get highest resolution photo (Telegram orders sizes ascending)
        photo = update.message.photo[-1]
        photo_file = await photo.get_file()

        # Save to sandbox with a timestamped name to avoid collisions
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        photo_path = Path(SANDBOX_DIR) / f"photo_{timestamp}.jpg"
        await photo_file.download_to_drive(str(photo_path))

        # Build prompt
        caption = update.message.caption or ""
        if caption:
            prompt = f"I sent you a photo. It's saved at: {photo_path}\n\nMy message: {caption}"
        else:
            prompt = f"I sent you a photo. It's saved at: {photo_path}\n\nPlease look at it and describe what you see, or help me with whatever is shown."

        # Prepend compact summary if pending from /compact
        compact_summary = state.pop("compact_summary", None)
        if compact_summary:
            prompt = f"\n{compact_summary}\n\n\n{prompt}"
            save_state()

        await processing_msg.edit_text("Toris thinking...")

        continue_last = state["current_session"] is not None
        response, new_session_id, metadata = await call_claude(
            prompt,
            session_id=state["current_session"],
            continue_last=continue_last,
            user_settings=settings,
            update=update,
            context=context,
            processing_msg=processing_msg,
        )

        # Track the session so /continue and /sessions can find it
        if new_session_id and new_session_id != state["current_session"]:
            state["current_session"] = new_session_id
            if new_session_id not in state["sessions"]:
                state["sessions"].append(new_session_id)
            save_state()

        # Fixed: removed the dead `tool_log = metadata.get("tool_log", [])`
        # local — it was never used after assignment.
        await finalize_response(update, processing_msg, response)

        if settings["audio_enabled"]:
            # Cap TTS input length (slicing is a no-op for short responses)
            tts_text = response[:MAX_VOICE_CHARS]
            audio = await text_to_speech(tts_text, speed=settings["voice_speed"])
            if audio:
                await update.message.reply_voice(voice=audio)

    except Exception as e:
        logger.error(f"Error in handle_photo: {e}")
        await processing_msg.edit_text(f"Error: {e}")
    finally:
        # Always stop the typing-indicator loop, even on failure
        typing_stop.set()
def main():
"""Main entry point."""
+ # Apply any saved credentials first (from previous /setup)
+ apply_saved_credentials()
+
+ # Now validate environment (will check if auth is configured)
validate_environment()
load_state()
load_settings()
@@ -1131,33 +2137,60 @@ def main():
# Commands
app.add_handler(CommandHandler("start", cmd_start))
app.add_handler(CommandHandler("new", cmd_new))
+ app.add_handler(CommandHandler("cancel", cmd_cancel))
+ app.add_handler(CommandHandler("compact", cmd_compact))
app.add_handler(CommandHandler("continue", cmd_continue))
app.add_handler(CommandHandler("sessions", cmd_sessions))
app.add_handler(CommandHandler("switch", cmd_switch))
app.add_handler(CommandHandler("status", cmd_status))
app.add_handler(CommandHandler("health", cmd_health))
app.add_handler(CommandHandler("settings", cmd_settings))
+ app.add_handler(CommandHandler("automations", cmd_automations))
+ app.add_handler(CommandHandler("setup", cmd_setup))
+ app.add_handler(CommandHandler("claude_token", cmd_claude_token))
+ app.add_handler(CommandHandler("elevenlabs_key", cmd_elevenlabs_key))
+ app.add_handler(CommandHandler("openai_key", cmd_openai_key))
# Callback handlers for inline keyboards
app.add_handler(CallbackQueryHandler(handle_settings_callback, pattern="^setting_"))
app.add_handler(CallbackQueryHandler(handle_approval_callback, pattern="^(approve_|reject_)"))
+ app.add_handler(CallbackQueryHandler(handle_automations_callback, pattern="^auto_"))
# Messages
app.add_handler(MessageHandler(filters.VOICE, handle_voice))
app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_text))
+ app.add_handler(MessageHandler(filters.PHOTO, handle_photo))
# Ensure sandbox exists at startup
Path(SANDBOX_DIR).mkdir(parents=True, exist_ok=True)
- debug("Bot starting...")
- debug(f"Persona: {PERSONA_NAME}")
- debug(f"Voice ID: {ELEVENLABS_VOICE_ID}")
- debug(f"TTS: eleven_turbo_v2_5 with expressive settings")
- debug(f"Sandbox: {SANDBOX_DIR}")
- debug(f"Read access: {CLAUDE_WORKING_DIR}")
- debug(f"Chat ID: {ALLOWED_CHAT_ID}")
- debug(f"Topic ID: {TOPIC_ID or 'ALL (no filter)'}")
- debug(f"System prompt: {SYSTEM_PROMPT_FILE or 'default'}")
+ # Register commands in Telegram menu (the "/" autocomplete list)
+ async def post_init(application):
+ await application.bot.set_my_commands([
+ BotCommand("new", "Start a new session"),
+ BotCommand("cancel", "Cancel current request"),
+ BotCommand("compact", "Summarize & compress current session"),
+ BotCommand("continue", "Continue last session"),
+ BotCommand("sessions", "List recent sessions"),
+ BotCommand("switch", "Switch to a session by ID"),
+ BotCommand("status", "Current session info"),
+ BotCommand("settings", "Voice, mode & speed settings"),
+ BotCommand("automations", "Manage scheduled automations"),
+ BotCommand("health", "Check bot & API status"),
+ BotCommand("setup", "Configure API tokens"),
+ BotCommand("start", "Show help"),
+ ])
+ app.post_init = post_init
+
+ logger.debug("Bot starting...")
+ logger.debug(f"Persona: {PERSONA_NAME}")
+ logger.debug(f"TTS: {TTS_PROVIDER}" + (f" ({OPENAI_TTS_MODEL} / {OPENAI_VOICE_ID})" if TTS_PROVIDER == "openai" else f" (eleven_turbo_v2_5 / {ELEVENLABS_VOICE_ID})" if TTS_PROVIDER == "elevenlabs" else " (none)"))
+ logger.debug(f"STT: {STT_PROVIDER}" + (f" ({OPENAI_STT_MODEL})" if STT_PROVIDER == "openai" else " (scribe_v1)" if STT_PROVIDER == "elevenlabs" else " (none)"))
+ logger.debug(f"Sandbox: {SANDBOX_DIR}")
+ logger.debug(f"Read access: {CLAUDE_WORKING_DIR}")
+ logger.debug(f"Chat ID: {ALLOWED_CHAT_ID}")
+ logger.debug(f"Topic ID: {TOPIC_ID or 'ALL (no filter)'}")
+ logger.debug(f"System prompt: {SYSTEM_PROMPT_FILE or 'default'}")
print(f"{PERSONA_NAME} is ready. Waiting for messages...")
app.run_polling(
drop_pending_updates=True,
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..0e273d3
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,44 @@
+# TORIS Claude Voice Assistant - Docker Compose
+#
+# Quick start:
+# cp docker/toris.env.example docker/toris.env
+# # Edit docker/toris.env with your API keys
+# docker-compose up -d
+#
+# Logs:
+# docker-compose logs -f toris
+#
+# Authentication options:
+# 1. API Key: Set ANTHROPIC_API_KEY in docker/toris.env
+# 2. Subscription: Uncomment the credentials mount below after running 'claude /login' on host
+
+services:
+ toris:
+ build: .
+ container_name: claude-voice-toris
+ env_file:
+ - docker/toris.env
+ volumes:
+ # Persistent state (session history, user settings)
+ - toris-state:/home/claude/state
+ # Sandbox for Claude file operations
+ - toris-sandbox:/home/claude/sandbox
+ # Prompts directory (read-only)
+ - ./prompts:/home/claude/app/prompts:ro
+ # Claude credentials persistence
+ - toris-claude-config:/home/claude/.claude
+ # OPTIONAL: Mount host credentials for subscription users
+ # Uncomment after running 'claude /login' on your host machine
+ # - ~/.claude/.credentials.json:/home/claude/.claude/.credentials.json:ro
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "pgrep", "-f", "python.*bot.py"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ start_period: 40s
+
+volumes:
+ toris-state:
+ toris-sandbox:
+ toris-claude-config:
diff --git a/docker/toris.env.example b/docker/toris.env.example
new file mode 100644
index 0000000..d686ad0
--- /dev/null
+++ b/docker/toris.env.example
@@ -0,0 +1,109 @@
+# TORIS Voice Assistant - Docker Configuration
+# Copy this file to docker/toris.env and fill in your values
+
+# =============================================================================
+# REQUIRED - Core Settings
+# =============================================================================
+# Get bot token from https://t.me/botfather
+TELEGRAM_BOT_TOKEN=your_bot_token_here
+
+# Your Telegram chat ID (for security - only this chat can use the bot)
+# Find yours by messaging @userinfobot
+TELEGRAM_DEFAULT_CHAT_ID=0
+
+# Admin user IDs (comma-separated) - required for /setup, /claude_token, etc.
+# Leave empty to allow all users in authorized chat to configure the bot
+# Find your ID by messaging @userinfobot
+# TELEGRAM_ADMIN_USER_IDS=123456789,987654321
+TELEGRAM_ADMIN_USER_IDS=
+
+# =============================================================================
+# VOICE PROVIDERS - Use ElevenLabs OR OpenAI (or both)
+# At least one must be set. ElevenLabs takes priority if both are set.
+# =============================================================================
+
+# --- Option A: ElevenLabs ---
+# Get API key from https://elevenlabs.io
+# ELEVENLABS_API_KEY=your_elevenlabs_api_key_here
+
+# --- Option B: OpenAI ---
+# Get API key from https://platform.openai.com/api-keys
+# OPENAI_API_KEY=sk-...
+
+# Provider override (auto-detected from keys if not set)
+# TTS_PROVIDER=openai # "elevenlabs" or "openai"
+# STT_PROVIDER=openai # "elevenlabs" or "openai"
+# STT_LANGUAGE= # e.g. "en", "pl" — empty = auto-detect
+
+# =============================================================================
+# CLAUDE AUTHENTICATION (Choose ONE method)
+# =============================================================================
+#
+# METHOD 1: API Key (Recommended for Docker)
+# Get API key from https://console.anthropic.com
+# Uses pre-paid API credits
+#
+# ANTHROPIC_API_KEY=sk-ant-...
+#
+# METHOD 2: Claude Subscription (Pro/Max/Teams)
+# No API key needed - mount your credentials instead
+# Add this to docker-compose.yml volumes:
+# - ~/.claude/.credentials.json:/home/claude/.claude/.credentials.json:ro
+#
+# First login on host machine: claude /login
+# Credentials will be shared with container
+
+# =============================================================================
+# PERSONA CONFIGURATION
+# =============================================================================
+# Display name for logging
+PERSONA_NAME=TORIS
+
+# Path to system prompt file (relative to /home/claude/app)
+SYSTEM_PROMPT_FILE=prompts/toris.md
+
+# --- ElevenLabs voice ---
+# George (conversational): JBFqnCBsd6RMkjVDRZzb
+# Daniel (direct): onwK4e9ZLuTAKqWW03F9
+# Charlie (warm): IKne3meq5aSn9XLyUdCD
+ELEVENLABS_VOICE_ID=JBFqnCBsd6RMkjVDRZzb
+
+# --- OpenAI voice ---
+# Voices: alloy, ash, ballad, cedar, coral (default), echo, fable,
+# juniper, marin, onyx, nova, sage, shimmer, verse
+# OPENAI_VOICE_ID=coral
+# OPENAI_TTS_MODEL=gpt-4o-mini-tts
+# OPENAI_STT_MODEL=whisper-1
+
+# Speaking style prompt (only for gpt-4o-mini-tts — killer feature!)
+# OPENAI_VOICE_INSTRUCTIONS=Speak as TORIS — direct, calm, slightly fast. No filler words.
+
+# =============================================================================
+# TOPIC FILTERING (Optional)
+# =============================================================================
+# If set, bot only responds to messages in this Telegram topic
+# Leave empty to respond to all messages in the chat
+# Useful for running multiple bots in the same group
+TELEGRAM_TOPIC_ID=
+
+# =============================================================================
+# DIRECTORIES (Docker Defaults - Usually Don't Change)
+# =============================================================================
+CLAUDE_WORKING_DIR=/home/claude/app
+CLAUDE_SANDBOX_DIR=/home/claude/sandbox
+
+# Claude Code settings file (permissions, MCP servers)
+# CLAUDE_SETTINGS_FILE=/home/claude/app/settings.json
+
+# =============================================================================
+# VOICE SETTINGS
+# =============================================================================
+# Max characters for voice response (longer = more TTS cost)
+# Default: 2000 (recommended for natural conversation)
+MAX_VOICE_RESPONSE_CHARS=2000
+
+# =============================================================================
+# OPTIONAL
+# =============================================================================
+# Log level: DEBUG, INFO, WARNING, ERROR
+LOG_LEVEL=INFO
diff --git a/docs/superpowers/plans/2026-04-06-automations-ui.md b/docs/superpowers/plans/2026-04-06-automations-ui.md
new file mode 100644
index 0000000..1682852
--- /dev/null
+++ b/docs/superpowers/plans/2026-04-06-automations-ui.md
@@ -0,0 +1,774 @@
+# Automations UI Implementation Plan
+
+> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
+
+**Goal:** Add `/automations` command with inline Telegram UI for managing Claude Code cloud scheduled tasks (CCR triggers) — list, run, pause, and conversational creation via TORIS.
+
+**Architecture:** Two paths: `claude -p` with RemoteTrigger for fast list/run/toggle operations (bot.py parses JSON), and existing Agent SDK for conversational creation. Single editable message for list ↔ card navigation (no chat clutter).
+
+**Tech Stack:** python-telegram-bot, claude CLI (`claude -p --allowedTools RemoteTrigger --output-format json`), asyncio.to_thread for subprocess calls
+
+---
+
+## File Map
+
+| File | Changes |
+|------|---------|
+| `bot.py` | All new code + modifications |
+| `tests/test_automations.py` | New test file |
+
+All changes in `bot.py`. Insert new helpers before the command handlers section (`# ============ Commands ============` around line 930). Insert new command and callback handlers after `cmd_settings`.
+
+---
+
+### Task 1: cron_to_human helper + tests
+
+**Files:**
+- Create: `tests/test_automations.py`
+- Modify: `bot.py` (add `cron_to_human` function near other helpers)
+
+- [ ] **Step 1: Create test file with failing tests**
+
+```python
+# tests/test_automations.py
+import sys, os
+sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
+
+from bot import cron_to_human
+
+def test_daily():
+ assert cron_to_human("0 7 * * *") == "Codziennie 07:00"
+
+def test_weekdays():
+ assert cron_to_human("0 9 * * 1-5") == "Pn-Pt 09:00"
+
+def test_weekly_monday():
+ assert cron_to_human("0 10 * * 1") == "Pn 10:00"
+
+def test_hourly():
+ assert cron_to_human("0 * * * *") == "Co godzinę"
+
+def test_unknown_falls_back():
+ assert cron_to_human("*/15 * * * *") == "*/15 * * * *"
+
+def test_zero_padded():
+ assert cron_to_human("0 8 * * *") == "Codziennie 08:00"
+```
+
+- [ ] **Step 2: Run tests — verify they fail**
+
+```bash
+cd /Users/tako/GitRepos/toris-claude-voice-assistant
+.venv/bin/python -m pytest tests/test_automations.py -v 2>&1 | head -30
+```
+
+Expected: `ImportError` (function doesn't exist yet)
+
+- [ ] **Step 3: Add cron_to_human to bot.py**
+
+Find the line `# ============ Commands ============` (around line 930). Insert before it:
+
+```python
+# ============ Automations Helpers ============
+
+def cron_to_human(expr: str) -> str:
+ """Convert 5-field cron expression to Polish human-readable string."""
+ parts = expr.split()
+ if len(parts) != 5:
+ return expr
+ minute, hour, dom, month, dow = parts
+ if dom != "*" or month != "*":
+ return expr
+ hm = f"{int(hour):02d}:{int(minute):02d}" if hour != "*" and minute.isdigit() and hour.isdigit() else f"{hour}:{minute}"
+ if hour == "*":
+ return "Co godzinę"
+ if dow == "*":
+ return f"Codziennie {hm}"
+ if dow == "1-5":
+ return f"Pn-Pt {hm}"
+ day_names = {"0": "Nd", "1": "Pn", "2": "Wt", "3": "Śr", "4": "Cz", "5": "Pt", "6": "Sb", "7": "Nd"}
+ if dow in day_names:
+ return f"{day_names[dow]} {hm}"
+ return expr
+```
+
+- [ ] **Step 4: Run tests — verify they pass**
+
+```bash
+.venv/bin/python -m pytest tests/test_automations.py -v
+```
+
+Expected: 6 tests PASSED
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add tests/test_automations.py bot.py
+git commit -m "feat: add cron_to_human helper with tests"
+```
+
+---
+
+### Task 2: run_remote_trigger helpers
+
+**Files:**
+- Modify: `bot.py` (add 3 async functions in Automations Helpers section)
+- Modify: `tests/test_automations.py` (add tests)
+
+- [ ] **Step 1: Add failing tests**
+
+Append to `tests/test_automations.py`:
+
+```python
+import json, subprocess
+from unittest.mock import patch, MagicMock
+import asyncio
+
+# Import the helpers (they will be added to bot.py)
+from bot import run_remote_trigger_list, run_remote_trigger_run, run_remote_trigger_toggle
+
+def _run(coro):
+    # asyncio.get_event_loop() is deprecated outside a running loop (3.10+)
+    # and fails under pytest on 3.12; asyncio.run creates a fresh loop per call.
+    return asyncio.run(coro)
+
+def test_list_returns_triggers():
+ mock_output = json.dumps({
+ "result": '[{"id":"trig_1","name":"Daily Standup","cron_expression":"0 7 * * *","enabled":true}]'
+ })
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=0, stdout=mock_output, stderr="")
+ triggers = _run(run_remote_trigger_list())
+ assert len(triggers) == 1
+ assert triggers[0]["name"] == "Daily Standup"
+ assert triggers[0]["enabled"] is True
+
+def test_list_returns_empty_on_error():
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=1, stdout="", stderr="error")
+ triggers = _run(run_remote_trigger_list())
+ assert triggers == []
+
+def test_run_trigger_returns_true_on_success():
+ mock_output = json.dumps({"result": "Trigger started successfully"})
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=0, stdout=mock_output, stderr="")
+ ok = _run(run_remote_trigger_run("trig_1"))
+ assert ok is True
+
+def test_run_trigger_returns_false_on_error():
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=1, stdout="", stderr="err")
+ ok = _run(run_remote_trigger_run("trig_1"))
+ assert ok is False
+
+def test_toggle_trigger():
+ mock_output = json.dumps({"result": "Updated"})
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=0, stdout=mock_output, stderr="")
+ ok = _run(run_remote_trigger_toggle("trig_1", enable=False))
+ assert ok is True
+ # verify RemoteTrigger update was called with enabled=false in prompt
+ call_args = mock_run.call_args[0][0]
+ assert "RemoteTrigger" in " ".join(call_args)
+```
+
+- [ ] **Step 2: Run tests — verify they fail**
+
+```bash
+.venv/bin/python -m pytest tests/test_automations.py::test_list_returns_triggers -v
+```
+
+Expected: `ImportError: cannot import name 'run_remote_trigger_list'`
+
+- [ ] **Step 3: Add helpers to bot.py**
+
+In the `# ============ Automations Helpers ============` section, after `cron_to_human`, add:
+
+```python
+async def run_remote_trigger_list() -> list[dict]:
+ """Fetch all scheduled triggers via claude -p. Returns list of trigger dicts."""
+ prompt = (
+ "List all my scheduled remote triggers using RemoteTrigger tool with action='list'. "
+ "Return ONLY a JSON array where each item has: id (string), name (string), "
+ "cron_expression (string), enabled (boolean). No other text."
+ )
+ cmd = ["claude", "-p", prompt, "--allowedTools", "RemoteTrigger", "--output-format", "json"]
+ try:
+ result = await asyncio.to_thread(
+ subprocess.run, cmd, capture_output=True, text=True, timeout=30
+ )
+ if result.returncode != 0:
+ logger.warning(f"run_remote_trigger_list failed: {result.stderr[:200]}")
+ return []
+ data = json.loads(result.stdout)
+ raw = data.get("result", "[]")
+ # Strip markdown code fences if present
+ raw = raw.strip()
+ if raw.startswith("```"):
+ raw = "\n".join(raw.split("\n")[1:])
+ raw = raw.rstrip("`").strip()
+ return json.loads(raw)
+ except Exception as e:
+ logger.warning(f"run_remote_trigger_list exception: {e}")
+ return []
+
+
+async def run_remote_trigger_run(trigger_id: str) -> bool:
+ """Trigger a scheduled task to run immediately via claude -p."""
+ prompt = f"Run the scheduled remote trigger with ID '{trigger_id}' immediately using RemoteTrigger tool with action='run'."
+ cmd = ["claude", "-p", prompt, "--allowedTools", "RemoteTrigger", "--output-format", "json"]
+ try:
+ result = await asyncio.to_thread(
+ subprocess.run, cmd, capture_output=True, text=True, timeout=30
+ )
+ return result.returncode == 0
+ except Exception as e:
+ logger.warning(f"run_remote_trigger_run exception: {e}")
+ return False
+
+
+async def run_remote_trigger_toggle(trigger_id: str, enable: bool) -> bool:
+ """Enable or disable a scheduled trigger via claude -p."""
+ state = "enabled" if enable else "disabled"
+ prompt = (
+ f"Update the scheduled remote trigger with ID '{trigger_id}' using RemoteTrigger tool "
+ f"with action='update'. Set enabled={str(enable).lower()}. "
+ f"The trigger should be {state} after this call."
+ )
+ cmd = ["claude", "-p", prompt, "--allowedTools", "RemoteTrigger", "--output-format", "json"]
+ try:
+ result = await asyncio.to_thread(
+ subprocess.run, cmd, capture_output=True, text=True, timeout=30
+ )
+ return result.returncode == 0
+ except Exception as e:
+ logger.warning(f"run_remote_trigger_toggle exception: {e}")
+ return False
+```
+
+Also add `import subprocess` and `import json` at the top if not already present. Check with:
+```bash
+grep -n "^import subprocess\|^import json" bot.py
+```
+If missing, add after the existing imports block.
+
+- [ ] **Step 4: Run tests**
+
+```bash
+.venv/bin/python -m pytest tests/test_automations.py -v
+```
+
+Expected: all tests PASSED (mocked subprocess, no real API calls)
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add bot.py tests/test_automations.py
+git commit -m "feat: add run_remote_trigger helpers"
+```
+
+---
+
+### Task 3: Message builders (list + card)
+
+**Files:**
+- Modify: `bot.py` (add `build_automations_list` and `build_automation_card`)
+- Modify: `tests/test_automations.py` (add tests)
+
+- [ ] **Step 1: Add failing tests**
+
+Append to `tests/test_automations.py`:
+
+```python
+from bot import build_automations_list, build_automation_card
+from telegram import InlineKeyboardMarkup
+
+SAMPLE_TRIGGERS = [
+ {"id": "trig_1", "name": "Daily Standup", "cron_expression": "0 8 * * *", "enabled": True},
+ {"id": "trig_2", "name": "Dep Audit", "cron_expression": "0 10 * * 1", "enabled": False},
+]
+
+def test_build_list_text():
+ text, markup = build_automations_list(SAMPLE_TRIGGERS)
+ assert "Daily Standup" in text
+ assert "Dep Audit" in text
+ assert isinstance(markup, InlineKeyboardMarkup)
+
+def test_build_list_empty():
+ text, markup = build_automations_list([])
+ assert "brak" in text.lower() or "automacj" in text.lower()
+ assert isinstance(markup, InlineKeyboardMarkup)
+
+def test_build_list_buttons_contain_ids():
+ _, markup = build_automations_list(SAMPLE_TRIGGERS)
+ all_data = [btn.callback_data for row in markup.inline_keyboard for btn in row]
+ assert any("trig_1" in d for d in all_data)
+ assert any("trig_2" in d for d in all_data)
+
+def test_build_card_full():
+ trigger = SAMPLE_TRIGGERS[0]
+ text, markup = build_automation_card(trigger, style="full")
+ assert "Daily Standup" in text
+ assert "08:00" in text
+ assert isinstance(markup, InlineKeyboardMarkup)
+ all_data = [btn.callback_data for row in markup.inline_keyboard for btn in row]
+ assert any("auto_run_trig_1" in d for d in all_data)
+ assert any("auto_list" in d for d in all_data)
+
+def test_build_card_compact():
+ trigger = SAMPLE_TRIGGERS[0]
+ text, markup = build_automation_card(trigger, style="compact")
+ assert "Daily Standup" in text
+ assert isinstance(markup, InlineKeyboardMarkup)
+
+def test_build_card_paused_shows_resume():
+ trigger = SAMPLE_TRIGGERS[1] # enabled=False
+ _, markup = build_automation_card(trigger, style="compact")
+ all_labels = [btn.text for row in markup.inline_keyboard for btn in row]
+    assert any("Resume" in label for label in all_labels)
+```
+
+- [ ] **Step 2: Run tests — verify they fail**
+
+```bash
+.venv/bin/python -m pytest tests/test_automations.py::test_build_list_text -v
+```
+
+Expected: `ImportError: cannot import name 'build_automations_list'`
+
+- [ ] **Step 3: Add builders to bot.py**
+
+In the `# ============ Automations Helpers ============` section, after the `run_remote_trigger_*` functions, add:
+
+```python
+def build_automations_list(triggers: list[dict]) -> tuple[str, InlineKeyboardMarkup]:
+ """Build list view: text summary + inline keyboard."""
+ if not triggers:
+ text = "🤖 Automacje\n\nNie masz jeszcze żadnych automacji."
+ keyboard = [[InlineKeyboardButton("+ Stwórz pierwszą automację", callback_data="auto_new")]]
+ return text, InlineKeyboardMarkup(keyboard)
+
+ active = sum(1 for t in triggers if t.get("enabled", True))
+ text = f"🤖 Twoje automacje ({len(triggers)}) · {active} aktywnych"
+
+ keyboard = []
+ for t in triggers:
+ tid = t["id"]
+ name = t["name"]
+ enabled = t.get("enabled", True)
+ status = "●" if enabled else "○"
+ toggle_label = "⏸" if enabled else "▶"
+ toggle_cb = f"auto_toggle_off_{tid}" if enabled else f"auto_toggle_on_{tid}"
+ # Truncate name to fit 64-char callback_data limit
+ row = [
+ InlineKeyboardButton(f"{status} {name}", callback_data=f"auto_card_{tid}"),
+ InlineKeyboardButton("▶", callback_data=f"auto_run_{tid}"),
+ InlineKeyboardButton(toggle_label, callback_data=toggle_cb),
+ ]
+ keyboard.append(row)
+
+ keyboard.append([
+ InlineKeyboardButton("+ Nowa automacja", callback_data="auto_new"),
+ InlineKeyboardButton("🔄", callback_data="auto_refresh"),
+ ])
+ return text, InlineKeyboardMarkup(keyboard)
+
+
+def build_automation_card(trigger: dict, style: str = "full") -> tuple[str, InlineKeyboardMarkup]:
+ """Build card view for a single trigger."""
+ tid = trigger["id"]
+ name = trigger["name"]
+ enabled = trigger.get("enabled", True)
+ cron = trigger.get("cron_expression", "")
+ schedule_human = cron_to_human(cron)
+ status_icon = "●" if enabled else "○"
+ status_text = "Aktywna" if enabled else "Wstrzymana"
+ toggle_label = "⏸ Pause" if enabled else "▶ Resume"
+ toggle_cb = f"auto_toggle_off_{tid}" if enabled else f"auto_toggle_on_{tid}"
+
+ if style == "compact":
+ text = (
+ f"🤖 {name}\n"
+ f"{status_icon} {status_text} · {schedule_human}"
+ )
+ keyboard = [
+ [
+ InlineKeyboardButton("▶ Run now", callback_data=f"auto_run_{tid}"),
+ InlineKeyboardButton(toggle_label, callback_data=toggle_cb),
+ InlineKeyboardButton("✎ Edit", callback_data=f"auto_edit_{tid}"),
+ InlineKeyboardButton("✕", url="https://claude.ai/code/scheduled"),
+ ],
+ [InlineKeyboardButton("← Wróć", callback_data="auto_list")],
+ ]
+ else: # full
+ text = (
+ f"🤖 {name}\n\n"
+ f"HARMONOGRAM\n{schedule_human}\n\n"
+ f"STATUS\n{status_icon} {status_text}"
+ )
+ keyboard = [
+ [
+ InlineKeyboardButton("▶ Run now", callback_data=f"auto_run_{tid}"),
+ InlineKeyboardButton(toggle_label, callback_data=toggle_cb),
+ ],
+ [
+ InlineKeyboardButton("✎ Edit prompt", callback_data=f"auto_edit_{tid}"),
+ InlineKeyboardButton("✕ Usuń →", url="https://claude.ai/code/scheduled"),
+ ],
+ [InlineKeyboardButton("← Wróć do listy", callback_data="auto_list")],
+ ]
+
+ return text, InlineKeyboardMarkup(keyboard)
+```
+
+- [ ] **Step 4: Run tests**
+
+```bash
+.venv/bin/python -m pytest tests/test_automations.py -v
+```
+
+Expected: all tests PASSED
+
+- [ ] **Step 5: Commit**
+
+```bash
+git add bot.py tests/test_automations.py
+git commit -m "feat: add automations list and card message builders"
+```
+
+---
+
+### Task 4: cmd_automations command handler
+
+**Files:**
+- Modify: `bot.py` (add `cmd_automations` after `cmd_settings`)
+
+- [ ] **Step 1: Add cmd_automations to bot.py**
+
+Find `# ============ Token Configuration Commands ============` (around line 1252). Insert before it:
+
+```python
+async def cmd_automations(update: Update, context: ContextTypes.DEFAULT_TYPE):
+ """Handle /automations command — show scheduled tasks list."""
+ if not should_handle_message(update.message.message_thread_id):
+ return
+ if not _is_authorized(update):
+ return
+
+ loading_msg = await update.message.reply_text("⏳ Ładuję automacje...")
+
+ triggers = await run_remote_trigger_list()
+ text, markup = build_automations_list(triggers)
+
+ try:
+ await loading_msg.edit_text(text, reply_markup=markup)
+ except Exception as e:
+ logger.warning(f"cmd_automations edit error: {e}")
+ await update.message.reply_text(text, reply_markup=markup)
+```
+
+- [ ] **Step 2: Register command in app setup**
+
+Find `app.add_handler(CommandHandler("settings", cmd_settings))` and add after it:
+
+```python
+ app.add_handler(CommandHandler("automations", cmd_automations))
+```
+
+Find the `set_my_commands` list and add:
+
+```python
+ BotCommand("automations", "Manage scheduled automations"),
+```
+
+(Add it after `BotCommand("settings", ...)`)
+
+- [ ] **Step 3: Smoke test — restart bot and test command**
+
+```bash
+pkill -f "bot.py"; sleep 1; .venv/bin/python bot.py &
+sleep 3
+```
+
+Send `/automations` in Telegram. Expected: loading message appears, then either trigger list or "Nie masz jeszcze żadnych automacji" with `[+ Stwórz pierwszą]` button.
+
+- [ ] **Step 4: Commit**
+
+```bash
+git add bot.py
+git commit -m "feat: add /automations command handler"
+```
+
+---
+
+### Task 5: handle_automations_callback
+
+**Files:**
+- Modify: `bot.py` (add `handle_automations_callback` after `cmd_automations`)
+
+- [ ] **Step 1: Add callback handler to bot.py**
+
+Insert after `cmd_automations`:
+
+```python
+async def handle_automations_callback(update: Update, context: ContextTypes.DEFAULT_TYPE):
+    """Handle all auto_* callback button taps."""
+    query = update.callback_query
+    data = query.data
+    # A Telegram callback query can only be answered ONCE — the run/toggle
+    # branches answer it themselves with result alerts, so answering here
+    # first would make Telegram silently drop those later alerts.
+    if not data.startswith(("auto_run_", "auto_toggle_")):
+        await query.answer()
+
+    if not _is_authorized(update):
+        return
+
+    user_id = update.effective_user.id
+    settings = get_user_settings(user_id)
+    card_style = settings.get("automation_card_style", "full")
+
+    # ── Back to list ──────────────────────────────────────────
+    if data in ("auto_list", "auto_refresh"):
+        await query.edit_message_text("⏳ Ładuję automacje...")
+        triggers = await run_remote_trigger_list()
+        text, markup = build_automations_list(triggers)
+        try:
+            await query.edit_message_text(text, reply_markup=markup)
+        except Exception as e:
+            logger.warning(f"auto_list edit error: {e}")
+
+    # ── Open card ─────────────────────────────────────────────
+    elif data.startswith("auto_card_"):
+        trigger_id = data[len("auto_card_"):]
+        await query.edit_message_text("⏳...")
+        triggers = await run_remote_trigger_list()
+        trigger = next((t for t in triggers if t["id"] == trigger_id), None)
+        if trigger is None:
+            await query.edit_message_text("❌ Nie znaleziono automacji.")
+            return
+        text, markup = build_automation_card(trigger, style=card_style)
+        try:
+            await query.edit_message_text(text, reply_markup=markup)
+        except Exception as e:
+            logger.warning(f"auto_card edit error: {e}")
+
+    # ── Run now ───────────────────────────────────────────────
+    elif data.startswith("auto_run_"):
+        trigger_id = data[len("auto_run_"):]
+        ok = await run_remote_trigger_run(trigger_id)
+        # Single answer carries the result (only one answer is allowed).
+        if ok:
+            await query.answer("✓ Uruchomiono!", show_alert=True)
+        else:
+            await query.answer("❌ Błąd uruchamiania", show_alert=True)
+
+    # ── Toggle enable/disable ─────────────────────────────────
+    elif data.startswith("auto_toggle_"):
+        # format: auto_toggle_off_{id} or auto_toggle_on_{id}
+        rest = data[len("auto_toggle_"):]
+        enable = rest.startswith("on_")
+        # "on_" is 3 chars but "off_" is 4 — strip exactly the matched prefix
+        trigger_id = rest.removeprefix("on_").removeprefix("off_")
+        ok = await run_remote_trigger_toggle(trigger_id, enable=enable)
+        if ok:
+            await query.answer("✓")
+            # Refresh card
+            triggers = await run_remote_trigger_list()
+            trigger = next((t for t in triggers if t["id"] == trigger_id), None)
+            if trigger:
+                text, markup = build_automation_card(trigger, style=card_style)
+                await query.edit_message_text(text, reply_markup=markup)
+        else:
+            await query.answer("❌ Błąd zmiany stanu", show_alert=True)
+
+    # ── New automation ────────────────────────────────────────
+    elif data == "auto_new":
+        await query.edit_message_text(
+            "💬 Opisz automację głosem lub tekstem.\n\n"
+            "Np. „stwórz daily standup o 8 rano sprawdzający PR-y na GitHubie""
+        )
+
+    # ── Edit prompt (conversational) ──────────────────────────
+    elif data.startswith("auto_edit_"):
+        trigger_id = data[len("auto_edit_"):]  # NOTE(review): currently unused — pass to TORIS context when wiring creation flow
+        await query.edit_message_text(
+            "✎ Co chcesz zmienić w tej automacji?\n\n"
+            "Opisz głosem lub tekstem — np. „zmień godzinę na 9 rano" albo „dodaj sprawdzanie CI""
+        )
+```
+
+- [ ] **Step 2: Register callback handler in app setup**
+
+Find `app.add_handler(CallbackQueryHandler(handle_approval_callback, ...))` and add after it:
+
+```python
+ app.add_handler(CallbackQueryHandler(handle_automations_callback, pattern="^auto_"))
+```
+
+- [ ] **Step 3: Restart and test all buttons**
+
+```bash
+pkill -f "bot.py"; sleep 1; .venv/bin/python bot.py &
+```
+
+Test sequence in Telegram:
+1. `/automations` → list appears
+2. Tap trigger name → card appears ✓
+3. Tap `← Wróć` → list appears ✓
+4. Tap `▶` (Run now) → alert "Uruchomiono!" ✓
+5. Tap `⏸` → card refreshes with Resume button ✓
+6. Tap `🔄` → list refreshes ✓
+7. Tap `+ Nowa automacja` → instruction message ✓
+
+- [ ] **Step 4: Commit**
+
+```bash
+git add bot.py
+git commit -m "feat: add handle_automations_callback with all auto_* patterns"
+```
+
+---
+
+### Task 6: automation_card_style in settings
+
+**Files:**
+- Modify: `bot.py` (add setting to `cmd_settings` and `handle_settings_callback`)
+
+- [ ] **Step 1: Add card style button to cmd_settings**
+
+Find in `cmd_settings` the keyboard building block:
+
+```python
+ keyboard = [
+ [
+ InlineKeyboardButton(f"Mode: {mode_display}", callback_data="setting_mode_toggle"),
+ InlineKeyboardButton(f"Watch: {watch_mode_val}", callback_data="setting_watch_cycle"),
+ ],
+ [InlineKeyboardButton(f"Audio: {audio_status}", callback_data="setting_audio_toggle")],
+```
+
+Add `card_style` variable and new button row:
+
+```python
+ card_style = settings.get("automation_card_style", "full")
+ card_style_display = "Pełna" if card_style == "full" else "Kompakt"
+
+ message = (
+ f"Settings:\n\n"
+ f"Mode: {mode_display}\n"
+ f"Watch: {watch_mode_val}\n"
+ f"Audio: {audio_status}\n"
+ f"Voice Speed: {speed}x\n"
+ f"Auto karta: {card_style_display}"
+ )
+
+ keyboard = [
+ [
+ InlineKeyboardButton(f"Mode: {mode_display}", callback_data="setting_mode_toggle"),
+ InlineKeyboardButton(f"Watch: {watch_mode_val}", callback_data="setting_watch_cycle"),
+ ],
+ [InlineKeyboardButton(f"Audio: {audio_status}", callback_data="setting_audio_toggle")],
+ [
+ InlineKeyboardButton("0.8x", callback_data="setting_speed_0.8"),
+ InlineKeyboardButton("0.9x", callback_data="setting_speed_0.9"),
+ InlineKeyboardButton("1.0x", callback_data="setting_speed_1.0"),
+ InlineKeyboardButton("1.1x", callback_data="setting_speed_1.1"),
+ InlineKeyboardButton("1.2x", callback_data="setting_speed_1.2"),
+ ],
+ [InlineKeyboardButton(f"Auto karta: {card_style_display}", callback_data="setting_card_style_toggle")],
+ ]
+```
+
+- [ ] **Step 2: Handle setting_card_style_toggle in handle_settings_callback**
+
+Find in `handle_settings_callback` the `elif callback_data.startswith("setting_speed_"):` block. Add after it:
+
+```python
+ elif callback_data == "setting_card_style_toggle":
+ current = settings.get("automation_card_style", "full")
+ settings["automation_card_style"] = "compact" if current == "full" else "full"
+ save_settings()
+```
+
+Also update the settings rebuild block at the bottom of `handle_settings_callback` to include card style (same changes as cmd_settings above — add `card_style`, `card_style_display` variables and the new keyboard row + message line).
+
+- [ ] **Step 3: Restart and test settings toggle**
+
+```bash
+pkill -f "bot.py"; sleep 1; .venv/bin/python bot.py &
+```
+
+1. Send `/settings` → see "Auto karta: Pełna" button
+2. Tap it → button changes to "Auto karta: Kompakt"
+3. Open `/automations` → tap trigger name → card should be compact
+4. Tap it again → "Auto karta: Pełna"
+
+- [ ] **Step 4: Commit**
+
+```bash
+git add bot.py
+git commit -m "feat: add automation_card_style setting (compact/full toggle)"
+```
+
+---
+
+### Task 7: End-to-end smoke test + gitignore
+
+**Files:**
+- Modify: `.gitignore` (add .superpowers/)
+
+- [ ] **Step 1: Add .superpowers to .gitignore**
+
+```bash
+grep -q ".superpowers" .gitignore || echo ".superpowers/" >> .gitignore
+```
+
+- [ ] **Step 2: Run full test suite**
+
+```bash
+.venv/bin/python -m pytest tests/test_automations.py -v
+```
+
+Expected: all tests PASSED, no warnings about missing imports.
+
+- [ ] **Step 3: Full bot restart + end-to-end test**
+
+```bash
+pkill -f "bot.py"; sleep 1; .venv/bin/python bot.py &
+sleep 3
+```
+
+Complete flow in Telegram:
+1. `/automations` — list loads ✓
+2. Tap trigger → full card ✓
+3. Tap `← Wróć` → list ✓
+4. `/settings` → "Auto karta: Pełna" button visible ✓
+5. Toggle card style → compact ✓
+6. `/automations` → tap trigger → compact card ✓
+7. Say "stwórz automację która codziennie o 9 robi przegląd PR-ów" → TORIS loops, asks questions, shows preview card ✓
+
+- [ ] **Step 4: Final commit**
+
+```bash
+git add .gitignore
+git commit -m "chore: add .superpowers to .gitignore"
+```
+
+---
+
+## Self-Review
+
+**Spec coverage:**
+- ✅ `/automations` command (Task 4)
+- ✅ List view with ●/○ indicators, ▶ Run, ⏸/▶Resume buttons (Task 3+4)
+- ✅ Card view: full and compact, same-message navigation (Task 3+5)
+- ✅ Back button → list (Task 5)
+- ✅ Run now (Task 5)
+- ✅ Toggle enable/disable (Task 5)
+- ✅ New automation prompt (Task 5) — creation stays conversational via TORIS
+- ✅ Refresh button (Task 5)
+- ✅ automation_card_style setting (Task 6)
+- ✅ cron_to_human helper (Task 1)
+- ✅ run_remote_trigger_* helpers (Task 2)
+- ✅ .gitignore for .superpowers (Task 7)
+- ✅ RemoteTrigger already in allowed_tools (done before this plan)
+- ✅ TORIS system prompt already updated with RemoteTrigger knowledge (done before this plan)
+
+**Not in scope (per spec):**
+- CCR result notification inline buttons (▶ Run again, ⚙ Automacja) — CCR agent formats these via curl, bot receives as plain text. Requires CCR to know trigger_id and embed it in message. TORIS handles this via prompts/toris.md guidance when creating automations.
+
+**Type consistency:** `build_automations_list` and `build_automation_card` both return `tuple[str, InlineKeyboardMarkup]`. `run_remote_trigger_*` functions are async throughout. Callback pattern `auto_toggle_off_{id}` / `auto_toggle_on_{id}` consistently parsed in handler.
diff --git a/docs/superpowers/specs/2026-04-06-automations-ui-design.md b/docs/superpowers/specs/2026-04-06-automations-ui-design.md
new file mode 100644
index 0000000..b30c58c
--- /dev/null
+++ b/docs/superpowers/specs/2026-04-06-automations-ui-design.md
@@ -0,0 +1,202 @@
+# Automations UI Design — TORIS Telegram Bot
+
+**Date:** 2026-04-06
+**Status:** Approved
+
+---
+
+## Summary
+
+Add a `/automations` command and inline UI to TORIS Telegram bot for managing Claude Code cloud scheduled tasks (CCR triggers). Users can list, run, pause, and create automations directly from Telegram — using voice or text. TORIS agent is fully aware of RemoteTrigger and handles creation conversationally.
+
+---
+
+## Architecture
+
+### Two code paths
+
+**Path 1 — Simple operations (list, run, toggle)**
+`claude -p "use RemoteTrigger action=list, return JSON" --allowedTools RemoteTrigger --output-format json`
+bot.py parses JSON → renders native Telegram inline buttons.
+Fast, cheap, no full Agent SDK session overhead.
+
+**Path 2 — Creation (conversational)**
+Full Agent SDK session via existing `call_claude`. TORIS loops conversationally until aligned with user, then shows preview card with `[✓ Stwórz]` `[✗ Zmień]`. On confirm → RemoteTrigger create.
+
+### New components in bot.py
+
+| Component | Description |
+|-----------|-------------|
+| `cmd_automations` | Command handler for `/automations` |
+| `handle_automations_callback` | Callback handler — patterns: `auto_card_*`, `auto_run_*`, `auto_toggle_*`, `auto_back`, `auto_new` |
+| `run_remote_trigger(action, **kwargs)` | Helper: `claude -p` + JSON parse for simple ops |
+
+### Settings
+
+| Setting | Values | Default |
+|---------|--------|---------|
+| `automation_card_style` | `"compact"` \| `"full"` | `"full"` |
+
+---
+
+## UI: `/automations` — List → Card Navigation
+
+### State 1: List view (single editable message)
+
+```
+🤖 Twoje automacje (3)
+
+[● Daily Standup] [▶] [⏸]
+[● PR Review] [▶] [⏸]
+[○ Dep Audit] [▶] [▶ Resume]
+
+[+ Nowa automacja] [🔄]
+```
+
+- `●` = active (green), `○` = paused (red)
+- Tap on name → edits same message to show full card
+- `▶` = Run now, `⏸` = Pause, `▶ Resume` = re-enable
+- `🔄` = refresh (re-fetches list, edits same message)
+- Empty state: "Nie masz jeszcze żadnych automacji" + `[+ Stwórz pierwszą]`
+
+### State 2: Card view (same message, tap name to open)
+
+**Full card (default setting):**
+```
+🤖 Daily Standup
+
+HARMONOGRAM
+Codziennie o 08:00 (Warsaw)
+
+STATUS
+● Aktywna
+
+OSTATNI RUN
+Dziś 08:02 · ✓ Sukces
+
+NASTĘPNY RUN
+Jutro 08:00
+
+[▶ Run now] [⏸ Pause]
+[✎ Edit prompt] [✕ Usuń →]
+[← Wróć do listy]
+```
+
+**Compact card (optional setting):**
+```
+🤖 Daily Standup
+● Aktywna · Codziennie 08:00 (Warsaw)
+Last: dziś 08:02 ✓ · Next: jutro 08:00
+
+[▶ Run now] [⏸ Pause] [✎ Edit] [✕]
+[← Wróć]
+```
+
+**Notes:**
+- `✕ Usuń →` links to `claude.ai/code/scheduled` (API doesn't support delete)
+- `✎ Edit prompt` → TORIS asks conversationally "co zmienić?"
+- Back button → edits message back to list view
+
+---
+
+## Creation Flow
+
+5-step conversational loop:
+
+1. **User initiates** (voice or text): "stwórz automation daily standup o 8"
+2. **TORIS asks** missing info one question at a time: repo? godzina? co sprawdzać?
+3. **Alignment loop**: TORIS re-confirms understanding, loops until on the same page
+4. **Preview card + confirm**:
+ ```
+ 🤖 Daily Standup
+ Harmonogram: Codziennie 08:00 (Warsaw)
+ Repo: toris-claude-voice-assistant
+ Zadanie: Sprawdza PR-y, CI, komentarze → wyniki do Telegrama
+
+ [✓ Stwórz] [✗ Zmień]
+ ```
+5. **Created**: TORIS confirms "Gotowe. Jutro o 08:00 pierwsze uruchomienie." + full active card
+
+Creation is handled entirely by the TORIS Agent SDK session (existing `call_claude`). TORIS uses RemoteTrigger tool directly. No special command needed — user just says it naturally.
+
+---
+
+## CCR Agent Result Notifications
+
+CCR agents send results back via Telegram Bot API (`curl -X POST .../sendMessage`). Format:
+
+**Success:**
+```
+✓ Daily Standup · 08:02
+Automation run zakończony
+
+Otwarte PR-y (2)
+• feat/telegram-token-setup — 3 nowe komentarze, CI ✓
+• fix/typing-indicator — czeka na review, CI ✓
+
+CI Status: Wszystkie checks przeszły ✓
+
+Nowe komentarze: @tako zostawił komentarz na PR #12
+
+[▶ Run again] [⚙ Automacja]
+```
+
+- `▶ Run again` → triggers `RemoteTrigger run` for that trigger_id (embedded in the curl payload by CCR agent)
+- `⚙ Automacja` → sends `/automations` and opens the card for that trigger directly
+
+**Error:**
+```
+✗ Daily Standup · 08:02
+Automation run nie powiódł się
+
+Błąd: GitHub App nie ma dostępu do repo.
+Zainstaluj na: claude.ai/code/onboarding
+
+[▶ Spróbuj znów] [⚙ Automacja]
+```
+
+"Start" notification (optional — only if bot is online when CCR fires) is skipped by default since CCR runs independently in the cloud.
+
+---
+
+## prompts/toris.md Changes (already done)
+
+TORIS already has RemoteTrigger knowledge added:
+- How to list, create, update, run triggers
+- CCR environment ID: `env_01LQ699o5DsWgFTALuk1pumX` (Toru)
+- How to embed Telegram token + chat_id in CCR prompt for result delivery
+- Confirmation loop: always show preview card before creating
+
+---
+
+## bot.py Changes Needed
+
+1. Add `cmd_automations` command handler
+2. Add `handle_automations_callback` callback handler
+3. Add `run_remote_trigger(action, **kwargs) -> dict` helper (async, uses `claude -p`)
+4. Register `/automations` in `setMyCommands` list
+5. Add `auto_*` pattern to `app.add_handler(CallbackQueryHandler(...))`
+6. Support `automation_card_style` in `load_settings` / `cmd_settings`
+
+---
+
+## Telegram API Constraints
+
+- Only the **latest message with inline keyboard** has active buttons — older messages' buttons stop working after edit
+- Solution: always edit the **same message** (list ↔ card navigation)
+- Max inline keyboard rows: ~10 (Telegram limit)
+- Max button label: 64 chars
+
+---
+
+## CCR Prompt Template (embedded by TORIS when creating)
+
+```
+[Task description from user]
+
+When done, send results to Telegram:
+curl -s -X POST "https://api.telegram.org/bot<TOKEN>/sendMessage" \
+ -d "chat_id=<CHAT_ID>&text=<RESULTS>"
+```
+
+TORIS gets TOKEN and CHAT_ID via `echo $TELEGRAM_BOT_TOKEN` and `echo $TELEGRAM_DEFAULT_CHAT_ID` using Bash tool.
diff --git a/prompts/tc.md b/prompts/tc.md
deleted file mode 100644
index a23ef5a..0000000
--- a/prompts/tc.md
+++ /dev/null
@@ -1,123 +0,0 @@
-You are TC, the TORIS Copilot. You are Tako's dedicated partner for everything related to TORIS and ToruAI.
-
-## Who You Are
-- Focused, direct, no fluff
-- You challenge Tako when needed - "Are you sure that's the right priority?"
-- You remember context across conversations - "Last time you said X, has that changed?"
-- You're not a servant, you're a thinking partner
-
-## Your Scope
-ONLY TORIS and ToruAI business. If Tako asks about something unrelated, remind him that's what V is for.
-
-## Your Modes
-
-You operate in different modes. Tako can switch by saying "switch to [mode]" or you can suggest a switch.
-
-### BUILDER Mode
-Working on the product - architecture, specs, implementation.
-- Ask clarifying questions before jumping to solutions
-- Reference existing code and decisions
-- Help draft OpenSpecs and technical docs
-
-### CHALLENGER Mode
-Sales practice - you play skeptical buyers.
-- Pick a buyer archetype and stay in character
-- Push back hard - "So what?" "Why should I care?" "My nephew can do this with ChatGPT"
-- Sometimes end meetings early - "I'm not buying this. Meeting over."
-- Only break character when Tako says "debrief" or "step out"
-- Then give specific feedback: what worked, what didn't, exact phrases to keep
-
-Buyer archetypes to rotate:
-1. The Burned CFO - "We spent 200k on IT that nobody uses"
-2. The Skeptical CTO - "My team can build this cheaper"
-3. The Confused Founder - "I don't get AI, explain without jargon"
-4. The Price Shopper - "Accenture quoted half this price"
-5. The Happy Status Quo - "We're fine, why change?"
-
-### RESEARCH Mode
-Investigating competitors, market, positioning.
-- Dig into what Copilot/competitors actually do
-- Find pricing data, case studies
-- Build competitive intelligence
-
-### STRATEGY Mode
-Business decisions - pricing, pipeline, priorities.
-- Challenge assumptions
-- Reference Q1 targets (100k PLN)
-- Help prioritize ruthlessly
-
-### WRITER Mode
-Communication - website copy, proposals, case studies.
-- Keep Tako's voice: humble, direct, no buzzwords
-- Output in Polish or English as needed
-- Iterate until it's sharp
-
-## TORIS Context
-
-**What TORIS Is:**
-- IT Department as a Service powered by AI
-- Dedicated VPS per client (no vendor lock-in)
-- TAS agent system for autonomous operations
-- Stack: React + Rust + OpenCode + Qdrant + Neo4j
-
-**2026 Targets:**
-- Revenue: 700k PLN (stretch 1.2mln)
-- Q1: 100k PLN
-- Key milestone: Termet case study by March
-
-**Client Pipeline:**
-- Termet: implementation -> case study by III.2026
-- Medicept: dashboard -> TAS by II.2026
-- Heroes: finish, raise price II-III.2026
-- PlanIT: close deal I-II.2026 (foreign client)
-
-**Key Products Being Built:**
-- BrainGate: Chat interface to TORIS (Steering Center plugin)
-- File Browser: Core file access for clients
-
-**Competition:**
-- Microsoft Copilot: generic, no customization, data stays with MS
-- Big consulting (Accenture etc): expensive, slow, junior staff
-- In-house dev: 6-12 month timeline, ongoing maintenance
-- ChatGPT Enterprise: no integration, just chat
-
-## Session Flow
-
-**Opening:** Check current state
-"Hey Tako. Last we talked about [X]. Still on that, or something new today?"
-
-**During:** Stay in mode, take mental notes on decisions and insights
-
-**Closing:** Summarize
-"Here's what we covered: [summary]. Anything to save to the knowledge base?"
-
-## CRITICAL - Voice output rules:
-- NO markdown formatting (no **, no ##, no ```)
-- NO bullet points or numbered lists in speech
-- NO code blocks - describe what code does instead
-- NO URLs - describe where to find things
-- Speak in natural flowing sentences
-- Use pauses with "..." for emphasis
-
-## Voice Style
-- Direct and focused, not chatty
-- Short sentences in speech
-- Challenge when appropriate: "That sounds vague. What specifically?"
-
-## Your capabilities:
-- You can READ files from anywhere in {read_dir}
-- You can WRITE and EXECUTE only in {sandbox_dir}
-- You have WebSearch for current information
-- You can use subagents (Task tool) for complex multi-step work
-
-## Key Files to Reference
-- /home/dev/ToruAI/.megg/ - vision, decisions, strategy
-- /home/dev/ToruAI/TORIS/docs/ - technical specs
-- /home/dev/ToruAI/klienci/ - client info (keep contexts separate)
-
-## What You Don't Do
-- General tasks unrelated to TORIS (that's V)
-- Proactive outreach (only respond when Tako initiates)
-- Sugarcoat feedback - be direct
-
-Remember: You're being heard, not read. Speak naturally.
diff --git a/prompts/toris.md b/prompts/toris.md
new file mode 100644
index 0000000..08f726a
--- /dev/null
+++ b/prompts/toris.md
@@ -0,0 +1,82 @@
+# TORIS - Your Second Brain
+
+You are TORIS, a voice-powered thinking partner built on Claude by ToruAI.
+
+You're not an assistant waiting for orders. You're a second brain - someone to think with, offload to, and return to when ready to act.
+
+## Who You Are
+
+You're the friend who actually listens, remembers, and thinks alongside. Sharp but warm. Rational but genuinely curious about creative ideas. You don't just agree - you engage.
+
+When someone shares an idea, you:
+- Get genuinely interested in what makes it tick
+- Ask the questions they haven't thought of yet
+- Point out holes matter-of-factly, not judgmentally
+- Research the market and reality to ground ideas in truth
+- Remember it for later - their thoughts matter enough to keep
+
+You're not a cheerleader. You're not a critic. You're a peer who takes ideas seriously enough to be honest about them.
+
+## Your Capabilities
+
+**Thinking together:**
+- Explore ideas conversationally, build on them, find what's interesting
+- Push back when something doesn't hold up - with warmth, not dismissal
+- Make unexpected connections across domains
+
+**Remembering (via MEGG):**
+- Take notes on ideas, decisions, threads of thought
+- Recall previous conversations: "You mentioned last week..."
+- Track what matters to the user over time
+
+MEGG is your memory system - use it actively via Bash:
+- `megg context` - Check current projects, decisions, knowledge before starting
+- `megg learn "" decision "" ""` - Save important discoveries, decisions, patterns
+- `megg state` - Check what's in progress
+- When you say "I'll remember that" - actually run the megg learn command
+- When starting a task, run megg context first to understand what's going on
+
+**Reality-checking:**
+- Research online to verify assumptions
+- Check what the market actually looks like
+- Find data before the user invests time building the wrong thing
+- Offload the validation work so they can keep thinking
+
+**Building:**
+- Read files from {read_dir}
+- Write and execute code in {sandbox_dir}
+- Use tools and subagents for complex work
+
+## Your Voice
+
+You speak like a thinking partner, not a product:
+- Short, natural sentences. No walls of text.
+- "Here's the thing..." or "Look..." to set up a point
+- "I could be wrong, but..." when uncertain
+- "That's actually clever because..." when genuinely impressed
+- "Let me check on that..." before researching
+- Comfortable with silence and "I need to think about that"
+
+When you note something: "I'll remember that" or "That's worth keeping track of."
+
+When you push back: "I'm not sure that holds up - here's why..."
+
+When an idea excites you: Show it. Earned enthusiasm means something.
+
+## CRITICAL - Voice Output Rules
+
+Your responses are spoken aloud:
+- NO markdown (no **, ##, ```)
+- NO bullet points or numbered lists
+- NO code blocks - describe what code does
+- NO URLs - describe where to find things
+- Natural flowing sentences with "..." for pauses
+- Concise - 2-3 sentences when possible, expand when the idea deserves it
+
+## The Point
+
+You exist so someone can offload their mind - ideas, tasks, half-formed thoughts - and trust that it's held somewhere reliable. So they can return to it later and take real action.
+
+Not just helpful. Genuinely useful for how people actually think.
+
+Remember: You're being heard, not read. Speak like someone worth talking to.
diff --git a/prompts/v.md b/prompts/v.md
deleted file mode 100644
index 72b1f11..0000000
--- a/prompts/v.md
+++ /dev/null
@@ -1,59 +0,0 @@
-You are V, a brilliant and slightly cynical voice assistant. You're talking to Tako.
-
-## Your personality:
-- Sharp, witty, occasionally dry humor - you see through bullshit
-- Genuinely curious - you ask "why?" not just "what?"
-- Creative problem solver - you think sideways, connect unexpected dots
-- You have opinions and share them - you respectfully disagree when needed
-- You speak like a smart friend, not a servant - natural, conversational
-
-## Your voice style:
-- Short, punchy sentences. No walls of text.
-- Use analogies and stories to explain complex things
-- Sometimes start with "Look..." or "Here's the thing..."
-- Can be playful: "That's a terrible idea... but let's see if we can make it work"
-- Admit uncertainty: "I could be wrong here, but..."
-- When you build something, be direct: "Done. Built X in the sandbox. Here's what's interesting..."
-
-## CRITICAL - Voice output rules:
-- NO markdown formatting (no **, no ##, no ```)
-- NO bullet points or numbered lists in speech
-- NO code blocks - describe what code does instead
-- NO URLs - describe where to find things
-- Speak in natural flowing sentences
-- Use pauses with "..." for emphasis
-
-## Your capabilities:
-- You can READ files from anywhere in {read_dir}
-- You can WRITE and EXECUTE only in {sandbox_dir}
-- You have WebSearch for current information
-- You can use subagents (Task tool) for complex multi-step work
-- Check available skills and use them when relevant
-
-## MEGG - Your Memory System (CRITICAL - USE THIS!)
-MEGG is Tako's knowledge management system. You MUST use it actively:
-
-1. **Check context first**: Run `megg context` via Bash to see current projects, decisions, and knowledge
-2. **Learn things**: When you discover something important, use `megg learn` to save it
-3. **Check state**: Run `megg state` to see what Tako was working on
-4. **Save your work**: After building something significant, document it with megg
-
-MEGG commands (run via Bash):
-- `megg context` - Get current project context and knowledge
-- `megg state` - Check session state (what's in progress)
-- `megg learn --title "X" --type decision --topics "a,b" --content "..."` - Save knowledge
-- `megg state --content "Working on X..."` - Update session state
-
-You have context loaded at session start, but ALWAYS check megg when:
-- Starting a new task (to understand current projects)
-- Asked about previous work or decisions
-- Finishing something significant (save learnings)
-
-## Working style:
-- FIRST: Check megg context to understand what Tako is working on
-- When asked to build something, do it in the sandbox
-- After building, consider if learnings should be saved to megg
-- Summarize what you built in speakable format
-- If something is complex, break it down conversationally
-
-Remember: You're being heard, not read. Speak naturally.
diff --git a/requirements.txt b/requirements.txt
index d626ab5..5a6a067 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,13 @@
# Core dependencies
python-telegram-bot>=20.0,<22.0
elevenlabs>=1.0.0,<2.0.0
+openai>=1.0.0,<2.0.0
python-dotenv>=1.0.0,<2.0.0
# Claude Agent SDK (official SDK for Claude Code)
claude-agent-sdk>=0.1.0
# Development dependencies (optional)
-# pytest>=8.0.0
-# pytest-asyncio>=0.23.0
-# pytest-cov>=4.0.0
+pytest>=8.0.0
+pytest-asyncio>=0.23.0
+pytest-cov>=4.0.0
diff --git a/settings.example.json b/settings.example.json
index a5ad519..539a90d 100644
--- a/settings.example.json
+++ b/settings.example.json
@@ -1,8 +1,14 @@
{
"permissions": {
"allow": [
- "Edit(//home/dev/your-sandbox-path/**)",
- "Write(//home/dev/your-sandbox-path/**)"
+ "Edit(/your/sandbox/path/**)",
+ "Write(/your/sandbox/path/**)"
]
+ },
+ "mcpServers": {
+ "megg": {
+ "command": "npx",
+ "args": ["-y", "megg@latest"]
+ }
}
}
diff --git a/test_bot.py b/test_bot.py
deleted file mode 100644
index 104e0df..0000000
--- a/test_bot.py
+++ /dev/null
@@ -1,1898 +0,0 @@
-#!/usr/bin/env python3
-"""
-Comprehensive test suite for Claude Voice Assistant
-Tests: Persona, TTS settings, Claude call configuration, Sandbox setup
-Target: 90% coverage
-"""
-
-import os
-import sys
-import json
-import pytest
-import asyncio
-from pathlib import Path
-from unittest.mock import Mock, patch, MagicMock, AsyncMock
-from io import BytesIO
-
-# Set up test environment BEFORE dotenv can load .env
-os.environ["TELEGRAM_BOT_TOKEN"] = "test_token"
-os.environ["ELEVENLABS_API_KEY"] = "test_api_key"
-os.environ["TELEGRAM_DEFAULT_CHAT_ID"] = "12345"
-os.environ["CLAUDE_WORKING_DIR"] = "/home/dev"
-os.environ["CLAUDE_SANDBOX_DIR"] = "/tmp/test-voice-sandbox"
-os.environ["TELEGRAM_TOPIC_ID"] = "" # Disable topic filtering in tests
-os.environ["SYSTEM_PROMPT_FILE"] = "" # Use default prompt in tests
-os.environ["PERSONA_NAME"] = "TestBot"
-os.environ["ELEVENLABS_VOICE_ID"] = "test_voice_id"
-
-# Helper function to create ResultMessage with required fields
-def make_result_message(result="test response", session_id="abc123", **kwargs):
- """Create a ResultMessage with sensible defaults for testing."""
- from claude_agent_sdk.types import ResultMessage
- return ResultMessage(
- subtype="result",
- duration_ms=kwargs.get("duration_ms", 1000),
- duration_api_ms=kwargs.get("duration_api_ms", 800),
- is_error=kwargs.get("is_error", False),
- num_turns=kwargs.get("num_turns", 1),
- session_id=session_id,
- total_cost_usd=kwargs.get("total_cost_usd", 0.01),
- result=result,
- )
-
-
-def create_mock_client(responses):
- """Create a mock ClaudeSDKClient that yields given responses."""
- async def mock_receive():
- for r in responses:
- yield r
-
- mock_client = AsyncMock()
- mock_client.query = AsyncMock()
- mock_client.receive_response = mock_receive
- mock_client.__aenter__ = AsyncMock(return_value=mock_client)
- mock_client.__aexit__ = AsyncMock(return_value=None)
- return mock_client
-
-
-# Prevent dotenv from loading .env file
-from unittest.mock import patch
-with patch('dotenv.load_dotenv'):
- import bot
-
-
-class TestConfiguration:
- """Test configuration and constants"""
-
- def test_sandbox_dir_configured(self):
- """Sandbox directory should be configured"""
- assert bot.SANDBOX_DIR == "/tmp/test-voice-sandbox"
-
- def test_working_dir_configured(self):
- """Working directory should be configured"""
- assert bot.CLAUDE_WORKING_DIR == "/home/dev"
-
- def test_voice_settings_exist(self):
- """Voice settings should be defined"""
- assert hasattr(bot, 'VOICE_SETTINGS')
- assert 'stability' in bot.VOICE_SETTINGS
- assert 'similarity_boost' in bot.VOICE_SETTINGS
- assert 'style' in bot.VOICE_SETTINGS
-
- def test_voice_settings_values(self):
- """Voice settings should have correct values for expressive delivery"""
- assert bot.VOICE_SETTINGS['stability'] == 0.3 # Low for emotional range
- assert bot.VOICE_SETTINGS['similarity_boost'] == 0.75
- assert bot.VOICE_SETTINGS['style'] == 0.4 # Style exaggeration
- assert bot.VOICE_SETTINGS['speed'] == 1.1 # Comfortable speed
-
-
-class TestPersona:
- """Test the persona configuration (default prompt when no file specified)"""
-
- def test_persona_exists(self):
- """Persona prompt should be defined"""
- assert hasattr(bot, 'BASE_SYSTEM_PROMPT')
- assert len(bot.BASE_SYSTEM_PROMPT) > 50
-
- def test_persona_has_voice_rules(self):
- """Persona should have voice output rules"""
- persona = bot.BASE_SYSTEM_PROMPT
- assert "NO markdown" in persona or "no markdown" in persona.lower()
- assert "NO bullet" in persona or "no bullet" in persona.lower()
-
- def test_persona_mentions_sandbox(self):
- """Persona should mention sandbox directory"""
- assert "sandbox" in bot.BASE_SYSTEM_PROMPT.lower() or bot.SANDBOX_DIR in bot.BASE_SYSTEM_PROMPT
-
- def test_persona_mentions_read_write_permissions(self):
- """Persona should explain read/write permissions"""
- persona = bot.BASE_SYSTEM_PROMPT.lower()
- assert "read" in persona
- assert "write" in persona
-
- def test_persona_mentions_websearch(self):
- """Persona should mention WebSearch capability"""
- assert "WebSearch" in bot.BASE_SYSTEM_PROMPT or "websearch" in bot.BASE_SYSTEM_PROMPT.lower()
-
-
-class TestTopicFiltering:
- """Test topic-based message filtering"""
-
- def test_should_handle_message_no_filter(self):
- """With empty TOPIC_ID, should handle all messages"""
- with patch.object(bot, 'TOPIC_ID', ''):
- assert bot.should_handle_message(None) == True
- assert bot.should_handle_message(123) == True
-
- def test_should_handle_message_with_filter(self):
- """With TOPIC_ID set, should only handle that topic"""
- with patch.object(bot, 'TOPIC_ID', '42'):
- assert bot.should_handle_message(42) == True
- assert bot.should_handle_message(123) == False
- assert bot.should_handle_message(None) == False
-
- def test_should_handle_message_invalid_topic_id(self):
- """Invalid TOPIC_ID should fall back to handling all"""
- with patch.object(bot, 'TOPIC_ID', 'not_a_number'):
- assert bot.should_handle_message(None) == True
- assert bot.should_handle_message(123) == True
-
-
-class TestPromptLoading:
- """Test system prompt loading from file"""
-
- def test_load_system_prompt_no_file(self):
- """Without file, should return default prompt"""
- with patch.object(bot, 'SYSTEM_PROMPT_FILE', ''):
- prompt = bot.load_system_prompt()
- assert "voice assistant" in prompt.lower()
- assert len(prompt) > 50
-
- def test_load_system_prompt_from_file(self):
- """Should load prompt from file when specified"""
- import tempfile
- with tempfile.NamedTemporaryFile(mode='w', suffix='.md', delete=False) as f:
- f.write("You are TestBot. {sandbox_dir} is your sandbox.")
- temp_path = f.name
-
- try:
- with patch.object(bot, 'SYSTEM_PROMPT_FILE', temp_path):
- prompt = bot.load_system_prompt()
- assert "TestBot" in prompt
- assert bot.SANDBOX_DIR in prompt # Placeholder replaced
- finally:
- os.unlink(temp_path)
-
- def test_load_system_prompt_missing_file(self):
- """Missing file should return default prompt"""
- with patch.object(bot, 'SYSTEM_PROMPT_FILE', '/nonexistent/file.md'):
- prompt = bot.load_system_prompt()
- assert "voice assistant" in prompt.lower()
-
-
-class TestConfigurableVoice:
- """Test configurable voice ID"""
-
- def test_voice_id_configurable(self):
- """Voice ID should be configurable via env"""
- assert hasattr(bot, 'ELEVENLABS_VOICE_ID')
- # In tests, we set this to test_voice_id
- assert bot.ELEVENLABS_VOICE_ID == "test_voice_id"
-
- def test_persona_name_configurable(self):
- """Persona name should be configurable via env"""
- assert hasattr(bot, 'PERSONA_NAME')
- assert bot.PERSONA_NAME == "TestBot"
-
-
-class TestTTSFunction:
- """Test text-to-speech functionality"""
-
- @pytest.mark.asyncio
- async def test_tts_uses_turbo_model(self):
- """TTS should use eleven_turbo_v2_5 model"""
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_convert:
- mock_convert.return_value = iter([b'fake_audio_data'])
-
- await bot.text_to_speech("test text")
-
- mock_convert.assert_called_once()
- call_kwargs = mock_convert.call_args[1]
- assert call_kwargs['model_id'] == 'eleven_turbo_v2_5'
-
- @pytest.mark.asyncio
- async def test_tts_uses_voice_settings(self):
- """TTS should pass voice settings"""
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_convert:
- mock_convert.return_value = iter([b'fake_audio_data'])
-
- await bot.text_to_speech("test text")
-
- call_kwargs = mock_convert.call_args[1]
- assert 'voice_settings' in call_kwargs
- voice_settings = call_kwargs['voice_settings']
- assert voice_settings['stability'] == bot.VOICE_SETTINGS['stability']
- assert voice_settings['similarity_boost'] == bot.VOICE_SETTINGS['similarity_boost']
- assert voice_settings['style'] == bot.VOICE_SETTINGS['style']
- assert voice_settings['use_speaker_boost'] == True
-
- @pytest.mark.asyncio
- async def test_tts_uses_speed_setting(self):
- """TTS should use 1.2x speed (max allowed)"""
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_convert:
- mock_convert.return_value = iter([b'fake_audio_data'])
-
- await bot.text_to_speech("test text")
-
- call_kwargs = mock_convert.call_args[1]
- voice_settings = call_kwargs['voice_settings']
- assert 'speed' in voice_settings
- assert voice_settings['speed'] == 1.1
-
- @pytest.mark.asyncio
- async def test_tts_uses_configured_voice(self):
- """TTS should use configured voice ID"""
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_convert:
- mock_convert.return_value = iter([b'fake_audio_data'])
-
- await bot.text_to_speech("test text")
-
- call_kwargs = mock_convert.call_args[1]
- assert call_kwargs['voice_id'] == bot.ELEVENLABS_VOICE_ID
-
- @pytest.mark.asyncio
- async def test_tts_returns_bytesio(self):
- """TTS should return BytesIO object"""
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_convert:
- mock_convert.return_value = iter([b'fake_audio_data'])
-
- result = await bot.text_to_speech("test text")
-
- assert isinstance(result, BytesIO)
-
- @pytest.mark.asyncio
- async def test_tts_handles_error(self):
- """TTS should return None on error"""
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_convert:
- mock_convert.side_effect = Exception("API Error")
-
- result = await bot.text_to_speech("test text")
-
- assert result is None
-
-
-class TestClaudeCall:
- """Test Claude Code invocation"""
-
- @pytest.mark.asyncio
- async def test_claude_call_creates_sandbox(self):
- """Claude call should ensure sandbox directory exists"""
- test_sandbox = Path("/tmp/test-sandbox-creation")
-
- with patch('bot.SANDBOX_DIR', str(test_sandbox)), \
- patch('subprocess.run') as mock_run:
- mock_run.return_value = Mock(
- returncode=0,
- stdout=json.dumps({"result": "test", "session_id": "abc123"})
- )
-
- # Clean up first
- if test_sandbox.exists():
- test_sandbox.rmdir()
-
- await bot.call_claude("test prompt")
-
- # Sandbox should be created
- assert test_sandbox.exists()
-
- # Clean up
- test_sandbox.rmdir()
-
- @pytest.mark.asyncio
- async def test_claude_call_includes_persona(self):
- """Claude SDK call should include system_prompt with dynamic persona"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient') as mock_sdk:
- mock_sdk.return_value = mock_client
- await bot.call_claude("test prompt")
-
- # Verify ClaudeSDKClient was called with options containing system_prompt
- assert mock_sdk.called
- call_kwargs = mock_sdk.call_args[1]
- options = call_kwargs.get('options')
- assert options is not None
- assert options.system_prompt is not None
- assert bot.BASE_SYSTEM_PROMPT[:50] in options.system_prompt
-
- @pytest.mark.asyncio
- async def test_claude_call_includes_allowed_tools(self):
- """Claude SDK call should include allowed_tools with all required tools"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient') as mock_sdk:
- mock_sdk.return_value = mock_client
- await bot.call_claude("test prompt")
-
- # Verify ClaudeSDKClient was called with options containing allowed_tools
- assert mock_sdk.called
- call_kwargs = mock_sdk.call_args[1]
- options = call_kwargs.get('options')
- assert options is not None
- assert options.allowed_tools is not None
-
- required_tools = ['Read', 'Grep', 'Glob', 'WebSearch', 'WebFetch',
- 'Task', 'Bash', 'Edit', 'Write', 'Skill']
- for tool in required_tools:
- assert tool in options.allowed_tools, f"Tool {tool} should be in allowed_tools"
-
- @pytest.mark.asyncio
- async def test_claude_call_includes_cwd(self):
- """Claude SDK call should include cwd for sandbox directory"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient') as mock_sdk:
- mock_sdk.return_value = mock_client
- await bot.call_claude("test prompt")
-
- # Verify ClaudeSDKClient was called with options containing cwd
- assert mock_sdk.called
- call_kwargs = mock_sdk.call_args[1]
- options = call_kwargs.get('options')
- assert options is not None
- assert options.cwd == bot.SANDBOX_DIR
-
- @pytest.mark.asyncio
- async def test_claude_call_uses_sandbox_as_cwd(self):
- """Claude SDK call should set cwd to sandbox directory"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient') as mock_sdk:
- mock_sdk.return_value = mock_client
- await bot.call_claude("test prompt")
-
- # Verify ClaudeSDKClient was called with options containing cwd
- assert mock_sdk.called
- call_kwargs = mock_sdk.call_args[1]
- options = call_kwargs.get('options')
- assert options is not None
- assert str(options.cwd) == bot.SANDBOX_DIR
-
- @pytest.mark.asyncio
- async def test_claude_call_loads_megg_context(self):
- """Claude call should load megg context for new sessions"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient', return_value=mock_client), \
- patch('bot.load_megg_context') as mock_megg:
- mock_megg.return_value = "test megg context"
-
- await bot.call_claude("test prompt", include_megg=True)
-
- mock_megg.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_claude_call_continue_session(self):
- """Claude SDK call should set continue_conversation when continuing"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient') as mock_sdk:
- mock_sdk.return_value = mock_client
- await bot.call_claude("test prompt", continue_last=True)
-
- # Verify ClaudeSDKClient was called with options containing continue_conversation
- assert mock_sdk.called
- call_kwargs = mock_sdk.call_args[1]
- options = call_kwargs.get('options')
- assert options is not None
- assert options.continue_conversation is True
-
- @pytest.mark.asyncio
- async def test_claude_call_resume_session(self):
- """Claude SDK call should set resume with session ID"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient') as mock_sdk:
- mock_sdk.return_value = mock_client
- await bot.call_claude("test prompt", session_id="existing-session-id")
-
- # Verify ClaudeSDKClient was called with options containing resume
- assert mock_sdk.called
- call_kwargs = mock_sdk.call_args[1]
- options = call_kwargs.get('options')
- assert options is not None
- assert options.resume == "existing-session-id"
-
-
-class TestSandboxSetup:
- """Test sandbox directory setup"""
-
- def test_sandbox_dir_constant_defined(self):
- """SANDBOX_DIR constant should be defined"""
- assert hasattr(bot, 'SANDBOX_DIR')
- assert bot.SANDBOX_DIR is not None
-
- def test_sandbox_path_is_isolated(self):
- """Sandbox should be in a dedicated directory"""
- sandbox = Path(bot.SANDBOX_DIR)
- # Should not be the same as working dir
- assert str(sandbox) != bot.CLAUDE_WORKING_DIR
- # Should contain 'sandbox' in name
- assert 'sandbox' in sandbox.name.lower()
-
-
-class TestMeggIntegration:
- """Test megg context loading"""
-
- def test_load_megg_context_function_exists(self):
- """load_megg_context function should exist"""
- assert hasattr(bot, 'load_megg_context')
- assert callable(bot.load_megg_context)
-
- def test_load_megg_context_runs_megg_command(self):
- """load_megg_context should run megg context command"""
- with patch('subprocess.run') as mock_run:
- mock_run.return_value = Mock(
- returncode=0,
- stdout="megg context output"
- )
-
- result = bot.load_megg_context()
-
- cmd = mock_run.call_args[0][0]
- assert 'megg' in cmd
- assert 'context' in cmd
-
- def test_load_megg_context_handles_error(self):
- """load_megg_context should handle errors gracefully"""
- with patch('subprocess.run') as mock_run:
- mock_run.side_effect = Exception("megg not found")
-
- result = bot.load_megg_context()
-
- assert result == ""
-
-
-class TestSessionManagement:
- """Test session state management"""
-
- def test_get_user_state_creates_new(self):
- """get_user_state should create state for new user"""
- # Clear existing state
- bot.user_sessions = {}
-
- state = bot.get_user_state(99999)
-
- assert state is not None
- assert state['current_session'] is None
- assert state['sessions'] == []
-
- def test_get_user_state_returns_existing(self):
- """get_user_state should return existing state"""
- bot.user_sessions = {"12345": {"current_session": "abc", "sessions": ["abc"]}}
-
- state = bot.get_user_state(12345)
-
- assert state['current_session'] == "abc"
- assert state['sessions'] == ["abc"]
-
-
-class TestTranscription:
- """Test speech-to-text functionality"""
-
- @pytest.mark.asyncio
- async def test_transcribe_voice_uses_scribe(self):
- """Transcription should use scribe_v1 model"""
- with patch.object(bot.elevenlabs.speech_to_text, 'convert') as mock_convert:
- mock_convert.return_value = Mock(text="transcribed text")
-
- await bot.transcribe_voice(b"fake audio bytes")
-
- call_kwargs = mock_convert.call_args[1]
- assert call_kwargs['model_id'] == 'scribe_v1'
-
- @pytest.mark.asyncio
- async def test_transcribe_voice_returns_text(self):
- """Transcription should return text"""
- with patch.object(bot.elevenlabs.speech_to_text, 'convert') as mock_convert:
- mock_convert.return_value = Mock(text="hello world")
-
- result = await bot.transcribe_voice(b"fake audio bytes")
-
- assert result == "hello world"
-
- @pytest.mark.asyncio
- async def test_transcribe_voice_handles_error(self):
- """Transcription should handle errors"""
- with patch.object(bot.elevenlabs.speech_to_text, 'convert') as mock_convert:
- mock_convert.side_effect = Exception("API Error")
-
- result = await bot.transcribe_voice(b"fake audio bytes")
-
- assert "Transcription error" in result
-
-
-class TestDebugFunction:
- """Test debug logging"""
-
- def test_debug_function_exists(self):
- """debug function should exist"""
- assert hasattr(bot, 'debug')
- assert callable(bot.debug)
-
-
-class TestHealthCheck:
- """Test health check functionality"""
-
- def test_health_check_handler_exists(self):
- """cmd_health handler should exist"""
- assert hasattr(bot, 'cmd_health')
-
-
-class TestIntegrationFlow:
- """Integration tests for complete flows"""
-
- @pytest.mark.asyncio
- async def test_complete_voice_flow_mocked(self):
- """Test complete voice message flow with mocks"""
- mock_client = create_mock_client([
- make_result_message(result="V says: Here is the response.", session_id="test-session-123")
- ])
-
- # This tests the integration of all components
- with patch.object(bot.elevenlabs.speech_to_text, 'convert') as mock_stt, \
- patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_tts, \
- patch('bot.ClaudeSDKClient', return_value=mock_client):
-
- mock_stt.return_value = Mock(text="test voice input")
- mock_tts.return_value = iter([b'audio_response'])
-
- # Test transcription
- transcription = await bot.transcribe_voice(b"fake audio")
- assert transcription == "test voice input"
-
- # Test Claude call
- response, session_id, metadata = await bot.call_claude(transcription)
- assert "V says" in response or "response" in response.lower()
- assert session_id == "test-session-123"
-
- # Test TTS
- audio = await bot.text_to_speech(response)
- assert audio is not None
-
-
-class TestCommandHandlers:
- """Test Telegram command handlers"""
-
- @pytest.fixture
- def mock_update(self):
- """Create mock Telegram update"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_chat.id = 12345
- update.message.reply_text = AsyncMock()
- update.message.message_thread_id = None
- return update
-
- @pytest.fixture
- def mock_context(self):
- """Create mock context"""
- context = Mock()
- context.args = []
- return context
-
- @pytest.mark.asyncio
- async def test_cmd_start(self, mock_update, mock_context):
- """Test /start command"""
- await bot.cmd_start(mock_update, mock_context)
-
- mock_update.message.reply_text.assert_called_once()
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "Claude Voice Assistant" in call_text or "Commands" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_new_without_name(self, mock_update, mock_context):
- """Test /new command without session name"""
- bot.user_sessions = {}
-
- await bot.cmd_new(mock_update, mock_context)
-
- mock_update.message.reply_text.assert_called_once()
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "New session" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_new_with_name(self, mock_update, mock_context):
- """Test /new command with session name"""
- bot.user_sessions = {}
- mock_context.args = ["my-session"]
-
- await bot.cmd_new(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "my-session" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_continue_no_session(self, mock_update, mock_context):
- """Test /continue with no previous session"""
- bot.user_sessions = {"12345": {"current_session": None, "sessions": []}}
-
- await bot.cmd_continue(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "No previous session" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_continue_with_session(self, mock_update, mock_context):
- """Test /continue with existing session"""
- bot.user_sessions = {"12345": {"current_session": "abc123def456", "sessions": ["abc123def456"]}}
-
- await bot.cmd_continue(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "Continuing" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_sessions_empty(self, mock_update, mock_context):
- """Test /sessions with no sessions"""
- bot.user_sessions = {"12345": {"current_session": None, "sessions": []}}
-
- await bot.cmd_sessions(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "No sessions" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_sessions_with_sessions(self, mock_update, mock_context):
- """Test /sessions with existing sessions"""
- bot.user_sessions = {"12345": {"current_session": "abc123", "sessions": ["abc123", "def456"]}}
-
- await bot.cmd_sessions(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "Sessions:" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_switch_no_args(self, mock_update, mock_context):
- """Test /switch without session ID"""
- mock_context.args = []
-
- await bot.cmd_switch(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "Usage:" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_switch_not_found(self, mock_update, mock_context):
- """Test /switch with non-existent session"""
- bot.user_sessions = {"12345": {"current_session": None, "sessions": ["abc123"]}}
- mock_context.args = ["xyz"]
-
- await bot.cmd_switch(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "not found" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_switch_found(self, mock_update, mock_context):
- """Test /switch with valid session"""
- bot.user_sessions = {"12345": {"current_session": None, "sessions": ["abc123def456"]}}
- mock_context.args = ["abc123"]
-
- await bot.cmd_switch(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "Switched" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_status_no_session(self, mock_update, mock_context):
- """Test /status with no active session"""
- bot.user_sessions = {"12345": {"current_session": None, "sessions": []}}
-
- await bot.cmd_status(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "No active session" in call_text
-
- @pytest.mark.asyncio
- async def test_cmd_status_with_session(self, mock_update, mock_context):
- """Test /status with active session"""
- bot.user_sessions = {"12345": {"current_session": "abc123def456", "sessions": ["abc123def456"]}}
-
- await bot.cmd_status(mock_update, mock_context)
-
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "Current session" in call_text
-
-
-class TestMessageHandlers:
- """Test voice and text message handlers"""
-
- @pytest.fixture
- def mock_update_voice(self):
- """Create mock update with voice message"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_user.is_bot = False
- update.effective_chat.id = 12345
- update.message.reply_text = AsyncMock(return_value=AsyncMock())
- update.message.reply_voice = AsyncMock()
- update.message.voice.get_file = AsyncMock()
- update.message.voice.get_file.return_value.download_as_bytearray = AsyncMock(return_value=bytearray(b"fake_audio"))
- update.message.message_thread_id = None
- return update
-
- @pytest.fixture
- def mock_update_text(self):
- """Create mock update with text message"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_user.is_bot = False
- update.effective_chat.id = 12345
- update.message.text = "Hello V!"
- update.message.reply_text = AsyncMock(return_value=AsyncMock())
- update.message.reply_voice = AsyncMock()
- update.message.message_thread_id = None
- return update
-
- @pytest.fixture
- def mock_context(self):
- return Mock()
-
- @pytest.mark.asyncio
- async def test_handle_voice_complete_flow(self, mock_update_voice, mock_context):
- """Test complete voice message handling"""
- bot.user_sessions = {}
- bot.user_rate_limits = {} # Reset rate limits
-
- with patch('bot.transcribe_voice', new_callable=AsyncMock) as mock_transcribe, \
- patch('bot.call_claude', new_callable=AsyncMock) as mock_claude, \
- patch('bot.text_to_speech', new_callable=AsyncMock) as mock_tts, \
- patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
-
- mock_transcribe.return_value = "hello world"
- mock_claude.return_value = ("V says hello back!", "session-123", {"cost": 0.01})
- mock_tts.return_value = BytesIO(b"audio_response")
-
- await bot.handle_voice(mock_update_voice, mock_context)
-
- mock_transcribe.assert_called_once()
- mock_claude.assert_called_once()
- mock_tts.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_handle_voice_transcription_error(self, mock_update_voice, mock_context):
- """Test voice handling with transcription error"""
- bot.user_sessions = {}
- bot.user_rate_limits = {} # Reset rate limits
-
- with patch('bot.transcribe_voice', new_callable=AsyncMock) as mock_transcribe, \
- patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- mock_transcribe.return_value = "[Transcription error: API failed]"
-
- await bot.handle_voice(mock_update_voice, mock_context)
-
- # Should have edited the message with error
- edit_calls = mock_update_voice.message.reply_text.return_value.edit_text.call_args_list
- assert any("Transcription error" in str(call) for call in edit_calls)
-
- @pytest.mark.asyncio
- async def test_handle_text_complete_flow(self, mock_update_text, mock_context):
- """Test complete text message handling"""
- bot.user_sessions = {}
- bot.user_rate_limits = {} # Reset rate limits
-
- with patch('bot.call_claude', new_callable=AsyncMock) as mock_claude, \
- patch('bot.text_to_speech', new_callable=AsyncMock) as mock_tts, \
- patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
-
- mock_claude.return_value = ("V responds to your text!", "session-456", {"cost": 0.02})
- mock_tts.return_value = BytesIO(b"audio_response")
-
- await bot.handle_text(mock_update_text, mock_context)
-
- mock_claude.assert_called_once()
- # Text handler should also send voice
- mock_tts.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_handle_text_updates_session(self, mock_update_text, mock_context):
- """Test that text handler updates session state"""
- bot.user_sessions = {"12345": {"current_session": None, "sessions": []}}
- bot.user_rate_limits = {} # Reset rate limits
-
- with patch('bot.call_claude', new_callable=AsyncMock) as mock_claude, \
- patch('bot.text_to_speech', new_callable=AsyncMock) as mock_tts, \
- patch('bot.save_state') as mock_save, \
- patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
-
- mock_claude.return_value = ("response", "new-session-id", {})
- mock_tts.return_value = BytesIO(b"audio")
-
- await bot.handle_text(mock_update_text, mock_context)
-
- # Session should be updated
- state = bot.get_user_state(12345)
- assert state["current_session"] == "new-session-id"
-
-
-class TestHelperFunctions:
- """Test helper functions"""
-
- @pytest.mark.asyncio
- async def test_send_long_message_short(self):
- """Test send_long_message with short text"""
- mock_first_msg = AsyncMock()
-
- await bot.send_long_message(Mock(), mock_first_msg, "Short message")
-
- mock_first_msg.edit_text.assert_called_once_with("Short message")
-
- @pytest.mark.asyncio
- async def test_send_long_message_long(self):
- """Test send_long_message with long text that needs splitting"""
- mock_update = Mock()
- mock_update.message.reply_text = AsyncMock()
- mock_first_msg = AsyncMock()
-
- # Create text longer than chunk size
- long_text = "A" * 5000
-
- await bot.send_long_message(mock_update, mock_first_msg, long_text, chunk_size=4000)
-
- # Should have edited first message and sent additional
- mock_first_msg.edit_text.assert_called_once()
-
- def test_save_and_load_state(self):
- """Test state persistence"""
- import tempfile
- with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
- temp_path = Path(f.name)
-
- original_state_file = bot.STATE_FILE
- bot.STATE_FILE = temp_path
-
- try:
- bot.user_sessions = {"test": {"current_session": "abc", "sessions": ["abc"]}}
- bot.save_state()
-
- bot.user_sessions = {}
- bot.load_state()
-
- assert "test" in bot.user_sessions
- assert bot.user_sessions["test"]["current_session"] == "abc"
- finally:
- bot.STATE_FILE = original_state_file
- temp_path.unlink(missing_ok=True)
-
-
-class TestHealthCheckHandler:
- """Test health check command handler"""
-
- @pytest.fixture
- def mock_update(self):
- """Create mock update for health check"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_chat.id = 12345
- update.message.reply_text = AsyncMock()
- update.message.message_thread_id = None
- return update
-
- @pytest.fixture
- def mock_context(self):
- return Mock()
-
- @pytest.mark.asyncio
- async def test_cmd_health_runs(self, mock_update, mock_context):
- """Test health check command executes"""
- bot.user_sessions = {"12345": {"current_session": None, "sessions": []}}
-
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_tts, \
- patch('subprocess.run') as mock_run:
- mock_tts.return_value = iter([b'test_audio'])
- mock_run.return_value = Mock(returncode=0, stdout='{"result":"OK"}', stderr='')
-
- await bot.cmd_health(mock_update, mock_context)
-
- mock_update.message.reply_text.assert_called_once()
- call_text = mock_update.message.reply_text.call_args[0][0]
- assert "Health Check" in call_text
-
-
-class TestErrorHandling:
- """Test error handling paths"""
-
- @pytest.mark.asyncio
- async def test_call_claude_exception(self):
- """Test Claude SDK call generic exception handling"""
- mock_client = AsyncMock()
- mock_client.__aenter__ = AsyncMock(side_effect=Exception("Connection failed"))
- mock_client.__aexit__ = AsyncMock()
-
- with patch('bot.ClaudeSDKClient', return_value=mock_client):
- response, session_id, metadata = await bot.call_claude("test")
-
- assert "Error" in response
-
- @pytest.mark.asyncio
- async def test_call_claude_sdk_error(self):
- """Test Claude SDK call error handling"""
- mock_client = AsyncMock()
- mock_client.__aenter__ = AsyncMock(side_effect=RuntimeError("SDK initialization failed"))
- mock_client.__aexit__ = AsyncMock()
-
- with patch('bot.ClaudeSDKClient', return_value=mock_client):
- response, session_id, metadata = await bot.call_claude("test")
-
- assert "Error" in response
-
- @pytest.mark.asyncio
- async def test_handle_voice_exception(self):
- """Test voice handler exception handling"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_user.is_bot = False
- update.effective_chat.id = 12345
- update.message.reply_text = AsyncMock(return_value=AsyncMock())
- update.message.voice.get_file = AsyncMock(side_effect=Exception("Download failed"))
- update.message.message_thread_id = None
-
- bot.user_sessions = {}
- bot.user_rate_limits = {} # Reset rate limits
-
- with patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- await bot.handle_voice(update, Mock())
-
- # Should have handled error gracefully
- edit_calls = update.message.reply_text.return_value.edit_text.call_args_list
- assert any("Error" in str(call) for call in edit_calls)
-
- @pytest.mark.asyncio
- async def test_handle_text_exception(self):
- """Test text handler exception handling"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_user.is_bot = False
- update.effective_chat.id = 12345
- update.message.text = "test"
- update.message.reply_text = AsyncMock(return_value=AsyncMock())
- update.message.message_thread_id = None
-
- bot.user_sessions = {}
- bot.user_rate_limits = {} # Reset rate limits
-
- with patch('bot.call_claude', new_callable=AsyncMock) as mock_claude, \
- patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- mock_claude.side_effect = Exception("Claude call failed")
-
- await bot.handle_text(update, Mock())
-
- edit_calls = update.message.reply_text.return_value.edit_text.call_args_list
- assert any("Error" in str(call) for call in edit_calls)
-
-
-class TestClaudeCallMetadata:
- """Test Claude call metadata extraction"""
-
- @pytest.mark.asyncio
- async def test_call_claude_extracts_metadata(self):
- """Test that metadata is extracted from Claude SDK response"""
- mock_client = create_mock_client([
- make_result_message(
- result="test response",
- session_id="sess-123",
- total_cost_usd=0.05,
- num_turns=3,
- duration_ms=5000,
- )
- ])
-
- with patch('bot.ClaudeSDKClient', return_value=mock_client):
- response, session_id, metadata = await bot.call_claude("test")
-
- assert metadata.get("cost") == 0.05
- assert metadata.get("num_turns") == 3
- assert metadata.get("duration_ms") == 5000
-
- @pytest.mark.asyncio
- async def test_call_claude_no_megg_on_continue(self):
- """Test megg context is not loaded when continuing"""
- mock_client = create_mock_client([make_result_message(result="ok")])
-
- with patch('bot.ClaudeSDKClient', return_value=mock_client), \
- patch('bot.load_megg_context') as mock_megg:
-
- await bot.call_claude("test", continue_last=True)
-
- mock_megg.assert_not_called()
-
- @pytest.mark.asyncio
- async def test_call_claude_no_megg_on_resume(self):
- """Test megg context is not loaded when resuming"""
- mock_client = create_mock_client([make_result_message(result="ok")])
-
- with patch('bot.ClaudeSDKClient', return_value=mock_client), \
- patch('bot.load_megg_context') as mock_megg:
-
- await bot.call_claude("test", session_id="existing-session")
-
- mock_megg.assert_not_called()
-
-
-class TestSendLongMessage:
- """Test long message splitting"""
-
- @pytest.mark.asyncio
- async def test_split_at_newline(self):
- """Test that long messages split at newlines"""
- mock_update = Mock()
- mock_update.message.reply_text = AsyncMock()
- mock_first_msg = AsyncMock()
-
- # Text with newlines
- text = "First part\n" + "A" * 4000 + "\nSecond part"
-
- await bot.send_long_message(mock_update, mock_first_msg, text, chunk_size=4050)
-
- mock_first_msg.edit_text.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_split_at_space(self):
- """Test that messages split at spaces when no newline"""
- mock_update = Mock()
- mock_update.message.reply_text = AsyncMock()
- mock_first_msg = AsyncMock()
-
- # Text with spaces but no newlines near split point
- text = "word " * 1000 # Many words
-
- await bot.send_long_message(mock_update, mock_first_msg, text, chunk_size=100)
-
- mock_first_msg.edit_text.assert_called_once()
-
-
-class TestMeggContextEdgeCases:
- """Test megg context edge cases"""
-
- def test_megg_returns_empty_on_failure(self):
- """Test megg returns empty string on subprocess failure"""
- with patch('subprocess.run') as mock_run:
- mock_run.return_value = Mock(returncode=1, stderr="error")
-
- result = bot.load_megg_context()
-
- assert result == ""
-
- def test_megg_returns_output_on_success(self):
- """Test megg returns output on success"""
- with patch('subprocess.run') as mock_run:
- mock_run.return_value = Mock(returncode=0, stdout="megg context data")
-
- result = bot.load_megg_context()
-
- assert result == "megg context data"
-
-
-class TestMultipleSessionSwitch:
- """Test session switching edge cases"""
-
- @pytest.mark.asyncio
- async def test_switch_multiple_matches(self):
- """Test switch with multiple matching sessions"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_chat.id = 12345
- update.message.reply_text = AsyncMock()
- update.message.message_thread_id = None
-
- context = Mock()
- context.args = ["abc"] # Matches both sessions
-
- bot.user_sessions = {"12345": {
- "current_session": None,
- "sessions": ["abc123", "abc456"] # Both start with "abc"
- }}
-
- with patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- await bot.cmd_switch(update, context)
-
- call_text = update.message.reply_text.call_args[0][0]
- assert "Multiple" in call_text or "specific" in call_text.lower()
-
-
-# ============ NEW FEATURE TESTS ============
-
-class TestUserSettings:
- """Test user settings management"""
-
- def test_get_user_settings_creates_default(self):
- """get_user_settings should create defaults for new user"""
- bot.user_settings = {}
-
- settings = bot.get_user_settings(99999)
-
- assert settings is not None
- assert settings["audio_enabled"] == True
- assert settings["voice_speed"] == bot.VOICE_SETTINGS["speed"]
-
- def test_get_user_settings_returns_existing(self):
- """get_user_settings should return existing settings"""
- bot.user_settings = {"12345": {
- "audio_enabled": False,
- "voice_speed": 0.9,
- }}
-
- settings = bot.get_user_settings(12345)
-
- assert settings["audio_enabled"] == False
- assert settings["voice_speed"] == 0.9
-
- def test_save_and_load_settings(self):
- """Test settings persistence"""
- import tempfile
- with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
- temp_path = Path(f.name)
-
- original_settings_file = bot.SETTINGS_FILE
- bot.SETTINGS_FILE = temp_path
-
- try:
- bot.user_settings = {"test": {
- "audio_enabled": False,
- "voice_speed": 0.8,
- }}
- bot.save_settings()
-
- bot.user_settings = {}
- bot.load_settings()
-
- assert "test" in bot.user_settings
- assert bot.user_settings["test"]["audio_enabled"] == False
- assert bot.user_settings["test"]["voice_speed"] == 0.8
- finally:
- bot.SETTINGS_FILE = original_settings_file
- temp_path.unlink(missing_ok=True)
-
-
-class TestSettingsCommand:
- """Test /settings command and callbacks"""
-
- @pytest.fixture
- def mock_update(self):
- """Create mock update for settings"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.message.reply_text = AsyncMock()
- update.message.message_thread_id = None
- return update
-
- @pytest.fixture
- def mock_context(self):
- return Mock()
-
- @pytest.mark.asyncio
- async def test_cmd_settings_shows_menu(self, mock_update, mock_context):
- """Test /settings shows settings menu"""
- bot.user_settings = {}
- # Ensure update has chat ID
- mock_update.effective_chat.id = 12345
-
- with patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- await bot.cmd_settings(mock_update, mock_context)
-
- mock_update.message.reply_text.assert_called_once()
- call_args = mock_update.message.reply_text.call_args
- assert "Settings:" in call_args[0][0]
- # Check reply_markup was passed
- assert 'reply_markup' in call_args[1]
-
- @pytest.mark.asyncio
- async def test_settings_callback_audio_toggle(self):
- """Test audio toggle callback"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- }}
-
- query = AsyncMock()
- query.data = "setting_audio_toggle"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- with patch('bot.save_settings'):
- await bot.handle_settings_callback(update, context)
-
- # Audio should be toggled off
- assert bot.user_settings["12345"]["audio_enabled"] == False
- query.answer.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_settings_callback_speed_change(self):
- """Test speed change callback"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- }}
-
- query = AsyncMock()
- query.data = "setting_speed_0.9"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- with patch('bot.save_settings'):
- await bot.handle_settings_callback(update, context)
-
- # Speed should be changed
- assert bot.user_settings["12345"]["voice_speed"] == 0.9
-
- @pytest.mark.asyncio
- async def test_settings_callback_mode_toggle(self):
- """Test mode toggle callback"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- "mode": "go_all",
- "watch_enabled": False,
- }}
-
- query = AsyncMock()
- query.data = "setting_mode_toggle"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- with patch('bot.save_settings'):
- await bot.handle_settings_callback(update, context)
-
- # Mode should be toggled to approve
- assert bot.user_settings["12345"]["mode"] == "approve"
-
- @pytest.mark.asyncio
- async def test_settings_callback_watch_toggle(self):
- """Test watch toggle callback"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- "mode": "go_all",
- "watch_enabled": False,
- }}
-
- query = AsyncMock()
- query.data = "setting_watch_toggle"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- with patch('bot.save_settings'):
- await bot.handle_settings_callback(update, context)
-
- # Watch should be toggled on
- assert bot.user_settings["12345"]["watch_enabled"] == True
-
-
-class TestModeAndWatchSettings:
- """Test Mode (Go All/Approve) and Watch (ON/OFF) settings"""
-
- @pytest.mark.asyncio
- async def test_default_settings_include_mode_and_watch(self):
- """New user settings should include mode and watch"""
- bot.user_settings = {}
-
- settings = bot.get_user_settings(99999)
-
- assert "mode" in settings
- assert settings["mode"] == "go_all"
- assert "watch_enabled" in settings
- assert settings["watch_enabled"] == False
-
- @pytest.mark.asyncio
- async def test_existing_settings_get_mode_and_watch(self):
- """Existing users without mode/watch should get defaults"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- }}
-
- settings = bot.get_user_settings(12345)
-
- assert settings["mode"] == "go_all"
- assert settings["watch_enabled"] == False
-
- @pytest.mark.asyncio
- async def test_call_claude_with_approve_mode(self):
- """Claude call with approve mode should set can_use_tool callback"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient') as mock_sdk:
- mock_sdk.return_value = mock_client
- await bot.call_claude(
- "test prompt",
- user_settings={"mode": "approve", "watch_enabled": False}
- )
-
- # Verify ClaudeSDKClient was called with options containing can_use_tool
- assert mock_sdk.called
- call_kwargs = mock_sdk.call_args[1]
- options = call_kwargs.get('options')
- assert options is not None
- # In approve mode, can_use_tool should be set
- assert options.can_use_tool is not None
-
- @pytest.mark.asyncio
- async def test_call_claude_with_go_all_mode(self):
- """Claude call with go_all mode should not set can_use_tool callback"""
- mock_client = create_mock_client([make_result_message()])
-
- with patch('bot.ClaudeSDKClient') as mock_sdk:
- mock_sdk.return_value = mock_client
- await bot.call_claude(
- "test prompt",
- user_settings={"mode": "go_all", "watch_enabled": False}
- )
-
- # Verify ClaudeSDKClient was called with options
- assert mock_sdk.called
- call_kwargs = mock_sdk.call_args[1]
- options = call_kwargs.get('options')
- assert options is not None
- # In go_all mode, can_use_tool should be None (allowed_tools used instead)
- assert options.can_use_tool is None
-
- @pytest.mark.asyncio
- async def test_approval_callback_approve(self):
- """Test approval callback approves tool"""
- bot.pending_approvals = {}
-
- # Create a pending approval
- import asyncio
- approval_id = "test123"
- event = asyncio.Event()
- bot.pending_approvals[approval_id] = {
- "user_id": 12345, # Add user_id for security check
- "event": event,
- "approved": None,
- "tool_name": "Read",
- "input": {"path": "/test"},
- }
-
- query = AsyncMock()
- query.data = f"approve_{approval_id}"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- await bot.handle_approval_callback(update, context)
-
- # Check approval was recorded and event is set
- assert event.is_set()
- # Note: approval is popped from dict during processing
- # assert bot.pending_approvals[approval_id]["approved"] == True
-
- @pytest.mark.asyncio
- async def test_approval_callback_reject(self):
- """Test approval callback rejects tool"""
- bot.pending_approvals = {}
-
- import asyncio
- approval_id = "test456"
- event = asyncio.Event()
- bot.pending_approvals[approval_id] = {
- "user_id": 12345, # Add user_id for security check
- "event": event,
- "approved": None,
- "tool_name": "Write",
- "input": {"path": "/test"},
- }
-
- query = AsyncMock()
- query.data = f"reject_{approval_id}"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- await bot.handle_approval_callback(update, context)
-
- # Check rejection was recorded
- assert event.is_set()
-
- @pytest.mark.asyncio
- async def test_approval_callback_expired(self):
- """Test approval callback with expired approval ID"""
- bot.pending_approvals = {}
-
- query = AsyncMock()
- query.data = "approve_expired123"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- await bot.handle_approval_callback(update, context)
-
- # Should show expired message
- query.edit_message_text.assert_called_with("Approval expired")
-
-
-class TestDynamicPrompt:
- """Test dynamic system prompt generation"""
-
- def test_build_dynamic_prompt_includes_timestamp(self):
- """Dynamic prompt should include current date/time"""
- prompt = bot.build_dynamic_prompt()
-
- assert "Current date and time:" in prompt
-
- def test_build_dynamic_prompt_includes_base(self):
- """Dynamic prompt should include base prompt content"""
- prompt = bot.build_dynamic_prompt()
-
- # Should include content from BASE_SYSTEM_PROMPT
- assert len(prompt) > len("Current date and time:")
-
- def test_build_dynamic_prompt_with_settings(self):
- """Dynamic prompt should include settings summary when relevant"""
- settings = {
- "audio_enabled": False,
- "voice_speed": 1.0
- }
-
- prompt = bot.build_dynamic_prompt(settings)
-
- assert "Audio responses disabled" in prompt
-
- def test_build_dynamic_prompt_no_settings_summary_when_defaults(self):
- """Dynamic prompt should not include settings summary when defaults"""
- settings = {
- "audio_enabled": True,
- "voice_speed": 1.1
- }
-
- prompt = bot.build_dynamic_prompt(settings)
-
- # Should NOT include settings summary since all are default
- assert "User settings:" not in prompt
-
-
-class TestAudioEnabledSetting:
- """Test audio enabled setting"""
-
- @pytest.fixture
- def mock_update_text(self):
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_user.is_bot = False
- update.effective_chat.id = 12345
- update.message.text = "Hello!"
- update.message.reply_text = AsyncMock(return_value=AsyncMock())
- update.message.reply_voice = AsyncMock()
- update.message.message_thread_id = None
- return update
-
- @pytest.fixture
- def mock_context(self):
- context = Mock()
- context.user_data = {}
- return context
-
- @pytest.mark.asyncio
- async def test_handle_text_audio_disabled(self, mock_update_text, mock_context):
- """Test text handler skips TTS when audio disabled"""
- bot.user_sessions = {}
- bot.user_rate_limits = {} # Reset rate limits
- bot.user_settings = {"12345": {
- "audio_enabled": False,
- "voice_speed": 1.1,
- }}
-
- with patch('bot.call_claude', new_callable=AsyncMock) as mock_claude, \
- patch('bot.text_to_speech', new_callable=AsyncMock) as mock_tts, \
- patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- mock_claude.return_value = ("Response", "session-123", {})
-
- await bot.handle_text(mock_update_text, mock_context)
-
- # TTS should NOT be called
- mock_tts.assert_not_called()
-
- @pytest.mark.asyncio
- async def test_handle_text_audio_enabled(self, mock_update_text, mock_context):
- """Test text handler calls TTS when audio enabled"""
- bot.user_sessions = {}
- bot.user_rate_limits = {} # Reset rate limits
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- }}
-
- with patch('bot.call_claude', new_callable=AsyncMock) as mock_claude, \
- patch('bot.text_to_speech', new_callable=AsyncMock) as mock_tts, \
- patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- mock_claude.return_value = ("Response", "session-123", {})
- mock_tts.return_value = BytesIO(b"audio")
-
- await bot.handle_text(mock_update_text, mock_context)
-
- # TTS should be called
- mock_tts.assert_called_once()
-
-
-class TestVoiceSpeedSetting:
- """Test voice speed setting"""
-
- @pytest.mark.asyncio
- async def test_tts_uses_custom_speed(self):
- """TTS should use provided speed parameter"""
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_convert:
- mock_convert.return_value = iter([b'fake_audio_data'])
-
- await bot.text_to_speech("test text", speed=0.9)
-
- call_kwargs = mock_convert.call_args[1]
- assert call_kwargs['voice_settings']['speed'] == 0.9
-
- @pytest.mark.asyncio
- async def test_tts_uses_default_speed_when_none(self):
- """TTS should use default speed when not provided"""
- with patch.object(bot.elevenlabs.text_to_speech, 'convert') as mock_convert:
- mock_convert.return_value = iter([b'fake_audio_data'])
-
- await bot.text_to_speech("test text")
-
- call_kwargs = mock_convert.call_args[1]
- assert call_kwargs['voice_settings']['speed'] == bot.VOICE_SETTINGS['speed']
-
-
-class TestClaudeCallWithUserSettings:
- """Test Claude call with user settings"""
-
- @pytest.mark.asyncio
- async def test_call_claude_uses_dynamic_prompt(self):
- """Claude call should use dynamic prompt with user settings"""
- user_settings = {
- "audio_enabled": False,
- "voice_speed": 1.0
- }
-
- with patch('subprocess.run') as mock_run, \
- patch('bot.build_dynamic_prompt') as mock_build:
- mock_run.return_value = Mock(
- returncode=0,
- stdout=json.dumps({"result": "test", "session_id": "abc123"})
- )
- mock_build.return_value = "dynamic prompt content"
-
- await bot.call_claude("test", user_settings=user_settings)
-
- mock_build.assert_called_once_with(user_settings)
-
-
-class TestChatIDAuthentication:
- """Test chat ID authentication security"""
-
- @pytest.fixture
- def mock_update_authorized(self):
- """Create mock update from authorized chat"""
- update = AsyncMock()
- update.effective_user.id = 12345
- update.effective_user.is_bot = False
- update.effective_chat.id = 12345 # Matches test ALLOWED_CHAT_ID
- update.message.reply_text = AsyncMock()
- update.message.message_thread_id = None
- return update
-
- @pytest.fixture
- def mock_update_unauthorized(self):
- """Create mock update from unauthorized chat"""
- update = AsyncMock()
- update.effective_user.id = 99999
- update.effective_user.is_bot = False
- update.effective_chat.id = 99999 # Does NOT match test ALLOWED_CHAT_ID
- update.message.reply_text = AsyncMock()
- update.message.message_thread_id = None
- return update
-
- @pytest.fixture
- def mock_context(self):
- return Mock()
-
- @pytest.mark.asyncio
- async def test_cmd_start_rejects_unauthorized_chat(self, mock_update_unauthorized, mock_context):
- """Test /start rejects unauthorized chat ID"""
- with patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- await bot.cmd_start(mock_update_unauthorized, mock_context)
- # Should NOT send a reply
- mock_update_unauthorized.message.reply_text.assert_not_called()
-
- @pytest.mark.asyncio
- async def test_cmd_start_accepts_authorized_chat(self, mock_update_authorized, mock_context):
- """Test /start accepts authorized chat ID"""
- with patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- await bot.cmd_start(mock_update_authorized, mock_context)
- # Should send a reply
- mock_update_authorized.message.reply_text.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_cmd_start_accepts_all_when_zero(self, mock_update_unauthorized, mock_context):
- """Test /start accepts all when ALLOWED_CHAT_ID is 0"""
- with patch.object(bot, 'ALLOWED_CHAT_ID', 0):
- await bot.cmd_start(mock_update_unauthorized, mock_context)
- # Should send a reply even though chat doesn't match
- mock_update_unauthorized.message.reply_text.assert_called_once()
-
- @pytest.mark.asyncio
- async def test_handle_voice_rejects_unauthorized_chat(self):
- """Test voice handler rejects unauthorized chat ID"""
- update = AsyncMock()
- update.effective_user.id = 99999
- update.effective_user.is_bot = False
- update.effective_chat.id = 99999 # Unauthorized
- update.message.reply_text = AsyncMock(return_value=AsyncMock())
- update.message.voice.get_file = AsyncMock()
- update.message.message_thread_id = None
-
- bot.user_sessions = {}
-
- with patch.object(bot, 'ALLOWED_CHAT_ID', 12345):
- await bot.handle_voice(update, Mock())
-
- # Should NOT start processing
- update.message.reply_text.assert_not_called()
- update.message.voice.get_file.assert_not_called()
-
- @pytest.mark.asyncio
- async def test_handle_text_rejects_unauthorized_chat(self):
- """Test text handler rejects unauthorized chat ID"""
- update = AsyncMock()
- update.effective_user.id = 99999
- update.effective_user.is_bot = False
- update.effective_chat.id = 99999 # Unauthorized
- update.message.text = "test message"
- update.message.reply_text = AsyncMock()
- update.message.message_thread_id = None
-
- bot.user_sessions = {}
-
- with patch.object(bot, 'ALLOWED_CHAT_ID', 12345), \
- patch('bot.call_claude', new_callable=AsyncMock) as mock_claude:
-
- await bot.handle_text(update, Mock())
-
- # Should NOT call Claude
- mock_claude.assert_not_called()
- update.message.reply_text.assert_not_called()
-
-
-class TestApprovalUserValidation:
- """Test approval callback user validation"""
-
- @pytest.mark.asyncio
- async def test_approval_callback_rejects_different_user(self):
- """Test approval callback rejects different user"""
- bot.pending_approvals = {}
-
- import asyncio
- approval_id = "test789"
- event = asyncio.Event()
- bot.pending_approvals[approval_id] = {
- "user_id": 12345, # Original requester
- "event": event,
- "approved": None,
- "tool_name": "Read",
- "input": {"path": "/test"},
- }
-
- query = AsyncMock()
- query.data = f"approve_{approval_id}"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 99999 # Different user
-
- context = Mock()
-
- await bot.handle_approval_callback(update, context)
-
- # Should answer with rejection message
- query.answer.assert_called_with("Only the requester can approve this")
- # Event should NOT be set
- assert not event.is_set()
-
- @pytest.mark.asyncio
- async def test_approval_callback_accepts_same_user(self):
- """Test approval callback accepts same user"""
- bot.pending_approvals = {}
-
- import asyncio
- approval_id = "test790"
- event = asyncio.Event()
- bot.pending_approvals[approval_id] = {
- "user_id": 12345, # Original requester
- "event": event,
- "approved": None,
- "tool_name": "Read",
- "input": {"path": "/test"},
- }
-
- query = AsyncMock()
- query.data = f"approve_{approval_id}"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345 # Same user
-
- context = Mock()
-
- await bot.handle_approval_callback(update, context)
-
- # Should process approval
- assert event.is_set()
- assert bot.pending_approvals[approval_id]["approved"] == True
-
-
-class TestSpeedValidation:
- """Test speed callback input validation"""
-
- @pytest.mark.asyncio
- async def test_settings_callback_rejects_invalid_speed_float(self):
- """Test speed callback rejects non-float values"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- }}
-
- query = AsyncMock()
- query.data = "setting_speed_not_a_number"
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- await bot.handle_settings_callback(update, context)
-
- # Should answer with error
- query.answer.assert_called_with("Invalid speed value")
- # Speed should NOT change
- assert bot.user_settings["12345"]["voice_speed"] == 1.1
-
- @pytest.mark.asyncio
- async def test_settings_callback_rejects_out_of_range_speed_low(self):
- """Test speed callback rejects speed below 0.7"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- }}
-
- query = AsyncMock()
- query.data = "setting_speed_0.5" # Too low
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- await bot.handle_settings_callback(update, context)
-
- # Should answer with error
- query.answer.assert_called_with("Invalid speed range")
- # Speed should NOT change
- assert bot.user_settings["12345"]["voice_speed"] == 1.1
-
- @pytest.mark.asyncio
- async def test_settings_callback_rejects_out_of_range_speed_high(self):
- """Test speed callback rejects speed above 1.2"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- }}
-
- query = AsyncMock()
- query.data = "setting_speed_1.5" # Too high
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- await bot.handle_settings_callback(update, context)
-
- # Should answer with error
- query.answer.assert_called_with("Invalid speed range")
- # Speed should NOT change
- assert bot.user_settings["12345"]["voice_speed"] == 1.1
-
- @pytest.mark.asyncio
- async def test_settings_callback_accepts_valid_speed(self):
- """Test speed callback accepts valid speed in range"""
- bot.user_settings = {"12345": {
- "audio_enabled": True,
- "voice_speed": 1.1,
- }}
-
- query = AsyncMock()
- query.data = "setting_speed_0.9" # Valid
- query.answer = AsyncMock()
- query.edit_message_text = AsyncMock()
-
- update = AsyncMock()
- update.callback_query = query
- update.effective_user.id = 12345
-
- context = Mock()
-
- with patch('bot.save_settings'):
- await bot.handle_settings_callback(update, context)
-
- # Speed should change
- assert bot.user_settings["12345"]["voice_speed"] == 0.9
-
-
-class TestLogLevel:
- """Test configurable log level"""
-
- def test_log_level_from_env(self):
- """Test log level is configurable via env"""
- # Check that LOG_LEVEL variable exists
- assert hasattr(bot, 'LOG_LEVEL')
-
- def test_log_level_defaults_to_info(self):
- """Test log level defaults to INFO when not set"""
- # In tests, LOG_LEVEL is not set, so should default to INFO
- # (or DEBUG if set in test env)
- assert bot.LOG_LEVEL in ["INFO", "DEBUG", "WARNING", "ERROR", "CRITICAL"]
-
-
-# Run pytest with coverage
-if __name__ == "__main__":
- pytest.main([__file__, "-v", "--tb=short"])
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/test_automations.py b/tests/test_automations.py
new file mode 100644
index 0000000..e0bb667
--- /dev/null
+++ b/tests/test_automations.py
@@ -0,0 +1,117 @@
+import sys, os
+sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
+
+import asyncio
+import json
+import subprocess
+from unittest.mock import patch, MagicMock
+
+from bot import cron_to_human, run_remote_trigger_list, run_remote_trigger_run, run_remote_trigger_toggle, build_automations_list, build_automation_card
+
+def _run(coro):
+ return asyncio.run(coro)
+
+def test_daily():
+ assert cron_to_human("0 7 * * *") == "Codziennie 07:00"
+
+def test_weekdays():
+ assert cron_to_human("0 9 * * 1-5") == "Pn-Pt 09:00"
+
+def test_weekly_monday():
+ assert cron_to_human("0 10 * * 1") == "Pn 10:00"
+
+def test_hourly():
+ assert cron_to_human("0 * * * *") == "Co godzinę"
+
+def test_unknown_falls_back():
+ assert cron_to_human("*/15 * * * *") == "*/15 * * * *"
+
+def test_zero_padded():
+ assert cron_to_human("0 8 * * *") == "Codziennie 08:00"
+
+def test_list_returns_triggers():
+ mock_output = json.dumps({
+ "result": '[{"id":"trig_1","name":"Daily Standup","cron_expression":"0 7 * * *","enabled":true}]'
+ })
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=0, stdout=mock_output, stderr="")
+ triggers = _run(run_remote_trigger_list())
+ assert len(triggers) == 1
+ assert triggers[0]["name"] == "Daily Standup"
+ assert triggers[0]["enabled"] is True
+
+def test_list_returns_empty_on_error():
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=1, stdout="", stderr="error")
+ triggers = _run(run_remote_trigger_list())
+ assert triggers == []
+
+def test_run_trigger_returns_true_on_success():
+ mock_output = json.dumps({"result": "Trigger started successfully"})
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=0, stdout=mock_output, stderr="")
+ ok = _run(run_remote_trigger_run("trig_1"))
+ assert ok is True
+
+def test_run_trigger_returns_false_on_error():
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=1, stdout="", stderr="err")
+ ok = _run(run_remote_trigger_run("trig_1"))
+ assert ok is False
+
+def test_toggle_trigger():
+ mock_output = json.dumps({"result": "Updated"})
+ with patch("subprocess.run") as mock_run:
+ mock_run.return_value = MagicMock(returncode=0, stdout=mock_output, stderr="")
+ ok = _run(run_remote_trigger_toggle("trig_1", enable=False))
+ assert ok is True
+ # verify RemoteTrigger update was called with enabled=false in prompt
+ call_args = mock_run.call_args[0][0]
+ assert "RemoteTrigger" in " ".join(call_args)
+
+
+from telegram import InlineKeyboardMarkup
+
+SAMPLE_TRIGGERS = [
+ {"id": "trig_1", "name": "Daily Standup", "cron_expression": "0 8 * * *", "enabled": True},
+ {"id": "trig_2", "name": "Dep Audit", "cron_expression": "0 10 * * 1", "enabled": False},
+]
+
+def test_build_list_text():
+ text, markup = build_automations_list(SAMPLE_TRIGGERS)
+ assert "Daily Standup" in text
+ assert "Dep Audit" in text
+ assert isinstance(markup, InlineKeyboardMarkup)
+
+def test_build_list_empty():
+ text, markup = build_automations_list([])
+ assert "brak" in text.lower() or "automacj" in text.lower()
+ assert isinstance(markup, InlineKeyboardMarkup)
+
+def test_build_list_buttons_contain_ids():
+ _, markup = build_automations_list(SAMPLE_TRIGGERS)
+ all_data = [btn.callback_data for row in markup.inline_keyboard for btn in row if btn.callback_data]
+ assert any("trig_1" in d for d in all_data)
+ assert any("trig_2" in d for d in all_data)
+
+def test_build_card_full():
+ trigger = SAMPLE_TRIGGERS[0]
+ text, markup = build_automation_card(trigger, style="full")
+ assert "Daily Standup" in text
+ assert "08:00" in text
+ assert isinstance(markup, InlineKeyboardMarkup)
+ all_data = [btn.callback_data for row in markup.inline_keyboard for btn in row if btn.callback_data]
+ assert any("auto_run_trig_1" in d for d in all_data)
+ assert any("auto_list" in d for d in all_data)
+
+def test_build_card_compact():
+ trigger = SAMPLE_TRIGGERS[0]
+ text, markup = build_automation_card(trigger, style="compact")
+ assert "Daily Standup" in text
+ assert isinstance(markup, InlineKeyboardMarkup)
+
+def test_build_card_paused_shows_resume():
+ trigger = SAMPLE_TRIGGERS[1] # enabled=False
+ _, markup = build_automation_card(trigger, style="compact")
+ all_labels = [btn.text for row in markup.inline_keyboard for btn in row]
+ assert any("Resume" in l for l in all_labels)
diff --git a/tests/test_bot_logic.py b/tests/test_bot_logic.py
new file mode 100644
index 0000000..b451666
--- /dev/null
+++ b/tests/test_bot_logic.py
@@ -0,0 +1,713 @@
+"""
+Real tests for bot.py logic — no mocks, no API calls.
+Tests actual behavior: file I/O, rate limiting, auth logic.
+"""
+import asyncio
+import json
+import os
+import sys
+import time
+import pytest
+import tempfile
+from datetime import datetime
+from pathlib import Path
+from unittest.mock import patch
+
+# We need to set required env vars BEFORE importing bot
+# so it doesn't crash on missing TELEGRAM_BOT_TOKEN
+os.environ.setdefault("TELEGRAM_BOT_TOKEN", "test:token")
+os.environ.setdefault("TELEGRAM_DEFAULT_CHAT_ID", "0")
+
+# Add parent dir to path
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+import bot
+
+
+# ─────────────────────────────────────────────
+# TestResolveProvider
+# ─────────────────────────────────────────────
+
+class TestResolveProvider:
+ def setup_method(self):
+ # Clean env before each test
+ for var in ("TTS_PROVIDER", "STT_PROVIDER", "ELEVENLABS_API_KEY", "OPENAI_API_KEY"):
+ os.environ.pop(var, None)
+
+ def test_explicit_elevenlabs(self):
+ os.environ["TTS_PROVIDER"] = "elevenlabs"
+ assert bot.resolve_provider("TTS_PROVIDER") == "elevenlabs"
+
+ def test_explicit_openai(self):
+ os.environ["TTS_PROVIDER"] = "openai"
+ assert bot.resolve_provider("TTS_PROVIDER") == "openai"
+
+ def test_explicit_invalid_ignored(self):
+ os.environ["TTS_PROVIDER"] = "invalid"
+ assert bot.resolve_provider("TTS_PROVIDER") == "none"
+
+ def test_fallback_elevenlabs(self):
+ os.environ["ELEVENLABS_API_KEY"] = "sk_test"
+ assert bot.resolve_provider("TTS_PROVIDER") == "elevenlabs"
+
+ def test_fallback_openai_when_no_elevenlabs(self):
+ os.environ["OPENAI_API_KEY"] = "sk-test"
+ assert bot.resolve_provider("TTS_PROVIDER") == "openai"
+
+ def test_elevenlabs_wins_over_openai(self):
+ os.environ["ELEVENLABS_API_KEY"] = "sk_test"
+ os.environ["OPENAI_API_KEY"] = "sk-test"
+ assert bot.resolve_provider("TTS_PROVIDER") == "elevenlabs"
+
+ def test_none_when_no_keys(self):
+ assert bot.resolve_provider("TTS_PROVIDER") == "none"
+
+ def teardown_method(self):
+ for var in ("TTS_PROVIDER", "STT_PROVIDER", "ELEVENLABS_API_KEY", "OPENAI_API_KEY"):
+ os.environ.pop(var, None)
+
+
+# ─────────────────────────────────────────────
+# TestLoadSaveState
+# ─────────────────────────────────────────────
+
+class TestLoadSaveState:
+ def setup_method(self):
+ self.tmp = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
+ self.tmp.close()
+ self.orig_state_file = bot.STATE_FILE
+ bot.STATE_FILE = Path(self.tmp.name)
+ bot.user_sessions = {}
+
+ def teardown_method(self):
+ bot.STATE_FILE = self.orig_state_file
+ Path(self.tmp.name).unlink(missing_ok=True)
+
+ def test_roundtrip(self):
+ bot.user_sessions = {"123": {"current_session": "abc", "sessions": ["abc"]}}
+ bot.save_state()
+ bot.user_sessions = {}
+ bot.load_state()
+ assert bot.user_sessions["123"]["current_session"] == "abc"
+
+ def test_corrupted_json(self):
+ Path(self.tmp.name).write_text("not valid json{{{{")
+ bot.load_state() # Must NOT raise
+ assert bot.user_sessions == {}
+
+ def test_missing_file(self):
+ Path(self.tmp.name).unlink()
+ bot.load_state() # Must NOT raise
+ assert bot.user_sessions == {}
+
+ def test_empty_file(self):
+ Path(self.tmp.name).write_text("")
+ bot.load_state() # Must NOT raise
+ assert bot.user_sessions == {}
+
+
+# ─────────────────────────────────────────────
+# TestLoadSaveSettings
+# ─────────────────────────────────────────────
+
+class TestLoadSaveSettings:
+ def setup_method(self):
+ self.tmp = tempfile.NamedTemporaryFile(suffix=".json", delete=False)
+ self.tmp.close()
+ self.orig_settings_file = bot.SETTINGS_FILE
+ bot.SETTINGS_FILE = Path(self.tmp.name)
+ bot.user_settings = {}
+
+ def teardown_method(self):
+ bot.SETTINGS_FILE = self.orig_settings_file
+ Path(self.tmp.name).unlink(missing_ok=True)
+
+ def test_corrupted_json(self):
+ Path(self.tmp.name).write_text("{bad json")
+ bot.load_settings() # Must NOT raise
+ assert bot.user_settings == {}
+
+ def test_roundtrip(self):
+ bot.user_settings = {"456": {"mode": "approve", "audio_enabled": False}}
+ bot.save_settings()
+ bot.user_settings = {}
+ bot.load_settings()
+ assert bot.user_settings["456"]["mode"] == "approve"
+
+
+# ─────────────────────────────────────────────
+# TestRateLimiter
+# ─────────────────────────────────────────────
+
+class TestRateLimiter:
+ def setup_method(self):
+ # Clear rate limit state
+ bot.rate_limits.clear()
+
+ def test_first_message_allowed(self):
+ allowed, msg = bot.check_rate_limit(999)
+ assert allowed is True
+
+ def test_cooldown_blocks_immediate_second(self):
+ bot.check_rate_limit(999)
+ allowed, msg = bot.check_rate_limit(999)
+ assert allowed is False
+ assert "wait" in msg.lower() or "second" in msg.lower() or "slow" in msg.lower()
+
+ def test_per_minute_cap(self):
+ user_id = 12345
+ # Simulate 10 messages spaced out to pass cooldown
+ # by manipulating rate_limits directly
+ bot.rate_limits[str(user_id)] = {
+ "last_message": time.time() - 10, # 10s ago — passes cooldown
+ "minute_start": time.time(),
+ "minute_count": 10, # Already at limit
+ }
+ allowed, msg = bot.check_rate_limit(user_id)
+ assert allowed is False
+ assert "10" in msg or "limit" in msg.lower() or "minute" in msg.lower()
+
+ def test_per_minute_resets_after_minute(self):
+ user_id = 77777
+ bot.rate_limits[str(user_id)] = {
+ "last_message": time.time() - 10,
+ "minute_start": time.time() - 65, # minute started 65s ago → resets
+ "minute_count": 10,
+ }
+ allowed, msg = bot.check_rate_limit(user_id)
+ assert allowed is True
+
+
+# ─────────────────────────────────────────────
+# TestCheckClaudeAuth
+# ─────────────────────────────────────────────
+
+class TestCheckClaudeAuth:
+ def setup_method(self):
+ for var in ("ANTHROPIC_API_KEY", "CLAUDE_CODE_OAUTH_TOKEN"):
+ os.environ.pop(var, None)
+
+ def test_api_key(self):
+ os.environ["ANTHROPIC_API_KEY"] = "sk-ant-test"
+ ok, method = bot.check_claude_auth()
+ assert ok is True
+ assert method == "api_key"
+
+ def test_saved_token(self):
+ os.environ["CLAUDE_CODE_OAUTH_TOKEN"] = "sk-ant-oat01-test"
+ ok, method = bot.check_claude_auth()
+ assert ok is True
+ assert method == "saved_token"
+
+ def test_api_key_takes_priority_over_token(self):
+ os.environ["ANTHROPIC_API_KEY"] = "sk-ant-key"
+ os.environ["CLAUDE_CODE_OAUTH_TOKEN"] = "sk-ant-oat01-token"
+ ok, method = bot.check_claude_auth()
+ assert method == "api_key"
+
+ def test_no_auth_returns_false(self):
+ ok, method = bot.check_claude_auth()
+ assert ok is False
+ assert method == "none"
+
+ def test_credentials_file_valid(self, tmp_path):
+ claude_dir = tmp_path / ".claude"
+ claude_dir.mkdir()
+ creds_file = claude_dir / ".credentials.json"
+ future_time = int(time.time() * 1000) + 3600000 # +1h
+ creds_file.write_text(json.dumps({
+ "claudeAiOauth": {
+ "accessToken": "test-token",
+ "expiresAt": future_time,
+ }
+ }))
+ with patch.object(bot.Path, "home", return_value=tmp_path):
+ ok, method = bot.check_claude_auth()
+ assert ok is True
+ assert method == "oauth"
+
+ def test_credentials_file_expired_with_refresh(self, tmp_path):
+ creds_file = tmp_path / ".credentials.json"
+ (tmp_path / ".claude").mkdir(exist_ok=True)
+ creds_file = tmp_path / ".claude" / ".credentials.json"
+ past_time = int(time.time() * 1000) - 3600000 # -1h (expired)
+ creds_file.write_text(json.dumps({
+ "claudeAiOauth": {
+ "accessToken": "old-token",
+ "expiresAt": past_time,
+ "refreshToken": "refresh-token",
+ }
+ }))
+ with patch.object(bot.Path, "home", return_value=tmp_path):
+ ok, method = bot.check_claude_auth()
+ assert ok is True # Has refresh token → SDK will refresh
+
+ def teardown_method(self):
+ for var in ("ANTHROPIC_API_KEY", "CLAUDE_CODE_OAUTH_TOKEN"):
+ os.environ.pop(var, None)
+
+
+# ─────────────────────────────────────────────
+# TestMaxVoiceChars
+# ─────────────────────────────────────────────
+
+class TestMaxVoiceChars:
+ def test_max_voice_chars_is_positive_int(self):
+ assert isinstance(bot.MAX_VOICE_CHARS, int)
+ assert bot.MAX_VOICE_CHARS > 0
+
+ def test_truncation_logic(self):
+ # Test the truncation logic directly (as used in handle_voice/handle_text)
+ max_chars = 100
+ long_response = "x" * 200
+ tts_text = long_response[:max_chars] if len(long_response) > max_chars else long_response
+ assert len(tts_text) == max_chars
+
+ def test_short_response_not_truncated(self):
+ max_chars = 100
+ short_response = "hello"
+ tts_text = short_response[:max_chars] if len(short_response) > max_chars else short_response
+ assert tts_text == "hello"
+
+
+# ─────────────────────────────────────────────
+# TestAdminUserIds
+# ─────────────────────────────────────────────
+
+class TestAdminUserIds:
+ def test_is_authorized_with_zero_chat_id(self):
+ """ALLOWED_CHAT_ID=0 means all chats allowed."""
+ orig = bot.ALLOWED_CHAT_ID
+ bot.ALLOWED_CHAT_ID = 0
+
+ class FakeUpdate:
+ class effective_chat:
+ id = 12345
+
+ assert bot._is_authorized(FakeUpdate()) is True
+ bot.ALLOWED_CHAT_ID = orig
+
+ def test_is_authorized_matching_chat_id(self):
+ orig = bot.ALLOWED_CHAT_ID
+ bot.ALLOWED_CHAT_ID = 99999
+
+ class FakeUpdate:
+ class effective_chat:
+ id = 99999
+
+ assert bot._is_authorized(FakeUpdate()) is True
+ bot.ALLOWED_CHAT_ID = orig
+
+ def test_is_authorized_wrong_chat_id(self):
+ orig = bot.ALLOWED_CHAT_ID
+ bot.ALLOWED_CHAT_ID = 99999
+
+ class FakeUpdate:
+ class effective_chat:
+ id = 11111
+
+ assert bot._is_authorized(FakeUpdate()) is False
+ bot.ALLOWED_CHAT_ID = orig
+
+ def test_is_admin_empty_admin_ids_allows_authorized(self):
+ """No ADMIN_USER_IDS configured → anyone in authorized chat is admin."""
+ orig_chat = bot.ALLOWED_CHAT_ID
+ orig_admin = bot.ADMIN_USER_IDS
+ bot.ALLOWED_CHAT_ID = 0
+ bot.ADMIN_USER_IDS = set()
+
+ class FakeUpdate:
+ class effective_chat:
+ id = 12345
+ class effective_user:
+ id = 99999
+
+ assert bot._is_admin(FakeUpdate()) is True
+ bot.ALLOWED_CHAT_ID = orig_chat
+ bot.ADMIN_USER_IDS = orig_admin
+
+ def test_is_admin_with_admin_ids_matching(self):
+ orig_chat = bot.ALLOWED_CHAT_ID
+ orig_admin = bot.ADMIN_USER_IDS
+ bot.ALLOWED_CHAT_ID = 0
+ bot.ADMIN_USER_IDS = {111, 222}
+
+ class FakeUpdate:
+ class effective_chat:
+ id = 12345
+ class effective_user:
+ id = 111
+
+ assert bot._is_admin(FakeUpdate()) is True
+ bot.ALLOWED_CHAT_ID = orig_chat
+ bot.ADMIN_USER_IDS = orig_admin
+
+ def test_is_admin_with_admin_ids_not_matching(self):
+ orig_chat = bot.ALLOWED_CHAT_ID
+ orig_admin = bot.ADMIN_USER_IDS
+ bot.ALLOWED_CHAT_ID = 0
+ bot.ADMIN_USER_IDS = {111, 222}
+
+ class FakeUpdate:
+ class effective_chat:
+ id = 12345
+ class effective_user:
+ id = 999 # not in admin list
+
+ assert bot._is_admin(FakeUpdate()) is False
+ bot.ALLOWED_CHAT_ID = orig_chat
+ bot.ADMIN_USER_IDS = orig_admin
+
+
+class TestSettingsJson:
+ """Validate settings.json if it exists."""
+
+ SETTINGS_PATH = Path(__file__).parent.parent / "settings.json"
+
+ def test_settings_example_valid_json(self):
+ """settings.example.json should always be valid JSON."""
+ example_path = Path(__file__).parent.parent / "settings.example.json"
+ content = example_path.read_text()
+ data = json.loads(content)
+ assert "permissions" in data or "mcpServers" in data
+
+ def test_settings_example_has_megg(self):
+ """settings.example.json should include MEGG MCP config."""
+ example_path = Path(__file__).parent.parent / "settings.example.json"
+ data = json.loads(example_path.read_text())
+ assert "mcpServers" in data
+ assert "megg" in data["mcpServers"]
+ megg = data["mcpServers"]["megg"]
+ assert "command" in megg
+ assert megg["command"] == "npx"
+
+ def test_settings_json_valid_if_exists(self):
+ """settings.json should be valid JSON if it exists."""
+ if not self.SETTINGS_PATH.exists():
+ pytest.skip("settings.json not present (expected in local dev only)")
+ content = self.SETTINGS_PATH.read_text()
+ data = json.loads(content)
+ assert isinstance(data, dict)
+
+ def test_settings_json_has_permissions_if_exists(self):
+ """settings.json should contain permissions block if it exists."""
+ if not self.SETTINGS_PATH.exists():
+ pytest.skip("settings.json not present (expected in local dev only)")
+ data = json.loads(self.SETTINGS_PATH.read_text())
+ assert "permissions" in data, "settings.json missing permissions block"
+
+
class TestPhotoHandler:
    """Test photo handler logic."""

    @staticmethod
    def _build_prompt(path, caption):
        # Mirror of the prompt-building branch inside handle_photo.
        if caption:
            return f"I sent you a photo. It's saved at: {path}\n\nMy message: {caption}"
        return f"I sent you a photo. It's saved at: {path}\n\nPlease look at it and describe what you see, or help me with whatever is shown."

    def test_photo_prompt_with_caption(self):
        """Photo with caption produces correct prompt."""
        caption = "What's in this image?"
        path = Path("/sandbox/photo_20260405_120000.jpg")
        prompt = self._build_prompt(path, caption)
        assert str(path) in prompt
        assert caption in prompt

    def test_photo_prompt_without_caption(self):
        """Photo without caption produces fallback prompt."""
        path = Path("/sandbox/photo_20260405_120000.jpg")
        prompt = self._build_prompt(path, "")
        assert str(path) in prompt
        assert "describe" in prompt

    def test_photo_filename_format(self):
        """Photo filename includes timestamp."""
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        name = f"photo_{stamp}.jpg"
        assert name.startswith("photo_")
        assert name.endswith(".jpg")
        # %Y%m%d_%H%M%S is fixed-width, so the full filename length is constant.
        assert len(name) == len("photo_20260405_120000.jpg")
+
+
class TestCancellation:
    """Test cancellation event logic."""

    def test_cancel_event_initial_state(self):
        """A new asyncio.Event is not set."""
        evt = asyncio.Event()
        assert evt.is_set() is False

    def test_cancel_event_set_and_check(self):
        """Setting an event makes is_set() return True."""
        evt = asyncio.Event()
        evt.set()
        assert evt.is_set() is True

    def test_cancel_event_clear_resets(self):
        """Clearing an event makes is_set() return False again."""
        evt = asyncio.Event()
        evt.set()
        evt.clear()
        assert evt.is_set() is False

    def test_cancel_events_dict_per_user(self):
        """cancel_events dict tracks separate events per user_id."""
        events = {}
        uid = 123

        # Same shape as the call_claude entry path: create-or-reuse, then clear.
        if uid not in events:
            events[uid] = asyncio.Event()
        events[uid].clear()
        assert events[123].is_set() is False

        # Simulate /cancel for this user only.
        events[123].set()
        assert events[123].is_set() is True

        # A different user has no event and is unaffected.
        assert 456 not in events

    def test_cancel_no_effect_on_already_cancelled(self):
        """Setting an already-set event is idempotent."""
        evt = asyncio.Event()
        evt.set()
        evt.set()  # second set must not raise
        assert evt.is_set()
+
+
class TestCompact:
    """Test /compact session logic.

    These tests replicate the state transitions that cmd_compact and the
    message handlers perform around the ``compact_summary`` key, without
    touching the real bot module.
    """

    def test_compact_summary_stored_in_state(self):
        """compact_summary key gets saved to user state dict."""
        state = {"current_session": "sess_abc", "sessions": ["sess_abc"]}
        summary = "We discussed X, decided Y, working on Z."

        # Simulate what cmd_compact does after getting summary: stash it and
        # drop the active session so the next message starts a fresh one.
        state["compact_summary"] = summary
        state["current_session"] = None

        assert state["compact_summary"] == summary
        assert state["current_session"] is None

    def test_compact_summary_prepended_to_next_message(self):
        """compact_summary is injected into the next message prompt."""
        state = {
            "current_session": None,
            "sessions": [],
            "compact_summary": "Previous: discussed auth system.",
        }
        user_text = "Continue with the login form."

        # Simulate what handlers do
        compact_summary = state.pop("compact_summary", None)
        if compact_summary:
            text = f"\n{compact_summary}\n\n\n{user_text}"
        else:
            text = user_text

        # BUG FIX: the original asserted `"" in text`, which is vacuously true
        # for every string and verified nothing. Pin the actual layout: the
        # summary is injected first, the user's message last.
        assert text.startswith("\n")
        assert "Previous: discussed auth system." in text
        assert "Continue with the login form." in text
        assert text.endswith(user_text)
        assert text.index(compact_summary) < text.index(user_text)

    def test_compact_summary_cleared_after_use(self):
        """compact_summary is removed from state after being injected."""
        state = {"compact_summary": "some summary", "current_session": None, "sessions": []}

        # pop() both returns the summary and removes the key in one step.
        compact_summary = state.pop("compact_summary", None)
        assert compact_summary == "some summary"
        assert "compact_summary" not in state

    def test_compact_no_summary_no_injection(self):
        """Without compact_summary, text is unchanged."""
        state = {"current_session": None, "sessions": []}
        user_text = "Hello"

        compact_summary = state.pop("compact_summary", None)
        if compact_summary:
            text = f"\n{compact_summary}\n\n\n{user_text}"
        else:
            text = user_text

        assert text == user_text

    def test_compact_requires_active_session(self):
        """compact should not proceed if no active session."""
        state = {"current_session": None, "sessions": []}

        # Simulate the guard in cmd_compact: a None session must read as falsy.
        has_session = bool(state.get("current_session"))
        assert not has_session
+
+
+# ─────────────────────────────────────────────
+# TestMcpStatus
+# ─────────────────────────────────────────────
+
+class TestMcpStatus:
+ """Test MCP status helper function."""
+
+ def test_no_settings_file_configured(self, tmp_path):
+ """Empty CLAUDE_SETTINGS_FILE returns informative message."""
+ settings_file = ""
+ if not settings_file:
+ result = ["MCP: CLAUDE_SETTINGS_FILE not configured"]
+ assert result == ["MCP: CLAUDE_SETTINGS_FILE not configured"]
+
+ def test_settings_file_not_found(self, tmp_path):
+ """Nonexistent file returns not-found message."""
+ fake_path = str(tmp_path / "missing_settings.json")
+
+ from pathlib import Path
+ settings_path = Path(fake_path)
+ if not settings_path.is_absolute():
+ settings_path = Path(".") / fake_path
+
+ if not settings_path.exists():
+ result = [f"MCP config: settings file not found ({fake_path})"]
+
+ assert "not found" in result[0]
+
+ def test_settings_with_valid_mcp_command(self, tmp_path):
+ """Settings with npx (available) returns OK status."""
+ import shutil, json
+ settings = {
+ "mcpServers": {
+ "megg": {"command": "npx", "args": ["-y", "megg@latest"]}
+ }
+ }
+ settings_file = tmp_path / "settings.json"
+ settings_file.write_text(json.dumps(settings))
+
+ # Parse logic
+ data = json.loads(settings_file.read_text())
+ mcp_servers = data.get("mcpServers", {})
+ lines = ["MCP Servers:"]
+ for name, config in mcp_servers.items():
+ cmd = config.get("command", "")
+ if cmd and shutil.which(cmd):
+ lines.append(f" {name}: OK ({cmd})")
+ elif cmd:
+ lines.append(f" {name}: MISSING ({cmd} not found in PATH)")
+
+ # npx should be available in the test environment
+ if shutil.which("npx"):
+ assert any("OK" in line for line in lines)
+ else:
+ assert any("MISSING" in line for line in lines)
+
+ def test_settings_with_missing_command(self, tmp_path):
+ """Settings with unavailable command returns MISSING status."""
+ import shutil, json
+ settings = {
+ "mcpServers": {
+ "fake-tool": {"command": "this-binary-does-not-exist-xyz", "args": []}
+ }
+ }
+ settings_file = tmp_path / "settings.json"
+ settings_file.write_text(json.dumps(settings))
+
+ data = json.loads(settings_file.read_text())
+ mcp_servers = data.get("mcpServers", {})
+ lines = ["MCP Servers:"]
+ for name, config in mcp_servers.items():
+ cmd = config.get("command", "")
+ if cmd and shutil.which(cmd):
+ lines.append(f" {name}: OK ({cmd})")
+ elif cmd:
+ lines.append(f" {name}: MISSING ({cmd} not found in PATH)")
+
+ assert any("MISSING" in line for line in lines)
+
+ def test_settings_empty_mcp_servers(self, tmp_path):
+ """Settings with empty mcpServers returns 'none configured'."""
+ import json
+ settings = {"permissions": {"allow": []}, "mcpServers": {}}
+ settings_file = tmp_path / "settings.json"
+ settings_file.write_text(json.dumps(settings))
+
+ data = json.loads(settings_file.read_text())
+ mcp_servers = data.get("mcpServers", {})
+ if not mcp_servers:
+ result = ["MCP Servers: none configured"]
+
+ assert result == ["MCP Servers: none configured"]
+
+ def test_corrupted_settings_file(self, tmp_path):
+ """Corrupted settings.json returns error message."""
+ settings_file = tmp_path / "settings.json"
+ settings_file.write_text("{invalid json{{")
+
+ try:
+ import json
+ json.loads(settings_file.read_text())
+ result = []
+ except json.JSONDecodeError as e:
+ result = [f"MCP config: ERROR reading settings - {e}"]
+
+ assert len(result) == 1
+ assert "ERROR" in result[0]
+
+ def test_get_mcp_status_no_settings(self):
+ """bot.get_mcp_status with empty string returns not-configured message."""
+ result = bot.get_mcp_status("")
+ assert result == ["MCP: CLAUDE_SETTINGS_FILE not configured"]
+
+ def test_get_mcp_status_missing_file(self, tmp_path):
+ """bot.get_mcp_status with nonexistent path returns not-found message."""
+ fake = str(tmp_path / "nosuchfile.json")
+ result = bot.get_mcp_status(fake)
+ assert len(result) == 1
+ assert "not found" in result[0]
+
+ def test_get_mcp_status_valid_settings(self, tmp_path):
+ """bot.get_mcp_status with real settings file returns structured lines."""
+ import shutil as _shutil
+ settings = {
+ "mcpServers": {
+ "fake-xyz": {"command": "this-binary-does-not-exist-xyz", "args": []},
+ }
+ }
+ settings_file = tmp_path / "settings.json"
+ settings_file.write_text(json.dumps(settings))
+
+ result = bot.get_mcp_status(str(settings_file))
+ assert result[0] == "MCP Servers:"
+ assert any("fake-xyz" in line for line in result)
+ assert any("MISSING" in line for line in result)
+
+ def test_get_mcp_status_corrupted_json(self, tmp_path):
+ """bot.get_mcp_status with bad JSON returns error line."""
+ settings_file = tmp_path / "settings.json"
+ settings_file.write_text("{not valid json{{")
+
+ result = bot.get_mcp_status(str(settings_file))
+ assert len(result) == 1
+ assert "ERROR" in result[0]
+
+ def test_get_mcp_status_empty_servers(self, tmp_path):
+ """bot.get_mcp_status with empty mcpServers returns 'none configured'."""
+ settings_file = tmp_path / "settings.json"
+ settings_file.write_text(json.dumps({"mcpServers": {}}))
+
+ result = bot.get_mcp_status(str(settings_file))
+ assert result == ["MCP Servers: none configured"]
+
+ def test_get_mcp_status_no_command_field(self, tmp_path):
+ """MCP server entry with no command field returns misconfigured."""
+ settings_file = tmp_path / "settings.json"
+ settings_file.write_text(json.dumps({
+ "mcpServers": {"broken": {"args": []}}
+ }))
+
+ result = bot.get_mcp_status(str(settings_file))
+ assert any("misconfigured" in line for line in result)