Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 31 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ This project comes packed with features designed for a robust and intelligent ag
- [Contributing](#contributing)
- [License](#license)
- [References](#references)
- [LM Studio (Local) Usage](#lm-studio-local-usage)

## Quick Start

Expand Down Expand Up @@ -247,3 +248,33 @@ This project is licensed under the [MIT License](LICENSE).
---

**⭐ If this project helps you, please give it a Star!**
## LM Studio (Local) Usage

You can run Mini Agent against an OpenAI-compatible local server like LM Studio.

Steps:
- In LM Studio, start the local server (default base URL: `http://localhost:1234/v1`) and load your model.
- Update your config to use the OpenAI-compatible provider.

Example config (same file as above):

```yaml
provider: "openai-compatible"
api_key: "lm-studio" # LM Studio accepts any non-empty key
api_base: "http://localhost:1234/v1" # LM Studio local server default
model: "YOUR_LOADED_MODEL_NAME" # Must match the loaded model in LM Studio
```

Notes:
- Tool/function calling is supported via the OpenAI tools schema.
- Reasoning/Thinking with LM Studio:
- If you enable LM Studio → Settings → Developer → "When applicable, separate reasoning_content and content in API responses",
Mini Agent will display the model’s reasoning in the "🧠 Thinking" panel.
- If the model emits thoughts as `<think>...</think>` inside normal content, Mini Agent will extract and move those
to the "🧠 Thinking" panel automatically and show only the visible answer under "🤖 Assistant".

Interleaved thinking (MiniMax vs LM Studio):
- MiniMax M2 (Anthropic-compatible): returns structured content blocks, including `{"type":"thinking","thinking":"..."}` and
tool use blocks. Mini Agent preserves and resubmits these blocks to support true interleaved thinking across steps.
- LM Studio (OpenAI-compatible): returns standard Chat Completions. Reasoning may be provided via `reasoning_content` (when the
setting is enabled) or inline `<think>...</think>` tags. Mini Agent supports both and renders them in the "🧠 Thinking" panel.
40 changes: 34 additions & 6 deletions mini_agent/agent.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
"""Core Agent implementation."""

import json
import re
import unicodedata
from pathlib import Path

import tiktoken
Expand Down Expand Up @@ -267,12 +269,38 @@ async def run(self) -> str:
# Check and summarize message history to prevent context overflow
await self._summarize_messages()

# Step header
print(f"\n{Colors.DIM}╭{'─' * 58}╮{Colors.RESET}")
print(
f"{Colors.DIM}│{Colors.RESET} {Colors.BOLD}{Colors.BRIGHT_CYAN}💭 Step {step + 1}/{self.max_steps}{Colors.RESET}{' ' * (49 - len(f'Step {step + 1}/{self.max_steps}'))}{Colors.DIM}│{Colors.RESET}"
)
print(f"{Colors.DIM}╰{'─' * 58}╯{Colors.RESET}")
# Step header (ANSI/emoji aware padding)
INNER_WIDTH = 58

ansi_re = re.compile(r"\x1b\[[0-9;]*m")

def col_width(s: str) -> int:
    """Approximate the terminal column width of *s*.

    ANSI SGR color sequences are stripped first. Combining marks and
    zero-width characters (ZWJ, variation selectors, other Cf format
    chars) count as 0 columns; emoji and East-Asian Wide/Fullwidth
    characters count as 2; everything else counts as 1. This is an
    approximation — exact rendering is terminal-dependent.
    """
    total = 0
    for ch in ansi_re.sub("", s):
        # Combining marks render on top of the previous glyph.
        if unicodedata.combining(ch):
            continue
        code = ord(ch)
        # Zero-width: ZWJ/format chars (Cf) and variation selectors.
        # Without this, emoji ZWJ sequences over-count and break boxes.
        if unicodedata.category(ch) == "Cf" or 0xFE00 <= code <= 0xFE0F:
            continue
        # Most emoji blocks render double-width in terminals.
        if 0x1F300 <= code <= 0x1FAFF:
            total += 2
        elif unicodedata.east_asian_width(ch) in ("W", "F"):
            total += 2
        else:
            total += 1
    return total

def box_line(text: str):
    """Print one box row, padding or ellipsizing *text* to the inner width."""
    limit = INNER_WIDTH - 1  # one column is taken by the leading space
    if col_width(text) > limit:
        # Too wide: drop color codes, keep a prefix, append an ellipsis.
        stripped = ansi_re.sub("", text)
        if limit > 1:
            text = stripped[: limit - 1] + "…"
        else:
            text = stripped[:limit]
    filler = " " * max(0, INNER_WIDTH - 1 - col_width(text))
    print(f"{Colors.DIM}│{Colors.RESET} {text}{filler}{Colors.DIM}│{Colors.RESET}")

print(f"\n{Colors.DIM}╭{'─' * INNER_WIDTH}╮{Colors.RESET}")
header = f"{Colors.BOLD}{Colors.BRIGHT_CYAN}💭 Step {step + 1}/{self.max_steps}{Colors.RESET}"
box_line(header)
print(f"{Colors.DIM}╰{'─' * INNER_WIDTH}╯{Colors.RESET}")

# Get tool schemas
tool_schemas = [tool.to_schema() for tool in self.tools.values()]
Expand Down
102 changes: 74 additions & 28 deletions mini_agent/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@

import argparse
import asyncio
import re
import unicodedata
from datetime import datetime
from pathlib import Path
from typing import List
Expand Down Expand Up @@ -68,14 +70,38 @@ class Colors:
BG_BLUE = "\033[44m"


ansi_re = re.compile(r"\x1b\[[0-9;]*m")


def _col_width(s: str) -> int:
"""Approximate terminal column width (ANSI-stripped, emoji/EAW-aware)."""
s = ansi_re.sub("", s)
total = 0
for ch in s:
if unicodedata.combining(ch):
continue
code = ord(ch)
# Treat most emoji as width 2
if 0x1F300 <= code <= 0x1FAFF:
total += 2
continue
eaw = unicodedata.east_asian_width(ch)
total += 2 if eaw in ("W", "F") else 1
return total


def print_banner():
    """Print the welcome banner, centering the title inside the box."""
    width = 58
    frame = f"{Colors.BOLD}{Colors.BRIGHT_CYAN}"
    title = f"{Colors.BOLD}🤖 Mini Agent - Multi-turn Interactive Session{Colors.RESET}"
    # Center by visible width (ANSI codes ignored, emoji counted as 2).
    cols = _col_width(title)
    pad_left = max(0, (width - cols) // 2)
    pad_right = max(0, width - cols - pad_left)
    print()
    print(f"{frame}╔{'═' * width}╗{Colors.RESET}")
    print(f"{frame}║{Colors.RESET}{' ' * pad_left}{title}{' ' * pad_right}{frame}║{Colors.RESET}")
    print(f"{frame}╚{'═' * width}╝{Colors.RESET}")
    print()


Expand Down Expand Up @@ -108,29 +134,48 @@ def print_help():


def print_session_info(agent: Agent, workspace_dir: Path, model: str):
    """Print session information inside a fixed-width box.

    Padding is computed from the visible column width (ANSI color codes
    are ignored for width calculation), so colored or non-ASCII values
    cannot misalign the box borders.
    """

    INNER_WIDTH = 58

    def clip_cols(plain: str, max_cols: int) -> str:
        # Column-aware prefix: stop before exceeding max_cols so wide
        # (CJK/emoji) characters cannot overflow the box after truncation.
        out: list[str] = []
        used = 0
        for ch in plain:
            w = _col_width(ch)
            if used + w > max_cols:
                break
            out.append(ch)
            used += w
        return "".join(out)

    def box_line(text: str):
        # Fit text into INNER_WIDTH with a leading single space and trailing padding
        max_content = INNER_WIDTH - 1  # account for the single leading space
        if _col_width(text) > max_content:
            # Ellipsize by columns, not characters (drop colors first).
            plain = ansi_re.sub("", text)
            if max_content > 1:
                text = clip_cols(plain, max_content - 1) + "…"
            else:
                text = clip_cols(plain, max_content)
        pad = max(0, INNER_WIDTH - 1 - _col_width(text))
        print(f"{Colors.DIM}│{Colors.RESET} {text}{' ' * pad}{Colors.DIM}│{Colors.RESET}")

    # Top border
    print(f"{Colors.DIM}┌{'─' * INNER_WIDTH}┐{Colors.RESET}")
    # Header centered by adding left padding inside the text
    header = f"{Colors.BRIGHT_CYAN}Session Info{Colors.RESET}"
    free_space = INNER_WIDTH - _col_width(header)
    left_pad = max(0, (free_space - 1) // 2)  # subtract 1 for the fixed leading space
    box_line(" " * left_pad + header)
    # Divider
    print(f"{Colors.DIM}├{'─' * INNER_WIDTH}┤{Colors.RESET}")

    box_line(f"Model: {model}")
    box_line(f"Workspace: {workspace_dir}")
    box_line(f"Message History: {len(agent.messages)} messages")
    box_line(f"Available Tools: {len(agent.tools)} tools")

    # Bottom border and helper hint
    print(f"{Colors.DIM}└{'─' * INNER_WIDTH}┘{Colors.RESET}")
    print()
    print(f"{Colors.DIM}Type {Colors.BRIGHT_GREEN}/help{Colors.DIM} for help, {Colors.BRIGHT_GREEN}/exit{Colors.DIM} to quit{Colors.RESET}")
    print()


Expand Down Expand Up @@ -376,6 +421,7 @@ def on_retry(exception: Exception, attempt: int):
api_key=config.llm.api_key,
api_base=config.llm.api_base,
model=config.llm.model,
provider=getattr(config.llm, "provider", "anthropic"),
retry_config=retry_config if config.llm.retry.enabled else None,
)

Expand Down
5 changes: 5 additions & 0 deletions mini_agent/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,10 @@ class LLMConfig(BaseModel):
api_key: str
api_base: str = "https://api.minimax.io/anthropic"
model: str = "MiniMax-M2"
# Provider for the API. Supported values:
# - "anthropic" (default, MiniMax Anthropic-compatible endpoint)
# - "openai-compatible" (e.g., LM Studio local server or any OpenAI-compatible API)
provider: str = "anthropic"
retry: RetryConfig = Field(default_factory=RetryConfig)


Expand Down Expand Up @@ -106,6 +110,7 @@ def from_yaml(cls, config_path: str | Path) -> "Config":
api_key=data["api_key"],
api_base=data.get("api_base", "https://api.minimax.io/anthropic"),
model=data.get("model", "MiniMax-M2"),
provider=data.get("provider", "anthropic"),
retry=retry_config,
)

Expand Down
14 changes: 13 additions & 1 deletion mini_agent/config/config-example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,12 @@
# - All config files (config.yaml, mcp.json, system_prompt.md) are in the same directory

# ===== LLM Configuration =====
# MiniMax API Configuration
# Provider selection:
# - anthropic (default): MiniMax Anthropic-compatible API
# - openai-compatible: OpenAI-compatible servers (e.g., LM Studio local API)
provider: "anthropic"

# MiniMax API Configuration (anthropic)
# MiniMax provides both global and China platforms:
# - Global: https://platform.minimax.io -> api_base: https://api.minimax.io/anthropic
# - China: https://platform.minimaxi.com -> api_base: https://api.minimaxi.com/anthropic
Expand All @@ -21,6 +26,13 @@ api_base: "https://api.minimax.io/anthropic" # Global users
# api_base: "https://api.minimaxi.com/anthropic" # China users
model: "MiniMax-M2"

# --- LM Studio (OpenAI-compatible) Example ---
# To use LM Studio, change these fields (uncomment and adjust):
# provider: "openai-compatible"
# api_key: "lm-studio" # LM Studio accepts any non-empty key
# api_base: "http://localhost:1234/v1" # LM Studio local server default
# model: "YOUR_LOADED_MODEL_NAME" # Must match the model loaded in LM Studio

# ===== Retry Configuration =====
retry:
enabled: true # Enable retry mechanism
Expand Down
Loading