diff --git a/Dockerfile b/Dockerfile
index 77bd516a..e2151515 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -83,6 +83,7 @@ ENV PATH="/home/pentester/.local/bin:$PATH"
 COPY --chown=pentester:pentester pyproject.toml README.md /app/
 COPY --chown=pentester:pentester pentestgpt/ /app/pentestgpt/
 COPY --chown=pentester:pentester scripts/entrypoint.sh /home/pentester/entrypoint.sh
+COPY --chown=pentester:pentester scripts/ccr-config-template.json /app/scripts/ccr-config-template.json
 
 # Install Python dependencies as root to system Python
 # Allow pip to override system packages in Docker
diff --git a/Makefile b/Makefile
index 1f38910e..f6cf3c1f 100644
--- a/Makefile
+++ b/Makefile
@@ -13,7 +13,7 @@ help:
 	@echo "Docker Workflow (Primary Usage):"
 	@echo "  make install    Install dependencies (uv sync) and build Docker image"
 	@echo "  make config     Configure authentication (interactive)"
-	@echo "                  Options: OpenRouter, Anthropic API, Manual Login"
+	@echo "                  Options: Claude Login, OpenRouter, Anthropic API, Local LLM"
 	@echo "  make connect    Connect to container (main entry point)"
 	@echo "  make start      Start container in background"
 	@echo "  make stop       Stop container (keeps config)"
diff --git a/README.md b/README.md
index 0dcee790..57d49744 100644
--- a/README.md
+++ b/README.md
@@ -78,10 +78,11 @@
 ### Prerequisites
 
 - **Docker** (required) - [Install Docker](https://docs.docker.com/get-docker/)
-- **Claude Model** (prioritized) - PentestGPT is optimized for Claude models via:
+- **LLM Provider** (choose one):
   - Anthropic API Key from [console.anthropic.com](https://console.anthropic.com/)
   - Claude OAuth Login (requires Claude subscription)
   - OpenRouter for alternative models at [openrouter.ai](https://openrouter.ai/keys)
+  - Local LLM via LM Studio, Ollama, or compatible server (see [Using Local LLMs](#using-local-llms))
 
 ### Installation
 
@@ -142,6 +143,52 @@ pentestgpt --target 10.10.11.50 --instruction "WordPress site, focus on plugin v
 
 ---
 
+## Using Local LLMs
+
+PentestGPT supports routing requests to local LLM servers (LM Studio, Ollama, text-generation-webui, etc.) running on your host machine.
+
+### Prerequisites
+
+- Local LLM server with an OpenAI-compatible API endpoint
+  - **LM Studio**: Enable server mode (default port 1234)
+  - **Ollama**: Run `ollama serve` (default port 11434) and update `api_base_url` in the template to match
+
+### Setup
+
+```bash
+# Configure PentestGPT for local LLM
+make config
+# Select option 4: Local LLM
+
+# Start your local LLM server on the host machine
+# Then connect to the container
+make connect
+```
+
+### Customizing Models
+
+Edit `scripts/ccr-config-template.json` to customize:
+
+- **`localLLM.api_base_url`**: Your LLM server URL (default: `http://host.docker.internal:1234/v1/chat/completions`)
+- **`localLLM.models`**: Available model names on your server
+- **Router section**: Which models handle which operations
+
+| Route | Purpose | Default Model |
+|-------|---------|---------------|
+| `default` | General tasks | openai/gpt-oss-20b |
+| `background` | Background operations | openai/gpt-oss-20b |
+| `think` | Reasoning-heavy tasks | qwen/qwen3-coder-30b |
+| `longContext` | Large context handling | qwen/qwen3-coder-30b |
+| `webSearch` | Web search operations | openai/gpt-oss-20b |
+
+### Troubleshooting
+
+- **Connection refused**: Ensure your LLM server is running and listening on the configured port
+- **Docker networking**: Use `host.docker.internal` (not `localhost`) to access host services from Docker
+- **Check CCR logs**: Inside the container, run `cat /tmp/ccr.log`
+
+---
+
 ## Telemetry
 
 PentestGPT collects anonymous usage data to help improve the tool. This data is sent to our [Langfuse](https://langfuse.com) project and includes:
diff --git a/scripts/ccr-config-template.json b/scripts/ccr-config-template.json
new file mode 100644
index 00000000..d34c7a3b
--- /dev/null
+++ b/scripts/ccr-config-template.json
@@ -0,0 +1,44 @@
+{
+  "LOG": false,
+  "LOG_LEVEL": "debug",
+  "CLAUDE_PATH": "",
+  "HOST": "127.0.0.1",
+  "PORT": 3456,
+  "APIKEY": "",
+  "API_TIMEOUT_MS": "600000",
+  "PROXY_URL": "",
+  "transformers": [],
+  "Providers": [
+    {
+      "name": "openrouter",
+      "api_base_url": "https://openrouter.ai/api/v1/chat/completions",
+      "api_key": "__OPENROUTER_API_KEY__",
+      "models": [
+        "google/gemini-2.5-pro-preview",
+        "google/gemini-3-pro-preview",
+        "openai/gpt-5.1"
+      ],
+      "transformer": {
+        "use": ["openrouter"]
+      }
+    },
+    {
+      "name": "localLLM",
+      "api_base_url": "http://host.docker.internal:1234/v1/chat/completions",
+      "api_key": "not-needed",
+      "models": ["qwen/qwen3-coder-30b", "openai/gpt-oss-20b"]
+    }
+  ],
+  "StatusLine": {
+    "enabled": false,
+    "currentStyle": "default",
+    "default": {
+      "modules": []
+    },
+    "powerline": {
+      "modules": []
+    }
+  },
+  "Router": "__ROUTER_CONFIG__",
+  "CUSTOM_ROUTER_PATH": ""
+}
diff --git a/scripts/config.sh b/scripts/config.sh
index 74bac2b9..027dcdbd 100755
--- a/scripts/config.sh
+++ b/scripts/config.sh
@@ -39,8 +39,11 @@ echo ""
 echo -e "  ${GREEN}[3]${NC} Anthropic API Key"
 echo -e "      Use Anthropic's Claude directly with your API key"
 echo ""
+echo -e "  ${GREEN}[4]${NC} Local LLM (via LM Studio, Ollama, etc.)"
+echo -e "      Route requests to a local LLM server on your host machine"
+echo ""
 
-read -p "Enter your choice [1-3] (default: 1): " choice
+read -p "Enter your choice [1-4] (default: 1): " choice
 choice="${choice:-1}"
 
 case $choice in
@@ -111,6 +114,26 @@ EOF
 
         echo -e "${GREEN}Anthropic API key saved!${NC}"
         ;;
+    4)
+        # Save auth mode for Local LLM
+        cat > "$ENV_FILE" << EOF
+# PentestGPT Authentication Configuration
+# Generated by make config
+PENTESTGPT_AUTH_MODE=local
+EOF
+
+        echo -e "${GREEN}Local LLM mode selected!${NC}"
+        echo ""
+        echo -e "${BLUE}Setup Instructions:${NC}"
+        echo "  1. Start your local LLM server (e.g., LM Studio) on your host machine"
+        echo "     Default expected URL: http://localhost:1234/v1/chat/completions"
+        echo ""
+        echo "  2. To customize models or URL, edit:"
+        echo "     scripts/ccr-config-template.json"
+        echo ""
+        echo "  3. Run 'make connect' to start PentestGPT"
+        ;;
+
     *)
         echo -e "${RED}Invalid choice. Exiting.${NC}"
         exit 1
diff --git a/scripts/entrypoint.sh b/scripts/entrypoint.sh
index e6e6f7e5..a5200aa1 100644
--- a/scripts/entrypoint.sh
+++ b/scripts/entrypoint.sh
@@ -15,44 +15,40 @@ BLUE='\033[0;34m'
 YELLOW='\033[0;33m'
 NC='\033[0m'
 
-setup_openrouter() {
-    local api_key="$1"
+# Router configurations for different modes
+OPENROUTER_ROUTER='{"default":"openrouter,openai/gpt-5.1","background":"openrouter,openai/gpt-5.1","think":"openrouter,openai/gpt-5.1","longContext":"openrouter,openai/gpt-5.1","longContextThreshold":60000,"webSearch":"openrouter,google/gemini-3-pro-preview"}'
+LOCAL_ROUTER='{"default":"localLLM,openai/gpt-oss-20b","background":"localLLM,openai/gpt-oss-20b","think":"localLLM,qwen/qwen3-coder-30b","longContext":"localLLM,qwen/qwen3-coder-30b","longContextThreshold":60000,"webSearch":"localLLM,openai/gpt-oss-20b"}'
+
+setup_ccr() {
+    local mode="$1"
+    local api_key="$2"
+    local template_file="/app/scripts/ccr-config-template.json"
 
     # Create CCR config directory if needed
     mkdir -p "$CCR_CONFIG_DIR"
 
-    # Generate CCR config with OpenRouter
-    cat > "$CCR_CONFIG_FILE" << EOF
-{
-    "LOG": false,
-    "HOST": "127.0.0.1",
-    "PORT": 3456,
-    "API_TIMEOUT_MS": "600000",
-    "Providers": [
-        {
-            "name": "openrouter",
-            "api_base_url": "https://openrouter.ai/api/v1/chat/completions",
-            "api_key": "${api_key}",
-            "models": [
-                "google/gemini-2.5-pro-preview",
-                "google/gemini-3-pro-preview",
-                "openai/gpt-5.1"
-            ],
-            "transformer": {
-                "use": ["openrouter"]
-            }
-        }
-    ],
-    "Router": {
-        "default": "openrouter,openai/gpt-5.1",
-        "background": "openrouter,openai/gpt-5.1",
-        "think": "openrouter,openai/gpt-5.1",
-        "longContext": "openrouter,openai/gpt-5.1",
-        "longContextThreshold": 60000,
-        "webSearch": "openrouter,google/gemini-3-pro-preview"
-    }
-}
-EOF
+    # Check if template exists
+    if [ ! -f "$template_file" ]; then
-f "$template_file" ]; then + echo -e "${YELLOW}Error: CCR config template not found at $template_file${NC}" + exit 1 + fi + + # Copy template and substitute placeholders + cp "$template_file" "$CCR_CONFIG_FILE" + + # Substitute API key (for openrouter mode) + if [ -n "$api_key" ]; then + sed -i "s/__OPENROUTER_API_KEY__/${api_key}/g" "$CCR_CONFIG_FILE" + fi + + # Substitute Router config based on mode (use | as delimiter to avoid conflicts with /) + if [ "$mode" = "openrouter" ]; then + sed -i "s|\"__ROUTER_CONFIG__\"|${OPENROUTER_ROUTER}|g" "$CCR_CONFIG_FILE" + local display_model="openai/gpt-5.1" + else + sed -i "s|\"__ROUTER_CONFIG__\"|${LOCAL_ROUTER}|g" "$CCR_CONFIG_FILE" + local display_model="localLLM (qwen/qwen3-coder-30b, openai/gpt-oss-20b)" + fi echo -e "${BLUE}Starting Claude Code Router...${NC}" @@ -75,14 +71,14 @@ EOF sed -i '/eval "$(ccr activate)"/d' "$BASHRC_FILE" 2>/dev/null || true # Add ccr activation to bashrc - echo '# CCR activation for OpenRouter' >> "$BASHRC_FILE" + echo "# CCR activation for ${mode}" >> "$BASHRC_FILE" echo 'eval "$(ccr activate 2>/dev/null)" || true' >> "$BASHRC_FILE" # Also export for the current session (will be inherited by exec'd shell) eval "$(ccr activate 2>/dev/null)" || true - echo -e "${GREEN}CCR activated with OpenRouter backend${NC}" - echo -e "${BLUE}Default model: openai/gpt-5.1${NC}" + echo -e "${GREEN}CCR activated with ${mode} backend${NC}" + echo -e "${BLUE}Default model: ${display_model}${NC}" } echo "" @@ -95,7 +91,12 @@ case "$AUTH_MODE" in echo "Please run 'make config' and select OpenRouter option" exit 1 fi - setup_openrouter "$OPENROUTER_API_KEY" + setup_ccr "openrouter" "$OPENROUTER_API_KEY" + ;; + local) + echo -e "${GREEN}Local LLM mode${NC}" + echo -e "Ensure your local LLM server is running on host.docker.internal:1234" + setup_ccr "local" "" ;; anthropic) if [ -z "$ANTHROPIC_API_KEY" ]; then