Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
148 changes: 99 additions & 49 deletions CLAUDE.md

Large diffs are not rendered by default.

114 changes: 113 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,12 @@ PROJECT = exploit-lab
KIND_CLUSTER = exploit-lab-k8s

# Every target in this file is a command, not a file: declare them all
# phony so a same-named file in the repo can never shadow them and they
# always run. (The stale pre-diff duplicate of the k8s line is removed —
# without its trailing backslash it terminated the list early and orphaned
# the following continuation lines.)
.PHONY: lab-up lab-down lab-status lab-logs lab-shell lab-restart \
	lab-k8s-up lab-k8s-down lab-k8s-status \
	lab-adcs-up lab-adcs-down lab-adcs-destroy \
	lab-llm-up lab-llm-down \
	lab-saml-up lab-saml-down \
	lab-databricks-up lab-databricks-down \
	lab-oidc-up lab-oidc-down

## Start the contained lab environment
lab-up:
Expand Down Expand Up @@ -93,3 +98,110 @@ lab-k8s-status:
kubectl --context kind-$(KIND_CLUSTER) get pods --all-namespaces
@echo ""
kubectl --context kind-$(KIND_CLUSTER) get svc --all-namespaces

## ── AD CS lab (WS-C) ─────────────────────────────────────────────────────────
## Requires: Vagrant, VirtualBox, and the vagrant-reload plugin (install with: vagrant plugin install vagrant-reload)
## Stands up dc01 (DC + Enterprise CA) + ws01 + ws02 (workstations)
## Domain: corp.lab.local Network: 192.168.56.0/24 (host-only, no internet)

## Create and provision the AD CS Vagrant lab (dc01 + ws01 + ws02)
# Fails fast (with install pointers) before the slow `vagrant up` if any
# prerequisite is missing. The vagrant-reload plugin check was added to
# match the documented requirement above — provisioning needs mid-provision
# reboots and would otherwise fail minutes into the run.
lab-adcs-up:
	@command -v vagrant >/dev/null 2>&1 || { echo "ERROR: vagrant not found. Install: https://www.vagrantup.com/"; exit 1; }
	@command -v VBoxManage >/dev/null 2>&1 || { echo "ERROR: VirtualBox not found. Install: https://www.virtualbox.org/"; exit 1; }
	@vagrant plugin list 2>/dev/null | grep -q vagrant-reload || { echo "ERROR: vagrant-reload plugin not found. Install: vagrant plugin install vagrant-reload"; exit 1; }
	@echo "==> Starting AD CS lab..."
	cd infra/lab/ad-cs && vagrant up
	@echo ""
	@echo "=== AD CS Lab is running ==="
	@echo " dc01 (DC + CA) : 192.168.56.10 domain: corp.lab.local"
	@echo " ws01 : 192.168.56.11"
	@echo " ws02 : 192.168.56.12"
	@echo ""
	@echo " Enumerate templates:"
	@echo " EXPLOIT_LAB_ACTIVE=1 EXPLOIT_LAB_OFFLINE_VM=1 EXPLOIT_FIXTURE_ROOT=/tmp/lab \\"
	@echo " python tools/ad-cs/enum/enum.py --domain corp.lab.local --dc-ip 192.168.56.10 \\"
	@echo " --username alice --password 'AlicePass!1' --output /tmp/lab/findings.json"
	@echo ""
	@echo " Run ESC1 exploit:"
	@echo " EXPLOIT_LAB_ACTIVE=1 EXPLOIT_LAB_OFFLINE_VM=1 EXPLOIT_FIXTURE_ROOT=/tmp/lab \\"
	@echo " python tools/ad-cs/exploit/esc01/exploit.py --domain corp.lab.local \\"
	@echo " --dc-ip 192.168.56.10 --username alice --password 'AlicePass!1' \\"
	@echo " --target-user administrator --output-dir /tmp/lab/esc01-out"

## Stop (halt) the AD CS Vagrant lab VMs
# Graceful shutdown only: VM disks and state are preserved, so the next
# `lab-adcs-up` resumes quickly. Use lab-adcs-destroy to reclaim disk space.
# (cd and vagrant share one line because each recipe line runs in its own shell.)
lab-adcs-down:
	cd infra/lab/ad-cs && vagrant halt
	@echo "AD CS lab halted."

## Destroy the AD CS Vagrant lab VMs (removes all VMs and disks)
# DESTRUCTIVE: `-f` skips Vagrant's confirmation prompt. All VM state is
# lost; the next lab-adcs-up re-provisions everything from scratch.
lab-adcs-destroy:
	cd infra/lab/ad-cs && vagrant destroy -f
	@echo "AD CS lab destroyed."

## ── LLM/Agent attack lab (WS-E) ─────────────────────────────────────────────
## Requires: docker, docker compose, ~5GB for Ollama model download
## Stands up: Ollama (port 11434) + copilot Flask app (port 8080)
## Internal network only — no internet access.

## Start the LLM target lab (Ollama + enterprise copilot app)
# Checks for docker up front (same fail-fast pattern as lab-adcs-up) so a
# missing toolchain produces one clear error instead of a compose stack trace.
lab-llm-up:
	@command -v docker >/dev/null 2>&1 || { echo "ERROR: docker not found. Install: https://docs.docker.com/get-docker/"; exit 1; }
	docker compose -f infra/lab/llm-target/docker-compose.yml up -d --build
	@echo ""
	@echo "=== LLM Lab is running ==="
	@echo " Copilot app: http://127.0.0.1:8080"
	@echo " Ollama API: http://127.0.0.1:11434"
	@echo ""
	@echo " Pull model (first run): docker exec ollama ollama pull llama3.1:8b"
	@echo " Run injection eval: EXPLOIT_LAB_ACTIVE=1 python tools/llm-attacks/indirect-injection/eval_injection.py --target http://127.0.0.1:8080"

## Stop the LLM target lab
# `-v` also deletes named volumes — including any pulled Ollama model, which
# will be re-downloaded (~5GB) on the next lab-llm-up. `--remove-orphans`
# cleans up containers left behind by edits to the compose file.
lab-llm-down:
	docker compose -f infra/lab/llm-target/docker-compose.yml down -v --remove-orphans
	@echo "LLM lab stopped."

## ── Mock SAML lab (WS-D) ──────────────────────────────────────────────────────
## Stands up SimpleSAMLphp-equivalent Flask SAML SP on port 9400

## Start the mock SAML SP/IdP
# Build the image, clear any stale container, then try three launch modes in
# order of preference.
lab-saml-up:
	docker build -t mock-saml infra/lab/mock-saml/
# Remove any leftover container first: `docker run` with a fixed --name fails
# with a name conflict if a previous (even exited) container still holds it.
# `-` prefix: ignore the failure when no such container exists.
	-docker rm -f mock-saml 2>/dev/null
# Prefer attaching to the shared lab network; fall back to the lab compose
# file, then to a fully standalone container. (The previous
# "infra/lab/mock-saml/../../../docker-compose.lab.yml" path resolved to the
# repo root, so the compose file is referenced directly.)
	docker run -d --name mock-saml --network exploit-lab_internal -p 9400:9400 \
		-e LAB_SAML_TRUST_ALL=1 mock-saml || \
	docker compose -f docker-compose.lab.yml up -d mock-saml 2>/dev/null || \
	docker run -d --name mock-saml -p 9400:9400 -e LAB_SAML_TRUST_ALL=1 mock-saml
	@echo "Mock SAML SP: http://127.0.0.1:9400"

## Stop the mock SAML SP/IdP
# Idempotent teardown: if the container is already gone, `docker stop` fails,
# the rm is skipped (nothing to remove anyway), and `|| true` keeps the
# target green.
lab-saml-down:
	docker stop mock-saml && docker rm mock-saml || true
	@echo "Mock SAML stopped."

## ── Mock Databricks lab (WS-D) ───────────────────────────────────────────────
## Stands up mock Databricks Apps OAuth endpoint on port 9500

## Start the mock Databricks Apps OAuth mock
lab-databricks-up:
	docker build -t mock-databricks infra/lab/mock-databricks/
# Remove any leftover container from a previous/crashed run: `docker run`
# with a fixed --name otherwise fails with "name already in use".
# `-` prefix: ignore the failure when no such container exists.
	-docker rm -f mock-databricks 2>/dev/null
	docker run -d --name mock-databricks -p 9500:9500 mock-databricks
	@echo "Mock Databricks: http://127.0.0.1:9500"
	@echo " OAuth token: POST http://127.0.0.1:9500/oidc/v1/token"
	@echo " OBO flow: POST http://127.0.0.1:9500/oidc/v1/token (grant_type=urn:ietf:params:oauth:grant-type:jwt-bearer)"

## Stop the mock Databricks Apps OAuth mock
# Idempotent teardown: `|| true` keeps the target green when the container
# is already gone (stop fails, rm is skipped — there is nothing to remove).
lab-databricks-down:
	docker stop mock-databricks && docker rm mock-databricks || true
	@echo "Mock Databricks stopped."

## ── Mock OIDC Issuer lab (WS-D) ──────────────────────────────────────────────
## Stands up GitHub Actions OIDC issuer simulation on port 9300

## Start the mock OIDC issuer (GitHub Actions simulation)
# Runs the issuer as a backgrounded host process (no container); it outlives
# this recipe's shell and is terminated by lab-oidc-down via pkill. The
# python3 check fails fast with a clear message — without it, a missing
# interpreter made the `&` line fail silently after "Starting..." printed.
lab-oidc-up:
	@command -v python3 >/dev/null 2>&1 || { echo "ERROR: python3 not found. Install: https://www.python.org/"; exit 1; }
	@echo "Starting mock OIDC issuer on 127.0.0.1:9300..."
	EXPLOIT_LAB_ACTIVE=1 python3 tools/cloud-identity/wif/mock_oidc_issuer.py &
	@echo "Mock OIDC issuer: http://127.0.0.1:9300"
	@echo " OIDC config: http://127.0.0.1:9300/.well-known/openid-configuration"

## Stop the mock OIDC issuer
# Matches the backgrounded process by its script path (started by
# lab-oidc-up). `|| true` keeps the target green when no issuer is running
# (pkill exits non-zero if nothing matched).
lab-oidc-down:
	pkill -f mock_oidc_issuer.py || true
	@echo "Mock OIDC issuer stopped."
Loading
Loading