forked from PsycheFoundation/nousnet
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathjustfile
More file actions
290 lines (234 loc) · 11 KB
/
justfile
File metadata and controls
290 lines (234 loc) · 11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
# Load the `nix` submodule recipes (invoked below as `just nix <recipe>`)
mod nix

# Load the decentralized-architecture dev recipes from that tree's justfile
mod dev 'architectures/decentralized/justfile'
# List available recipes (runs when `just` is invoked with no arguments)
default:
    just --list
# Quick sanity check: build the solana client and make sure its CLI parses
check-client:
    cargo run -p psyche-solana-client -- --help
# test inference network discovery (2 nodes in tmux)
test-inference-network:
    ./scripts/test-inference-network.sh
# format & lint-fix code — deprecated, prefer `nix fmt`; the sleep gives the
# reader time to see the deprecation notice before output scrolls past
fmt:
    echo "deprecated, use 'nix fmt' instead..."
    sleep 5
    cargo clippy --fix --allow-staged --all-targets
    cargo fmt
    nixfmt .
# spin up a local testnet, exporting OTLP metrics/traces/logs to a local
# collector on port 4318; extra args are forwarded to the `start` subcommand
local-testnet *args='':
    OLTP_METRICS_URL="http://localhost:4318/v1/metrics" OLTP_TRACING_URL="http://localhost:4318/v1/traces" OLTP_LOGS_URL="http://localhost:4318/v1/logs" cargo run -p psyche-centralized-local-testnet -- start {{ args }}
# run the centralized integration tests; optionally filter to a single test
# (the filter variant also enables --nocapture so its output is visible)
integration-test test_name="":
    if [ -z "{{ test_name }}" ]; then \
        cargo test --release -p psyche-centralized-testing --test integration_tests; \
    else \
        cargo test --release -p psyche-centralized-testing --test integration_tests -- --nocapture "{{ test_name }}"; \
    fi
# Determine whether to use Python support based on environment variable
# (read once at parse time; "0" when USE_PYTHON is unset)
use_python := env("USE_PYTHON", "0")
# Run decentralized integration tests with optional Python support and test filtering.
# Python mode (use_python == "1") sets up the Python test infra and enables the
# python,parallelism cargo features; otherwise the plain infra and features are used.
decentralized-integration-tests test_name="":
    #!/usr/bin/env bash
    set -euo pipefail
    # Build the cargo invocation once as an array instead of repeating it in
    # four nearly identical branches. The array is never empty, so expanding
    # it is safe under `set -u` on all bash versions.
    if [[ "{{ use_python }}" == "1" ]]; then
        echo "Running tests with Python support"
        just setup_python_test_infra
        cmd=(cargo test --release -p psyche-decentralized-testing --features python,parallelism --test integration_tests -- --nocapture)
    else
        echo "Running tests without Python support"
        just setup_test_infra
        cmd=(cargo test --release -p psyche-decentralized-testing --test integration_tests -- --nocapture)
    fi
    # Optional filter forwarded to the cargo test harness.
    if [[ -n "{{ test_name }}" ]]; then
        cmd+=("{{ test_name }}")
    fi
    "${cmd[@]}"
# run the decentralized chaos test suite; optionally filter to a single test
decentralized-chaos-integration-test test_name="":
    if [ -z "{{ test_name }}" ]; then \
        cargo test --release -p psyche-decentralized-testing --test chaos_tests -- --nocapture; \
    else \
        cargo test --release -p psyche-decentralized-testing --test chaos_tests -- --nocapture "{{ test_name }}"; \
    fi
# run the solana client's localnet tests (gated behind the
# solana-localnet-tests cargo feature)
solana-client-tests:
    cargo test --package psyche-solana-client --features solana-localnet-tests
# build psyche-book into output-dir (regenerates CLI docs first);
# the destination is quoted so paths containing spaces work
build_book output-dir="../book": generate_cli_docs
    mdbook build psyche-book -d "{{ output-dir }}"
# run an interactive development server for psyche-book
# (regenerates CLI docs first, then opens a browser)
serve_book: generate_cli_docs
    mdbook serve psyche-book --open
# regenerate the CLI --help markdown pages that psyche-book embeds,
# one file per binary under psyche-book/generated/cli/
generate_cli_docs:
    echo "generating CLI --help outputs for mdbook..."
    mkdir -p psyche-book/generated/cli/
    cargo run -p psyche-centralized-client print-all-help --markdown > psyche-book/generated/cli/psyche-centralized-client.md
    cargo run -p psyche-centralized-server print-all-help --markdown > psyche-book/generated/cli/psyche-centralized-server.md
    cargo run -p psyche-centralized-local-testnet print-all-help --markdown > psyche-book/generated/cli/psyche-centralized-local-testnet.md
    cargo run -p psyche-sidecar print-all-help --markdown > psyche-book/generated/cli/psyche-sidecar.md
    cargo run -p psyche-solana-client print-all-help --markdown > psyche-book/generated/cli/psyche-solana-client.md
# build the solana client image via the nix submodule, then run it detached
# with all GPUs; extra ARGS are inserted as `docker run` options
run_docker_client *ARGS:
    just nix build_docker_solana_client
    docker run -d {{ ARGS }} --gpus all psyche-solana-client
# Setup clients assigning one available GPU to each of them.
# There's no way to do this using the replicas from docker compose file, so we have to do it manually.
setup_gpu_clients num_clients="1":
    ./scripts/coordinator-address-check.sh
    just nix build_docker_solana_test_client
    ./scripts/train-multiple-gpu-localnet.sh {{ num_clients }}
# Remove dangling (untagged) Docker images. `docker image prune -f` is the
# documented command for this and is a no-op when there are none — unlike
# `docker rmi $(docker images -f dangling=true -q)`, which fails with
# "requires at least 1 argument" when the list is empty.
clean_stale_images:
    docker image prune -f
# Build & push the centralized client Docker image
docker_push_centralized_client:
    just nix docker_build_centralized_client
    docker push docker.io/nousresearch/psyche-centralized-client
# Setup the infrastructure for testing locally using Docker.
# Builds both anchor programs, then the no-Python test client image and the
# test validator image via the nix submodule.
setup_test_infra:
    cd architectures/decentralized/solana-coordinator && anchor build
    cd architectures/decentralized/solana-authorizer && anchor build
    just nix build_docker_solana_test_client_no_python
    just nix build_docker_solana_test_validator
# Same as setup_test_infra, but builds the Python-enabled test client image.
setup_python_test_infra:
    cd architectures/decentralized/solana-coordinator && anchor build
    cd architectures/decentralized/solana-authorizer && anchor build
    just nix build_docker_solana_test_client
    just nix build_docker_solana_test_validator
# Start the local Solana test validator, run the test-run setup script, then
# bring up `num_clients` test clients — with GPU support when nvidia-smi is
# present and USE_GPU is not "0".
run_test_infra num_clients="1":
    #!/usr/bin/env bash
    # Strict mode for consistency with the file's other bash recipes; `-u`
    # requires the ${USE_GPU:-} default below since USE_GPU may be unset.
    set -euo pipefail
    cd docker/test
    # Start validator only first
    echo "Starting validator and deploying contracts..."
    docker compose up -d --wait psyche-solana-test-validator
    sleep 2 # Extra buffer for RPC to be fully ready
    # Run setup script from project root
    echo "Setting up test run..."
    cd ../..
    ./scripts/setup-test-run.sh
    # Now start the client services
    cd docker/test
    echo "Starting clients..."
    if [ "${USE_GPU:-}" != "0" ] && command -v nvidia-smi &> /dev/null; then
        echo "GPU detected and USE_GPU not set to 0, enabling GPU support"
        NUM_REPLICAS={{ num_clients }} docker compose -f docker-compose.yml -f docker-compose.gpu.yml up -d psyche-test-client
    else
        echo "Running without GPU support"
        NUM_REPLICAS={{ num_clients }} docker compose -f docker-compose.yml up -d psyche-test-client
    fi
# Like run_test_infra, but for the subscriptions test: the validator comes
# from the base compose file while clients plus the nginx/nginx_2 proxies
# come from the subscriptions_test overlay.
run_test_infra_with_proxies_validator num_clients="1":
    #!/usr/bin/env bash
    # Strict mode for consistency with the file's other bash recipes; `-u`
    # requires the ${USE_GPU:-} default below since USE_GPU may be unset.
    set -euo pipefail
    cd docker/test/subscriptions_test
    # Start validator only first
    echo "Starting validator and deploying contracts..."
    docker compose -f ../docker-compose.yml up -d --wait psyche-solana-test-validator
    sleep 2 # Extra buffer for RPC to be fully ready
    # Run setup script from project root
    echo "Setting up test run..."
    cd ../../..
    RPC="http://127.0.0.1:8899" WS_RPC="ws://127.0.0.1:8900" RUN_ID="test" ./scripts/setup-test-run.sh
    # Now start the client and proxy services
    cd docker/test/subscriptions_test
    echo "Starting clients and proxies..."
    if [ "${USE_GPU:-}" != "0" ] && command -v nvidia-smi &> /dev/null; then
        echo "GPU detected and USE_GPU not set to 0, enabling GPU support"
        NUM_REPLICAS={{ num_clients }} docker compose -f ../docker-compose.yml -f docker-compose.yml -f ../docker-compose.gpu.yml up -d psyche-test-client nginx nginx_2
    else
        echo "Running without GPU support"
        NUM_REPLICAS={{ num_clients }} docker compose -f ../docker-compose.yml -f docker-compose.yml up -d psyche-test-client nginx nginx_2
    fi
# tear down the test stack (base compose file plus subscriptions_test overlay)
stop_test_infra:
    cd docker/test && docker compose -f docker-compose.yml -f subscriptions_test/docker-compose.yml down
# Run inference node with a local model (requires Python venv with vLLM)
inference-node model="gpt2":
    RUST_LOG=info,psyche_network=debug nix run .#psyche-inference-node -- \
        --model-name {{ model }} \
        --discovery-mode n0 \
        --relay-kind n0
# Run gateway node (HTTP API for inference requests)
gateway-node:
    RUST_LOG=info,psyche_network=debug nix run .#bin-psyche-inference-node-gateway-node -- \
        --discovery-mode n0 \
        --relay-kind n0
# Run full inference stack (gateway + inference node in tmux)
inference-stack model="gpt2":
    #!/usr/bin/env bash
    set -euo pipefail
    # Check if tmux is available
    if ! command -v tmux &> /dev/null; then
        echo "Error: tmux is required but not installed"
        exit 1
    fi
    SESSION="psyche-inference"
    # The gateway writes its endpoint here; the inference node reads it to bootstrap.
    GATEWAY_PEER_FILE="/tmp/psyche-gateway-peer.json"
    # Clean up old peer file
    rm -f "$GATEWAY_PEER_FILE"
    # Kill existing session if it exists
    tmux kill-session -t $SESSION 2>/dev/null || true
    echo "building gateway and inference node..."
    nix build .#bin-psyche-inference-node-gateway-node .#psyche-inference-node
    echo "Starting gateway node (bootstrap node)..."
    # Create new session with gateway (starts first to be bootstrap node)
    tmux new-session -d -s $SESSION -n gateway
    tmux send-keys -t $SESSION:gateway "PSYCHE_GATEWAY_ENDPOINT_FILE=$GATEWAY_PEER_FILE RUST_LOG=info,psyche_network=debug nix run .#bin-psyche-inference-node-gateway-node -- --discovery-mode n0 --relay-kind n0" C-m
    # Wait (up to 30s) for gateway to start and write peer file
    echo "Waiting for gateway to initialize and write endpoint..."
    for i in $(seq 1 30); do
        if [ -f "$GATEWAY_PEER_FILE" ]; then
            echo "Gateway peer file created"
            break
        fi
        sleep 1
    done
    if [ ! -f "$GATEWAY_PEER_FILE" ]; then
        echo "Error: Gateway failed to create peer file"
        exit 1
    fi
    # Wait a bit more for gateway HTTP server
    sleep 2
    echo "Gateway ready"
    echo ""
    echo "Starting inference node..."
    # Create window for inference node (bootstraps from gateway)
    tmux new-window -t $SESSION -n inference
    tmux send-keys -t $SESSION:inference "PSYCHE_GATEWAY_BOOTSTRAP_FILE=$GATEWAY_PEER_FILE RUST_LOG=info,psyche_network=debug nix run .#psyche-inference-node -- --model-name {{ model }} --discovery-mode n0 --relay-kind n0" C-m
    # Wait for inference node to start
    sleep 3
    echo "Inference node started"
    echo ""
    # Create window for testing; the '"'"' runs are the shell idiom for
    # embedding single quotes inside a single-quoted string
    tmux new-window -t $SESSION -n test
    tmux send-keys -t $SESSION:test "echo 'Test inference with:'; echo 'curl -X POST http://127.0.0.1:8000/v1/chat/completions -H \"Content-Type: application/json\" -d '\"'\"'{\"messages\": [{\"role\": \"user\", \"content\": \"Hello, world!\"}], \"max_tokens\": 50}'\"'\"''" C-m
    # Attach to session
    echo "Starting inference stack in tmux session '$SESSION'"
    echo "Windows: inference (node), gateway (HTTP API), test (for curl commands)"
    echo ""
    echo "To attach: tmux attach -t $SESSION"
    echo "To kill: tmux kill-session -t $SESSION"
    echo ""
    tmux attach -t $SESSION
# Test inference via HTTP (requires inference stack to be running)
# NOTE(review): prompt is spliced verbatim into the JSON body, so a prompt
# containing double quotes or backslashes produces invalid JSON — fine for
# manual smoke tests, but worth confirming before scripting around this.
test-inference prompt="Hello, world!" max_tokens="50":
    curl -X POST http://127.0.0.1:8000/v1/chat/completions \
        -H "Content-Type: application/json" \
        -d '{"messages": [{"role": "user", "content": "{{ prompt }}"}], "max_tokens": {{ max_tokens }}}'
# Run end-to-end test: start nodes, send request, verify response
test-inference-e2e model="gpt2" prompt="Hello, world!":
    ./scripts/test-inference-e2e.sh "{{ model }}" "{{ prompt }}"