Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
154be8d
Port DeepSeek-V4-Pro FP4 disaggregated vLLM sweep from gb200 to gb300-cr
Oseltamivir Apr 25, 2026
017b66a
Fill in PR link for gb300-cr changelog entry
Oseltamivir Apr 25, 2026
b91ca49
Rename gb300-cr to gb300-cw; fix model path to /mnt/vast/models/dsv4/
Oseltamivir Apr 25, 2026
b6ebbd3
Fix gb300-cw SLURM account and extend runner group to _2/_3
Oseltamivir Apr 25, 2026
c6b45fd
Pin runner-side uv to /tmp so x86 binary doesn't leak to ARM64 compute
Oseltamivir Apr 25, 2026
aaea407
Force --segment per recipe via sbatch_directives
Oseltamivir Apr 25, 2026
3bd82f1
Cap cargo parallelism via CARGO_BUILD_JOBS=4 in gb300 recipes
Oseltamivir Apr 25, 2026
b3d2b12
Force --mem=0 (use full node memory) on every gb300 recipe; fix hered…
Oseltamivir Apr 25, 2026
b3d2bd8
Merge branch 'main' into dsv4-fp4-gb300-dynamo-vllm-disagg
Oseltamivir Apr 25, 2026
33f6eb4
Update perf-changelog.yaml
Oseltamivir Apr 25, 2026
43c3bc4
Update gb300 recipe headers — segment is recipe-driven, not auto
Oseltamivir Apr 25, 2026
32aca3e
Set NVIDIA_VISIBLE_DEVICES + DRIVER_CAPABILITIES so enroot mounts lib…
Oseltamivir Apr 25, 2026
e66e667
Cache dynamo wheel build globally on /mnt/vast (gb300-cw)
Oseltamivir Apr 25, 2026
9cb8ee5
Switch dynamo cache lock from flock to mkdir (NFS doesn't honor flock)
Oseltamivir Apr 25, 2026
369b1ed
Pre-build dynamo wheel via single-node srun before sbatch (gb300-cw)
Oseltamivir Apr 25, 2026
f37eb70
Prebuild srun: add --mem=0, cap CARGO_BUILD_JOBS=8, drop rustc debuginfo
Oseltamivir Apr 25, 2026
86ac394
Mount /mnt/vast/dynamo_cache into worker containers (extra_mount)
Oseltamivir Apr 25, 2026
6997f95
Patch vllm HANDSHAKE_TIMEOUT_MINS 5->30 in setup script
Oseltamivir Apr 25, 2026
3900434
Drop NVL-only NCCL flags + add NCCL_DEBUG=INFO
Oseltamivir Apr 25, 2026
7851967
Re-add NCCL_MNNVL_ENABLE, add debug diagnostics, reduce to 1p1d repro…
Oseltamivir Apr 25, 2026
87bdf1f
Remove vLLM HANDSHAKE_TIMEOUT_MINS sed patch from setup script
Oseltamivir Apr 25, 2026
7f526db
Restore handshake timeout patch, add DP Coordinator logging, drop NCC…
Oseltamivir Apr 25, 2026
6415458
Rewrite coordinator patch to match actual vLLM source strings
Oseltamivir Apr 25, 2026
cedac56
Rewrite coordinator patch: regex matching + inspect.getsource verify
Oseltamivir Apr 25, 2026
ff4ab3a
Merge branch 'main' into dsv4-fp4-gb300-dynamo-vllm-disagg
Oseltamivir Apr 25, 2026
8570717
more
Oseltamivir Apr 26, 2026
b39f41e
Merge branch 'main' into dsv4-fp4-gb300-dynamo-vllm-disagg
Oseltamivir Apr 26, 2026
3f33f27
Merge branch 'main' into dsv4-fp4-gb300-dynamo-vllm-disagg
Oseltamivir Apr 26, 2026
df79838
configs
Oseltamivir Apr 26, 2026
05a31a1
PR84 copy
Oseltamivir Apr 28, 2026
0899117
Merge branch 'main' into dsv4-fp4-gb300-dynamo-vllm-disagg
Oseltamivir Apr 28, 2026
e92a224
PR84 copy
Oseltamivir Apr 28, 2026
d025d56
Merge branch 'main' into dsv4-fp4-gb300-dynamo-vllm-disagg
Oseltamivir Apr 28, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
93 changes: 93 additions & 0 deletions .github/configs/nvidia-master.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7714,3 +7714,96 @@ dsv4-fp4-gb200-dynamo-vllm:
tp: 16
ep: 16
dp-attn: true

# DeepSeek-V4-Pro FP4 disaggregated (prefill/decode split) sweep with
# dynamo + vLLM on the gb300-cw cluster (4 GPUs/node).
# NOTE(review): source indentation was lost in extraction; the nesting below
# is reconstructed from the sweep schema (additional-settings placed at the
# search-space entry level) — confirm against the original file.
dsv4-fp4-gb300-dynamo-vllm:
  # Digest-pinned vLLM OpenAI-server container shared by every recipe here.
  image: vllm/vllm-openai@sha256:d29a90b13bb9758821839dd810db9679055e8adf7c670df9f0a432f45f2488a5
  model: deepseek-ai/DeepSeek-V4-Pro
  model-prefix: dsv4
  runner: gb300-cw  # resolves via the gb300-cw group in runners.yaml
  precision: fp4
  framework: dynamo-vllm
  multinode: true
  disagg: true  # disaggregated prefill/decode workers
  # Mirrors NVIDIA/srt-slurm PR #84 (ywang96/gb300-vllm) at SHA
  # 228febcfe9c76347cd619a7622af83ca52ca35a4. 8k/1k only — PR 84
  # publishes 5 recipes spanning low-conc (TP=4 decode) → mid (DP=4/8
  # decode + DP=4 prefill workers) → max (14p1d-dep4-dep16, 18 nodes).
  # Each recipe rack-pins via its own sbatch_directives.segment.
  seq-len-configs:
    - isl: 8192
      osl: 1024
      search-space:
        # Low-conc / interactivity: 1 prefill (DP=4 + EP) + 1 decode (TP=4).
        # 2 nodes total. Decode is plain TP, no EP/DP.
        # NOTE(review): conc-list includes 16 but the CONFIG_FILE name lists
        # only c4-c8-c32-c64-c128-c256 — verify c16 is covered by that recipe.
        - conc-list: [4, 8, 16, 32, 64, 128, 256]
          prefill:
            num-worker: 1
            tp: 4
            ep: 4
            dp-attn: true
          additional-settings:
            - "CONFIG_FILE=recipes/vllm/deepseek-v4/8k1k/disagg-gb300-1p1d-dep4-tp4-c4-c8-c32-c64-c128-c256.yaml"
          decode:
            num-worker: 1
            tp: 4
            ep: 1
            dp-attn: false
        # Mid-low: 1 prefill (DP=4) + 1 decode (DP=4 + EP). 2 nodes total.
        # Decode swings to DP+EP at conc 256/512 to spread the MoE experts.
        # (conc 256 appears in both this and the previous stanza, so it is
        # measured under both decode layouts.)
        - conc-list: [256, 512]
          prefill:
            num-worker: 1
            tp: 4
            ep: 4
            dp-attn: true
          additional-settings:
            - "CONFIG_FILE=recipes/vllm/deepseek-v4/8k1k/disagg-gb300-1p1d-dep4-dep4-c512.yaml"
          decode:
            num-worker: 1
            tp: 4
            ep: 4
            dp-attn: true
        # Mid-high: 6 prefills (DP=4 each) + 1 decode (DP=8 + EP). 10 nodes
        # per upstream resources block (decode_nodes:4 verbatim from PR 84).
        - conc-list: [1024, 2048]
          prefill:
            num-worker: 6
            tp: 4
            ep: 4
            dp-attn: true
          additional-settings:
            - "CONFIG_FILE=recipes/vllm/deepseek-v4/8k1k/disagg-gb300-6p1d-dep4-dep8-32-c2048.yaml"
          decode:
            num-worker: 1
            tp: 8
            ep: 8
            dp-attn: true
        # High: 12 prefills (DP=4 each) + 1 wide decode (DP=16 + EP). 16 nodes.
        # NOTE(review): the "-56-" in this filename does not match the
        # 12*4 + 16 = 64 GPUs implied here (the 6p1d/14p1d names encode total
        # GPU count: 32 and 72) — verify the referenced file carries this name.
        - conc-list: [3072, 4096]
          prefill:
            num-worker: 12
            tp: 4
            ep: 4
            dp-attn: true
          additional-settings:
            - "CONFIG_FILE=recipes/vllm/deepseek-v4/8k1k/disagg-gb300-12p1d-dep4-dep16-56-c4096.yaml"
          decode:
            num-worker: 1
            tp: 16
            ep: 16
            dp-attn: true
        # Max: 14 prefills (DP=4 each) + 1 wide decode (DP=16 + EP). 18 nodes
        # — fills exactly one cw rack.
        - conc-list: [6144, 8192]
          prefill:
            num-worker: 14
            tp: 4
            ep: 4
            dp-attn: true
          additional-settings:
            - "CONFIG_FILE=recipes/vllm/deepseek-v4/8k1k/disagg-gb300-14p1d-dep4-dep16-72-c8192.yaml"
          decode:
            num-worker: 1
            tp: 16
            ep: 16
            dp-attn: true
5 changes: 5 additions & 0 deletions .github/configs/runners.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -139,3 +139,8 @@ gb300:
- 'gb300-nv_0'
- 'gb300-nv_1'
- 'gb300-nv_2'
# Self-hosted runner labels for the gb300-cw cluster; sweep entries with
# `runner: gb300-cw` may be scheduled on any of these four runners.
gb300-cw:
  - 'gb300-cw_0'
  - 'gb300-cw_1'
  - 'gb300-cw_2'
  - 'gb300-cw_3'
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
# Disaggregated DeepSeek-V4-Pro FP4 serving recipe: 12 prefill workers
# (DP=4 each, 1 node/worker) + 1 wide decode worker (DP=16 across 4 nodes).
# NOTE(review): indentation below reconstructed after extraction flattened
# the original file — confirm nesting against the source repo.
name: "dsv4-vllm-disagg-gb300-12p1d-dep4-dep16"

# Mirrors NVIDIA/srt-slurm PR #84 (ywang96/gb300-vllm) at SHA
# 228febcfe9c76347cd619a7622af83ca52ca35a4. High 8k/1k:
# 12 prefills (DP=4 each) + 1 wide decode (DP=16). 16 nodes total.
# Fits within one cw rack (18 nodes).

model:
  path: "deepseek-v4-pro"
  # Digest-pinned image; matches the sweep entry in nvidia-master.yaml.
  container: "vllm/vllm-openai@sha256:d29a90b13bb9758821839dd810db9679055e8adf7c670df9f0a432f45f2488a5"
  precision: "fp4"

dynamo:
  version: 1.0.2
  install: true

setup_script: vllm-container-deps.sh

# Extra #SBATCH directives. segment "16" = total node count (12 prefill +
# 4 decode); mem "0" = request all memory on each allocated node.
sbatch_directives:
  segment: "16"
  mem: "0"

slurm:
  time_limit: "3:00:00"

# Readiness polling: up to 1440 attempts x 10 s (~4 h ceiling).
health_check:
  max_attempts: 1440
  interval_seconds: 10

resources:
  gpu_type: "gb300"
  gpus_per_node: 4
  prefill_nodes: 12  # one node per prefill worker
  decode_nodes: 4    # 4 nodes x 4 GPUs = 16 GPUs for the single decode worker
  prefill_workers: 12
  decode_workers: 1
  gpus_per_prefill: 4
  gpus_per_decode: 16

frontend:
  type: dynamo
  enable_multiple_frontends: false

backend:
  type: vllm
  connector: null

# NCCL tuned for the MNNVL/NVLink fabric; UCX restricted to
# cuda_copy/cuda_ipc/tcp transports with MNNVL CUDA-IPC enabled.
prefill_environment:
  TILELANG_CLEANUP_TEMP_FILES: "1"
  VLLM_LOG_STATS_INTERVAL: "1"
  VLLM_USE_NCCL_SYMM_MEM: "1"
  NCCL_CUMEM_ENABLE: "1"
  NCCL_MNNVL_ENABLE: "1"
  NCCL_NVLS_ENABLE: "1"
  NCCL_P2P_LEVEL: NVL
  UCX_MEMTYPE_CACHE: "n"
  UCX_MEMTYPE_REG_WHOLE: "n"
  UCX_TLS: "cuda_copy,cuda_ipc,tcp"
  UCX_CUDA_IPC_ENABLE_MNNVL: "y"
  PYTORCH_CUDA_ALLOC_CONF: "expandable_segments:True"

# Same as prefill_environment minus PYTORCH_CUDA_ALLOC_CONF.
decode_environment:
  TILELANG_CLEANUP_TEMP_FILES: "1"
  VLLM_LOG_STATS_INTERVAL: "1"
  VLLM_USE_NCCL_SYMM_MEM: "1"
  NCCL_CUMEM_ENABLE: "1"
  NCCL_MNNVL_ENABLE: "1"
  NCCL_NVLS_ENABLE: "1"
  NCCL_P2P_LEVEL: NVL
  UCX_MEMTYPE_CACHE: "n"
  UCX_MEMTYPE_REG_WHOLE: "n"
  UCX_TLS: "cuda_copy,cuda_ipc,tcp"
  UCX_CUDA_IPC_ENABLE_MNNVL: "y"

vllm_config:
  # Per-prefill-worker flags: TP=1 x DP=4 = 4 GPUs (matches gpus_per_prefill),
  # eager mode (no cudagraphs), large batched-token budget for prefill.
  prefill:
    kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
    served-model-name: "deepseek-ai/DeepSeek-V4-Pro"
    kv-cache-dtype: "fp8"
    tensor-parallel-size: 1
    pipeline-parallel-size: 1
    data-parallel-size: 4
    data-parallel-rpc-port: 13345
    enable-expert-parallel: true
    enforce-eager: true
    max-model-len: 16384  # >= isl 8192 + osl 1024
    max-num-seqs: 10
    max-num-batched-tokens: 81920
    trust-remote-code: true
    no-enable-prefix-caching: true
    no-enable-flashinfer-autotune: true
    safetensors-load-strategy: "prefetch"
    no-async-scheduling: true
    block-size: 256
    gpu-memory-utilization: 0.92
    no-disable-hybrid-kv-cache-manager: true
    numa-bind: true
    offload-group-size: 3
    offload-num-in-group: 1
    offload-prefetch-step: 2
    tokenizer-mode: deepseek_v4
    enable-ep-weight-filter: true
    enable-sleep-mode: true

  # Decode worker: TP=1 x DP=16 = 16 GPUs (matches gpus_per_decode),
  # decode-only cudagraphs via compilation-config.
  decode:
    kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
    served-model-name: "deepseek-ai/DeepSeek-V4-Pro"
    kv-cache-dtype: "fp8"
    tensor-parallel-size: 1
    pipeline-parallel-size: 1
    data-parallel-size: 16
    data-parallel-rpc-port: 13345
    enable-expert-parallel: true
    max-model-len: 16384
    max-num-seqs: 512
    max-cudagraph-capture-size: 512
    max-num-batched-tokens: 512
    safetensors-load-strategy: "prefetch"
    trust-remote-code: true
    no-enable-prefix-caching: true
    block-size: 256
    compilation-config: '{"cudagraph_mode":"FULL_DECODE_ONLY","mode":0}'
    gpu-memory-utilization: 0.9
    stream-interval: 50
    no-disable-hybrid-kv-cache-manager: true
    tokenizer-mode: deepseek_v4
    enable-ep-weight-filter: true
    all2all-backend: "flashinfer_nvlink_one_sided"
    enable-sleep-mode: true

# 8k/1k benchmark at concurrencies 3072 and 4096 (matches the sweep's
# conc-list for this recipe), unthrottled request rate.
benchmark:
  type: "sa-bench"
  isl: 8192
  osl: 1024
  concurrencies: "3072x4096"
  req_rate: "inf"
  use_chat_template: false
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
# Disaggregated DeepSeek-V4-Pro FP4 serving recipe: 14 prefill workers
# (DP=4 each, 1 node/worker) + 1 wide decode worker (DP=16 across 4 nodes).
# NOTE(review): indentation below reconstructed after extraction flattened
# the original file — confirm nesting against the source repo.
name: "dsv4-vllm-disagg-gb300-14p1d-dep4-dep16"

# Mirrors NVIDIA/srt-slurm PR #84 (ywang96/gb300-vllm) at SHA
# 228febcfe9c76347cd619a7622af83ca52ca35a4. Max 8k/1k:
# 14 prefills (DP=4 each) + 1 wide decode (DP=16). 18 nodes total —
# fills exactly one cw rack.

model:
  path: "deepseek-v4-pro"
  # Digest-pinned image; matches the sweep entry in nvidia-master.yaml.
  container: "vllm/vllm-openai@sha256:d29a90b13bb9758821839dd810db9679055e8adf7c670df9f0a432f45f2488a5"
  precision: "fp4"

dynamo:
  version: 1.0.2
  install: true

setup_script: vllm-container-deps.sh

# Extra #SBATCH directives. segment "18" = total node count (14 prefill +
# 4 decode = one full cw rack); mem "0" = all memory on each node.
sbatch_directives:
  segment: "18"
  mem: "0"

slurm:
  time_limit: "3:00:00"

# Readiness polling: up to 1440 attempts x 10 s (~4 h ceiling).
health_check:
  max_attempts: 1440
  interval_seconds: 10

resources:
  gpu_type: "gb300"
  gpus_per_node: 4
  prefill_nodes: 14  # one node per prefill worker
  decode_nodes: 4    # 4 nodes x 4 GPUs = 16 GPUs for the single decode worker
  prefill_workers: 14
  decode_workers: 1
  gpus_per_prefill: 4
  gpus_per_decode: 16

frontend:
  type: dynamo
  enable_multiple_frontends: false

backend:
  type: vllm
  connector: null

# NCCL tuned for the MNNVL/NVLink fabric; UCX restricted to
# cuda_copy/cuda_ipc/tcp transports with MNNVL CUDA-IPC enabled.
prefill_environment:
  TILELANG_CLEANUP_TEMP_FILES: "1"
  VLLM_LOG_STATS_INTERVAL: "1"
  VLLM_USE_NCCL_SYMM_MEM: "1"
  NCCL_CUMEM_ENABLE: "1"
  NCCL_MNNVL_ENABLE: "1"
  NCCL_NVLS_ENABLE: "1"
  NCCL_P2P_LEVEL: NVL
  UCX_MEMTYPE_CACHE: "n"
  UCX_MEMTYPE_REG_WHOLE: "n"
  UCX_TLS: "cuda_copy,cuda_ipc,tcp"
  UCX_CUDA_IPC_ENABLE_MNNVL: "y"
  PYTORCH_CUDA_ALLOC_CONF: "expandable_segments:True"

# Same as prefill_environment minus PYTORCH_CUDA_ALLOC_CONF.
decode_environment:
  TILELANG_CLEANUP_TEMP_FILES: "1"
  VLLM_LOG_STATS_INTERVAL: "1"
  VLLM_USE_NCCL_SYMM_MEM: "1"
  NCCL_CUMEM_ENABLE: "1"
  NCCL_MNNVL_ENABLE: "1"
  NCCL_NVLS_ENABLE: "1"
  NCCL_P2P_LEVEL: NVL
  UCX_MEMTYPE_CACHE: "n"
  UCX_MEMTYPE_REG_WHOLE: "n"
  UCX_TLS: "cuda_copy,cuda_ipc,tcp"
  UCX_CUDA_IPC_ENABLE_MNNVL: "y"

vllm_config:
  # Per-prefill-worker flags: TP=1 x DP=4 = 4 GPUs (matches gpus_per_prefill),
  # eager mode (no cudagraphs), large batched-token budget for prefill.
  prefill:
    kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
    served-model-name: "deepseek-ai/DeepSeek-V4-Pro"
    kv-cache-dtype: "fp8"
    tensor-parallel-size: 1
    pipeline-parallel-size: 1
    data-parallel-size: 4
    data-parallel-rpc-port: 13345
    enable-expert-parallel: true
    enforce-eager: true
    max-model-len: 16384  # >= isl 8192 + osl 1024
    max-num-seqs: 10
    max-num-batched-tokens: 81920
    trust-remote-code: true
    no-enable-prefix-caching: true
    no-enable-flashinfer-autotune: true
    safetensors-load-strategy: "prefetch"
    no-async-scheduling: true
    block-size: 256
    gpu-memory-utilization: 0.92
    no-disable-hybrid-kv-cache-manager: true
    numa-bind: true
    offload-group-size: 3
    offload-num-in-group: 1
    offload-prefetch-step: 2
    tokenizer-mode: deepseek_v4
    enable-ep-weight-filter: true
    enable-sleep-mode: true

  # Decode worker: TP=1 x DP=16 = 16 GPUs (matches gpus_per_decode),
  # decode-only cudagraphs via compilation-config.
  decode:
    kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
    served-model-name: "deepseek-ai/DeepSeek-V4-Pro"
    kv-cache-dtype: "fp8"
    tensor-parallel-size: 1
    pipeline-parallel-size: 1
    data-parallel-size: 16
    data-parallel-rpc-port: 13345
    enable-expert-parallel: true
    max-model-len: 16384
    max-num-seqs: 512
    max-cudagraph-capture-size: 512
    max-num-batched-tokens: 512
    safetensors-load-strategy: "prefetch"
    trust-remote-code: true
    no-enable-prefix-caching: true
    block-size: 256
    compilation-config: '{"cudagraph_mode":"FULL_DECODE_ONLY","mode":0}'
    gpu-memory-utilization: 0.9
    stream-interval: 50
    no-disable-hybrid-kv-cache-manager: true
    tokenizer-mode: deepseek_v4
    enable-ep-weight-filter: true
    all2all-backend: "flashinfer_nvlink_one_sided"
    enable-sleep-mode: true

# 8k/1k benchmark at concurrencies 6144 and 8192 (matches the sweep's
# conc-list for this recipe), unthrottled request rate.
benchmark:
  type: "sa-bench"
  isl: 8192
  osl: 1024
  concurrencies: "6144x8192"
  req_rate: "inf"
  use_chat_template: false
Loading
Loading