diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index 9d99372b2..05b19d802 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -1706,6 +1706,27 @@ dsv4-fp4-b200-sglang:
     # DP-attention (DP_ATTENTION=true) — max-throughput CONC range
     - { tp: 8, ep: 8, dp-attn: true, conc-start: 256, conc-end: 512 }
 
+dsv4-fp4-b200-vllm:
+  image: vllm/vllm-openai:deepseekv4-cu130
+  model: deepseek-ai/DeepSeek-V4-Pro
+  model-prefix: dsv4
+  runner: b200-dsv4
+  precision: fp4
+  framework: vllm
+  multinode: false
+  seq-len-configs:
+  - isl: 1024
+    osl: 1024
+    search-space:
+    - { tp: 8, conc-start: 1, conc-end: 64 }
+    - { tp: 8, ep: 8, conc-start: 128, conc-end: 128 }
+    - { tp: 8, ep: 8, dp-attn: true, conc-start: 256, conc-end: 4096 }
+  - isl: 8192
+    osl: 1024
+    search-space:
+    - { tp: 8, conc-start: 1, conc-end: 32 }
+    - { tp: 8, ep: 8, dp-attn: true, conc-start: 64, conc-end: 1024 }
+
 # NOTE: At the time of submission, https://cookbook.sglang.io/autoregressive/DeepSeek/DeepSeek-R1
 # does not have a B300-specific recipe, so this config reuses the existing DSR1 FP4
 # B200 SGLang recipe as-is until B300-specific tuning is available.
diff --git a/benchmarks/single_node/dsv4_fp4_b200_vllm.sh b/benchmarks/single_node/dsv4_fp4_b200_vllm.sh
new file mode 100755
index 000000000..d37dc5282
--- /dev/null
+++ b/benchmarks/single_node/dsv4_fp4_b200_vllm.sh
@@ -0,0 +1,123 @@
+#!/usr/bin/env bash
+
+# DeepSeek-V4-Pro B200 single-node vLLM recipe derived from the B200 pareto
+# sweep. dp-attn=true maps the sweep's TP value to the data-parallel size;
+# EP_SIZE > 1 enables expert parallelism in either mode.
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+  MODEL \
+  TP \
+  DP_ATTENTION \
+  CONC \
+  ISL \
+  OSL \
+  MAX_MODEL_LEN \
+  RANDOM_RANGE_RATIO \
+  RESULT_FILENAME
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+  echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+nvidia-smi
+
+hf download "$MODEL"
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+# DeepSeek-V4-Pro weights are large; engine startup can exceed the default
+# 600s. Give it an hour to load.
+export VLLM_ENGINE_READY_TIMEOUT_S=3600
+
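+# Map the sweep's parallelism onto vLLM flags: with DP-attention the sweep's
+# TP value becomes the data-parallel size (attention runs data-parallel with
+# TP=1); otherwise it is passed through as the tensor-parallel size.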
+PARALLEL_ARGS=(--tensor-parallel-size "$TP" --data-parallel-size 1)
+if [ "${DP_ATTENTION}" = "true" ]; then
+  PARALLEL_ARGS=(--tensor-parallel-size 1 --data-parallel-size "$TP")
+fi
+
+EP_ARGS=()
+if [ "${EP_SIZE:-1}" -gt 1 ]; then
+  EP_ARGS=(--enable-expert-parallel)
+fi
+
+GMU_ARGS=()
+if [ "${DP_ATTENTION}" = "true" ]; then
+  GMU_ARGS=(--gpu-memory-utilization 0.85)
+fi
+
+if [ "${ISL}" -eq 8192 ] && [ "${CONC}" -le 128 ]; then
+  MAX_NUM_BATCHED_TOKENS=${ISL}
+else
+  MAX_NUM_BATCHED_TOKENS=2048
+fi
+
+BENCHMARK_MAX_MODEL_LEN="$MAX_MODEL_LEN"
+if [ "$ISL" -eq 1024 ] && [ "$OSL" -eq 1024 ]; then
+  BENCHMARK_MAX_MODEL_LEN=4096
+fi
+
+if [ "${EVAL_ONLY}" = "true" ]; then
+  EVAL_MAX_MODEL_LEN=$(compute_eval_context_length "$MODEL" "$BENCHMARK_MAX_MODEL_LEN")
+  export EVAL_MAX_MODEL_LEN
+  SERVE_MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN"
+else
+  SERVE_MAX_MODEL_LEN="$BENCHMARK_MAX_MODEL_LEN"
+fi
+
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+set -x
+vllm serve "$MODEL" --host 0.0.0.0 --port "$PORT" \
+  "${PARALLEL_ARGS[@]}" \
+  --pipeline-parallel-size 1 \
+  --kv-cache-dtype fp8 \
+  --trust-remote-code \
+  --block-size 256 \
+  --no-enable-prefix-caching \
+  "${EP_ARGS[@]}" \
+  "${GMU_ARGS[@]}" \
+  --compilation-config '{"cudagraph_mode":"FULL_AND_PIECEWISE","custom_ops":["all"]}' \
+  --attention_config.use_fp4_indexer_cache True \
+  --tokenizer-mode deepseek_v4 \
+  --tool-call-parser deepseek_v4 \
+  --enable-auto-tool-choice \
+  --reasoning-parser deepseek_v4 \
+  --max-cudagraph-capture-size 2048 \
+  --max-model-len "$SERVE_MAX_MODEL_LEN" \
+  --max-num-batched-tokens "$MAX_NUM_BATCHED_TOKENS" > "$SERVER_LOG" 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+pip install -q datasets pandas
+
+run_benchmark_serving \
+  --model "$MODEL" \
+  --port "$PORT" \
+  --backend vllm \
+  --input-len "$ISL" \
+  --output-len "$OSL" \
+  --random-range-ratio "$RANDOM_RANGE_RATIO" \
+  --num-prompts "$((CONC * 10))" \
+  --max-concurrency "$CONC" \
+  --result-filename "$RESULT_FILENAME" \
+  --result-dir /workspace/ \
+  --trust-remote-code
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+  run_eval --framework lm-eval --port "$PORT"
+  append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 09a791e46..22b6743b5 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1898,3 +1898,12 @@
   description:
   - Add low-latency configs and remove non-pareto configs
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1193
+
+- config-keys:
+  - dsv4-fp4-b200-vllm
+  description:
+  - "Add DeepSeek-V4-Pro single-node B200 vLLM benchmark derived from B200 pareto sweep"
+  - "ISL=1024: TP8 conc 1-64; TP8+EP8 conc 128; DP8 (dp-attn) conc 256-4096"
+  - "ISL=8192: TP8 conc 1-32; DP8 (dp-attn) conc 64-1024"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1156
+
diff --git a/runners/launch_b200-cw.sh b/runners/launch_b200-cw.sh
index ec7ba9a97..0b2dbf305 100644
--- a/runners/launch_b200-cw.sh
+++ b/runners/launch_b200-cw.sh
@@ -6,6 +6,16 @@ export PORT=8888
 MODEL_CODE="${EXP_NAME%%_*}"
 FRAMEWORK_SUFFIX=$([[ "$FRAMEWORK" == "trt" ]] && printf '_trt' || printf '')
 SPEC_SUFFIX=$([[ "$SPEC_DECODING" == "mtp" ]] && printf '_mtp' || printf '')
+# Prefer a framework-tagged script (e.g. dsv4_fp4_b200_vllm.sh) so models
+# with multiple inference engines can coexist; fall back to the historical
+# name without an engine suffix (`_trt` for trt, bare for all other frameworks).
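+# E.g. FRAMEWORK=vllm resolves dsv4_fp4_b200_vllm.sh when it exists, while
+# FRAMEWORK=sglang falls back to the untagged dsv4_fp4_b200.sh.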
+BENCH_BASE="benchmarks/single_node/${MODEL_CODE}_${PRECISION}_b200"
+BENCH_SCRIPT="${BENCH_BASE}_${FRAMEWORK}${SPEC_SUFFIX}.sh"
+if [[ ! -f "$BENCH_SCRIPT" ]]; then
+  BENCH_SCRIPT="${BENCH_BASE}${FRAMEWORK_SUFFIX}${SPEC_SUFFIX}.sh"
+fi
 PARTITION="b200"
 
 SQUASH_FILE="/tmp/gharunner/squash/$(echo "$IMAGE" | sed 's/[\/:@#]/_/g').sqsh"
@@ -58,6 +68,6 @@ srun --jobid=$JOB_ID \
     --container-mount-home \
     --container-workdir=$CONTAINER_MOUNT_DIR \
     --no-container-entrypoint --export=ALL \
-bash benchmarks/single_node/${MODEL_CODE}_${PRECISION}_b200${FRAMEWORK_SUFFIX}${SPEC_SUFFIX}.sh
+bash "$BENCH_SCRIPT"
 
 scancel $JOB_ID
diff --git a/runners/launch_b200-dgxc.sh b/runners/launch_b200-dgxc.sh
index c07037ff4..edf5db957 100644
--- a/runners/launch_b200-dgxc.sh
+++ b/runners/launch_b200-dgxc.sh
@@ -253,6 +253,14 @@ else
   SQUASH_FILE="/home/sa-shared/containers/$(echo "$IMAGE" | sed 's/[\/:@#]/_/g').sqsh"
   FRAMEWORK_SUFFIX=$([[ "$FRAMEWORK" == "trt" ]] && printf '_trt' || printf '')
   SPEC_SUFFIX=$([[ "$SPEC_DECODING" == "mtp" ]] && printf '_mtp' || printf '')
+  # Prefer a framework-tagged script (e.g. dsv4_fp4_b200_vllm.sh) so models
+  # with multiple inference engines can coexist; fall back to the historical
+  # name without an engine suffix (`_trt` for trt, bare for all other frameworks).
+  BENCH_BASE="benchmarks/single_node/${EXP_NAME%%_*}_${PRECISION}_b200"
+  BENCH_SCRIPT="${BENCH_BASE}_${FRAMEWORK}${SPEC_SUFFIX}.sh"
+  if [[ ! -f "$BENCH_SCRIPT" ]]; then
+    BENCH_SCRIPT="${BENCH_BASE}${FRAMEWORK_SUFFIX}${SPEC_SUFFIX}.sh"
+  fi
   LOCK_FILE="${SQUASH_FILE}.lock"
 
   # TODO(Cam): lmsysorg/sglang:deepseek-v4-blackwell installs sglang editable at
@@ -290,5 +298,5 @@
     --no-container-mount-home \
     --container-workdir=$CONTAINER_MOUNT_DIR \
     --no-container-entrypoint --export=ALL,PORT=8888 \
-    bash benchmarks/single_node/${EXP_NAME%%_*}_${PRECISION}_b200${FRAMEWORK_SUFFIX}${SPEC_SUFFIX}.sh
+    bash "$BENCH_SCRIPT"
 fi
diff --git a/runners/launch_b200-nb.sh b/runners/launch_b200-nb.sh
index 6b411fec2..e0c8d92fb 100644
--- a/runners/launch_b200-nb.sh
+++ b/runners/launch_b200-nb.sh
@@ -4,6 +4,14 @@
 HF_HUB_CACHE_MOUNT="/mnt/data/gharunners/hf-hub-cache/"
 PARTITION="main"
 FRAMEWORK_SUFFIX=$([[ "$FRAMEWORK" == "trt" ]] && printf '_trt' || printf '')
 SPEC_SUFFIX=$([[ "$SPEC_DECODING" == "mtp" ]] && printf '_mtp' || printf '')
+# Prefer a framework-tagged script (e.g. dsv4_fp4_b200_vllm.sh) so models
+# with multiple inference engines can coexist; fall back to the historical
+# name without an engine suffix (`_trt` for trt, bare for all other frameworks).
+BENCH_BASE="benchmarks/single_node/${EXP_NAME%%_*}_${PRECISION}_b200"
+BENCH_SCRIPT="${BENCH_BASE}_${FRAMEWORK}${SPEC_SUFFIX}.sh"
+if [[ ! -f "$BENCH_SCRIPT" ]]; then
+  BENCH_SCRIPT="${BENCH_BASE}${FRAMEWORK_SUFFIX}${SPEC_SUFFIX}.sh"
+fi
 
 UCX_NET_DEVICES=eth0
@@ -27,4 +35,4 @@ srun --partition=$PARTITION --gres=gpu:$TP --exclusive --job-name="$RUNNER_NAME" \
     --container-writable \
     --container-workdir=$CONTAINER_MOUNT_DIR \
     --no-container-entrypoint --export=ALL,PORT=8888,UCX_NET_DEVICES=$UCX_NET_DEVICES \
-bash benchmarks/single_node/${EXP_NAME%%_*}_${PRECISION}_b200${FRAMEWORK_SUFFIX}${SPEC_SUFFIX}.sh
\ No newline at end of file
+bash "$BENCH_SCRIPT"
\ No newline at end of file