From 77f602be692735883794261cea76666b30d25d0d Mon Sep 17 00:00:00 2001
From: functionstackx <47992694+functionstackx@users.noreply.github.com>
Date: Fri, 17 Apr 2026 02:04:01 -0400
Subject: [PATCH 1/2] Add B300 config: dsr1-fp8-sglang (non-MTP)

At the time of submission, the SGLang DSR1 cookbook does not have a
B300-specific recipe, so this config reuses the existing B200 DSR1 FP8
SGLang recipe as-is until B300-specific tuning is available.

Image bumped to v0.5.10.post1-cu130 to match the standard B300 SGLang
image used by other B300 configs.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 .github/configs/nvidia-master.yaml      |  22 +++++
 benchmarks/single_node/dsr1_fp8_b300.sh | 113 ++++++++++++++++++++++++
 perf-changelog.yaml                     |   8 ++
 3 files changed, 143 insertions(+)
 create mode 100644 benchmarks/single_node/dsr1_fp8_b300.sh

diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index 47d66aa2d..3015cbf16 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -1774,6 +1774,28 @@ dsr1-fp8-b200-sglang:
         - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }
         - { tp: 4, ep: 1, conc-start: 4, conc-end: 32 }
 
+# NOTE: At the time of submission, https://cookbook.sglang.io/autoregressive/DeepSeek/DeepSeek-R1
+# does not have a B300-specific recipe, so this config reuses the existing DSR1 FP8
+# B200 SGLang recipe as-is until B300-specific tuning is available.
+dsr1-fp8-b300-sglang:
+  image: lmsysorg/sglang:v0.5.10.post1-cu130
+  model: deepseek-ai/DeepSeek-R1-0528
+  model-prefix: dsr1
+  runner: b300
+  precision: fp8
+  framework: sglang
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 8, ep: 1, conc-start: 4, conc-end: 64 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 }
+        - { tp: 4, ep: 1, conc-start: 4, conc-end: 32 }
+
 qwen3.5-bf16-b200-sglang:
   image: lmsysorg/sglang:nightly-dev-20260216-d3bae71e
   model: Qwen/Qwen3.5-397B-A17B
diff --git a/benchmarks/single_node/dsr1_fp8_b300.sh b/benchmarks/single_node/dsr1_fp8_b300.sh
new file mode 100644
index 000000000..f0ebda29e
--- /dev/null
+++ b/benchmarks/single_node/dsr1_fp8_b300.sh
@@ -0,0 +1,113 @@
+#!/usr/bin/env bash
+
+# NOTE: At the time of submission, https://cookbook.sglang.io/autoregressive/DeepSeek/DeepSeek-R1
+# does not have a B300-specific recipe, so this script reuses the existing
+# DSR1 FP8 B200 SGLang recipe as-is until B300-specific tuning is available.
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    CONC \
+    ISL \
+    OSL \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME \
+    EP_SIZE
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+nvidia-smi
+
+hf download "$MODEL"
+
+export SGL_ENABLE_JIT_DEEPGEMM=false
+export SGLANG_ENABLE_FLASHINFER_GEMM=true
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+# Default: recv every ~10 requests; if CONC ≥ 16, relax to ~30 requests between scheduler recv polls.
+if [[ $TP -eq 8 ]]; then
+    if [[ $CONC -ge 16 ]]; then
+        SCHEDULER_RECV_INTERVAL=30
+    else
+        SCHEDULER_RECV_INTERVAL=10
+    fi
+
+    # Setting these values (passed to --cuda-graph-max-bs and --max-running-requests) to the maximum
+    # concurrency keeps memory from being allocated unnecessarily.
+    MAX_RUNNING_REQUESTS=128
+    CUDA_GRAPH_MAX_BATCH_SIZE=128
+
+    MEM_FRAC_STATIC=0.82
+    CHUNKED_PREFILL_SIZE=32768
+    MAX_PREFILL_TOKENS=32768
+elif [[ $TP -eq 4 ]]; then
+    if [[ $ISL -ne 8192 ]] || [[ $OSL -ne 1024 ]]; then
+        echo "TP=4 not yet supported for ISL=$ISL OSL=$OSL!"
+        exit 1
+    fi
+
+    # Setting these values (passed to --cuda-graph-max-bs and --max-running-requests) to the maximum
+    # concurrency keeps memory from being allocated unnecessarily.
+    MAX_RUNNING_REQUESTS=32
+    CUDA_GRAPH_MAX_BATCH_SIZE=32
+
+    MEM_FRAC_STATIC=0.95
+    CHUNKED_PREFILL_SIZE=8192
+    MAX_PREFILL_TOKENS=8192
+
+    SCHEDULER_RECV_INTERVAL=10
+else
+    echo "Unrecognized TP size $TP!"
+    exit 1
+fi
+echo "SCHEDULER_RECV_INTERVAL: $SCHEDULER_RECV_INTERVAL, CONC: $CONC, ISL: $ISL, OSL: $OSL"
+
+EVAL_CONTEXT_ARGS=""
+if [ "${EVAL_ONLY}" = "true" ]; then
+    setup_eval_context
+    EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+fi
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+set -x
+PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \
+--tensor-parallel-size=$TP --data-parallel-size=1 \
+--cuda-graph-max-bs $CUDA_GRAPH_MAX_BATCH_SIZE --max-running-requests $MAX_RUNNING_REQUESTS \
+--mem-fraction-static $MEM_FRAC_STATIC --kv-cache-dtype fp8_e4m3 --chunked-prefill-size $CHUNKED_PREFILL_SIZE --max-prefill-tokens $MAX_PREFILL_TOKENS \
+--enable-flashinfer-allreduce-fusion --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL --disable-radix-cache \
+--attention-backend trtllm_mla --stream-interval 30 --ep-size $EP_SIZE --moe-runner-backend flashinfer_trtllm --quantization fp8 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+pip install -q datasets pandas
+
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts "$((CONC * 10))" \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT"
+    append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 75fae41e3..2928b7fa0 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1404,3 +1404,11 @@
     - "Image: lmsysorg/sglang:v0.5.10.post1-cu130"
     - "At the time of submission, https://cookbook.sglang.io/autoregressive/DeepSeek/DeepSeek-R1 does not have a B300-specific recipe, so this reuses the existing DSR1 FP4 B200 SGLang recipe as-is"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1049
+
+- config-keys:
+    - dsr1-fp8-b300-sglang
+  description:
+    - "Add DeepSeek-R1-0528 FP8 B300 SGLang benchmark (non-MTP)"
+    - "Image: lmsysorg/sglang:v0.5.10.post1-cu130"
+    - "At the time of submission, https://cookbook.sglang.io/autoregressive/DeepSeek/DeepSeek-R1 does not have a B300-specific recipe, so this reuses the existing DSR1 FP8 B200 SGLang recipe as-is"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXXX

From 43e2882508e5f1b9025befc4d1f2eac6f6b9ec8f Mon Sep 17 00:00:00 2001
From: functionstackx <47992694+functionstackx@users.noreply.github.com>
Date: Fri, 17 Apr 2026 02:04:23 -0400
Subject: [PATCH 2/2] Fill in PR link for dsr1-fp8-b300-sglang changelog entry

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 perf-changelog.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 2928b7fa0..67237ec1b 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1411,4 +1411,4 @@
     - "Add DeepSeek-R1-0528 FP8 B300 SGLang benchmark (non-MTP)"
     - "Image: lmsysorg/sglang:v0.5.10.post1-cu130"
     - "At the time of submission, https://cookbook.sglang.io/autoregressive/DeepSeek/DeepSeek-R1 does not have a B300-specific recipe, so this reuses the existing DSR1 FP8 B200 SGLang recipe as-is"
-  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXXX
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1050