From d0a7b8f950742f74760e1e6ca07607083d057db1 Mon Sep 17 00:00:00 2001 From: functionstackx <47992694+functionstackx@users.noreply.github.com> Date: Fri, 17 Apr 2026 04:28:29 -0400 Subject: [PATCH 1/3] Add B300 config: kimi-k2.5-fp4-vllm At the time of submission, the vLLM Kimi-K2.5 recipes page (https://docs.vllm.ai/projects/recipes/en/latest/moonshotai/Kimi-K2.5.html) does not have a B300-specific recipe, so this config reuses the existing Kimi-K2.5 FP4 (NVFP4) B200 vLLM recipe as-is until B300-specific tuning is available. Co-Authored-By: Claude Opus 4.7 (1M context) --- .github/configs/nvidia-master.yaml | 23 ++++++ benchmarks/single_node/kimik2.5_fp4_b300.sh | 80 +++++++++++++++++++++ perf-changelog.yaml | 8 +++ 3 files changed, 111 insertions(+) create mode 100755 benchmarks/single_node/kimik2.5_fp4_b300.sh diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index 26b380de9..2252bcf89 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -2045,6 +2045,29 @@ kimik2.5-fp4-b200-vllm: - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 } - { tp: 4, ep: 1, conc-start: 4, conc-end: 64 } +# NOTE: At the time of submission, https://docs.vllm.ai/projects/recipes/en/latest/moonshotai/Kimi-K2.5.html +# does not have a B300-specific recipe, so this config reuses the existing +# Kimi-K2.5 FP4 B200 vLLM recipe as-is until B300-specific tuning is available. +kimik2.5-fp4-b300-vllm: + image: vllm/vllm-openai:v0.17.0 + model: nvidia/Kimi-K2.5-NVFP4 + model-prefix: kimik2.5 + runner: b300 + precision: fp4 + framework: vllm + multinode: false + seq-len-configs: + - isl: 1024 + osl: 1024 + search-space: + - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 } + - { tp: 4, ep: 1, conc-start: 4, conc-end: 64 } + - isl: 8192 + osl: 1024 + search-space: + - { tp: 8, ep: 1, conc-start: 4, conc-end: 4 } + - { tp: 4, ep: 1, conc-start: 4, conc-end: 64 } + dsr1-fp8-b200-sglang-mtp: image: lmsysorg/sglang:v0.5.9-cu130 model: deepseek-ai/DeepSeek-R1-0528 diff --git a/benchmarks/single_node/kimik2.5_fp4_b300.sh b/benchmarks/single_node/kimik2.5_fp4_b300.sh new file mode 100755 index 000000000..ad636f6ed --- /dev/null +++ b/benchmarks/single_node/kimik2.5_fp4_b300.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +# NOTE: At the time of submission, https://docs.vllm.ai/projects/recipes/en/latest/moonshotai/Kimi-K2.5.html +# does not have a B300-specific recipe, so this script reuses the existing +# Kimi-K2.5 FP4 B200 vLLM recipe as-is until B300-specific tuning is available. + +source "$(dirname "$0")/../benchmark_lib.sh" + +check_env_vars \ + MODEL \ + TP \ + CONC \ + ISL \ + OSL \ + MAX_MODEL_LEN \ + RANDOM_RANGE_RATIO \ + RESULT_FILENAME + +if [[ -n "$SLURM_JOB_ID" ]]; then + echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME" +fi + +hf download "$MODEL" + +nvidia-smi + +export TORCH_CUDA_ARCH_LIST="10.0" +export PYTHONNOUSERSITE=1 + +SERVER_LOG=/workspace/server.log +PORT=${PORT:-8888} + +if [ "${EVAL_ONLY}" = "true" ]; then + setup_eval_context + MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN" +fi +# Start GPU monitoring (power, temperature, clocks every second) +start_gpu_monitor + +set -x +vllm serve $MODEL --host 0.0.0.0 --port $PORT \ +--tensor-parallel-size=$TP \ +--gpu-memory-utilization 0.90 \ +--max-model-len $MAX_MODEL_LEN \ +--max-num-seqs $CONC \ +--reasoning-parser kimi_k2 \ +--tool-call-parser kimi_k2 \ +--compilation_config.pass_config.fuse_allreduce_rms true \ +--no-enable-prefix-caching \ +--trust-remote-code > $SERVER_LOG 2>&1 & + +SERVER_PID=$! 
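+# Keep the background server's PID: it is handed to wait_for_server_ready
+# below, presumably so startup polling can abort if the server process dies.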
+ +# Wait for server to be ready +wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID" + +pip install -q datasets pandas + +run_benchmark_serving \ + --model "$MODEL" \ + --port "$PORT" \ + --backend vllm \ + --input-len "$ISL" \ + --output-len "$OSL" \ + --random-range-ratio "$RANDOM_RANGE_RATIO" \ + --num-prompts $(( CONC * 10 )) \ + --max-concurrency "$CONC" \ + --result-filename "$RESULT_FILENAME" \ + --result-dir /workspace/ \ + --trust-remote-code + +# After throughput, run evaluation only if RUN_EVAL is true +if [ "${RUN_EVAL}" = "true" ]; then + run_eval --framework lm-eval --port "$PORT" + append_lm_eval_summary +fi + +# Stop GPU monitoring +stop_gpu_monitor +set +x diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 062af89ba..35e7ab1c1 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1460,3 +1460,11 @@ - "Image: vllm/vllm-openai:v0.19.0-cu130" - "At the time of submission, https://docs.vllm.ai/projects/recipes/en/latest/MiniMax/MiniMax-M2.html does not have a B300-specific recipe, so this reuses the existing MiniMax-M2.5 FP8 B200 vLLM recipe as-is" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1054 + +- config-keys: + - kimik2.5-fp4-b300-vllm + description: + - "Add Kimi-K2.5 FP4 (NVFP4) B300 vLLM benchmark" + - "Image: vllm/vllm-openai:v0.17.0" + - "At the time of submission, https://docs.vllm.ai/projects/recipes/en/latest/moonshotai/Kimi-K2.5.html does not have a B300-specific recipe, so this reuses the existing Kimi-K2.5 FP4 B200 vLLM recipe as-is" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXXX From 7ad24761107e933db86ce0e09d23250e584884af Mon Sep 17 00:00:00 2001 From: functionstackx <47992694+functionstackx@users.noreply.github.com> Date: Fri, 17 Apr 2026 04:28:51 -0400 Subject: [PATCH 2/3] Fill in PR link for kimi-k2.5-fp4-b300-vllm changelog entry Co-Authored-By: Claude Opus 4.7 (1M context) --- perf-changelog.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 35e7ab1c1..a82b06e8f 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1467,4 +1467,4 @@ - "Add Kimi-K2.5 FP4 (NVFP4) B300 vLLM benchmark" - "Image: vllm/vllm-openai:v0.17.0" - "At the time of submission, https://docs.vllm.ai/projects/recipes/en/latest/moonshotai/Kimi-K2.5.html does not have a B300-specific recipe, so this reuses the existing Kimi-K2.5 FP4 B200 vLLM recipe as-is" - pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXXX + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1056 From fe13a8d61273eef37029fb55033741c1cd06990c Mon Sep 17 00:00:00 2001 From: functionstackx <47992694+functionstackx@users.noreply.github.com> Date: Fri, 17 Apr 2026 05:06:20 -0400 Subject: [PATCH 3/3] Bump B300 image to vllm/vllm-openai:v0.19.0-cu130 Align with the standard B300 vLLM image used by other B300 vLLM configs. Co-Authored-By: Claude Opus 4.7 (1M context) --- .github/configs/nvidia-master.yaml | 2 +- perf-changelog.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index 2252bcf89..9ea0644b4 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -2049,7 +2049,7 @@ kimik2.5-fp4-b200-vllm: # does not have a B300-specific recipe, so this config reuses the existing # Kimi-K2.5 FP4 B200 vLLM recipe as-is until B300-specific tuning is available. 
kimik2.5-fp4-b300-vllm: - image: vllm/vllm-openai:v0.17.0 + image: vllm/vllm-openai:v0.19.0-cu130 model: nvidia/Kimi-K2.5-NVFP4 model-prefix: kimik2.5 runner: b300 diff --git a/perf-changelog.yaml b/perf-changelog.yaml index a82b06e8f..827919941 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1465,6 +1465,6 @@ - kimik2.5-fp4-b300-vllm description: - "Add Kimi-K2.5 FP4 (NVFP4) B300 vLLM benchmark" - - "Image: vllm/vllm-openai:v0.17.0" + - "Image: vllm/vllm-openai:v0.19.0-cu130" - "At the time of submission, https://docs.vllm.ai/projects/recipes/en/latest/moonshotai/Kimi-K2.5.html does not have a B300-specific recipe, so this reuses the existing Kimi-K2.5 FP4 B200 vLLM recipe as-is" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1056
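
Usage sketch: a minimal manual invocation for one point of the new config's
search space (isl=1024, osl=1024, tp=4, conc=4). In CI the harness derives
the environment that check_env_vars requires from nvidia-master.yaml; the
MAX_MODEL_LEN and RANDOM_RANGE_RATIO values below are illustrative
assumptions, not values taken from the recipe:

    # Hypothetical manual run. MAX_MODEL_LEN is assumed >= ISL + OSL and
    # RANDOM_RANGE_RATIO is an assumed placeholder; the harness supplies
    # the real values. RESULT_FILENAME is likewise an arbitrary example.
    MODEL=nvidia/Kimi-K2.5-NVFP4 TP=4 CONC=4 ISL=1024 OSL=1024 \
    MAX_MODEL_LEN=4096 RANDOM_RANGE_RATIO=1.0 \
    RESULT_FILENAME=kimik2.5_fp4_b300_tp4_conc4.json \
    bash benchmarks/single_node/kimik2.5_fp4_b300.sh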