From b90400363b2b4db60ffafaba74c249fd3ee11702 Mon Sep 17 00:00:00 2001 From: hshrivastava-droid Date: Fri, 17 Apr 2026 11:35:19 -0700 Subject: [PATCH 1/4] update Gemm flag --- benchmarks/single_node/minimaxm2.5_fp8_b200.sh | 2 +- perf-changelog.yaml | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/benchmarks/single_node/minimaxm2.5_fp8_b200.sh b/benchmarks/single_node/minimaxm2.5_fp8_b200.sh index d892d72f1..0ba6437c4 100755 --- a/benchmarks/single_node/minimaxm2.5_fp8_b200.sh +++ b/benchmarks/single_node/minimaxm2.5_fp8_b200.sh @@ -24,7 +24,7 @@ hf download "$MODEL" SERVER_LOG=/workspace/server.log PORT=${PORT:-8888} -export VLLM_FLASHINFER_ALLREDUCE_BACKEND=mnnvl +export VLLM_FLOAT32_MATMUL_PRECISION=high if [ "$EP_SIZE" -gt 1 ]; then EP=" --enable-expert-parallel" diff --git a/perf-changelog.yaml b/perf-changelog.yaml index f4ba37423..5e39671fe 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1473,3 +1473,9 @@ - "Expand GPT-OSS 120B FP4 MI300X TP=1 concurrency from 64 to 256 for 1k1k" - "Higher concurrency improves MoE weight amortization: 8552 total TPS at conc=256 vs 4016 at conc=64 (2.1x)" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1053 + +- config-keys: + - minimaxm2.5-fp8-b200-vllm + description: + - "Add VLLM_FLOAT32_MATMUL_PRECISION=high, remove VLLM_FLASHINFER_ALLREDUCE_BACKEND=mnnvl" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/XXX From 300a69de1471f8a0375b5e193d450a5d31d67cca Mon Sep 17 00:00:00 2001 From: hshrivastava-droid Date: Fri, 17 Apr 2026 12:06:10 -0700 Subject: [PATCH 2/4] update PR number --- perf-changelog.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 5e39671fe..30b425ba0 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1478,4 +1478,5 @@ - minimaxm2.5-fp8-b200-vllm description: - "Add VLLM_FLOAT32_MATMUL_PRECISION=high, remove VLLM_FLASHINFER_ALLREDUCE_BACKEND=mnnvl" - pr-link: 
https://github.com/SemiAnalysisAI/InferenceX/pull/XXX + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1068 + From dbcd37352011e6be3276e5d64aff3c761aafe96c Mon Sep 17 00:00:00 2001 From: hshrivastava-droid Date: Fri, 17 Apr 2026 13:38:00 -0700 Subject: [PATCH 3/4] update conc --- .github/configs/nvidia-master.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index b9867c03a..3905cb81b 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -3342,15 +3342,14 @@ minimaxm2.5-fp8-b200-vllm: - isl: 1024 osl: 1024 search-space: - - { tp: 2, conc-start: 4, conc-end: 512 } - - { tp: 4, conc-start: 4, conc-end: 512 } - - { tp: 2, ep: 2, conc-start: 512, conc-end: 512 } + - { tp: 2, ep: 2, conc-start: 512, conc-end: 1024 } + - { tp: 4, conc-start: 4, conc-end: 128 } - { tp: 4, ep: 4, conc-start: 256, conc-end: 512 } - isl: 8192 osl: 1024 search-space: - - { tp: 2, conc-start: 4, conc-end: 512 } - - { tp: 4, conc-start: 4, conc-end: 512 } + - { tp: 2, conc-start: 4, conc-end: 1024 } + - { tp: 4, conc-start: 4, conc-end: 1024 } # NOTE: At the time of submission, https://docs.vllm.ai/projects/recipes/en/latest/MiniMax/MiniMax-M2.html # does not have a B300-specific recipe, so this config reuses the existing From 64b721ef1b87571119f0f11ea01afe32b0674fcd Mon Sep 17 00:00:00 2001 From: hshrivastava-droid Date: Mon, 20 Apr 2026 14:29:50 -0700 Subject: [PATCH 4/4] update conc --- .github/configs/nvidia-master.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index 772afe762..82b81a2e2 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -3490,14 +3490,14 @@ minimaxm2.5-fp8-b200-vllm: - isl: 1024 osl: 1024 search-space: - - { tp: 2, ep: 2, 
conc-start: 512, conc-end: 512 } - { tp: 4, conc-start: 4, conc-end: 128 } - { tp: 4, ep: 4, conc-start: 256, conc-end: 512 } - isl: 8192 osl: 1024 search-space: - - { tp: 2, conc-start: 4, conc-end: 1024 } - - { tp: 4, conc-start: 4, conc-end: 1024 } + - { tp: 2, conc-start: 4, conc-end: 512 } + - { tp: 4, conc-start: 4, conc-end: 512 } # NOTE: At the time of submission, https://docs.vllm.ai/projects/recipes/en/latest/MiniMax/MiniMax-M2.html # does not have a B300-specific recipe, so this config reuses the existing