diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml index 776413032..59a1ec753 100644 --- a/.github/configs/nvidia-master.yaml +++ b/.github/configs/nvidia-master.yaml @@ -3513,9 +3513,8 @@ minimaxm2.5-fp8-b200-vllm: - isl: 1024 osl: 1024 search-space: - - { tp: 2, conc-start: 4, conc-end: 512 } - - { tp: 4, conc-start: 4, conc-end: 512 } - { tp: 2, ep: 2, conc-start: 512, conc-end: 512 } + - { tp: 4, conc-start: 4, conc-end: 128 } - { tp: 4, ep: 4, conc-start: 256, conc-end: 512 } - isl: 8192 osl: 1024 diff --git a/benchmarks/single_node/minimaxm2.5_fp8_b200.sh b/benchmarks/single_node/minimaxm2.5_fp8_b200.sh index d892d72f1..0ba6437c4 100755 --- a/benchmarks/single_node/minimaxm2.5_fp8_b200.sh +++ b/benchmarks/single_node/minimaxm2.5_fp8_b200.sh @@ -24,7 +24,7 @@ hf download "$MODEL" SERVER_LOG=/workspace/server.log PORT=${PORT:-8888} -export VLLM_FLASHINFER_ALLREDUCE_BACKEND=mnnvl +export VLLM_FLOAT32_MATMUL_PRECISION=high if [ "$EP_SIZE" -gt 1 ]; then EP=" --enable-expert-parallel" diff --git a/perf-changelog.yaml b/perf-changelog.yaml index cc9baf1c0..552bdd7ab 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1690,3 +1690,9 @@ description: - "Add VLLM_FLOAT32_MATMUL_PRECISION=high, update search space concurrency ranges" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1107 + +- config-keys: + - minimaxm2.5-fp8-b200-vllm + description: + - "Add VLLM_FLOAT32_MATMUL_PRECISION=high, remove VLLM_FLASHINFER_ALLREDUCE_BACKEND=mnnvl, update search-space concurrency ranges" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1068