From 3ed70d614f5b962846b5842d1c6bd1eb5fb7f658 Mon Sep 17 00:00:00 2001
From: seungrokj
Date: Thu, 16 Apr 2026 08:22:47 +0000
Subject: [PATCH 1/6] Add ATOM MiniMax-M2.5 FP4 benchmark on MI355X

Signed-off-by: seungrokj
---
 .github/configs/amd-master.yaml    | 26 ++++++
 .../minimaxm2.5_fp4_mi355x_atom.sh | 80 +++++++++++++++++++
 2 files changed, 106 insertions(+)
 create mode 100644 benchmarks/single_node/minimaxm2.5_fp4_mi355x_atom.sh

diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index 89318004b..3c1a8ee8e 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -415,6 +415,32 @@ minimaxm2.5-fp8-mi355x-atom:
     - { tp: 4, conc-start: 4, conc-end: 128 }
     - { tp: 8, ep: 8, conc-start: 32, conc-end: 256 }
 
+minimaxm2.5-fp4-mi355x-atom:
+  # TODO:
+  image: TBD
+  model: MiniMaxAI/MiniMax-M2.5
+  model-prefix: minimaxm2.5
+  runner: mi355x
+  precision: fp4
+  framework: atom
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      # TODO:
+      search-space:
+        - { tp: 1, conc-start: 4, conc-end: 256 }
+        - { tp: 2, conc-start: 4, conc-end: 256 }
+        - { tp: 4, conc-start: 4, conc-end: 256 }
+        - { tp: 8, conc-start: 4, conc-end: 256 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 1, conc-start: 4, conc-end: 256 }
+        - { tp: 2, conc-start: 4, conc-end: 256 }
+        - { tp: 4, conc-start: 4, conc-end: 256 }
+        - { tp: 8, conc-start: 4, conc-end: 256 }
+
 minimaxm2.5-fp8-mi300x-vllm:
   image: vllm/vllm-openai-rocm:v0.16.0
   model: MiniMaxAI/MiniMax-M2.5
diff --git a/benchmarks/single_node/minimaxm2.5_fp4_mi355x_atom.sh b/benchmarks/single_node/minimaxm2.5_fp4_mi355x_atom.sh
new file mode 100644
index 000000000..ca84f8228
--- /dev/null
+++ b/benchmarks/single_node/minimaxm2.5_fp4_mi355x_atom.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    CONC \
+    ISL \
+    OSL \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME \
+    EP_SIZE \
+    DP_ATTENTION
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+echo "TP: $TP, CONC: $CONC, ISL: $ISL, OSL: $OSL, EP_SIZE: $EP_SIZE, DP_ATTENTION: $DP_ATTENTION"
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+export OMP_NUM_THREADS=1
+
+# Calculate max-model-len based on ISL and OSL
+if [ "$ISL" = "1024" ] && [ "$OSL" = "1024" ]; then
+    CALCULATED_MAX_MODEL_LEN=""
+else
+    CALCULATED_MAX_MODEL_LEN=" --max-model-len 10240 "
+fi
+
+if [ "$EP_SIZE" -gt 1 ]; then
+    EP=" --enable-expert-parallel"
+else
+    EP=" "
+fi
+
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+set -x
+
+python3 -m atom.entrypoints.openai_server \
+    --model "$MODEL" \
+    --server-port "$PORT" \
+    -tp "$TP" \
+    --kv_cache_dtype fp8 $CALCULATED_MAX_MODEL_LEN $EP \
+    --trust-remote-code \
+    > "$SERVER_LOG" 2>&1 &
+
+SERVER_PID=$!
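+
+# NOTE: $! above captures the PID of the backgrounded ATOM server; the
+# readiness helper below uses it to bail out early if the server process dies
+# instead of polling forever. wait_for_server_ready comes from
+# benchmark_lib.sh. Purely for illustration, a minimal hand-rolled equivalent
+# (a sketch only; it assumes the server exposes the usual OpenAI-compatible
+# /health route, an assumption rather than a documented contract of this
+# repo's helper) would look like:
+#
+#   until curl -sf "http://localhost:${PORT}/health" >/dev/null; do
+#       kill -0 "$SERVER_PID" 2>/dev/null || { tail -n 50 "$SERVER_LOG"; exit 1; }
+#       sleep 5
+#   done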
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+export PYTHONDONTWRITEBYTECODE=1
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts "$((CONC * 10))" \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/ \
+    --trust-remote-code
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT"
+    append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x

From ecce815b61c541a5eecbc073876eb884e0398d3c Mon Sep 17 00:00:00 2001
From: seungrokj <144636725+seungrokj@users.noreply.github.com>
Date: Fri, 17 Apr 2026 17:43:29 +0900
Subject: [PATCH 2/6] Update amd-master.yaml

---
 .github/configs/amd-master.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index 3c1a8ee8e..21010f688 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -418,7 +418,7 @@ minimaxm2.5-fp4-mi355x-atom:
   # TODO:
   image: TBD
-  model: MiniMaxAI/MiniMax-M2.5
+  model: amd/MiniMax-M2.5-MXFP4
   model-prefix: minimaxm2.5

From d2e4cb5c47bb2cc093579dacea02f061e43c2c86 Mon Sep 17 00:00:00 2001
From: seungrokj
Date: Sat, 25 Apr 2026 14:10:29 +0900
Subject: [PATCH 3/6] Update minimaxm2.5-fp4-mi355x-atom config and add
 minimaxm2.5-fp4-mi355x-vllm

Co-Authored-By: Claude Sonnet 4.6
---
 .github/configs/amd-master.yaml | 42 ++++++++++++++++++++++++---------
 1 file changed, 31 insertions(+), 11 deletions(-)

diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index 21010f688..718579b6d 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -416,8 +416,7 @@ minimaxm2.5-fp8-mi355x-atom:
     - { tp: 8, ep: 8, conc-start: 32, conc-end: 256 }
 
 minimaxm2.5-fp4-mi355x-atom:
-  # TODO:
-  image: TBD
+  image: rocm/atom:rocm7.2.2_ubuntu24.04_py3.12_pytorch_release_2.10.0_atom0.1.2.post
   model: amd/MiniMax-M2.5-MXFP4
   model-prefix: minimaxm2.5
   runner: mi355x
   precision: fp4
   framework: atom
   multinode: false
   seq-len-configs:
     - isl: 1024
       osl: 1024
-      # TODO:
       search-space:
-        - { tp: 1, conc-start: 4, conc-end: 256 }
-        - { tp: 2, conc-start: 4, conc-end: 256 }
-        - { tp: 4, conc-start: 4, conc-end: 256 }
-        - { tp: 8, conc-start: 4, conc-end: 256 }
+        - { tp: 1, conc-start: 4, conc-end: 1024 }
+        - { tp: 2, conc-start: 4, conc-end: 1024 }
+        - { tp: 4, conc-start: 4, conc-end: 128 }
+        - { tp: 8, conc-start: 4, conc-end: 16 }
     - isl: 8192
       osl: 1024
       search-space:
-        - { tp: 1, conc-start: 4, conc-end: 256 }
-        - { tp: 2, conc-start: 4, conc-end: 256 }
-        - { tp: 4, conc-start: 4, conc-end: 256 }
-        - { tp: 8, conc-start: 4, conc-end: 256 }
+        - { tp: 1, conc-start: 4, conc-end: 1024 }
+        - { tp: 2, conc-start: 4, conc-end: 1024 }
+        - { tp: 4, conc-start: 4, conc-end: 128 }
+        - { tp: 8, conc-start: 4, conc-end: 16 }
+
+minimaxm2.5-fp4-mi355x-vllm:
+  image: vllm/vllm-openai-rocm:v0.19.1
+  model: amd/MiniMax-M2.5-MXFP4
+  model-prefix: minimaxm2.5
+  runner: mi355x
+  precision: fp4
+  framework: vllm
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 1, conc-start: 4, conc-end: 32 }
+        - { tp: 2, conc-start: 4, conc-end: 64 }
+        - { tp: 4, conc-start: 4, conc-end: 64 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 1, conc-start: 4, conc-end: 32 }
+        - { tp: 2, conc-start: 4, conc-end: 64 }
+        - { tp: 4, conc-start: 4, conc-end: 64 }
 
 minimaxm2.5-fp8-mi300x-vllm:
   image: vllm/vllm-openai-rocm:v0.16.0
   model: MiniMaxAI/MiniMax-M2.5

From d6131383916a5f8501bd1655bda64bafa5619a4b Mon Sep 17 00:00:00 2001
From: seungrokj
Date: Sat, 25 Apr 2026 14:13:46 +0900
Subject: [PATCH 4/6] Fill in perf-changelog description for
 minimaxm2.5-fp4-mi355x-atom

Co-Authored-By: Claude Sonnet 4.6
---
 perf-changelog.yaml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index a6c811748..478df6692 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1812,3 +1812,10 @@
     - "Topologies: low-conc 1p1d-dep8-tep8 (4 nodes, mirrored from NVIDIA srt-slurm PR #71 with offload kept and numa-bind dropped); mid 1p1d-dep8-dep16 (6 nodes) and high 3p1d-dep8-dep16 (10 nodes) hand-rolled, structurally derived from the kimi-k2.5 1k/1k pattern"
     - "Recipes stored under benchmarks/multi_node/srt-slurm-recipes/ and overlaid onto the upstream srt-slurm checkout at runtime"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1129
+
+- config-keys:
+  - minimaxm2.5-fp4-mi355x-atom
+  description:
+    - "Add MiniMax-M2.5 MXFP4 MI355X Atom benchmark (rocm/atom:rocm7.2.2_ubuntu24.04_py3.12_pytorch_release_2.10.0_atom0.1.2.post)"
+    - "Single-node sweep: TP1–TP8, 1k/1k and 8k/1k ISL/OSL"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1042

From 9f8b31b71eacb98a2b2553b286535ff24b46171d Mon Sep 17 00:00:00 2001
From: seungrokj
Date: Wed, 29 Apr 2026 09:47:19 +0900
Subject: [PATCH 5/6] perf-changelog: add entries for dsv4 B200/B300
 vLLM/SGLang, MI355X ATOM/vLLM, and B300 MTP configs

Co-Authored-By: Claude Sonnet 4.6
---
 perf-changelog.yaml | 136 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 131 insertions(+), 5 deletions(-)

diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 24ab876f8..323142349 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1830,11 +1830,137 @@
 - config-keys:
   - dsv4-fp4-mi355x-atom
   description:
-    - "Add DeepSeek-V4-Pro FP4 GB200 disaggregated vLLM benchmarks via Dynamo (1k/1k sweep; 8k/1k currently commented out)"
-    - "Container: vllm/vllm-openai:deepseekv4-cu130; model from /mnt/numa1/models/deepseek-v4-pro/ (compute-node-local NVMe)"
-    - "Topologies: low-conc 1p1d-dep8-tep8 (4 nodes, mirrored from NVIDIA srt-slurm PR #71 with offload kept and numa-bind dropped); mid 1p1d-dep8-dep16 (6 nodes) and high 3p1d-dep8-dep16 (10 nodes) hand-rolled, structurally derived from the kimi-k2.5 1k/1k pattern"
-    - "Recipes stored under benchmarks/multi_node/srt-slurm-recipes/ and overlaid onto the upstream srt-slurm checkout at runtime"
-  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1129
+    - "Add DeepSeek-V4-Pro FP4 MI355X ATOM Day-0 marker (single-sequence, TP=8, conc=1)"
+    - "Image: rocm/atom:rocm7.2.2_ubuntu24.04_py3.12_pytorch_release_2.10.0_atom0.1.2.post (matches qwen3.5-fp8-mi355x-atom base); ROCm/ATOM#650 overlaid at runtime via pip install --no-deps -e . from a pinned PR SHA (cdbff35) inside the benchmark script"
+    - "triton_kernels is missing from the release image (build-stage path /triton-test/python/triton_kernels/ is cleaned up); the script falls back to ROCm/triton@e491726 (RI3.5.x), which has matmul_ogs.py and routing.py (PR #650 imports both — upstream triton-lang/triton refactored matmul_ogs into matmul.py and removed routing) plus CDNA4MXScaleLayout and a target_info.py compatible with the image's bundled triton"
+    - "Model: deepseek-ai/DeepSeek-V4-Pro (same canonical checkpoint used by dsv4-fp4-b300-vllm); compatibility with PR #650's WeightsMapper not yet verified — first run will tell us"
+    - "Pinned to PR1 limitations: single-sequence kv_cache hardcode, --enforce-eager required, ATOM_USE_TRITON_MOE=1 (aiter fused_moe broken on gfx950)"
+    - "Sweep will expand to TP=4/8 conc 4-256 once ROCm/ATOM PR3 (multi-request) and PR4 (CUDAGraph) land"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1165
+
+- config-keys:
+  - dsv4-fp4-mi355x-atom
+  description:
+    - "Add DeepSeek-V4-Pro FP4 MI355X ATOM Day-0 marker (single-sequence, TP=8, conc=1)"
+    - "Image: rocm/atom:rocm7.2.2_ubuntu24.04_py3.12_pytorch_release_2.10.0_atom0.1.2.post (matches qwen3.5-fp8-mi355x-atom base); ROCm/ATOM#650 overlaid at runtime via pip install --no-deps -e . from a pinned PR SHA (cdbff35) inside the benchmark script"
+    - "triton_kernels is missing from the release image (build-stage path /triton-test/python/triton_kernels/ is cleaned up); the script falls back to ROCm/triton@e491726 (RI3.5.x), which has matmul_ogs.py and routing.py (PR #650 imports both — upstream triton-lang/triton refactored matmul_ogs into matmul.py and removed routing) plus CDNA4MXScaleLayout and a target_info.py compatible with the image's bundled triton"
+    - "Model: deepseek-ai/DeepSeek-V4-Pro (same canonical checkpoint used by dsv4-fp4-b300-vllm); compatibility with PR #650's WeightsMapper not yet verified — first run will tell us"
+    - "Pinned to PR1 limitations: single-sequence kv_cache hardcode, --enforce-eager required, ATOM_USE_TRITON_MOE=1 (aiter fused_moe broken on gfx950)"
+    - "Sweep will expand to TP=4/8 conc 4-256 once ROCm/ATOM PR3 (multi-request) and PR4 (CUDAGraph) land"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1170
+
+- config-keys:
+  - dsv4-fp4-b300-sglang-mtp
+  description:
+    - "Add DeepSeek-V4-Pro FP4 B300 SGLang benchmark with EAGLE/MTP speculative decoding"
+    - "Image: lmsysorg/sglang:deepseek-v4-b300@sha256:26e116bd211e300dbb76924d56c5cbe6cc3ee5ee2fe314859cb8774f5bc070f3 (pinned for deep_gemm transform_weights_for_mega_moe support; same digest as PR #1158)"
+    - "Model: deepseek-ai/DeepSeek-V4-Pro"
+    - "EAGLE/MTP flags hardcoded in script: num-steps=3, eagle-topk=1, num-draft-tokens=4"
+    - "Recipe (MoE backend, chunked-prefill) selected in script by dp-attn: TP-only + flashinfer_mxfp4 (small batch) vs DP-attn + deepep mega_moe (large batch)"
+    - "Three CONC bands: A=TP8 (1-8), B=TP4 (16-128), C=DP4 dp-attn (64-512); B/C overlap at conc 64,128"
+    - "Configs: 1k1k and 8k1k, no validation.py / launcher / yaml-field changes (knob-free)"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1166
+
+- config-keys:
+  - dsv4-fp4-b300-vllm
+  description:
+    - "Update search space based on B300 pareto sweep results"
+    - "ISL=1024: TP4 conc 4-128; DP4 (dp-attn) conc 256-4096; DP8 (dp-attn) conc 2048-8192"
+    - "ISL=8192: TP4 conc 4-64; DP4 (dp-attn) conc 128-1024; DP8 (dp-attn) conc 1024-8192"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1155
+
+- config-keys:
+  - dsv4-fp4-b300-sglang
+  description:
+    - "Recipe-per-CONC split for DeepSeek-V4-Pro on B300: low-latency (TP=8, EP=1), balanced (TP=4, EP=1) at conc=32, max-throughput (TP=4, EP=4, DP-attn + DeepEP) at conc=512, for both 1k1k and 8k1k"
+    - "Recipes from https://docs.sglang.io/cookbook/autoregressive/DeepSeek/DeepSeek-V4"
+    - "Image pinned to lmsysorg/sglang:deepseek-v4-b300@sha256:26e116bd211e300dbb76924d56c5cbe6cc3ee5ee2fe314859cb8774f5bc070f3"
+    - "DP-attention path enables SGLANG_OPT_SWA_EVICT_DROP_PAGE_MARGIN=1 for better SWA eviction behavior"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1185
+
+- config-keys:
+  - dsv4-fp4-b200-sglang
+  description:
+    - "Two-recipe dispatch for DeepSeek-V4-Pro on B200, selected by DP_ATTENTION knob: low-latency (TP=8, EP=1, flashinfer_mxfp4) for conc 1-32, DP-attention (TP=8, EP=8, DP-attn + DeepEP + mega_moe) for conc 64-{512,1024}. The DP-attention recipe uses identical flags across balanced and max-throughput CONC ranges; only --max-running-requests scales with CONC."
+    - "Recipes from https://docs.sglang.io/cookbook/autoregressive/DeepSeek/DeepSeek-V4"
+    - "Image pinned to lmsysorg/sglang:deepseek-v4-blackwell@sha256:df18bfc4aa9ecf59451002b49ba00cae58042de9e2a96378bbd21b404dd62c7b"
+    - "Adds SGLANG_OPT_* env knobs (SWA_SPLIT_LEAF_ON_INSERT, USE_JIT_NORM, USE_JIT_INDEXER_METADATA, USE_TOPK_V2, USE_CUSTOM_ALL_REDUCE_V2)"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1187
+
+- config-keys:
+  - dsv4-fp4-b300-sglang-mtp
+  description:
+    - "Pass --dsv4 to run_benchmark_serving so MTP benchmarks use the DSv4 chat template (PR #1153)"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1182
+
+- config-keys:
+  - dsv4-fp4-b300-vllm
+  description:
+    - Add low-latency configs and remove non-pareto configs
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1193
+
+- config-keys:
+  - dsv4-fp4-b200-vllm
+  description:
+    - "Add DeepSeek-V4-Pro single-node B200 vLLM benchmark derived from B200 pareto sweep"
+    - "ISL=1024: TP8 conc 4-128; DP8 (dp-attn) conc 256-4096"
+    - "ISL=8192: TP8 conc 4-32; DP8 (dp-attn) conc 64-1024"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1156
+
+- config-keys:
+  - dsv4-fp4-b300-sglang-mtp
+  description:
+    - "Add DeepSeek-V4-Pro FP4 B300 SGLang benchmark with EAGLE/MTP speculative decoding"
+    - "Image: lmsysorg/sglang:deepseek-v4-b300@sha256:26e116bd211e300dbb76924d56c5cbe6cc3ee5ee2fe314859cb8774f5bc070f3 (pinned for deep_gemm transform_weights_for_mega_moe support; same digest as PR #1158)"
+    - "Model: deepseek-ai/DeepSeek-V4-Pro"
+    - "EAGLE/MTP flags hardcoded in script: num-steps=3, eagle-topk=1, num-draft-tokens=4"
+    - "Recipe (MoE backend, chunked-prefill) selected in script by dp-attn: TP-only + flashinfer_mxfp4 (small batch) vs DP-attn + deepep mega_moe (large batch)"
+    - "Three CONC bands: A=TP8 (1-8), B=TP4 (16-128), C=DP4 dp-attn (64-512); B/C overlap at conc 64,128"
+    - "Configs: 1k1k and 8k1k, no validation.py / launcher / yaml-field changes (knob-free)"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1180
+
+- config-keys:
+  - dsv4-fp8-mi355x-vllm
+  description:
+    - "Add vLLM DeepSeek-V4-Pro FP8 benchmark for MI355X with AITER-accelerated MLA decode (vllm-project/vllm#40889, stacked on #40871)"
+    - "Base image rocm/atom:rocm7.2.2 (MI355X ROCm 7.2.2, aiter with MLA decode); vLLM rebuilt from PR branch at pinned SHA b3a4a44 at runtime via --no-deps overlay"
+    - "Key flags: --enforce-eager, --moe-backend triton_unfused, --kv-cache-dtype fp8, VLLM_ROCM_USE_AITER=1"
+    - "Search space: TP=8, concurrency 4-64, 1k1k and 8k1k"
+    - "MI355X runner updated to resolve framework-specific script names (dsv4_fp8_mi355x_vllm.sh) with fallback to generic names"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1188
+
+- config-keys:
+  - dsv4-fp4-b300-sglang
+  description:
+    - "conc=2048/4096: mega_moe deepep backend; conc=2048 cuda-graph-max-bs 288, mem 0.87; conc=4096 cuda-graph-max-bs 544, mem 0.835, swa-ratio 0.075, tokenizer-workers 8"
+    - "1k1k conc=512/1024: add mega_moe deepep backend with cuda-graph-max-bs 550, chunked-prefill 16384, max-running-requests 768"
+    - "ep=8 naming convention in yaml distinguishes mega_moe from existing flashinfer_mxfp4 ep=4 entries"
+    - "Recipes from https://docs.sglang.io/cookbook/autoregressive/DeepSeek/DeepSeek-V4"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1179
+
+- config-keys:
+  - dsv4-fp4-mi355x-atom
+  description:
+    - "Use ROCm/aiter#2916 mhc_pre device-allocation fix instead of disabling ATOM mhc_pre"
+    - "Patch installed aiter/ops/mhc.py at runtime to allocate mhc_pre intermediates on residual.device, preserving the aiter MHC fast path without rebuilding aiter"
+    - "Remove the ATOM deepseek_v4.py sed workaround that forced mhc_pre to torch fallback"
+    - "Keep dsv4-fp4-mi355x-atom at CONC=1 only; run 24953107645 showed high-concurrency DSv4 ATOM OOMs in PR #650 torch sparse-attention fallbacks before upstream AITER sparse-attention support lands"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1202
+
+- config-keys:
+  - dsv4-fp4-b300-vllm-mtp
+  description:
+    - "Add preliminary vLLM MTP configs for DeepSeek-V4-Pro on B300"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1210
+
+- config-keys:
+  - dsv4-fp4-b200-vllm
+  description:
+    - "Pin image to vllm/vllm-openai:v0.20.0-cu130 (was floating deepseekv4-cu130 tag); DeepGEMM is preinstalled in this image"
+    - "Use --attention_config.use_fp4_indexer_cache=True and --compilation-config {\"cudagraph_mode\": \"FULL_AND_PIECEWISE\", \"custom_ops\": [\"all\"]} for all configs"
+    - "Gate --moe-backend deep_gemm_mega_moe and --gpu-memory-utilization 0.85 on DP_ATTENTION=true per the v0.20.0 recipe"
+    - "Drop --pipeline-parallel-size 1; keep --no-enable-prefix-caching and --max-cudagraph-capture-size 2048"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1204
 
 - config-keys:
   - minimaxm2.5-fp4-mi355x-atom

From 217f30c7f9ce5e71fd12b90fedc22b099d7e6707 Mon Sep 17 00:00:00 2001
From: Cam Quilici
Date: Thu, 30 Apr 2026 15:22:21 -0500
Subject: [PATCH 6/6] perf-changelog: clean up minimaxm2.5 entry whitespace

Remove trailing whitespace on the line after the minimaxm2.5 pr-link and on
the dsv4-fp4-gb200-dynamo-vllm config-keys line. The original PR's diff bled
into the dsv4 entry instead of being a clean standalone insertion.
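
As an aside, a linter would catch this class of slip automatically: running
e.g. "yamllint perf-changelog.yaml" (assuming yamllint were adopted here,
which is an assumption, not current repo tooling; its default trailing-spaces
rule covers exactly these lines) reports the offending line and column for
each trailing blank.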
Co-Authored-By: Claude Opus 4.7 (1M context)
---
 perf-changelog.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index eebb55cc8..5d924045d 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1968,8 +1968,8 @@
     - "Add MiniMax-M2.5 MXFP4 MI355X Atom benchmark (rocm/atom:rocm7.2.2_ubuntu24.04_py3.12_pytorch_release_2.10.0_atom0.1.2.post)"
     - "Single-node sweep: TP1–TP8, 1k/1k and 8k/1k ISL/OSL"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1042
- 
-- config-keys: 
+ 
+- config-keys: