From d236f29c390fb271b502271fc1cf7d60b8056ca4 Mon Sep 17 00:00:00 2001 From: Chun Fang Date: Fri, 1 May 2026 10:43:19 +0000 Subject: [PATCH 1/2] [ajith's work] update image and search-space for glm5-fp8-mi355x-sglang-mtp --- .github/configs/amd-master.yaml | 8 +++++--- benchmarks/single_node/glm5_fp8_mi355x_mtp.sh | 12 ++++++------ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml index 893210ef6..8f03def09 100644 --- a/.github/configs/amd-master.yaml +++ b/.github/configs/amd-master.yaml @@ -364,7 +364,7 @@ glm5-fp8-mi355x-sglang: - { tp: 8, conc-start: 4, conc-end: 64 } glm5-fp8-mi355x-sglang-mtp: - image: lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260413 + image: lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260415 model: zai-org/GLM-5-FP8 model-prefix: glm5 runner: mi355x @@ -375,11 +375,13 @@ glm5-fp8-mi355x-sglang-mtp: - isl: 1024 osl: 1024 search-space: - - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: mtp } + - { tp: 4, conc-start: 4, conc-end: 128, spec-decoding: mtp } + - { tp: 8, conc-start: 4, conc-end: 8, spec-decoding: mtp } - isl: 8192 osl: 1024 search-space: - - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: mtp } + - { tp: 4, conc-start: 4, conc-end: 128, spec-decoding: mtp } + - { tp: 8, conc-start: 4, conc-end: 8, spec-decoding: mtp } glm5-fp8-mi355x-atom: image: rocm/atom:rocm7.2.2_ubuntu24.04_py3.12_pytorch_release_2.10.0_atom0.1.2.post diff --git a/benchmarks/single_node/glm5_fp8_mi355x_mtp.sh b/benchmarks/single_node/glm5_fp8_mi355x_mtp.sh index f4b899011..5c28ebeaf 100755 --- a/benchmarks/single_node/glm5_fp8_mi355x_mtp.sh +++ b/benchmarks/single_node/glm5_fp8_mi355x_mtp.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +set -x source "$(dirname "$0")/../benchmark_lib.sh" @@ -15,11 +16,6 @@ if [[ -n "$SLURM_JOB_ID" ]]; then echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME" fi -# GLM-5 requires transformers with glm_moe_dsa model type support. 
-# However, the Image rocm/sgl-dev:v0.5.8.post1-rocm720-mi35x-20260219 doesn't provide this support. -python3 -m pip install -U --no-cache-dir \ - "git+https://github.com/huggingface/transformers.git@6ed9ee36f608fd145168377345bfc4a5de12e1e2" - hf download "$MODEL" # ROCm / SGLang performance tuning for MI355X @@ -30,6 +26,7 @@ export SGLANG_ENABLE_SPEC_V2=1 SERVER_LOG=/workspace/server.log PORT=${PORT:-8888} +CONTEXT_LENGTH=$((ISL + OSL + 32)) EVAL_CONTEXT_ARGS="" if [ "${EVAL_ONLY}" = "true" ]; then @@ -45,9 +42,11 @@ python3 -m sglang.launch_server \ --port $PORT \ --tensor-parallel-size $TP \ --trust-remote-code \ + --cuda-graph-max-bs $CONC \ + --context-length $CONTEXT_LENGTH \ + --mem-fraction-static 0.85 \ --tool-call-parser glm47 \ --reasoning-parser glm45 \ - --mem-fraction-static 0.85 \ --model-loader-extra-config '{"enable_multithread_load": true, "num_threads": 8}' \ --nsa-prefill-backend tilelang \ --nsa-decode-backend tilelang $EVAL_CONTEXT_ARGS \ @@ -56,6 +55,7 @@ python3 -m sglang.launch_server \ --speculative-num-steps 3 \ --speculative-eagle-topk 1 \ --speculative-num-draft-tokens 4 \ + --tokenizer-worker-num $((TP*2)) \ --disable-radix-cache> $SERVER_LOG 2>&1 & SERVER_PID=$! 
From 67bd1f01671be0832 Mon Sep 17 00:00:00 2001 From: Chun Fang Date: Fri, 1 May 2026 10:50:51 +0000 Subject: [PATCH 2/2] Update Perf-changelog --- perf-changelog.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 0403c2385..3b02add0c 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -2069,3 +2069,12 @@ - "Recipes cover 8k/1k aggregate TP8 low-latency conc=1, low-latency bridge 1P DEP8 + 4D TP8 no-offload conc=16/32/64, mid 1P/1D DEP8 MegaMOE conc=128, and high-throughput 2P/1D DEP8 MegaMOE conc=1024" - "All recipes enable FP4 indexer cache and speculative-config mtp with num_speculative_tokens=2" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1242 + +- config-keys: + - glm5-fp8-mi355x-sglang-mtp + description: + - "Updated the image for glm5-fp8-mi355x-sglang-mtp" + - "Optimized the search space" + - "Removed redundant transformers installation" + - "Optimized model serving configs" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1252