18 changes: 18 additions & 0 deletions .github/configs/amd-master.yaml
@@ -381,6 +381,24 @@ glm5-fp8-mi355x-sglang-mtp:
      search-space:
        - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: mtp }

glm5-fp4-mi355x-sglang-mtp:
  image: lmsysorg/sglang-rocm:v0.5.10.post1-rocm700-mi35x-20260428
  model: amd/GLM-5-MXFP4
  model-prefix: glm5
  runner: mi355x
  precision: fp4
  framework: sglang
  multinode: false
  seq-len-configs:
    - isl: 1024
      osl: 1024
      search-space:
        - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: mtp }
    - isl: 8192
      osl: 1024
      search-space:
        - { tp: 8, conc-start: 4, conc-end: 64, spec-decoding: mtp }

glm5-fp8-mi355x-atom:
  image: rocm/atom:rocm7.2.2_ubuntu24.04_py3.12_pytorch_release_2.10.0_atom0.1.2.post
  model: zai-org/GLM-5-FP8
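For reference, each search-space row above expands into one run of the new benchmark script per concurrency point, with the config fields passed as environment variables. A minimal sketch for the 1k/1k row, assuming the sweep doubles from conc-start to conc-end and a placeholder RANDOM_RANGE_RATIO (neither the stepping policy nor that value is pinned down in this diff):

# Hypothetical expansion of the 1k/1k row; the harness's real sweep logic
# lives outside this diff.
for c in 4 8 16 32 64; do
  MODEL=amd/GLM-5-MXFP4 TP=8 CONC="$c" ISL=1024 OSL=1024 \
  RANDOM_RANGE_RATIO=1.0 RESULT_FILENAME="glm5_fp4_tp8_conc${c}" \
    bash benchmarks/single_node/glm5_fp4_mi355x_mtp.sh
done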
78 changes: 78 additions & 0 deletions benchmarks/single_node/glm5_fp4_mi355x_mtp.sh
@@ -0,0 +1,78 @@
#!/usr/bin/env bash

source "$(dirname "$0")/../benchmark_lib.sh"

check_env_vars \
  MODEL \
  TP \
  CONC \
  ISL \
  OSL \
  RANDOM_RANGE_RATIO \
  RESULT_FILENAME

if [[ -n "$SLURM_JOB_ID" ]]; then
  echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
fi

hf download "$MODEL"

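# Opt in to SGLang's v2 speculative-decoding path; the EAGLE/MTP flags below sit behind it.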
export SGLANG_ENABLE_SPEC_V2=1

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}

EVAL_CONTEXT_ARGS=""
if [ "${EVAL_ONLY}" = "true" ]; then
setup_eval_context
EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
fi
# Start GPU monitoring (power, temperature, clocks every second)
start_gpu_monitor

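# Launch the server with EAGLE-style MTP speculative decoding:
# 3 draft steps with a top-1 draft tree, verifying 4 draft tokens per round.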
python3 -m sglang.launch_server \
  --model-path $MODEL \
  --host=0.0.0.0 \
  --port $PORT \
  --trust-remote-code \
  --tp $TP \
  --chunked-prefill-size 131072 \
  --disable-radix-cache \
  --mem-fraction-static 0.85 \
  --model-loader-extra-config '{"enable_multithread_load": true}' \
  --watchdog-timeout 1200 \
  --reasoning-parser glm45 \
  --tool-call-parser glm47 \
  --speculative-algorithm EAGLE \
  --speculative-num-steps 3 \
  --speculative-eagle-topk 1 \
  --speculative-num-draft-tokens 4 \
  $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &

SERVER_PID=$!

# Wait for server to be ready
wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"

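# Throughput measurement: random prompts sized by ISL/OSL and RANDOM_RANGE_RATIO,
# with num-prompts fixed at 10x the concurrency target.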
run_benchmark_serving \
  --model "$MODEL" \
  --port "$PORT" \
  --backend vllm \
  --input-len "$ISL" \
  --output-len "$OSL" \
  --random-range-ratio "$RANDOM_RANGE_RATIO" \
  --num-prompts "$((CONC * 10))" \
  --max-concurrency "$CONC" \
  --result-filename "$RESULT_FILENAME" \
  --result-dir /workspace/ \
  --use-chat-template

# After throughput, run evaluation only if RUN_EVAL is true
if [ "${RUN_EVAL}" = "true" ]; then
run_eval --framework lm-eval --port "$PORT"
append_lm_eval_summary
fi

# Stop GPU monitoring
stop_gpu_monitor
set +x
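wait_for_server_ready comes from benchmark_lib.sh, which is not part of this diff. A minimal sketch of the kind of probe it likely performs, assuming it polls SGLang's /health route and watches the launched PID (the helper's exact behavior is an assumption):

# Assumed readiness loop: poll /health while confirming the server process is alive.
until curl -sf "http://127.0.0.1:${PORT:-8888}/health" > /dev/null; do
  kill -0 "$SERVER_PID" 2>/dev/null || { echo "server died; see $SERVER_LOG"; exit 1; }
  sleep 5
done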
12 changes: 12 additions & 0 deletions perf-changelog.yaml
@@ -2069,3 +2069,15 @@
- "Recipes cover 8k/1k aggregate TP8 low-latency conc=1, low-latency bridge 1P DEP8 + 4D TP8 no-offload conc=16/32/64, mid 1P/1D DEP8 MegaMOE conc=128, and high-throughput 2P/1D DEP8 MegaMOE conc=1024"
- "All recipes enable FP4 indexer cache and speculative-config mtp with num_speculative_tokens=2"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1242

- config-keys:
    - glm5-fp4-mi355x-sglang-mtp
  description:
    - "Add GLM-5 MXFP4 MI355X SGLang MTP benchmark"
    - "Image: lmsysorg/sglang-rocm:v0.5.10.post1-rocm700-mi35x-20260428"
    - "Model: amd/GLM-5-MXFP4"
    - "EAGLE speculative decoding (num-steps=3, eagle-topk=1, num-draft-tokens=4) behind SGLANG_ENABLE_SPEC_V2=1"
    - "Image ships transformers with glm_moe_dsa support, so no extra pip install is needed (unlike glm5-fp8-mi355x-sglang)"
    - "Configs: 1k1k and 8k1k, TP=8 conc 4-64 with spec-decoding=mtp"
    - "Requires benchmark_serving.py tokenizer fix: https://github.com/SemiAnalysisAI/InferenceX/pull/1253"
  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1254
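The transformers claim above (glm_moe_dsa support baked into the image) can be spot-checked without launching the server. A hypothetical probe: CONFIG_MAPPING is transformers' model-type registry, and True means no extra pip install is needed.

# Sanity check (assumed approach): query the image's transformers build
# for the glm_moe_dsa model type.
docker run --rm lmsysorg/sglang-rocm:v0.5.10.post1-rocm700-mi35x-20260428 \
  python3 -c "from transformers.models.auto.configuration_auto import CONFIG_MAPPING; print('glm_moe_dsa' in CONFIG_MAPPING)"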