Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/configs/amd-master.yaml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
dsr1-fp4-mi355x-sglang:
image: rocm/7.0:rocm7.0_ubuntu_22.04_sgl-dev-v0.5.2-rocm7.0-mi35x-20250915
image: lmsysorg/sglang:v0.5.6.post1-rocm700-mi35x
model: amd/DeepSeek-R1-0528-MXFP4-Preview
model-prefix: dsr1
runner: mi355x
Expand Down
5 changes: 4 additions & 1 deletion benchmarks/dsr1_fp4_mi355x_docker.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
# RESULT_FILENAME
# NUM_PROMPTS
export SGLANG_USE_AITER=1
export ROCM_QUICK_REDUCE_QUANTIZATION=INT4

PREFILL_SIZE=196608
if [[ "$ISL" == "8192" && "$OSL" == "1024" ]]; then
Expand All @@ -30,7 +31,9 @@ python3 -m sglang.launch_server --model-path=$MODEL --trust-remote-code \
--disable-radix-cache \
--num-continuous-decode-steps=4 \
--max-prefill-tokens=$PREFILL_SIZE \
--cuda-graph-max-bs=128 > $SERVER_LOG 2>&1 &
--cuda-graph-max-bs=128 \
--attention-backend aiter \
--kv-cache-dtype fp8_e4m3 > $SERVER_LOG 2>&1 &

SERVER_PID=$!

Expand Down
5 changes: 5 additions & 0 deletions perf-changelog.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -81,3 +81,8 @@
- Update vLLM image for NVIDIA configs from vLLM 0.11.0 to vLLM 0.11.2
- Adds kv-cache-dtype: fp8 to benchmarks/gptoss_fp4_b200_docker.sh
PR: https://github.com/InferenceMAX/InferenceMAX/pull/273
- config-keys:
- dsr1-fp4-mi355x-sglang
description: |
- Update MI355x DeepSeek-R1 FP4 SGLang image to upstream v0.5.6.post1
PR: https://github.com/InferenceMAX/InferenceMAX/pull/330