diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index a2674153a..a61f4fdd3 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -1,5 +1,5 @@
 dsr1-fp4-mi355x-sglang:
-  image: rocm/7.0:rocm7.0_ubuntu_22.04_sgl-dev-v0.5.2-rocm7.0-mi35x-20250915
+  image: lmsysorg/sglang:v0.5.6.post1-rocm700-mi35x
   model: amd/DeepSeek-R1-0528-MXFP4-Preview
   model-prefix: dsr1
   runner: mi355x
diff --git a/benchmarks/dsr1_fp4_mi355x_docker.sh b/benchmarks/dsr1_fp4_mi355x_docker.sh
index ca1255802..8b3750a64 100644
--- a/benchmarks/dsr1_fp4_mi355x_docker.sh
+++ b/benchmarks/dsr1_fp4_mi355x_docker.sh
@@ -11,6 +11,7 @@
 # RESULT_FILENAME
 # NUM_PROMPTS
 export SGLANG_USE_AITER=1
+export ROCM_QUICK_REDUCE_QUANTIZATION=INT4
 
 PREFILL_SIZE=196608
 if [[ "$ISL" == "8192" && "$OSL" == "1024" ]]; then
@@ -30,7 +31,9 @@
 python3 -m sglang.launch_server --model-path=$MODEL --trust-remote-code \
 --disable-radix-cache \
 --num-continuous-decode-steps=4 \
 --max-prefill-tokens=$PREFILL_SIZE \
---cuda-graph-max-bs=128 > $SERVER_LOG 2>&1 &
+--cuda-graph-max-bs=128 \
+--attention-backend aiter \
+--kv-cache-dtype fp8_e4m3 > $SERVER_LOG 2>&1 &
 
 SERVER_PID=$!
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 112145f10..926ac7e1d 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -81,3 +81,8 @@
     - Update vLLM image for NVIDIA configs from vLLM 0.11.0 to vLLM 0.11.2
     - Adds kv-cache-dtype: fp8 to benchmarks/gptoss_fp4_b200_docker.sh
   PR: https://github.com/InferenceMAX/InferenceMAX/pull/273
+- config-keys:
+  - dsr1-fp4-mi355x-sglang
+  description: |
+    - Updating MI355x Deepseek-R1 FP4 SGLang Image to upstream v0.5.6.post1
+  PR: https://github.com/InferenceMAX/InferenceMAX/pull/330