diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml index c396e6788..0f6788569 100644 --- a/.github/configs/amd-master.yaml +++ b/.github/configs/amd-master.yaml @@ -22,7 +22,7 @@ dsr1-fp4-mi355x-sglang: - { tp: 8, conc-start: 4, conc-end: 64 } dsr1-fp8-mi300x-sglang: - image: rocm/7.0:rocm7.0_ubuntu_22.04_sgl-dev-v0.5.2-rocm7.0-mi30x-20250915 + image: lmsysorg/sglang:v0.5.5.post3-rocm700-mi30x model: deepseek-ai/DeepSeek-R1-0528 model-prefix: dsr1 runner: mi300x @@ -44,7 +44,7 @@ dsr1-fp8-mi300x-sglang: - { tp: 8, conc-start: 4, conc-end: 64 } dsr1-fp8-mi325x-sglang: - image: rocm/7.0:rocm7.0_ubuntu_22.04_sgl-dev-v0.5.2-rocm7.0-mi30x-20250915 + image: lmsysorg/sglang:v0.5.5.post3-rocm700-mi30x model: deepseek-ai/DeepSeek-R1-0528 model-prefix: dsr1 runner: mi325x @@ -66,7 +66,7 @@ dsr1-fp8-mi325x-sglang: - { tp: 8, conc-start: 4, conc-end: 64 } dsr1-fp8-mi355x-sglang: - image: rocm/7.0:rocm7.0_ubuntu_22.04_sgl-dev-v0.5.2-rocm7.0-mi35x-20250915 + image: lmsysorg/sglang:v0.5.5.post3-rocm700-mi35x model: deepseek-ai/DeepSeek-R1-0528 model-prefix: dsr1 runner: mi355x diff --git a/benchmarks/dsr1_fp8_mi355x_docker.sh b/benchmarks/dsr1_fp8_mi355x_docker.sh index d3abe2964..a2c6fec1b 100644 --- a/benchmarks/dsr1_fp8_mi355x_docker.sh +++ b/benchmarks/dsr1_fp8_mi355x_docker.sh @@ -15,10 +15,13 @@ # https://rocm.docs.amd.com/en/docs-7.0-docker/benchmark-docker/inference-sglang-deepseek-r1-fp8.html export SGLANG_USE_AITER=1 +export RCCL_MSCCL_ENABLE=0 +export ROCM_QUICK_REDUCE_QUANTIZATION=INT4 SERVER_LOG=$(mktemp /tmp/server-XXXXXX.log) python3 -m sglang.launch_server \ + --attention-backend aiter \ --model-path $MODEL \ --host=0.0.0.0 \ --port $PORT \ @@ -28,6 +31,7 @@ python3 -m sglang.launch_server \ --mem-fraction-static 0.8 --disable-radix-cache \ --num-continuous-decode-steps 4 \ --max-prefill-tokens 196608 \ + --enable-torch-compile \ --cuda-graph-max-bs 128 > $SERVER_LOG 2>&1 & SERVER_PID=$! 
diff --git a/benchmarks/dsr1_fp8_mi355x_slurm.sh b/benchmarks/dsr1_fp8_mi355x_slurm.sh index c046ede17..c6f0fade7 100644 --- a/benchmarks/dsr1_fp8_mi355x_slurm.sh +++ b/benchmarks/dsr1_fp8_mi355x_slurm.sh @@ -13,11 +13,14 @@ # PORT_OFFSET export SGLANG_USE_AITER=1 +export RCCL_MSCCL_ENABLE=0 +export ROCM_QUICK_REDUCE_QUANTIZATION=INT4 SERVER_LOG=$(mktemp /tmp/server-XXXXXX.log) PORT=$(( 8888 + $PORT_OFFSET )) python3 -m sglang.launch_server \ + --attention-backend aiter \ --model-path $MODEL \ --host=0.0.0.0 \ --port $PORT \ @@ -27,7 +30,8 @@ python3 -m sglang.launch_server \ --mem-fraction-static 0.8 --disable-radix-cache \ --num-continuous-decode-steps 4 \ --max-prefill-tokens 196608 \ - --cuda-graph-max-bs 128 > $SERVER_LOG 2>&1 & + --cuda-graph-max-bs 128 \ + --enable-torch-compile > $SERVER_LOG 2>&1 & SERVER_PID=$! diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 6c3052a3e..c7f68885c 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -133,3 +133,11 @@ - "Update vLLM image from v0.11.2 to v0.13.0" - "Add VLLM_MXFP4_USE_MARLIN=1 to H100 and H200 benchmark scripts" pr-link: https://github.com/InferenceMAX/InferenceMAX/pull/327 + +- config-keys: + - dsr1-fp8-mi300x-sglang + - dsr1-fp8-mi325x-sglang + - dsr1-fp8-mi355x-sglang + description: + - Use upstream SGLang images on mi300, mi325 and mi355 for dsr1fp8, and enable aiter attention backend, torch compile, RCCL_MSCCL_ENABLE=0 and ROCM_QUICK_REDUCE_QUANTIZATION=INT4 in the mi355x launch scripts + pr-link: https://github.com/InferenceMAX/InferenceMAX/pull/332