diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml index 89318004b..95c26c075 100644 --- a/.github/configs/amd-master.yaml +++ b/.github/configs/amd-master.yaml @@ -114,7 +114,7 @@ dsr1-fp8-mi355x-sglang: - { tp: 8, conc-start: 4, conc-end: 64 } qwen3.5-bf16-mi355x-sglang: - image: rocm/sgl-dev:v0.5.8.post1-rocm720-mi35x-20260215 + image: lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260415 model: Qwen/Qwen3.5-397B-A17B model-prefix: qwen3.5 runner: mi355x @@ -125,11 +125,11 @@ qwen3.5-bf16-mi355x-sglang: - isl: 1024 osl: 1024 search-space: - - { tp: 8, conc-start: 4, conc-end: 64 } + - { tp: 8, ep: 1, conc-start: 4, conc-end: 256 } - isl: 8192 osl: 1024 search-space: - - { tp: 8, conc-start: 4, conc-end: 64 } + - { tp: 8, ep: 1, conc-start: 4, conc-end: 256 } qwen3.5-bf16-mi300x-sglang: image: lmsysorg/sglang:v0.5.10-rocm720-mi30x @@ -186,7 +186,7 @@ qwen3.5-fp8-mi325x-sglang: - { tp: 8, conc-start: 4, conc-end: 64 } qwen3.5-fp8-mi355x-sglang: - image: rocm/sgl-dev:v0.5.8.post1-rocm720-mi35x-20260218 + image: lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260414 model: Qwen/Qwen3.5-397B-A17B-FP8 model-prefix: qwen3.5 runner: mi355x @@ -197,11 +197,14 @@ qwen3.5-fp8-mi355x-sglang: - isl: 1024 osl: 1024 search-space: - - { tp: 8, conc-start: 4, conc-end: 64 } + - { tp: 8, ep: 1, conc-start: 4, conc-end: 32 } + - { tp: 8, ep: 8, conc-start: 64, conc-end: 256 } + - { tp: 2, ep: 2, conc-start: 128, conc-end: 256 } - isl: 8192 osl: 1024 search-space: - - { tp: 8, conc-start: 4, conc-end: 64 } + - { tp: 2, ep: 2, conc-start: 4, conc-end: 32 } + - { tp: 4, ep: 1, conc-start: 32, conc-end: 256 } qwen3.5-fp4-mi355x-sglang: image: lmsysorg/sglang:v0.5.10-rocm720-mi35x diff --git a/benchmarks/single_node/qwen3.5_bf16_mi355x.sh b/benchmarks/single_node/qwen3.5_bf16_mi355x.sh index ce82b9a53..6d40e3e3f 100755 --- a/benchmarks/single_node/qwen3.5_bf16_mi355x.sh +++ b/benchmarks/single_node/qwen3.5_bf16_mi355x.sh @@ -39,7 +39,6 @@ python3 -m 
sglang.launch_server \ --port $PORT \ --tensor-parallel-size $TP \ --ep-size $EP_SIZE \ - --data-parallel-size 1 \ --trust-remote-code \ --tokenizer-worker-num 6 \ --enable-aiter-allreduce-fusion \ @@ -47,7 +46,7 @@ python3 -m sglang.launch_server \ --disable-radix-cache \ --max-prefill-tokens $MAX_PREFILL_TOKENS \ --scheduler-recv-interval 30 \ - --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 & + --mem-fraction-static 0.8 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 & SERVER_PID=$! diff --git a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh index ce82b9a53..6d40e3e3f 100644 --- a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh +++ b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh @@ -39,7 +39,6 @@ python3 -m sglang.launch_server \ --port $PORT \ --tensor-parallel-size $TP \ --ep-size $EP_SIZE \ - --data-parallel-size 1 \ --trust-remote-code \ --tokenizer-worker-num 6 \ --enable-aiter-allreduce-fusion \ @@ -47,7 +46,7 @@ python3 -m sglang.launch_server \ --disable-radix-cache \ --max-prefill-tokens $MAX_PREFILL_TOKENS \ --scheduler-recv-interval 30 \ - --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 & + --mem-fraction-static 0.8 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 & SERVER_PID=$! 
diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 6721dbb1e..3cfd2b377 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1358,3 +1358,13 @@ description: - "Enable SGLANG_ENABLE_SPEC_V2=1 for Qwen3.5 FP8 H200 SGLang MTP" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1017 + +- config-keys: + - qwen3.5-fp8-mi355x-sglang + - qwen3.5-bf16-mi355x-sglang + description: + - "Update CLI args of Qwen3.5 FP8 and BF16 SGLang benchmarks for MI355X to achieve better performance" + - "Use lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260415 for BF16 benchmark" + - "Use lmsysorg/sglang-rocm:v0.5.10rc0-rocm720-mi35x-20260414 for FP8 benchmark" + - "Images include upstream SGLang PRs: https://github.com/sgl-project/sglang/pull/21188, https://github.com/sgl-project/sglang/pull/21421, https://github.com/sgl-project/sglang/pull/20736" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1036