diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index 1f1093e38..b35716e78 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -132,7 +132,7 @@ qwen3.5-bf16-mi355x-sglang:
     - { tp: 8, ep: 1, conc-start: 4, conc-end: 256 }
 
 qwen3.5-bf16-mi300x-sglang:
-  image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
+  image: lmsysorg/sglang:v0.5.10-rocm720-mi30x
   model: Qwen/Qwen3.5-397B-A17B
   model-prefix: qwen3.5
   runner: mi300x
@@ -150,7 +150,7 @@ qwen3.5-bf16-mi300x-sglang:
     - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-bf16-mi325x-sglang:
-  image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
+  image: lmsysorg/sglang:v0.5.10-rocm720-mi30x
   model: Qwen/Qwen3.5-397B-A17B
   model-prefix: qwen3.5
   runner: mi325x
@@ -168,7 +168,7 @@ qwen3.5-bf16-mi325x-sglang:
     - { tp: 8, conc-start: 4, conc-end: 64 }
 
 qwen3.5-fp8-mi325x-sglang:
-  image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
+  image: lmsysorg/sglang:v0.5.10-rocm720-mi30x
   model: Qwen/Qwen3.5-397B-A17B-FP8
   model-prefix: qwen3.5
   runner: mi325x
@@ -227,7 +227,7 @@ qwen3.5-fp4-mi355x-sglang:
     - { tp: 4, conc-start: 4, conc-end: 16 }
 
 qwen3.5-fp8-mi300x-sglang:
-  image: lmsysorg/sglang:v0.5.9-rocm720-mi30x
+  image: lmsysorg/sglang:v0.5.10-rocm720-mi30x
   model: Qwen/Qwen3.5-397B-A17B-FP8
   model-prefix: qwen3.5
   runner: mi300x
diff --git a/benchmarks/single_node/qwen3.5_bf16_mi300x.sh b/benchmarks/single_node/qwen3.5_bf16_mi300x.sh
index 8aca9860a..f7c71963d 100755
--- a/benchmarks/single_node/qwen3.5_bf16_mi300x.sh
+++ b/benchmarks/single_node/qwen3.5_bf16_mi300x.sh
@@ -19,25 +19,34 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
     setup_eval_context
     EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
 
 # following Andy Luo linkedin's recipe https://www.linkedin.com/feed/update/urn:li:activity:7429203734389280768/
 python3 -m sglang.launch_server \
-    --attention-backend triton \
+    --attention-backend aiter \
     --model-path $MODEL \
     --host=0.0.0.0 \
     --port $PORT \
     --tensor-parallel-size $TP \
+    --data-parallel-size 1 \
     --trust-remote-code \
-    --mem-fraction-static 0.8 \
-    --disable-radix-cache $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+    --tokenizer-worker-num 6 \
+    --enable-aiter-allreduce-fusion \
+    --cuda-graph-max-bs $CONC \
+    --disable-radix-cache \
+    --max-prefill-tokens $MAX_PREFILL_TOKENS \
+    --scheduler-recv-interval 30 \
+    --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
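For context, the new --context-length arithmetic sizes the server's window to the benchmark's input and output lengths plus a 20-token margin, so KV-cache capacity is not over-allocated. A minimal worked example of the resulting values, assuming 1k/1k and 8k/1k ISL/OSL pairs (the values below are illustrative, not part of the patch):

    # Worked example of CONTEXT_LENGTH=$((ISL + OSL + 20)).
    # The ISL/OSL pairs are assumed for illustration.
    for pair in "1024 1024" "8192 1024"; do
        set -- $pair
        ISL=$1 OSL=$2
        echo "ISL=$ISL OSL=$OSL -> --context-length $((ISL + OSL + 20))"
    done
    # ISL=1024 OSL=1024 -> --context-length 2068
    # ISL=8192 OSL=1024 -> --context-length 9236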
diff --git a/benchmarks/single_node/qwen3.5_bf16_mi325x.sh b/benchmarks/single_node/qwen3.5_bf16_mi325x.sh
index 8aca9860a..f7c71963d 100644
--- a/benchmarks/single_node/qwen3.5_bf16_mi325x.sh
+++ b/benchmarks/single_node/qwen3.5_bf16_mi325x.sh
@@ -19,25 +19,34 @@ hf download "$MODEL"
 
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
     setup_eval_context
     EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
 
 # following Andy Luo linkedin's recipe https://www.linkedin.com/feed/update/urn:li:activity:7429203734389280768/
 python3 -m sglang.launch_server \
-    --attention-backend triton \
+    --attention-backend aiter \
     --model-path $MODEL \
    --host=0.0.0.0 \
     --port $PORT \
     --tensor-parallel-size $TP \
+    --data-parallel-size 1 \
     --trust-remote-code \
-    --mem-fraction-static 0.8 \
-    --disable-radix-cache $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+    --tokenizer-worker-num 6 \
+    --enable-aiter-allreduce-fusion \
+    --cuda-graph-max-bs $CONC \
+    --disable-radix-cache \
+    --max-prefill-tokens $MAX_PREFILL_TOKENS \
+    --scheduler-recv-interval 30 \
+    --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
diff --git a/benchmarks/single_node/qwen3.5_fp8_mi300x.sh b/benchmarks/single_node/qwen3.5_fp8_mi300x.sh
index 00cc9cf91..fe761d88d 100755
--- a/benchmarks/single_node/qwen3.5_fp8_mi300x.sh
+++ b/benchmarks/single_node/qwen3.5_fp8_mi300x.sh
@@ -19,11 +19,14 @@ hf download "$MODEL"
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
     setup_eval_context
     EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -31,14 +34,20 @@ start_gpu_monitor
 
 # following AMD Andy linkedin's recipe
 # https://www.linkedin.com/feed/update/urn:li:activity:7429203734389280768/
 python3 -m sglang.launch_server \
-    --attention-backend triton \
+    --attention-backend aiter \
     --model-path $MODEL \
     --host=0.0.0.0 \
     --port $PORT \
     --tensor-parallel-size $TP \
+    --data-parallel-size 1 \
     --trust-remote-code \
-    --mem-fraction-static 0.8 \
-    --disable-radix-cache $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+    --tokenizer-worker-num 6 \
+    --enable-aiter-allreduce-fusion \
+    --cuda-graph-max-bs $CONC \
+    --disable-radix-cache \
+    --max-prefill-tokens $MAX_PREFILL_TOKENS \
+    --scheduler-recv-interval 30 \
+    --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
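Since the server is launched in the background and its output goes to $SERVER_LOG, a smoke test along these lines can confirm it came up before benchmarking. This is a sketch assuming SGLang's native /health and /generate HTTP endpoints; verify both against the image in use:

    PORT=${PORT:-8888}
    # Poll until the server reports healthy (weight loading can take minutes).
    until curl -sf "http://localhost:${PORT}/health" > /dev/null; do
        sleep 10
    done
    # Fire one short generation request as a sanity check.
    curl -s "http://localhost:${PORT}/generate" \
        -H "Content-Type: application/json" \
        -d '{"text": "The capital of France is", "sampling_params": {"max_new_tokens": 8}}'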
diff --git a/benchmarks/single_node/qwen3.5_fp8_mi325x.sh b/benchmarks/single_node/qwen3.5_fp8_mi325x.sh
index 00cc9cf91..fe761d88d 100755
--- a/benchmarks/single_node/qwen3.5_fp8_mi325x.sh
+++ b/benchmarks/single_node/qwen3.5_fp8_mi325x.sh
@@ -19,11 +19,14 @@ hf download "$MODEL"
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
+CONTEXT_LENGTH=$((ISL + OSL + 20))
+MAX_PREFILL_TOKENS=32768
 
 EVAL_CONTEXT_ARGS=""
 if [ "${EVAL_ONLY}" = "true" ]; then
     setup_eval_context
     EVAL_CONTEXT_ARGS="--context-length $EVAL_MAX_MODEL_LEN"
+else EVAL_CONTEXT_ARGS="--context-length $CONTEXT_LENGTH"
 fi
 
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
@@ -31,14 +34,20 @@ start_gpu_monitor
 
 # following AMD Andy linkedin's recipe
 # https://www.linkedin.com/feed/update/urn:li:activity:7429203734389280768/
 python3 -m sglang.launch_server \
-    --attention-backend triton \
+    --attention-backend aiter \
     --model-path $MODEL \
     --host=0.0.0.0 \
     --port $PORT \
     --tensor-parallel-size $TP \
+    --data-parallel-size 1 \
     --trust-remote-code \
-    --mem-fraction-static 0.8 \
-    --disable-radix-cache $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
+    --tokenizer-worker-num 6 \
+    --enable-aiter-allreduce-fusion \
+    --cuda-graph-max-bs $CONC \
+    --disable-radix-cache \
+    --max-prefill-tokens $MAX_PREFILL_TOKENS \
+    --scheduler-recv-interval 30 \
+    --mem-fraction-static 0.75 $EVAL_CONTEXT_ARGS > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index f4ba37423..82cbf1467 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1314,6 +1314,16 @@
     - "Configs: 1k1k (TP4 conc 4-128), 8k1k (TP4 conc 4-128)"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/820
 
+- config-keys:
+  - qwen3.5-bf16-mi300x-sglang
+  - qwen3.5-bf16-mi325x-sglang
+  - qwen3.5-fp8-mi300x-sglang
+  - qwen3.5-fp8-mi325x-sglang
+  description:
+    - "Update cli args of Qwen3.5 FP8 and BF16 SGLang benchmarks for MI300X and MI325X to achieve better performance"
+    - "Use lmsysorg/sglang:v0.5.10-rocm720-mi30x"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1063
+
 - config-keys:
   - glm5-fp4-b200-sglang
   description:
diff --git a/runners/launch_mi325x-amds.sh b/runners/launch_mi325x-amds.sh
index 61da61fc1..67f93a309 100644
--- a/runners/launch_mi325x-amds.sh
+++ b/runners/launch_mi325x-amds.sh
@@ -35,6 +35,6 @@ srun --jobid=$JOB_ID \
     --container-remap-root \
     --container-workdir=/workspace/ \
     --no-container-entrypoint --export=ALL \
-bash benchmarks/single_node/${EXP_NAME%%_*}_${PRECISION}_mi325xs.sh
+bash benchmarks/single_node/${EXP_NAME%%_*}_${PRECISION}_mi325x.sh
 
 scancel $JOB_ID
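The one-line runner fix matters because the benchmark script path is assembled from the experiment name via parameter expansion, and the old mi325xs suffix pointed at a file that does not exist. A quick illustration with hypothetical values:

    # ${EXP_NAME%%_*} strips everything from the first underscore onward,
    # leaving only the model prefix. Example values are hypothetical.
    EXP_NAME=qwen3.5_bf16_mi325x
    PRECISION=bf16
    echo "benchmarks/single_node/${EXP_NAME%%_*}_${PRECISION}_mi325x.sh"
    # -> benchmarks/single_node/qwen3.5_bf16_mi325x.sh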