diff --git a/benchmarks/single_node/qwen3.5_bf16_mi355x.sh b/benchmarks/single_node/qwen3.5_bf16_mi355x.sh index f77390707..e339c9658 100755 --- a/benchmarks/single_node/qwen3.5_bf16_mi355x.sh +++ b/benchmarks/single_node/qwen3.5_bf16_mi355x.sh @@ -19,6 +19,8 @@ hf download "$MODEL" SERVER_LOG=/workspace/server.log PORT=${PORT:-8888} +CONTEXT_LENGTH=$((ISL + OSL + 20)) +MAX_PREFILL_TOKENS=32768 # Start GPU monitoring (power, temperature, clocks every second) start_gpu_monitor @@ -30,6 +32,13 @@ python3 -m sglang.launch_server \ --port $PORT \ --tensor-parallel-size $TP \ --trust-remote-code \ + --tokenizer-worker-num 6 \ + --enable-torch-compile \ + --cuda-graph-max-bs $CONC \ + --context-length $CONTEXT_LENGTH \ + --disable-radix-cache \ + --max-prefill-tokens $MAX_PREFILL_TOKENS \ + --scheduler-recv-interval 30 \ --mem-fraction-static 0.8 > $SERVER_LOG 2>&1 & SERVER_PID=$! diff --git a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh index f77390707..e339c9658 100644 --- a/benchmarks/single_node/qwen3.5_fp8_mi355x.sh +++ b/benchmarks/single_node/qwen3.5_fp8_mi355x.sh @@ -19,6 +19,8 @@ hf download "$MODEL" SERVER_LOG=/workspace/server.log PORT=${PORT:-8888} +CONTEXT_LENGTH=$((ISL + OSL + 20)) +MAX_PREFILL_TOKENS=32768 # Start GPU monitoring (power, temperature, clocks every second) start_gpu_monitor @@ -30,6 +32,13 @@ python3 -m sglang.launch_server \ --port $PORT \ --tensor-parallel-size $TP \ --trust-remote-code \ + --tokenizer-worker-num 6 \ + --enable-torch-compile \ + --cuda-graph-max-bs $CONC \ + --context-length $CONTEXT_LENGTH \ + --disable-radix-cache \ + --max-prefill-tokens $MAX_PREFILL_TOKENS \ + --scheduler-recv-interval 30 \ --mem-fraction-static 0.8 > $SERVER_LOG 2>&1 & SERVER_PID=$! 
diff --git a/perf-changelog.yaml b/perf-changelog.yaml index 1a19fd6a5..b7db494db 100644 --- a/perf-changelog.yaml +++ b/perf-changelog.yaml @@ -1055,3 +1055,10 @@ - "Enable VLLM_USE_FLASHINFER_MOE_INT4=1 for Kimi K2.5 INT4 B200 benchmark" pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/935 +- config-keys: + - qwen3.5-fp8-mi355x-sglang + - qwen3.5-bf16-mi355x-sglang + description: + - "Update CLI args of Qwen3.5 FP8 and BF16 SGLang benchmarks for MI355X to achieve better performance" + pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/942 +