diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index 467d3df06..f4570fd2c 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -1650,7 +1650,7 @@ dsr1-fp8-b300-dynamo-trt:
   dp-attn: true
 
 dsr1-fp4-b200-sglang:
-  image: lmsysorg/sglang:v0.5.6-cu129-amd64
+  image: lmsysorg/sglang:v0.5.9-cu130
   model: nvidia/DeepSeek-R1-0528-FP4-V2
   model-prefix: dsr1
   runner: b200
@@ -1760,7 +1760,7 @@ dsr1-fp4-b200-trt-mtp:
     - { tp: 8, ep: 8, dp-attn: true, conc-start: 64, conc-end: 256, spec-decoding: mtp }
 
 dsr1-fp8-b200-sglang:
-  image: lmsysorg/sglang:v0.5.6-cu129-amd64
+  image: lmsysorg/sglang:v0.5.9-cu130
   model: deepseek-ai/DeepSeek-R1-0528
   model-prefix: dsr1
   runner: b200
@@ -1942,7 +1942,7 @@ kimik2.5-fp4-b200-vllm:
     - { tp: 4, ep: 4, conc-start: 4, conc-end: 64 }
 
 dsr1-fp8-b200-sglang-mtp:
-  image: lmsysorg/sglang:v0.5.8-cu130-amd64
+  image: lmsysorg/sglang:v0.5.9-cu130
   model: deepseek-ai/DeepSeek-R1-0528
   model-prefix: dsr1
   runner: b200
@@ -2021,7 +2021,7 @@ dsr1-fp8-b200-trt-mtp:
     - { tp: 8, ep: 1, conc-start: 4, conc-end: 256, spec-decoding: mtp }
 
 dsr1-fp8-h200-sglang:
-  image: lmsysorg/sglang:v0.5.9-cu129-amd64
+  image: lmsysorg/sglang:v0.5.9-cu130
   model: deepseek-ai/DeepSeek-R1-0528
   model-prefix: dsr1
   runner: h200
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 1a19fd6a5..03fb6e082 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1055,3 +1055,16 @@
     - "Enable VLLM_USE_FLASHINFER_MOE_INT4=1 for Kimi K2.5 INT4 B200 benchmark"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/935
 
+- config-keys:
+    - dsr1-fp4-b200-sglang
+    - dsr1-fp8-b200-sglang
+    - dsr1-fp8-b200-sglang-mtp
+    - dsr1-fp8-h200-sglang
+  description:
+    - "Update SGLang image to v0.5.9-cu130 for all DSR1 SGLang configs"
+    - "dsr1-fp4-b200-sglang: v0.5.6-cu129-amd64 → v0.5.9-cu130"
+    - "dsr1-fp8-b200-sglang: v0.5.6-cu129-amd64 → v0.5.9-cu130"
+    - "dsr1-fp8-b200-sglang-mtp: v0.5.8-cu130-amd64 → v0.5.9-cu130"
+    - "dsr1-fp8-h200-sglang: v0.5.9-cu129-amd64 → v0.5.9-cu130"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/943
+