diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index c7688ffd1..44cc07419 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -222,7 +222,7 @@ qwen3.5-fp8-mi300x-sglang:
   - { tp: 8, conc-start: 4, conc-end: 64 }
 
 glm5-fp8-mi355x-sglang:
-  image: lmsysorg/sglang:v0.5.10-rocm720-mi35x
+  image: rocm/sgl-dev:v0.5.8.post1-rocm720-mi35x-20260219
   model: zai-org/GLM-5-FP8
   model-prefix: glm5
   runner: mi355x
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 5782e11db..6c0185ca2 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1285,19 +1285,12 @@
   - "Runner script updated to clone NVIDIA/srt-slurm and map vLLM container image"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1008
 
-- config-keys:
-  - glm5-fp8-mi355x-sglang
-  description:
-  - "Upgrade SGLang image to v0.5.10"
-  - "Resolve the issue: https://github.com/sgl-project/sglang/issues/19028"
-  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1014
-
 - config-keys:
   - minimaxm2.5-fp8-b200-vllm
   description:
   - "Update MiniMax-M2.5 FP8 B200 config with new search spaces"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1010
-
+
 - config-keys:
   - minimaxm2.5-fp4-b200-vllm
   description: