54 changes: 54 additions & 0 deletions .github/configs/amd-master.yaml
@@ -411,6 +411,60 @@ kimik2.5-int4-mi300x-vllm:
      search-space:
        - { tp: 8, conc-start: 4, conc-end: 64 }

kimik2.5-int4-mi355x-vllm-mtp:
  image: vllm/vllm-openai-rocm:v0.19.1
  model: moonshotai/Kimi-K2.5
  model-prefix: kimik2.5
  runner: mi355x
  precision: int4
  framework: vllm
  multinode: false
  seq-len-configs:
    - isl: 1024
      osl: 1024
      search-space:
        - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: mtp }
    - isl: 8192
      osl: 1024
      search-space:
        - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: mtp }

kimik2.5-int4-mi300x-vllm-mtp:
  image: vllm/vllm-openai-rocm:v0.19.1
  model: moonshotai/Kimi-K2.5
  model-prefix: kimik2.5
  runner: mi300x
  precision: int4
  framework: vllm
  multinode: false
  seq-len-configs:
    - isl: 1024
      osl: 1024
      search-space:
        - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: mtp }
    - isl: 8192
      osl: 1024
      search-space:
        - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: mtp }

kimik2.5-int4-mi325x-vllm-mtp:
  image: vllm/vllm-openai-rocm:v0.19.1
  model: moonshotai/Kimi-K2.5
  model-prefix: kimik2.5
  runner: mi325x
  precision: int4
  framework: vllm
  multinode: false
  seq-len-configs:
    - isl: 1024
      osl: 1024
      search-space:
        - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: mtp }
    - isl: 8192
      osl: 1024
      search-space:
        - { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: mtp }

kimik2.5-fp4-mi355x-vllm:
  image: vllm/vllm-openai-rocm:v0.18.0
  model: amd/Kimi-K2.5-MXFP4
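For context on the new entries above: each search-space item fixes a tensor-parallel size and sweeps request concurrency from conc-start to conc-end for the given isl/osl pair. A minimal bash sketch of how one such entry could expand into individual benchmark runs is shown below; the doubling step and the echo placeholder are illustrative assumptions, not part of this PR or its CI harness.

# Hypothetical expansion of { tp: 4, conc-start: 4, conc-end: 64, spec-decoding: mtp } for isl=1024/osl=1024.
# The doubling of CONC between conc-start and conc-end is an assumption for illustration.
TP=4
ISL=1024
OSL=1024
CONC=4
while [ "$CONC" -le 64 ]; do
    echo "would launch: MODEL=moonshotai/Kimi-K2.5 TP=$TP CONC=$CONC ISL=$ISL OSL=$OSL spec-decoding=mtp"
    CONC=$((CONC * 2))
done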
85 changes: 85 additions & 0 deletions benchmarks/single_node/kimik2.5_int4_mi300x_mtp.sh
@@ -0,0 +1,85 @@
#!/usr/bin/env bash

source "$(dirname "$0")/../benchmark_lib.sh"

check_env_vars \
MODEL \
TP \
CONC \
ISL \
OSL \
MAX_MODEL_LEN \
RANDOM_RANGE_RATIO \
RESULT_FILENAME

if [[ -n "$SLURM_JOB_ID" ]]; then
echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
fi

hf download "$MODEL"

# Set HIP_VISIBLE_DEVICES to match ROCR_VISIBLE_DEVICES for Ray compatibility in vLLM 0.14+
if [ -n "$ROCR_VISIBLE_DEVICES" ]; then
export HIP_VISIBLE_DEVICES="$ROCR_VISIBLE_DEVICES"
fi

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}

if [ "${EVAL_ONLY}" = "true" ]; then
setup_eval_context
MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN"
fi
# Start GPU monitoring (power, temperature, clocks every second)
start_gpu_monitor

echo "Ensuring host benchmark dependencies (aiohttp/transformers/numpy/tqdm/huggingface-hub/tiktoken)..."
python3 -m pip install --no-cache-dir aiohttp transformers numpy tqdm huggingface-hub tiktoken

set -x
export VLLM_USE_V2_MODEL_RUNNER=1
export VLLM_ROCM_USE_AITER=1
export VLLM_ROCM_QUICK_REDUCE_QUANTIZATION=INT4
export VLLM_ROCM_USE_AITER_RMSNORM=0
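# Default speculative-decoding (MTP) settings: EAGLE3 method with a draft model and 3 speculative tokens.
# The ${VLLM_SPEC_CONFIG:-...} expansion lets callers override this from the environment.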
export VLLM_SPEC_CONFIG="${VLLM_SPEC_CONFIG:-{\"model\": \"nvidia/Kimi-K2.5-Thinking-Eagle3\", \"method\": \"eagle3\", \"num_speculative_tokens\": 3}}"

vllm serve "$MODEL" --port "$PORT" \
--tensor-parallel-size="$TP" \
--gpu-memory-utilization 0.9 \
--max-model-len "$MAX_MODEL_LEN" \
--block-size=64 \
--trust-remote-code \
--no-enable-prefix-caching \
--max-num-seqs 256 \
--speculative-config "$VLLM_SPEC_CONFIG" \
--mm-encoder-tp-mode data > "$SERVER_LOG" 2>&1 &

SERVER_PID=$!

# Wait for server to be ready
wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"

run_benchmark_serving \
--model "$MODEL" \
--port "$PORT" \
--backend vllm \
--input-len "$ISL" \
--output-len "$OSL" \
--random-range-ratio "$RANDOM_RANGE_RATIO" \
--num-prompts "$((CONC * 10))" \
--max-concurrency "$CONC" \
--result-filename "$RESULT_FILENAME" \
--result-dir /workspace/ \
--trust-remote-code

# After throughput, run evaluation only if RUN_EVAL is true
if [ "${RUN_EVAL}" = "true" ]; then
run_eval --framework lm-eval --port "$PORT"
append_lm_eval_summary
fi

# Stop GPU monitoring
stop_gpu_monitor
set +x
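The script reads all of its parameters from the environment (see check_env_vars at the top). A hedged invocation sketch follows; MODEL, TP, ISL, and OSL mirror the amd-master.yaml entry, while CONC, MAX_MODEL_LEN, RANDOM_RANGE_RATIO, and the result filename are illustrative assumptions.

# Illustrative manual run of the MI300X MTP benchmark; values other than MODEL/TP/ISL/OSL are assumed.
MODEL=moonshotai/Kimi-K2.5 \
TP=4 CONC=16 ISL=1024 OSL=1024 \
MAX_MODEL_LEN=4096 RANDOM_RANGE_RATIO=0.8 \
RESULT_FILENAME=kimik2.5_int4_mi300x_mtp_tp4_conc16.json \
bash benchmarks/single_node/kimik2.5_int4_mi300x_mtp.sh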
85 changes: 85 additions & 0 deletions benchmarks/single_node/kimik2.5_int4_mi325x_mtp.sh
@@ -0,0 +1,85 @@
#!/usr/bin/env bash

source "$(dirname "$0")/../benchmark_lib.sh"

check_env_vars \
MODEL \
TP \
CONC \
ISL \
OSL \
MAX_MODEL_LEN \
RANDOM_RANGE_RATIO \
RESULT_FILENAME

if [[ -n "$SLURM_JOB_ID" ]]; then
echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
fi

hf download "$MODEL"

# Set HIP_VISIBLE_DEVICES to match ROCR_VISIBLE_DEVICES for Ray compatibility in vLLM 0.14+
if [ -n "$ROCR_VISIBLE_DEVICES" ]; then
export HIP_VISIBLE_DEVICES="$ROCR_VISIBLE_DEVICES"
fi

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}

if [ "${EVAL_ONLY}" = "true" ]; then
setup_eval_context
MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN"
fi
# Start GPU monitoring (power, temperature, clocks every second)
start_gpu_monitor

echo "Ensuring host benchmark dependencies (aiohttp/transformers/numpy/tqdm/huggingface-hub/tiktoken)..."
python3 -m pip install --no-cache-dir aiohttp transformers numpy tqdm huggingface-hub tiktoken

set -x
export VLLM_USE_V2_MODEL_RUNNER=1
export VLLM_ROCM_USE_AITER=1
export VLLM_ROCM_QUICK_REDUCE_QUANTIZATION=INT4
export VLLM_ROCM_USE_AITER_RMSNORM=0
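# Default speculative-decoding (MTP) settings: EAGLE3 method with a draft model and 3 speculative tokens.
# The ${VLLM_SPEC_CONFIG:-...} expansion lets callers override this from the environment.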
export VLLM_SPEC_CONFIG="${VLLM_SPEC_CONFIG:-{\"model\": \"nvidia/Kimi-K2.5-Thinking-Eagle3\", \"method\": \"eagle3\", \"num_speculative_tokens\": 3}}"

vllm serve "$MODEL" --port "$PORT" \
--tensor-parallel-size="$TP" \
--gpu-memory-utilization 0.9 \
--max-model-len "$MAX_MODEL_LEN" \
--block-size=64 \
--trust-remote-code \
--no-enable-prefix-caching \
--max-num-seqs 256 \
--speculative-config "$VLLM_SPEC_CONFIG" \
--mm-encoder-tp-mode data > "$SERVER_LOG" 2>&1 &

SERVER_PID=$!

# Wait for server to be ready
wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"

run_benchmark_serving \
--model "$MODEL" \
--port "$PORT" \
--backend vllm \
--input-len "$ISL" \
--output-len "$OSL" \
--random-range-ratio "$RANDOM_RANGE_RATIO" \
--num-prompts "$((CONC * 10))" \
--max-concurrency "$CONC" \
--result-filename "$RESULT_FILENAME" \
--result-dir /workspace/ \
--trust-remote-code

# After throughput, run evaluation only if RUN_EVAL is true
if [ "${RUN_EVAL}" = "true" ]; then
run_eval --framework lm-eval --port "$PORT"
append_lm_eval_summary
fi

# Stop GPU monitoring
stop_gpu_monitor
set +x
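Because the script builds VLLM_SPEC_CONFIG with a ${VLLM_SPEC_CONFIG:-...} default, the speculative-decoding settings can be overridden from the calling environment without editing the script. A hedged sketch follows; the reduced num_speculative_tokens and the other parameter values are illustrative assumptions.

# Override the default EAGLE3 speculative config, then launch the MI325X MTP script; values are assumed.
export VLLM_SPEC_CONFIG='{"model": "nvidia/Kimi-K2.5-Thinking-Eagle3", "method": "eagle3", "num_speculative_tokens": 2}'
MODEL=moonshotai/Kimi-K2.5 TP=4 CONC=16 ISL=1024 OSL=1024 \
MAX_MODEL_LEN=4096 RANDOM_RANGE_RATIO=0.8 RESULT_FILENAME=mi325x_mtp_override.json \
bash benchmarks/single_node/kimik2.5_int4_mi325x_mtp.sh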
85 changes: 85 additions & 0 deletions benchmarks/single_node/kimik2.5_int4_mi355x_mtp.sh
@@ -0,0 +1,85 @@
#!/usr/bin/env bash

source "$(dirname "$0")/../benchmark_lib.sh"

check_env_vars \
MODEL \
TP \
CONC \
ISL \
OSL \
MAX_MODEL_LEN \
RANDOM_RANGE_RATIO \
RESULT_FILENAME

if [[ -n "$SLURM_JOB_ID" ]]; then
echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
fi

hf download "$MODEL"

# Set HIP_VISIBLE_DEVICES to match ROCR_VISIBLE_DEVICES for Ray compatibility in vLLM 0.14+
if [ -n "$ROCR_VISIBLE_DEVICES" ]; then
export HIP_VISIBLE_DEVICES="$ROCR_VISIBLE_DEVICES"
fi

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}

if [ "${EVAL_ONLY}" = "true" ]; then
setup_eval_context
MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN"
fi
# Start GPU monitoring (power, temperature, clocks every second)
start_gpu_monitor

echo "Ensuring host benchmark dependencies (aiohttp/transformers/numpy/tqdm/huggingface-hub/tiktoken)..."
python3 -m pip install --no-cache-dir aiohttp transformers numpy tqdm huggingface-hub tiktoken

set -x
export VLLM_USE_V2_MODEL_RUNNER=1
export VLLM_ROCM_USE_AITER=1
export VLLM_ROCM_QUICK_REDUCE_QUANTIZATION=INT4
export VLLM_ROCM_USE_AITER_RMSNORM=0
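# Default speculative-decoding (MTP) settings: EAGLE3 method with a draft model and 3 speculative tokens.
# The ${VLLM_SPEC_CONFIG:-...} expansion lets callers override this from the environment.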
export VLLM_SPEC_CONFIG="${VLLM_SPEC_CONFIG:-{\"model\": \"nvidia/Kimi-K2.5-Thinking-Eagle3\", \"method\": \"eagle3\", \"num_speculative_tokens\": 3}}"

vllm serve "$MODEL" --port "$PORT" \
--tensor-parallel-size="$TP" \
--gpu-memory-utilization 0.9 \
--max-model-len "$MAX_MODEL_LEN" \
--block-size=64 \
--trust-remote-code \
--no-enable-prefix-caching \
--max-num-seqs 256 \
--speculative-config "$VLLM_SPEC_CONFIG" \
--mm-encoder-tp-mode data > "$SERVER_LOG" 2>&1 &

SERVER_PID=$!

# Wait for server to be ready
wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"

run_benchmark_serving \
--model "$MODEL" \
--port "$PORT" \
--backend vllm \
--input-len "$ISL" \
--output-len "$OSL" \
--random-range-ratio "$RANDOM_RANGE_RATIO" \
--num-prompts "$((CONC * 10))" \
--max-concurrency "$CONC" \
--result-filename "$RESULT_FILENAME" \
--result-dir /workspace/ \
--trust-remote-code

# After throughput, run evaluation only if RUN_EVAL is true
if [ "${RUN_EVAL}" = "true" ]; then
run_eval --framework lm-eval --port "$PORT"
append_lm_eval_summary
fi

# Stop GPU monitoring
stop_gpu_monitor
set +x
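The evaluation path is gated separately from the throughput run: RUN_EVAL=true runs lm-eval via run_eval after the serving benchmark, and EVAL_ONLY=true additionally calls setup_eval_context and swaps MAX_MODEL_LEN for EVAL_MAX_MODEL_LEN. A hedged sketch of an eval-enabled invocation follows; EVAL_MAX_MODEL_LEN and the remaining concrete values are assumptions.

# Enable the lm-eval pass after the MI355X MTP serving benchmark; all values are illustrative.
RUN_EVAL=true EVAL_ONLY=true EVAL_MAX_MODEL_LEN=16384 \
MODEL=moonshotai/Kimi-K2.5 TP=4 CONC=8 ISL=1024 OSL=1024 \
MAX_MODEL_LEN=4096 RANDOM_RANGE_RATIO=0.8 RESULT_FILENAME=mi355x_mtp_eval.json \
bash benchmarks/single_node/kimik2.5_int4_mi355x_mtp.sh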