diff --git a/backend/cpp/llama-cpp/grpc-server.cpp b/backend/cpp/llama-cpp/grpc-server.cpp
index 46066202ea38..386aa78d05f5 100644
--- a/backend/cpp/llama-cpp/grpc-server.cpp
+++ b/backend/cpp/llama-cpp/grpc-server.cpp
@@ -291,6 +291,11 @@ static void params_parse(server_context& ctx_server, const backend::ModelOptions
         }
     }
 
+    if (!params.kv_overrides.empty()) {
+        params.kv_overrides.emplace_back();
+        params.kv_overrides.back().key[0] = 0;
+    }
+
     // TODO: Add yarn
 
     if (!request->tensorsplit().empty()) {
diff --git a/docs/content/docs/advanced/advanced-usage.md b/docs/content/docs/advanced/advanced-usage.md
index 8feaf95188ca..77e6ef63efc4 100644
--- a/docs/content/docs/advanced/advanced-usage.md
+++ b/docs/content/docs/advanced/advanced-usage.md
@@ -233,6 +233,15 @@ n_draft: 0
 
 # Quantization settings for the model, impacting memory and processing speed.
 quantization: ""
 
+# List of KV overrides for llama.cpp (equivalent to its --override-kv flag).
+# Format: KEY=TYPE:VALUE
+# Example: `qwen3moe.expert_used_count=int:10`
+# Use this to override model metadata values at load time.
+# Supported types: int, float, bool, str.
+# Multiple overrides can be specified as a list.
+overrides:
+  - KEY=TYPE:VALUE
+
 # Utilization percentage of GPU memory to allocate for the model. (vLLM)
 gpu_memory_utilization: 0
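
A note on the params_parse change: llama.cpp treats the kv_overrides array as terminated by an entry whose key is the empty string, so after all overrides have been parsed the server appends one sentinel element and zeroes its key (key[0] = 0). The check for a non-empty vector ensures the sentinel is only added when overrides were actually supplied.

On the usage side, this lets a model's YAML config override GGUF metadata at load time without editing the model file. A minimal sketch of a LocalAI model config using the new field (the model name and file below are hypothetical; the override value is the one from the docs example above):

    # Hypothetical model definition demonstrating the new overrides field.
    name: qwen3-moe
    parameters:
      model: qwen3-moe-q4_k_m.gguf
    # Each entry uses the same KEY=TYPE:VALUE syntax as llama.cpp's --override-kv flag.
    overrides:
      - qwen3moe.expert_used_count=int:10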