From 92733a5af9a798e0693a0f9723bb82efe0b36fe0 Mon Sep 17 00:00:00 2001 From: "John W. Leimgruber III" Date: Sat, 26 Apr 2025 13:01:49 -0400 Subject: [PATCH 1/6] Porting saood06's ik_llama.cpp utility To make it easier to compare performance across forks --- examples/CMakeLists.txt | 3 + examples/sweep-bench/CMakeLists.txt | 5 + examples/sweep-bench/README.md | 65 ++++++++ examples/sweep-bench/sweep-bench-plot.py | 118 ++++++++++++++ examples/sweep-bench/sweep-bench.cpp | 189 +++++++++++++++++++++++ 5 files changed, 380 insertions(+) create mode 100644 examples/sweep-bench/CMakeLists.txt create mode 100644 examples/sweep-bench/README.md create mode 100755 examples/sweep-bench/sweep-bench-plot.py create mode 100644 examples/sweep-bench/sweep-bench.cpp diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index abc4fa1c893..bad1b60d7ab 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -31,6 +31,9 @@ else() add_subdirectory(simple-chat) add_subdirectory(speculative) add_subdirectory(speculative-simple) + add_subdirectory(sweep-bench) + add_subdirectory(tokenize) + add_subdirectory(tts) add_subdirectory(gen-docs) add_subdirectory(training) add_subdirectory(diffusion) diff --git a/examples/sweep-bench/CMakeLists.txt b/examples/sweep-bench/CMakeLists.txt new file mode 100644 index 00000000000..e49f0fea02a --- /dev/null +++ b/examples/sweep-bench/CMakeLists.txt @@ -0,0 +1,5 @@ +set(TARGET llama-sweep-bench) +add_executable(${TARGET} sweep-bench.cpp) +install(TARGETS ${TARGET} RUNTIME) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/sweep-bench/README.md b/examples/sweep-bench/README.md new file mode 100644 index 00000000000..d92740de221 --- /dev/null +++ b/examples/sweep-bench/README.md @@ -0,0 +1,65 @@ +# ik_llama.cpp/example/sweep-bench + +Benchmark the prompt processing and token generation performance of `ik_llama.cpp` +by doing a sweep over a whole context size and gathering performance metrics +in each ubatch-sized window. Only a single token sequence is used. + +The benchmark steps are: + +for each ubatch-sized window in context: + + 1. generate ubatch/4 tokens (not the whole window to save some time) + 2. measure generation performance + 3. remove generated tokens from KV cache + 4. prepare a ubatch-sized batch of random tokens + 4. process prepated batch + 5. measure prompt processing performance + +The purpose of the benchmark is to visualize how the performance changes with +the context size without averaging the metrics values over the whole context. + +## Usage + +./llama-sweep-bench -c 8704 -ub 512 -m models/Meta-Llama-3.2-3B-Instruct-Q8_0.gguf + +## Sample results + +- `PP` - prompt tokens per ubatch +- `TG` - generated tokens per ubatch +- `N_KV` - current KV cache size +- `T_PP` - prompt processing time (i.e. 
time to first token) +- `S_PP` - prompt processing speed (`(B*PP)/T_PP` or `PP/T_PP`) +- `T_TG` - time to generate all batches +- `S_TG` - text generation speed (`(B*TG)/T_TG`) + +| PP | TG | N_KV | T_PP s | S_PP t/s | T_TG s | S_TG t/s | +|-------|--------|--------|----------|----------|----------|----------| +| 512 | 128 | 0 | 1.100 | 465.51 | 2.311 | 55.38 | +| 512 | 128 | 512 | 1.183 | 432.97 | 1.895 | 67.55 | +| 512 | 128 | 1024 | 1.305 | 392.38 | 2.071 | 61.81 | +| 512 | 128 | 1536 | 1.279 | 400.42 | 2.164 | 59.14 | +| 512 | 128 | 2048 | 1.571 | 325.96 | 2.280 | 56.14 | +| 512 | 128 | 2560 | 1.431 | 357.87 | 2.418 | 52.94 | +| 512 | 128 | 3072 | 1.515 | 337.93 | 2.566 | 49.88 | +| 512 | 128 | 3584 | 1.588 | 322.34 | 2.722 | 47.03 | +| 512 | 128 | 4096 | 1.675 | 305.70 | 2.864 | 44.69 | +| 512 | 128 | 4608 | 1.769 | 289.50 | 2.999 | 42.68 | +| 512 | 128 | 5120 | 1.845 | 277.48 | 3.102 | 41.26 | +| 512 | 128 | 5632 | 1.893 | 270.46 | 3.219 | 39.76 | +| 512 | 128 | 6144 | 1.953 | 262.20 | 3.348 | 38.23 | +| 512 | 128 | 6656 | 2.018 | 253.71 | 3.474 | 36.84 | +| 512 | 128 | 7168 | 2.078 | 246.34 | 3.589 | 35.66 | +| 512 | 128 | 7680 | 2.140 | 239.22 | 3.717 | 34.43 | +| 512 | 128 | 8192 | 2.196 | 233.15 | 3.854 | 33.21 | + +### JSONL output + +Pass `--output-format jsonl` to output JSONL instead of Markdown, รก la + +```json lines +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 0, "t_pp": 1.093814, "speed_pp": 468.086884, "t_tg": 1.780312, "speed_tg": 71.897514 } +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 512, "t_pp": 1.169302, "speed_pp": 437.868073, "t_tg": 1.897474, "speed_tg": 67.458099 } +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 1024, "t_pp": 1.183700, "speed_pp": 432.542053, "t_tg": 2.059179, "speed_tg": 62.160694 } +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 1536, "t_pp": 1.428625, "speed_pp": 358.386566, "t_tg": 2.160639, "speed_tg": 59.241734 } +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 2048, "t_pp": 1.360647, "speed_pp": 376.291595, "t_tg": 2.274003, "speed_tg": 56.288403 } +``` diff --git a/examples/sweep-bench/sweep-bench-plot.py b/examples/sweep-bench/sweep-bench-plot.py new file mode 100755 index 00000000000..481a604c257 --- /dev/null +++ b/examples/sweep-bench/sweep-bench-plot.py @@ -0,0 +1,118 @@ +import pandas as pd +import matplotlib.pyplot as plt +import numpy as np +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('file', nargs='+') +args = parser.parse_args() + +df = None + +#for jsonl_file in args.file: +# # Read JSONL file into DataFrame +# df_part = pd.read_json(jsonl_file, lines=True) +# df_part['label'] = jsonl_file +# if df is None: +# df = df_part +# else: +# df = pd.concat([df, df_part]) +# + + + +for md_file in args.file: + # Read markdown table file into DataFrame + df_part = pd.read_csv(md_file, sep=r'\s*\|\s*', engine='python', + header=0, skiprows=[1]) + + # Clean up columns (remove empty columns from markdown formatting) + df_part = df_part.iloc[:, 1:-1] 
+ df_part.columns = [col.strip() for col in df_part.columns] + + # Rename columns to match expected names + df_part = df_part.rename(columns={ + 'N_KV': 'n_kv', + 'S_PP t/s': 'speed_pp', + 'S_TG t/s': 'speed_tg' + }) + + # Convert to numeric types + df_part['n_kv'] = pd.to_numeric(df_part['n_kv']) + df_part['speed_pp'] = pd.to_numeric(df_part['speed_pp']) + df_part['speed_tg'] = pd.to_numeric(df_part['speed_tg']) + + # Add label and append to main DataFrame + df_part['label'] = md_file + df = pd.concat([df, df_part]) if df is not None else df_part + +# Group by label and n_kv, calculate mean and std for both speed metrics +df_grouped = df.groupby(['label', 'n_kv']).agg({ + 'speed_pp': ['mean', 'std'], + 'speed_tg': ['mean', 'std'] +}).reset_index() + +# Flatten multi-index columns +df_grouped.columns = ['label', 'n_kv', 'speed_pp_mean', 'speed_pp_std', + 'speed_tg_mean', 'speed_tg_std'] + +# Replace NaN with 0 (std for a single sample is NaN) +df_grouped['speed_pp_std'] = df_grouped['speed_pp_std'].fillna(0) +df_grouped['speed_tg_std'] = df_grouped['speed_tg_std'].fillna(0) + +# Prepare ticks values for X axis (prune for readability) +x_ticks = df['n_kv'].unique() +while len(x_ticks) > 16: + x_ticks = x_ticks[::2] + +# Get unique labels and color map +labels = df_grouped['label'].unique() +colors = plt.cm.rainbow(np.linspace(0, 1, len(labels))) + +# Create prompt processing plot +plt.figure(figsize=(10, 6)) +ax1 = plt.gca() +plt.grid() +ax1.set_xticks(x_ticks) + +# Plot each label's data +for label, color in zip(labels, colors): + label_data = df_grouped[df_grouped['label'] == label].sort_values('n_kv') + pp = ax1.errorbar(label_data['n_kv'], label_data['speed_pp_mean'], + yerr=label_data['speed_pp_std'], color=color, + marker='o', linestyle='-', label=label) + +# Add labels and title +ax1.set_xlabel('Context Length (tokens)') +ax1.set_ylabel('Prompt Processing Rate (t/s)') +plt.title('Prompt Processing Performance Comparison') +ax1.legend(loc='upper right') + +# Adjust layout and save +plt.tight_layout() +plt.savefig('performance_comparison_pp.png', bbox_inches='tight') +plt.close() + +# Create token generation plot +plt.figure(figsize=(10, 6)) +ax1 = plt.gca() +plt.grid() +ax1.set_xticks(x_ticks) + +# Plot each model's data +for label, color in zip(labels, colors): + label_data = df_grouped[df_grouped['label'] == label].sort_values('n_kv') + tg = ax1.errorbar(label_data['n_kv'], label_data['speed_tg_mean'], + yerr=label_data['speed_tg_std'], color=color, + marker='s', linestyle='-', label=label) + +# Add labels and title +ax1.set_xlabel('Context Length (n_kv)') +ax1.set_ylabel('Token Generation Rate (t/s)') +plt.title('Token Generation Performance Comparison') +ax1.legend(loc='upper right') + +# Adjust layout and save +plt.tight_layout() +plt.savefig('performance_comparison_tg.png', bbox_inches='tight') +plt.close() diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp new file mode 100644 index 00000000000..27510687905 --- /dev/null +++ b/examples/sweep-bench/sweep-bench.cpp @@ -0,0 +1,189 @@ +#include "ggml.h" +#include "llama.h" +#include "common.h" +#include "llama-vocab.h" + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#ifndef NOMINMAX +# define NOMINMAX +#endif +#include +#endif + +#include +#include +#include +#include +#include + +static void print_usage(int, char ** argv) { + LOG_TEE("\nexample usage:\n"); + LOG_TEE("\n %s -m model.gguf -c 8192 -b 2048 -ub 512\n", argv[0]); + LOG_TEE("\n"); +} + +int main(int argc, char ** argv) { + + 
gpt_params params; + + if (!gpt_params_parse(argc, argv, params)) { + print_usage(argc, argv); + return 1; + } + + // init LLM + + llama_backend_init(); + llama_numa_init(params.numa); + + // initialize the model + + llama_model_params model_params = llama_model_params_from_gpt_params(params); + + llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); + + if (model == NULL) { + fprintf(stderr , "%s: error: unable to load model\n" , __func__); + return 1; + } + + llama_context_params ctx_params = llama_context_params_from_gpt_params(params); + + llama_context * ctx = llama_new_context_with_model(model, ctx_params); + + if (ctx == NULL) { + fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); + return 1; + } + + const unsigned int n_kv_max = llama_n_ctx(ctx); + + + const llama_vocab * vocab = llama_get_vocab(ctx); + llama_token bos = llama_token_bos_impl(*vocab); + //llama_token eos = llama_token_eos_impl(*vocab); + + const unsigned int n_vocab = llama_n_vocab(model); + + // decode in batches of ctx_params.n_batch tokens + auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch) { + for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) { + const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i)); + + llama_batch batch_view = { + n_tokens, + batch.token + i, + nullptr, + batch.pos + i, + batch.n_seq_id + i, + batch.seq_id + i, + batch.logits + i, + }; + + const int ret = llama_decode(ctx, batch_view); + if (ret != 0) { + LOG_TEE("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret); + return false; + } + + llama_synchronize(ctx); + } + + return true; + }; + + const unsigned int pp = params.n_ubatch; + const unsigned int tg = params.n_ubatch / 4; + + if (!params.sweep_bench_output_jsonl) { + LOG_TEE("\n"); + LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch); + LOG_TEE("\n"); + LOG_TEE("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s"); + LOG_TEE("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------"); + } + + llama_batch batch = llama_batch_init(n_kv_max, 0, 1); + + // warm up + { + llama_batch_add(batch, bos, 0, { 0 }, false); + + if (!decode_helper(ctx, batch, ctx_params.n_batch)) { + LOG_TEE("%s: llama_decode() failed\n", __func__); + return 1; + } + } + + llama_batch_clear(batch); + llama_kv_cache_clear(ctx); + + for (unsigned int n_kv = 0; n_kv < n_kv_max; n_kv += params.n_ubatch) { + // clean up KV cache before generation + llama_kv_cache_seq_rm(ctx, 0, n_kv, -1); + + // first measure token generation performance at this context size + const auto t_tg_start = ggml_time_us(); + + for (unsigned int i = 0; i < tg; ++i) { + llama_batch_clear(batch); + llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true); + + if (!decode_helper(ctx, batch, ctx_params.n_batch)) { + LOG_TEE("%s: llama_decode() failed\n", __func__); + return 1; + } + } + + const auto t_tg_end = ggml_time_us(); + + // clean up KV cache after generation + llama_kv_cache_seq_rm(ctx, 0, n_kv, -1); + + // prepare batch of pp size for prompt processing performance measurement + llama_batch_clear(batch); + + for (unsigned int i = 0; i < pp; 
++i) { + llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false); + } + batch.logits[batch.n_tokens - 1] = true; + + // measure prompt processing performance + const auto t_pp_start = ggml_time_us(); + + if (!decode_helper(ctx, batch, ctx_params.n_batch)) { + LOG_TEE("%s: llama_decode() failed\n", __func__); + return 1; + } + + const auto t_pp_end = ggml_time_us(); + + // calculate and print metrics + const float t_pp = (t_pp_end - t_pp_start) / 1000000.0f; + const float t_tg = (t_tg_end - t_tg_start) / 1000000.0f; + + const float speed_pp = pp / t_pp; + const float speed_tg = tg / t_tg; + + if(params.sweep_bench_output_jsonl) { + LOG_TEE( + "{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, " + "\"pp\": %d, \"tg\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f }\n", + n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch, + pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg + ); + } else { + LOG_TEE("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg); + } + } + + llama_batch_free(batch); + + llama_free(ctx); + llama_free_model(model); + + llama_backend_free(); + + return 0; +} From d69c3f1bceedf66fd9ecb1add951b23daaa3f0e6 Mon Sep 17 00:00:00 2001 From: "John W. Leimgruber III" Date: Sat, 26 Apr 2025 13:42:41 -0400 Subject: [PATCH 2/6] Refactor ik_llama.cpp back to llama.cpp This is based on saood06's PR https://github.com/ikawrakow/ik_llama.cpp/pull/225 --- examples/sweep-bench/sweep-bench.cpp | 116 ++++++++++++--------------- 1 file changed, 50 insertions(+), 66 deletions(-) diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp index 27510687905..b725ccd92c1 100644 --- a/examples/sweep-bench/sweep-bench.cpp +++ b/examples/sweep-bench/sweep-bench.cpp @@ -1,7 +1,9 @@ +#include "arg.h" +#include "log.h" #include "ggml.h" #include "llama.h" #include "common.h" -#include "llama-vocab.h" +#include "../src/llama-vocab.h" #ifdef _WIN32 #define WIN32_LEAN_AND_MEAN @@ -18,55 +20,49 @@ #include static void print_usage(int, char ** argv) { - LOG_TEE("\nexample usage:\n"); - LOG_TEE("\n %s -m model.gguf -c 8192 -b 2048 -ub 512\n", argv[0]); - LOG_TEE("\n"); + LOG_INF("\nexample usage:\n"); + LOG_INF("\n %s -m model.gguf -c 8192 -b 2048 -ub 512\n", argv[0]); + LOG_INF("\n"); } int main(int argc, char ** argv) { + common_params params; - gpt_params params; - - if (!gpt_params_parse(argc, argv, params)) { - print_usage(argc, argv); + if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) { return 1; } - // init LLM + common_init(); + // init LLM llama_backend_init(); llama_numa_init(params.numa); // initialize the model + common_init_result llama_init = common_init_from_params(params); - llama_model_params model_params = llama_model_params_from_gpt_params(params); - - llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); - if (model == NULL) { - fprintf(stderr , "%s: error: unable to load model\n" , __func__); + if (model == nullptr || ctx == nullptr) { + LOG_ERR("%s : failed to init\n", __func__); return 1; } - llama_context_params ctx_params = llama_context_params_from_gpt_params(params); - - llama_context * ctx = llama_new_context_with_model(model, ctx_params); - - if 
(ctx == NULL) { - fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); - return 1; + // print system information + { + LOG_INF("\n"); + LOG_INF("%s\n", common_params_get_system_info(params).c_str()); + LOG_INF("\n"); } const unsigned int n_kv_max = llama_n_ctx(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + llama_token bos = vocab->token_bos(); + const unsigned int n_vocab = llama_vocab_n_tokens(vocab); - const llama_vocab * vocab = llama_get_vocab(ctx); - llama_token bos = llama_token_bos_impl(*vocab); - //llama_token eos = llama_token_eos_impl(*vocab); - - const unsigned int n_vocab = llama_n_vocab(model); - - // decode in batches of ctx_params.n_batch tokens + // decode in batches of n_batch tokens auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch) { for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) { const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i)); @@ -83,7 +79,7 @@ int main(int argc, char ** argv) { const int ret = llama_decode(ctx, batch_view); if (ret != 0) { - LOG_TEE("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret); + LOG_INF("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret); return false; } @@ -96,42 +92,44 @@ int main(int argc, char ** argv) { const unsigned int pp = params.n_ubatch; const unsigned int tg = params.n_ubatch / 4; - if (!params.sweep_bench_output_jsonl) { - LOG_TEE("\n"); - LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch); - LOG_TEE("\n"); - LOG_TEE("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s"); - LOG_TEE("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------"); - } + const unsigned int n_threads = params.cpuparams.n_threads; + const unsigned int n_threads_batch = params.cpuparams_batch.n_threads; + const int32_t n_batch = llama_n_batch(ctx); + + LOG_INF("\n"); + LOG_INF("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, n_threads, n_threads_batch); + LOG_INF("\n"); + LOG_INF("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s"); + LOG_INF("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------"); llama_batch batch = llama_batch_init(n_kv_max, 0, 1); // warm up { - llama_batch_add(batch, bos, 0, { 0 }, false); + common_batch_add(batch, bos, 0, { 0 }, false); - if (!decode_helper(ctx, batch, ctx_params.n_batch)) { - LOG_TEE("%s: llama_decode() failed\n", __func__); + if (!decode_helper(ctx, batch, n_batch)) { + LOG_INF("%s: llama_decode() failed\n", __func__); return 1; } } - llama_batch_clear(batch); - llama_kv_cache_clear(ctx); + common_batch_clear(batch); + llama_kv_self_clear(ctx); for (unsigned int n_kv = 0; n_kv < n_kv_max; n_kv += params.n_ubatch) { // clean up KV cache before generation - llama_kv_cache_seq_rm(ctx, 0, n_kv, -1); + llama_kv_self_seq_rm(ctx, 0,n_kv, -1); // first measure token generation performance at this context size const auto t_tg_start 
= ggml_time_us(); for (unsigned int i = 0; i < tg; ++i) { - llama_batch_clear(batch); - llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true); + common_batch_clear(batch); + common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true); - if (!decode_helper(ctx, batch, ctx_params.n_batch)) { - LOG_TEE("%s: llama_decode() failed\n", __func__); + if (!decode_helper(ctx, batch, n_batch)) { + LOG_INF("%s: llama_decode() failed\n", __func__); return 1; } } @@ -139,21 +137,21 @@ int main(int argc, char ** argv) { const auto t_tg_end = ggml_time_us(); // clean up KV cache after generation - llama_kv_cache_seq_rm(ctx, 0, n_kv, -1); + llama_kv_self_seq_rm(ctx, 0, n_kv, -1); // prepare batch of pp size for prompt processing performance measurement - llama_batch_clear(batch); + common_batch_clear(batch); for (unsigned int i = 0; i < pp; ++i) { - llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false); + common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false); } batch.logits[batch.n_tokens - 1] = true; // measure prompt processing performance const auto t_pp_start = ggml_time_us(); - if (!decode_helper(ctx, batch, ctx_params.n_batch)) { - LOG_TEE("%s: llama_decode() failed\n", __func__); + if (!decode_helper(ctx, batch, n_batch)) { + LOG_INF("%s: llama_decode() failed\n", __func__); return 1; } @@ -166,23 +164,9 @@ int main(int argc, char ** argv) { const float speed_pp = pp / t_pp; const float speed_tg = tg / t_tg; - if(params.sweep_bench_output_jsonl) { - LOG_TEE( - "{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, " - "\"pp\": %d, \"tg\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f }\n", - n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch, - pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg - ); - } else { - LOG_TEE("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg); - } + LOG_INF("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg); } - llama_batch_free(batch); - - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); return 0; From 709dd6dedd261a63601b5e17ed891b95e1062bc5 Mon Sep 17 00:00:00 2001 From: "John W. Leimgruber III" Date: Sat, 3 May 2025 14:57:47 -0400 Subject: [PATCH 3/6] Use ikawrakow's adaptation of sweep-bench This patch is not my own work but taken from this gzip: https://github.com/ikawrakow/ik_llama.cpp/discussions/354 Thanks ikawrakow and saood06 for this! 
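The adapted version keeps the `--output-format` switch (`md` or `jsonl`), which is
picked out of argv before the common argument parser runs. A minimal invocation
(the model path here is only a placeholder) would look something like:

  ./llama-sweep-bench -m model.gguf -c 8192 -ub 512 --output-format jsonl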
--- examples/CMakeLists.txt | 2 - examples/sweep-bench/sweep-bench.cpp | 136 ++++++++++++++++++++------- 2 files changed, 101 insertions(+), 37 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index bad1b60d7ab..db25d84c46e 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -32,8 +32,6 @@ else() add_subdirectory(speculative) add_subdirectory(speculative-simple) add_subdirectory(sweep-bench) - add_subdirectory(tokenize) - add_subdirectory(tts) add_subdirectory(gen-docs) add_subdirectory(training) add_subdirectory(diffusion) diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp index b725ccd92c1..8df12f59736 100644 --- a/examples/sweep-bench/sweep-bench.cpp +++ b/examples/sweep-bench/sweep-bench.cpp @@ -1,9 +1,10 @@ +#include "common.h" #include "arg.h" -#include "log.h" #include "ggml.h" #include "llama.h" #include "common.h" -#include "../src/llama-vocab.h" +//#include "llama-vocab.h" +#include "log.h" #ifdef _WIN32 #define WIN32_LEAN_AND_MEAN @@ -20,49 +21,96 @@ #include static void print_usage(int, char ** argv) { - LOG_INF("\nexample usage:\n"); - LOG_INF("\n %s -m model.gguf -c 8192 -b 2048 -ub 512\n", argv[0]); - LOG_INF("\n"); + LOG("\nexample usage:\n"); + LOG("\n %s -m model.gguf -c 8192 -b 2048 -ub 512\n", argv[0]); + LOG("\n"); } int main(int argc, char ** argv) { - common_params params; - if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) { + std::vector args; + args.reserve(argc); + args.push_back(argv[0]); + + bool sweep_bench_output_jsonl = false; + + for (int i = 1; i < argc; ++i) { + std::string arg{argv[1]}; + if (arg == "--output-format") { + bool invalid_arg = false; + if (i < argc-1) { + arg = argv[++i]; + if (arg == "jsonl") sweep_bench_output_jsonl = true; + else if (arg == "md") sweep_bench_output_jsonl = false; + else invalid_arg = true; + } else { + invalid_arg = true; + } + if (invalid_arg) { + LOG("Invalid arg"); return 1; + } + } else { + args.push_back(argv[i]); + } + } + + common_params params; + if (!common_params_parse(args.size(), args.data(), params, LLAMA_EXAMPLE_BENCH, print_usage)) { return 1; } common_init(); + //gpt_params params; + + //if (!gpt_params_parse(argc, argv, params)) { + // print_usage(argc, argv); + // return 1; + //} + // init LLM + llama_backend_init(); llama_numa_init(params.numa); // initialize the model - common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model.get(); - llama_context * ctx = llama_init.context.get(); + //llama_model_params model_params = llama_model_params_from_gpt_params(params); + llama_model_params model_params = common_model_params_to_llama(params); + + //llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); + llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params); - if (model == nullptr || ctx == nullptr) { - LOG_ERR("%s : failed to init\n", __func__); + if (model == NULL) { + fprintf(stderr , "%s: error: unable to load model\n" , __func__); return 1; } - // print system information - { - LOG_INF("\n"); - LOG_INF("%s\n", common_params_get_system_info(params).c_str()); - LOG_INF("\n"); + //llama_context_params ctx_params = llama_context_params_from_gpt_params(params); + llama_context_params ctx_params = common_context_params_to_llama(params); + + //llama_context * ctx = llama_new_context_with_model(model, ctx_params); + llama_context * ctx = llama_init_from_model(model, ctx_params); + + if (ctx == NULL) 
{ + fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); + return 1; } const unsigned int n_kv_max = llama_n_ctx(ctx); - const llama_vocab * vocab = llama_model_get_vocab(model); - llama_token bos = vocab->token_bos(); - const unsigned int n_vocab = llama_vocab_n_tokens(vocab); - // decode in batches of n_batch tokens + auto vocab = llama_model_get_vocab(model); + auto n_vocab = llama_vocab_n_tokens(vocab); + auto bos = llama_vocab_bos(vocab); + + //const llama_vocab * vocab = llama_get_vocab(ctx); + //llama_token bos = llama_token_bos_impl(*vocab); + //llama_token eos = llama_token_eos_impl(*vocab); + + //const unsigned int n_vocab = llama_n_vocab(model); + + // decode in batches of ctx_params.n_batch tokens auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch) { for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) { const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i)); @@ -92,34 +140,34 @@ int main(int argc, char ** argv) { const unsigned int pp = params.n_ubatch; const unsigned int tg = params.n_ubatch / 4; - const unsigned int n_threads = params.cpuparams.n_threads; - const unsigned int n_threads_batch = params.cpuparams_batch.n_threads; - const int32_t n_batch = llama_n_batch(ctx); - - LOG_INF("\n"); - LOG_INF("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, n_threads, n_threads_batch); - LOG_INF("\n"); - LOG_INF("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s"); - LOG_INF("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------"); + if (!sweep_bench_output_jsonl) { + LOG_INF("\n"); + LOG_INF("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch); + LOG_INF("\n"); + LOG_INF("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s"); + LOG_INF("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------"); + } llama_batch batch = llama_batch_init(n_kv_max, 0, 1); // warm up { common_batch_add(batch, bos, 0, { 0 }, false); + //llama_batch_add(batch, bos, 0, { 0 }, false); - if (!decode_helper(ctx, batch, n_batch)) { + if (!decode_helper(ctx, batch, ctx_params.n_batch)) { LOG_INF("%s: llama_decode() failed\n", __func__); return 1; } } common_batch_clear(batch); + //llama_batch_clear(batch); llama_kv_self_clear(ctx); for (unsigned int n_kv = 0; n_kv < n_kv_max; n_kv += params.n_ubatch) { // clean up KV cache before generation - llama_kv_self_seq_rm(ctx, 0,n_kv, -1); + llama_kv_self_seq_rm(ctx, 0, n_kv, -1); // first measure token generation performance at this context size const auto t_tg_start = ggml_time_us(); @@ -127,8 +175,10 @@ int main(int argc, char ** argv) { for (unsigned int i = 0; i < tg; ++i) { common_batch_clear(batch); common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true); + //llama_batch_clear(batch); + //llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true); - if (!decode_helper(ctx, batch, n_batch)) { + if (!decode_helper(ctx, batch, 
ctx_params.n_batch)) { LOG_INF("%s: llama_decode() failed\n", __func__); return 1; } @@ -141,16 +191,18 @@ int main(int argc, char ** argv) { // prepare batch of pp size for prompt processing performance measurement common_batch_clear(batch); + //llama_batch_clear(batch); for (unsigned int i = 0; i < pp; ++i) { common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false); + //llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false); } batch.logits[batch.n_tokens - 1] = true; // measure prompt processing performance const auto t_pp_start = ggml_time_us(); - if (!decode_helper(ctx, batch, n_batch)) { + if (!decode_helper(ctx, batch, ctx_params.n_batch)) { LOG_INF("%s: llama_decode() failed\n", __func__); return 1; } @@ -164,9 +216,23 @@ int main(int argc, char ** argv) { const float speed_pp = pp / t_pp; const float speed_tg = tg / t_tg; - LOG_INF("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg); + if(sweep_bench_output_jsonl) { + LOG_INF( + "{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, " + "\"pp\": %d, \"tg\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f }\n", + n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch, + pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg + ); + } else { + LOG_INF("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg); + } } + llama_batch_free(batch); + + llama_free(ctx); + llama_model_free(model); + llama_backend_free(); return 0; From 1f9a72c77584055f21e9335dbf3d8f9c7723f30a Mon Sep 17 00:00:00 2001 From: "John W. Leimgruber III" Date: Wed, 7 May 2025 17:25:57 -0400 Subject: [PATCH 4/6] Adapt batch warmup to sweep-bench From https://github.com/ikawrakow/ik_llama.cpp/pull/375 Hardcoded to true to always run to avoid adding more arguments. --- examples/sweep-bench/sweep-bench.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp index 8df12f59736..4a36c480c97 100644 --- a/examples/sweep-bench/sweep-bench.cpp +++ b/examples/sweep-bench/sweep-bench.cpp @@ -161,6 +161,25 @@ int main(int argc, char ** argv) { } } + // Adapted into mainline from original PR: https://github.com/ikawrakow/ik_llama.cpp/pull/375 + //if (params.batch_warmup) { + if (true) { + // clean up KV cache after generation + llama_kv_self_clear(ctx); + + // prepare batch of pp size for prompt processing performance measurement + common_batch_clear(batch); + + for (unsigned int i = 0; i < (unsigned int)params.n_ubatch; ++i) { + common_batch_add(batch, std::rand() % n_vocab, i, { 0 }, false); + } + + if (!decode_helper(ctx, batch, ctx_params.n_ubatch)) { + LOG_INF("%s: llama_decode() failed\n", __func__); + return 1; + } + } + common_batch_clear(batch); //llama_batch_clear(batch); llama_kv_self_clear(ctx); From 9f7a068d1456263067a760ee02e59a04216a6a91 Mon Sep 17 00:00:00 2001 From: "John W. Leimgruber III" Date: Fri, 11 Jul 2025 15:10:21 -0400 Subject: [PATCH 5/6] Refactor llama_get_memory(ctx) and llama_memory_ API. 
Due to changes with `14030` --- examples/sweep-bench/sweep-bench.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp index 4a36c480c97..41b00a71e73 100644 --- a/examples/sweep-bench/sweep-bench.cpp +++ b/examples/sweep-bench/sweep-bench.cpp @@ -91,6 +91,7 @@ int main(int argc, char ** argv) { //llama_context * ctx = llama_new_context_with_model(model, ctx_params); llama_context * ctx = llama_init_from_model(model, ctx_params); + auto * mem = llama_get_memory(ctx); if (ctx == NULL) { fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); @@ -165,7 +166,9 @@ int main(int argc, char ** argv) { //if (params.batch_warmup) { if (true) { // clean up KV cache after generation - llama_kv_self_clear(ctx); + // llama_kv_self_clear(ctx); + llama_memory_clear(mem, true); + // prepare batch of pp size for prompt processing performance measurement common_batch_clear(batch); @@ -182,11 +185,13 @@ int main(int argc, char ** argv) { common_batch_clear(batch); //llama_batch_clear(batch); - llama_kv_self_clear(ctx); + //llama_kv_self_clear(ctx); + llama_memory_clear(mem, true); for (unsigned int n_kv = 0; n_kv < n_kv_max; n_kv += params.n_ubatch) { // clean up KV cache before generation - llama_kv_self_seq_rm(ctx, 0, n_kv, -1); + //llama_kv_self_seq_rm(ctx, 0, n_kv, -1); + llama_memory_seq_rm(mem, 0, n_kv, -1); // first measure token generation performance at this context size const auto t_tg_start = ggml_time_us(); @@ -206,7 +211,8 @@ int main(int argc, char ** argv) { const auto t_tg_end = ggml_time_us(); // clean up KV cache after generation - llama_kv_self_seq_rm(ctx, 0, n_kv, -1); + //llama_kv_self_seq_rm(ctx, 0, n_kv, -1); + llama_memory_seq_rm(mem, 0, n_kv, -1); // prepare batch of pp size for prompt processing performance measurement common_batch_clear(batch); From e46e9b36f2ac7e7288081e7398b124a28f6336e0 Mon Sep 17 00:00:00 2001 From: "John W. Leimgruber III" Date: Tue, 2 Sep 2025 16:06:06 -0400 Subject: [PATCH 6/6] Update FA argument. Thanks @Thireus and @AesSedai Behavior of mainline llama.cpp `-fa` changed and now *requires* an argument of `on` or `1` it seems to enable flash attenion explicitly. This diverges from ik_llama.cpp behavior which omitting it is disabled, however on mainline that means `auto` which means "probably enabled" I believe. Details here: https://github.com/ggml-org/llama.cpp/pull/15434 This patch just changes all `s/flash_attn/flash_attn_type/g`. 
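For side-by-side sweeps against ik_llama.cpp builds it is therefore safest to spell
the flag out explicitly on mainline, e.g. something like:

  ./llama-sweep-bench -m model.gguf -c 8704 -ub 512 -fa on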
--- examples/sweep-bench/sweep-bench.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp index 41b00a71e73..66f8de1d0e3 100644 --- a/examples/sweep-bench/sweep-bench.cpp +++ b/examples/sweep-bench/sweep-bench.cpp @@ -143,7 +143,7 @@ int main(int argc, char ** argv) { if (!sweep_bench_output_jsonl) { LOG_INF("\n"); - LOG_INF("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch); + LOG_INF("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn_type = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn_type, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch); LOG_INF("\n"); LOG_INF("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s"); LOG_INF("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------"); @@ -243,9 +243,9 @@ int main(int argc, char ** argv) { if(sweep_bench_output_jsonl) { LOG_INF( - "{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, " + "{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn_type\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, " "\"pp\": %d, \"tg\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f }\n", - n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch, + n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn_type, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch, pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg ); } else {