diff --git a/.github/actions/windows-setup-cuda/action.yml b/.github/actions/windows-setup-cuda/action.yml index 5575caeca31..deb7d83e8d3 100644 --- a/.github/actions/windows-setup-cuda/action.yml +++ b/.github/actions/windows-setup-cuda/action.yml @@ -36,32 +36,32 @@ runs: echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 echo "CUDA_PATH_V11_7=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 - - name: Install Cuda Toolkit 12.4 - if: ${{ inputs.cuda_version == '12.4' }} + - name: Install Cuda Toolkit 12.8 + if: ${{ inputs.cuda_version == '12.8' }} shell: pwsh run: | - mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" + mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" choco install unzip -y - curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-12.4.127-archive.zip" - curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-12.4.131-archive.zip" - curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-12.4.127-archive.zip" - curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-12.4.5.8-archive.zip" - curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-12.4.127-archive.zip" - curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_profiler_api/windows-x86_64/cuda_profiler_api-windows-x86_64-12.4.127-archive.zip" - curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-12.4.127-archive.zip" - curl -O 
"https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-12.4.127-archive.zip" - curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-12.4.127-archive.zip" - unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cudart-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvcc-windows-x86_64-12.4.131-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvrtc-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libcublas-windows-x86_64-12.4.5.8-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvtx-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_profiler_api-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\visual_studio_integration-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvprof-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y - xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cccl-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing 
Toolkit\CUDA\v12.4" /E /I /H /Y - echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 - echo "CUDA_PATH_V12_4=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-12.8.90-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-12.8.93-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-12.8.93-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-12.8.4.1-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-12.8.90-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_profiler_api/windows-x86_64/cuda_profiler_api-windows-x86_64-12.8.90-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-12.8.90-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-12.8.90-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-12.8.90-archive.zip" + unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" + xcopy "C:\Program 
Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\cuda_cudart-windows-x86_64-12.8.90-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\cuda_nvcc-windows-x86_64-12.8.93-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\cuda_nvrtc-windows-x86_64-12.8.93-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\libcublas-windows-x86_64-12.8.4.1-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\cuda_nvtx-windows-x86_64-12.8.90-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\cuda_profiler_api-windows-x86_64-12.8.90-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\visual_studio_integration-windows-x86_64-12.8.90-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\cuda_nvprof-windows-x86_64-12.8.90-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\cuda_cccl-windows-x86_64-12.8.90-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" /E /I /H /Y + echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" | Out-File 
-FilePath $env:GITHUB_ENV -Append -Encoding utf8 + echo "CUDA_PATH_V12_8=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.8" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 43553ac13bd..331d49fac20 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -969,7 +969,7 @@ jobs: strategy: matrix: - cuda: ['12.4'] + cuda: ['12.8'] steps: - name: Clone diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5367637e428..a9f9a9ef911 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,7 +18,7 @@ concurrency: env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} - CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=OFF -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON" + CMAKE_ARGS: "-DLLAMA_BUILD_EXAMPLES=ON -DLLAMA_BUILD_TESTS=OFF -DLLAMA_BUILD_TOOLS=ON -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON" jobs: macOS-arm64: @@ -390,7 +390,7 @@ jobs: strategy: matrix: - cuda: ['12.4'] + cuda: ['12.8'] steps: - name: Clone diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 11ff38762b8..8c3121571ec 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -31,6 +31,7 @@ else() add_subdirectory(simple-chat) add_subdirectory(speculative) add_subdirectory(speculative-simple) + add_subdirectory(sweep-bench) add_subdirectory(gen-docs) add_subdirectory(training) add_subdirectory(diffusion) diff --git a/examples/sweep-bench/CMakeLists.txt b/examples/sweep-bench/CMakeLists.txt new file mode 100644 index 00000000000..e49f0fea02a --- /dev/null +++ b/examples/sweep-bench/CMakeLists.txt @@ -0,0 +1,5 @@ +set(TARGET llama-sweep-bench) +add_executable(${TARGET} sweep-bench.cpp) +install(TARGETS ${TARGET} RUNTIME) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git 
a/examples/sweep-bench/README.md b/examples/sweep-bench/README.md new file mode 100644 index 00000000000..d92740de221 --- /dev/null +++ b/examples/sweep-bench/README.md @@ -0,0 +1,65 @@ +# ik_llama.cpp/example/sweep-bench + +Benchmark the prompt processing and token generation performance of `ik_llama.cpp` +by doing a sweep over a whole context size and gathering performance metrics +in each ubatch-sized window. Only a single token sequence is used. + +The benchmark steps are: + +for each ubatch-sized window in context: + + 1. generate ubatch/4 tokens (not the whole window to save some time) + 2. measure generation performance + 3. remove generated tokens from KV cache + 4. prepare a ubatch-sized batch of random tokens + 5. process prepared batch + 6. measure prompt processing performance + +The purpose of the benchmark is to visualize how the performance changes with +the context size without averaging the metrics values over the whole context. + +## Usage + +./llama-sweep-bench -c 8704 -ub 512 -m models/Meta-Llama-3.2-3B-Instruct-Q8_0.gguf + +## Sample results + +- `PP` - prompt tokens per ubatch +- `TG` - generated tokens per ubatch +- `N_KV` - current KV cache size +- `T_PP` - prompt processing time (i.e.
time to first token) +- `S_PP` - prompt processing speed (`(B*PP)/T_PP` or `PP/T_PP`) +- `T_TG` - time to generate all batches +- `S_TG` - text generation speed (`(B*TG)/T_TG`) + +| PP | TG | N_KV | T_PP s | S_PP t/s | T_TG s | S_TG t/s | +|-------|--------|--------|----------|----------|----------|----------| +| 512 | 128 | 0 | 1.100 | 465.51 | 2.311 | 55.38 | +| 512 | 128 | 512 | 1.183 | 432.97 | 1.895 | 67.55 | +| 512 | 128 | 1024 | 1.305 | 392.38 | 2.071 | 61.81 | +| 512 | 128 | 1536 | 1.279 | 400.42 | 2.164 | 59.14 | +| 512 | 128 | 2048 | 1.571 | 325.96 | 2.280 | 56.14 | +| 512 | 128 | 2560 | 1.431 | 357.87 | 2.418 | 52.94 | +| 512 | 128 | 3072 | 1.515 | 337.93 | 2.566 | 49.88 | +| 512 | 128 | 3584 | 1.588 | 322.34 | 2.722 | 47.03 | +| 512 | 128 | 4096 | 1.675 | 305.70 | 2.864 | 44.69 | +| 512 | 128 | 4608 | 1.769 | 289.50 | 2.999 | 42.68 | +| 512 | 128 | 5120 | 1.845 | 277.48 | 3.102 | 41.26 | +| 512 | 128 | 5632 | 1.893 | 270.46 | 3.219 | 39.76 | +| 512 | 128 | 6144 | 1.953 | 262.20 | 3.348 | 38.23 | +| 512 | 128 | 6656 | 2.018 | 253.71 | 3.474 | 36.84 | +| 512 | 128 | 7168 | 2.078 | 246.34 | 3.589 | 35.66 | +| 512 | 128 | 7680 | 2.140 | 239.22 | 3.717 | 34.43 | +| 512 | 128 | 8192 | 2.196 | 233.15 | 3.854 | 33.21 | + +### JSONL output + +Pass `--output-format jsonl` to output JSONL instead of Markdown, รก la + +```json lines +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 0, "t_pp": 1.093814, "speed_pp": 468.086884, "t_tg": 1.780312, "speed_tg": 71.897514 } +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 512, "t_pp": 1.169302, "speed_pp": 437.868073, "t_tg": 1.897474, "speed_tg": 67.458099 } +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, 
"tg": 128, "n_kv": 1024, "t_pp": 1.183700, "speed_pp": 432.542053, "t_tg": 2.059179, "speed_tg": 62.160694 } +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 1536, "t_pp": 1.428625, "speed_pp": 358.386566, "t_tg": 2.160639, "speed_tg": 59.241734 } +{"n_kv_max": 8704, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "n_gpu_layers": -1, "n_threads": 32, "n_threads_batch": 32, "pp": 512, "tg": 128, "n_kv": 2048, "t_pp": 1.360647, "speed_pp": 376.291595, "t_tg": 2.274003, "speed_tg": 56.288403 } +``` diff --git a/examples/sweep-bench/sweep-bench-plot.py b/examples/sweep-bench/sweep-bench-plot.py new file mode 100755 index 00000000000..481a604c257 --- /dev/null +++ b/examples/sweep-bench/sweep-bench-plot.py @@ -0,0 +1,118 @@ +import pandas as pd +import matplotlib.pyplot as plt +import numpy as np +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('file', nargs='+') +args = parser.parse_args() + +df = None + +#for jsonl_file in args.file: +# # Read JSONL file into DataFrame +# df_part = pd.read_json(jsonl_file, lines=True) +# df_part['label'] = jsonl_file +# if df is None: +# df = df_part +# else: +# df = pd.concat([df, df_part]) +# + + + +for md_file in args.file: + # Read markdown table file into DataFrame + df_part = pd.read_csv(md_file, sep=r'\s*\|\s*', engine='python', + header=0, skiprows=[1]) + + # Clean up columns (remove empty columns from markdown formatting) + df_part = df_part.iloc[:, 1:-1] + df_part.columns = [col.strip() for col in df_part.columns] + + # Rename columns to match expected names + df_part = df_part.rename(columns={ + 'N_KV': 'n_kv', + 'S_PP t/s': 'speed_pp', + 'S_TG t/s': 'speed_tg' + }) + + # Convert to numeric types + df_part['n_kv'] = pd.to_numeric(df_part['n_kv']) + df_part['speed_pp'] = pd.to_numeric(df_part['speed_pp']) + df_part['speed_tg'] = pd.to_numeric(df_part['speed_tg']) + + # Add label and 
append to main DataFrame + df_part['label'] = md_file + df = pd.concat([df, df_part]) if df is not None else df_part + +# Group by label and n_kv, calculate mean and std for both speed metrics +df_grouped = df.groupby(['label', 'n_kv']).agg({ + 'speed_pp': ['mean', 'std'], + 'speed_tg': ['mean', 'std'] +}).reset_index() + +# Flatten multi-index columns +df_grouped.columns = ['label', 'n_kv', 'speed_pp_mean', 'speed_pp_std', + 'speed_tg_mean', 'speed_tg_std'] + +# Replace NaN with 0 (std for a single sample is NaN) +df_grouped['speed_pp_std'] = df_grouped['speed_pp_std'].fillna(0) +df_grouped['speed_tg_std'] = df_grouped['speed_tg_std'].fillna(0) + +# Prepare ticks values for X axis (prune for readability) +x_ticks = df['n_kv'].unique() +while len(x_ticks) > 16: + x_ticks = x_ticks[::2] + +# Get unique labels and color map +labels = df_grouped['label'].unique() +colors = plt.cm.rainbow(np.linspace(0, 1, len(labels))) + +# Create prompt processing plot +plt.figure(figsize=(10, 6)) +ax1 = plt.gca() +plt.grid() +ax1.set_xticks(x_ticks) + +# Plot each label's data +for label, color in zip(labels, colors): + label_data = df_grouped[df_grouped['label'] == label].sort_values('n_kv') + pp = ax1.errorbar(label_data['n_kv'], label_data['speed_pp_mean'], + yerr=label_data['speed_pp_std'], color=color, + marker='o', linestyle='-', label=label) + +# Add labels and title +ax1.set_xlabel('Context Length (tokens)') +ax1.set_ylabel('Prompt Processing Rate (t/s)') +plt.title('Prompt Processing Performance Comparison') +ax1.legend(loc='upper right') + +# Adjust layout and save +plt.tight_layout() +plt.savefig('performance_comparison_pp.png', bbox_inches='tight') +plt.close() + +# Create token generation plot +plt.figure(figsize=(10, 6)) +ax1 = plt.gca() +plt.grid() +ax1.set_xticks(x_ticks) + +# Plot each model's data +for label, color in zip(labels, colors): + label_data = df_grouped[df_grouped['label'] == label].sort_values('n_kv') + tg = ax1.errorbar(label_data['n_kv'], 
label_data['speed_tg_mean'], + yerr=label_data['speed_tg_std'], color=color, + marker='s', linestyle='-', label=label) + +# Add labels and title +ax1.set_xlabel('Context Length (n_kv)') +ax1.set_ylabel('Token Generation Rate (t/s)') +plt.title('Token Generation Performance Comparison') +ax1.legend(loc='upper right') + +# Adjust layout and save +plt.tight_layout() +plt.savefig('performance_comparison_tg.png', bbox_inches='tight') +plt.close() diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp new file mode 100644 index 00000000000..41b00a71e73 --- /dev/null +++ b/examples/sweep-bench/sweep-bench.cpp @@ -0,0 +1,264 @@ +#include "common.h" +#include "arg.h" +#include "ggml.h" +#include "llama.h" +#include "common.h" +//#include "llama-vocab.h" +#include "log.h" + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#ifndef NOMINMAX +# define NOMINMAX +#endif +#include +#endif + +#include +#include +#include +#include +#include + +static void print_usage(int, char ** argv) { + LOG("\nexample usage:\n"); + LOG("\n %s -m model.gguf -c 8192 -b 2048 -ub 512\n", argv[0]); + LOG("\n"); +} + +int main(int argc, char ** argv) { + + std::vector args; + args.reserve(argc); + args.push_back(argv[0]); + + bool sweep_bench_output_jsonl = false; + + for (int i = 1; i < argc; ++i) { + std::string arg{argv[1]}; + if (arg == "--output-format") { + bool invalid_arg = false; + if (i < argc-1) { + arg = argv[++i]; + if (arg == "jsonl") sweep_bench_output_jsonl = true; + else if (arg == "md") sweep_bench_output_jsonl = false; + else invalid_arg = true; + } else { + invalid_arg = true; + } + if (invalid_arg) { + LOG("Invalid arg"); return 1; + } + } else { + args.push_back(argv[i]); + } + } + + common_params params; + if (!common_params_parse(args.size(), args.data(), params, LLAMA_EXAMPLE_BENCH, print_usage)) { + return 1; + } + + common_init(); + + //gpt_params params; + + //if (!gpt_params_parse(argc, argv, params)) { + // print_usage(argc, argv); + // 
return 1; + //} + + // init LLM + + llama_backend_init(); + llama_numa_init(params.numa); + + // initialize the model + + //llama_model_params model_params = llama_model_params_from_gpt_params(params); + llama_model_params model_params = common_model_params_to_llama(params); + + //llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); + llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params); + + if (model == NULL) { + fprintf(stderr , "%s: error: unable to load model\n" , __func__); + return 1; + } + + //llama_context_params ctx_params = llama_context_params_from_gpt_params(params); + llama_context_params ctx_params = common_context_params_to_llama(params); + + //llama_context * ctx = llama_new_context_with_model(model, ctx_params); + llama_context * ctx = llama_init_from_model(model, ctx_params); + auto * mem = llama_get_memory(ctx); + + if (ctx == NULL) { + fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); + return 1; + } + + const unsigned int n_kv_max = llama_n_ctx(ctx); + + + auto vocab = llama_model_get_vocab(model); + auto n_vocab = llama_vocab_n_tokens(vocab); + auto bos = llama_vocab_bos(vocab); + + //const llama_vocab * vocab = llama_get_vocab(ctx); + //llama_token bos = llama_token_bos_impl(*vocab); + //llama_token eos = llama_token_eos_impl(*vocab); + + //const unsigned int n_vocab = llama_n_vocab(model); + + // decode in batches of ctx_params.n_batch tokens + auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch) { + for (int32_t i = 0; i < (int32_t) batch.n_tokens; i += n_batch) { + const int32_t n_tokens = std::min(n_batch, (int32_t) (batch.n_tokens - i)); + + llama_batch batch_view = { + n_tokens, + batch.token + i, + nullptr, + batch.pos + i, + batch.n_seq_id + i, + batch.seq_id + i, + batch.logits + i, + }; + + const int ret = llama_decode(ctx, batch_view); + if (ret != 0) { + LOG_INF("failed to decode the batch, n_batch = 
%d, ret = %d\n", n_batch, ret); + return false; + } + + llama_synchronize(ctx); + } + + return true; + }; + + const unsigned int pp = params.n_ubatch; + const unsigned int tg = params.n_ubatch / 4; + + if (!sweep_bench_output_jsonl) { + LOG_INF("\n"); + LOG_INF("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch); + LOG_INF("\n"); + LOG_INF("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s"); + LOG_INF("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------"); + } + + llama_batch batch = llama_batch_init(n_kv_max, 0, 1); + + // warm up + { + common_batch_add(batch, bos, 0, { 0 }, false); + //llama_batch_add(batch, bos, 0, { 0 }, false); + + if (!decode_helper(ctx, batch, ctx_params.n_batch)) { + LOG_INF("%s: llama_decode() failed\n", __func__); + return 1; + } + } + + // Adapted into mainline from original PR: https://github.com/ikawrakow/ik_llama.cpp/pull/375 + //if (params.batch_warmup) { + if (true) { + // clean up KV cache after generation + // llama_kv_self_clear(ctx); + llama_memory_clear(mem, true); + + + // prepare batch of pp size for prompt processing performance measurement + common_batch_clear(batch); + + for (unsigned int i = 0; i < (unsigned int)params.n_ubatch; ++i) { + common_batch_add(batch, std::rand() % n_vocab, i, { 0 }, false); + } + + if (!decode_helper(ctx, batch, ctx_params.n_ubatch)) { + LOG_INF("%s: llama_decode() failed\n", __func__); + return 1; + } + } + + common_batch_clear(batch); + //llama_batch_clear(batch); + //llama_kv_self_clear(ctx); + llama_memory_clear(mem, true); + + for (unsigned int n_kv = 0; n_kv < n_kv_max; n_kv += params.n_ubatch) { + // clean up KV cache before generation + 
//llama_kv_self_seq_rm(ctx, 0, n_kv, -1); + llama_memory_seq_rm(mem, 0, n_kv, -1); + + // first measure token generation performance at this context size + const auto t_tg_start = ggml_time_us(); + + for (unsigned int i = 0; i < tg; ++i) { + common_batch_clear(batch); + common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true); + //llama_batch_clear(batch); + //llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true); + + if (!decode_helper(ctx, batch, ctx_params.n_batch)) { + LOG_INF("%s: llama_decode() failed\n", __func__); + return 1; + } + } + + const auto t_tg_end = ggml_time_us(); + + // clean up KV cache after generation + //llama_kv_self_seq_rm(ctx, 0, n_kv, -1); + llama_memory_seq_rm(mem, 0, n_kv, -1); + + // prepare batch of pp size for prompt processing performance measurement + common_batch_clear(batch); + //llama_batch_clear(batch); + + for (unsigned int i = 0; i < pp; ++i) { + common_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false); + //llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, false); + } + batch.logits[batch.n_tokens - 1] = true; + + // measure prompt processing performance + const auto t_pp_start = ggml_time_us(); + + if (!decode_helper(ctx, batch, ctx_params.n_batch)) { + LOG_INF("%s: llama_decode() failed\n", __func__); + return 1; + } + + const auto t_pp_end = ggml_time_us(); + + // calculate and print metrics + const float t_pp = (t_pp_end - t_pp_start) / 1000000.0f; + const float t_tg = (t_tg_end - t_tg_start) / 1000000.0f; + + const float speed_pp = pp / t_pp; + const float speed_tg = tg / t_tg; + + if(sweep_bench_output_jsonl) { + LOG_INF( + "{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, " + "\"pp\": %d, \"tg\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f }\n", + n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, 
ctx_params.n_threads, ctx_params.n_threads_batch, + pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg + ); + } else { + LOG_INF("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg); + } + } + + llama_batch_free(batch); + + llama_free(ctx); + llama_model_free(model); + + llama_backend_free(); + + return 0; +} diff --git a/ggml/src/ggml-cuda/CMakeLists.txt b/ggml/src/ggml-cuda/CMakeLists.txt index ea824965aae..85517f06c3e 100644 --- a/ggml/src/ggml-cuda/CMakeLists.txt +++ b/ggml/src/ggml-cuda/CMakeLists.txt @@ -15,6 +15,7 @@ if (CUDAToolkit_FOUND) # 80 == Ampere, asynchronous data loading, faster tensor core instructions # 86 == RTX 3000, needs CUDA v11.1 # 89 == RTX 4000, needs CUDA v11.8 + # 120 == RTX 5000, needs CUDA v12.8 # # XX-virtual == compile CUDA code as PTX, do JIT compilation to binary code on first run # XX-real == compile CUDA code as device code for this specific architecture @@ -23,13 +24,33 @@ if (CUDAToolkit_FOUND) # The default behavior for a non-native is to build virtual architectures as needed to cover all features needed # for best performance and to also build real architectures for the most commonly used GPUs. 
if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6" AND CMAKE_VERSION VERSION_GREATER_EQUAL "3.24") + # Use the GPUs available on this system set(CMAKE_CUDA_ARCHITECTURES "native") else() - if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.8") - set(CMAKE_CUDA_ARCHITECTURES "50-virtual;61-virtual;70-virtual;75-virtual;80-virtual;86-real;89-real") + set(ARCH_LIST "") + + # Base architectures - depending on feature flags + if (GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16) + # FP16 support (Pascal and newer) + list(APPEND ARCH_LIST "60-virtual") else() - set(CMAKE_CUDA_ARCHITECTURES "50-virtual;61-virtual;70-virtual;75-virtual;80-virtual;86-real") + # Maxwell and newer + list(APPEND ARCH_LIST "50-virtual") endif() + + # Always included after base architecture assuming CUDA toolkit version is 11.1 or higher + list(APPEND ARCH_LIST "61-virtual" "70-virtual" "75-virtual" "80-virtual" "86-real") + + # Version-dependent architectures for newer GPUs + if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.8") + list(APPEND ARCH_LIST "89-real") + endif() + + if (CUDAToolkit_VERSION VERSION_GREATER_EQUAL "12.8") + list(APPEND ARCH_LIST "120-real") + endif() + + set(CMAKE_CUDA_ARCHITECTURES ${ARCH_LIST}) endif() endif() message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}") diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp index f71c40f8e3f..45fe91d224d 100644 --- a/src/llama-model-loader.cpp +++ b/src/llama-model-loader.cpp @@ -478,6 +478,17 @@ llama_model_loader::llama_model_loader( trace = atoi(getenv("LLAMA_TRACE")); } + #ifdef _WIN32 + // Cap at MSVC's hard limit of 8192 - https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/setmaxstdio?view=msvc-160 + #define _GGML_STDIO_TARGET 2048 + int _setmaxstdio_ret = _setmaxstdio(_GGML_STDIO_TARGET); + if (_setmaxstdio_ret == -1) { + LLAMA_LOG_INFO("%s: failed to set max stdio to %d. 
(setmaxstdio returned -1)\n", __func__, _GGML_STDIO_TARGET); + } else { + LLAMA_LOG_INFO("%s: max stdio successfully set to %d\n", __func__, _setmaxstdio_ret); + } + #endif // _WIN32 + if (param_overrides_p != nullptr) { for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) { kv_overrides.insert({std::string(p->key), *p});