From 583c81c91cdb679c8bf7dd08ce70de655008c0cb Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 05:11:55 +0800
Subject: [PATCH 01/13] align GEMM dispatch

---
 CMakeLists.txt |  19 +++++---
 ggml-sycl.cpp  | 123 +++++++++++++++++++++++++------------------------
 2 files changed, 76 insertions(+), 66 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index c5add8239c2..a827eda96c6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -96,8 +96,8 @@ option(LLAMA_LLAMAFILE "llama: use llamafile SGEMM"
 set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
 option(LLAMA_CUDA "llama: use CUDA" OFF)
 option(LLAMA_CUBLAS "llama: use CUDA (deprecated, use LLAMA_CUDA)" OFF)
-option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
-option(LLAMA_CUDA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF)
+option(LLAMA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
+option(LLAMA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF)
 set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
 set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
 option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some calculations" OFF)
@@ -405,10 +405,10 @@ if (LLAMA_CUDA)
         add_compile_definitions(GGML_USE_CUDA)
         add_compile_definitions(GGML_CUDA_USE_GRAPHS)
 
-        if (LLAMA_CUDA_FORCE_DMMV)
+        if (LLAMA_FORCE_DMMV)
             add_compile_definitions(GGML_CUDA_FORCE_DMMV)
         endif()
-        if (LLAMA_CUDA_FORCE_MMQ)
+        if (LLAMA_FORCE_MMQ)
             add_compile_definitions(GGML_CUDA_FORCE_MMQ)
         endif()
         if (LLAMA_CUDA_NO_VMM)
@@ -578,11 +578,11 @@ if (LLAMA_HIPBLAS)
         add_compile_definitions(GGML_HIP_UMA)
     endif()
 
-    if (LLAMA_CUDA_FORCE_DMMV)
+    if (LLAMA_FORCE_DMMV)
         add_compile_definitions(GGML_CUDA_FORCE_DMMV)
     endif()
 
-    if (LLAMA_CUDA_FORCE_MMQ)
+    if (LLAMA_FORCE_MMQ)
         add_compile_definitions(GGML_CUDA_FORCE_MMQ)
     endif()
 
@@ -628,6 +628,13 @@ if (LLAMA_SYCL)
         add_compile_definitions(GGML_SYCL_F16)
     endif()
 
+    if (LLAMA_SYCL_FORCE_DMMV)
+        add_compile_definitions(GGML_SYCL_FORCE_DMMV)
+    endif()
+    if (LLAMA_SYCL_FORCE_MMQ)
+        add_compile_definitions(GGML_SYCL_FORCE_MMQ)
+    endif()
+
     add_compile_options(-I./) #include DPCT
     add_compile_options(-I/${SYCL_INCLUDE_DIR})
 
diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 8839f775d5b..623202d9aa9 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -2971,7 +2971,7 @@ static int g_work_group_size = 0;
 // typedef sycl::half ggml_fp16_t;
 
 #define __SYCL_ARCH__ DPCT_COMPATIBILITY_TEMP
-#define VER_4VEC 610          //todo for hardward optimize.
+#define VER_4VEC 130          //todo for hardward optimize.
 #define VER_GEN9 700          //todo for hardward optimize.
 #define VER_GEN12 1000000     //todo for hardward optimize.
 #define VER_GEN13 (VER_GEN12 + 1030)   //todo for hardward optimize.
@@ -2984,7 +2984,7 @@ static int g_work_group_size = 0;
 #define SYCL_USE_XMX
 
 // max batch size to use MMQ kernels when tensor cores are available
-#define XMX_MAX_BATCH_SIZE 32
+#define MMQ_MAX_BATCH_SIZE 32
 
 
 #if defined(_MSC_VER)
@@ -15193,6 +15193,25 @@ catch (sycl::exception const &exc) {
   std::exit(1);
 }
 
+bool ggml_sycl_supports_mmq(enum ggml_type type) {
+    // TODO: accuracy issues in MMQ
+    return false;
+    // switch (type) {
+    //     case GGML_TYPE_Q4_0:
+    //     case GGML_TYPE_Q4_1:
+    //     case GGML_TYPE_Q5_0:
+    //     case GGML_TYPE_Q5_1:
+    //     case GGML_TYPE_Q8_0:
+    //     case GGML_TYPE_Q2_K:
+    //     case GGML_TYPE_Q3_K:
+    //     case GGML_TYPE_Q4_K:
+    //     case GGML_TYPE_Q5_K:
+    //     case GGML_TYPE_Q6_K:
+    //         return true;
+    //     default:
+    //         return false;
+    // }
+}
 
 static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     const bool all_on_device =
@@ -15209,76 +15228,60 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
         }
     }
 
+#if !defined(GGML_SYCL_FORCE_MMQ)
+    #define SYCL_USE_XMX
+#endif
+
 #ifdef SYCL_USE_XMX
-    const bool use_xmx = true;
+    bool use_xmx = true;
 #else
-    const bool use_xmx = false;
+    bool use_xmx = false;
 #endif
 
-    // debug helpers
-    //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
-    //printf("      %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]);
-    //printf("src1: %8d %8d %8d %8d\n", src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3]);
-    //printf("      %8d %8d %8d %8d\n", src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3]);
-    //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
-    //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
+    // check data types and tensor shapes for custom matrix multiplication kernels:
+    bool use_dequantize_mul_mat_vec = (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16)
+        && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
+        && src0->ne[0] % GGML_SYCL_DMMV_X == 0 && src1->ne[1] == 1;
+
+    bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
+        && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
+        && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
+
+    bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type)
+        && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
+
+    // fp16 performance always better on gen12+
+    const bool fp16_performance_good = true;
 
-    if (!split && all_on_device && !use_xmx && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+    // mmvq and mmq need the __dp4a instruction which is available for gen12+
+    use_mul_mat_vec_q = use_mul_mat_vec_q; // Check dp4a
+    // Workaround in https://github.com/ggerganov/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e
+    use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
+#ifdef SYCL_USE_XMX
+    use_mul_mat_q = use_mul_mat_q && (!fp16_performance_good || src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
+#endif // SYCL_USE_XMX
+
+#infdef GGML_SYCL_FORCE_DMMV
+    use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q;
+#endif // GGML_SYCL_FORCE_DMMV
+
+    if (!split && !fp16_performance_good && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         // KQ single-batch
-        // GGML_SYCL_DEBUG("ggml_sycl_mul_mat_vec_p021\n");
         ggml_sycl_mul_mat_vec_p021(src0, src1, dst);
-    } else if (!split && all_on_device && !use_xmx && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
+    } else if (!split && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
-        // GGML_SYCL_DEBUG("ggml_sycl_mul_mat_vec_nc\n");
         ggml_sycl_mul_mat_vec_nc(src0, src1, dst);
-    } else if (!split && all_on_device && use_xmx && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+    } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || fp16_performance_good) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
         // KQ + KQV multi-batch
-        // GGML_SYCL_DEBUG("ggml_sycl_mul_mat_batched_sycl\n");
         ggml_sycl_mul_mat_batched_sycl(src0, src1, dst);
-    } else if (src0->type == GGML_TYPE_F32) {
-        // GGML_SYCL_DEBUG("ggml_sycl_op_mul_mat\n");
-        ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_sycl, false);
-    } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
-        // GGML_SYCL_DEBUG("ggml_is_quantized or GGML_TYPE_F16\n");
-        if (src1->ne[1] == 1 && src0->ne[0] % GGML_SYCL_DMMV_X == 0) {
-#ifdef GGML_SYCL_FORCE_DMMV
-            const bool use_mul_mat_vec_q = false;
-#else
-            bool use_mul_mat_vec_q = min_compute_capability >= VER_4VEC && ggml_is_quantized(src0->type);
-            use_mul_mat_vec_q = use_mul_mat_vec_q ||
-                (src0->type == GGML_TYPE_IQ2_XXS) || (src0->type == GGML_TYPE_IQ2_XS) || (src0->type == GGML_TYPE_IQ2_S) ||
-                (src0->type == GGML_TYPE_IQ3_XXS) || (src0->type == GGML_TYPE_IQ3_S) ||
-                (src0->type == GGML_TYPE_IQ4_NL) || (src0->type == GGML_TYPE_IQ4_XS) ||
-                (src0->type == GGML_TYPE_IQ1_S) || (src0->type == GGML_TYPE_IQ1_M);
-
-
-#endif // GGML_SYCL_FORCE_DMMV
-
-            if (use_mul_mat_vec_q) {
-                // GGML_SYCL_DEBUG("ggml_sycl_mul_mat ggml_sycl_op_mul_mat_vec_q path\n");
-                ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_vec_q, true);
-            } else {
-                // GGML_SYCL_DEBUG("ggml_sycl_mul_mat ggml_sycl_op_dequantize_mul_mat_vec path\n");
-                ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_dequantize_mul_mat_vec, false);
-            }
-        } else {
-            bool use_mul_mat_q = min_compute_capability >= VER_4VEC && ggml_is_quantized(src0->type);
-            use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
-
-            if (use_xmx && min_compute_capability >= VER_GEN9 && src1->ne[1] > XMX_MAX_BATCH_SIZE) {
-                use_mul_mat_q = false;
-            }
-
-            if (use_mul_mat_q) {
-                // GGML_SYCL_DEBUG("ggml_sycl_mul_mat ggml_sycl_op_mul_mat_q path\n");
-                ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_q, true);
-            } else {
-                // GGML_SYCL_DEBUG("ggml_sycl_mul_mat ggml_sycl_op_mul_mat_sycl path\n");
-                ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_sycl, false);
-            }
-        }
+    } else if (use_dequantize_mul_mat_vec) {
+        ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_dequantize_mul_mat_vec, false);
+    } else if (use_mul_mat_vec_q) {
+        ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_vec_q, true);
+    } else if (use_mul_mat_q) {
+        ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_q, true);
     } else {
-        GGML_ASSERT(false);
+        ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_sycl, false);
     }
 }
From abe594a0589689b0e3e65a0bff10677652782411 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 06:39:21 +0800
Subject: [PATCH 02/13] fix typo

---
 CMakeLists.txt |  8 ++++----
 ggml-sycl.cpp  | 11 +++--------
 2 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index a827eda96c6..4c585c2d7ad 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -96,8 +96,8 @@ option(LLAMA_LLAMAFILE "llama: use llamafile SGEMM"
 set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
 option(LLAMA_CUDA "llama: use CUDA" OFF)
 option(LLAMA_CUBLAS "llama: use CUDA (deprecated, use LLAMA_CUDA)" OFF)
-option(LLAMA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
-option(LLAMA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF)
+option(LLAMA_FORCE_DMMV "llama: use dmmv instead of mmvq kernels on GPU" OFF)
+option(LLAMA_FORCE_MMQ "llama: use mmq kernels instead of Math Lib" OFF)
 set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
 set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
 option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some calculations" OFF)
@@ -628,10 +628,10 @@ if (LLAMA_SYCL)
         add_compile_definitions(GGML_SYCL_F16)
     endif()
 
-    if (LLAMA_SYCL_FORCE_DMMV)
+    if (LLAMA_FORCE_DMMV)
         add_compile_definitions(GGML_SYCL_FORCE_DMMV)
     endif()
-    if (LLAMA_SYCL_FORCE_MMQ)
+    if (LLAMA_FORCE_MMQ)
         add_compile_definitions(GGML_SYCL_FORCE_MMQ)
     endif()
 
diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 623202d9aa9..47dbfcde333 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -2978,10 +2978,9 @@ static int g_work_group_size = 0;
 
 #define GGML_SYCL_MAX_NODES 8192 //TODO: adapt to hardwares
 
-
-//define for XMX in Intel GPU
-//TODO: currently, it's not used for XMX really.
-#define SYCL_USE_XMX
+#if !defined(GGML_SYCL_FORCE_MMQ)
+    #define SYCL_USE_XMX
+#endif
 
 // max batch size to use MMQ kernels when tensor cores are available
 #define MMQ_MAX_BATCH_SIZE 32
@@ -15228,10 +15227,6 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
         }
     }
 
-#if !defined(GGML_SYCL_FORCE_MMQ)
-    #define SYCL_USE_XMX
-#endif
-
 #ifdef SYCL_USE_XMX
     bool use_xmx = true;
 #else

From 19dc47c064ce8b14bfd58e631cf3a73fe0d40d33 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 06:41:11 +0800
Subject: [PATCH 03/13] remove useless use_xmx

---
 ggml-sycl.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 47dbfcde333..605c7f3c43c 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15227,12 +15227,6 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
         }
     }
 
-#ifdef SYCL_USE_XMX
-    bool use_xmx = true;
-#else
-    bool use_xmx = false;
-#endif
-
     // check data types and tensor shapes for custom matrix multiplication kernels:
     bool use_dequantize_mul_mat_vec = (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src0->ne[0] % GGML_SYCL_DMMV_X == 0 && src1->ne[1] == 1;

From bfed2838acbb5ffdded12c975863622b64b1d842 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 06:52:48 +0800
Subject: [PATCH 04/13] update readme

---
 Makefile  | 2 +-
 README.md | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/Makefile b/Makefile
index 5caf31cdf37..2a25ebd0be5 100644
--- a/Makefile
+++ b/Makefile
@@ -457,7 +457,7 @@ endif # CUDA_DOCKER_ARCH
 ifdef LLAMA_CUDA_FORCE_DMMV
 	MK_NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
 endif # LLAMA_CUDA_FORCE_DMMV
-ifdef LLAMA_CUDA_FORCE_MMQ
+ifdef LLAMA_FORCE_MMQ
 	MK_NVCCFLAGS += -DGGML_CUDA_FORCE_MMQ
 endif # LLAMA_CUDA_FORCE_MMQ
 ifdef LLAMA_CUDA_DMMV_X
diff --git a/README.md b/README.md
index 15519c97f43..4ea1d7e2a36 100644
--- a/README.md
+++ b/README.md
@@ -475,9 +475,10 @@ Building the program with BLAS support may lead to some performance improvements
 
   | Option                         | Legal values           | Default | Description |
   |--------------------------------|------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-  | LLAMA_CUDA_FORCE_DMMV          | Boolean                | false   | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
+  | LLAMA_FORCE_DMMV               | Boolean                | false   | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
   | LLAMA_CUDA_DMMV_X              | Positive integer >= 32 | 32      | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
-  | LLAMA_CUDA_MMV_Y               | Positive integer       | 1       | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. |
+  | LLAMA_CUDA_MMV_Y               | Positive integer       | 1       | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended.
+  | LLAMA_FORCE_MMQ                | Boolean                | false   | Force the use of dequantization + matrix multiplication kernels instead of leveraging Math libraries. | |
   | LLAMA_CUDA_F16                 | Boolean                | false   | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
   | LLAMA_CUDA_KQUANTS_ITER        | 1 or 2                 | 2       | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
   | LLAMA_CUDA_PEER_MAX_BATCH_SIZE | Positive integer       | 128     | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. |
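Note on PATCHes 02-04: after the rename a single pair of LLAMA_FORCE_* options feeds the CUDA, HIP, and SYCL builds, and for SYCL the GGML_SYCL_FORCE_MMQ define now gates SYCL_USE_XMX at compile time. A minimal translation unit demonstrating the toggle chain; only the #if logic is taken from the diffs, the file name and printf strings are illustrative:

    // Build twice to see both paths:
    //   g++ xmx_toggle.cpp && ./a.out
    //   g++ -DGGML_SYCL_FORCE_MMQ xmx_toggle.cpp && ./a.out
    #include <cstdio>

    #if !defined(GGML_SYCL_FORCE_MMQ)
        #define SYCL_USE_XMX
    #endif

    int main() {
    #ifdef SYCL_USE_XMX
        std::printf("XMX enabled: MMQ considered only up to MMQ_MAX_BATCH_SIZE (32)\n");
    #else
        std::printf("GGML_SYCL_FORCE_MMQ set: no XMX cap, MMQ considered for any batch\n");
    #endif
        return 0;
    }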
From 4bf6133b0e19ec3a89969b28b08fb9e890b46b81 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 14:06:45 +0800
Subject: [PATCH 05/13] typo

---
 ggml-sycl.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 605c7f3c43c..44a4d7078b9 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15250,7 +15250,7 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     use_mul_mat_q = use_mul_mat_q && (!fp16_performance_good || src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
 #endif // SYCL_USE_XMX
 
-#infdef GGML_SYCL_FORCE_DMMV
+#ifndef GGML_SYCL_FORCE_DMMV
     use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q;
 #endif // GGML_SYCL_FORCE_DMMV
 

From c7ed1d8ddc6964eda8df3c2c85ee6b89addd737e Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 16:03:41 +0800
Subject: [PATCH 06/13] revert FORCE_DMMV both in cuda and sycl

---
 CMakeLists.txt | 17 +++++----------
 Makefile       |  2 +-
 README.md      |  4 ++--
 ggml-sycl.cpp  | 26 ++++++++++++++++++++------
 4 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4c585c2d7ad..fbbc38644ef 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -96,8 +96,8 @@ option(LLAMA_LLAMAFILE "llama: use llamafile SGEMM"
 set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
 option(LLAMA_CUDA "llama: use CUDA" OFF)
 option(LLAMA_CUBLAS "llama: use CUDA (deprecated, use LLAMA_CUDA)" OFF)
-option(LLAMA_FORCE_DMMV "llama: use dmmv instead of mmvq kernels on GPU" OFF)
-option(LLAMA_FORCE_MMQ "llama: use mmq kernels instead of Math Lib" OFF)
+option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
+option(LLAMA_CUDA_FORCE_MMQ "llama: use mmq kernels instead of cuBLAS" OFF)
 set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
 set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
 option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some calculations" OFF)
@@ -405,10 +405,10 @@ if (LLAMA_CUDA)
         add_compile_definitions(GGML_USE_CUDA)
         add_compile_definitions(GGML_CUDA_USE_GRAPHS)
 
-        if (LLAMA_FORCE_DMMV)
+        if (LLAMA_CUDA_FORCE_DMMV)
             add_compile_definitions(GGML_CUDA_FORCE_DMMV)
         endif()
-        if (LLAMA_FORCE_MMQ)
+        if (LLAMA_CUDA_FORCE_MMQ)
             add_compile_definitions(GGML_CUDA_FORCE_MMQ)
         endif()
         if (LLAMA_CUDA_NO_VMM)
@@ -578,11 +578,11 @@ if (LLAMA_HIPBLAS)
         add_compile_definitions(GGML_HIP_UMA)
     endif()
 
-    if (LLAMA_FORCE_DMMV)
+    if (LLAMA_CUDA_FORCE_DMMV)
         add_compile_definitions(GGML_CUDA_FORCE_DMMV)
     endif()
 
-    if (LLAMA_FORCE_MMQ)
+    if (LLAMA_CUDA_FORCE_MMQ)
         add_compile_definitions(GGML_CUDA_FORCE_MMQ)
     endif()
 
@@ -628,10 +628,7 @@ if (LLAMA_SYCL)
         add_compile_definitions(GGML_SYCL_F16)
     endif()
 
-    if (LLAMA_FORCE_DMMV)
-        add_compile_definitions(GGML_SYCL_FORCE_DMMV)
-    endif()
-    if (LLAMA_FORCE_MMQ)
+    if (LLAMA_CUDA_FORCE_MMQ)
         add_compile_definitions(GGML_SYCL_FORCE_MMQ)
     endif()
 
diff --git a/Makefile b/Makefile
index 2a25ebd0be5..5caf31cdf37 100644
--- a/Makefile
+++ b/Makefile
@@ -457,7 +457,7 @@ endif # CUDA_DOCKER_ARCH
 ifdef LLAMA_CUDA_FORCE_DMMV
 	MK_NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
 endif # LLAMA_CUDA_FORCE_DMMV
-ifdef LLAMA_FORCE_MMQ
+ifdef LLAMA_CUDA_FORCE_MMQ
 	MK_NVCCFLAGS += -DGGML_CUDA_FORCE_MMQ
 endif # LLAMA_CUDA_FORCE_MMQ
 ifdef LLAMA_CUDA_DMMV_X
diff --git a/README.md b/README.md
index 4ea1d7e2a36..dd92d4452ec 100644
--- a/README.md
+++ b/README.md
@@ -475,10 +475,10 @@ Building the program with BLAS support may lead to some performance improvements
 
   | Option                         | Legal values           | Default | Description |
   |--------------------------------|------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-  | LLAMA_FORCE_DMMV               | Boolean                | false   | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
+  | LLAMA_CUDA_FORCE_DMMV          | Boolean                | false   | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
   | LLAMA_CUDA_DMMV_X              | Positive integer >= 32 | 32      | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
   | LLAMA_CUDA_MMV_Y               | Positive integer       | 1       | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended.
-  | LLAMA_FORCE_MMQ                | Boolean                | false   | Force the use of dequantization + matrix multiplication kernels instead of leveraging Math libraries. | |
+  | LLAMA_CUDA_FORCE_MMQ           | Boolean                | false   | Force the use of dequantization + matrix multiplication kernels instead of leveraging Math libraries. | |
   | LLAMA_CUDA_F16                 | Boolean                | false   | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
   | LLAMA_CUDA_KQUANTS_ITER        | 1 or 2                 | 2       | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
   | LLAMA_CUDA_PEER_MAX_BATCH_SIZE | Positive integer       | 128     | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. |
diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 44a4d7078b9..390652b3ea0 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15212,6 +15212,25 @@ bool ggml_sycl_supports_mmq(enum ggml_type type) {
     // }
 }
 
+bool ggml_sycl_supports_dmmv(enum ggml_type type) {
+    switch (type) {
+        case GGML_TYPE_Q4_0:
+        case GGML_TYPE_Q4_1:
+        case GGML_TYPE_Q5_0:
+        case GGML_TYPE_Q5_1:
+        case GGML_TYPE_Q8_0:
+        case GGML_TYPE_Q2_K:
+        case GGML_TYPE_Q3_K:
+        case GGML_TYPE_Q4_K:
+        case GGML_TYPE_Q5_K:
+        case GGML_TYPE_Q6_K:
+        case GGML_TYPE_F16:
+            return true;
+        default:
+            return false;
+    }
+}
+
 static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     const bool all_on_device =
         (src0->backend == GGML_BACKEND_TYPE_GPU || src0->backend == GGML_BACKEND_TYPE_GPU_SPLIT) &&
@@ -15228,7 +15247,7 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     }
 
     // check data types and tensor shapes for custom matrix multiplication kernels:
-    bool use_dequantize_mul_mat_vec = (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16)
+    bool use_dequantize_mul_mat_vec = ggml_sycl_supports_dmmv(src0->type)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src0->ne[0] % GGML_SYCL_DMMV_X == 0 && src1->ne[1] == 1;
 
@@ -15243,17 +15262,12 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     const bool fp16_performance_good = true;
 
     // mmvq and mmq need the __dp4a instruction which is available for gen12+
-    use_mul_mat_vec_q = use_mul_mat_vec_q; // Check dp4a
     // Workaround in https://github.com/ggerganov/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e
     use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
 #ifdef SYCL_USE_XMX
     use_mul_mat_q = use_mul_mat_q && (!fp16_performance_good || src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
 #endif // SYCL_USE_XMX
 
-#ifndef GGML_SYCL_FORCE_DMMV
-    use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q;
-#endif // GGML_SYCL_FORCE_DMMV
-
     if (!split && !fp16_performance_good && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         // KQ single-batch
         ggml_sycl_mul_mat_vec_p021(src0, src1, dst);
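Note on PATCH 06: with the shared FORCE flags reverted to their LLAMA_CUDA_* names, the SYCL-side FORCE_DMMV override from PATCH 01 is dropped entirely, so the if/else order alone decides between the vector kernels: dmmv is tested first and wins for any single-column src1 whose type passes ggml_sycl_supports_dmmv and whose ne[0] is GGML_SYCL_DMMV_X-aligned. A small standalone check of that ordering (stand-in booleans and an assumed MMVQ_MAX_BATCH_SIZE, not the real API):

    #include <cassert>

    constexpr long DMMV_X = 32;             // GGML_SYCL_DMMV_X
    constexpr long MMVQ_MAX_BATCH_SIZE = 8; // assumed value

    enum class Path { Dmmv, Mmvq, Gemm };

    Path pick(bool dmmv_type_ok, bool quantized, long ne00, long ne11) {
        bool use_dmmv = dmmv_type_ok && ne00 % DMMV_X == 0 && ne11 == 1;
        bool use_mmvq = quantized && ne11 <= MMVQ_MAX_BATCH_SIZE;
        if (use_dmmv) return Path::Dmmv; // tested first; the "&& !use_mul_mat_vec_q" override is gone
        if (use_mmvq) return Path::Mmvq;
        return Path::Gemm;
    }

    int main() {
        assert(pick(true, true, 4096, 1)  == Path::Dmmv); // single token: dmmv now wins
        assert(pick(true, true, 4096, 4)  == Path::Mmvq); // small batch: mmvq
        assert(pick(true, true, 4096, 64) == Path::Gemm); // large batch: GEMM fallback
    }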
From 8eb0549fd0b67473aa8bf23c3da7a1618da68117 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 16:05:57 +0800
Subject: [PATCH 07/13] revert typo

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index dd92d4452ec..1cab7f19d59 100644
--- a/README.md
+++ b/README.md
@@ -477,7 +477,7 @@ Building the program with BLAS support may lead to some performance improvements
   |--------------------------------|------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
   | LLAMA_CUDA_FORCE_DMMV          | Boolean                | false   | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
   | LLAMA_CUDA_DMMV_X              | Positive integer >= 32 | 32      | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
-  | LLAMA_CUDA_MMV_Y               | Positive integer       | 1       | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended.
+  | LLAMA_CUDA_MMV_Y               | Positive integer       | 1       | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. |
   | LLAMA_CUDA_FORCE_MMQ           | Boolean                | false   | Force the use of dequantization + matrix multiplication kernels instead of leveraging Math libraries. | |
   | LLAMA_CUDA_F16                 | Boolean                | false   | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
   | LLAMA_CUDA_KQUANTS_ITER        | 1 or 2                 | 2       | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |

From d0e9e0e14dda592c48ff66008e273d454c9ee335 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 16:20:00 +0800
Subject: [PATCH 08/13] remove fp16 replacing fp32

---
 ggml-sycl.cpp | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 390652b3ea0..855015978fc 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15258,23 +15258,20 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
 
-    // fp16 performance always better on gen12+
-    const bool fp16_performance_good = true;
-
     // mmvq and mmq need the __dp4a instruction which is available for gen12+
     // Workaround in https://github.com/ggerganov/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e
     use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
 #ifdef SYCL_USE_XMX
-    use_mul_mat_q = use_mul_mat_q && (!fp16_performance_good || src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
+    use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
 #endif // SYCL_USE_XMX
 
-    if (!split && !fp16_performance_good && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+    if (!split && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         // KQ single-batch
         ggml_sycl_mul_mat_vec_p021(src0, src1, dst);
-    } else if (!split && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
+    } else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
         ggml_sycl_mul_mat_vec_nc(src0, src1, dst);
-    } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || fp16_performance_good) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
+    } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
         // KQ + KQV multi-batch
         ggml_sycl_mul_mat_batched_sycl(src0, src1, dst);
     } else if (use_dequantize_mul_mat_vec) {
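Note on PATCH 08: dropping the hard-coded fp16_performance_good = true narrows the fp16 batched kernel to genuinely fp16 inputs. Reduced to the type conditions only (the split/permutation/contiguity checks are omitted), the gate changes as in this hedged before/after sketch, which is not the full predicate:

    #include <cassert>

    // gate for ggml_sycl_mul_mat_batched_sycl, type conditions only
    bool batched_before(bool src0_f16, bool src1_f16, bool multi_batch) {
        const bool fp16_performance_good = true; // was hard-coded since PATCH 01
        return src0_f16 && (src1_f16 || fp16_performance_good) && multi_batch;
    }

    bool batched_after(bool src0_f16, bool src1_f16, bool multi_batch) {
        return src0_f16 && src1_f16 && multi_batch;
    }

    int main() {
        // F16 weights with F32 activations, multi-batch:
        assert(batched_before(true, false, true));  // used to take the fp16 batched kernel
        assert(!batched_after(true, false, true));  // now falls through to the dispatch chain
    }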
From 6a8432bf436d5ab3a5214dced294c789ffa543a2 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 20:59:44 +0800
Subject: [PATCH 09/13] Update ggml-sycl.cpp

Co-authored-by: Neo Zhang Jianyu
---
 ggml-sycl.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 855015978fc..d1ebf26e5d5 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15251,7 +15251,7 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src0->ne[0] % GGML_SYCL_DMMV_X == 0 && src1->ne[1] == 1;
 
-    bool use_mul_mat_vec_q = ggml_is_quantized(src0->type) 
+    bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
 

From 1723c147c2c1d96468014854add294f6548f9119 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 20:59:54 +0800
Subject: [PATCH 10/13] Update ggml-sycl.cpp

Co-authored-by: Neo Zhang Jianyu
---
 ggml-sycl.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index d1ebf26e5d5..2f105e3003b 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15255,7 +15255,7 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
 
-    bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type) 
+    bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
 
     // mmvq and mmq need the __dp4a instruction which is available for gen12+

From fc08e1a72926a8f78dd97fb647a0ac4fed34aded Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 21:00:01 +0800
Subject: [PATCH 11/13] Update ggml-sycl.cpp

Co-authored-by: Neo Zhang Jianyu
---
 ggml-sycl.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 2f105e3003b..838ada399be 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15262,7 +15262,7 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     // Workaround in https://github.com/ggerganov/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e
     use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
 #ifdef SYCL_USE_XMX
-    use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE); 
+    use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
 #endif // SYCL_USE_XMX
 
     if (!split && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {

From d63f6b66d4eec428718c4a60cca26552e3c0747b Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 21:00:30 +0800
Subject: [PATCH 12/13] Update ggml-sycl.cpp

Co-authored-by: Neo Zhang Jianyu
---
 ggml-sycl.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 838ada399be..8f7d9d0f137 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15260,7 +15260,7 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
 
     // mmvq and mmq need the __dp4a instruction which is available for gen12+
     // Workaround in https://github.com/ggerganov/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e
-    use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS); 
+    use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
 #ifdef SYCL_USE_XMX
     use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
 #endif // SYCL_USE_XMX
From 732c3c977a05e8165dd1220ad310f66e31213d47 Mon Sep 17 00:00:00 2001
From: "Meng, Hengyu"
Date: Tue, 28 May 2024 21:02:41 +0800
Subject: [PATCH 13/13] rm unused

---
 ggml-sycl.cpp | 17 +----------------
 1 file changed, 1 insertion(+), 16 deletions(-)

diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 8f7d9d0f137..2e1e9a877ea 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -15192,24 +15192,9 @@ catch (sycl::exception const &exc) {
   std::exit(1);
 }
 
-bool ggml_sycl_supports_mmq(enum ggml_type type) {
+inline bool ggml_sycl_supports_mmq(enum ggml_type type) {
     // TODO: accuracy issues in MMQ
     return false;
-    // switch (type) {
-    //     case GGML_TYPE_Q4_0:
-    //     case GGML_TYPE_Q4_1:
-    //     case GGML_TYPE_Q5_0:
-    //     case GGML_TYPE_Q5_1:
-    //     case GGML_TYPE_Q8_0:
-    //     case GGML_TYPE_Q2_K:
-    //     case GGML_TYPE_Q3_K:
-    //     case GGML_TYPE_Q4_K:
-    //     case GGML_TYPE_Q5_K:
-    //     case GGML_TYPE_Q6_K:
-    //         return true;
-    //     default:
-    //         return false;
-    // }
 }
 
 bool ggml_sycl_supports_dmmv(enum ggml_type type) {
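End state after PATCH 13: ggml_sycl_supports_mmq is inlined and returns false until the MMQ accuracy issues are resolved, so dmmv and mmvq cover the quantized single-token and small-batch cases while everything else lands on the oneMKL GEMM fallback. Should MMQ be re-enabled later, the support check would presumably mirror the type list deleted above; the following is a hypothetical reconstruction, not part of the series:

    // Hypothetical re-enabled ggml_sycl_supports_mmq, rebuilt from the
    // commented-out list that PATCH 13 removes; illustration only, with a
    // local stand-in enum instead of the real ggml header.
    enum ggml_type {
        GGML_TYPE_Q4_0, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
        GGML_TYPE_Q8_0, GGML_TYPE_Q2_K, GGML_TYPE_Q3_K, GGML_TYPE_Q4_K,
        GGML_TYPE_Q5_K, GGML_TYPE_Q6_K, GGML_TYPE_F16, GGML_TYPE_F32
    };

    inline bool ggml_sycl_supports_mmq(enum ggml_type type) {
        switch (type) {
            case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1:
            case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1:
            case GGML_TYPE_Q8_0: case GGML_TYPE_Q2_K:
            case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K:
            case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K:
                return true;
            default:
                return false;
        }
    }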