From aad358f64707fefcf865fef2cc279cd48bc559c7 Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Tue, 17 Jun 2025 15:16:01 +0000 Subject: [PATCH] fix doc Signed-off-by: jiqing-feng --- bitsandbytes/backends/cpu/ops.py | 5 ----- bitsandbytes/backends/xpu/ops.py | 7 ------- docs/source/installation.mdx | 20 ++++++-------------- 3 files changed, 6 insertions(+), 26 deletions(-) diff --git a/bitsandbytes/backends/cpu/ops.py b/bitsandbytes/backends/cpu/ops.py index d1548aa1d..b715b1d00 100644 --- a/bitsandbytes/backends/cpu/ops.py +++ b/bitsandbytes/backends/cpu/ops.py @@ -1,6 +1,5 @@ from collections.abc import Sequence import ctypes as ct -import warnings import torch @@ -119,7 +118,3 @@ def _( shape, dtype, ) -else: - warnings.warn( - "You can install intel_extension_for_pytorch to get better performance on NF4 if you are using Intel CPUs." - ) diff --git a/bitsandbytes/backends/xpu/ops.py b/bitsandbytes/backends/xpu/ops.py index 1bee4a001..740a6dd1b 100644 --- a/bitsandbytes/backends/xpu/ops.py +++ b/bitsandbytes/backends/xpu/ops.py @@ -57,13 +57,6 @@ def _dequantize_4bit_impl( def _dequantize_blockwise_impl( A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype, out: torch.Tensor ) -> None: - # torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 64]) - # torch._check(A.dtype == torch.uint8, lambda: f"A must be uint8, got {A.dtype}") - # torch._check( - # dtype in [torch.float16, torch.bfloat16, torch.float32], - # lambda: f"Blockwise dequantization only supports 16bit/32bit floating types, got {dtype}", - # ) - args = ( get_ptr(code), get_ptr(A), diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx index e61ce4655..9b3449870 100644 --- a/docs/source/installation.mdx +++ b/docs/source/installation.mdx @@ -237,24 +237,16 @@ pip install -e . 
# `-e` for "editable" install, when developing BNB (otherwise #### Intel CPU + XPU - -If you are using Intel CPU on Linux or Intel XPU on Linux/Windows, please follow the [instruction](https://pytorch-extension.intel.com/) or the following command to install intel_extension_for_pytorch so you can get better performance. - -CPU: `pip install intel_extension_for_pytorch` -XPU: `pip install intel_extension_for_pytorch --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/` - -Install bitsandbytes: -CPU: Need to build CPU C++ codes +CPU needs to build the CPU C++ code, while XPU needs to build the SYCL code. +Run `export bnb_device=xpu` if you are using XPU; run `export bnb_device=cpu` if you are using CPU. ``` git clone https://github.com/bitsandbytes-foundation/bitsandbytes.git && cd bitsandbytes/ -cmake -DCOMPUTE_BACKEND=cpu -S . +cmake -DCOMPUTE_BACKEND=$bnb_device -S . make -pip install . -``` -XPU: -``` -pip install git+https://github.com/bitsandbytes-foundation/bitsandbytes.git +pip install -e . ``` +Note: You can run `pip install intel_extension_for_pytorch` to get better performance on CPU. +