diff --git a/bitsandbytes/optim/adam.py b/bitsandbytes/optim/adam.py
index 1a8800843..22a217c3b 100644
--- a/bitsandbytes/optim/adam.py
+++ b/bitsandbytes/optim/adam.py
@@ -3,7 +3,6 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
 
-
 from bitsandbytes.optim.optimizer import Optimizer2State
 
 
@@ -100,8 +99,10 @@ def __init__(
                 The weight decay value for the optimizer.
             amsgrad (`bool`, defaults to `False`):
                 Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.
+                Note: This parameter is not supported in Adam8bit and must be False.
             optim_bits (`int`, defaults to 32):
                 The number of bits of the optimizer state.
+                Note: This parameter is not used in Adam8bit as it always uses 8-bit optimization.
             args (`object`, defaults to `None`):
                 An object with additional arguments.
             min_8bit_size (`int`, defaults to 4096):
@@ -113,6 +114,15 @@ def __init__(
             is_paged (`bool`, defaults to `False`):
                 Whether the optimizer is a paged optimizer or not.
         """
+        # Validate unsupported parameters
+        if amsgrad:
+            raise ValueError("Adam8bit does not support amsgrad=True")
+
+        if optim_bits != 32:
+            # We allow the default value of 32 to maintain compatibility with the function signature,
+            # but any other value is invalid since Adam8bit always uses 8-bit optimization
+            raise ValueError("Adam8bit only supports optim_bits=32 (default value for compatibility)")
+
         super().__init__(
             "adam",
             params,
@@ -120,7 +130,7 @@ def __init__(
             betas,
             eps,
             weight_decay,
-            8,
+            8,  # Hardcoded to 8 bits
             args,
             min_8bit_size,
             percentile_clipping,
@@ -283,8 +293,10 @@ def __init__(
                 The weight decay value for the optimizer.
             amsgrad (`bool`, defaults to `False`):
                 Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.
+                Note: This parameter is not supported in PagedAdam8bit and must be False.
             optim_bits (`int`, defaults to 32):
                 The number of bits of the optimizer state.
+                Note: This parameter is not used in PagedAdam8bit as it always uses 8-bit optimization.
             args (`object`, defaults to `None`):
                 An object with additional arguments.
             min_8bit_size (`int`, defaults to 4096):
@@ -296,6 +308,15 @@ def __init__(
             is_paged (`bool`, defaults to `False`):
                 Whether the optimizer is a paged optimizer or not.
         """
+        # Validate unsupported parameters
+        if amsgrad:
+            raise ValueError("PagedAdam8bit does not support amsgrad=True")
+
+        if optim_bits != 32:
+            # We allow the default value of 32 to maintain compatibility with the function signature,
+            # but any other value is invalid since PagedAdam8bit always uses 8-bit optimization
+            raise ValueError("PagedAdam8bit only supports optim_bits=32 (default value for compatibility)")
+
         super().__init__(
             "adam",
             params,
@@ -303,7 +324,7 @@ def __init__(
             betas,
             eps,
             weight_decay,
-            8,
+            8,  # Hardcoded to 8 bits
             args,
             min_8bit_size,
             percentile_clipping,
diff --git a/bitsandbytes/optim/adamw.py b/bitsandbytes/optim/adamw.py
index 4bf3f6436..a32394bd5 100644
--- a/bitsandbytes/optim/adamw.py
+++ b/bitsandbytes/optim/adamw.py
@@ -2,6 +2,7 @@
 #
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
+
 from bitsandbytes.optim.optimizer import Optimizer2State
 
 
@@ -98,8 +99,10 @@ def __init__(
                 The weight decay value for the optimizer.
             amsgrad (`bool`, defaults to `False`):
                 Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.
+                Note: This parameter is not supported in AdamW8bit and must be False.
             optim_bits (`int`, defaults to 32):
                 The number of bits of the optimizer state.
+                Note: This parameter is not used in AdamW8bit as it always uses 8-bit optimization.
             args (`object`, defaults to `None`):
                 An object with additional arguments.
             min_8bit_size (`int`, defaults to 4096):
@@ -111,6 +114,15 @@ def __init__(
             is_paged (`bool`, defaults to `False`):
                 Whether the optimizer is a paged optimizer or not.
         """
+        # Validate unsupported parameters
+        if amsgrad:
+            raise ValueError("AdamW8bit does not support amsgrad=True")
+
+        if optim_bits != 32:
+            # We allow the default value of 32 to maintain compatibility with the function signature,
+            # but any other value is invalid since AdamW8bit always uses 8-bit optimization
+            raise ValueError("AdamW8bit only supports optim_bits=32 (default value for compatibility)")
+
         super().__init__(
             "adam",
             params,
@@ -118,7 +130,7 @@ def __init__(
             betas,
             eps,
             weight_decay,
-            8,
+            8,  # Hardcoded to 8 bits
             args,
             min_8bit_size,
             percentile_clipping,
@@ -279,8 +291,10 @@ def __init__(
                 The weight decay value for the optimizer.
             amsgrad (`bool`, defaults to `False`):
                 Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.
+                Note: This parameter is not supported in PagedAdamW8bit and must be False.
             optim_bits (`int`, defaults to 32):
                 The number of bits of the optimizer state.
+                Note: This parameter is not used in PagedAdamW8bit as it always uses 8-bit optimization.
             args (`object`, defaults to `None`):
                 An object with additional arguments.
             min_8bit_size (`int`, defaults to 4096):
@@ -292,6 +306,15 @@ def __init__(
             is_paged (`bool`, defaults to `False`):
                 Whether the optimizer is a paged optimizer or not.
         """
+        # Validate unsupported parameters
+        if amsgrad:
+            raise ValueError("PagedAdamW8bit does not support amsgrad=True")
+
+        if optim_bits != 32:
+            # We allow the default value of 32 to maintain compatibility with the function signature,
+            # but any other value is invalid since PagedAdamW8bit always uses 8-bit optimization
+            raise ValueError("PagedAdamW8bit only supports optim_bits=32 (default value for compatibility)")
+
         super().__init__(
             "adam",
             params,
@@ -299,7 +322,7 @@ def __init__(
             betas,
             eps,
             weight_decay,
-            8,
+            8,  # Hardcoded to 8 bits
             args,
             min_8bit_size,
             percentile_clipping,
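For context, a minimal usage sketch of what the added checks mean for callers, not part of the patch itself. It assumes a typical `bitsandbytes` + `torch` install; the tiny `Linear` model exists only to provide parameters, and the printed messages mirror the `ValueError` strings added above.

```python
import torch
import bitsandbytes as bnb

# A tiny model just to have parameters to optimize (illustrative only).
model = torch.nn.Linear(64, 64)

# Normal construction: optim_bits is left at its default of 32, which the
# new check accepts for signature compatibility; the state is still 8-bit.
optimizer = bnb.optim.AdamW8bit(model.parameters(), lr=1e-3)

# With this change, unsupported arguments fail fast at construction time
# instead of being silently ignored.
try:
    bnb.optim.AdamW8bit(model.parameters(), amsgrad=True)
except ValueError as e:
    print(e)  # AdamW8bit does not support amsgrad=True

try:
    bnb.optim.AdamW8bit(model.parameters(), optim_bits=8)
except ValueError as e:
    print(e)  # AdamW8bit only supports optim_bits=32 (default value for compatibility)
```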