From cafb45ef9db496b1667d3c88f53a3002ebd562eb Mon Sep 17 00:00:00 2001
From: Nicholas Broad
Date: Sat, 3 Aug 2024 00:28:18 -0700
Subject: [PATCH 1/2] link for optimizer names

Add a note and link to where the user can find more optimizer names easily
because there are many more optimizers than are mentioned in the docstring.
---
 src/transformers/training_args.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index 638875bb536d..fe6a6cd638ec 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -611,8 +611,9 @@ class TrainingArguments:
 
             The options should be separated by whitespaces.
         optim (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"`):
-            The optimizer to use: adamw_hf, adamw_torch, adamw_torch_fused, adamw_apex_fused, adamw_anyprecision or
-            adafactor.
+            The optimizer to use, such as "adamw_hf", "adamw_torch", "adamw_torch_fused", "adamw_apex_fused", "adamw_anyprecision",
+            "adafactor". See `OptimizerNames` in [training_args.py](https://github.com/huggingface/transformers/blob/main/src/transformers/training_args.py)
+            for a full list of optimizers.
         optim_args (`str`, *optional*):
             Optional arguments that are supplied to AnyPrecisionAdamW.
         group_by_length (`bool`, *optional*, defaults to `False`):

From 867970543817f814e3976af9a63c254691f982c4 Mon Sep 17 00:00:00 2001
From: Nicholas Broad
Date: Tue, 6 Aug 2024 17:41:54 -0700
Subject: [PATCH 2/2] make fixup

---
 src/transformers/training_args.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index fe6a6cd638ec..ec8f575b6c3e 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -611,8 +611,8 @@ class TrainingArguments:
 
             The options should be separated by whitespaces.
         optim (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `"adamw_torch"`):
-            The optimizer to use, such as "adamw_hf", "adamw_torch", "adamw_torch_fused", "adamw_apex_fused", "adamw_anyprecision",
-            "adafactor". See `OptimizerNames` in [training_args.py](https://github.com/huggingface/transformers/blob/main/src/transformers/training_args.py)
+            The optimizer to use, such as "adamw_hf", "adamw_torch", "adamw_torch_fused", "adamw_apex_fused", "adamw_anyprecision",
+            "adafactor". See `OptimizerNames` in [training_args.py](https://github.com/huggingface/transformers/blob/main/src/transformers/training_args.py)
             for a full list of optimizers.
         optim_args (`str`, *optional*):
            Optional arguments that are supplied to AnyPrecisionAdamW.
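
For reference, a minimal sketch (not part of the patch itself) of how a user could list the full set of optimizer names that the updated docstring points to, assuming the `transformers` library is installed and that `OptimizerNames` is importable from `transformers.training_args` as in the file edited above:

# Minimal sketch: enumerate the optimizer names accepted by `optim`.
from transformers.training_args import OptimizerNames, TrainingArguments

# Print every string value that can be passed as `optim`.
for name in OptimizerNames:
    print(name.value)

# Pass one of the listed values (here "adafactor") when building TrainingArguments.
args = TrainingArguments(output_dir="out", optim="adafactor")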