From c9e3ed666ee48afcb1c711b8e429afd0118c98c8 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Thu, 31 Oct 2024 18:22:57 +0000 Subject: [PATCH 01/10] fix tests --- docs/source/en/kv_cache.md | 2 +- .../models/bloom/modeling_bloom.py | 2 +- src/transformers/models/dbrx/modeling_dbrx.py | 2 +- .../modeling_gpt_neox_japanese.py | 2 +- .../models/granite/modeling_granite.py | 39 +++++++------------ .../models/granitemoe/modeling_granitemoe.py | 4 +- .../models/idefics/modeling_idefics.py | 2 +- .../models/olmoe/modeling_olmoe.py | 3 +- .../models/paligemma/modeling_paligemma.py | 2 +- .../models/qwen2_vl/modeling_qwen2_vl.py | 2 +- tests/generation/test_utils.py | 28 ++++++++----- .../chameleon/test_modeling_chameleon.py | 5 --- tests/models/dbrx/test_modeling_dbrx.py | 4 -- tests/models/idefics/test_modeling_idefics.py | 4 -- tests/utils/test_cache_utils.py | 2 +- 15 files changed, 43 insertions(+), 60 deletions(-) diff --git a/docs/source/en/kv_cache.md b/docs/source/en/kv_cache.md index 05ab9eafa723..0c3ea5e75dad 100644 --- a/docs/source/en/kv_cache.md +++ b/docs/source/en/kv_cache.md @@ -346,7 +346,7 @@ In case you are using Sink Cache, you have to crop your inputs to that maximum l >>> user_prompts = ["Hello, what's your name?", "Btw, yesterday I was on a rock concert."] >>> past_key_values = DynamicCache() ->>> max_cache_length = past_key_values.get_max_length() +>>> max_cache_length = past_key_values.get_max_cache_shape() >>> messages = [] >>> for prompt in user_prompts: diff --git a/src/transformers/models/bloom/modeling_bloom.py b/src/transformers/models/bloom/modeling_bloom.py index b0e9a4bbcb91..b3dd3446cd84 100644 --- a/src/transformers/models/bloom/modeling_bloom.py +++ b/src/transformers/models/bloom/modeling_bloom.py @@ -911,7 +911,7 @@ def prepare_inputs_for_generation( # This part differs from other models because BLOOM needs a 2D mask to construct alibi tensor # The only difference is the usage of 2D instead of 4D mask, but the shape will be static if isinstance(past_key_values, StaticCache) and attention_mask is not None: - target_length = past_key_values.get_max_length() + target_length = past_key_values.get_max_cache_shape() batch_size, seq_length = attention_mask.shape diff = target_length - seq_length diff --git a/src/transformers/models/dbrx/modeling_dbrx.py b/src/transformers/models/dbrx/modeling_dbrx.py index 659fa154ecf7..63cd6a92469e 100644 --- a/src/transformers/models/dbrx/modeling_dbrx.py +++ b/src/transformers/models/dbrx/modeling_dbrx.py @@ -832,7 +832,7 @@ class DbrxPreTrainedModel(PreTrainedModel): _supports_sdpa = True _supports_cache_class = True _supports_quantized_cache = True - _supports_static_cache = True + _supports_static_cache = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) def _init_weights(self, module: nn.Module): std = self.config.initializer_range diff --git a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py index 6c3f3313f57f..1f7701f9db4c 100755 --- a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +++ b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py @@ -52,7 +52,7 @@ class GPTNeoXJapanesePreTrainedModel(PreTrainedModel): _skip_keys_device_placement = "past_key_values" _supports_cache_class = True _supports_quantized_cache = True - _supports_static_cache = True + _supports_static_cache = False # TODO (fix me): compilation fails due to a stride error? 
def _init_weights(self, module): """Initialize the weights""" diff --git a/src/transformers/models/granite/modeling_granite.py b/src/transformers/models/granite/modeling_granite.py index 50c5b538af30..c48a887e943d 100644 --- a/src/transformers/models/granite/modeling_granite.py +++ b/src/transformers/models/granite/modeling_granite.py @@ -871,6 +871,7 @@ def forward( attentions=all_self_attns, ) + # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask def _update_causal_mask( self, attention_mask: torch.Tensor, @@ -879,11 +880,6 @@ def _update_causal_mask( past_key_values: Cache, output_attentions: bool, ): - # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static - # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes. - # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using - # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114 - if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and 0.0 in attention_mask: return attention_mask @@ -906,10 +902,9 @@ def _update_causal_mask( return None dtype, device = input_tensor.dtype, input_tensor.device - min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: - target_length = past_key_values.get_max_length() + target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] @@ -917,24 +912,17 @@ def _update_causal_mask( else past_seen_tokens + sequence_length + 1 ) - if attention_mask is not None and attention_mask.dim() == 4: - causal_mask = attention_mask - else: - causal_mask = torch.full( - (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device - ) - if sequence_length != 1: - causal_mask = torch.triu(causal_mask, diagonal=1) - causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) - causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) - if attention_mask is not None: - causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit - mask_length = attention_mask.shape[-1] - padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] - padding_mask = padding_mask == 0 - causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( - padding_mask, min_dtype - ) + # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). + causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( + attention_mask, + sequence_length=sequence_length, + target_length=target_length, + dtype=dtype, + device=device, + cache_position=cache_position, + batch_size=input_tensor.shape[0], + ) + if ( self.config._attn_implementation == "sdpa" and attention_mask is not None @@ -944,6 +932,7 @@ def _update_causal_mask( # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
# Details: https://github.com/pytorch/pytorch/issues/110213 + min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask diff --git a/src/transformers/models/granitemoe/modeling_granitemoe.py b/src/transformers/models/granitemoe/modeling_granitemoe.py index 07b42822621a..6cceff00df36 100644 --- a/src/transformers/models/granitemoe/modeling_granitemoe.py +++ b/src/transformers/models/granitemoe/modeling_granitemoe.py @@ -838,7 +838,7 @@ class GraniteMoePreTrainedModel(PreTrainedModel): _supports_sdpa = True _supports_cache_class = True _supports_quantized_cache = True - _supports_static_cache = True + _supports_static_cache = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) def _init_weights(self, module): std = self.config.initializer_range @@ -1142,7 +1142,7 @@ def _update_causal_mask( min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] if using_static_cache: - target_length = past_key_values.get_max_length() + target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] diff --git a/src/transformers/models/idefics/modeling_idefics.py b/src/transformers/models/idefics/modeling_idefics.py index 8bd24728b038..90a55a2335f9 100644 --- a/src/transformers/models/idefics/modeling_idefics.py +++ b/src/transformers/models/idefics/modeling_idefics.py @@ -917,7 +917,7 @@ class IdeficsPreTrainedModel(PreTrainedModel): _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"] _supports_sdpa = True _supports_cache_class = True - _supports_static_cache = True + _supports_static_cache = False # IDEFICS cannot compile due to dynamic control flow when checking inputs def _init_weights(self, module): # important: this ported version of Idefics isn't meant for training from scratch - only diff --git a/src/transformers/models/olmoe/modeling_olmoe.py b/src/transformers/models/olmoe/modeling_olmoe.py index cbb8db0f59dd..5b3e410c57b7 100644 --- a/src/transformers/models/olmoe/modeling_olmoe.py +++ b/src/transformers/models/olmoe/modeling_olmoe.py @@ -782,7 +782,6 @@ def forward( "The bare Olmoe Model outputting raw hidden-states without any specific head on top.", OLMOE_START_DOCSTRING, ) -# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->Olmoe class OlmoePreTrainedModel(PreTrainedModel): config_class = OlmoeConfig base_model_prefix = "model" @@ -793,7 +792,7 @@ class OlmoePreTrainedModel(PreTrainedModel): _supports_sdpa = True _supports_cache_class = True _supports_quantized_cache = True - _supports_static_cache = True + _supports_static_cache = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) def _init_weights(self, module): std = self.config.initializer_range diff --git a/src/transformers/models/paligemma/modeling_paligemma.py b/src/transformers/models/paligemma/modeling_paligemma.py index e198dab420ab..63cfbb6e6a5f 100644 --- a/src/transformers/models/paligemma/modeling_paligemma.py +++ b/src/transformers/models/paligemma/modeling_paligemma.py @@ -353,7 +353,7 @@ def _update_causal_mask( min_dtype = torch.finfo(dtype).min sequence_length = inputs_embeds.shape[1] if using_static_cache: - target_length = past_key_values.get_max_length() + target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] diff --git a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py 
b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py index 9c0d0b45ee8e..de496d0d1ce1 100644 --- a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py @@ -962,7 +962,7 @@ class Qwen2VLPreTrainedModel(PreTrainedModel): _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True - _supports_static_cache = True + _supports_static_cache = False # TODO (joao): fix. torch.compile failing probably due to `cache_positions` def _init_weights(self, module): std = self.config.initializer_range diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index cbcb0665eb30..64175657bbb0 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -28,6 +28,7 @@ from transformers import AutoConfig, is_torch_available, pipeline, set_seed from transformers.testing_utils import ( is_flaky, + parse_flag_from_env, require_accelerate, require_flash_attn, require_optimum_quanto, @@ -1951,22 +1952,26 @@ def test_generate_with_quant_cache(self): @parameterized.expand( [ - ("forward_only", False), # TODO (@joao): a few models failing. After fixed, this should not be "@slow" + ("forward_only", False), ("end_to_end", True), # TODO (@joao): end-to-end compilation is broken with torch 2.5+, explore and fix ] ) @pytest.mark.generate @require_torch_gpu - @slow def test_generate_compile(self, _, end_to_end): """ Tests that `.generate` is compatible with torch.compile without graph breaks, keeping the same results. Tests end-to-end compilation and forward pass compilation only. ⚠️ Runs two sequential generations to ensure the cache doesn't get stuck after the first compiled run! ⚠️ """ + # Equivalent to `@slow` when `end_to_end=True`; + # To be removed when end-to-end compilation is fixed and not super slow + if end_to_end and not parse_flag_from_env("RUN_SLOW", default=False): + self.skipTest("test is slow") + for model_class in self.all_generative_model_classes: if not model_class._supports_static_cache: - self.skipTest("This model doesn't support static cache") + self.skipTest("This model doesn't support static cache (= no expectations of compilation support)") # TODO (joao) -- fix and enable me :) if end_to_end and any(model_name in model_class.__name__.lower() for model_name in ["whisper"]): @@ -1980,11 +1985,11 @@ def test_generate_compile(self, _, end_to_end): model = model_class(config).to(torch_device) model.eval() # otherwise `self.training` is `True` -- this flag is used at attn mask creation time - input_ids = inputs_dict["input_ids"].to(torch_device) + main_input = inputs_dict[model.main_input_name].to(torch_device) # creates two sets of *different* inputs with the same shape - half_batch_size = input_ids.shape[0] // 2 - input_ids_sets = [input_ids[:half_batch_size, :], input_ids[half_batch_size : half_batch_size * 2, :]] - self.assertTrue(input_ids_sets[0].shape == input_ids_sets[1].shape) + half_batch_size = main_input.shape[0] // 2 + main_input_sets = [main_input[:half_batch_size, :], main_input[half_batch_size : half_batch_size * 2, :]] + self.assertTrue(main_input_sets[0].shape == main_input_sets[1].shape) generation_kwargs = { "do_sample": False, @@ -1994,11 +1999,14 @@ def test_generate_compile(self, _, end_to_end): } # end-to-end works best with dynamic cache, forward compilation works best with static cache if not end_to_end: - generation_kwargs["cache_implementation"] = "static" + if "gemma2" in model_class.__name__.lower(): + generation_kwargs["cache_implementation"] = 
"hybrid" + else: + generation_kwargs["cache_implementation"] = "static" # get eager + dynamic cache results for future comparison dynamic_outputs = [] - for model_inputs in input_ids_sets: + for model_inputs in main_input_sets: dynamic_outputs.append(model.generate(model_inputs, **generation_kwargs)) # get compiled results @@ -2011,7 +2019,7 @@ def test_generate_compile(self, _, end_to_end): model.forward = torch.compile(model.forward, fullgraph=True, mode="reduce-overhead") compiled_outputs = [] - for model_inputs in input_ids_sets: + for model_inputs in main_input_sets: compiled_outputs.append(model.generate(model_inputs, generation_config=generation_config)) for dynamic_result, compiled_result in zip(dynamic_outputs, compiled_outputs): diff --git a/tests/models/chameleon/test_modeling_chameleon.py b/tests/models/chameleon/test_modeling_chameleon.py index 2a8e7633ba40..62f284b71c9a 100644 --- a/tests/models/chameleon/test_modeling_chameleon.py +++ b/tests/models/chameleon/test_modeling_chameleon.py @@ -330,11 +330,6 @@ def test_model_rope_scaling(self, scaling_type): def test_batching_equivalence(self): pass - # TODO (joao, raushan): fix me -- the problem is in `cache_position[0] == 0`, i.e. dynamic control flow - @unittest.skip("Chameleon is not compatible with end-to-end generation compilation") - def test_generate_compile_fullgraph(self): - pass - @require_torch class ChameleonIntegrationTest(unittest.TestCase): diff --git a/tests/models/dbrx/test_modeling_dbrx.py b/tests/models/dbrx/test_modeling_dbrx.py index d38a479ab36e..06c82c949cb3 100644 --- a/tests/models/dbrx/test_modeling_dbrx.py +++ b/tests/models/dbrx/test_modeling_dbrx.py @@ -368,10 +368,6 @@ def test_disk_offload_safetensors(self): def test_disk_offload_bin(self): pass - @unittest.skip("Dbrx does not support `torch.compile` with `fullgraph=True`.") - def test_generate_compile_fullgraph(self): - pass - @require_torch class DbrxModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/idefics/test_modeling_idefics.py b/tests/models/idefics/test_modeling_idefics.py index d19d10932bfc..04dcef557755 100644 --- a/tests/models/idefics/test_modeling_idefics.py +++ b/tests/models/idefics/test_modeling_idefics.py @@ -770,10 +770,6 @@ def test_contrastive_generate_low_memory(self): def test_custom_4d_attention_mask(self): pass - @unittest.skip(reason="IDEFICS cannot compile due to dynamic control flow when checking inputs") - def test_generate_compile_fullgraph(self): - pass - @unittest.skip(reason="We only test the model that takes in multiple images") def test_model(self): pass diff --git a/tests/utils/test_cache_utils.py b/tests/utils/test_cache_utils.py index 4a6dae67cbc8..c3e0d4175bb8 100644 --- a/tests/utils/test_cache_utils.py +++ b/tests/utils/test_cache_utils.py @@ -362,7 +362,7 @@ def test_sink_cache_iterative_prompts(self): input_ids = gen_out # We went well beyond the cache length - self.assertTrue(input_ids.shape[1] > cache.get_max_length() * 1.5) + self.assertTrue(input_ids.shape[1] > cache.get_max_cache_shape() * 1.5) # And it still produces a coherent english decoded = tokenizer.batch_decode(input_ids, skip_special_tokens=True) From d789a572bf431d1a2cfdffcc5ac044e41689bad4 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 22 Jan 2025 11:42:25 +0000 Subject: [PATCH 02/10] Update tests/generation/test_utils.py --- tests/generation/test_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index d0596b5c873a..f8709d6e2625 100644 --- 
a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1978,7 +1978,6 @@ def test_generate_with_quant_cache(self): with self.assertRaises(ValueError): model.generate(**generation_kwargs, **inputs_dict) - @pytest.mark.generate @require_torch_accelerator def test_generate_compile_model_forward(self): From 43f96af1e7c1af71499efb627588fc7902ef4826 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 22 Jan 2025 11:52:49 +0000 Subject: [PATCH 03/10] make fixup --- src/transformers/models/granite/modeling_granite.py | 1 - tests/generation/test_utils.py | 10 +++++----- tests/models/chameleon/test_modeling_chameleon.py | 1 + tests/models/dbrx/test_modeling_dbrx.py | 1 + tests/models/qwen2_vl/test_modeling_qwen2_vl.py | 4 ---- 5 files changed, 7 insertions(+), 10 deletions(-) diff --git a/src/transformers/models/granite/modeling_granite.py b/src/transformers/models/granite/modeling_granite.py index 96b42a2ce77e..3c887d3a1b91 100644 --- a/src/transformers/models/granite/modeling_granite.py +++ b/src/transformers/models/granite/modeling_granite.py @@ -634,7 +634,6 @@ def forward( ) return output if return_dict else output.to_tuple() - # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask def _update_causal_mask( self, attention_mask: torch.Tensor, diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index f8709d6e2625..56c59e107266 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -30,7 +30,6 @@ from transformers import AutoConfig, is_torch_available, pipeline from transformers.testing_utils import ( is_flaky, - parse_flag_from_env, require_accelerate, require_flash_attn, require_optimum_quanto, @@ -2006,10 +2005,11 @@ def test_generate_compile_model_forward(self): "return_dict_in_generate": True, "output_scores": True, } - if "gemma2" in model_class.__name__.lower(): - generation_kwargs["cache_implementation"] = "hybrid" - else: - generation_kwargs["cache_implementation"] = "static" + + if "gemma2" in model_class.__name__.lower(): + generation_kwargs["cache_implementation"] = "hybrid" + else: + generation_kwargs["cache_implementation"] = "static" # get eager + dynamic cache results for future comparison dynamic_outputs = [] diff --git a/tests/models/chameleon/test_modeling_chameleon.py b/tests/models/chameleon/test_modeling_chameleon.py index c45a2e4c2d03..b9f3e18eb5c4 100644 --- a/tests/models/chameleon/test_modeling_chameleon.py +++ b/tests/models/chameleon/test_modeling_chameleon.py @@ -331,6 +331,7 @@ def test_model_rope_scaling(self, scaling_type): def test_batching_equivalence(self): pass + @require_torch class ChameleonIntegrationTest(unittest.TestCase): @slow diff --git a/tests/models/dbrx/test_modeling_dbrx.py b/tests/models/dbrx/test_modeling_dbrx.py index ac1fbcee4c02..06c82c949cb3 100644 --- a/tests/models/dbrx/test_modeling_dbrx.py +++ b/tests/models/dbrx/test_modeling_dbrx.py @@ -368,6 +368,7 @@ def test_disk_offload_safetensors(self): def test_disk_offload_bin(self): pass + @require_torch class DbrxModelIntegrationTest(unittest.TestCase): @slow diff --git a/tests/models/qwen2_vl/test_modeling_qwen2_vl.py b/tests/models/qwen2_vl/test_modeling_qwen2_vl.py index 18224c50bf16..8864185abf4f 100644 --- a/tests/models/qwen2_vl/test_modeling_qwen2_vl.py +++ b/tests/models/qwen2_vl/test_modeling_qwen2_vl.py @@ -332,10 +332,6 @@ def test_beam_search_low_memory(self): def test_generate_from_inputs_embeds_with_static_cache(self): pass - @unittest.skip(reason="Can't compile fullgraph due to 
dynamic control flow in `prepare_inputs_for_generate`") - def test_generate_compile_model_forward(self): - pass - @require_torch class Qwen2VLIntegrationTest(unittest.TestCase): From 3f165c813c6fde1af78ab0dffae61c1a2e293003 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 22 Jan 2025 14:18:59 +0000 Subject: [PATCH 04/10] tmp commit --- src/transformers/generation/utils.py | 1 - src/transformers/modeling_utils.py | 11 ++++++ src/transformers/models/aria/modeling_aria.py | 3 +- tests/generation/test_utils.py | 35 +++++++++++-------- 4 files changed, 34 insertions(+), 16 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 461d7e121581..b59a22a6684c 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -3179,7 +3179,6 @@ def _sample( model_forward = self.__call__ if isinstance(model_kwargs.get("past_key_values"), StaticCache): if self.device.type == "cuda": - logger.warning_once("Using `torch.compile`.") os.environ["TOKENIZERS_PARALLELISM"] = "0" model_forward = self.get_compiled_call(generation_config.compile_config) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 3a9b044c1168..602aa700aa10 100755 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -5177,12 +5177,23 @@ def get_compiled_call(self, compile_config: CompileConfig): non-compiled/compiled `forward` during inference, especially to switch between prefill (where we don't want to use compiled version to avoid recomputing the graph with new shapes) and iterative decoding (where we want the speed-ups of compiled version with static shapes).""" + # Edge case: the user has manually specified compilation, and it may clash with this automatic compilation + # method. Compiled functions have a `get_compiler_config` method. 
+ if not hasattr(self, "_compiled_call"): + user_has_manually_specified_compilation = ( + hasattr(self.__call__, "get_compiler_config") or hasattr(self.forward, "get_compiler_config") + ) + if user_has_manually_specified_compilation: + return self.__call__ + + breakpoint() # Only reset it if not present or different from previous config default_config = getattr(self.generation_config, "compile_config", CompileConfig()) if ( not hasattr(self, "_compiled_call") or getattr(self, "_last_compile_config", default_config) != compile_config ): + logger.warning_once("Using `torch.compile` on the model's `__call__`") self._last_compile_config = compile_config self._compiled_call = torch.compile(self.__call__, **compile_config.to_dict()) return self._compiled_call diff --git a/src/transformers/models/aria/modeling_aria.py b/src/transformers/models/aria/modeling_aria.py index 0b330b4aeeda..0534ef6835f6 100644 --- a/src/transformers/models/aria/modeling_aria.py +++ b/src/transformers/models/aria/modeling_aria.py @@ -707,7 +707,7 @@ class AriaPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_cache_class = True _supports_quantized_cache = True - _supports_static_cache = True + _supports_static_cache = False # MoE models don't work with torch.compile (dynamic slicing) def _init_weights(self, module): std = self.config.initializer_range @@ -1553,6 +1553,7 @@ def forward( output_hidden_states=output_hidden_states, return_dict=return_dict, num_logits_to_keep=num_logits_to_keep, + cache_position=cache_position, ) logits = outputs[0] diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 56c59e107266..40746dc14dc8 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1978,7 +1978,6 @@ def test_generate_with_quant_cache(self): model.generate(**generation_kwargs, **inputs_dict) @pytest.mark.generate - @require_torch_accelerator def test_generate_compile_model_forward(self): """ Tests that `.generate` is compatible with torch.compile without graph breaks, keeping the same results. 
@@ -1988,7 +1987,7 @@ def test_generate_compile_model_forward(self): if not model_class._supports_static_cache: self.skipTest("This model doesn't support static cache (= no expectations of compilation support)") - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=4) model = model_class(config).to(torch_device) model.eval() # otherwise `self.training` is `True` -- this flag is used at attn mask creation time @@ -1996,36 +1995,44 @@ def test_generate_compile_model_forward(self): main_input = inputs_dict[model.main_input_name].to(torch_device) # creates two sets of *different* inputs with the same shape half_batch_size = main_input.shape[0] // 2 - main_input_sets = [main_input[:half_batch_size, :], main_input[half_batch_size : half_batch_size * 2, :]] - self.assertTrue(main_input_sets[0].shape == main_input_sets[1].shape) + input_1 = {} + input_2 = {} + for key, value in inputs_dict.items(): + if isinstance(value, torch.Tensor): + input_1[key] = value[:half_batch_size, :].to(torch_device) + input_2[key] = value[half_batch_size : half_batch_size * 2, :].to(torch_device) + else: + input_1[key] = value + input_2[key] = value + main_input_sets = [input_1, input_2] + self.assertTrue( + main_input_sets[0][model.main_input_name].shape == main_input_sets[1][model.main_input_name].shape + ) generation_kwargs = { "do_sample": False, - "max_new_tokens": 10, + "max_new_tokens": 5, "return_dict_in_generate": True, "output_scores": True, } + # get eager + dynamic cache results for future comparison + dynamic_outputs = [] + for model_inputs in main_input_sets: + dynamic_outputs.append(model.generate(**model_inputs, **generation_kwargs)) + if "gemma2" in model_class.__name__.lower(): generation_kwargs["cache_implementation"] = "hybrid" else: generation_kwargs["cache_implementation"] = "static" - # get eager + dynamic cache results for future comparison - dynamic_outputs = [] - for model_inputs in main_input_sets: - dynamic_outputs.append(model.generate(model_inputs, **generation_kwargs)) - # get compiled results - generation_config = copy.deepcopy(model.generation_config) - generation_config.update(**generation_kwargs) torch.compiler.reset() - model.forward = torch.compile(model.forward, fullgraph=True, mode="reduce-overhead") compiled_outputs = [] for model_inputs in main_input_sets: - compiled_outputs.append(model.generate(model_inputs, generation_config=generation_config)) + compiled_outputs.append(model.generate(**model_inputs, **generation_kwargs)) for dynamic_result, compiled_result in zip(dynamic_outputs, compiled_outputs): self._check_similar_generate_outputs(dynamic_result, compiled_result) From 524b3cb499f62ef77a6147c2274476ae88b297cc Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 22 Jan 2025 17:02:50 +0000 Subject: [PATCH 05/10] rely on auto compilation for the tests --- src/transformers/cache_utils.py | 12 +++++++++++ src/transformers/generation/utils.py | 5 +++-- src/transformers/modeling_utils.py | 11 ---------- src/transformers/models/aria/modular_aria.py | 3 +++ tests/generation/test_utils.py | 21 +++++++++++++------- 5 files changed, 32 insertions(+), 20 deletions(-) diff --git a/src/transformers/cache_utils.py b/src/transformers/cache_utils.py index b2be3f238d0c..427e1d4e3aea 100644 --- a/src/transformers/cache_utils.py +++ b/src/transformers/cache_utils.py @@ -29,6 +29,8 @@ class Cache(torch.nn.Module): Base, abstract class for all caches. 
The actual data structure is specific to each subclass. """ + is_compileable = False + def __init__(self): super().__init__() @@ -1098,6 +1100,8 @@ class StaticCache(Cache): ``` """ + is_compileable = True + # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well. @deprecate_kwarg("layer_device_map", version="4.52.0") def __init__( @@ -1297,6 +1301,7 @@ class SlidingWindowCache(StaticCache): """ is_sliding = True + is_compileable = True # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well. def __init__( @@ -1421,6 +1426,7 @@ def __init__(self, self_attention_cache: Cache, cross_attention_cache: Cache): super().__init__() self.self_attention_cache = self_attention_cache self.cross_attention_cache = cross_attention_cache + self.is_compileable = getattr(self.self_attention_cache, "is_compileable", False) self.is_updated = {} for layer_idx in range(len(cross_attention_cache.key_cache)): @@ -1612,6 +1618,8 @@ class HybridCache(Cache): ``` """ + is_compileable = True + # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well. @deprecate_kwarg("layer_device_map", version="4.52.0") def __init__( @@ -1832,6 +1840,8 @@ class MambaCache: ``` """ + is_compileable = True + # TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well. def __init__( self, @@ -1975,6 +1985,8 @@ class OffloadedStaticCache(StaticCache): ``` """ + is_compileable = True + @deprecate_kwarg("layer_device_map", version="4.52.0") def __init__( self, diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index b59a22a6684c..a7e3cb4bb869 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -3177,8 +3177,9 @@ def _sample( model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs) model_forward = self.__call__ - if isinstance(model_kwargs.get("past_key_values"), StaticCache): - if self.device.type == "cuda": + if isinstance(model_kwargs.get("past_key_values"), Cache): + is_compileable = model_kwargs["past_key_values"].is_compileable + if is_compileable and self.device.type == "cuda": os.environ["TOKENIZERS_PARALLELISM"] = "0" model_forward = self.get_compiled_call(generation_config.compile_config) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 602aa700aa10..3a9b044c1168 100755 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -5177,23 +5177,12 @@ def get_compiled_call(self, compile_config: CompileConfig): non-compiled/compiled `forward` during inference, especially to switch between prefill (where we don't want to use compiled version to avoid recomputing the graph with new shapes) and iterative decoding (where we want the speed-ups of compiled version with static shapes).""" - # Edge case: the user has manually specified compilation, and it may clash with this automatic compilation - # method. Compiled functions have a `get_compiler_config` method. 
- if not hasattr(self, "_compiled_call"): - user_has_manually_specified_compilation = ( - hasattr(self.__call__, "get_compiler_config") or hasattr(self.forward, "get_compiler_config") - ) - if user_has_manually_specified_compilation: - return self.__call__ - - breakpoint() # Only reset it if not present or different from previous config default_config = getattr(self.generation_config, "compile_config", CompileConfig()) if ( not hasattr(self, "_compiled_call") or getattr(self, "_last_compile_config", default_config) != compile_config ): - logger.warning_once("Using `torch.compile` on the model's `__call__`") self._last_compile_config = compile_config self._compiled_call = torch.compile(self.__call__, **compile_config.to_dict()) return self._compiled_call diff --git a/src/transformers/models/aria/modular_aria.py b/src/transformers/models/aria/modular_aria.py index 295e2dcb7465..239921b96a63 100644 --- a/src/transformers/models/aria/modular_aria.py +++ b/src/transformers/models/aria/modular_aria.py @@ -1222,6 +1222,8 @@ def _init_weights(self, module): class AriaPreTrainedModel(LlamaPreTrainedModel): + _supports_static_cache = False # MoE models don't work with torch.compile (dynamic slicing) + def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): @@ -1530,6 +1532,7 @@ def forward( output_hidden_states=output_hidden_states, return_dict=return_dict, num_logits_to_keep=num_logits_to_keep, + cache_position=cache_position, ) logits = outputs[0] diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 40746dc14dc8..a521129a5487 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -2019,20 +2019,27 @@ def test_generate_compile_model_forward(self): # get eager + dynamic cache results for future comparison dynamic_outputs = [] for model_inputs in main_input_sets: - dynamic_outputs.append(model.generate(**model_inputs, **generation_kwargs)) - + gen_out = model.generate(**model_inputs, **generation_kwargs) + dynamic_outputs.append(gen_out) + # sanity checks + self.assertTrue(isinstance(gen_out.past_key_values, DynamicCache)) + self.assertFalse(gen_out.past_key_values.is_compileable) + self.assertFalse(hasattr(model, "_compiled_call")) # our auto compile should NOT have been called + + # get compiled results -- relies on the automatic compilation triggered by specific "cache_implementation" if "gemma2" in model_class.__name__.lower(): generation_kwargs["cache_implementation"] = "hybrid" else: generation_kwargs["cache_implementation"] = "static" - # get compiled results - torch.compiler.reset() - model.forward = torch.compile(model.forward, fullgraph=True, mode="reduce-overhead") - compiled_outputs = [] for model_inputs in main_input_sets: - compiled_outputs.append(model.generate(**model_inputs, **generation_kwargs)) + gen_out = model.generate(**model_inputs, **generation_kwargs) + compiled_outputs.append(gen_out) + # sanity checks + self.assertFalse(isinstance(gen_out.past_key_values, DynamicCache)) + self.assertTrue(gen_out.past_key_values.is_compileable) + self.assertTrue(hasattr(model, "_compiled_call")) # our auto compile should have been called for dynamic_result, compiled_result in zip(dynamic_outputs, compiled_outputs): self._check_similar_generate_outputs(dynamic_result, compiled_result) From ab51b67ddcc9528816ae23a8cfa131bca41aa9f8 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Mon, 27 Jan 2025 19:32:39 +0000 Subject: [PATCH 06/10] fix a few more cases (a few to go) --- 
src/transformers/models/emu3/modeling_emu3.py | 1 + src/transformers/models/emu3/modular_emu3.py | 1 + .../models/mixtral/modeling_mixtral.py | 2 +- .../models/mixtral/modular_mixtral.py | 10 +++++++++ .../models/phimoe/modeling_phimoe.py | 2 +- tests/generation/test_utils.py | 22 +++++++++---------- 6 files changed, 25 insertions(+), 13 deletions(-) diff --git a/src/transformers/models/emu3/modeling_emu3.py b/src/transformers/models/emu3/modeling_emu3.py index b42a222f6ce9..5beea9633114 100644 --- a/src/transformers/models/emu3/modeling_emu3.py +++ b/src/transformers/models/emu3/modeling_emu3.py @@ -1796,6 +1796,7 @@ def forward( class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin): _tied_weights_keys = ["text_model.lm_head.weight"] + _supports_static_cache = False # `get_image_tokens()`, called when `pixel_values` is passed, is not compileable def __init__(self, config): super().__init__(config) diff --git a/src/transformers/models/emu3/modular_emu3.py b/src/transformers/models/emu3/modular_emu3.py index aacf52fe31c6..43d10bd62d86 100644 --- a/src/transformers/models/emu3/modular_emu3.py +++ b/src/transformers/models/emu3/modular_emu3.py @@ -1108,6 +1108,7 @@ def forward(**super_kwargs): class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin): _tied_weights_keys = ["text_model.lm_head.weight"] + _supports_static_cache = False # `get_image_tokens()`, called when `pixel_values` is passed, is not compileable def __init__(self, config): super().__init__(config) diff --git a/src/transformers/models/mixtral/modeling_mixtral.py b/src/transformers/models/mixtral/modeling_mixtral.py index 8cf2d0e8fa8d..242752168029 100644 --- a/src/transformers/models/mixtral/modeling_mixtral.py +++ b/src/transformers/models/mixtral/modeling_mixtral.py @@ -484,7 +484,7 @@ class MixtralPreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_cache_class = True _supports_quantized_cache = True - _supports_static_cache = True + _supports_static_cache = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) def _init_weights(self, module): std = self.config.initializer_range diff --git a/src/transformers/models/mixtral/modular_mixtral.py b/src/transformers/models/mixtral/modular_mixtral.py index a6069f69b334..3e53dda54539 100644 --- a/src/transformers/models/mixtral/modular_mixtral.py +++ b/src/transformers/models/mixtral/modular_mixtral.py @@ -45,7 +45,9 @@ MistralForSequenceClassification, MistralForTokenClassification, MistralModel, + MistralPreTrainedModel, MistralRMSNorm, + MistralRotaryEmbedding, ) from .configuration_mixtral import MixtralConfig @@ -313,6 +315,14 @@ def forward( return outputs +class MixtralRotaryEmbedding(MistralRotaryEmbedding): + pass + + +class MixtralPreTrainedModel(MistralPreTrainedModel): + _supports_static_cache = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) + + class MixtralModel(MistralModel): def __init__(self, config: MixtralConfig): super().__init__(config) diff --git a/src/transformers/models/phimoe/modeling_phimoe.py b/src/transformers/models/phimoe/modeling_phimoe.py index b540dd18300e..9db42f8e84b1 100644 --- a/src/transformers/models/phimoe/modeling_phimoe.py +++ b/src/transformers/models/phimoe/modeling_phimoe.py @@ -913,7 +913,7 @@ class PhimoePreTrainedModel(PreTrainedModel): _supports_flex_attn = True _supports_cache_class = True _supports_quantized_cache = True - _supports_static_cache = True + _supports_static_cache = False # MoE models 
don't work with torch.compile (`torch.where(condition)` not supported) def _init_weights(self, module): std = self.config.initializer_range diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index a521129a5487..7baa577afd64 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -2004,9 +2004,9 @@ def test_generate_compile_model_forward(self): else: input_1[key] = value input_2[key] = value - main_input_sets = [input_1, input_2] + model_input_sets = [input_1, input_2] self.assertTrue( - main_input_sets[0][model.main_input_name].shape == main_input_sets[1][model.main_input_name].shape + model_input_sets[0][model.main_input_name].shape == model_input_sets[1][model.main_input_name].shape ) generation_kwargs = { @@ -2015,25 +2015,25 @@ def test_generate_compile_model_forward(self): "return_dict_in_generate": True, "output_scores": True, } + has_defined_cache_implementation = model.generation_config.cache_implementation is not None # get eager + dynamic cache results for future comparison dynamic_outputs = [] - for model_inputs in main_input_sets: + for model_inputs in model_input_sets: gen_out = model.generate(**model_inputs, **generation_kwargs) dynamic_outputs.append(gen_out) - # sanity checks - self.assertTrue(isinstance(gen_out.past_key_values, DynamicCache)) - self.assertFalse(gen_out.past_key_values.is_compileable) - self.assertFalse(hasattr(model, "_compiled_call")) # our auto compile should NOT have been called + # sanity checks for the default cache implementation + if not has_defined_cache_implementation: + self.assertTrue(isinstance(gen_out.past_key_values, DynamicCache)) + self.assertFalse(gen_out.past_key_values.is_compileable) + self.assertFalse(hasattr(model, "_compiled_call")) # our auto compile should NOT have been called # get compiled results -- relies on the automatic compilation triggered by specific "cache_implementation" - if "gemma2" in model_class.__name__.lower(): - generation_kwargs["cache_implementation"] = "hybrid" - else: + if not has_defined_cache_implementation: generation_kwargs["cache_implementation"] = "static" compiled_outputs = [] - for model_inputs in main_input_sets: + for model_inputs in model_input_sets: gen_out = model.generate(**model_inputs, **generation_kwargs) compiled_outputs.append(gen_out) # sanity checks From 7777cf15bd11bdbf6a3a4d04f8dedd79db37fa7d Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 28 Jan 2025 12:11:15 +0000 Subject: [PATCH 07/10] all working :D --- tests/generation/test_utils.py | 18 ++++++++++++++---- tests/models/whisper/test_modeling_whisper.py | 5 +++++ 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 7baa577afd64..6f2587ec356e 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -2024,8 +2024,13 @@ def test_generate_compile_model_forward(self): dynamic_outputs.append(gen_out) # sanity checks for the default cache implementation if not has_defined_cache_implementation: - self.assertTrue(isinstance(gen_out.past_key_values, DynamicCache)) - self.assertFalse(gen_out.past_key_values.is_compileable) + decoder_cache = ( + gen_out.past_key_values.self_attention_cache + if config.is_encoder_decoder + else gen_out.past_key_values + ) + self.assertTrue(isinstance(decoder_cache, DynamicCache)) + self.assertFalse(decoder_cache.is_compileable) self.assertFalse(hasattr(model, "_compiled_call")) # our auto compile should NOT have been called # get compiled results -- 
relies on the automatic compilation triggered by specific "cache_implementation" @@ -2037,8 +2042,13 @@ def test_generate_compile_model_forward(self): gen_out = model.generate(**model_inputs, **generation_kwargs) compiled_outputs.append(gen_out) # sanity checks - self.assertFalse(isinstance(gen_out.past_key_values, DynamicCache)) - self.assertTrue(gen_out.past_key_values.is_compileable) + decoder_cache = ( + gen_out.past_key_values.self_attention_cache + if config.is_encoder_decoder + else gen_out.past_key_values + ) + self.assertFalse(isinstance(decoder_cache, DynamicCache)) + self.assertTrue(decoder_cache.is_compileable) self.assertTrue(hasattr(model, "_compiled_call")) # our auto compile should have been called for dynamic_result, compiled_result in zip(dynamic_outputs, compiled_outputs): diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py index 2eff406a3b56..fda4ea84b72d 100644 --- a/tests/models/whisper/test_modeling_whisper.py +++ b/tests/models/whisper/test_modeling_whisper.py @@ -1602,6 +1602,11 @@ def test_labels_sequence_max_length_error_after_changing_config(self): with self.assertRaises(ValueError): model(input_features=input_features, labels=labels) + # TODO (joao, eustache): fix me :) + @unittest.skip(reason="Whisper's custom generate is not consistent regarding the cache return types") + def test_generate_compile_model_forward(self): + pass + @require_torch @require_torchaudio From 07539631d8ac44ee448e15a2278a7d30fdf365d5 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 28 Jan 2025 12:29:52 +0000 Subject: [PATCH 08/10] make fixup --- src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py b/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py index 2046deef0b3e..78a11176e192 100644 --- a/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +++ b/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py @@ -332,7 +332,7 @@ class Qwen2_5_VLPreTrainedModel(PreTrainedModel): _supports_flash_attn_2 = True _supports_sdpa = True _supports_cache_class = True - _supports_static_cache = True + _supports_static_cache = False # TODO (joao): fix. 
torch.compile failing probably due to `cache_positions` def _init_weights(self, module): std = self.config.initializer_range From 4430684867d66b3ce76229d7a936c2b8ba1b538c Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 28 Jan 2025 12:47:43 +0000 Subject: [PATCH 09/10] add compile cache reset --- tests/generation/test_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 9ae8b76c9a4d..8338cd771359 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1987,6 +1987,8 @@ def test_generate_compile_model_forward(self): if not model_class._supports_static_cache: self.skipTest("This model doesn't support static cache (= no expectations of compilation support)") + torch.compiler.reset() # prevent cached compilation from being used in the test + config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=4) model = model_class(config).to(torch_device) From bc4e8b31fdc6e880c3a8809a3a64c5db5f5b0fbc Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 28 Jan 2025 13:50:11 +0000 Subject: [PATCH 10/10] allow compilation on cpu --- src/transformers/generation/configuration_utils.py | 6 ++++-- src/transformers/generation/utils.py | 4 +++- tests/generation/test_utils.py | 8 +++++--- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py index 3f142ce77298..a0e96c31cb59 100644 --- a/src/transformers/generation/configuration_utils.py +++ b/src/transformers/generation/configuration_utils.py @@ -1579,7 +1579,7 @@ def construct_processor(self, vocab_size: int, device) -> "WatermarkLogitsProces @dataclass -class CompileConfig(object): +class CompileConfig: """ Class that holds arguments relative to `torch.compile` behavior, when using automatic compilation in `generate`. See [`torch.compile`](https://pytorch.org/docs/stable/generated/torch.compile.html) for more details on the arguments. @@ -1620,7 +1620,9 @@ class CompileConfig(object): backend: Union[str, Callable] = "inductor" mode: str = "reduce-overhead" options: Optional[dict] = None + # Used to flag our `generate` call to compile on e.g. CPU. Often not optimal, but useful for testing purposes. 
+ _compile_all_devices = None def to_dict(self) -> Dict[str, Any]: """Serializes this instance to a Python dictionary.""" - return copy.deepcopy(self.__dict__) + return copy.deepcopy({key: value for key, value in self.__dict__.items() if key != "_compile_all_devices"}) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 89dc294cf56d..cb6ec15bb901 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -3179,7 +3179,9 @@ def _sample( model_forward = self.__call__ if isinstance(model_kwargs.get("past_key_values"), Cache): is_compileable = model_kwargs["past_key_values"].is_compileable - if is_compileable and self.device.type == "cuda": + if is_compileable and ( + self.device.type == "cuda" or generation_config.compile_config._compile_all_devices + ): os.environ["TOKENIZERS_PARALLELISM"] = "0" model_forward = self.get_compiled_call(generation_config.compile_config) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 8338cd771359..102153fc4b85 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1987,8 +1987,6 @@ def test_generate_compile_model_forward(self): if not model_class._supports_static_cache: self.skipTest("This model doesn't support static cache (= no expectations of compilation support)") - torch.compiler.reset() # prevent cached compilation from being used in the test - config, inputs_dict = self.prepare_config_and_inputs_for_generate(batch_size=4) model = model_class(config).to(torch_device) @@ -2011,13 +2009,17 @@ def test_generate_compile_model_forward(self): model_input_sets[0][model.main_input_name].shape == model_input_sets[1][model.main_input_name].shape ) + # compilation-specific setup + torch.compiler.reset() # prevent cached compilation from being used in the test + has_defined_cache_implementation = model.generation_config.cache_implementation is not None + model.generation_config.compile_config._compile_all_devices = True # force compilation (e.g. fast CI, CPU) + generation_kwargs = { "do_sample": False, "max_new_tokens": 5, "return_dict_in_generate": True, "output_scores": True, } - has_defined_cache_implementation = model.generation_config.cache_implementation is not None # get eager + dynamic cache results for future comparison dynamic_outputs = []