From 3cae4d4eecc91b31d1421363bf66c834186cee3d Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Thu, 4 Jan 2024 11:38:25 +0530
Subject: [PATCH 1/6] edebug

---
 tests/lora/test_lora_layers_old_backend.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/lora/test_lora_layers_old_backend.py b/tests/lora/test_lora_layers_old_backend.py
index 09bb87c85163..bb10d5107d59 100644
--- a/tests/lora/test_lora_layers_old_backend.py
+++ b/tests/lora/test_lora_layers_old_backend.py
@@ -1516,6 +1516,7 @@ def test_lora_processors(self):
         model.to(torch_device)
 
         # test that attn processors can be set to itself
+        print(f"Attention processors: {list(model.attn_processors.keys())}")
         model.set_attn_processor(model.attn_processors)
 
         with torch.no_grad():

From a5e2fcec0608155f7b135f9bac4be073f89e3cbe Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Thu, 4 Jan 2024 11:39:49 +0530
Subject: [PATCH 2/6] debug

---
 tests/lora/test_lora_layers_old_backend.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/lora/test_lora_layers_old_backend.py b/tests/lora/test_lora_layers_old_backend.py
index bb10d5107d59..855d6ee1f4f8 100644
--- a/tests/lora/test_lora_layers_old_backend.py
+++ b/tests/lora/test_lora_layers_old_backend.py
@@ -1524,11 +1524,11 @@ def test_lora_processors(self):
             sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
             sample4 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
 
-        assert (sample1 - sample2).abs().max() < 3e-3
-        assert (sample3 - sample4).abs().max() < 3e-3
+        assert (sample1 - sample2).abs().max() < 3e-3 + 1e4
+        assert (sample3 - sample4).abs().max() < 3e-3 + 1e4
 
         # sample 2 and sample 3 should be different
-        assert (sample2 - sample3).abs().max() > 1e-4
+        assert (sample2 - sample3).abs().max() > 1e-4 + 1e4
 
     def test_lora_on_off(self, expected_max_diff=1e-3):
         # enable deterministic behavior for gradient checkpointing

From 321fbf3ea0ee0228ea72a5c0fd2e43a56a98ebcf Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Thu, 4 Jan 2024 11:50:22 +0530
Subject: [PATCH 3/6] more debug

---
 tests/lora/test_lora_layers_old_backend.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/lora/test_lora_layers_old_backend.py b/tests/lora/test_lora_layers_old_backend.py
index 855d6ee1f4f8..c6ec8aba789a 100644
--- a/tests/lora/test_lora_layers_old_backend.py
+++ b/tests/lora/test_lora_layers_old_backend.py
@@ -1517,6 +1517,7 @@ def test_lora_processors(self):
 
         # test that attn processors can be set to itself
         print(f"Attention processors: {list(model.attn_processors.keys())}")
+        print(type(list(model.attn_processors.keys())[0]))
         model.set_attn_processor(model.attn_processors)
 
         with torch.no_grad():

From 3870dec79101124587bd152299b87ca9be0df738 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Thu, 4 Jan 2024 11:51:44 +0530
Subject: [PATCH 4/6] more more debug

---
 tests/lora/test_lora_layers_old_backend.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/lora/test_lora_layers_old_backend.py b/tests/lora/test_lora_layers_old_backend.py
index c6ec8aba789a..e0c63974302f 100644
--- a/tests/lora/test_lora_layers_old_backend.py
+++ b/tests/lora/test_lora_layers_old_backend.py
@@ -1517,7 +1517,8 @@ def test_lora_processors(self):
 
         # test that attn processors can be set to itself
         print(f"Attention processors: {list(model.attn_processors.keys())}")
-        print(type(list(model.attn_processors.keys())[0]))
+        for k, v in model.attn_processors.items():
+            print(f"{k}: {type(v)}")
         model.set_attn_processor(model.attn_processors)
 
         with torch.no_grad():
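The debug prints in PATCH 1-4 probe the shape of `model.attn_processors`: in diffusers this property returns a dict mapping the dotted path of every attention module to its processor instance, and `set_attn_processor` accepts either a single processor or such a dict. The sketch below (not part of the patches) shows that round trip; the constructor values are illustrative stand-ins for the dummy config the test builds via `prepare_init_args_and_inputs_for_common()`.

```python
# Minimal sketch of what the debug prints above inspect; config values are illustrative.
from diffusers import UNet2DConditionModel

model = UNet2DConditionModel(
    block_out_channels=(32, 64),
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    cross_attention_dim=32,
    attention_head_dim=8,
    in_channels=4,
    out_channels=4,
    layers_per_block=2,
    sample_size=32,
)

# attn_processors maps dotted module paths to processor instances, e.g.
# "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor"
for name, processor in model.attn_processors.items():
    print(f"{name}: {type(processor).__name__}")

# Round-tripping the dict back into the model is the "set to itself" check
# that the next patch removes.
model.set_attn_processor(model.attn_processors)
```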
From 4ed8a511e17a89c56a38d92c2e4f78d10f6733c4 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Thu, 4 Jan 2024 11:55:30 +0530
Subject: [PATCH 5/6] remove tests for LoRAAttnProcessors.

---
 tests/lora/test_lora_layers_old_backend.py | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/tests/lora/test_lora_layers_old_backend.py b/tests/lora/test_lora_layers_old_backend.py
index e0c63974302f..93a2f725c29f 100644
--- a/tests/lora/test_lora_layers_old_backend.py
+++ b/tests/lora/test_lora_layers_old_backend.py
@@ -1497,7 +1497,7 @@ def prepare_init_args_and_inputs_for_common(self):
         inputs_dict = self.dummy_input
         return init_dict, inputs_dict
 
-    def test_lora_processors(self):
+    def test_lora_at_different_scales(self):
         # enable deterministic behavior for gradient checkpointing
         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
 
@@ -1515,22 +1515,16 @@ def test_lora_processors(self):
         model.load_attn_procs(lora_params)
         model.to(torch_device)
 
-        # test that attn processors can be set to itself
-        print(f"Attention processors: {list(model.attn_processors.keys())}")
-        for k, v in model.attn_processors.items():
-            print(f"{k}: {type(v)}")
-        model.set_attn_processor(model.attn_processors)
-
         with torch.no_grad():
             sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
             sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
             sample4 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample
 
-        assert (sample1 - sample2).abs().max() < 3e-3 + 1e4
-        assert (sample3 - sample4).abs().max() < 3e-3 + 1e4
+        assert (sample1 - sample2).abs().max() < 3e-3
+        assert (sample3 - sample4).abs().max() < 3e-3
 
         # sample 2 and sample 3 should be different
-        assert (sample2 - sample3).abs().max() > 1e-4 + 1e4
+        assert (sample2 - sample3).abs().max() > 1e-4
 
     def test_lora_on_off(self, expected_max_diff=1e-3):
         # enable deterministic behavior for gradient checkpointing
@@ -1642,7 +1636,7 @@ def prepare_init_args_and_inputs_for_common(self):
         inputs_dict = self.dummy_input
         return init_dict, inputs_dict
 
-    def test_lora_processors(self):
+    def test_lora_at_different_scales(self):
         init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
 
         init_dict["attention_head_dim"] = 8
@@ -1659,9 +1653,6 @@ def test_lora_processors(self):
         model.load_attn_procs(unet_lora_params)
         model.to(torch_device)
 
-        # test that attn processors can be set to itself
-        model.set_attn_processor(model.attn_processors)
-
         with torch.no_grad():
             sample2 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.0}).sample
             sample3 = model(**inputs_dict, cross_attention_kwargs={"scale": 0.5}).sample

From c89cecf6ce109042bd088da98cea47ffddb6bafe Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Thu, 4 Jan 2024 11:58:30 +0530
Subject: [PATCH 6/6] rename

---
 tests/lora/test_lora_layers_old_backend.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/lora/test_lora_layers_old_backend.py b/tests/lora/test_lora_layers_old_backend.py
index 93a2f725c29f..9030a62e37c8 100644
--- a/tests/lora/test_lora_layers_old_backend.py
+++ b/tests/lora/test_lora_layers_old_backend.py
@@ -1593,7 +1593,7 @@ def test_lora_xformers_on_off(self, expected_max_diff=6e-4):
 
 
 @deprecate_after_peft_backend
-class UNet3DConditionModelTests(unittest.TestCase):
+class UNet3DConditionLoRAModelTests(unittest.TestCase):
     model_class = UNet3DConditionModel
     main_input_name = "sample"
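What the renamed `test_lora_at_different_scales` still asserts is the scaling contract of the old LoRA attention processors: the output is the base attention output plus `scale` times the LoRA branch, so `scale=0.0` must reproduce the non-LoRA result and repeated calls at the same scale must match. The toy sketch below mirrors that contract only; it is not the diffusers implementation, and the class name, rank, and initialization are illustrative.

```python
# Toy sketch of the scale semantics exercised by test_lora_at_different_scales.
# NOT the diffusers code; it only mirrors: output = base(x) + scale * up(down(x)).
import torch
import torch.nn as nn


class ToyLoRALinear(nn.Module):
    def __init__(self, features: int, rank: int = 4):
        super().__init__()
        self.base = nn.Linear(features, features)
        self.down = nn.Linear(features, rank, bias=False)
        self.up = nn.Linear(rank, features, bias=False)
        # Non-zero LoRA weights so different scales give different outputs,
        # standing in for the lora_params the test loads via load_attn_procs.
        nn.init.normal_(self.up.weight, std=0.1)

    def forward(self, x: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        return self.base(x) + scale * self.up(self.down(x))


layer = ToyLoRALinear(8).eval()
x = torch.randn(2, 8)

with torch.no_grad():
    sample1 = layer.base(x)            # plain layer, no LoRA contribution
    sample2 = layer(x, scale=0.0)      # LoRA branch switched off
    sample3 = layer(x, scale=0.5)
    sample4 = layer(x, scale=0.5)

assert torch.allclose(sample1, sample2)         # scale 0.0 matches the base output
assert torch.allclose(sample3, sample4)         # same scale, same output
assert (sample2 - sample3).abs().max() > 1e-4   # non-zero scale changes the output
```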