From cb97e87c634a546afa81e2d6c52d17a7401f7882 Mon Sep 17 00:00:00 2001
From: Benjamin Bossan
Date: Tue, 28 Apr 2026 12:07:57 +0200
Subject: [PATCH] TST Run fast PEFT tests in normal CI

PEFT tests were marked as slow, even though they're not slow. According
to a comment there, this was probably only done to avoid running the
tests before PEFT was released. The @slow marker is now removed.

All these tests use tiny models and on my machine, with warm HF cache,
they passed in <12 sec on CPU without parallelism. It should thus be
safe to assume that they are indeed fast enough.

With these tests now running in normal CI, we should be able to prevent
PEFT regressions in the future.

Note that the hotswapping tests are still marked as slow, as they
partly require calling torch.compile.
---
 tests/peft_integration/test_peft_integration.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/tests/peft_integration/test_peft_integration.py b/tests/peft_integration/test_peft_integration.py
index 33880c88135d..dc30cbf96d4e 100644
--- a/tests/peft_integration/test_peft_integration.py
+++ b/tests/peft_integration/test_peft_integration.py
@@ -61,8 +61,6 @@ class PeftTesterMixin:
     transformers_test_model_classes = (AutoModelForCausalLM, OPTForCausalLM)
 
 
-# TODO: run it with CI after PEFT release.
-@slow
 class PeftIntegrationTester(unittest.TestCase, PeftTesterMixin):
     """
     A testing suite that makes sure that the PeftModel class is correctly integrated into the transformers library.
@@ -1003,7 +1001,7 @@ def test_mixtral_lora_conversion(self):
         if version.parse(importlib.metadata.version("peft")) < version.parse("0.19.0"):
             self.skipTest("For this test to pass, PEFT 0.19 is required.")
 
-        inputs = torch.arange(10).view(1, -1).to(0)
+        inputs = torch.arange(10).view(1, -1).to(torch_device)
         model_name = "hf-internal-testing/Mixtral-tiny"
         adapter_name = "peft-internal-testing/mixtral-pre-v5-lora"
 