From 6b5c135a47d08a096aff772eff4b03b9eafb7a3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9B=A8=E6=B3=93?= Date: Wed, 22 Apr 2026 02:11:36 +0800 Subject: [PATCH] fix: make sliced LoRA weight views contiguous and clone sliced params before save --- src/twinkle/model/multi_lora.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/twinkle/model/multi_lora.py b/src/twinkle/model/multi_lora.py index daa786dd..38371f38 100644 --- a/src/twinkle/model/multi_lora.py +++ b/src/twinkle/model/multi_lora.py @@ -336,7 +336,7 @@ def _lora_A(x, *args, **kwargs): def _get_weight_tensors(self): tensors = self._get_weight_tensors_origin() - return [t[:_lora.tenant_config.r, :] for t in tensors] + return [t[:_lora.tenant_config.r, :].contiguous() for t in tensors] lora_A._get_weight_tensors_origin = lora_A._get_weight_tensors lora_A._get_weight_tensors = MethodType(_get_weight_tensors, lora_A) @@ -353,7 +353,7 @@ def _lora_B(x, *args, **kwargs): def _get_weight_tensors(self): tensors = self._get_weight_tensors_origin() - return [t[:, :_lora.tenant_config.r] for t in tensors] + return [t[:, :_lora.tenant_config.r].contiguous() for t in tensors] lora_B._get_weight_tensors_origin = lora_B._get_weight_tensors lora_B._get_weight_tensors = MethodType(_get_weight_tensors, lora_B) @@ -572,13 +572,13 @@ def save_lora_converter(self, name, parameter, adapter_name): if _param is None: pass elif 'embedding_A' in name: - _param = _param[:, :_lora.tenant_config.r] + _param = _param[:, :_lora.tenant_config.r].clone() elif 'embedding_B' in name: - _param = _param[:_lora.tenant_config.r, :] + _param = _param[:_lora.tenant_config.r, :].clone() elif '_A' in name: - _param = _param[:_lora.tenant_config.r, :] + _param = _param[:_lora.tenant_config.r, :].clone() elif '_B' in name: - _param = _param[:, :_lora.tenant_config.r] + _param = _param[:, :_lora.tenant_config.r].clone() name = name.replace(f'.{_lora.adapter_name}.', '.') return name, _param else: