TST Make DoRA tests pass on XPU (#2493) · huggingface/peft@f7cda1f · GitHub
Commit f7cda1f

TST Make DoRA tests pass on XPU (#2493)
1 parent 36160a5 commit f7cda1f

3 files changed: 22 additions, 4 deletions

tests/test_common_gpu.py

Lines changed: 3 additions & 2 deletions
@@ -60,6 +60,7 @@
     device_count,
     load_cat_image,
     require_bitsandbytes,
+    require_deterministic_for_xpu,
     require_multi_accelerator,
     require_non_cpu,
 )
@@ -1124,8 +1125,8 @@ def test_serialization_shared_tensors(self):

     @require_non_cpu
     @pytest.mark.single_gpu_tests
+    @require_deterministic_for_xpu
     @require_bitsandbytes
-    @pytest.mark.skipif(not torch.cuda.is_available(), reason="XPU have numerial errors")
     def test_4bit_dora_inference(self):
         # check for same result with and without DoRA when initializing with init_lora_weights=False
         bnb_config = BitsAndBytesConfig(
@@ -1164,8 +1165,8 @@ def test_4bit_dora_inference(self):

     @require_non_cpu
     @pytest.mark.single_gpu_tests
+    @require_deterministic_for_xpu
     @require_bitsandbytes
-    @pytest.mark.skipif(not torch.cuda.is_available(), reason="XPU have numerial errors")
     def test_8bit_dora_inference(self):
         # check for same result with and without DoRA when initializing with init_lora_weights=False
         model = AutoModelForCausalLM.from_pretrained(

tests/test_initialization.py

Lines changed: 2 additions & 1 deletion
@@ -64,7 +64,7 @@
 from peft.utils import infer_device
 from peft.utils.hotswap import hotswap_adapter, prepare_model_for_compiled_hotswap

-from .testing_utils import load_dataset_english_quotes
+from .testing_utils import load_dataset_english_quotes, require_deterministic_for_xpu


 class TestLoraInitialization:
@@ -1053,6 +1053,7 @@ def test_lora_rslora_scaling_pattern(self):
         assert model.embed.scaling["default"] == expected_scaling["embed"]
         assert model.conv2d.scaling["default"] == expected_scaling["conv2d"]

+    @require_deterministic_for_xpu
     def test_lora_use_dora_linear(self, data):
         # check that dora is a no-op when initialized
         torch.manual_seed(0)

tests/testing_utils.py

Lines changed: 17 additions & 1 deletion
@@ -13,7 +13,7 @@
 # limitations under the License.
 import unittest
 from contextlib import contextmanager
-from functools import lru_cache
+from functools import lru_cache, wraps

 import numpy as np
 import pytest
@@ -159,6 +159,22 @@ def require_torchao(test_case):
     return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)


+def require_deterministic_for_xpu(test_case):
+    @wraps(test_case)
+    def wrapper(*args, **kwargs):
+        if torch_device == "xpu":
+            original_state = torch.are_deterministic_algorithms_enabled()
+            try:
+                torch.use_deterministic_algorithms(True)
+                return test_case(*args, **kwargs)
+            finally:
+                torch.use_deterministic_algorithms(original_state)
+        else:
+            return test_case(*args, **kwargs)
+
+    return wrapper
+
+
 @contextmanager
 def temp_seed(seed: int):
     """Temporarily set the random seed. This works for python numpy, pytorch."""

0 commit comments