Commit a65f960

[release/2.7] Fix merge conflicts

1 parent 727707c commit a65f960

This commit removes leftover Git conflict markers (<<<<<<< HEAD / ======= / >>>>>>> 4ca33638af) that had been committed to five test files and reconciles the two sides of each conflict. Committed conflict markers are a syntax error in Python, so the affected test files could not even be imported until this fix.

File tree: 5 files changed, +4 −54 lines


test/distributed/_tools/test_sac_ilp.py

1 addition, 13 deletions

@@ -19,22 +19,13 @@
 )
 from torch.testing._internal.common_cuda import TEST_CUDA
 from torch.testing._internal.common_utils import (
-<<<<<<< HEAD
     MI300_ARCH,
+    NAVI4_ARCH,
     run_tests,
     skipIfRocmArch,
     skipIfTorchDynamo,
     TestCase,
 )
-=======
-    run_tests,
-    skipIfTorchDynamo,
-    TestCase,
-    skipIfRocmArch,
-    NAVI4_ARCH,
-)
-
->>>>>>> 4ca33638af ([release/2.6][SWDEV-523736] Skip&Fix some testcases for archs without SDPA or Navi4x (#2213))
 from torch.testing._internal.distributed._tensor.common_dtensor import (
     ModelArgs,
     Transformer,

@@ -146,11 +137,8 @@ def _collect_module_info_with_fake_tensor_mode(self) -> ModuleInfo:
 
     @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/115653")
     @unittest.skipIf(not TEST_CUDA, "CUDA not available")
-<<<<<<< HEAD
     @skipIfRocmArch(MI300_ARCH)
-=======
     @skipIfRocmArch(NAVI4_ARCH)
->>>>>>> 4ca33638af ([release/2.6][SWDEV-523736] Skip&Fix some testcases for archs without SDPA or Navi4x (#2213))
     def test_sac_ilp_case1(self):
         """
         This is a case where the memory budget is either binding or too tight,
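Resolution note: here the conflict is resolved by keeping both sides, so the import list carries both MI300_ARCH and NAVI4_ARCH and the test carries both skipIfRocmArch decorators. A minimal, self-contained sketch of how stacked skip decorators compose, using plain unittest.skipIf with stand-in flags instead of PyTorch's skipIfRocmArch helper:

import unittest

# Stand-ins for the real arch detection behind skipIfRocmArch; each decorator
# is evaluated independently, so the test is skipped on either architecture.
ON_MI300 = False
ON_NAVI4 = True


class Example(unittest.TestCase):
    @unittest.skipIf(ON_MI300, "skipped on MI300")
    @unittest.skipIf(ON_NAVI4, "skipped on Navi4")
    def test_case(self):
        self.assertTrue(True)


if __name__ == "__main__":
    unittest.main()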

test/dynamo/test_activation_checkpointing.py

1 addition, 5 deletions

@@ -1285,12 +1285,8 @@ def fn(x, ys):
         self.assertEqual(ref, res)
 
     @requires_cuda
-<<<<<<< HEAD
-    def test_pattern_matcher(self, device):
-=======
     @unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Some archs don't support SDPA")
-    def test_pattern_matcher(self):
->>>>>>> 4ca33638af ([release/2.6][SWDEV-523736] Skip&Fix some testcases for archs without SDPA or Navi4x (#2213))
+    def test_pattern_matcher(self, device):
         # Check that the sdpa op is recomputed in the backward graph
         # tests percolate_tags
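Resolution note: HEAD's device-parametrized signature is kept while the SDPA capability skip from the incoming branch is adopted. A hedged sketch of the capability-gating pattern, assuming a simplified module-level flag (the real PLATFORM_SUPPORTS_FLASH_ATTENTION in torch.testing._internal.common_cuda also inspects the GPU architecture):

import unittest

import torch

# Simplified stand-in; the real flag also checks arch/ROCm details.
PLATFORM_SUPPORTS_FLASH_ATTENTION = torch.cuda.is_available()


class SDPAExample(unittest.TestCase):
    @unittest.skipIf(
        not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Some archs don't support SDPA"
    )
    def test_sdpa_runs(self):
        q = torch.randn(1, 1, 8, 8, device="cuda", dtype=torch.float16)
        out = torch.nn.functional.scaled_dot_product_attention(q, q, q)
        self.assertEqual(out.shape, q.shape)


if __name__ == "__main__":
    unittest.main()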

test/dynamo/test_repros.py

1 addition, 25 deletions

@@ -6336,31 +6336,6 @@ def fn(x):
         inp = torch.randn(3, 3)
         self.assertEqual(fn(inp), opt_fn(inp))
 
-<<<<<<< HEAD
-=======
-    @requires_cuda
-    @unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Some archs don't support SDPA")
-    def test_sdpa_dynamic_shapes(self):
-        def f(x, s0, s1, s2):
-            q = x.view(2, s0, s2, s0)
-            return torch._C._nn.scaled_dot_product_attention(
-                q, q, q, attn_mask=None, dropout_p=0.0, is_causal=True
-            )
-
-        x = torch.randn(2, 32, 4096, dtype=torch.bfloat16, device="cuda")
-        x_ref = x.clone().detach().requires_grad_()
-        s0 = 32
-        s1 = 64
-        s2 = 128
-
-        f_compiled = torch.compile(f, dynamic=True)
-
-        with torch._dynamo.config.patch(assume_static_by_default=False):
-            out_ref = f(x_ref, s0, s1, s2)
-            out = f_compiled(x, s0, s1, s2)
-        self.assertEqual(out_ref, out)
-
->>>>>>> 4ca33638af ([release/2.6][SWDEV-523736] Skip&Fix some testcases for archs without SDPA or Navi4x (#2213))
     def test_bitwise_op_guard(self):
         # attempt evaluating a guard with BitwiseFn_bitwise_[and/or]
         def fn(x):

@@ -6842,6 +6817,7 @@ def f(x):
         self.assertEqual(cnt.frame_count, 1)
 
     @requires_cuda
+    @unittest.skipIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Some archs don't support SDPA")
     def test_sdpa_dynamic_shapes(self, device):
        def f(x, s0, s1, s2):
            q = x.view(2, s0, s2, s0)
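Resolution note: the duplicate test_sdpa_dynamic_shapes body injected by the merge is deleted, and the surviving device-parametrized copy gains the flash-attention skip. For readers unfamiliar with the pattern the test exercises, a minimal CPU-only sketch of compiling with dynamic shapes and comparing against eager (the real test runs scaled_dot_product_attention on CUDA):

import torch


def f(x, scale):
    # toy stand-in for the SDPA computation in the real test
    return (x * scale).sum(dim=-1)


x = torch.randn(2, 32, 64)
f_compiled = torch.compile(f, dynamic=True)

# assume_static_by_default=False makes dynamo treat shapes as dynamic
with torch._dynamo.config.patch(assume_static_by_default=False):
    out_ref = f(x, 0.5)
    out = f_compiled(x, 0.5)

torch.testing.assert_close(out_ref, out)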

test/inductor/test_flex_decoding.py

1 addition, 6 deletions

@@ -3,13 +3,8 @@
 
 import functools
 from collections import namedtuple
-<<<<<<< HEAD
 from typing import Callable, Optional, Union
-from unittest import expectedFailure, skipUnless
-=======
-from typing import Callable, Optional, Tuple, Union
-from unittest import expectedFailure, skipUnless, skipIf
->>>>>>> 4ca33638af ([release/2.6][SWDEV-523736] Skip&Fix some testcases for archs without SDPA or Navi4x (#2213))
+from unittest import expectedFailure, skipIf, skipUnless
 from unittest.mock import patch
 
 import torch

test/inductor/test_mkldnn_pattern_matcher.py

0 additions, 5 deletions

@@ -28,7 +28,6 @@
     parametrize,
     skipIfNoXPU,
     skipIfRocm,
-    skipIfRocmArch,
     TEST_ACL,
     TEST_MKL,
     xfailIfACL,

@@ -1047,11 +1046,7 @@ def test_qconv2d_xpu(self):
     @skipIfNoDynamoSupport
     @skipIfNoONEDNNBF16
     @skipIfNoONEDNN
-<<<<<<< HEAD
-    @skipIfRocmArch(MI300_ARCH)
-=======
     @skipIfRocm
->>>>>>> 4ca33638af ([release/2.6][SWDEV-523736] Skip&Fix some testcases for archs without SDPA or Navi4x (#2213))
     def test_qconv2d_int8_mixed_bf16(self):
         r"""
         This testcase will quantize a single Conv2d module with int8_mixed_bf16 quantization.
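Resolution note: the arch-specific MI300 skip from HEAD is dropped in favor of the incoming blanket @skipIfRocm, which is why the now-unused skipIfRocmArch import goes away in the first hunk. A rough stand-in (not the real PyTorch helpers) illustrating the difference in scope between the two decorators:

import unittest

# Stand-ins for torch.version.hip and the detected GPU architecture.
IS_ROCM = False
CURRENT_ARCH = "gfx942"
MI300_ARCH = ("gfx942",)


def skipIfRocm(fn):
    # skips on every ROCm build
    return unittest.skipIf(IS_ROCM, "test doesn't work on ROCm")(fn)


def skipIfRocmArch(archs):
    # skips only when the ROCm build runs on one of the listed archs
    def wrap(fn):
        reason = f"skipped on ROCm archs {archs}"
        return unittest.skipIf(IS_ROCM and CURRENT_ARCH in archs, reason)(fn)

    return wrap


class Example(unittest.TestCase):
    @skipIfRocm
    def test_broad(self):
        pass

    @skipIfRocmArch(MI300_ARCH)
    def test_narrow(self):
        pass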
