Skip to content

Commit fc47a36

Browse files
committed
fix ci by skipping pallas-cuda on new tests
1 parent 4bce3be commit fc47a36

1 file changed

Lines changed: 3 additions & 0 deletions

File tree

test/inductor/test_pallas.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2164,6 +2164,7 @@ def transformer(x, mask, *layer_params):
21642164
expected = transformer(x, mask, *all_params)
21652165
self.assertEqual(result, expected, atol=atol, rtol=rtol)
21662166

2167+
@skip_if_cuda
21672168
def test_transformer_tiny(self):
21682169
"""Test a 4-layer Llama-style transformer at tiny dimensions."""
21692170
self._run_transformer(
@@ -2177,6 +2178,7 @@ def test_transformer_tiny(self):
21772178
rtol=1e-2,
21782179
)
21792180

2181+
@skip_if_cuda
21802182
def test_transformer_medium(self):
21812183
"""Test a 4-layer transformer at Llama-7B-like dimensions."""
21822184
self._run_transformer(
@@ -2454,6 +2456,7 @@ def fn(indices, table):
24542456
expected = fn(indices, table)
24552457
self.assertEqual(result, expected)
24562458

2459+
@skip_if_cuda
24572460
def test_transformer_with_final_norm_and_lm_head(self):
24582461
"""Test multi-layer transformer + final RMSNorm + LM head (no embedding)."""
24592462
torch._dynamo.reset()

0 commit comments

Comments (0)