Update on "[dynamo][not ready] polyfill infra for classes" · pytorch/pytorch@f770084 · GitHub

Commit f770084

committed
Update on "[dynamo][not ready] polyfill infra for classes"
cc voznesenskym penguinwu EikanWang jgong5 Guobing-Chen XiaobingSuper zhuhaozhe blzheng wenzhe-nrv jiayisunx chenyang78 kadeng chauhang amjames [ghstack-poisoned]
2 parents 6eaf290 + 0da1d3c commit f770084

File tree

2 files changed: +6 additions, −7 deletions

aten/src/ATen/native/cuda/Math.cuh

Lines changed: 3 additions & 4 deletions
@@ -758,11 +758,10 @@ const auto sinc_string = jiterator_stringify(
   T sinc(T a) {
     if (a == T(0)) {
       return T(1);
-    } else {
-      constexpr T pi = T(3.14159265358979323846L);
-      T product = pi * a;
-      return std::sin(product) / product;
     }
+    constexpr T pi = T(3.14159265358979323846L);
+    T product = pi * a;
+    return std::sin(product) / product;
   }
); // sinc_string
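The hunk above only rewrites the else branch of the jiterated sinc helper as an early return; the numerical behavior is unchanged. As a sanity-check sketch (not part of the commit), the following Python mirrors that branch structure and compares it against torch.special.sinc, which computes the same normalized sinc:

import math
import torch

def sinc_reference(a: float) -> float:
    # Mirror of the refactored branch structure: early return at the singular
    # point, then the normalized sinc formula sin(pi * a) / (pi * a).
    if a == 0.0:
        return 1.0
    product = math.pi * a
    return math.sin(product) / product

# torch.special.sinc also computes the normalized sinc, so it serves as a reference.
xs = [0.0, 0.5, 1.0, -2.25, 3.75]
expected = torch.special.sinc(torch.tensor(xs, dtype=torch.float64))
actual = torch.tensor([sinc_reference(x) for x in xs], dtype=torch.float64)
assert torch.allclose(actual, expected)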

test/inductor/test_flex_attention.py

Lines changed: 3 additions & 3 deletions
@@ -2510,9 +2510,9 @@ def mask_mod(b, h, q, kv):
     @supported_platform
     def test_strided_backwards(self):
         shape = (1, 2, 4096, 64)
-        Q = torch.randn(shape, requires_grad=True, device="cuda", dtype=torch.bfloat16)
-        K = torch.randn(shape, requires_grad=True, device="cuda", dtype=torch.bfloat16)
-        V = torch.randn(shape, requires_grad=True, device="cuda", dtype=torch.bfloat16)
+        Q = torch.randn(shape, requires_grad=True, device="cuda")
+        K = torch.randn(shape, requires_grad=True, device="cuda")
+        V = torch.randn(shape, requires_grad=True, device="cuda")
         func = torch.compile(flex_attention, dynamic=True, fullgraph=True)

         K_sliced = K[:, :, :-128]
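This hunk only drops the explicit bfloat16 dtype, so the test inputs fall back to float32; the test otherwise compiles flex_attention and runs it on sliced, non-contiguous tensors. The sketch below is a minimal, hypothetical reconstruction of that pattern, assuming a CUDA device and a PyTorch build that provides torch.nn.attention.flex_attention; the slicing of Q and V and the backward call are illustrative assumptions, not the test's exact body.

import torch
from torch.nn.attention.flex_attention import flex_attention

if torch.cuda.is_available():
    shape = (1, 2, 4096, 64)  # (batch, heads, seq_len, head_dim)
    Q = torch.randn(shape, requires_grad=True, device="cuda")
    K = torch.randn(shape, requires_grad=True, device="cuda")
    V = torch.randn(shape, requires_grad=True, device="cuda")

    # Compile with dynamic shapes and require a single graph, as in the test.
    func = torch.compile(flex_attention, dynamic=True, fullgraph=True)

    # Slicing along the sequence dimension produces strided, non-contiguous views;
    # running backward through them is what "strided backwards" exercises.
    Q_s, K_s, V_s = Q[:, :, :-128], K[:, :, :-128], V[:, :, :-128]

    out = func(Q_s, K_s, V_s)
    out.sum().backward()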
