8000 [Intel GPU] qlinear.pointwise with mixed dtype support by ZhiweiYan-96 · Pull Request #136753 · pytorch/pytorch · GitHub
[go: up one dir, main page]

Skip to content

[Intel GPU] qlinear.pointwise with mixed dtype support #136753

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 58 commits into from
Closed
Changes from 1 commit
Commits
Show all changes
58 commits
Select commit Hold shift + click to select a range
b1eb10e
Update
ZhiweiYan-96 Sep 26, 2024
dae8179
Update
ZhiweiYan-96 Oct 9, 2024
9c0eda1
Update
ZhiweiYan-96 Oct 9, 2024
db98241
Update
ZhiweiYan-96 Oct 17, 2024
833de1e
Update
ZhiweiYan-96 Oct 21, 2024
21ee3c3
Update
ZhiweiYan-96 Oct 23, 2024
8f1ee44
Update
ZhiweiYan-96 Oct 23, 2024
80e557a
Update
ZhiweiYan-96 Oct 24, 2024
73179b4
Update
ZhiweiYan-96 Oct 24, 2024
2ef0fdf
Update
ZhiweiYan-96 Oct 26, 2024
5d50dbc
Update
ZhiweiYan-96 Oct 27, 2024
7b1979b
Update
ZhiweiYan-96 Oct 29, 2024
cb5ac81
Update
ZhiweiYan-96 Oct 29, 2024
72e233c
Update
ZhiweiYan-96 Oct 29, 2024
8ae02a4
Update
ZhiweiYan-96 Oct 30, 2024
6eb4147
Update
ZhiweiYan-96 Oct 31, 2024
f276e7a
Update
ZhiweiYan-96 Oct 31, 2024
4b079ec
Update
ZhiweiYan-96 Oct 31, 2024
b9693ad
Update
ZhiweiYan-96 Nov 2, 2024
4039680
Update
ZhiweiYan-96 Nov 3, 2024
c81073c
Update
ZhiweiYan-96 Nov 4, 2024
9cf8dc8
Update
ZhiweiYan-96 Nov 4, 2024
36f84ff
Update
ZhiweiYan-96 Nov 4, 2024
8c6b9a4
Update
ZhiweiYan-96 Nov 4, 2024
e597d92
Update
ZhiweiYan-96 Nov 4, 2024
0671273
Update
ZhiweiYan-96 Nov 4, 2024
62a73eb
Update
ZhiweiYan-96 Nov 4, 2024
a7d21f9
Update
ZhiweiYan-96 Nov 5, 2024
d1e60e8
Update
ZhiweiYan-96 Nov 5, 2024
ac8e729
Update
ZhiweiYan-96 Nov 21, 2024
f6c2f09
Update
ZhiweiYan-96 Nov 28, 2024
64d364c
Update
ZhiweiYan-96 Dec 30, 2024
525e0e5
Update
ZhiweiYan-96 Jan 2, 2025
2638b20
Update
ZhiweiYan-96 Jan 2, 2025
2bd304f
Update
ZhiweiYan-96 Jan 3, 2025
3277e09
Update
ZhiweiYan-96 Jan 6, 2025
0aca22a
Update
ZhiweiYan-96 Jan 6, 2025
9bc38e6
Update
ZhiweiYan-96 Jan 7, 2025
a57e04b
Update
ZhiweiYan-96 Jan 7, 2025
d50865a
Update
ZhiweiYan-96 Jan 7, 2025
3636740
Update
ZhiweiYan-96 Jan 8, 2025
628487a
Update
ZhiweiYan-96 Jan 8, 2025
3fafa15
Update
ZhiweiYan-96 Jan 9, 2025
1db9b8d
Update
ZhiweiYan-96 Jan 10, 2025
9da2d41
Update
ZhiweiYan-96 Jan 16, 2025
1f1bd79
Update
ZhiweiYan-96 Jan 17, 2025
9aaa7cc
Update
ZhiweiYan-96 Jan 17, 2025
09ba9bf
Update
ZhiweiYan-96 Jan 20, 2025
eea72ec
Update
ZhiweiYan-96 Jan 20, 2025
a2410dd
Update
ZhiweiYan-96 Jan 22, 2025
9fa7182
Update
ZhiweiYan-96 Jan 23, 2025
dc1149b
Update
ZhiweiYan-96 Feb 10, 2025
384279a
Update
guangyey Feb 10, 2025
e51af94
Update
guangyey Feb 11, 2025
cef0193
Update
ZhiweiYan-96 Feb 11, 2025
56fa0cf
Update
ZhiweiYan-96 Feb 11, 2025
7fbe418
Update
ZhiweiYan-96 Feb 12, 2025
41c7207
Update
ZhiweiYan-96 Feb 17, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Update
[ghstack-poisoned]
  • Loading branch information
ZhiweiYan-96 committed Oct 9, 2024
commit dae8179f3cc84c1c1f930b1ab3523dcf8ac13a65
81 changes: 31 additions & 50 deletions test/inductor/test_mkldnn_pattern_matcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -1705,7 +1705,7 @@ def test_qlinear_gelu_int8_mixed_bf16(self, device="cpu"):
(torch.randn((2, 4)).to(device=device),), gelu, device=device, int8_mixed_bf16=True
)

def _qlinear_add_cpu_test_helper(self, device="cpu", use_relu=False, int8_mixed_bf16=False):
def _qlinear_add_cpu_test_helper(self, device="cpu", use_relu=False, int8_mixed_bf16=False, is_qat=True, is_dynamic=True):
r"""
This testcase will quantize two consecutive Linear->Add(->relu) patterns as:
X
Expand Down Expand Up @@ -1809,14 +1809,18 @@ def matcher_check_fn():
# (4 if is_dynamic else 5) + 2 * use_relu + to_bf16_after_binary,
# )

# is_qat_list = [False, True]
is_qat_list = [False]
# is_dynamic_list = [False, True]
is_dynamic_list = [False]
cases = itertools.product(is_qat_list, is_dynamic_list)
for is_qat, is_dynamic in cases:
print("int8_mixed_bf16:", int8_mixed_bf16)
self._test_common(
self._test_common(
mod,
(v,),
check_quantization=True,
check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
matcher_check_fn=matcher_check_fn,
is_qat=is_qat,
is_dynamic=is_dynamic,
)
if torch._inductor.config.cpp_wrapper:
# For CPP wrapper
self._test_code_common(
mod,
(v,),
[
Expand Down Expand Up @@ -1845,54 +1849,31 @@ def matcher_check_fn():
check_quantization=True,
num_include_ops=[2, 2],
)
# if torch._inductor.config.cpp_wrapper:
# # For CPP wrapper
# self._test_code_common(
# mod,
# (v,),
# [
# "op_qlinear_pointwise.call",
# "op_qlinear_pointwise_binary.call",
# ],
# [],
# check_quantization=True,
# num_include_ops=[2, 2],
# )
# else:
# # For python wrapper
# self._test_code_common(
# mod,
# (v,),
# [
# "torch.ops.onednn.qlinear_pointwise.tensor",
# "torch.ops.onednn.qlinear_pointwise.binary",
# ],
# [],
# check_quantization=True,
# num_include_ops=[2, 2],
# )

@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_add_mkldnn(self, device="cpu"):
self._qlinear_add_cpu_test_helper(device=device)

@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_add_int8_mixed_bf16(self, device="cpu"):
self._qlinear_add_cpu_test_helper(device=device, int8_mixed_bf16=True)

@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_add_relu(self, device="cpu"):
self._qlinear_add_cpu_test_helper(device=device, use_relu=True)
@parametrize("use_relu", [True, False])
@parametrize("is_qat", [True, False])
@parametrize("is_dynamic", [True, False])
def test_qlinear_add_cpu(self, use_relu, is_qat, is_dynamic):
self._qlinear_add_cpu_test_helper(
use_relu=use_relu, is_qat=is_qat, is_dynamic=is_dynamic
)

@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_add_relu_int8_mixed_bf16(self, device="cpu"):
self._qlinear_add_cpu_test_helper(device=device, use_relu=True, int8_mixed_bf16=True)
@parametrize("use_relu", [True, False])
@parametrize("is_qat", [True, False])
@parametrize("is_dynamic", [True, False])
def test_qlinear_add_int8_mixed_bf16(self, device, use_relu, is_qat, is_dynamic):
self._qlinear_add_cpu_test_helper(
device=device,
int8_mixed_bf16=True,
use_relu=use_relu,
is_qat=is_qat,
is_dynamic=is_dynamic,
)

def _qlinear_dequant_promotion_cpu_test_helper(
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
def _qlinear_dequant_promotion_cpu_test_helper(
def _qlinear_dequant_promotion_test_helper(

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the reminder; I have changed all the naming in this file accordingly.

self,
Expand Down
Loading
You are viewing a condensed version of this merge commit. You can view the full changes here.
0