softmax: add device check for xpu with half_to_float by weishi-deng · Pull Request #150278 · pytorch/pytorch
Open

weishi-deng wants to merge 51 commits into pytorch:main from weishi-deng:xpu-softmax

Changes from 1 commit (58f8b21, the first commit in the series); all 51 commits are listed below.

Commits
58f8b21 softmax: add device check for xpu with half_to_float (weishi-deng, Mar 31, 2025)
54bd692 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
a7435e5 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
b3b8a32 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
9f9854c Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
88c89c4 Merge branch 'main' into xpu-softmax (weishi-deng, Apr 21, 2025)
745f7e6 softmax: add device check for xpu with half_to_float (weishi-deng, Mar 31, 2025)
269cac2 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
8afc462 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
1011f94 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
c78ce25 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
508df2c Merge branch 'xpu-softmax' of https://github.com/weishi-deng/pytorch … (weishi-deng, May 7, 2025)
16479ba add ut (weishi-deng, May 7, 2025)
8f74e86 Merge branch 'pytorch:main' into xpu-softmax (weishi-deng, May 9, 2025)
df16602 Update test/xpu/test_softmax.py (weishi-deng, May 9, 2025)
76f7464 update unit test (weishi-deng, May 9, 2025)
0e0b150 update ut (weishi-deng, May 9, 2025)
e001a8b Update test/test_xpu.py (guangyey, May 9, 2025)
321fdba Update test/test_xpu.py (guangyey, May 9, 2025)
5e844f2 Update test/test_xpu.py (guangyey, May 9, 2025)
2c1136d Update test/test_xpu.py (guangyey, May 9, 2025)
698e641 Update test/test_xpu.py (guangyey, May 9, 2025)
0f19bb4 Update test/test_xpu.py (guangyey, May 9, 2025)
a8f156a Update test_xpu.py (guangyey, May 9, 2025)
71b8d22 Update test/test_xpu.py (guangyey, May 9, 2025)
6ac21b2 Update test_xpu.py (guangyey, May 11, 2025)
653b161 Merge branch 'pytorch:main' into xpu-softmax (weishi-deng, May 12, 2025)
883fc0d softmax: add device check for xpu with half_to_float (weishi-deng, Mar 31, 2025)
4d28339 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
3895903 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
ff7ea33 Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
ded3cee Update aten/src/ATen/native/SoftMax.cpp (weishi-deng, Apr 8, 2025)
af88dd9 add ut (weishi-deng, May 7, 2025)
53c8b95 Update test/xpu/test_softmax.py (weishi-deng, May 9, 2025)
a9b352b update unit test (weishi-deng, May 9, 2025)
0cf9205 update ut (weishi-deng, May 9, 2025)
820b210 Update test/test_xpu.py (guangyey, May 9, 2025)
e208d9d Update test/test_xpu.py (guangyey, May 9, 2025)
164a3b0 Update test/test_xpu.py (guangyey, May 9, 2025)
0f17fbd Update test/test_xpu.py (guangyey, May 9, 2025)
0b40c23 Update test/test_xpu.py (guangyey, May 9, 2025)
dc6f6ca Update test/test_xpu.py (guangyey, May 9, 2025)
7c322c6 Update test_xpu.py (guangyey, May 9, 2025)
5ba0f46 Update test/test_xpu.py (guangyey, May 9, 2025)
3bae7b7 Update test_xpu.py (guangyey, May 11, 2025)
400db32 Merge branch 'xpu-softmax' of https://github.com/weishi-deng/pytorch … (weishi-deng, May 12, 2025)
7d1b23d update ut (weishi-deng, May 14, 2025)
e83021d Merge branch 'pytorch:main' into xpu-softmax (weishi-deng, May 14, 2025)
6bc08b1 Merge branch 'pytorch:main' into xpu-softmax (weishi-deng, May 15, 2025)
0e5f1a3 Merge branch 'pytorch:main' into xpu-softmax (weishi-deng, May 19, 2025)
0de4614 Merge branch 'pytorch:main' into xpu-softmax (weishi-deng, Jun 26, 2025)
Commit 58f8b215caad0cf47ae9c73fec60e1756ccd59b7: softmax: add device check for xpu with half_to_float
weishi-deng committed Mar 31, 2025
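
The change below extends an existing CUDA-only fast path to XPU: in the public softmax and log_softmax ops, a Half input combined with dtype=torch.float32 can skip the explicit upcast and dispatch directly to the fused _softmax / _log_softmax kernels with half_to_float=true, which read Half and produce float32 without materializing a float32 copy of the input. A minimal sketch of the two paths from the Python side (this is an illustration, not code from the PR; it runs on any device, and the fused route is only taken where the device check passes, i.e. CUDA before this PR, CUDA or XPU after it):

    import torch

    x = torch.randn(4, 8, dtype=torch.half)

    # Generic fallback, the `else` branch in SoftMax.cpp: upcast the
    # input to float32 first, then run softmax in float32.
    ref = torch.softmax(x.to(torch.float32), dim=-1)

    # Public API with dtype=: on a device that passes the check, this
    # dispatches to the fused _softmax(..., half_to_float=True) kernel
    # and avoids the intermediate float32 copy of the input.
    out = torch.softmax(x, dim=-1, dtype=torch.float32)

    assert out.dtype == torch.float32
    torch.testing.assert_close(out, ref)  # same values up to accumulation order

The out= variants (softmax_out / log_softmax_out) receive the same guard; as the diff's context lines show, they additionally stage through an output_temp tensor when the destination is not contiguous.
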
aten/src/ATen/native/SoftMax.cpp: 8 changes (4 additions, 4 deletions)

@@ -411,7 +411,7 @@ TORCH_IMPL_FUNC(log_softmax_backward_cpu_out) (
 Tensor softmax(const Tensor& input_, const int64_t dim_, std::optional<ScalarType> dtype) {
   auto result = [&]() {
     NoNamesGuard guard;
-    if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half && dtype == ScalarType::Float){
+    if ((input_.is_cuda()||input_.is_xpu()) && input_.scalar_type() == ScalarType::Half && dtype == ScalarType::Float){
       return at::_softmax(input_, dim_, true);
     } else {
       Tensor converted = dtype.has_value() ? input_.toType(dtype.value()) : input_;

@@ -428,7 +428,7 @@ Tensor& softmax_out(
     std::optional<ScalarType> dtype,
     Tensor& output_) {
   Tensor output_temp;
-  if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half &&
+  if ((input_.is_cuda()||input_.is_xpu()) && input_.scalar_type() == ScalarType::Half &&
       dtype == ScalarType::Float) {
     if (!output_.is_contiguous()) {
       auto options =

@@ -467,7 +467,7 @@ Tensor special_softmax(const Tensor& input_, const int64_t dim_, std::optional<S
 Tensor log_softmax(const Tensor& input_, const int64_t dim_, std::optional<ScalarType> dtype) {
   auto result = [&]() {
     NoNamesGuard guard;
-    if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half && dtype == ScalarType::Float){
+    if ((input_.is_cuda()||input_.is_xpu()) && input_.scalar_type() == ScalarType::Half && dtype == ScalarType::Float){
       return at::_log_softmax(input_, dim_, true);
     } else {
       Tensor converted = dtype.has_value()? input_.toType(dtype.value()) : input_;

@@ -484,7 +484,7 @@ Tensor& log_softmax_out(
     std::optional<ScalarType> dtype,
     Tensor& output_) {
   Tensor output_temp;
-  if (input_.is_cuda() && input_.scalar_type() == ScalarType::Half &&
+  if ((input_.is_cuda()||input_.is_xpu()) && input_.scalar_type() == ScalarType::Half &&
       dtype == ScalarType::Float) {
     if (!output_.is_contiguous()) {
       auto options =
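
This first commit touches only SoftMax.cpp; the unit tests arrive in the later "add ut" / test/test_xpu.py commits and are not part of this diff. As an illustration only, a hypothetical test in the spirit of those commits might look like the following (the class name, test name, and tolerances are assumptions, not the PR's actual test; it also assumes a PyTorch build that exposes torch.xpu):

    import unittest
    import torch

    @unittest.skipUnless(torch.xpu.is_available(), "requires an XPU device")
    class TestSoftmaxHalfToFloatXPU(unittest.TestCase):
        def test_softmax_half_to_float(self):
            x = torch.randn(64, 128, dtype=torch.half, device="xpu")
            for fn in (torch.softmax, torch.log_softmax):
                fused = fn(x, dim=-1, dtype=torch.float32)  # fast path added by this PR
                ref = fn(x.to(torch.float32), dim=-1)       # upcast-then-compute reference
                self.assertEqual(fused.dtype, torch.float32)
                torch.testing.assert_close(fused, ref, rtol=1e-3, atol=1e-3)

    if __name__ == "__main__":
        unittest.main()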