CLN use @ instead of np.dot · scikit-learn/scikit-learn@a0428e2 · GitHub

Commit a0428e2

CLN use @ instead of np.dot
1 parent 3e1b2f5 commit a0428e2

1 file changed (+3 −3 lines)

sklearn/linear_model/_linear_loss.py

Lines changed: 3 additions & 3 deletions
@@ -18,9 +18,9 @@ def sandwich_dot(X, W):
     # most.
     # While a dedicated Cython routine could exploit the symmetry, it is very hard to
     # beat BLAS GEMM, even thought the latter cannot exploit the symmetry, unless one
-    # pays the price of a taking square roots and implements
+    # pays the price of taking square roots and implements
     #     sqrtWX = sqrt(W)[:, None] * X
-    #     return np.dot(sqrtWX.T, sqrtWX)
+    #     return sqrtWX.T @ sqrtWX
     # which (might) detect the symmetry and use BLAS SYRK under the hood.
     n_samples = X.shape[0]
     if sparse.issparse(X):
@@ -31,7 +31,7 @@ def sandwich_dot(X, W):
     # np.einsum may use less memory but the following, using BLAS matrix
     # multiplication (gemm), is by far faster.
     WX = W[:, None] * X
-    return np.dot(X.T, WX)
+    return X.T @ WX


 class LinearModelLoss:
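
For context, np.dot(A, B) and A @ B are equivalent for 2-D NumPy arrays, so the change is purely cosmetic. Below is a minimal standalone sketch, not part of the commit (the array shapes and seed are arbitrary), of the equivalence the comments describe: X.T @ (W[:, None] * X) is the sandwich product X.T @ diag(W) @ X computed with one elementwise scaling plus a BLAS GEMM, while the square-root variant turns it into sqrtWX.T @ sqrtWX, the A.T @ A shape that a BLAS SYRK kernel could exploit.

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 4))
W = rng.random(50)  # non-negative sample weights

# Form kept by the commit: one elementwise scaling, then BLAS GEMM.
WX = W[:, None] * X
gemm = X.T @ WX

# Alternative sketched in the comments: pay for square roots so the
# product becomes sqrtWX.T @ sqrtWX, which BLAS SYRK could exploit.
sqrtWX = np.sqrt(W)[:, None] * X
syrk = sqrtWX.T @ sqrtWX

assert np.allclose(gemm, syrk)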
