[MRG] MNT: Drop `n_tasks` in `enet_coordinate_descent` (#11712) · scikit-learn/scikit-learn@b3a81cf · GitHub

Commit b3a81cf

jakirkham authored and agramfort committed
[MRG] MNT: Drop n_tasks in enet_coordinate_descent (#11712)
* MNT: Drop `n_tasks` in `enet_coordinate_descent`

  In `enet_coordinate_descent`, `y` is always C-contiguous, as required by the function itself. So there is no need to compute `n_tasks`, the stride length of `y`; it can simply be treated as `1`, which it is.

* MNT: Drop `n_tasks` in `sparse_enet_coordinate_descent`

  In `sparse_enet_coordinate_descent`, `y` is always C-contiguous, as required by the function itself. So there is no need to compute `n_tasks`, the stride length of `y`; it can simply be treated as `1`, which it is.
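As a side note, here is a minimal NumPy sketch (mine, not part of the commit) of the reasoning above: for a C-contiguous 1-D `y`, the stride of axis 0 in units of the element size is always 1, so the `n_tasks` value derived from `y.strides[0]` can only ever be 1; a 2-D, C-ordered multi-task target is where a non-unit stride would appear.

import numpy as np

# C-contiguous 1-D target, as required by enet_coordinate_descent:
# consecutive elements are adjacent in memory, so the stride in units
# of the item size -- what n_tasks used to be computed as -- is 1.
y = np.ascontiguousarray(np.random.rand(10), dtype=np.float64)
assert y.strides[0] // y.itemsize == 1

# A 2-D, C-ordered target of shape (n_samples, n_tasks) is where a
# non-unit stride would show up: walking down one column skips
# n_tasks elements per step.
Y = np.random.rand(10, 3)
assert Y[:, 0].strides[0] // Y.itemsize == 3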
1 parent 71ee6d2 commit b3a81cf

File tree: 1 file changed (+3, -11 lines)


sklearn/linear_model/cd_fast.pyx

Lines changed: 3 additions & 11 deletions
@@ -172,9 +172,6 @@ def enet_coordinate_descent(np.ndarray[floating, ndim=1, mode='c'] w,
     cdef unsigned int n_samples = X.shape[0]
     cdef unsigned int n_features = X.shape[1]
 
-    # get the number of tasks indirectly, using strides
-    cdef unsigned int n_tasks = y.strides[0] / sizeof(floating)
-
     # compute norms of the columns of X
     cdef np.ndarray[floating, ndim=1] norm_cols_X = (X**2).sum(axis=0)

@@ -218,7 +215,7 @@ def enet_coordinate_descent(np.ndarray[floating, ndim=1, mode='c'] w,
         R[i] = y[i] - dot(n_features, &X_data[i], n_samples, w_data, 1)
 
     # tol *= np.dot(y, y)
-    tol *= dot(n_samples, y_data, n_tasks, y_data, n_tasks)
+    tol *= dot(n_samples, y_data, 1, y_data, 1)
 
     for n_iter in range(max_iter):
         w_max = 0.0
@@ -296,7 +293,7 @@ def enet_coordinate_descent(np.ndarray[floating, ndim=1, mode='c'] w,
 
             # np.dot(R.T, y)
             gap += (alpha * l1_norm
-                    - const * dot(n_samples, R_data, 1, y_data, n_tasks)
+                    - const * dot(n_samples, R_data, 1, y_data, 1)
                     + 0.5 * beta * (1 + const ** 2) * (w_norm2))
 
             if gap < tol:
@@ -336,9 +333,6 @@ def sparse_enet_coordinate_descent(floating [::1] w,
     cdef unsigned int startptr = X_indptr[0]
     cdef unsigned int endptr
 
-    # get the number of tasks indirectly, using strides
-    cdef unsigned int n_tasks
-
     # initial value of the residuals
     cdef floating[:] R = y.copy()

@@ -348,12 +342,10 @@ def sparse_enet_coordinate_descent(floating [::1] w,
     # fused types version of BLAS functions
     if floating is float:
         dtype = np.float32
-        n_tasks = y.strides[0] / sizeof(float)
         dot = sdot
         asum = sasum
     else:
         dtype = np.float64
-        n_tasks = y.strides[0] / sizeof(DOUBLE)
         dot = ddot
         asum = dasum

@@ -514,7 +506,7 @@ def sparse_enet_coordinate_descent(floating [::1] w,
                 gap += (alpha * l1_norm - const * dot(
                             n_samples,
                             &R[0], 1,
-                            &y[0], n_tasks
+                            &y[0], 1
                             )
                        + 0.5 * beta * (1 + const ** 2) * w_norm2)
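For illustration only, here is a sketch of the unit-increment BLAS call using SciPy's wrappers rather than the Cython-level `dot` in cd_fast.pyx: with increments of 1 over n_samples elements, the BLAS dot is exactly np.dot(y, y), the quantity the commented line `# tol *= np.dot(y, y)` refers to, so passing `1` instead of `n_tasks` changes nothing for a contiguous 1-D `y`.

import numpy as np
from scipy.linalg.blas import ddot  # double-precision BLAS dot product

y = np.ascontiguousarray(np.random.rand(100), dtype=np.float64)

# With incx == incy == 1 the BLAS dot walks y element by element,
# which is exactly np.dot(y, y); a non-unit increment would instead
# stride over every n_tasks-th element.
assert np.isclose(ddot(y, y, incx=1, incy=1), np.dot(y, y))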

0 commit comments