8000 MAINT Remove ReadonlyArrayWrapper from _loss module (#25555) · scikit-learn/scikit-learn@f9a1cf0 · GitHub

Commit f9a1cf0

MAINT Remove ReadonlyArrayWrapper from _loss module (#25555)
* MAINT Remove ReadonlyArrayWrapper from _loss module
* CLN Remove comments about Cython 3.0
1 parent 4f85597 commit f9a1cf0
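
Context: before Cython 3.0, a memoryview argument whose element type is a fused type could not be declared `const`, so the Cython loss functions rejected read-only input buffers (for instance, memory-mapped arrays that joblib hands to worker processes). `ReadonlyArrayWrapper` was scikit-learn's workaround: it re-exposed such buffers as writable so that a non-const memoryview could be taken. Cython 3.0 lifts the restriction, so this commit marks the input arguments `const` and deletes the wrapper. A minimal sketch of the pattern (the names `floating_t` and `weighted_sum` are illustrative, not part of the commit):

    # cython: language_level=3
    # Requires Cython >= 3.0: `const` combined with a fused-type memoryview.

    ctypedef fused floating_t:
        double
        float

    def weighted_sum(
        const floating_t[::1] values,         # IN: may be read-only
        const floating_t[::1] sample_weight,  # IN: may be read-only
    ):
        cdef:
            int i
            double total = 0.0
        for i in range(values.shape[0]):
            total += sample_weight[i] * values[i]
        return total

With the `const` qualifier, a NumPy array whose `writeable` flag is False is accepted directly; without it, Cython raises "ValueError: buffer source array is read-only" when the memoryview is created.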

File tree: 4 files changed, 60 additions & 92 deletions


setup.py

Lines changed: 1 addition & 1 deletion
@@ -269,7 +269,7 @@ def check_package_status(package, min_version):
         {"sources": ["_isotonic.pyx"], "include_np": True},
     ],
     "_loss": [
-        {"sources": ["_loss.pyx.tp"], "include_np": True},
+        {"sources": ["_loss.pyx.tp"]},
     ],
     "cluster": [
         {"sources": ["_dbscan_inner.pyx"], "language": "c++", "include_np": True},

sklearn/_loss/_loss.pxd

Lines changed: 4 additions & 9 deletions
@@ -1,20 +1,15 @@
 # cython: language_level=3
 
-cimport numpy as cnp
-
-cnp.import_array()
-
-
 # Fused types for y_true, y_pred, raw_prediction
 ctypedef fused Y_DTYPE_C:
-    cnp.npy_float64
-    cnp.npy_float32
+    double
+    float
 
 
 # Fused types for gradient and hessian
 ctypedef fused G_DTYPE_C:
-    cnp.npy_float64
-    cnp.npy_float32
+    double
+    float
 
 
 # Struct to return 2 doubles
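
`cnp.npy_float64` and `cnp.npy_float32` are NumPy C-API typedefs for the C types `double` and `float`, so the fused types specialize to exactly the same machine types as before; the only thing that goes away is the dependency on the NumPy C API and its `cnp.import_array()` initialization. A small sanity sketch (illustrative, not part of the commit):

    # cython: language_level=3
    cimport numpy as cnp

    def same_machine_types():
        # npy_float64/npy_float32 are typedefs of the C floating-point
        # types, so each pair has identical size and representation.
        return (
            sizeof(cnp.npy_float64) == sizeof(double),
            sizeof(cnp.npy_float32) == sizeof(float),
        )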

sklearn/_loss/_loss.pyx.tp

Lines changed: 55 additions & 61 deletions
@@ -211,8 +211,6 @@ WARNING: Do not edit `sklearn/_loss/_loss.pyx` file directly, as it is generated
 # checking like None -> np.empty().
 #
 # Note: We require 1-dim ndarrays to be contiguous.
-# TODO: Use const memoryviews with fused types with Cython 3.0 where
-#       appropriate (arguments marked by "# IN").
 
 from cython.parallel import parallel, prange
 import numpy as np
@@ -245,8 +243,8 @@ cdef inline double log1pexp(double x) nogil:
 
 cdef inline void sum_exp_minus_max(
     const int i,
-    Y_DTYPE_C[:, :] raw_prediction,  # IN
-    Y_DTYPE_C *p                     # OUT
+    const Y_DTYPE_C[:, :] raw_prediction,  # IN
+    Y_DTYPE_C *p                           # OUT
 ) nogil:
     # Thread local buffers are used to stores results of this function via p.
     # The results are stored as follows:
@@ -754,15 +752,11 @@ cdef class CyLossFunction:
     """
     pass
 
-    # Note: With Cython 3.0, fused types can be used together with const:
-    #       const Y_DTYPE_C double[::1] y_true
-    # See release notes 3.0.0 alpha1
-    # https://cython.readthedocs.io/en/latest/src/changes.html#alpha-1-2020-04-12
     def loss(
         self,
-        Y_DTYPE_C[::1] y_true,          # IN
-        Y_DTYPE_C[::1] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,   # IN
+        const Y_DTYPE_C[::1] y_true,          # IN
+        const Y_DTYPE_C[::1] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,   # IN
         G_DTYPE_C[::1] loss_out,        # OUT
         int n_threads=1
     ):
@@ -790,9 +784,9 @@ cdef class CyLossFunction:
 
     def gradient(
         self,
-        Y_DTYPE_C[::1] y_true,          # IN
-        Y_DTYPE_C[::1] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,   # IN
+        const Y_DTYPE_C[::1] y_true,          # IN
+        const Y_DTYPE_C[::1] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,   # IN
         G_DTYPE_C[::1] gradient_out,    # OUT
         int n_threads=1
     ):
@@ -820,9 +814,9 @@ cdef class CyLossFunction:
 
     def loss_gradient(
         self,
-        Y_DTYPE_C[::1] y_true,          # IN
-        Y_DTYPE_C[::1] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,   # IN
+        const Y_DTYPE_C[::1] y_true,          # IN
+        const Y_DTYPE_C[::1] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,   # IN
         G_DTYPE_C[::1] loss_out,        # OUT
         G_DTYPE_C[::1] gradient_out,    # OUT
         int n_threads=1
@@ -858,9 +852,9 @@ cdef class CyLossFunction:
 
     def gradient_hessian(
         self,
-        Y_DTYPE_C[::1] y_true,          # IN
-        Y_DTYPE_C[::1] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,   # IN
+        const Y_DTYPE_C[::1] y_true,          # IN
+        const Y_DTYPE_C[::1] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,   # IN
         G_DTYPE_C[::1] gradient_out,    # OUT
         G_DTYPE_C[::1] hessian_out,     # OUT
         int n_threads=1
@@ -920,10 +914,10 @@ cdef class {{name}}(CyLossFunction):
 
     def loss(
         self,
-        Y_DTYPE_C[::1] y_true,          # IN
-        Y_DTYPE_C[::1] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,   # IN
-        G_DTYPE_C[::1] loss_out,        # OUT
+        const Y_DTYPE_C[::1] y_true,          # IN
+        const Y_DTYPE_C[::1] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,   # IN
+        G_DTYPE_C[::1] loss_out,              # OUT
         int n_threads=1
     ):
         cdef:
@@ -946,11 +940,11 @@ cdef class {{name}}(CyLossFunction):
     {{if closs_grad is not None}}
     def loss_gradient(
         self,
-        Y_DTYPE_C[::1] y_true,          # IN
-        Y_DTYPE_C[::1] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,   # IN
-        G_DTYPE_C[::1] loss_out,        # OUT
-        G_DTYPE_C[::1] gradient_out,    # OUT
+        const Y_DTYPE_C[::1] y_true,          # IN
+        const Y_DTYPE_C[::1] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,   # IN
+        G_DTYPE_C[::1] loss_out,              # OUT
+        G_DTYPE_C[::1] gradient_out,          # OUT
         int n_threads=1
     ):
         cdef:
@@ -978,10 +972,10 @@ cdef class {{name}}(CyLossFunction):
 
     def gradient(
         self,
-        Y_DTYPE_C[::1] y_true,          # IN
-        Y_DTYPE_C[::1] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,   # IN
-        G_DTYPE_C[::1] gradient_out,    # OUT
+        const Y_DTYPE_C[::1] y_true,          # IN
+        const Y_DTYPE_C[::1] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,   # IN
+        G_DTYPE_C[::1] gradient_out,          # OUT
         int n_threads=1
     ):
         cdef:
@@ -1003,11 +997,11 @@ cdef class {{name}}(CyLossFunction):
 
     def gradient_hessian(
         self,
-        Y_DTYPE_C[::1] y_true,          # IN
-        Y_DTYPE_C[::1] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,   # IN
-        G_DTYPE_C[::1] gradient_out,    # OUT
-        G_DTYPE_C[::1] hessian_out,     # OUT
+        const Y_DTYPE_C[::1] y_true,          # IN
+        const Y_DTYPE_C[::1] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,   # IN
+        G_DTYPE_C[::1] gradient_out,          # OUT
+        G_DTYPE_C[::1] hessian_out,           # OUT
         int n_threads=1
     ):
         cdef:
@@ -1056,10 +1050,10 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
     # opposite are welcome.
     def loss(
         self,
-        Y_DTYPE_C[::1] y_true,           # IN
-        Y_DTYPE_C[:, :] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,    # IN
-        G_DTYPE_C[::1] loss_out,         # OUT
+        const Y_DTYPE_C[::1] y_true,           # IN
+        const Y_DTYPE_C[:, :] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,    # IN
+        G_DTYPE_C[::1] loss_out,               # OUT
         int n_threads=1
     ):
         cdef:
@@ -1116,11 +1110,11 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
 
     def loss_gradient(
         self,
-        Y_DTYPE_C[::1] y_true,           # IN
-        Y_DTYPE_C[:, :] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,    # IN
-        G_DTYPE_C[::1] loss_out,         # OUT
-        G_DTYPE_C[:, :] gradient_out,    # OUT
+        const Y_DTYPE_C[::1] y_true,           # IN
+        const Y_DTYPE_C[:, :] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,    # IN
+        G_DTYPE_C[::1] loss_out,               # OUT
+        G_DTYPE_C[:, :] gradient_out,          # OUT
         int n_threads=1
     ):
         cdef:
@@ -1178,10 +1172,10 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
 
     def gradient(
         self,
-        Y_DTYPE_C[::1] y_true,           # IN
-        Y_DTYPE_C[:, :] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,    # IN
-        G_DTYPE_C[:, :] gradient_out,    # OUT
+        const Y_DTYPE_C[::1] y_true,           # IN
+        const Y_DTYPE_C[:, :] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,    # IN
+        G_DTYPE_C[:, :] gradient_out,          # OUT
         int n_threads=1
     ):
         cdef:
@@ -1227,11 +1221,11 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
 
     def gradient_hessian(
         self,
-        Y_DTYPE_C[::1] y_true,           # IN
-        Y_DTYPE_C[:, :] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,    # IN
-        G_DTYPE_C[:, :] gradient_out,    # OUT
-        G_DTYPE_C[:, :] hessian_out,     # OUT
+        const Y_DTYPE_C[::1] y_true,           # IN
+        const Y_DTYPE_C[:, :] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,    # IN
+        G_DTYPE_C[:, :] gradient_out,          # OUT
+        G_DTYPE_C[:, :] hessian_out,           # OUT
         int n_threads=1
     ):
         cdef:
@@ -1285,11 +1279,11 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
     # diagonal (in the classes) approximation as implemented above.
     def gradient_proba(
         self,
-        Y_DTYPE_C[::1] y_true,           # IN
-        Y_DTYPE_C[:, :] raw_prediction,  # IN
-        Y_DTYPE_C[::1] sample_weight,    # IN
-        G_DTYPE_C[:, :] gradient_out,    # OUT
-        G_DTYPE_C[:, :] proba_out,       # OUT
+        const Y_DTYPE_C[::1] y_true,           # IN
+        const Y_DTYPE_C[:, :] raw_prediction,  # IN
+        const Y_DTYPE_C[::1] sample_weight,    # IN
+        G_DTYPE_C[:, :] gradient_out,          # OUT
+        G_DTYPE_C[:, :] proba_out,             # OUT
         int n_threads=1
     ):
         cdef:
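
Every hunk above applies the same rule: arguments marked `# IN` gain `const`, so callers may pass read-only arrays, while buffers marked `# OUT` stay non-const because the function writes into them (a `const` OUT buffer would turn the assignment into a compile-time error). A condensed sketch of the convention, reusing the file's fused type names with an illustrative body:

    # cython: language_level=3

    ctypedef fused Y_DTYPE_C:
        double
        float

    ctypedef fused G_DTYPE_C:
        double
        float

    def half_squared_loss(
        const Y_DTYPE_C[::1] y_true,          # IN: read-only accepted
        const Y_DTYPE_C[::1] raw_prediction,  # IN: read-only accepted
        G_DTYPE_C[::1] loss_out,              # OUT: must stay writable
    ):
        cdef int i
        for i in range(y_true.shape[0]):
            loss_out[i] = 0.5 * (y_true[i] - raw_prediction[i]) ** 2

Because `Y_DTYPE_C` and `G_DTYPE_C` are independent fused types, Cython compiles a specialization for each input/output dtype combination, which is why the real module declares both.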

sklearn/_loss/loss.py

Lines changed: 0 additions & 21 deletions
@@ -37,7 +37,6 @@
     MultinomialLogit,
 )
 from ..utils import check_scalar
-from ..utils._readonly_array_wrapper import ReadonlyArrayWrapper
 from ..utils.stats import _weighted_percentile
 
 
@@ -185,10 +184,6 @@ def loss(
         if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
             raw_prediction = raw_prediction.squeeze(1)
 
-        y_true = ReadonlyArrayWrapper(y_true)
-        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
-        if sample_weight is not None:
-            sample_weight = ReadonlyArrayWrapper(sample_weight)
         return self.closs.loss(
             y_true=y_true,
             raw_prediction=raw_prediction,
@@ -250,10 +245,6 @@ def loss_gradient(
         if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
             gradient_out = gradient_out.squeeze(1)
 
-        y_true = ReadonlyArrayWrapper(y_true)
-        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
-        if sample_weight is not None:
-            sample_weight = ReadonlyArrayWrapper(sample_weight)
         return self.closs.loss_gradient(
             y_true=y_true,
             raw_prediction=raw_prediction,
@@ -303,10 +294,6 @@ def gradient(
         if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
             gradient_out = gradient_out.squeeze(1)
 
-        y_true = ReadonlyArrayWrapper(y_true)
-        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
-        if sample_weight is not None:
-            sample_weight = ReadonlyArrayWrapper(sample_weight)
         return self.closs.gradient(
             y_true=y_true,
             raw_prediction=raw_prediction,
@@ -371,10 +358,6 @@ def gradient_hessian(
         if hessian_out.ndim == 2 and hessian_out.shape[1] == 1:
             hessian_out = hessian_out.squeeze(1)
 
-        y_true = ReadonlyArrayWrapper(y_true)
-        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
-        if sample_weight is not None:
-            sample_weight = ReadonlyArrayWrapper(sample_weight)
         return self.closs.gradient_hessian(
             y_true=y_true,
             raw_prediction=raw_prediction,
@@ -1001,10 +984,6 @@ def gradient_proba(
         elif proba_out is None:
             proba_out = np.empty_like(gradient_out)
 
-        y_true = ReadonlyArrayWrapper(y_true)
-        raw_prediction = ReadonlyArrayWrapper(raw_prediction)
-        if sample_weight is not None:
-            sample_weight = ReadonlyArrayWrapper(sample_weight)
         return self.closs.gradient_proba(
             y_true=y_true,
             raw_prediction=raw_prediction,
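
On the Python side the change is pure deletion: the wrapper calls vanish and the (possibly read-only) arrays flow straight into the `const`-qualified Cython methods. A caller-side sketch; `HalfSquaredError` is one of the concrete losses defined in this module, though this is a private API used here for demonstration only:

    import numpy as np
    from sklearn._loss.loss import HalfSquaredError

    y_true = np.array([0.0, 1.0, 2.0])
    raw_prediction = np.array([0.1, 0.9, 2.2])
    # Simulate joblib memmap inputs: mark both arrays read-only.
    y_true.setflags(write=False)
    raw_prediction.setflags(write=False)

    # Before this commit, these read-only arrays had to pass through
    # ReadonlyArrayWrapper before reaching self.closs; now they are
    # accepted as-is by the const memoryview signatures.
    loss = HalfSquaredError()
    print(loss.loss(y_true=y_true, raw_prediction=raw_prediction))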

0 commit comments
