@@ -211,8 +211,6 @@ WARNING: Do not edit `sklearn/_loss/_loss.pyx` file directly, as it is generated
211211# checking like None -> np.empty().
212212#
213213# Note: We require 1-dim ndarrays to be contiguous.
214- # TODO: Use const memoryviews with fused types with Cython 3.0 where
215- # appropriate (arguments marked by "# IN").
216214
217215from cython.parallel import parallel, prange
218216import numpy as np
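For context on the TODO removed in the hunk above (this note is not part of the patch): Cython 3.0 added support for combining `const` with fused-typed memoryviews, which is what this commit applies to every read-only argument marked `# IN`. A minimal sketch of the pattern, with made-up names:

```cython
# Hypothetical illustration of the Cython 3.0 feature this commit relies on:
# a const memoryview whose element type is a fused type. Before Cython 3.0,
# this combination did not compile (see the release notes for 3.0.0 alpha 1
# referenced in the removed comment further down).
ctypedef fused floating:
    float
    double

def column_sum(const floating[::1] values):
    # `const` declares the buffer read-only, so callers may pass arrays
    # that are not writeable without triggering a copy or an error.
    cdef double total = 0.0
    cdef Py_ssize_t i
    for i in range(values.shape[0]):
        total += values[i]
    return total
```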
@@ -245,8 +243,8 @@ cdef inline double log1pexp(double x) nogil:
245243
246244cdef inline void sum_exp_minus_max(
247245 const int i,
248- Y_DTYPE_C[:, :] raw_prediction, # IN
249- Y_DTYPE_C *p # OUT
246+ const Y_DTYPE_C[:, :] raw_prediction, # IN
247+ Y_DTYPE_C *p # OUT
250248) nogil:
251249 # Thread local buffers are used to store results of this function via p.
252250 # The results are stored as follows:
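The comment describing the storage layout is cut off by the hunk boundary; for orientation only, here is a hedged sketch (not the library's code, layout assumed) of the standard "exp minus max" computation such a helper performs for one row `i`, writing into the caller-provided thread-local buffer `p`:

```cython
# Hedged sketch of a sum_exp_minus_max-style helper. Names and the exact
# buffer layout are assumptions, not taken from the patch.
from libc.math cimport exp

cdef void row_exp_minus_max(
    const double[:, :] raw_prediction,   # IN
    const int i,
    double *p                            # OUT, assumed length n_classes + 2
) nogil:
    cdef:
        int k
        int n_classes = raw_prediction.shape[1]
        double max_value = raw_prediction[i, 0]
        double sum_exps = 0.0
    for k in range(1, n_classes):
        if raw_prediction[i, k] > max_value:
            max_value = raw_prediction[i, k]
    for k in range(n_classes):
        # Subtracting the row maximum before exponentiating avoids overflow.
        p[k] = exp(raw_prediction[i, k] - max_value)
        sum_exps += p[k]
    # Assumed layout of the trailing slots: the maximum and the sum, so that
    # callers can recover log(sum(exp(raw))) = max_value + log(sum_exps).
    p[n_classes] = max_value
    p[n_classes + 1] = sum_exps
```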
@@ -754,15 +752,11 @@ cdef class CyLossFunction:
754752 """
755753 pass
756754
757- # Note: With Cython 3.0, fused types can be used together with const:
758- # const Y_DTYPE_C double[::1] y_true
759- # See release notes 3.0.0 alpha1
760- # https://cython.readthedocs.io/en/latest/src/changes.html#alpha-1-2020-04-12
761755 def loss(
762756 self,
763- Y_DTYPE_C[::1] y_true, # IN
764- Y_DTYPE_C[::1] raw_prediction, # IN
765- Y_DTYPE_C[::1] sample_weight, # IN
757+ const Y_DTYPE_C[::1] y_true, # IN
758+ const Y_DTYPE_C[::1] raw_prediction, # IN
759+ const Y_DTYPE_C[::1] sample_weight, # IN
766760 G_DTYPE_C[::1] loss_out, # OUT
767761 int n_threads=1
768762 ):
@@ -790,9 +784,9 @@ cdef class CyLossFunction:
790784
791785 def gradient(
792786 self,
793- Y_DTYPE_C[::1] y_true, # IN
794- Y_DTYPE_C[::1] raw_prediction, # IN
795- Y_DTYPE_C[::1] sample_weight, # IN
787+ const Y_DTYPE_C[::1] y_true, # IN
788+ const Y_DTYPE_C[::1] raw_prediction, # IN
789+ const Y_DTYPE_C[::1] sample_weight, # IN
796790 G_DTYPE_C[::1] gradient_out, # OUT
797791 int n_threads=1
798792 ):
@@ -820,9 +814,9 @@ cdef class CyLossFunction:
820814
821815 def loss_gradient(
822816 self,
823- Y_DTYPE_C[::1] y_true, # IN
824- Y_DTYPE_C[::1] raw_prediction, # IN
825- Y_DTYPE_C[::1] sample_weight, # IN
817+ const Y_DTYPE_C[::1] y_true, # IN
818+ const Y_DTYPE_C[::1] raw_prediction, # IN
819+ const Y_DTYPE_C[::1] sample_weight, # IN
826820 G_DTYPE_C[::1] loss_out, # OUT
827821 G_DTYPE_C[::1] gradient_out, # OUT
828822 int n_threads=1
@@ -858,9 +852,9 @@ cdef class CyLossFunction:
858852
859853 def gradient_hessian(
860854 self,
861- Y_DTYPE_C[::1] y_true, # IN
862- Y_DTYPE_C[::1] raw_prediction, # IN
863- Y_DTYPE_C[::1] sample_weight, # IN
855+ const Y_DTYPE_C[::1] y_true, # IN
856+ const Y_DTYPE_C[::1] raw_prediction, # IN
857+ const Y_DTYPE_C[::1] sample_weight, # IN
864858 G_DTYPE_C[::1] gradient_out, # OUT
865859 G_DTYPE_C[::1] hessian_out, # OUT
866860 int n_threads=1
@@ -920,10 +914,10 @@ cdef class {{name}}(CyLossFunction):
920914
921915 def loss(
922916 self,
923- Y_DTYPE_C[::1] y_true, # IN
924- Y_DTYPE_C[::1] raw_prediction, # IN
925- Y_DTYPE_C[::1] sample_weight, # IN
926- G_DTYPE_C[::1] loss_out, # OUT
917+ const Y_DTYPE_C[::1] y_true, # IN
918+ const Y_DTYPE_C[::1] raw_prediction, # IN
919+ const Y_DTYPE_C[::1] sample_weight, # IN
920+ G_DTYPE_C[::1] loss_out, # OUT
927921 int n_threads=1
928922 ):
929923 cdef:
@@ -946,11 +940,11 @@ cdef class {{name}}(CyLossFunction):
946940 {{if closs_grad is not None}}
947941 def loss_gradient(
948942 self,
949- Y_DTYPE_C[::1] y_true, # IN
950- Y_DTYPE_C[::1] raw_prediction, # IN
951- Y_DTYPE_C[::1] sample_weight, # IN
952- G_DTYPE_C[::1] loss_out, # OUT
953- G_DTYPE_C[::1] gradient_out, # OUT
943+ const Y_DTYPE_C[::1] y_true, # IN
944+ const Y_DTYPE_C[::1] raw_prediction, # IN
945+ const Y_DTYPE_C[::1] sample_weight, # IN
946+ G_DTYPE_C[::1] loss_out, # OUT
947+ G_DTYPE_C[::1] gradient_out, # OUT
954948 int n_threads=1
955949 ):
956950 cdef:
@@ -978,10 +972,10 @@ cdef class {{name}}(CyLossFunction):
978972
979973 def gradient(
980974 self,
981- Y_DTYPE_C[::1] y_true, # IN
982- Y_DTYPE_C[::1] raw_prediction, # IN
983- Y_DTYPE_C[::1] sample_weight, # IN
984- G_DTYPE_C[::1] gradient_out, # OUT
975+ const Y_DTYPE_C[::1] y_true, # IN
976+ const Y_DTYPE_C[::1] raw_prediction, # IN
977+ const Y_DTYPE_C[::1] sample_weight, # IN
978+ G_DTYPE_C[::1] gradient_out, # OUT
985979 int n_threads=1
986980 ):
987981 cdef:
@@ -1003,11 +997,11 @@ cdef class {{name}}(CyLossFunction):
1003997
1004998 def gradient_hessian(
1005999 self,
1006- Y_DTYPE_C[::1] y_true, # IN
1007- Y_DTYPE_C[::1] raw_prediction, # IN
1008- Y_DTYPE_C[::1] sample_weight, # IN
1009- G_DTYPE_C[::1] gradient_out, # OUT
1010- G_DTYPE_C[::1] hessian_out, # OUT
1000+ const Y_DTYPE_C[::1] y_true, # IN
1001+ const Y_DTYPE_C[::1] raw_prediction, # IN
1002+ const Y_DTYPE_C[::1] sample_weight, # IN
1003+ G_DTYPE_C[::1] gradient_out, # OUT
1004+ G_DTYPE_C[::1] hessian_out, # OUT
10111005 int n_threads=1
10121006 ):
10131007 cdef:
@@ -1056,10 +1050,10 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
10561050 # opposite are welcome.
10571051 def loss(
10581052 self,
1059- Y_DTYPE_C[::1] y_true, # IN
1060- Y_DTYPE_C[:, :] raw_prediction, # IN
1061- Y_DTYPE_C[::1] sample_weight, # IN
1062- G_DTYPE_C[::1] loss_out, # OUT
1053+ const Y_DTYPE_C[::1] y_true, # IN
1054+ const Y_DTYPE_C[:, :] raw_prediction, # IN
1055+ const Y_DTYPE_C[::1] sample_weight, # IN
1056+ G_DTYPE_C[::1] loss_out, # OUT
10631057 int n_threads=1
10641058 ):
10651059 cdef:
@@ -1116,11 +1110,11 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
11161110
11171111 def loss_gradient(
11181112 self,
1119- Y_DTYPE_C[::1] y_true, # IN
1120- Y_DTYPE_C[:, :] raw_prediction, # IN
1121- Y_DTYPE_C[::1] sample_weight, # IN
1122- G_DTYPE_C[::1] loss_out, # OUT
1123- G_DTYPE_C[:, :] gradient_out, # OUT
1113+ const Y_DTYPE_C[::1] y_true, # IN
1114+ const Y_DTYPE_C[:, :] raw_prediction, # IN
1115+ const Y_DTYPE_C[::1] sample_weight, # IN
1116+ G_DTYPE_C[::1] loss_out, # OUT
1117+ G_DTYPE_C[:, :] gradient_out, # OUT
11241118 int n_threads=1
11251119 ):
11261120 cdef:
@@ -1178,10 +1172,10 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
11781172
11791173 def gradient(
11801174 self,
1181- Y_DTYPE_C[::1] y_true, # IN
1182- Y_DTYPE_C[:, :] raw_prediction, # IN
1183- Y_DTYPE_C[::1] sample_weight, # IN
1184- G_DTYPE_C[:, :] gradient_out, # OUT
1175+ const Y_DTYPE_C[::1] y_true, # IN
1176+ const Y_DTYPE_C[:, :] raw_prediction, # IN
1177+ const Y_DTYPE_C[::1] sample_weight, # IN
1178+ G_DTYPE_C[:, :] gradient_out, # OUT
11851179 int n_threads=1
11861180 ):
11871181 cdef:
@@ -1227,11 +1221,11 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
12271221
12281222 def gradient_hessian(
12291223 self,
1230- Y_DTYPE_C[::1] y_true, # IN
1231- Y_DTYPE_C[:, :] raw_prediction, # IN
1232- Y_DTYPE_C[::1] sample_weight, # IN
1233- G_DTYPE_C[:, :] gradient_out, # OUT
1234- G_DTYPE_C[:, :] hessian_out, # OUT
1224+ const Y_DTYPE_C[::1] y_true, # IN
1225+ const Y_DTYPE_C[:, :] raw_prediction, # IN
1226+ const Y_DTYPE_C[::1] sample_weight, # IN
1227+ G_DTYPE_C[:, :] gradient_out, # OUT
1228+ G_DTYPE_C[:, :] hessian_out, # OUT
12351229 int n_threads=1
12361230 ):
12371231 cdef:
@@ -1285,11 +1279,11 @@ cdef class CyHalfMultinomialLoss(CyLossFunction):
12851279 # diagonal (in the classes) approximation as implemented above.
12861280 def gradient_proba(
12871281 self,
1288- Y_DTYPE_C[::1] y_true, # IN
1289- Y_DTYPE_C[:, :] raw_prediction, # IN
1290- Y_DTYPE_C[::1] sample_weight, # IN
1291- G_DTYPE_C[:, :] gradient_out, # OUT
1292- G_DTYPE_C[:, :] proba_out, # OUT
1282+ const Y_DTYPE_C[::1] y_true, # IN
1283+ const Y_DTYPE_C[:, :] raw_prediction, # IN
1284+ const Y_DTYPE_C[::1] sample_weight, # IN
1285+ G_DTYPE_C[:, :] gradient_out, # OUT
1286+ G_DTYPE_C[:, :] proba_out, # OUT
12931287 int n_threads=1
12941288 ):
12951289 cdef:
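Taken together, the change is purely additive `const` qualification of the `# IN` arguments; the `# OUT` buffers stay writable. A caller-side illustration of what the qualifiers buy, reusing the hypothetical `column_sum` sketch from the note near the top (not part of the commit):

```cython
# Hypothetical caller-side check: a read-only array is accepted by a
# const-qualified memoryview argument, where a non-const one would raise
# the usual read-only buffer error.
import numpy as np

y = np.linspace(0.0, 1.0, num=8)
y.setflags(write=False)   # simulate read-only input, e.g. memory-mapped data

column_sum(y)             # accepted: the parameter is declared const
```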