@@ -66,7 +66,14 @@ cdef class LossFunction:
         """
         return 0.
 
-    def dloss(self, double p, double y):
+    def py_dloss(self, double p, double y):
+        """Python version of `dloss` for testing.
+
+        Pytest needs a python function and can't use cdef functions.
+        """
+        return self.dloss(p, y)
+
+    cdef double dloss(self, double p, double y) nogil:
         """Evaluate the derivative of the loss function with respect to
         the prediction `p`.
 
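The new `py_dloss` wrapper only exists so that plain-Python tests can reach the `nogil` cdef implementation. A minimal pytest-style sketch of how such a test might look; the module path `sklearn.linear_model.sgd_fast`, the `Hinge(1.0)` constructor signature, and the test name are illustrative assumptions, not part of this patch:

    import pytest
    from sklearn.linear_model.sgd_fast import Hinge, SquaredLoss

    def test_py_dloss_matches_analytic_derivative():
        # SquaredLoss: d/dp [0.5 * (p - y)**2] == p - y
        assert SquaredLoss().py_dloss(3.0, 1.0) == pytest.approx(2.0)
        # Hinge with threshold 1.0: derivative is -y while p * y <= 1, else 0
        hinge = Hinge(1.0)
        assert hinge.py_dloss(0.5, 1.0) == pytest.approx(-1.0)
        assert hinge.py_dloss(2.0, 1.0) == pytest.approx(0.0)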
@@ -81,11 +88,6 @@ cdef class LossFunction:
         double
             The derivative of the loss function with regards to `p`.
         """
-        return self._dloss(p, y)
-
-    cdef double _dloss(self, double p, double y) nogil:
-        # Implementation of dloss; separate function because cpdef and nogil
-        # can't be combined.
         return 0.
 
 
@@ -95,7 +97,7 @@ cdef class Regression(LossFunction):
     cdef double loss(self, double p, double y) nogil:
         return 0.
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         return 0.
 
 
@@ -105,7 +107,7 @@ cdef class Classification(LossFunction):
     cdef double loss(self, double p, double y) nogil:
         return 0.
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         return 0.
 
 
@@ -126,7 +128,7 @@ cdef class ModifiedHuber(Classification):
         else:
             return -4.0 * z
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         cdef double z = p * y
         if z >= 1.0:
             return 0.0
@@ -161,7 +163,7 @@ cdef class Hinge(Classification):
             return self.threshold - z
         return 0.0
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         cdef double z = p * y
         if z <= self.threshold:
             return -y
@@ -193,7 +195,7 @@ cdef class SquaredHinge(Classification):
             return z * z
         return 0.0
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         cdef double z = self.threshold - p * y
         if z > 0:
             return -2 * y * z
@@ -215,7 +217,7 @@ cdef class Log(Classification):
             return -z
         return log(1.0 + exp(-z))
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         cdef double z = p * y
         # approximately equal and saves the computation of the log
         if z > 18.0:
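For Log, the exact derivative of log(1.0 + exp(-z)) with respect to p is -y / (exp(z) + 1.0); the "approximately equal" comment refers to the |z| > 18 shortcut, where exp(-z) (and -z on the negative side) matches the exact loss to double precision while skipping the log. A quick standard-library check of that claim, purely illustrative:

    import math

    z = 18.0
    exact = math.log(1.0 + math.exp(-z))   # full expression
    approx = math.exp(-z)                  # shortcut taken for z > 18
    assert abs(exact - approx) < 1e-15     # error ~ exp(-2 * z) / 2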
@@ -233,7 +235,7 @@ cdef class SquaredLoss(Regression):
     cdef double loss(self, double p, double y) nogil:
         return 0.5 * (p - y) * (p - y)
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         return p - y
 
     def __reduce__(self):
@@ -262,7 +264,7 @@ cdef class Huber(Regression):
         else:
             return self.c * abs_r - (0.5 * self.c * self.c)
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         cdef double r = p - y
         cdef double abs_r = fabs(r)
         if abs_r <= self.c:
@@ -291,7 +293,7 @@ cdef class EpsilonInsensitive(Regression):
         cdef double ret = fabs(y - p) - self.epsilon
         return ret if ret > 0 else 0
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         if y - p > self.epsilon:
             return -1
         elif p - y > self.epsilon:
@@ -318,7 +320,7 @@ cdef class SquaredEpsilonInsensitive(Regression):
         cdef double ret = fabs(y - p) - self.epsilon
         return ret * ret if ret > 0 else 0
 
-    cdef double _dloss(self, double p, double y) nogil:
+    cdef double dloss(self, double p, double y) nogil:
         cdef double z
        z = y - p
         if z > self.epsilon:
@@ -542,7 +544,7 @@ def _plain_sgd(np.ndarray[double, ndim=1, mode='c'] weights,
                     update = sqnorm(x_data_ptr, x_ind_ptr, xnnz)
                     update = loss.loss(p, y) / (update + 0.5 / C)
                 else:
-                    dloss = loss._dloss(p, y)
+                    dloss = loss.dloss(p, y)
                     # clip dloss with large values to avoid numerical
                     # instabilities
                     if dloss < -MAX_DLOSS:
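In `_plain_sgd` the renamed `loss.dloss(p, y)` feeds the weight update, and the comment notes that its value is clipped into [-MAX_DLOSS, MAX_DLOSS] to avoid numerical instabilities. A plain-Python sketch of that clamping, illustrative only and not the literal Cython code:

    def clip_dloss(dloss, max_dloss):
        # Clamp the per-sample gradient so a single extreme residual
        # cannot blow up the weight update.
        return min(max(dloss, -max_dloss), max_dloss)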