@@ -15,6 +15,7 @@
     HalfMultinomialLoss,
     HalfPoissonLoss,
     HalfSquaredError,
+    PinballLoss,
 )
 from ...base import BaseEstimator, RegressorMixin, ClassifierMixin, is_classifier
 from ...utils import check_random_state, resample
@@ -42,6 +43,7 @@
     "least_squares": HalfSquaredError,
     "least_absolute_deviation": AbsoluteError,
     "poisson": HalfPoissonLoss,
+    "quantile": PinballLoss,
     "binary_crossentropy": HalfBinomialLoss,
     "categorical_crossentropy": HalfMultinomialLoss,
 }
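Note: `PinballLoss` implements the standard pinball (quantile) loss: for a target `y`, a prediction `y_pred`, and a quantile level `q`, the per-sample loss is `q * (y - y_pred)` when `y >= y_pred` and `(1 - q) * (y_pred - y)` otherwise. A minimal NumPy sketch of that formula (the helper name `pinball_loss` is illustrative and not part of this diff):

import numpy as np


def pinball_loss(y_true, y_pred, quantile):
    """Mean pinball loss at the given quantile level (illustrative helper)."""
    diff = y_true - y_pred
    # The max() picks quantile * diff for under-predictions (diff >= 0)
    # and (quantile - 1) * diff for over-predictions (diff < 0).
    return np.mean(np.maximum(quantile * diff, (quantile - 1) * diff))

At q = 0.5 this is half the absolute error, so median regression with 'quantile' matches 'absolute_error' up to a constant factor.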
@@ -1115,17 +1117,21 @@ class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting):
 
     Parameters
     ----------
-    loss : {'squared_error', 'absolute_error', 'poisson'}, \
+    loss : {'squared_error', 'absolute_error', 'poisson', 'quantile'}, \
             default='squared_error'
         The loss function to use in the boosting process. Note that the
         "squared error" and "poisson" losses actually implement
         "half least squares loss" and "half poisson deviance" to simplify the
         computation of the gradient. Furthermore, "poisson" loss internally
         uses a log-link and requires ``y >= 0``.
+        "quantile" uses the pinball loss.
 
         .. versionchanged:: 0.23
             Added option 'poisson'.
 
+        .. versionchanged:: 1.1
+            Added option 'quantile'.
+
         .. deprecated:: 1.0
             The loss 'least_squares' was deprecated in v1.0 and will be removed
             in version 1.2. Use `loss='squared_error'` which is equivalent.
@@ -1135,6 +1141,9 @@ class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting):
         be removed in version 1.2. Use `loss='absolute_error'` which is
         equivalent.
 
+    quantile : float, default=None
+        If loss is "quantile", this parameter specifies which quantile to
+        estimate; it must be between 0 and 1.
     learning_rate : float, default=0.1
         The learning rate, also known as *shrinkage*. This is used as a
         multiplicative factor for the leaves values. Use ``1`` for no
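Taken together, the new `loss='quantile'` option and `quantile` parameter would be used like this. A usage sketch assuming a scikit-learn build that includes this change; the data is synthetic and illustrative:

import numpy as np
from sklearn.ensemble import HistGradientBoostingRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(1000, 1))
y = 3 * X[:, 0] + rng.normal(scale=0.5, size=1000)  # noisy linear target

# Estimate the 90th percentile of y given X.
est = HistGradientBoostingRegressor(loss="quantile", quantile=0.9)
est.fit(X, y)

# Roughly 90% of the training targets should lie at or below the predictions.
print(np.mean(y <= est.predict(X)))

Because the pinball loss penalizes under-predictions more heavily than over-predictions (by a factor q / (1 - q)), the fitted predictions approximate the conditional q-quantile rather than the conditional mean.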
@@ -1294,12 +1303,14 @@ class HistGradientBoostingRegressor(RegressorMixin, BaseHistGradientBoosting):
         "absolute_error",
         "least_absolute_deviation",
         "poisson",
+        "quantile",
     )
 
     def __init__(
         self,
         loss="squared_error",
         *,
+        quantile=None,
         learning_rate=0.1,
         max_iter=100,
         max_leaf_nodes=31,
@@ -1338,6 +1349,7 @@ def __init__(
             verbose=verbose,
             random_state=random_state,
         )
+        self.quantile = quantile
 
     def predict(self, X):
         """Predict values for X.
@@ -1409,7 +1421,12 @@ def _get_loss(self, sample_weight):
             )
             return _LOSSES["absolute_error"](sample_weight=sample_weight)
 
-        return _LOSSES[self.loss](sample_weight=sample_weight)
+        if self.loss == "quantile":
+            return _LOSSES[self.loss](
+                sample_weight=sample_weight, quantile=self.quantile
+            )
+        else:
+            return _LOSSES[self.loss](sample_weight=sample_weight)
 
 
 class HistGradientBoostingClassifier(ClassifierMixin, BaseHistGradientBoosting):
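A rough end-to-end check of the `_get_loss` dispatch: a model fitted with `loss='quantile'` should beat a squared-error model on the matching pinball metric. A sketch, assuming `sklearn.metrics.mean_pinball_loss` (present in recent scikit-learn releases), where `alpha` is the quantile level:

import numpy as np
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.metrics import mean_pinball_loss

rng = np.random.RandomState(42)
X = rng.uniform(size=(2000, 1))
y = 10 * X[:, 0] + rng.lognormal(sigma=1.0, size=2000)  # skewed noise

q = 0.95
quantile_model = HistGradientBoostingRegressor(loss="quantile", quantile=q).fit(X, y)
mean_model = HistGradientBoostingRegressor(loss="squared_error").fit(X, y)

# The model trained on the pinball loss should score lower (better) at alpha=q.
print(mean_pinball_loss(y, quantile_model.predict(X), alpha=q))
print(mean_pinball_loss(y, mean_model.predict(X), alpha=q))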