@@ -1035,19 +1035,20 @@ def _boost(self, iboost, X, y, sample_weight, random_state):
         error_vect = np.abs(y_predict - y)
         sample_mask = sample_weight > 0
-        error_max = error_vect[sample_mask].max()
+        masked_sample_weight = sample_weight[sample_mask]
+        masked_error_vector = error_vect[sample_mask]
+
+        error_max = masked_error_vector.max()
         if error_max != 0:
-            error_vect /= error_max
+            masked_error_vector /= error_max

         if self.loss == 'square':
-            error_vect **= 2
+            masked_error_vector **= 2
         elif self.loss == 'exponential':
-            error_vect = 1. - np.exp(-error_vect)
+            masked_error_vector = 1. - np.exp(-masked_error_vector)

         # Calculate the average loss
-        estimator_error = (sample_weight[sample_mask] *
-                           error_vect[sample_mask]).sum()
+        estimator_error = (masked_sample_weight * masked_error_vector).sum()

         if estimator_error <= 0:
             # Stop if fit is perfect
@@ -1066,8 +1067,8 @@ def _boost(self, iboost, X, y, sample_weight, random_state):
         if not iboost == self.n_estimators - 1:
             sample_weight[sample_mask] *= np.power(
-                beta,
-                (1. - error_vect[sample_mask]) * self.learning_rate)
+                beta, (1. - masked_error_vector) * self.learning_rate
+            )

         return sample_weight, estimator_weight, estimator_error
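Below is a standalone sketch of what the patched hunks compute, using made-up toy values: sample_weight, y, y_predict, learning_rate, and the choice of the 'linear' loss are all illustrative, and beta is formed in the standard AdaBoost.R2 way since its actual definition sits above the second hunk and is not shown here. The sketch illustrates the point of the refactor: the error normalization, average loss, and weight update all operate only on samples whose weight is still positive.

# Illustrative sketch only, not part of the patch; toy values are hypothetical.
import numpy as np

sample_weight = np.array([0.25, 0.25, 0.25, 0.25, 0.0])  # last sample pruned
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y_predict = np.array([1.1, 1.8, 3.4, 4.0, 9.0])
learning_rate = 1.0

error_vect = np.abs(y_predict - y)   # [0.1, 0.2, 0.4, 0.0, 4.0]
sample_mask = sample_weight > 0      # excludes the zero-weight sample
masked_sample_weight = sample_weight[sample_mask]
masked_error_vector = error_vect[sample_mask]

# Normalize over surviving samples only: the pruned sample's large
# error (4.0) plays no part in any of the following quantities.
error_max = masked_error_vector.max()        # 0.4
if error_max != 0:
    masked_error_vector /= error_max         # [0.25, 0.5, 1.0, 0.0]

# Average loss under the 'linear' loss; 'square' and 'exponential'
# would first transform masked_error_vector as in the hunk above.
estimator_error = (masked_sample_weight * masked_error_vector).sum()  # 0.4375

# Standard AdaBoost.R2 beta; defined earlier in the real method.
beta = estimator_error / (1.0 - estimator_error)  # ~0.778

sample_weight[sample_mask] *= np.power(
    beta, (1.0 - masked_error_vector) * learning_rate
)
print(sample_weight)  # ~[0.207, 0.220, 0.25, 0.194, 0.]

Since beta < 1 when the average loss is below 0.5, low-error samples get the smallest multipliers and are down-weighted most, hard samples keep their weight, and the masked-out sample stays at zero, exactly as in the patched method.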