@@ -223,7 +223,7 @@ def staged_score(self, X, y, sample_weight=None):
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

-        Returns
+        Yields
        -------
        z : float
        """
@@ -242,7 +242,8 @@ def feature_importances_(self):
        Returns
        -------
-        feature_importances_ : array, shape = [n_features]
+        feature_importances_ : ndarray of shape (n_features,)
+            The feature importances.
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError("Estimator not fitted, "
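The reworded entry documents ``feature_importances_`` as an ndarray of shape (n_features,), available only on a fitted ensemble. A hedged sketch of reading it, using the iris data purely as an illustration:

```python
# Sketch (assumed data): feature_importances_ is an ndarray of shape
# (n_features,), populated only after fit().
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import AdaBoostClassifier

X, y = load_iris(return_X_y=True)
clf = AdaBoostClassifier(n_estimators=100, random_state=0).fit(X, y)

importances = clf.feature_importances_
print(importances.shape)               # (4,) for the iris features
print(np.isclose(importances.sum(), 1.0))  # importances typically sum to 1.0
```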
@@ -302,9 +303,9 @@ class AdaBoostClassifier(ClassifierMixin, BaseWeightBoosting):
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required, as well as proper
        ``classes_`` and ``n_classes_`` attributes. If ``None``, then
-        the base estimator is ``DecisionTreeClassifier(max_depth=1)``
+        the base estimator is ``DecisionTreeClassifier(max_depth=1)``.

-    n_estimators : integer, optional (default=50)
+    n_estimators : int, optional (default=50)
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.
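These parameter docs state the defaults: a ``DecisionTreeClassifier(max_depth=1)`` base estimator and ``n_estimators=50``, with boosting possibly stopping early on a perfect fit. A sketch of overriding both (the specific values are illustrative, not recommendations):

```python
# Sketch: overriding the documented defaults (DecisionTreeClassifier(max_depth=1)
# and n_estimators=50) with illustrative values.
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=300, random_state=0)

clf = AdaBoostClassifier(
    base_estimator=DecisionTreeClassifier(max_depth=2),  # default is max_depth=1
    n_estimators=100,                                    # default is 50
    random_state=0,
).fit(X, y)

# Boosting may stop early on a perfect fit, so the ensemble can hold
# fewer than n_estimators fitted estimators.
print(len(clf.estimators_))
```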
@@ -350,6 +351,32 @@ class AdaBoostClassifier(ClassifierMixin, BaseWeightBoosting):
    feature_importances_ : ndarray of shape (n_features,)
        The feature importances if supported by the ``base_estimator``.

+    See Also
+    --------
+    AdaBoostRegressor
+        An AdaBoost regressor that begins by fitting a regressor on the
+        original dataset and then fits additional copies of the regressor
+        on the same dataset but where the weights of instances are
+        adjusted according to the error of the current prediction.
+
+    GradientBoostingClassifier
+        GB builds an additive model in a forward stage-wise fashion. Regression
+        trees are fit on the negative gradient of the binomial or multinomial
+        deviance loss function. Binary classification is a special case where
+        only a single regression tree is induced.
+
+    sklearn.tree.DecisionTreeClassifier
+        A non-parametric supervised learning method used for classification.
+        Creates a model that predicts the value of a target variable by
+        learning simple decision rules inferred from the data features.
+
+    References
+    ----------
+    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
+           on-Line Learning and an Application to Boosting", 1995.
+
+    .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
+
    Examples
    --------
    >>> from sklearn.ensemble import AdaBoostClassifier
@@ -366,19 +393,6 @@ class AdaBoostClassifier(ClassifierMixin, BaseWeightBoosting):
    array([1])
    >>> clf.score(X, y)
    0.983...
-
-    See also
-    --------
-    AdaBoostRegressor, GradientBoostingClassifier,
-    sklearn.tree.DecisionTreeClassifier
-
-    References
-    ----------
-    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
-           on-Line Learning and an Application to Boosting", 1995.
-
-    .. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
-
    """
    def __init__(self,
                 base_estimator=None,
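The relocated ``See Also`` entry describes ``AdaBoostRegressor`` as fitting a regressor and then refitting copies on the same data with instance weights adjusted by the current prediction error. A brief, assumed-data sketch of that estimator for comparison:

```python
# Sketch of the AdaBoostRegressor mentioned in the added "See Also" entry;
# data and hyperparameters here are illustrative.
from sklearn.datasets import make_regression
from sklearn.ensemble import AdaBoostRegressor

X, y = make_regression(n_samples=400, n_features=10, noise=10.0, random_state=0)

reg = AdaBoostRegressor(n_estimators=100, random_state=0).fit(X, y)
print(reg.predict(X[:3]))
print(reg.score(X, y))  # R^2 on the training data
```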
@@ -414,6 +428,7 @@ def fit(self, X, y, sample_weight=None):
        Returns
        -------
        self : object
+            A fitted estimator.
        """
        # Check that algorithm is supported
        if self.algorithm not in ('SAMME', 'SAMME.R'):
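The added ``A fitted estimator.`` line reflects that ``fit`` returns ``self``, so construction and fitting can be chained; the surrounding context also shows the ``SAMME``/``SAMME.R`` check. A hedged sketch (synthetic data, illustrative settings):

```python
# Sketch: because fit() returns the fitted estimator itself, construction and
# fitting can be chained; 'SAMME' and 'SAMME.R' are the two accepted algorithms.
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier

X, y = make_classification(random_state=0)

clf = AdaBoostClassifier(algorithm="SAMME", random_state=0).fit(X, y)
print(clf.score(X, y))

# An unsupported value raises ValueError from the check shown in the diff.
try:
    AdaBoostClassifier(algorithm="SAMME.X").fit(X, y)
except ValueError as exc:
    print(exc)
```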
@@ -632,7 +647,7 @@ def staged_predict(self, X):
            The input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

-        Returns
+        Yields
        -------
        y : generator of array, shape = [n_samples]
            The predicted classes.
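As with ``staged_score``, the header becomes ``Yields`` because ``staged_predict`` yields one prediction array per boosting iteration. A small sketch on assumed synthetic data:

```python
# Sketch: staged_predict is a generator, yielding one prediction array per
# boosting iteration (hence the "Yields" section in the docstring).
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier

X, y = make_classification(random_state=0)
clf = AdaBoostClassifier(n_estimators=10, random_state=0).fit(X, y)

stages = list(clf.staged_predict(X))
print(len(stages))            # one array per fitted estimator
print(stages[-1].shape)       # (n_samples,), the final-stage predictions
```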
@@ -703,7 +718,7 @@ def staged_decision_function(self, X):
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

-        Returns
+        Yields
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
@@ -811,7 +826,7 @@ def staged_predict_proba(self, X):
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

-        Returns
+        Yields
        -------
        p : generator of array, shape = [n_samples]
            The class probabilities of the input samples. The order of
@@ -1130,7 +1145,7 @@ def staged_predict(self, X):
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.

-        Returns
+        Yields
        -------
        y : generator of array, shape = [n_samples]
            The predicted regression values.
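The regressor's ``staged_predict`` is likewise a generator, which is what the ``Yields`` header conveys. A sketch that tracks training error over the boosting stages (data and settings are illustrative):

```python
# Sketch: AdaBoostRegressor.staged_predict yields one prediction array per
# boosting stage, so the training error can be tracked across iterations.
from sklearn.datasets import make_regression
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error

X, y = make_regression(n_samples=200, noise=5.0, random_state=0)
reg = AdaBoostRegressor(n_estimators=20, random_state=0).fit(X, y)

errors = [mean_squared_error(y, y_pred) for y_pred in reg.staged_predict(X)]
print(len(errors))              # one MSE value per boosting stage
print(errors[0], errors[-1])    # training error typically shrinks over stages
```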