diff --git a/doc/contents.rst b/doc/contents.rst index 2fd871de8eedb..a28634621d558 100644 --- a/doc/contents.rst +++ b/doc/contents.rst @@ -2,6 +2,7 @@ .. include:: tune_toc.rst .. Places global toc into the sidebar + :globalsidebartoc: True ================= diff --git a/doc/developers/index.rst b/doc/developers/index.rst index e84d6f93f431d..e64adf5ac73a9 100644 --- a/doc/developers/index.rst +++ b/doc/developers/index.rst @@ -1,4 +1,5 @@ .. Places global toc into the sidebar + :globalsidebartoc: True .. _developers_guide: diff --git a/doc/preface.rst b/doc/preface.rst index 63e56315297c8..447083a3a8136 100644 --- a/doc/preface.rst +++ b/doc/preface.rst @@ -2,6 +2,7 @@ useful for PDF output as this section is not linked from elsewhere. .. Places global toc into the sidebar + :globalsidebartoc: True .. _preface_menu: diff --git a/doc/tutorial/index.rst b/doc/tutorial/index.rst index 8953e77e37a16..cfd63719321f2 100644 --- a/doc/tutorial/index.rst +++ b/doc/tutorial/index.rst @@ -1,4 +1,5 @@ .. Places global toc into the sidebar + :globalsidebartoc: True .. _tutorial_menu: diff --git a/doc/user_guide.rst b/doc/user_guide.rst index 7ded7c26f8b91..48679aa961782 100644 --- a/doc/user_guide.rst +++ b/doc/user_guide.rst @@ -1,4 +1,5 @@ .. Places global toc into the sidebar + :globalsidebartoc: True .. title:: User guide: contents diff --git a/examples/neighbors/approximate_nearest_neighbors.py b/examples/neighbors/approximate_nearest_neighbors.py index 19f0f702fe759..4042f057f4e98 100644 --- a/examples/neighbors/approximate_nearest_neighbors.py +++ b/examples/neighbors/approximate_nearest_neighbors.py @@ -18,29 +18,28 @@ compatibility reasons, one extra neighbor is computed when `mode == 'distance'`. Please note that we do the same in the proposed wrappers. 
-Sample output: - -``` -Benchmarking on MNIST_2000: ---------------------------- -AnnoyTransformer: 0.583 sec -NMSlibTransformer: 0.321 sec -KNeighborsTransformer: 1.225 sec -TSNE with AnnoyTransformer: 4.903 sec -TSNE with NMSlibTransformer: 5.009 sec -TSNE with KNeighborsTransformer: 6.210 sec -TSNE with internal NearestNeighbors: 6.365 sec - -Benchmarking on MNIST_10000: ----------------------------- -AnnoyTransformer: 4.457 sec -NMSlibTransformer: 2.080 sec -KNeighborsTransformer: 30.680 sec -TSNE with AnnoyTransformer: 30.225 sec -TSNE with NMSlibTransformer: 43.295 sec -TSNE with KNeighborsTransformer: 64.845 sec -TSNE with internal NearestNeighbors: 64.984 sec -``` +Sample output:: + + Benchmarking on MNIST_2000: + --------------------------- + AnnoyTransformer: 0.583 sec + NMSlibTransformer: 0.321 sec + KNeighborsTransformer: 1.225 sec + TSNE with AnnoyTransformer: 4.903 sec + TSNE with NMSlibTransformer: 5.009 sec + TSNE with KNeighborsTransformer: 6.210 sec + TSNE with internal NearestNeighbors: 6.365 sec + + Benchmarking on MNIST_10000: + ---------------------------- + AnnoyTransformer: 4.457 sec + NMSlibTransformer: 2.080 sec + KNeighborsTransformer: 30.680 sec + TSNE with AnnoyTransformer: 30.225 sec + TSNE with NMSlibTransformer: 43.295 sec + TSNE with KNeighborsTransformer: 64.845 sec + TSNE with internal NearestNeighbors: 64.984 sec + """ # Author: Tom Dupre la Tour # diff --git a/sklearn/ensemble/_stacking.py b/sklearn/ensemble/_stacking.py index 97f66aa077772..019256e64367e 100644 --- a/sklearn/ensemble/_stacking.py +++ b/sklearn/ensemble/_stacking.py @@ -342,8 +342,8 @@ class StackingClassifier(ClassifierMixin, _BaseStacking): `'predict_proba'`, `'decision_function'` or `'predict'` in that order. * otherwise, one of `'predict_proba'`, `'decision_function'` or - `'predict'`. If the method is not implemented by the estimator, it - will raise an error. + `'predict'`. If the method is not implemented by the estimator, it + will raise an error. 
n_jobs : int, default=None The number of jobs to run in parallel for `fit` of all `estimators`. diff --git a/sklearn/ensemble/forest.py b/sklearn/ensemble/forest.py index 9874bcc7d0a21..38f886f4e9664 100644 --- a/sklearn/ensemble/forest.py +++ b/sklearn/ensemble/forest.py @@ -989,10 +989,11 @@ class RandomForestClassifier(ForestClassifier): max_samples : int or float, default=None If bootstrap is True, the number of samples to draw from X to train each base estimator. - - If None (default), then draw `X.shape[0]` samples. - - If int, then draw `max_samples` samples. - - If float, then draw `max_samples * X.shape[0]` samples. Thus, - `max_samples` should be in the interval `(0, 1)`. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. Thus, + `max_samples` should be in the interval `(0, 1)`. .. versionadded:: 0.22 @@ -1277,10 +1278,11 @@ class RandomForestRegressor(ForestRegressor): max_samples : int or float, default=None If bootstrap is True, the number of samples to draw from X to train each base estimator. - - If None (default), then draw `X.shape[0]` samples. - - If int, then draw `max_samples` samples. - - If float, then draw `max_samples * X.shape[0]` samples. Thus, - `max_samples` should be in the interval `(0, 1)`. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. Thus, + `max_samples` should be in the interval `(0, 1)`. .. versionadded:: 0.22 @@ -1576,10 +1578,11 @@ class ExtraTreesClassifier(ForestClassifier): max_samples : int or float, default=None If bootstrap is True, the number of samples to draw from X to train each base estimator. - - If None (default), then draw `X.shape[0]` samples. - - If int, then draw `max_samples` samples. - - If float, then draw `max_samples * X.shape[0]` samples.
Thus, - `max_samples` should be in the interval `(0, 1)`. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. Thus, + `max_samples` should be in the interval `(0, 1)`. .. versionadded:: 0.22 @@ -1841,10 +1844,11 @@ class ExtraTreesRegressor(ForestRegressor): max_samples : int or float, default=None If bootstrap is True, the number of samples to draw from X to train each base estimator. - - If None (default), then draw `X.shape[0]` samples. - - If int, then draw `max_samples` samples. - - If float, then draw `max_samples * X.shape[0]` samples. Thus, - `max_samples` should be in the interval `(0, 1)`. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. Thus, + `max_samples` should be in the interval `(0, 1)`. .. versionadded:: 0.22 @@ -2069,10 +2073,11 @@ class RandomTreesEmbedding(BaseForest): max_samples : int or float, default=None If bootstrap is True, the number of samples to draw from X to train each base estimator. - - If None (default), then draw `X.shape[0]` samples. - - If int, then draw `max_samples` samples. - - If float, then draw `max_samples * X.shape[0]` samples. Thus, - `max_samples` should be in the interval `(0, 1)`. + + - If None (default), then draw `X.shape[0]` samples. + - If int, then draw `max_samples` samples. + - If float, then draw `max_samples * X.shape[0]` samples. Thus, + `max_samples` should be in the interval `(0, 1)`. .. versionadded:: 0.22 diff --git a/sklearn/ensemble/partial_dependence.py b/sklearn/ensemble/partial_dependence.py index cdac458006842..2b4b5eb66b555 100644 --- a/sklearn/ensemble/partial_dependence.py +++ b/sklearn/ensemble/partial_dependence.py @@ -211,6 +211,7 @@ def plot_partial_dependence(gbrt, X, features, feature_names=None, ---------- gbrt : BaseGradientBoosting A fitted gradient boosting model. 
+ X : array-like of shape (n_samples, n_features) The data on which ``gbrt`` was trained. diff --git a/sklearn/inspection/partial_dependence.py b/sklearn/inspection/partial_dependence.py index a899e26549bbe..69d87947ec8df 100644 --- a/sklearn/inspection/partial_dependence.py +++ b/sklearn/inspection/partial_dependence.py @@ -683,11 +683,13 @@ class PartialDependenceDisplay: Feature names corresponding to the indices in ``features``. target_idx : int + - In a multiclass setting, specifies the class for which the PDPs should be computed. Note that for binary classification, the positive class (index 1) is always used. - In a multioutput setting, specifies the task for which the PDPs should be computed. + Ignored in binary classification or classical regression settings. pdp_lim : dict diff --git a/sklearn/manifold/isomap.py b/sklearn/manifold/isomap.py index a1fe5243c6ca2..545c96aed8f5d 100644 --- a/sklearn/manifold/isomap.py +++ b/sklearn/manifold/isomap.py @@ -168,8 +168,8 @@ def _fit_transform(self, X): self.embedding_ = self.kernel_pca_.fit_transform(G) @property - @deprecated("Attribute training_data_ was deprecated in version 0.22 and " - "will be removed in 0.24.") + @deprecated("Attribute `training_data_` was deprecated in version 0.22 and" + " will be removed in 0.24.") def training_data_(self): check_is_fitted(self) return self.nbrs_._fit_X diff --git a/sklearn/utils/__init__.py b/sklearn/utils/__init__.py index a87beb96df977..f68cfb1ec2e16 100644 --- a/sklearn/utils/__init__.py +++ b/sklearn/utils/__init__.py @@ -285,10 +285,13 @@ def safe_indexing(X, indices, axis=0): X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series Data from which to sample rows, items or columns. `list` are only supported when `axis=0`. + indices : bool, int, str, slice, array-like + - If `axis=0`, boolean and integer array-like, integer slice, and scalar integer are supported.
- If `axis=1`: + - to select a single column, `indices` can be of `int` type for all `X` types and `str` only for dataframe. The selected subset will be 1D, unless `X` is a sparse matrix in which case it will @@ -298,6 +301,7 @@ def safe_indexing(X, indices, axis=0): these containers can be one of the following: `int`, 'bool' and `str`. However, `str` is only supported when `X` is a dataframe. The selected subset will be 2D. + axis : int, default=0 The axis along which `X` will be subsampled. `axis=0` will select rows while `axis=1` will select columns.