DOC Adds missing imports into examples and checks them in tests by thomasjpfan · Pull Request #21186 · scikit-learn/scikit-learn · GitHub

DOC Adds missing imports into examples and checks them in tests #21186


Merged · 1 commit · Sep 29, 2021
1 change: 1 addition & 0 deletions sklearn/compose/_column_transformer.py
@@ -1002,6 +1002,7 @@ class make_column_selector:
>>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
>>> from sklearn.compose import make_column_transformer
>>> from sklearn.compose import make_column_selector
>>> import numpy as np
>>> import pandas as pd # doctest: +SKIP
>>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'],
... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP
7 changes: 7 additions & 0 deletions sklearn/conftest.py
@@ -154,6 +154,13 @@ def pytest_collection_modifyitems(config, items):
except ImportError:
pass

# Normally doctest has the entire module's scope. Here we set globs to an empty dict
# to remove the module's scope:
# https://docs.python.org/3/library/doctest.html#what-s-the-execution-context
for item in items:
if isinstance(item, DoctestItem):
item.dtest.globs = {}

if skip_doctests:
skip_marker = pytest.mark.skip(reason=reason)

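For context (not part of this PR's diff), here is a minimal sketch of why clearing ``globs`` surfaces missing imports. It drives ``doctest`` directly and assumes only the standard library plus numpy; the names ``EXAMPLE``, ``module_scope``, and ``empty_scope`` are illustrative, not from the PR:

import doctest
import numpy as np

EXAMPLE = """
>>> np.zeros(3).shape
(3,)
"""

parser = doctest.DocTestParser()
runner = doctest.DocTestRunner(verbose=False)

# With the enclosing module's namespace injected (pytest's default
# behaviour), the example passes even though it never imports numpy itself:
test = parser.get_doctest(EXAMPLE, globs={"np": np}, name="module_scope",
                          filename="<example>", lineno=0)
print(runner.run(test))  # TestResults(failed=0, attempted=1)

# With an empty globs dict, as the hook above enforces, the same example
# raises NameError until the docstring gains its own ``import numpy as np``.
# ``out`` is silenced so only the summary is printed:
test = parser.get_doctest(EXAMPLE, globs={}, name="empty_scope",
                          filename="<example>", lineno=0)
print(runner.run(test, out=lambda s: None))  # TestResults(failed=1, attempted=1)

Because every collected doctest now starts from an empty namespace, each docstring edit below simply adds the import its example had been borrowing from the enclosing module.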
1 change: 1 addition & 0 deletions sklearn/feature_selection/_variance_threshold.py
@@ -58,6 +58,7 @@ class VarianceThreshold(SelectorMixin, BaseEstimator):
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::

>>> from sklearn.feature_selection import VarianceThreshold
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
2 changes: 2 additions & 0 deletions sklearn/linear_model/_coordinate_descent.py
@@ -325,6 +325,8 @@ def lasso_path(

Comparing lasso_path and lars_path with interpolation:

>>> import numpy as np
>>> from sklearn.linear_model import lasso_path
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
3 changes: 3 additions & 0 deletions sklearn/metrics/_ranking.py
@@ -523,6 +523,7 @@ class scores must correspond to the order of ``labels``,

Multilabel case:

>>> import numpy as np
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> X, y = make_multilabel_classification(random_state=0)
@@ -1429,6 +1430,7 @@ def dcg_score(

Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import dcg_score
>>> # we have ground-truth relevance of some answers to a query:
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
@@ -1578,6 +1580,7 @@ def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False

Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import ndcg_score
>>> # we have ground-truth relevance of some answers to a query:
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
1 change: 1 addition & 0 deletions sklearn/model_selection/_search_successive_halving.py
@@ -1002,6 +1002,7 @@ class HalvingRandomSearchCV(BaseSuccessiveHalving):
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> from sklearn.model_selection import HalvingRandomSearchCV
>>> from scipy.stats import randint
>>> import numpy as np
...
>>> X, y = load_iris(return_X_y=True)
>>> clf = RandomForestClassifier(random_state=0)
1 change: 1 addition & 0 deletions sklearn/neighbors/_kde.py
@@ -102,6 +102,7 @@ class KernelDensity(BaseEstimator):
--------
Compute a Gaussian kernel density estimate with a fixed bandwidth.

>>> from sklearn.neighbors import KernelDensity
>>> import numpy as np
>>> rng = np.random.RandomState(42)
>>> X = rng.random_sample((100, 3))
1 change: 1 addition & 0 deletions sklearn/pipeline.py
@@ -864,6 +864,7 @@ def make_pipeline(*steps, memory=None, verbose=False):
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
Pipeline(steps=[('standardscaler', StandardScaler()),
('gaussiannb', GaussianNB())])
1 change: 1 addition & 0 deletions sklearn/preprocessing/_discretization.py
@@ -107,6 +107,7 @@ class KBinsDiscretizer(TransformerMixin, BaseEstimator):

Examples
--------
>>> from sklearn.preprocessing import KBinsDiscretizer
>>> X = [[-2, 1, -4, -1],
... [-1, 2, -3, -0.5],
... [ 0, 3, -2, 0.5],
2 changes: 1 addition & 1 deletion sklearn/random_projection.py
@@ -96,7 +96,7 @@ def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1):

Examples
--------

>>> from sklearn.random_projection import johnson_lindenstrauss_min_dim
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663

5 changes: 5 additions & 0 deletions sklearn/utils/__init__.py
@@ -89,6 +89,7 @@ class Bunch(dict):

Examples
--------
>>> from sklearn.utils import Bunch
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
@@ -493,6 +494,7 @@ def resample(*arrays, replace=True, n_samples=None, random_state=None, stratify=
--------
It is possible to mix sparse and dense arrays in the same run::

>>> import numpy as np
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])

@@ -630,6 +632,7 @@ def shuffle(*arrays, random_state=None, n_samples=None):
--------
It is possible to mix sparse and dense arrays in the same run::

>>> import numpy as np
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])

@@ -999,6 +1002,8 @@ def is_scalar_nan(x):

Examples
--------
>>> import numpy as np
>>> from sklearn.utils import is_scalar_nan
>>> is_scalar_nan(np.nan)
True
>>> is_scalar_nan(float("nan"))
2 changes: 2 additions & 0 deletions sklearn/utils/_testing.py
@@ -255,6 +255,8 @@ def ignore_warnings(obj=None, category=Warning):

Examples
--------
>>> import warnings
>>> from sklearn.utils._testing import ignore_warnings
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')

1 change: 1 addition & 0 deletions sklearn/utils/extmath.py
@@ -670,6 +670,7 @@ def cartesian(arrays, out=None):

Examples
--------
>>> from sklearn.utils.extmath import cartesian
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
1 change: 1 addition & 0 deletions sklearn/utils/multiclass.py
@@ -238,6 +238,7 @@ def type_of_target(y):

Examples
--------
>>> from sklearn.utils.multiclass import type_of_target
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
2 changes: 2 additions & 0 deletions sklearn/utils/validation.py
@@ -1068,6 +1068,7 @@ def has_fit_parameter(estimator, parameter):
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.utils.validation import has_fit_parameter
>>> has_fit_parameter(SVC(), "sample_weight")
True

@@ -1372,6 +1373,7 @@ def _check_psd_eigenvalues(lambdas, enable_warnings=False):

Examples
--------
>>> from sklearn.utils.validation import _check_psd_eigenvalues
>>> _check_psd_eigenvalues([1, 2]) # nominal case
array([1, 2])
>>> _check_psd_eigenvalues([5, 5j]) # significant imag part