Commit 82300d7 · samronsin/scikit-learn · GitHub

DOC Adds missing imports into examples and test them (scikit-learn#21186)

thomasjpfan authored and samronsin committed
1 parent 4d1173c · commit 82300d7

The commit adds the imports that docstring examples were silently relying on, and updates sklearn/conftest.py so that every collected doctest runs with empty globals. With the module's scope removed, an example that omits one of its imports now fails the doctest suite instead of borrowing names from the enclosing module.

15 files changed · +30 −1 lines changed

sklearn/compose/_column_transformer.py

Lines changed: 1 addition & 0 deletions

@@ -1002,6 +1002,7 @@ class make_column_selector:
     >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
     >>> from sklearn.compose import make_column_transformer
     >>> from sklearn.compose import make_column_selector
+    >>> import numpy as np
     >>> import pandas as pd  # doctest: +SKIP
     >>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'],
     ...                   'rating': [5, 3, 4, 5]})  # doctest: +SKIP

sklearn/conftest.py

Lines changed: 7 additions & 0 deletions

@@ -154,6 +154,13 @@ def pytest_collection_modifyitems(config, items):
     except ImportError:
         pass

+    # Normally doctest has the entire module's scope. Here we set globs to an
+    # empty dict to remove the module's scope:
+    # https://docs.python.org/3/library/doctest.html#what-s-the-execution-context
+    for item in items:
+        if isinstance(item, DoctestItem):
+            item.dtest.globs = {}
+
     if skip_doctests:
         skip_marker = pytest.mark.skip(reason=reason)
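
Why emptying globs catches missing imports: by default a doctest executes against a copy of its module's globals, so an example can use a name like np that the module itself imported but the example never did. A minimal sketch of the effect, using the standard doctest API directly (the docstring text and runner setup below are illustrative, not part of the commit):

    import doctest
    import numpy as np

    # An example that uses "np" without importing it, as many docstrings did.
    source = """
    >>> np.arange(3)
    array([0, 1, 2])
    """

    parser = doctest.DocTestParser()

    # With the module's globals supplied, the example passes even though it
    # never imports numpy itself.
    test = parser.get_doctest(source, {"np": np}, "demo", None, 0)
    runner = doctest.DocTestRunner()
    runner.run(test)
    print(runner.failures)  # 0

    # With empty globs -- what the conftest change enforces -- the same
    # example raises NameError and is reported as a failure.
    test = parser.get_doctest(source, {}, "demo", None, 0)
    runner = doctest.DocTestRunner()
    runner.run(test)
    print(runner.failures)  # 1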

sklearn/feature_selection/_variance_threshold.py

Lines changed: 1 addition & 0 deletions

@@ -58,6 +58,7 @@ class VarianceThreshold(SelectorMixin, BaseEstimator):
     The following dataset has integer features, two of which are the same
     in every sample. These are removed with the default setting for threshold::

+        >>> from sklearn.feature_selection import VarianceThreshold
         >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
         >>> selector = VarianceThreshold()
         >>> selector.fit_transform(X)

sklearn/linear_model/_coordinate_descent.py

Lines changed: 2 additions & 0 deletions

@@ -325,6 +325,8 @@ def lasso_path(

     Comparing lasso_path and lars_path with interpolation:

+    >>> import numpy as np
+    >>> from sklearn.linear_model import lasso_path
     >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
     >>> y = np.array([1, 2, 3.1])
     >>> # Use lasso_path to compute a coefficient path

sklearn/metrics/_ranking.py

Lines changed: 3 additions & 0 deletions

@@ -523,6 +523,7 @@ class scores must correspond to the order of ``labels``,

     Multilabel case:

+    >>> import numpy as np
     >>> from sklearn.datasets import make_multilabel_classification
     >>> from sklearn.multioutput import MultiOutputClassifier
     >>> X, y = make_multilabel_classification(random_state=0)

@@ -1429,6 +1430,7 @@ def dcg_score(

     Examples
     --------
+    >>> import numpy as np
     >>> from sklearn.metrics import dcg_score
     >>> # we have groud-truth relevance of some answers to a query:
     >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])

@@ -1578,6 +1580,7 @@ def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False

     Examples
     --------
+    >>> import numpy as np
     >>> from sklearn.metrics import ndcg_score
     >>> # we have groud-truth relevance of some answers to a query:
     >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])

sklearn/model_selection/_search_successive_halving.py

Lines changed: 1 addition & 0 deletions

@@ -1002,6 +1002,7 @@ class HalvingRandomSearchCV(BaseSuccessiveHalving):
     >>> from sklearn.experimental import enable_halving_search_cv  # noqa
     >>> from sklearn.model_selection import HalvingRandomSearchCV
     >>> from scipy.stats import randint
+    >>> import numpy as np
     ...
     >>> X, y = load_iris(return_X_y=True)
     >>> clf = RandomForestClassifier(random_state=0)

sklearn/neighbors/_kde.py

Lines changed: 1 addition & 0 deletions

@@ -102,6 +102,7 @@ class KernelDensity(BaseEstimator):
     --------
     Compute a gaussian kernel density estimate with a fixed bandwidth.

+    >>> from sklearn.neighbors import KernelDensity
     >>> import numpy as np
     >>> rng = np.random.RandomState(42)
     >>> X = rng.random_sample((100, 3))

sklearn/pipeline.py

Lines changed: 1 addition & 0 deletions

@@ -864,6 +864,7 @@ def make_pipeline(*steps, memory=None, verbose=False):
     --------
     >>> from sklearn.naive_bayes import GaussianNB
     >>> from sklearn.preprocessing import StandardScaler
+    >>> from sklearn.pipeline import make_pipeline
     >>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
     Pipeline(steps=[('standardscaler', StandardScaler()),
                     ('gaussiannb', GaussianNB())])

sklearn/preprocessing/_discretization.py

Lines changed: 1 addition & 0 deletions

@@ -107,6 +107,7 @@ class KBinsDiscretizer(TransformerMixin, BaseEstimator):

     Examples
     --------
+    >>> from sklearn.preprocessing import KBinsDiscretizer
     >>> X = [[-2, 1, -4, -1],
     ...      [-1, 2, -3, -0.5],
     ...      [ 0, 3, -2, 0.5],

sklearn/random_projection.py

Lines changed: 1 addition & 1 deletion

@@ -96,7 +96,7 @@ def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1):

     Examples
     --------
-
+    >>> from sklearn.random_projection import johnson_lindenstrauss_min_dim
     >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
     663
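
A quick way to reproduce what the new test setup checks, for one of the touched functions (an illustrative snippet, not part of the diff): run the docstring examples with empty globals, mirroring what the conftest change makes pytest do across the whole library.

    import doctest
    from sklearn.random_projection import johnson_lindenstrauss_min_dim

    # Run the function's docstring examples in an empty namespace; with the
    # import now present in the example, this passes where it used to fail.
    doctest.run_docstring_examples(
        johnson_lindenstrauss_min_dim, globs={}, verbose=True
    )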

0 commit comments