MAINT Fix typos found by codespell (#26448) · scikit-learn/scikit-learn@2fd022d · GitHub

Commit 2fd022d

MAINT Fix typos found by codespell (#26448)
1 parent 41b0bd8 commit 2fd022d


44 files changed: +62 -62 lines changed
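Note: typos like the ones fixed in this commit are typically found by running the codespell tool over the source tree. A minimal sketch of driving it from Python, assuming codespell is installed; the target directories are illustrative, not the exact options used for this commit:

    import subprocess

    # Run codespell over the main source directories and print its report.
    # Each finding is reported as "path:line: typo ==> suggestion".
    result = subprocess.run(
        ["codespell", "sklearn", "doc", "examples", "build_tools"],
        capture_output=True,
        text=True,
    )
    print(result.stdout)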

azure-pipelines.yml

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ jobs:
 #
 # The nogil build relies on a dedicated PyPI-style index to install patched
 # versions of NumPy, SciPy and Cython maintained by @colesbury and that
-# include specifc fixes to make them run correctly without relying on the GIL.
+# include specific fixes to make them run correctly without relying on the GIL.
 #
 # The goal of this CI entry is to make sure that we do not introduce any
 # dependency on the GIL in scikit-learn itself. An auxiliary goal is to early

build_tools/linting.sh

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ bad_deprecation_property_order=`git grep -A 10 "@property" -- "*.py" | awk '/@p
 if [ ! -z "$bad_deprecation_property_order" ]
 then
     echo "property decorator should come before deprecated decorator"
-    echo "found the following occurrencies:"
+    echo "found the following occurrences:"
     echo $bad_deprecation_property_order
     exit 1
 fi
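Note: the lint check above enforces that `@property` is stacked on top of the deprecation decorator, so that deprecation wraps the getter before it is turned into a property. A minimal sketch of the enforced ordering, assuming `sklearn.utils.deprecated`; the attribute names and message are hypothetical:

    from sklearn.utils import deprecated

    class Estimator:
        # Correct order: @property first, then the deprecation decorator,
        # which wraps the getter before the result becomes a property.
        @property
        @deprecated("`old_attr_` was deprecated; use `new_attr_` instead.")
        def old_attr_(self):
            return self.new_attr_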

doc/developers/advanced_installation.rst

Lines changed: 1 addition & 1 deletion
@@ -185,7 +185,7 @@ which allows you to edit the code in-place. This builds the extension in place a
 creates a link to the development directory (see `the pip docs
 <https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs>`_).

-As the doc aboves explains, this is fundamentally similar to using the command
+As the doc above explains, this is fundamentally similar to using the command
 ``python setup.py develop``. (see `the setuptool docs
 <https://setuptools.pypa.io/en/latest/userguide/development_mode.html>`_).
 It is however preferred to use pip.

doc/modules/linear_model.rst

Lines changed: 1 addition & 1 deletion
@@ -1194,7 +1194,7 @@ Examples of use cases include:
 * Risk modeling / insurance policy pricing: number of claim events /
   policyholder per year (Poisson), cost per event (Gamma), total cost per
   policyholder per year (Tweedie / Compound Poisson Gamma).
-* Credit Default: probability that a loan can't be payed back (Bernouli).
+* Credit Default: probability that a loan can't be paid back (Bernouli).
 * Fraud Detection: probability that a financial transaction like a cash transfer
   is a fraudulent transaction (Bernoulli).
 * Predictive maintenance: number of production interruption events per year

examples/cluster/plot_adjusted_for_chance_measures.py

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ def fixed_classes_uniform_labelings_scores(


 # %%
-# In this first example we set the number of clases (true number of clusters) to
+# In this first example we set the number of classes (true number of clusters) to
 # `n_classes=10`. The number of clusters varies over the values provided by
 # `n_clusters_range`.


examples/cluster/plot_dbscan.py

Lines changed: 1 addition & 1 deletion
@@ -89,7 +89,7 @@
 # ------------
 #
 # Core samples (large dots) and non-core samples (small dots) are color-coded
-# according to the asigned cluster. Samples tagged as noise are represented in
+# according to the assigned cluster. Samples tagged as noise are represented in
 # black.

 unique_labels = set(labels)

examples/cluster/plot_face_compress.py

Lines changed: 1 addition & 1 deletion
@@ -99,7 +99,7 @@
 # image is still looking good.
 #
 # We observe that the distribution of pixels values have been mapped to 8
-# different values. We can check the correspondance between such values and the
+# different values. We can check the correspondence between such values and the
 # original pixel values.

 bin_edges = encoder.bin_edges_[0]

examples/cluster/plot_mini_batch_kmeans.py

Lines changed: 2 additions & 2 deletions
@@ -132,8 +132,8 @@
 for k in range(n_clusters):
     different += (k_means_labels == k) != (mbk_means_labels == k)

-identic = np.logical_not(different)
-ax.plot(X[identic, 0], X[identic, 1], "w", markerfacecolor="#bbbbbb", marker=".")
+identical = np.logical_not(different)
+ax.plot(X[identical, 0], X[identical, 1], "w", markerfacecolor="#bbbbbb", marker=".")
 ax.plot(X[different, 0], X[different, 1], "w", markerfacecolor="m", marker=".")
 ax.set_title("Difference")
 ax.set_xticks(())

examples/ensemble/plot_monotonic_constraints.py

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@
 # Using feature names to specify monotonic constraints
 # ----------------------------------------------------
 #
-# Note that if the training data has feature names, it's possible to specifiy the
+# Note that if the training data has feature names, it's possible to specify the
 # monotonic constraints by passing a dictionary:
 import pandas as pd

examples/linear_model/plot_tweedie_regression_insurance_claims.py

Lines changed: 1 addition & 1 deletion
@@ -452,7 +452,7 @@ def score_estimator(
 #
 # We conclude that the claim amount is very challenging to predict. Still, the
 # :class:`~sklearn.linear.GammaRegressor` is able to leverage some information
-# from the input features to slighly improve upon the mean baseline in terms
+# from the input features to slightly improve upon the mean baseline in terms
 # of D².
 #
 # Note that the resulting model is the average claim amount per claim. As such,

examples/miscellaneous/plot_kernel_ridge_regression.py

Lines changed: 1 addition & 1 deletion
@@ -122,7 +122,7 @@
 # The previous figure compares the learned model of KRR and SVR when both
 # complexity/regularization and bandwidth of the RBF kernel are optimized using
 # grid-search. The learned functions are very similar; however, fitting KRR is
-# approximatively 3-4 times faster than fitting SVR (both with grid-search).
+# approximately 3-4 times faster than fitting SVR (both with grid-search).
 #
 # Prediction of 100000 target values could be in theory approximately three
 # times faster with SVR since it has learned a sparse model using only

examples/miscellaneous/plot_set_output.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@

 # %%
 # Next we load the titanic dataset to demonstrate `set_output` with
-# :class:`compose.ColumnTransformer` and heterogenous data.
+# :class:`compose.ColumnTransformer` and heterogeneous data.
 from sklearn.datasets import fetch_openml

 X, y = fetch_openml(
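Note: the `set_output` API mentioned in this hunk makes any transformer return a pandas DataFrame instead of a NumPy array. A minimal sketch on toy data; the column names are made up for illustration:

    import pandas as pd
    from sklearn.preprocessing import StandardScaler

    X = pd.DataFrame({"age": [20.0, 30.0, 40.0], "fare": [7.0, 71.0, 8.0]})
    scaler = StandardScaler().set_output(transform="pandas")
    # fit_transform now returns a DataFrame that keeps the column names.
    print(scaler.fit_transform(X))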

examples/mixture/plot_gmm_init.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@
5757

5858

5959
def get_initial_means(X, init_params, r):
60-
# Run a GaussianMixture with max_iter=0 to output the initalization means
60+
# Run a GaussianMixture with max_iter=0 to output the initialization means
6161
gmm = GaussianMixture(
6262
n_components=4, init_params=init_params, tol=1e-9, max_iter=0, random_state=r
6363
).fit(X)

examples/model_selection/plot_det.py

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@
 # Define the classifiers
 # ----------------------
 #
-# Here we define two different classifiers. The goal is to visualy compare their
+# Here we define two different classifiers. The goal is to visually compare their
 # statistical performance across thresholds using the ROC and DET curves. There
 # is no particular reason why these classifiers are chosen other classifiers
 # available in scikit-learn.

examples/model_selection/plot_likelihood_ratios.py

Lines changed: 2 additions & 2 deletions
@@ -224,7 +224,7 @@ def extract_score(cv_results):
 disp.ax_.legend(*scatter.legend_elements())

 # %%
-# We define a function for bootstraping.
+# We define a function for bootstrapping.


 def scoring_on_bootstrap(estimator, X, y, rng, n_bootstrap=100):
@@ -241,7 +241,7 @@ def scoring_on_bootstrap(estimator, X, y, rng, n_bootstrap=100):


 # %%
-# We score the base model for each prevalence using bootstraping.
+# We score the base model for each prevalence using bootstrapping.

 results = defaultdict(list)
 n_bootstrap = 100
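Note: the bootstrapping referenced above means scoring an estimator on datasets resampled with replacement. A minimal generic sketch, not the example's actual `scoring_on_bootstrap` implementation:

    import numpy as np

    def bootstrap_scores(estimator, X, y, rng, n_bootstrap=100):
        # Fit on each resample drawn with replacement, score on the full data.
        scores = []
        for _ in range(n_bootstrap):
            idx = rng.choice(len(X), size=len(X), replace=True)
            scores.append(estimator.fit(X[idx], y[idx]).score(X, y))
        return np.asarray(scores)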

examples/preprocessing/plot_scaling_importance.py

Lines changed: 2 additions & 2 deletions
@@ -100,7 +100,7 @@ def fit_and_plot_model(X_plot, y, clf, ax):
 _ = ax2.set_title("KNN with scaling")

 # %%
-# Here the desicion boundary shows that fitting scaled or non-scaled data lead
+# Here the decision boundary shows that fitting scaled or non-scaled data lead
 # to completely different models. The reason is that the variable "proline" has
 # values which vary between 0 and 1,000; whereas the variable "hue" varies
 # between 1 and 10. Because of this, distances between samples are mostly
@@ -187,7 +187,7 @@ def fit_and_plot_model(X_plot, y, clf, ax):
 # %%
 # From the plot above we observe that scaling the features before reducing the
 # dimensionality results in components with the same order of magnitude. In this
-# case it also improves the separability of the clases. Indeed, in the next
+# case it also improves the separability of the classes. Indeed, in the next
 # section we confirm that a better separability has a good repercussion on the
 # overall model's performance.
 #
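Note: the point made in this hunk (distance-based models need features on comparable scales) is usually addressed by putting a scaler in front of the estimator. A minimal sketch on the wine data discussed above:

    from sklearn.datasets import load_wine
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler

    X, y = load_wine(return_X_y=True)
    # Scaling keeps wide-range features such as "proline" from dominating
    # the distance computation.
    model = make_pipeline(StandardScaler(), KNeighborsClassifier()).fit(X, y)
    print(model.score(X, y))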

examples/text/plot_document_clustering.py

Lines changed: 2 additions & 2 deletions
@@ -197,7 +197,7 @@ def fit_and_evaluate(km, X, name=None, n_runs=5):
 # `max_df=0.5`) and terms that are not present in at least 5 documents (set by
 # `min_df=5`), the resulting number of unique terms `n_features` is around
 # 8,000. We can additionally quantify the sparsity of the `X_tfidf` matrix as
-# the fraction of non-zero entries devided by the total number of elements.
+# the fraction of non-zero entries divided by the total number of elements.

 print(f"{X_tfidf.nnz / np.prod(X_tfidf.shape):.3f}")

@@ -230,7 +230,7 @@ def fit_and_evaluate(km, X, name=None, n_runs=5):
     random_state=seed,
 ).fit(X_tfidf)
 cluster_ids, cluster_sizes = np.unique(kmeans.labels_, return_counts=True)
-print(f"Number of elements asigned to each cluster: {cluster_sizes}")
+print(f"Number of elements assigned to each cluster: {cluster_sizes}")
 print()
 print(
     "True number of documents in each category according to the class labels: "

sklearn/datasets/_openml.py

Lines changed: 4 additions & 4 deletions
@@ -45,7 +45,7 @@ def _retry_with_clean_cache(
     """If the first call to the decorated function fails, the local cached
     file is removed, and the function is called again. If ``data_home`` is
     ``None``, then the function is called once. We can provide a specific
-    exception to not retry on usign `no_retry_exception` parameter.
+    exception to not retry on using `no_retry_exception` parameter.
     """

     def decorator(f):
@@ -998,22 +998,22 @@ def fetch_openml(
         if as_frame:
             err_msg = (
                 "Returning pandas objects requires pandas to be installed. "
-                "Alternatively, explicitely set `as_frame=False` and "
+                "Alternatively, explicitly set `as_frame=False` and "
                 "`parser='liac-arff'`."
             )
             raise ImportError(err_msg) from exc
         else:
             err_msg = (
                 f"Using `parser={parser_!r}` requires pandas to be installed. "
-                "Alternatively, explicitely set `parser='liac-arff'`."
+                "Alternatively, explicitly set `parser='liac-arff'`."
             )
             if parser == "auto":
                 # TODO(1.4): In version 1.4, we will raise an error instead of
                 # a warning.
                 warn(
                     (
                         "From version 1.4, `parser='auto'` with `as_frame=False` "
-                        "will use pandas. Either install pandas or set explicitely "
+                        "will use pandas. Either install pandas or set explicitly "
                         "`parser='liac-arff'` to preserve the current behavior."
                     ),
                     FutureWarning,
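Note: the messages fixed above concern `fetch_openml`'s `as_frame` and `parser` options. A minimal sketch of the pandas-free code path the messages point to; the dataset id (61, the OpenML iris dataset) is illustrative:

    from sklearn.datasets import fetch_openml

    # Without pandas installed, explicitly request the liac-arff parser
    # and plain NumPy output.
    bunch = fetch_openml(data_id=61, as_frame=False, parser="liac-arff")
    print(bunch.data.shape)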

sklearn/datasets/tests/test_openml.py

Lines changed: 2 additions & 2 deletions
@@ -603,7 +603,7 @@ def test_fetch_openml_difference_parsers(monkeypatch):

 ###############################################################################
 # Test the ARFF parsing on several dataset to check if detect the correct
-# types (categories, intgers, floats).
+# types (categories, integers, floats).


 @pytest.fixture(scope="module")
@@ -1009,7 +1009,7 @@ def test_fetch_openml_requires_pandas_error(monkeypatch, params):
         check_pandas_support("test_fetch_openml_requires_pandas")
     except ImportError:
         _monkey_patch_webbased_functions(monkeypatch, data_id, True)
-        err_msg = "requires pandas to be installed. Alternatively, explicitely"
+        err_msg = "requires pandas to be installed. Alternatively, explicitly"
         with pytest.raises(ImportError, match=err_msg):
             fetch_openml(data_id=data_id, **params)
     else:

sklearn/decomposition/_lda.py

Lines changed: 1 addition & 1 deletion
@@ -107,7 +107,7 @@ def _update_doc_distribution(
     X_indptr = X.indptr

     # These cython functions are called in a nested loop on usually very small arrays
-    # (lenght=n_topics). In that case, finding the appropriate signature of the
+    # (length=n_topics). In that case, finding the appropriate signature of the
     # fused-typed function can be more costly than its execution, hence the dispatch
     # is done outside of the loop.
     ctype = "float" if X.dtype == np.float32 else "double"

sklearn/externals/_lobpcg.py

Lines changed: 1 addition & 1 deletion
@@ -214,7 +214,7 @@ def lobpcg(
     Notes
     -----
     The iterative loop in lobpcg runs maxit=maxiter (or 20 if maxit=None)
-    iterations at most and finishes earler if the tolerance is met.
+    iterations at most and finishes earlier if the tolerance is met.
     Breaking backward compatibility with the previous version, lobpcg
     now returns the block of iterative vectors with the best accuracy rather
     than the last one iterated, as a cure for possible divergence.

sklearn/inspection/_plot/tests/test_boundary_decision_display.py

Lines changed: 1 addition & 1 deletion
@@ -308,7 +308,7 @@ def test_dataframe_labels_used(pyplot, fitted_clf):
     assert ax.get_xlabel() == "hello"
     assert ax.get_ylabel() == "world"

-    # labels get overriden only if provided to the `plot` method
+    # labels get overridden only if provided to the `plot` method
     disp.plot(ax=ax, xlabel="overwritten_x", ylabel="overwritten_y")
     assert ax.get_xlabel() == "overwritten_x"
     assert ax.get_ylabel() == "overwritten_y"

sklearn/linear_model/_glm/glm.py

Lines changed: 2 additions & 2 deletions
@@ -124,8 +124,8 @@ class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator):
     HalfSquaredError        identity y any real number
     HalfPoissonLoss         log      0 <= y
     HalfGammaLoss           log      0 < y
-    HalfTweedieLoss         log      dependend on tweedie power
-    HalfTweedieLossIdentity identity dependend on tweedie power
+    HalfTweedieLoss         log      dependent on tweedie power
+    HalfTweedieLossIdentity identity dependent on tweedie power
     ======================= ======== ==========================

     The link function of the GLM, i.e. mapping from linear predictor
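Note: the loss/link table above is surfaced through the public GLM estimators, where the valid range of `y` depends on the Tweedie `power`. A minimal sketch; the data is made up for illustration:

    import numpy as np
    from sklearn.linear_model import TweedieRegressor

    X = np.array([[1.0], [2.0], [3.0], [4.0]])
    y = np.array([0.0, 1.0, 0.0, 2.0])  # for 1 < power < 2, y >= 0 is allowed
    # power=1.5 selects a compound Poisson-Gamma deviance with a log link.
    model = TweedieRegressor(power=1.5, alpha=0.1).fit(X, y)
    print(model.predict(X))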

sklearn/linear_model/_glm/tests/test_glm.py

Lines changed: 1 addition & 1 deletion
@@ -109,7 +109,7 @@ def glm_dataset(global_random_seed, request):
         Last column of 1, i.e. intercept.
     y : ndarray
     coef_unpenalized : ndarray
-        Minimum norm solutions, i.e. min sum(loss(w)) (with mininum ||w||_2 in
+        Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
         case of ambiguity)
         Last coefficient is intercept.
     coef_penalized : ndarray

sklearn/linear_model/tests/test_common.py

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@
     RidgeCV(),
     pytest.param(
         SGDRegressor(tol=1e-15),
-        marks=pytest.mark.xfail(reason="Unsufficient precision."),
+        marks=pytest.mark.xfail(reason="Insufficient precision."),
     ),
     SGDRegressor(penalty="elasticnet", max_iter=10_000),
     TweedieRegressor(power=0),  # same as Ridge

sklearn/linear_model/tests/test_ridge.py

Lines changed: 1 addition & 1 deletion
@@ -104,7 +104,7 @@ def ols_ridge_dataset(global_random_seed, request):
         Last column of 1, i.e. intercept.
     y : ndarray
     coef_ols : ndarray of shape
-        Minimum norm OLS solutions, i.e. min ||X w - y||_2_2 (with mininum ||w||_2 in
+        Minimum norm OLS solutions, i.e. min ||X w - y||_2_2 (with minimum ||w||_2 in
         case of ambiguity)
         Last coefficient is intercept.
     coef_ridge : ndarray of shape (5,)

sklearn/manifold/_t_sne.py

Lines changed: 1 addition & 1 deletion
@@ -498,7 +498,7 @@ def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
        (ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.

     .. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving
-       Local Structure. Proceedings of the Twelth International Conference on
+       Local Structure. Proceedings of the Twelfth International Conference on
        Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.
     """
     n_samples = X.shape[0]

sklearn/metrics/_dist_metrics.pyx.tp

Lines changed: 1 addition & 1 deletion
@@ -2456,7 +2456,7 @@ cdef class RussellRaoDistance{{name_suffix}}(DistanceMetric{{name_suffix}}):
            else:
                i2 = i2 + 1

-        # We don't need to go through all the longuest
+        # We don't need to go through all the longest
         # vector because tf1 or tf2 will be false
         # and thus n_tt won't be increased.


sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py

Lines changed: 3 additions & 3 deletions
@@ -163,7 +163,7 @@ class ArgKmin(BaseDistancesReductionDispatcher):
     ArgKmin is typically used to perform
     bruteforce k-nearest neighbors queries.

-    This class is not meant to be instanciated, one should only use
+    This class is not meant to be instantiated, one should only use
     its :meth:`compute` classmethod which handles allocation and
     deallocation consistently.
     """
@@ -301,7 +301,7 @@ class RadiusNeighbors(BaseDistancesReductionDispatcher):
     The distance function `dist` depends on the values of the `metric`
     and `metric_kwargs` parameters.

-    This class is not meant to be instanciated, one should only use
+    This class is not meant to be instantiated, one should only use
     its :meth:`compute` classmethod which handles allocation and
     deallocation consistently.
     """
@@ -446,7 +446,7 @@ class ArgKminClassMode(BaseDistancesReductionDispatcher):
     queries when the weighted mode of the labels for the k-nearest neighbors
     are required, such as in `predict` methods.

-    This class is not meant to be instanciated, one should only use
+    This class is not meant to be instantiated, one should only use
     its :meth:`compute` classmethod which handles allocation and
     deallocation consistently.
     """

sklearn/metrics/_plot/tests/test_roc_curve_display.py

Lines changed: 1 addition & 1 deletion
@@ -141,7 +141,7 @@ def test_roc_curve_chance_level_line(
     chance_level_kw,
     constructor_name,
 ):
-    """Check the chance leve line plotting behaviour."""
+    """Check the chance level line plotting behaviour."""
     X, y = data_binary

     lr = LogisticRegression()

sklearn/metrics/_ranking.py

Lines changed: 1 addition & 1 deletion
@@ -1055,7 +1055,7 @@ def roc_curve(
     are reversed upon returning them to ensure they correspond to both ``fpr``
     and ``tpr``, which are sorted in reversed order during their calculation.

-    An arbritrary threshold is added for the case `tpr=0` and `fpr=0` to
+    An arbitrary threshold is added for the case `tpr=0` and `fpr=0` to
     ensure that the curve starts at `(0, 0)`. This threshold corresponds to the
     `np.inf`.

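Note: the fixed sentence documents the extra leading threshold that `roc_curve` prepends. A minimal sketch showing the behavior described in this docstring:

    import numpy as np
    from sklearn.metrics import roc_curve

    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    # The first threshold is the arbitrary np.inf entry that pins the
    # curve's start at (0, 0).
    print(thresholds[0], fpr[0], tpr[0])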

sklearn/metrics/tests/test_dist_metrics.py

Lines changed: 1 addition & 1 deletion
@@ -232,7 +232,7 @@ def test_distance_metrics_dtype_consistency(metric_param_grid):
     D64 = dm64.pairwise(X64)
     D32 = dm32.pairwise(X32)

-    # Both results are np.float64 dtype because the accumulation accross
+    # Both results are np.float64 dtype because the accumulation across
     # features is done in float64. However the input data and the element
     # wise arithmetic operations are done in float32 so we can expect a
     # small discrepancy.

sklearn/mixture/_bayesian_mixture.py

Lines changed: 2 additions & 2 deletions
@@ -541,7 +541,7 @@ def _estimate_weights(self, nk):
                 ),
             )
         else:
-            # case Variationnal Gaussian mixture with dirichlet distribution
+            # case Variational Gaussian mixture with dirichlet distribution
             self.weight_concentration_ = self.weight_concentration_prior_ + nk

     def _estimate_means(self, nk, xk):
@@ -749,7 +749,7 @@ def _estimate_log_weights(self):
                 + np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1]))
             )
         else:
-            # case Variationnal Gaussian mixture with dirichlet distribution
+            # case Variational Gaussian mixture with dirichlet distribution
             return digamma(self.weight_concentration_) - digamma(
                 np.sum(self.weight_concentration_)
             )

sklearn/neural_network/_multilayer_perceptron.py

Lines changed: 1 addition & 1 deletion
@@ -932,7 +932,7 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):

     best_loss_ : float or None
         The minimum loss reached by the solver throughout fitting.
-        If `early_stopping=True`, this attribute is set ot `None`. Refer to
+        If `early_stopping=True`, this attribute is set to `None`. Refer to
         the `best_validation_score_` fitted attribute instead.

     loss_curve_ : list of shape (`n_iter_`,)
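Note: a minimal sketch of the attribute behavior documented above, on toy data:

    from sklearn.datasets import make_classification
    from sklearn.neural_network import MLPClassifier

    X, y = make_classification(n_samples=200, random_state=0)
    clf = MLPClassifier(early_stopping=True, max_iter=50, random_state=0).fit(X, y)
    # With early stopping enabled, best_loss_ is None; the validation
    # score tracked during fitting is available instead.
    print(clf.best_loss_, clf.best_validation_score_)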

0 commit comments