API Rename force_all_finite into ensure_all_finite (#29404) · scikit-learn/scikit-learn@1c47967

Commit 1c47967
API Rename force_all_finite into ensure_all_finite (#29404)
Co-authored-by: Adrin Jalali <adrin.jalali@gmail.com>
1 parent 0a2ca15 commit 1c47967

37 files changed: +354 -164 lines

doc/whats_new/v1.6.rst

Lines changed: 13 additions & 0 deletions

@@ -231,6 +231,11 @@ Changelog
   scoring="max_error" which is now deprecated.
   :pr:`29462` by :user:`Farid "Freddie" Taba <artificialfintelligence>`.
 
+- |API| the `force_all_finite` parameter of functions
+  :func:`metrics.pairwise.check_pairwise_arrays` and :func:`metrics.pairwise_distances`
+  is renamed into `ensure_all_finite`. `force_all_finite` will be removed in 1.8.
+  :pr:`29404` by :user:`Jérémie du Boisberranger <jeremiedb>`.
+
 :mod:`sklearn.model_selection`
 ..............................
 
@@ -272,6 +277,14 @@ Changelog
   traversed.
   :pr:`27966` by :user:`Adam Li <adam2392>`.
 
+:mod:`sklearn.utils`
+....................
+
+- |API| the `force_all_finite` parameter of functions :func:`utils.check_array`,
+  :func:`utils.check_X_y`, :func:`utils.as_float_array` is renamed into
+  `ensure_all_finite`. `force_all_finite` will be removed in 1.8.
+  :pr:`29404` by :user:`Jérémie du Boisberranger <jeremiedb>`.
+
 .. rubric:: Code and documentation contributors
 
 Thanks to everyone who has contributed to the maintenance and improvement of
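The two changelog entries above describe a plain keyword rename, so call sites only need the argument name swapped. A minimal sketch of the new spelling (assuming scikit-learn >= 1.6; the toy array is illustrative):

    import numpy as np
    from sklearn.metrics import pairwise_distances
    from sklearn.utils import check_array

    X = np.array([[0.0, np.nan], [1.0, 2.0]])

    # ensure_all_finite replaces force_all_finite; "allow-nan"
    # tolerates NaN but still rejects infinity.
    X_checked = check_array(X, ensure_all_finite="allow-nan")

    # The same rename applies to the pairwise helpers.
    D = pairwise_distances(np.nan_to_num(X), ensure_all_finite=True)

Per the entries above, the old `force_all_finite` keyword keeps working through a deprecation period until its removal in 1.8.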

sklearn/cluster/_hdbscan/hdbscan.py

Lines changed: 2 additions & 2 deletions

@@ -737,7 +737,7 @@ def fit(self, X, y=None):
         X = self._validate_data(
             X,
             accept_sparse=["csr", "lil"],
-            force_all_finite=False,
+            ensure_all_finite=False,
             dtype=np.float64,
         )
         self._raw_data = X
@@ -782,7 +782,7 @@ def fit(self, X, y=None):
         # Perform data validation after removing infinite values (numpy.inf)
         # from the given distance matrix.
         X = self._validate_data(
-            X, force_all_finite=False, dtype=np.float64, force_writeable=True
+            X, ensure_all_finite=False, dtype=np.float64, force_writeable=True
         )
         if np.isnan(X).any():
             # TODO: Support np.nan in Cython implementation for precomputed

sklearn/compose/_column_transformer.py

Lines changed: 2 additions & 2 deletions

@@ -1137,7 +1137,7 @@ def _hstack(self, Xs, *, n_samples):
                 # in a sparse matrix, `check_array` is used for the
                 # dtype conversion if necessary.
                 converted_Xs = [
-                    check_array(X, accept_sparse=True, force_all_finite=False)
+                    check_array(X, accept_sparse=True, ensure_all_finite=False)
                     for X in Xs
                 ]
             except ValueError as e:
@@ -1325,7 +1325,7 @@ def _check_X(X):
     """Use check_array only when necessary, e.g. on lists and other non-array-likes."""
     if hasattr(X, "__array__") or hasattr(X, "__dataframe__") or sparse.issparse(X):
         return X
-    return check_array(X, force_all_finite="allow-nan", dtype=object)
+    return check_array(X, ensure_all_finite="allow-nan", dtype=object)
 
 
 def _is_empty_column_selection(column):

sklearn/compose/_target.py

Lines changed: 1 addition & 1 deletion

@@ -262,7 +262,7 @@ def fit(self, X, y, **fit_params):
             y,
             input_name="y",
             accept_sparse=False,
-            force_all_finite=True,
+            ensure_all_finite=True,
             ensure_2d=False,
             dtype="numeric",
             allow_nd=True,

sklearn/covariance/_empirical_covariance.py

Lines changed: 1 addition & 1 deletion

@@ -90,7 +90,7 @@ def empirical_covariance(X, *, assume_centered=False):
            [0.25, 0.25, 0.25],
            [0.25, 0.25, 0.25]])
     """
-    X = check_array(X, ensure_2d=False, force_all_finite=False)
+    X = check_array(X, ensure_2d=False, ensure_all_finite=False)
 
     if X.ndim == 1:
         X = np.reshape(X, (1, -1))

sklearn/ensemble/_bagging.py

Lines changed: 5 additions & 5 deletions

@@ -391,7 +391,7 @@ def fit(self, X, y, *, sample_weight=None, **fit_params):
             y,
             accept_sparse=["csr", "csc"],
             dtype=None,
-            force_all_finite=False,
+            ensure_all_finite=False,
             multi_output=True,
         )
 
@@ -941,7 +941,7 @@ def predict_proba(self, X):
             X,
             accept_sparse=["csr", "csc"],
             dtype=None,
-            force_all_finite=False,
+            ensure_all_finite=False,
             reset=False,
         )
 
@@ -991,7 +991,7 @@ def predict_log_proba(self, X):
             X,
             accept_sparse=["csr", "csc"],
             dtype=None,
-            force_all_finite=False,
+            ensure_all_finite=False,
             reset=False,
         )
 
@@ -1046,7 +1046,7 @@ def decision_function(self, X):
             X,
             accept_sparse=["csr", "csc"],
             dtype=None,
-            force_all_finite=False,
+            ensure_all_finite=False,
             reset=False,
         )
 
@@ -1279,7 +1279,7 @@ def predict(self, X):
             X,
             accept_sparse=["csr", "csc"],
             dtype=None,
-            force_all_finite=False,
+            ensure_all_finite=False,
             reset=False,
         )

sklearn/ensemble/_forest.py

Lines changed: 4 additions & 4 deletions

@@ -362,7 +362,7 @@ def fit(self, X, y, sample_weight=None):
             multi_output=True,
             accept_sparse="csc",
             dtype=DTYPE,
-            force_all_finite=False,
+            ensure_all_finite=False,
         )
         # _compute_missing_values_in_feature_mask checks if X has missing values and
         # will raise an error if the underlying tree base estimator can't handle missing
@@ -630,16 +630,16 @@ def _validate_X_predict(self, X):
         Validate X whenever one tries to predict, apply, predict_proba."""
         check_is_fitted(self)
         if self.estimators_[0]._support_missing_values(X):
-            force_all_finite = "allow-nan"
+            ensure_all_finite = "allow-nan"
         else:
-            force_all_finite = True
+            ensure_all_finite = True
 
         X = self._validate_data(
             X,
             dtype=DTYPE,
             accept_sparse="csr",
             reset=False,
-            force_all_finite=force_all_finite,
+            ensure_all_finite=ensure_all_finite,
         )
         if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
             raise ValueError("No support for np.int64 index based sparse matrices")

sklearn/ensemble/_gb.py

Lines changed: 1 addition & 1 deletion

@@ -773,7 +773,7 @@ def fit(self, X, y, sample_weight=None, monitor=None):
             dtype=DTYPE,
             order="C",
             accept_sparse="csr",
-            force_all_finite=False,
+            ensure_all_finite=False,
         )
         raw_predictions = self._raw_predict(X_train)
         self._resize_state()

sklearn/ensemble/_hist_gradient_boosting/binning.py

Lines changed: 2 additions & 2 deletions

@@ -194,7 +194,7 @@ def fit(self, X, y=None):
                 )
             )
 
-        X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)
+        X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False)
         max_bins = self.n_bins - 1
 
         rng = check_random_state(self.random_state)
@@ -275,7 +275,7 @@ def transform(self, X):
         X_binned : array-like of shape (n_samples, n_features)
             The binned data (fortran-aligned).
         """
-        X = check_array(X, dtype=[X_DTYPE], force_all_finite=False)
+        X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False)
         check_is_fitted(self)
         if X.shape[1] != self.n_bins_non_missing_.shape[0]:
             raise ValueError(

sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py

Lines changed: 1 addition & 1 deletion

@@ -263,7 +263,7 @@ def _preprocess_X(self, X, *, reset):
         """
         # If there is a preprocessor, we let the preprocessor handle the validation.
         # Otherwise, we validate the data ourselves.
-        check_X_kwargs = dict(dtype=[X_DTYPE], force_all_finite=False)
+        check_X_kwargs = dict(dtype=[X_DTYPE], ensure_all_finite=False)
         if not reset:
             if self._preprocessor is None:
                 return self._validate_data(X, reset=False, **check_X_kwargs)
