scikit-learn/scikit-learn

Commit d926ff1

MAINT remove np.product and inf/nan aliases in favor of canonical names (#25741)
1 parent c9810e8 commit d926ff1

15 files changed: +24 -24 lines changed
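For context, every spelling replaced in this commit is a NumPy alias that was deprecated and later removed in NumPy 2.0. A quick sketch of the mapping (plain NumPy, nothing scikit-learn specific):

import numpy as np

# Canonical spellings used after this commit; the replaced aliases are noted.
assert np.isnan(np.nan)              # replaces np.NaN
assert np.isinf(np.inf)              # replaces np.Inf and np.infty
assert -np.inf == float("-inf")      # replaces np.NINF
assert np.prod([2, 3, 4]) == 24      # replaces np.product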

benchmarks/bench_20newsgroups.py

Lines changed: 1 addition & 1 deletion

@@ -47,7 +47,7 @@
 print(f"X_train.shape = {X_train.shape}")
 print(f"X_train.format = {X_train.format}")
 print(f"X_train.dtype = {X_train.dtype}")
-print(f"X_train density = {X_train.nnz / np.product(X_train.shape)}")
+print(f"X_train density = {X_train.nnz / np.prod(X_train.shape)}")
 print(f"y_train {y_train.shape}")
 print(f"X_test {X_test.shape}")
 print(f"X_test.format = {X_test.format}")

doc/developers/develop.rst

Lines changed: 1 addition & 1 deletion

@@ -533,7 +533,7 @@ general only be determined at runtime.
 The current set of estimator tags are:

 allow_nan (default=False)
-    whether the estimator supports data with missing values encoded as np.NaN
+    whether the estimator supports data with missing values encoded as np.nan

 binary_only (default=False)
     whether estimator supports binary classification but lacks multi-class
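For illustration, a minimal sketch of how an estimator advertised the allow_nan tag via the _more_tags mechanism in use at the time of this commit; MyNaNTolerantEstimator is hypothetical:

from sklearn.base import BaseEstimator

class MyNaNTolerantEstimator(BaseEstimator):
    """Hypothetical estimator that accepts np.nan in its input."""

    def _more_tags(self):
        # Overrides the allow_nan default of False.
        return {"allow_nan": True}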

doc/modules/grid_search.rst

Lines changed: 2 additions & 2 deletions

@@ -660,8 +660,8 @@ Robustness to failure
 Some parameter settings may result in a failure to ``fit`` one or more folds
 of the data. By default, this will cause the entire search to fail, even if
 some parameter settings could be fully evaluated. Setting ``error_score=0``
-(or `=np.NaN`) will make the procedure robust to such failure, issuing a
-warning and setting the score for that fold to 0 (or `NaN`), but completing
+(or `=np.nan`) will make the procedure robust to such failure, issuing a
+warning and setting the score for that fold to 0 (or `nan`), but completing
 the search.

 .. _alternative_cv:
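A short usage sketch of the documented behavior (the estimator and parameter grid here are made up):

import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# A fold whose fit raises is scored as np.nan (with a warning)
# instead of aborting the whole search.
search = GridSearchCV(SVC(), {"C": [0.1, 1, 10]}, error_score=np.nan)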

sklearn/_loss/glm_distribution.py

Lines changed: 1 addition & 1 deletion

@@ -222,7 +222,7 @@ def power(self, power):

         if power <= 0:
             # Extreme Stable or Normal distribution
-            self._lower_bound = DistributionBoundary(-np.Inf, inclusive=False)
+            self._lower_bound = DistributionBoundary(-np.inf, inclusive=False)
         elif 0 < power < 1:
             raise ValueError(
                 "Tweedie distribution is only defined for power<=0 and power>=1."

sklearn/decomposition/_nmf.py

Lines changed: 1 addition & 1 deletion

@@ -155,7 +155,7 @@ def _beta_divergence(X, W, H, beta, square_root=False):
     # Itakura-Saito divergence
     elif beta == 0:
         div = X_data / WH_data
-        res = np.sum(div) - np.product(X.shape) - np.sum(np.log(div))
+        res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))

     # beta-divergence, beta not in (0, 1, 2)
     else:
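The changed line implements the Itakura-Saito divergence, sum(x/y - log(x/y) - 1) over all entries; np.prod(X.shape) contributes the "-1 per entry" term for a dense X. A quick numerical check of that identity:

import numpy as np

rng = np.random.default_rng(0)
X = rng.uniform(0.1, 1.0, size=(3, 4))
WH = rng.uniform(0.1, 1.0, size=(3, 4))

div = X / WH
res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))
assert np.isclose(res, np.sum(div - np.log(div) - 1))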

sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py

Lines changed: 2 additions & 2 deletions

@@ -294,9 +294,9 @@ def test_missing_values_support(n_bins, n_bins_non_missing, X_trans_expected):

     X = [
         [1, 1, 0],
-        [np.NaN, np.NaN, 0],
+        [np.nan, np.nan, 0],
         [2, 1, 0],
-        [np.NaN, 2, 1],
+        [np.nan, 2, 1],
         [3, 2, 1],
         [4, 1, 0],
     ]

sklearn/ensemble/tests/test_bagging.py

Lines changed: 2 additions & 2 deletions

@@ -831,7 +831,7 @@ def test_bagging_regressor_with_missing_inputs():
             [2, None, 6],
             [2, np.nan, 6],
             [2, np.inf, 6],
-            [2, np.NINF, 6],
+            [2, -np.inf, 6],
         ]
     )
     y_values = [

@@ -872,7 +872,7 @@ def test_bagging_classifier_with_missing_inputs():
             [2, None, 6],
             [2, np.nan, 6],
             [2, np.inf, 6],
-            [2, np.NINF, 6],
+            [2, -np.inf, 6],
         ]
     )
     y = np.array([3, 6, 6, 6, 6])

sklearn/feature_selection/tests/test_from_model.py

Lines changed: 4 additions & 4 deletions

@@ -532,8 +532,8 @@ def test_fit_accepts_nan_inf():
     model = SelectFromModel(estimator=clf)

     nan_data = data.copy()
-    nan_data[0] = np.NaN
-    nan_data[1] = np.Inf
+    nan_data[0] = np.nan
+    nan_data[1] = np.inf

     model.fit(data, y)

@@ -546,8 +546,8 @@ def test_transform_accepts_nan_inf():
     model = SelectFromModel(estimator=clf)
     model.fit(nan_data, y)

-    nan_data[0] = np.NaN
-    nan_data[1] = np.Inf
+    nan_data[0] = np.nan
+    nan_data[1] = np.inf

     model.transform(nan_data)
sklearn/feature_selection/tests/test_rfe.py

Lines changed: 2 additions & 2 deletions

@@ -505,8 +505,8 @@ def test_rfe_allow_nan_inf_in_x(cv):
     y = iris.target

     # add nan and inf value to X
-    X[0][0] = np.NaN
-    X[0][1] = np.Inf
+    X[0][0] = np.nan
+    X[0][1] = np.inf

     clf = MockClassifier()
     if cv is not None:

sklearn/feature_selection/tests/test_variance_threshold.py

Lines changed: 2 additions & 2 deletions

@@ -54,9 +54,9 @@ def test_zero_variance_floating_point_error():
 def test_variance_nan():
     arr = np.array(data, dtype=np.float64)
     # add single NaN and feature should still be included
-    arr[0, 0] = np.NaN
+    arr[0, 0] = np.nan
     # make all values in feature NaN and feature should be rejected
-    arr[:, 1] = np.NaN
+    arr[:, 1] = np.nan

     for X in [arr, csr_matrix(arr), csc_matrix(arr), bsr_matrix(arr)]:
         sel = VarianceThreshold().fit(X)

sklearn/mixture/tests/test_bayesian_mixture.py

Lines changed: 1 addition & 1 deletion

@@ -244,7 +244,7 @@ def test_monotonic_likelihood():
         random_state=rng,
         tol=1e-3,
     )
-    current_lower_bound = -np.infty
+    current_lower_bound = -np.inf
     # Do one training iteration at a time so we can make sure that the
     # training log likelihood increases after each iteration.
     for _ in range(600):

sklearn/mixture/tests/test_gaussian_mixture.py

Lines changed: 1 addition & 1 deletion

@@ -986,7 +986,7 @@ def test_monotonic_likelihood():
         random_state=rng,
         tol=1e-7,
     )
-    current_log_likelihood = -np.infty
+    current_log_likelihood = -np.inf
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", ConvergenceWarning)
         # Do one training iteration at a time so we can make sure that the
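In both mixture tests, seeding the running bound with -np.inf makes the first monotonicity check trivially pass; schematically:

import numpy as np

current = -np.inf
for value in [-10.0, -4.2, -4.0]:  # e.g. successive log-likelihoods
    assert value >= current        # any finite value exceeds -np.inf
    current = value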

sklearn/model_selection/_search.py

Lines changed: 2 additions & 2 deletions

@@ -151,7 +151,7 @@ def __iter__(self):

     def __len__(self):
         """Number of points on the grid."""
-        # Product function that can handle iterables (np.product can't).
+        # Product function that can handle iterables (np.prod can't).
         product = partial(reduce, operator.mul)
         return sum(
             product(len(v) for v in p.values()) if p else 1 for p in self.param_grid

@@ -184,7 +184,7 @@ def __getitem__(self, ind):
         # Reverse so most frequent cycling parameter comes first
         keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
         sizes = [len(v_list) for v_list in values_lists]
-        total = np.product(sizes)
+        total = np.prod(sizes)

         if ind >= total:
             # Try the next grid
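The comment fix in __len__ is worth unpacking: the reduce-based product consumes any iterable, including a generator expression, whereas np.prod needs a materialized sequence. A sketch:

import operator
from functools import partial, reduce
import numpy as np

product = partial(reduce, operator.mul)

sizes = [2, 3, 4]
assert product(iter(sizes)) == 24  # happily consumes a bare iterator
assert np.prod(sizes) == 24        # fine on a list or array
# np.prod(iter(sizes)) would not reliably consume the iterator,
# which is why __len__ builds its own product function.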

sklearn/utils/sparsefuncs.py

Lines changed: 1 addition & 1 deletion

@@ -452,7 +452,7 @@ def _sparse_min_or_max(X, axis, min_or_max):
         if X.nnz == 0:
             return zero
         m = min_or_max.reduce(X.data.ravel())
-        if X.nnz != np.product(X.shape):
+        if X.nnz != np.prod(X.shape):
             m = min_or_max(zero, m)
         return m
     if axis < 0:
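The np.prod comparison above detects implicit zeros: when nnz is smaller than the total cell count, some entries are unstored zeros, so a reduction over X.data alone would miss them. A sketch of the same logic with min:

import numpy as np
from scipy.sparse import csr_matrix

X = csr_matrix(np.array([[1, 0], [2, 3]]))
m = np.minimum.reduce(X.data.ravel())  # min over stored values only: 1
if X.nnz != np.prod(X.shape):          # implicit zeros are present
    m = np.minimum(0, m)               # so zero must enter the comparison
assert m == 0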

sklearn/utils/tests/test_pprint.py

Lines changed: 1 addition & 1 deletion

@@ -278,7 +278,7 @@ def test_changed_only():
     expected = """SimpleImputer(missing_values=0)"""
     assert imputer.__repr__() == expected

-    # Defaults to np.NaN, trying with float('NaN')
+    # Defaults to np.nan, trying with float('NaN')
     imputer = SimpleImputer(missing_values=float("NaN"))
     expected = """SimpleImputer()"""
     assert imputer.__repr__() == expected
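This test leans on NaN semantics: float("NaN") and np.nan never compare equal (NaN is unequal even to itself), yet both denote the same default missing-value marker, which is why the changed-only repr still collapses to SimpleImputer(). A quick check of the underlying facts:

import math
import numpy as np

assert float("NaN") != np.nan                           # NaN never equals NaN
assert math.isnan(float("NaN")) and math.isnan(np.nan)  # but both are NaN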
