Correct the deprecation of the random_integers numpy function. (#6712) · scikit-learn/scikit-learn@78a6748 · GitHub

Commit 78a6748

tguillemot authored and TomDLT committed
Correct the deprecation of the random_integers numpy function. (#6712)
1 parent 427179b commit 78a6748

File tree

9 files changed: +23 −27 lines changed
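
For context (not part of the commit itself): np.random.random_integers(low, high) samples from the inclusive range [low, high] and is deprecated in favour of np.random.randint(low, high), whose upper bound is exclusive. Every call site below therefore either adds 1 to the upper bound or drops a "- 1", so the sampled range is unchanged. A minimal sketch of the correspondence, assuming only NumPy:

# Illustration only, not taken from the repository.
import numpy as np

rng = np.random.RandomState(0)

draws = rng.randint(-50, 51, size=1000)   # replaces random_integers(-50, 50, ...)
assert draws.min() >= -50 and draws.max() <= 50

coin = rng.randint(0, 2, size=10)         # replaces random_integers(0, 1, ...)
assert coin.min() >= 0 and coin.max() <= 1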

benchmarks/bench_plot_fastkmeans.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ def compute_bench(samples_range, features_range):
             print('Iteration %03d of %03d' % (it, max_it))
             print('==============================')
             print()
-            data = nr.random_integers(-50, 50, (n_samples, n_features))
+            data = nr.randint(-50, 51, (n_samples, n_features))
 
             print('K-Means')
             tstart = time()

examples/cluster/plot_adjusted_for_chance_measures.py

Lines changed: 4 additions & 5 deletions
@@ -41,18 +41,17 @@ def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
     When fixed_n_classes is not None the first labeling is considered a ground
     truth class assignment with fixed number of classes.
     """
-    random_labels = np.random.RandomState(seed).random_integers
+    random_labels = np.random.RandomState(seed).randint
     scores = np.zeros((len(n_clusters_range), n_runs))
 
     if fixed_n_classes is not None:
-        labels_a = random_labels(low=0, high=fixed_n_classes - 1,
-                                 size=n_samples)
+        labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
 
     for i, k in enumerate(n_clusters_range):
         for j in range(n_runs):
             if fixed_n_classes is None:
-                labels_a = random_labels(low=0, high=k - 1, size=n_samples)
-                labels_b = random_labels(low=0, high=k - 1, size=n_samples)
+                labels_a = random_labels(low=0, high=k, size=n_samples)
+                labels_b = random_labels(low=0, high=k, size=n_samples)
             scores[i, j] = score_func(labels_a, labels_b)
     return scores

sklearn/cluster/k_means_.py

Lines changed: 4 additions & 6 deletions
@@ -626,8 +626,7 @@ def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                 "Setting it to 3*k" % (init_size, k),
                 RuntimeWarning, stacklevel=2)
             init_size = 3 * k
-        init_indices = random_state.random_integers(
-            0, n_samples - 1, init_size)
+        init_indices = random_state.randint(0, n_samples, init_size)
         X = X[init_indices]
         x_squared_norms = x_squared_norms[init_indices]
         n_samples = X.shape[0]
@@ -1275,8 +1274,7 @@ def fit(self, X, y=None):
             init_size = n_samples
         self.init_size_ = init_size
 
-        validation_indices = random_state.random_integers(
-            0, n_samples - 1, init_size)
+        validation_indices = random_state.randint(0, n_samples, init_size)
         X_valid = X[validation_indices]
         x_squared_norms_valid = x_squared_norms[validation_indices]
 
@@ -1324,8 +1322,8 @@ def fit(self, X, y=None):
         # criterion
         for iteration_idx in range(n_iter):
             # Sample a minibatch from the full dataset
-            minibatch_indices = random_state.random_integers(
-                0, n_samples - 1, self.batch_size)
+            minibatch_indices = random_state.randint(
+                0, n_samples, self.batch_size)
 
             # Perform the actual update step on the minibatch data
             batch_inertia, centers_squared_diff = _mini_batch_step(
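
A note on the index-sampling pattern above (illustrative, not part of the commit): because randint excludes its upper bound, random_state.randint(0, n_samples, size) covers exactly the valid row indices 0 .. n_samples - 1 that random_integers(0, n_samples - 1, size) used to produce, so the fancy indexing X[indices] that follows cannot go out of bounds. A quick sanity check, assuming only NumPy:

# Hypothetical check, not taken from k_means_.py.
import numpy as np

n_samples = 7
X = np.arange(2 * n_samples).reshape(n_samples, 2)
rng = np.random.RandomState(42)

indices = rng.randint(0, n_samples, 5)    # same range as random_integers(0, n_samples - 1, 5)
assert indices.min() >= 0 and indices.max() < n_samples
minibatch = X[indices]                    # shape (5, 2); no IndexError possible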

sklearn/cluster/tests/test_spectral.py

Lines changed: 1 addition & 1 deletion
@@ -181,7 +181,7 @@ def test_discretize(seed=8):
     for n_samples in [50, 100, 150, 500]:
         for n_class in range(2, 10):
             # random class labels
-            y_true = random_state.random_integers(0, n_class, n_samples)
+            y_true = random_state.randint(0, n_class + 1, n_samples)
             y_true = np.array(y_true, np.float)
             # noise class assignment matrix
             y_indicator = sparse.coo_matrix((np.ones(n_samples),

sklearn/linear_model/randomized_l1.py

Lines changed: 3 additions & 4 deletions
@@ -46,8 +46,8 @@ def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
     for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
                                pre_dispatch=pre_dispatch)(
             delayed(estimator_func)(
-                X, y, weights=scaling * random_state.random_integers(
-                    0, 1, size=(n_features,)),
+                X, y, weights=scaling * random_state.randint(
+                    0, 2, size=(n_features,)),
                 mask=(random_state.rand(n_samples) < sample_fraction),
                 verbose=max(0, verbose - 1),
                 **params)
@@ -627,8 +627,7 @@ def lasso_stability_path(X, y, scaling=0.5, random_state=None,
     paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
         delayed(_lasso_stability_path)(
             X, y, mask=rng.rand(n_samples) < sample_fraction,
-            weights=1. - scaling * rng.random_integers(0, 1,
-                                                       size=(n_features,)),
+            weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
             eps=eps)
         for k in range(n_resampling))

sklearn/metrics/cluster/tests/test_supervised.py

Lines changed: 5 additions & 5 deletions
@@ -110,12 +110,12 @@ def test_non_consicutive_labels():
 def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
                              seed=42):
     # Compute score for random uniform cluster labelings
-    random_labels = np.random.RandomState(seed).random_integers
+    random_labels = np.random.RandomState(seed).randint
     scores = np.zeros((len(k_range), n_runs))
     for i, k in enumerate(k_range):
         for j in range(n_runs):
-            labels_a = random_labels(low=0, high=k - 1, size=n_samples)
-            labels_b = random_labels(low=0, high=k - 1, size=n_samples)
+            labels_a = random_labels(low=0, high=k, size=n_samples)
+            labels_b = random_labels(low=0, high=k, size=n_samples)
             scores[i, j] = score_func(labels_a, labels_b)
     return scores
@@ -195,8 +195,8 @@ def test_v_measure_and_mutual_information(seed=36):
     # Check relation between v_measure, entropy and mutual information
     for i in np.logspace(1, 4, 4).astype(np.int):
         random_state = np.random.RandomState(seed)
-        labels_a, labels_b = random_state.random_integers(0, 10, i),\
-            random_state.random_integers(0, 10, i)
+        labels_a, labels_b = random_state.randint(0, 10, i),\
+            random_state.randint(0, 10, i)
         assert_almost_equal(v_measure_score(labels_a, labels_b),
                             2.0 * mutual_info_score(labels_a, labels_b) /
                             (entropy(labels_a) + entropy(labels_b)), 0)

sklearn/preprocessing/tests/test_data.py

Lines changed: 1 addition & 1 deletion
@@ -420,7 +420,7 @@ def test_standard_scaler_partial_fit_numerical_stability():
     # Sparse input
     size = (100, 3)
     scale = 1e20
-    X = rng.random_integers(0, 1, size).astype(np.float64) * scale
+    X = rng.randint(0, 2, size).astype(np.float64) * scale
     X_csr = sparse.csr_matrix(X)
     X_csc = sparse.csc_matrix(X)

sklearn/semi_supervised/label_propagation.py

Lines changed: 3 additions & 3 deletions
@@ -33,7 +33,7 @@
 >>> from sklearn.semi_supervised import LabelPropagation
 >>> label_prop_model = LabelPropagation()
 >>> iris = datasets.load_iris()
->>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
+>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
 ...    size=len(iris.target)))
 >>> labels = np.copy(iris.target)
 >>> labels[random_unlabeled_points] = -1
@@ -323,7 +323,7 @@ class LabelPropagation(BaseLabelPropagation):
 >>> from sklearn.semi_supervised import LabelPropagation
 >>> label_prop_model = LabelPropagation()
 >>> iris = datasets.load_iris()
->>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
+>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
 ...    size=len(iris.target)))
 >>> labels = np.copy(iris.target)
 >>> labels[random_unlabeled_points] = -1
@@ -417,7 +417,7 @@ class LabelSpreading(BaseLabelPropagation):
 >>> from sklearn.semi_supervised import LabelSpreading
 >>> label_prop_model = LabelSpreading()
 >>> iris = datasets.load_iris()
->>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
+>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
 ...    size=len(iris.target)))
 >>> labels = np.copy(iris.target)
 >>> labels[random_unlabeled_points] = -1
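
The doctest edits above are behaviour-preserving: np.random.randint(0, 2, size=n) draws from {0, 1} exactly as np.random.random_integers(0, 1, size=n) did, so np.where still selects a random subset of points to mark as unlabeled. A small check of that claim (illustrative, not part of the doctests):

# Hypothetical snippet, not taken from label_propagation.py.
import numpy as np

np.random.seed(0)
mask = np.random.randint(0, 2, size=150)   # 0/1 draws, like random_integers(0, 1, ...)
unlabeled, = np.where(mask)                # indices where the draw was 1
assert mask.min() >= 0 and mask.max() <= 1
assert 0 < unlabeled.size < mask.size      # only part of the data ends up unlabeled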

sklearn/utils/tests/test_sparsefuncs.py

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ def test_incr_mean_variance_axis():
     rng = np.random.RandomState(0)
     n_features = 50
     n_samples = 10
-    data_chunks = [rng.random_integers(0, 1, size=n_features)
+    data_chunks = [rng.randint(0, 2, size=n_features)
                    for i in range(n_samples)]
 
     # default params for incr_mean_variance

0 commit comments