@@ -226,8 +226,8 @@ def make_classification(
226
226
centroids *= 2 * class_sep
227
227
centroids -= class_sep
228
228
if not hypercube :
229
- centroids *= generator .rand ( n_clusters , 1 )
230
- centroids *= generator .rand ( 1 , n_informative )
229
+ centroids *= generator .uniform ( size = ( n_clusters , 1 ) )
230
+ centroids *= generator .uniform ( size = ( 1 , n_informative ) )
231
231
232
232
# Initially draw informative features from the standard normal
233
233
X [:, :n_informative ] = generator .standard_normal (size = (n_samples , n_informative ))
@@ -239,22 +239,22 @@ def make_classification(
239
239
y [start :stop ] = k % n_classes # assign labels
240
240
X_k = X [start :stop , :n_informative ] # slice a view of the cluster
241
241
242
- A = 2 * generator .rand ( n_informative , n_informative ) - 1
242
+ A = 2 * generator .uniform ( size = ( n_informative , n_informative ) ) - 1
243
243
X_k [...] = np .dot (X_k , A ) # introduce random covariance
244
244
245
245
X_k += centroid # shift the cluster to a vertex
246
246
247
247
# Create redundant features
248
248
if n_redundant > 0 :
249
- B = 2 * generator .rand ( n_informative , n_redundant ) - 1
249
+ B = 2 * generator .uniform ( size = ( n_informative , n_redundant ) ) - 1
250
250
X [:, n_informative : n_informative + n_redundant ] = np .dot (
251
251
X [:, :n_informative ], B
252
252
)
253
253
254
254
# Repeat some features
255
255
if n_repeated > 0 :
256
256
n = n_informative + n_redundant
257
- indices = ((n - 1 ) * generator .rand ( n_repeated ) + 0.5 ).astype (np .intp )
257
+ indices = ((n - 1 ) * generator .uniform ( size = n_repeated ) + 0.5 ).astype (np .intp )
258
258
X [:, n : n + n_repeated ] = X [:, indices ]
259
259
260
260
# Fill useless features
@@ -263,16 +263,16 @@ def make_classification(
263
263
264
264
# Randomly replace labels
265
265
if flip_y >= 0.0 :
266
- flip_mask = generator .rand ( n_samples ) < flip_y
266
+ flip_mask = generator .uniform ( size = n_samples ) < flip_y
267
267
y [flip_mask ] = generator .randint (n_classes , size = flip_mask .sum ())
268
268
269
269
# Randomly shift and scale
270
270
if shift is None :
271
- shift = (2 * generator .rand ( n_features ) - 1 ) * class_sep
271
+ shift = (2 * generator .uniform ( size = n_features ) - 1 ) * class_sep
272
272
X += shift
273
273
274
274
if scale is None :
275
- scale = 1 + 100 * generator .rand ( n_features )
275
+ scale = 1 + 100 * generator .uniform ( size = n_features )
276
276
X *= scale
277
277
278
278
if shuffle :
@@ -391,10 +391,10 @@ def make_multilabel_classification(
391
391
)
392
392
393
393
generator = check_random_state (random_state )
394
- p_c = generator .rand ( n_classes )
394
+ p_c = generator .uniform ( size = n_classes )
395
395
p_c /= p_c .sum ()
396
396
cumulative_p_c = np .cumsum (p_c )
397
- p_w_c = generator .rand ( n_features , n_classes )
397
+ p_w_c = generator .uniform ( size = ( n_features , n_classes ) )
398
398
p_w_c /= np .sum (p_w_c , axis = 0 )
399
399
400
400
def sample_example ():
@@ -409,7 +409,7 @@ def sample_example():
409
409
y = set ()
410
410
while len (y ) != y_size :
411
411
# pick a class with probability P(c)
412
- c = np .searchsorted (cumulative_p_c , generator .rand ( y_size - len (y )))
412
+ c = np .searchsorted (cumulative_p_c , generator .uniform ( size = y_size - len (y )))
413
413
y .update (c )
414
414
y = list (y )
415
415
@@ -427,7 +427,7 @@ def sample_example():
427
427
# sample words with replacement from selected classes
428
428
cumulative_p_w_sample = p_w_c .take (y , axis = 1 ).sum (axis = 1 ).cumsum ()
429
429
cumulative_p_w_sample /= cumulative_p_w_sample [- 1 ]
430
- words = np .searchsorted (cumulative_p_w_sample, generator .rand ( n_words ))
430
+ words = np .searchsorted (cumulative_p_w_sample , generator .uniform ( size = n_words ))
431
431
return words , y
432
432
433
433
X_indices = array .array ("i" )
@@ -610,7 +610,9 @@ def make_regression(
610
610
# zeros (the other features are not correlated to y and should be ignored
611
611
# by a sparsifying regularizers such as L1 or elastic net)
612
612
ground_truth = np .zeros ((n_features , n_targets ))
613
- ground_truth [:n_informative , :] = 100 * generator .rand (n_informative , n_targets )
613
+ ground_truth [:n_informative , :] = 100 * generator .uniform (
614
+ size = (n_informative , n_targets )
615
+ )
614
616
615
617
y = np .dot (X , ground_truth ) + bias
616
618
@@ -1015,7 +1017,7 @@ def make_friedman1(n_samples=100, n_features=10, *, noise=0.0, random_state=None
1015
1017
1016
1018
generator = check_random_state (random_state )
1017
1019
1018
- X = generator .rand ( n_samples , n_features )
1020
+ X = generator .uniform ( size = ( n_samples , n_features ) )
1019
1021
y = (
1020
1022
10 * np .sin (np .pi * X [:, 0 ] * X [:, 1 ])
1021
1023
+ 20 * (X [:, 2 ] - 0.5 ) ** 2
@@ -1078,7 +1080,7 @@ def make_friedman2(n_samples=100, *, noise=0.0, random_state=None):
1078
1080
"""
1079
1081
generator = check_random_state (random_state )
1080
1082
1081
- X = generator .rand ( n_samples , 4 )
1083
+ X = generator .uniform ( size = ( n_samples , 4 ) )
1082
1084
X [:, 0 ] *= 100
1083
1085
X [:, 1 ] *= 520 * np .pi
1084
1086
X [:, 1 ] += 40 * np .pi
@@ -1143,7 +1145,7 @@ def make_friedman3(n_samples=100, *, noise=0.0, random_state=None):
1143
1145
"""
1144
1146
generator = check_random_state (random_state )
1145
1147
1146
- X = generator .rand ( n_samples , 4 )
1148
+ X = generator .uniform ( size = ( n_samples , 4 ) )
1147
1149
X [:, 0 ] *= 100
1148
1150
X [:, 1 ] *= 520 * np .pi
1149
1151
X [:, 1 ] += 40 * np .pi
@@ -1379,9 +1381,9 @@ def make_spd_matrix(n_dim, *, random_state=None):
1379
1381
"""
1380
1382
generator = check_random_state (random_state )
1381
1383
1382
- A = generator .rand ( n_dim , n_dim )
1384
+ A = generator .uniform ( size = ( n_dim , n_dim ) )
1383
1385
U , _ , Vt = linalg .svd (np .dot (A .T , A ), check_finite = False )
1384
- X = np .dot (np .dot (U , 1.0 + np .diag (generator .rand ( n_dim ))), Vt )
1386
+ X = np .dot (np .dot (U , 1.0 + np .diag (generator .uniform ( size = n_dim ))), Vt )
1385
1387
1386
1388
return X
1387
1389
@@ -1441,11 +1443,11 @@ def make_sparse_spd_matrix(
1441
1443
random_state = check_random_state (random_state )
1442
1444
1443
1445
chol = - np .eye (dim )
1444
- aux = random_state .rand ( dim , dim )
1446
+ aux = random_state .uniform ( size = ( dim , dim ) )
1445
1447
aux [aux < alpha ] = 0
1446
1448
aux [aux > alpha ] = smallest_coef + (
1447
1449
largest_coef - smallest_coef
1448
- ) * random_state .rand ( np .sum (aux > alpha ))
1450
+ ) * random_state .uniform ( size = np .sum (aux > alpha ))
1449
1451
aux = np .tril (aux , k = - 1 )
1450
1452
1451
1453
# Permute the lines: we don't want to have asymmetries in the final
@@ -1509,15 +1511,15 @@ def make_swiss_roll(n_samples=100, *, noise=0.0, random_state=None, hole=False):
1509
1511
generator = check_random_state (random_state )
1510
1512
1511
1513
if not hole :
1512
- t = 1.5 * np .pi * (1 + 2 * generator .rand ( n_samples ))
1513
- y = 21 * generator .rand ( n_samples )
1514
+ t = 1.5 * np .pi * (1 + 2 * generator .uniform ( size = n_samples ))
1515
+ y = 21 * generator .uniform ( size = n_samples )
1514
1516
else :
1515
1517
corners = np .array (
1516
1518
[[np .pi * (1.5 + i ), j * 7 ] for i in range (3 ) for j in range (3 )]
1517
1519
)
1518
1520
corners = np .delete (corners , 4 , axis = 0 )
1519
1521
corner_index = generator .choice (8 , n_samples )
1520
- parameters = generator .rand ( 2 , n_samples ) * np .array ([[np .pi ], [7 ]])
1522
+ parameters = generator .uniform ( size = ( 2 , n_samples ) ) * np .array ([[np .pi ], [7 ]])
1521
1523
t , y = corners [corner_index ].T + parameters
1522
1524
1523
1525
x = t * np .cos (t )
@@ -1560,9 +1562,9 @@ def make_s_curve(n_samples=100, *, noise=0.0, random_state=None):
1560
1562
"""
1561
1563
generator = check_random_state (random_state )
1562
1564
1563
- t = 3 * np .pi * (generator .rand ( 1 , n_samples ) - 0.5 )
1565
+ t = 3 * np .pi * (generator .uniform ( size = ( 1 , n_samples ) ) - 0.5 )
1564
1566
x = np .sin (t )
1565
- y = 2.0 * generator .rand ( 1 , n_samples )
1567
+ y = 2.0 * generator .uniform ( size = ( 1 , n_samples ) )
1566
1568
z = np .sign (t ) * (np .cos (t ) - 1 )
1567
1569
1568
1570
X = np .concatenate ((x , y , z ))
0 commit comments