@@ -440,7 +440,8 @@ def randomized_pca(A, n_components, n_oversamples=10, n_iter="auto",
     """
     if n_iter == "auto":
         # Checks if the number of iterations is explicitly specified
-        # Adjust n_iter. 7 was found a good compromise for PCA. See sklearn #5299
+        # Adjust n_iter. 7 was found a good compromise for PCA.
+        # See sklearn #5299
         n_iter = 7 if n_components < .1 * min(A.shape) else 4

     # Deal with "auto" mode
@@ -464,30 +465,36 @@ def randomized_pca(A, n_components, n_oversamples=10, n_iter="auto",

        # Normalized power iterations
        for _ in range(n_iter):
-            Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
+            Q = safe_sparse_dot(A.T, Q) - \
+                safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
            Q = _normalize_power_iteration(Q, power_iteration_normalizer)
            Q = safe_sparse_dot(A, Q) - safe_sparse_dot(c, Q)
            Q = _normalize_power_iteration(Q, power_iteration_normalizer)

        Q, _ = linalg.qr(Q, mode="economic")

-        QA = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
+        QA = safe_sparse_dot(A.T, Q) - \
+            safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
        R, s, V = linalg.svd(QA.T, full_matrices=False)
        U = Q.dot(R)

    else:  # n_features > n_samples
-        Q = random_state.normal(size=(n_samples, n_components + n_oversamples))
+        Q = random_state.normal(
+            size=(n_samples, n_components + n_oversamples)
+        )
        if A.dtype.kind == "f":
            # Ensure f32 is preserved as f32
            Q = Q.astype(A.dtype, copy=False)

-        Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
+        Q = safe_sparse_dot(A.T, Q) - \
+            safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])

        # Normalized power iterations
        for _ in range(n_iter):
            Q = safe_sparse_dot(A, Q) - safe_sparse_dot(c, Q)
            Q = _normalize_power_iteration(Q, power_iteration_normalizer)
-            Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
+            Q = safe_sparse_dot(A.T, Q) - \
+                safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
            Q = _normalize_power_iteration(Q, power_iteration_normalizer)

        Q, _ = linalg.qr(Q, mode="economic")
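Note on the lines being re-wrapped here: the subtracted safe_sparse_dot(c, Q) and safe_sparse_dot(c.T, Q.sum(axis=0)[None, :]) terms implement implicit mean-centering. With c a (1, n_features) row vector of column means, (A - c) @ Q equals A @ Q - c @ Q, and (A - c).T @ Q equals A.T @ Q - c.T @ Q.sum(axis=0)[None, :], so a sparse A never has to be densified by explicit centering. A minimal NumPy sketch of the two identities (illustration only, not part of this commit; array sizes are arbitrary):

# Sketch (not part of the patch): the implicit-centering identities used above.
# Assumes, as in the surrounding function, that c holds the column means of A
# as a (1, n_features) row vector.
import numpy as np

rng = np.random.RandomState(0)
A = rng.random_sample((50, 20))    # stands in for the (possibly sparse) data matrix
c = A.mean(axis=0, keepdims=True)  # column means, shape (1, 20)

Q1 = rng.normal(size=(20, 5))
# (A - c) @ Q1 == A @ Q1 - c @ Q1  (the (1, 5) row broadcasts over all samples)
assert np.allclose((A - c) @ Q1, A @ Q1 - c @ Q1)

Q2 = rng.normal(size=(50, 5))
# (A - c).T @ Q2 == A.T @ Q2 - c.T @ Q2.sum(axis=0)[None, :]
assert np.allclose((A - c).T @ Q2,
                   A.T @ Q2 - c.T @ Q2.sum(axis=0)[None, :])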