Apply pylint fixes in extmath to randomized_pca · scikit-learn/scikit-learn@99c1b1d · GitHub
[go: up one dir, main page]

Skip to content

Commit 99c1b1d

Browse files
Apply pylint fixes in extmath to randomized_pca
1 parent 46c2540 commit 99c1b1d

File tree

1 file changed

+16
-7
lines changed

1 file changed

+16
-7
lines changed

sklearn/utils/extmath.py

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -440,7 +440,8 @@ def randomized_pca(A, n_components, n_oversamples=10, n_iter="auto",
440440
"""
441441
if n_iter == "auto":
442442
# Checks if the number of iterations is explicitly specified
443-
# Adjust n_iter. 7 was found a good compromise for PCA. See sklearn #5299
443+
# Adjust n_iter. 7 was found a good compromise for PCA.
444+
# See sklearn #5299
444445
n_iter = 7 if n_components < .1 * min(A.shape) else 4
445446

446447
# Deal with "auto" mode
@@ -455,7 +456,9 @@ def randomized_pca(A, n_components, n_oversamples=10, n_iter="auto",
455456
c = np.atleast_2d(A.mean(axis=0))
456457

457458
if n_samples >= n_features:
458-
Q = random_state.normal(size=(n_features, n_components + n_oversamples))
459+
Q = random_state.normal(
460+
size=(n_features, n_components + n_oversamples)
461+
)
459462
if A.dtype.kind == "f":
460463
# Ensure f32 is preserved as f32
461464
Q = Q.astype(A.dtype, copy=False)
@@ -464,30 +467,36 @@ def randomized_pca(A, n_components, n_oversamples=10, n_iter="auto",
464467

465468
# Normalized power iterations
466469
for _ in range(n_iter):
467-
Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
470+
Q = safe_sparse_dot(A.T, Q) - \
471+
safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
468472
Q = _normalize_power_iteration(Q, power_iteration_normalizer)
469473
Q = safe_sparse_dot(A, Q) - safe_sparse_dot(c, Q)
470474
Q = _normalize_power_iteration(Q, power_iteration_normalizer)
471475

472476
Q, _ = linalg.qr(Q, mode="economic")
473477

474-
QA = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
478+
QA = safe_sparse_dot(A.T, Q) - \
479+
safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
475480
R, s, V = linalg.svd(QA.T, full_matrices=False)
476481
U = Q.dot(R)
477482

478483
else: # n_features > n_samples
479-
Q = random_state.normal(size=(n_samples, n_components + n_oversamples))
484+
Q = random_state.normal(
485+
size=(n_samples, n_components + n_oversamples)
486+
)
480487
if A.dtype.kind == "f":
481488
# Ensure f32 is preserved as f32
482489
Q = Q.astype(A.dtype, copy=False)
483490

484-
Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
491+
Q = safe_sparse_dot(A.T, Q) - \
492+
safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
485493

486494
# Normalized power iterations
487495
for _ in range(n_iter):
488496
Q = safe_sparse_dot(A, Q) - safe_sparse_dot(c, Q)
489497
Q = _normalize_power_iteration(Q, power_iteration_normalizer)
490-
Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
498+
Q = safe_sparse_dot(A.T, Q) - \
499+
safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
491500
Q = _normalize_power_iteration(Q, power_iteration_normalizer)
492501

493502
Q, _ = linalg.qr(Q, mode="economic")

0 commit comments

Comments (0)