MAINT Copy latest version of file from upstream · scikit-learn/scikit-learn@e6b1d82
Commit e6b1d82

MAINT Copy latest version of file from upstream
Actually, scipy/sparse/linalg/eigen/lobpcg/lobpcg.py has not been removed from SciPy in 1.3.0 (or even in the current 1.7.1), so this refreshes the vendored copy to match upstream v1.7.1.
1 parent 6e2388f commit e6b1d82

File tree

1 file changed: +32 −39 lines changed

sklearn/externals/_lobpcg.py

Lines changed: 32 additions & 39 deletions
@@ -1,6 +1,7 @@
 """
-scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.3.0
-to be deleted after scipy 1.3.0 becomes a dependency in scikit-lean
+scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.7.1
+was supposed to be deleted once scipy 1.3.0 became a dependency of
+scikit-learn, but hasn't been deleted yet
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
 
@@ -10,41 +11,28 @@
        Toward the Optimal Preconditioned Eigensolver: Locally Optimal
        Block Preconditioned Conjugate Gradient Method.
        SIAM Journal on Scientific Computing 23, no. 2,
-       pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+       pp. 517-541. :doi:`10.1137/S1064827500366124`
 
 .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
        Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
-       in hypre and PETSc. https://arxiv.org/abs/0705.2626
+       in hypre and PETSc. :arxiv:`0705.2626`
 
 .. [3] A. V. Knyazev's C and MATLAB implementations:
-       https://bitbucket.org/joseroman/blopex
+       https://github.com/lobpcg/blopex
 """
 
-from __future__ import division, print_function, absolute_import
 import numpy as np
-from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky, orth,
+from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,
                           LinAlgError)
 from scipy.sparse.linalg import aslinearoperator
+from numpy import block as bmat
 
 __all__ = ['lobpcg']
 
 
-def bmat(*args, **kwargs):
-    import warnings
-    with warnings.catch_warnings(record=True):
-        warnings.filterwarnings(
-            'ignore', '.*the matrix subclass is not the recommended way.*')
-        return np.bmat(*args, **kwargs)
-
-
-def _save(ar, fileName):
-    # Used only when verbosity level > 10.
-    np.savetxt(fileName, ar)
-
-
 def _report_nonhermitian(M, name):
     """
-    Report if `M` is not a hermitian matrix given its type.
+    Report if `M` is not a Hermitian matrix given its type.
     """
     from scipy.linalg import norm
 
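A note on the import change above: the deleted local `bmat` wrapper existed only to call `np.bmat` while silencing the "matrix subclass is not the recommended way" warning, since `np.bmat` returns an `np.matrix`. `numpy.block` assembles the same block matrix but returns a plain `ndarray`, so aliasing it as `bmat` removes the need for any warning filter. A minimal sketch of the equivalence (the 2x2 blocks here are illustrative only):

    import numpy as np

    a = np.eye(2)
    z = np.zeros((2, 2))

    # np.block assembles nested lists of arrays into a single ndarray,
    # so `from numpy import block as bmat` is a drop-in replacement for
    # the old np.bmat-based wrapper in this file.
    M = np.block([[a, z], [z, a]])
    assert isinstance(M, np.ndarray)          # not np.matrix
    assert M.shape == (4, 4)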
@@ -118,7 +106,7 @@ def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
         else:
             blockVectorBV = None
     except LinAlgError:
-        # raise ValueError('Cholesky has failed')
+        #raise ValueError('Cholesky has failed')
         blockVectorV = None
         blockVectorBV = None
         VBV = None
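For context on the `except LinAlgError` branch: `_b_orthonormalize` makes the columns of a block `V` orthonormal in the `B` inner product by Cholesky-factorizing the Gram matrix `V^H B V`, and the branch fires when that factorization fails (e.g. the block has become numerically rank-deficient), signalling the breakdown to the caller via `None` values. A rough standalone sketch of the idea, not the full function (it omits `retInvR` and the caching of `B @ V`):

    import numpy as np
    from scipy.linalg import cholesky, inv, LinAlgError

    def b_orthonormalize_sketch(B, V):
        # Gram matrix in the B inner product: V^H B V.
        VBV = V.T.conj() @ (B @ V)
        try:
            R = cholesky(VBV)        # upper triangular, VBV = R^H R
        except LinAlgError:
            return None              # caller treats this as a breakdown
        # Columns of V @ inv(R) satisfy (V R^-1)^H B (V R^-1) = I.
        return V @ inv(R)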
@@ -142,7 +130,7 @@ def _get_indx(_lambda, num, largest):
 
 def lobpcg(A, X,
            B=None, M=None, Y=None,
-           tol=None, maxiter=20,
+           tol=None, maxiter=None,
            largest=True, verbosityLevel=0,
            retLambdaHistory=False, retResidualNormsHistory=False):
     """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
@@ -172,7 +160,7 @@ def lobpcg(A, X,
         Solver tolerance (stopping criterion).
         The default is ``tol=n*sqrt(eps)``.
     maxiter : int, optional
-        Maximum number of iterations. The default is ``maxiter=min(n, 20)``.
+        Maximum number of iterations. The default is ``maxiter = 20``.
     largest : bool, optional
         When True, solve for the largest eigenvalues, otherwise the smallest.
     verbosityLevel : int, optional
@@ -213,8 +201,7 @@ def lobpcg(A, X,
     It is not that ``n`` should be large for the LOBPCG to work, but rather the
     ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
     and ``n=10``, it works though ``n`` is small. The method is intended
-    for extremely large ``n / m``, see e.g., reference [28] in
-    https://arxiv.org/abs/0705.2626
+    for extremely large ``n / m`` [4]_.
 
     The convergence speed depends basically on two factors:
 
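The `m=1`, `n=10` case mentioned in this note is easy to try against `scipy.sparse.linalg.lobpcg` (a hypothetical snippet; the seed and test matrix are arbitrary):

    import numpy as np
    from scipy.sparse.linalg import lobpcg

    n, m = 10, 1
    A = np.diag(np.arange(1.0, n + 1.0))   # eigenvalues 1, 2, ..., 10
    rng = np.random.default_rng(0)
    X = rng.random((n, m))
    w, v = lobpcg(A, X, largest=True, maxiter=100)
    # w should be close to [10.], the largest eigenvalue of A.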
@@ -234,15 +221,21 @@ def lobpcg(A, X,
            Toward the Optimal Preconditioned Eigensolver: Locally Optimal
            Block Preconditioned Conjugate Gradient Method.
            SIAM Journal on Scientific Computing 23, no. 2,
-           pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+           pp. 517-541. :doi:`10.1137/S1064827500366124`
 
     .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
            (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
-           (BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626
+           (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
 
     .. [3] A. V. Knyazev's C and MATLAB implementations:
            https://bitbucket.org/joseroman/blopex
 
+    .. [4] S. Yamada, T. Imamura, T. Kano, and M. Machida (2006),
+           High-performance computing for exact numerical approaches to
+           quantum many-body problems on the earth simulator. In Proceedings
+           of the 2006 ACM/IEEE Conference on Supercomputing.
+           :doi:`10.1145/1188455.1188504`
+
     Examples
     --------
 
@@ -270,7 +263,8 @@ def lobpcg(A, X,
     Initial guess for eigenvectors, should have linearly independent
     columns. Column dimension = number of requested eigenvalues.
 
-    >>> X = np.random.rand(n, 3)
+    >>> rng = np.random.default_rng()
+    >>> X = rng.random((n, 3))
 
     Preconditioner is the inverse of A in this example:
 
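The doctest change above moves from NumPy's legacy global-state API to the `Generator` interface added in NumPy 1.17; note that `Generator.random` takes the shape as a single tuple where `np.random.rand` took separate arguments. A seeded version for reproducible runs (seed value arbitrary):

    import numpy as np

    n = 100
    X_legacy = np.random.rand(n, 3)        # old style: global RNG state

    rng = np.random.default_rng(42)        # new style: explicit Generator
    X = rng.random((n, 3))                 # shape passed as a tuple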
@@ -302,7 +296,8 @@ def lobpcg(A, X,
     blockVectorX = X
     blockVectorY = Y
     residualTolerance = tol
-    maxIterations = maxiter
+    if maxiter is None:
+        maxiter = 20
 
     if blockVectorY is not None:
         sizeY = blockVectorY.shape[1]
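Replacing the hard-coded `maxiter=20` default with a `None` sentinel resolved in the body is the usual Python idiom when the docstring should state the effective default rather than the signature baking it in. A toy illustration (hypothetical function, not from this file):

    def solve(maxiter=None):
        # Resolve the sentinel inside the body, mirroring lobpcg:
        if maxiter is None:
            maxiter = 20
        for iteration in range(maxiter):
            pass  # one solver step would go here
        return maxiter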
@@ -429,7 +424,7 @@ def lobpcg(A, X,
     iterationNumber = -1
     restart = True
     explicitGramFlag = False
-    while iterationNumber < maxIterations:
+    while iterationNumber < maxiter:
         iterationNumber += 1
         if verbosityLevel > 0:
             print('iteration %d' % iterationNumber)
@@ -487,15 +482,13 @@ def lobpcg(A, X,
         ##
         # B-orthogonalize the preconditioned residuals to X.
         if B is not None:
-            activeBlockVectorR = activeBlockVectorR - \
-                np.matmul(blockVectorX,
-                          np.matmul(blockVectorBX.T.conj(),
-                                    activeBlockVectorR))
+            activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
+                    np.matmul(blockVectorBX.T.conj(),
+                              activeBlockVectorR))
         else:
-            activeBlockVectorR = activeBlockVectorR - \
-                np.matmul(blockVectorX,
-                          np.matmul(blockVectorX.T.conj(),
-                                    activeBlockVectorR))
+            activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
+                    np.matmul(blockVectorX.T.conj(),
+                              activeBlockVectorR))
 
         ##
         # B-orthonormalize the preconditioned residuals.
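Both branches above perform the same rank-`m` update, `R <- R - X (BX^H R)` with `BX = B @ X` (or `BX = X` when `B` is `None`): the residual block loses its components along `span(X)` as measured in the `B` inner product. A compact sketch of that step (hypothetical helper name):

    import numpy as np

    def b_orthogonalize_to_x(R, X, BX=None):
        # Project R B-orthogonally against span(X); BX is B @ X,
        # or X itself in the standard eigenproblem (B is None).
        if BX is None:
            BX = X
        return R - X @ (BX.T.conj() @ R)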
