"""
- scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.3.0
+ scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.7.1
to be deleted after scipy 1.3.0 becomes a dependency in scikit-learn

Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
       Toward the Optimal Preconditioned Eigensolver: Locally Optimal
       Block Preconditioned Conjugate Gradient Method.
       SIAM Journal on Scientific Computing 23, no. 2,
-        pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+        pp. 517-541. :doi:`10.1137/S1064827500366124`

.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
       Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
-        in hypre and PETSc. https://arxiv.org/abs/0705.2626
+        in hypre and PETSc. :arxiv:`0705.2626`

.. [3] A. V. Knyazev's C and MATLAB implementations:
-        https://bitbucket.org/joseroman/blopex
+        https://github.com/lobpcg/blopex
"""

- from __future__ import division, print_function, absolute_import
import numpy as np
- from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky, orth,
+ from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,
                          LinAlgError)
from scipy.sparse.linalg import aslinearoperator
+ from numpy import block as bmat

__all__ = ['lobpcg']


- def bmat(*args, **kwargs):
-     import warnings
-     with warnings.catch_warnings(record=True):
-         warnings.filterwarnings(
-             'ignore', '.*the matrix subclass is not the recommended way.*')
-         return np.bmat(*args, **kwargs)
-
-
- def _save(ar, fileName):
-     # Used only when verbosity level > 10.
-     np.savetxt(fileName, ar)
-
-
def _report_nonhermitian(M, name):
    """
-     Report if `M` is not a hermitian matrix given its type.
+     Report if `M` is not a Hermitian matrix given its type.
    """
    from scipy.linalg import norm
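The deleted ``bmat`` shim above existed only to silence the ``np.matrix`` deprecation warning that ``np.bmat`` triggers; the replacement import aliases ``numpy.block``, which assembles the same block layout as a plain ndarray. A minimal sketch of the difference (the identity/zero blocks are illustrative, not from this file):

import numpy as np

a, z = np.eye(2), np.zeros((2, 2))

# np.block builds a plain ndarray from a nested list of blocks, giving the
# same 2x2 layout np.bmat produced, but without the np.matrix subclass.
m = np.block([[a, z],
              [z, a]])
assert type(m) is np.ndarray and m.shape == (4, 4)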
@@ -118,7 +105,7 @@ def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
        else:
            blockVectorBV = None
    except LinAlgError:
-         # raise ValueError('Cholesky has failed')
+         #raise ValueError('Cholesky has failed')
        blockVectorV = None
        blockVectorBV = None
        VBV = None
@@ -142,7 +129,7 @@ def _get_indx(_lambda, num, largest):

def lobpcg(A, X,
           B=None, M=None, Y=None,
-            tol=None, maxiter=20,
+            tol=None, maxiter=None,
           largest=True, verbosityLevel=0,
           retLambdaHistory=False, retResidualNormsHistory=False):
    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
@@ -172,7 +159,7 @@ def lobpcg(A, X,
        Solver tolerance (stopping criterion).
        The default is ``tol=n*sqrt(eps)``.
    maxiter : int, optional
-         Maximum number of iterations. The default is ``maxiter=min(n, 20)``.
+         Maximum number of iterations. The default is ``maxiter = 20``.
    largest : bool, optional
        When True, solve for the largest eigenvalues, otherwise the smallest.
    verbosityLevel : int, optional
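As a sketch of how these parameters combine in a call, the snippet below uses a diagonal test matrix in the spirit of the Examples section further down; the matrix, block size, tolerance, and iteration cap are all illustrative, not taken from this diff:

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import lobpcg  # upstream version of this vendored solver

n = 100
vals = np.arange(1, n + 1, dtype=np.float64)
A = spdiags(vals, 0, n, n)      # symmetric matrix with eigenvalues 1..n

rng = np.random.default_rng(0)
X = rng.random((n, 3))          # 3 columns -> 3 requested eigenpairs

# tol and maxiter are the stopping criteria documented above;
# largest=False asks for the smallest eigenvalues instead of the largest.
eigenvalues, eigenvectors = lobpcg(A, X, tol=1e-8, maxiter=40, largest=False)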
@@ -213,8 +200,7 @@ def lobpcg(A, X,
    It is not that ``n`` should be large for the LOBPCG to work, but rather the
    ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
    and ``n=10``, it works even though ``n`` is small. The method is intended
-     for extremely large ``n / m``, see e.g., reference [28] in
-     https://arxiv.org/abs/0705.2626
+     for extremely large ``n / m`` [4]_.

    The convergence speed depends basically on two factors:
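To make the ``n / m`` guidance above concrete, a small sketch (setup and sizes are illustrative, not part of this diff): both calls should run, but only the second is in the regime the method targets.

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import lobpcg

rng = np.random.default_rng(0)

# n / m = 10: works, though a dense solver such as scipy.linalg.eigh
# would be the natural tool at this size.
A_small = np.diag(np.arange(1.0, 11.0))
w_small, _ = lobpcg(A_small, rng.random((10, 1)), largest=True, maxiter=100)

# n / m = 25000: the intended regime, where A is only ever touched
# through cheap sparse matrix-vector products.
n, m = 100_000, 4
A_large = spdiags(np.arange(1.0, n + 1.0), 0, n, n)
w_large, _ = lobpcg(A_large, rng.random((n, m)), largest=True, maxiter=40)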
@@ -234,15 +220,21 @@ def lobpcg(A, X,
           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
           Block Preconditioned Conjugate Gradient Method.
           SIAM Journal on Scientific Computing 23, no. 2,
-            pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+            pp. 517-541. :doi:`10.1137/S1064827500366124`

    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
-            (BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626
+            (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`

    .. [3] A. V. Knyazev's C and MATLAB implementations:
           https://bitbucket.org/joseroman/blopex

+     .. [4] S. Yamada, T. Imamura, T. Kano, and M. Machida (2006),
+            High-performance computing for exact numerical approaches to
+            quantum many-body problems on the Earth Simulator. In Proceedings
+            of the 2006 ACM/IEEE Conference on Supercomputing.
+            :doi:`10.1145/1188455.1188504`
+
    Examples
    --------
@@ -270,7 +262,8 @@ def lobpcg(A, X,
    Initial guess for eigenvectors, should have linearly independent
    columns. Column dimension = number of requested eigenvalues.

-     >>> X = np.random.rand(n, 3)
+     >>> rng = np.random.default_rng()
+     >>> X = rng.random((n, 3))

    Preconditioner is the inverse of A in this example:
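An aside on the change just above: ``np.random.default_rng`` is NumPy's current Generator API, replacing the legacy ``np.random.rand``; for reproducible examples one would seed it. A two-line sketch (the seed and shape are arbitrary):

import numpy as np

rng = np.random.default_rng(42)   # seeded Generator: reproducible draws
X = rng.random((100, 3))          # replaces legacy np.random.rand(100, 3)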
@@ -302,7 +295,8 @@ def lobpcg(A, X,
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
-     maxIterations = maxiter
+     if maxiter is None:
+         maxiter = 20

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
@@ -429,7 +423,7 @@ def lobpcg(A, X,
    iterationNumber = -1
    restart = True
    explicitGramFlag = False
-     while iterationNumber < maxIterations:
+     while iterationNumber < maxiter:
        iterationNumber += 1
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)
@@ -487,15 +481,13 @@ def lobpcg(A, X,
        ##
        # B-orthogonalize the preconditioned residuals to X.
        if B is not None:
-             activeBlockVectorR = activeBlockVectorR - \
-                 np.matmul(blockVectorX,
-                           np.matmul(blockVectorBX.T.conj(),
-                                     activeBlockVectorR))
+             activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
+                     np.matmul(blockVectorBX.T.conj(),
+                               activeBlockVectorR))
        else:
-             activeBlockVectorR = activeBlockVectorR - \
-                 np.matmul(blockVectorX,
-                           np.matmul(blockVectorX.T.conj(),
-                                     activeBlockVectorR))
+             activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
+                     np.matmul(blockVectorX.T.conj(),
+                               activeBlockVectorR))

        ##
        # B-orthonormalize the preconditioned residuals.
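The reformatted statements above implement the projection R <- R - X @ (B @ X).conj().T @ R, which leaves the residual block B-orthogonal to ``X`` whenever ``X`` is B-orthonormal. A self-contained sketch of that identity (all names, sizes, and the diagonal ``B`` are illustrative):

import numpy as np

rng = np.random.default_rng(0)
n, m = 50, 3
B = np.diag(rng.random(n) + 1.0)      # an SPD stand-in for the B operator

# Make X B-orthonormal (X.conj().T @ B @ X == I) via a Cholesky factor.
X = rng.random((n, m))
L = np.linalg.cholesky(X.conj().T @ B @ X)
X = X @ np.linalg.inv(L).conj().T

R = rng.random((n, m))                # stand-in for the residual block
BX = B @ X

# The projection from the hunk above: afterwards (B X)^H R == 0.
R = R - np.matmul(X, np.matmul(BX.T.conj(), R))
assert np.allclose(BX.T.conj() @ R, 0.0, atol=1e-8)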