@@ -1,6 +1,7 @@
"""
- scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.3.0
- to be deleted after scipy 1.3.0 becomes a dependency in scikit-lean
+ scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.7.1
+ was supposed to be deleted after scipy 1.3.0 became a dependency in
+ scikit-learn, but somehow hasn't been deleted yet
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).

@@ -10,41 +11,28 @@
       Toward the Optimal Preconditioned Eigensolver: Locally Optimal
       Block Preconditioned Conjugate Gradient Method.
       SIAM Journal on Scientific Computing 23, no. 2,
-        pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+        pp. 517-541. :doi:`10.1137/S1064827500366124`

.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
       Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
-        in hypre and PETSc. https://arxiv.org/abs/0705.2626
+        in hypre and PETSc. :arxiv:`0705.2626`

.. [3] A. V. Knyazev's C and MATLAB implementations:
-        https://bitbucket.org/joseroman/blopex
+        https://github.com/lobpcg/blopex

"""

- from __future__ import division, print_function, absolute_import
import numpy as np
- from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky, orth,
+ from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,
                          LinAlgError)
from scipy.sparse.linalg import aslinearoperator
+ from numpy import block as bmat

__all__ = ['lobpcg']


- def bmat(*args, **kwargs):
-     import warnings
-     with warnings.catch_warnings(record=True):
-         warnings.filterwarnings(
-             'ignore', '.*the matrix subclass is not the recommended way.*')
-         return np.bmat(*args, **kwargs)
-
-
- def _save(ar, fileName):
-     # Used only when verbosity level > 10.
-     np.savetxt(fileName, ar)
-
-
def _report_nonhermitian(M, name):
    """
-     Report if `M` is not a hermitian matrix given its type.
+     Report if `M` is not a Hermitian matrix given its type.
    """
    from scipy.linalg import norm

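A note on the import change above: the deleted local `bmat` wrapper existed only to suppress NumPy's matrix-subclass deprecation warning around `np.bmat`; `numpy.block` assembles the same blocked structure but returns a plain `ndarray`, so the warning filter becomes unnecessary. A minimal sketch of the equivalence (the 2x2 blocks are invented for illustration):

import numpy as np

A = np.eye(2)
Z = np.zeros((2, 2))

old = np.bmat([[A, Z], [Z, A]])   # np.matrix subclass; can trigger the warning
new = np.block([[A, Z], [Z, A]])  # plain np.ndarray, same blocked layout

assert np.array_equal(np.asarray(old), new)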
@@ -118,7 +106,7 @@ def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
        else:
            blockVectorBV = None
    except LinAlgError:
-         # raise ValueError('Cholesky has failed')
+         #raise ValueError('Cholesky has failed')
        blockVectorV = None
        blockVectorBV = None
        VBV = None
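For context on this failure path: `_b_orthonormalize` B-orthonormalizes a block V through a Cholesky factorization of the small Gram matrix V^H B V, and the `except LinAlgError` branch signals failure by returning `None`s when that Gram matrix is not numerically positive definite. A standalone sketch of the same idea (the helper name and shapes are illustrative, not the module's API):

import numpy as np
from scipy.linalg import cholesky, inv, LinAlgError

def b_orthonormalize_sketch(V, B):
    """Return V' with V'^H B V' = I, or None if Cholesky fails."""
    VBV = V.conj().T @ (B @ V)      # small m-by-m Gram matrix
    try:
        R = cholesky(VBV)           # upper triangular: VBV = R^H R
    except LinAlgError:
        return None                 # mirrors the fallback above
    return V @ inv(R)               # V R^{-1} is B-orthonormal

rng = np.random.default_rng(0)
n, m = 20, 3
B = np.diag(rng.random(n) + 1.0)    # SPD example mass matrix
V = rng.standard_normal((n, m))
Vq = b_orthonormalize_sketch(V, B)
assert np.allclose(Vq.conj().T @ B @ Vq, np.eye(m))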
@@ -142,7 +130,7 @@ def _get_indx(_lambda, num, largest):

def lobpcg(A, X,
           B=None, M=None, Y=None,
-            tol=None, maxiter=20,
+            tol=None, maxiter=None,
           largest=True, verbosityLevel=0,
           retLambdaHistory=False, retResidualNormsHistory=False):
    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
@@ -172,7 +160,7 @@ def lobpcg(A, X,
        Solver tolerance (stopping criterion).
        The default is ``tol=n*sqrt(eps)``.
    maxiter : int, optional
-         Maximum number of iterations. The default is ``maxiter=min(n, 20)``.
+         Maximum number of iterations. The default is ``maxiter = 20``.
    largest : bool, optional
        When True, solve for the largest eigenvalues, otherwise the smallest.
    verbosityLevel : int, optional
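For a feel of these defaults, a tiny sketch (assuming float64 and an illustrative n; the tolerance formula is the one quoted above, and the `maxiter` fallback matches the code change further down):

import numpy as np

n = 100
eps = np.finfo(np.float64).eps     # ~2.22e-16 for float64
tol = n * np.sqrt(eps)             # default tol = n*sqrt(eps), ~1.5e-6 here
maxiter = None
if maxiter is None:                # the new default handling
    maxiter = 20
print(tol, maxiter)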
@@ -213,8 +201,7 @@ def lobpcg(A, X,
    It is not that ``n`` should be large for the LOBPCG to work, but rather the
    ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
    and ``n=10``, it works though ``n`` is small. The method is intended
-     for extremely large ``n / m``, see e.g., reference [28] in
-     https://arxiv.org/abs/0705.2626
+     for extremely large ``n / m`` [4]_.

    The convergence speed depends basically on two factors:

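To make the ``n / m`` remark concrete, here is a hedged mini-example (the diagonal test matrix and sizes are invented for illustration) showing that even ``n=10`` with a single-vector block ``m=1`` runs fine:

import numpy as np
from scipy.sparse.linalg import lobpcg

n = 10
A = np.diag(np.arange(1, n + 1, dtype=np.float64))  # eigenvalues 1..10
X = np.ones((n, 1))                                 # m = 1 initial block
eigenvalues, eigenvectors = lobpcg(A, X, largest=False, maxiter=100)
print(eigenvalues)  # approaches [1.0], the smallest eigenvalue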
@@ -234,15 +221,21 @@ def lobpcg(A, X,
           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
           Block Preconditioned Conjugate Gradient Method.
           SIAM Journal on Scientific Computing 23, no. 2,
-            pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+            pp. 517-541. :doi:`10.1137/S1064827500366124`

    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
-            (BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626
+            (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`

    .. [3] A. V. Knyazev's C and MATLAB implementations:
           https://bitbucket.org/joseroman/blopex

+     .. [4] S. Yamada, T. Imamura, T. Kano, and M. Machida (2006),
+            High-performance computing for exact numerical approaches to
+            quantum many-body problems on the earth simulator. In Proceedings
+            of the 2006 ACM/IEEE Conference on Supercomputing.
+            :doi:`10.1145/1188455.1188504`
+
    Examples
    --------

@@ -270,7 +263,8 @@ def lobpcg(A, X,
    Initial guess for eigenvectors, should have linearly independent
    columns. Column dimension = number of requested eigenvalues.

-     >>> X = np.random.rand(n, 3)
+     >>> rng = np.random.default_rng()
+     >>> X = rng.random((n, 3))

    Preconditioner is the inverse of A in this example:

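Pulling the documented example together into one hedged, runnable snippet (the diagonal test matrix and solver arguments are illustrative; only the `rng`/`X` lines are verbatim from the docstring above):

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import LinearOperator, lobpcg

n = 100
vals = np.arange(1, n + 1, dtype=np.float64)
A = spdiags(vals, 0, n, n)                      # A = diag(1, ..., n)

rng = np.random.default_rng()
X = rng.random((n, 3))                          # initial block: 3 eigenpairs

# Preconditioner: the (here trivially available) inverse of A.
invA = spdiags(1.0 / vals, 0, n, n)
M = LinearOperator((n, n), matvec=lambda x: invA @ x, dtype=np.float64)

eigenvalues, eigenvectors = lobpcg(A, X, M=M, largest=False, maxiter=80)
print(eigenvalues)  # close to [1., 2., 3.]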
@@ -302,7 +296,8 @@ def lobpcg(A, X,
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
-     maxIterations = maxiter
+     if maxiter is None:
+         maxiter = 20

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
@@ -429,7 +424,7 @@ def lobpcg(A, X,
    iterationNumber = -1
    restart = True
    explicitGramFlag = False
-     while iterationNumber < maxIterations:
+     while iterationNumber < maxiter:
        iterationNumber += 1
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)
@@ -487,15 +482,13 @@ def lobpcg(A, X,
487
482
##
488
483
# B-orthogonalize the preconditioned residuals to X.
489
484
if B is not None :
490
- activeBlockVectorR = activeBlockVectorR - \
491
- np .matmul (blockVectorX ,
492
- np .matmul (blockVectorBX .T .conj (),
493
- activeBlockVectorR ))
485
+ activeBlockVectorR = activeBlockVectorR - np .matmul (blockVectorX ,
486
+ np .matmul (blockVectorBX .T .conj (),
487
+ activeBlockVectorR ))
494
488
else :
495
- activeBlockVectorR = activeBlockVectorR - \
496
- np .matmul (blockVectorX ,
497
- np .matmul (blockVectorX .T .conj (),
498
- activeBlockVectorR ))
489
+ activeBlockVectorR = activeBlockVectorR - np .matmul (blockVectorX ,
490
+ np .matmul (blockVectorX .T .conj (),
491
+ activeBlockVectorR ))
499
492
500
493
##
501
494
# B-orthonormalize the preconditioned residuals.
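As a sanity check on this projection step: when ``blockVectorX`` is B-orthonormal (X^H B X = I) and B is Hermitian, subtracting X (B X)^H R leaves the block R exactly B-orthogonal to X. A small self-contained sketch (the random SPD B is invented for illustration):

import numpy as np

rng = np.random.default_rng(0)
n, m = 50, 4
B = np.diag(rng.random(n) + 1.0)            # Hermitian positive definite
X = np.linalg.qr(rng.standard_normal((n, m)))[0]
L = np.linalg.cholesky(X.T @ B @ X)
X = X @ np.linalg.inv(L).T                  # now X^T B X = I
BX = B @ X

R = rng.standard_normal((n, m))             # stand-in preconditioned residuals
R = R - np.matmul(X, np.matmul(BX.T.conj(), R))  # the projection above
print(np.max(np.abs(X.T @ B @ R)))          # ~0 up to rounding: B-orthogonal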