MAINT update externals/_lobpcg.py with upstream (#21163) · scikit-learn/scikit-learn@0578921

Commit 0578921

DimitriPapadopoulos authored and glemaitre committed
MAINT update externals/_lobpcg.py with upstream (#21163)
1 parent 333a080 commit 0578921

File tree

1 file changed: +30 −38 lines changed

sklearn/externals/_lobpcg.py

Lines changed: 30 additions & 38 deletions
@@ -1,5 +1,5 @@
 """
-scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.3.0
+scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.7.1
 to be deleted after scipy 1.3.0 becomes a dependency in scikit-lean
 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
@@ -10,41 +10,28 @@
        Toward the Optimal Preconditioned Eigensolver: Locally Optimal
        Block Preconditioned Conjugate Gradient Method.
        SIAM Journal on Scientific Computing 23, no. 2,
-       pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+       pp. 517-541. :doi:`10.1137/S1064827500366124`
 
 .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
        Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
-       in hypre and PETSc. https://arxiv.org/abs/0705.2626
+       in hypre and PETSc. :arxiv:`0705.2626`
 
 .. [3] A. V. Knyazev's C and MATLAB implementations:
-       https://bitbucket.org/joseroman/blopex
+       https://github.com/lobpcg/blopex
 """
 
-from __future__ import division, print_function, absolute_import
 import numpy as np
-from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky, orth,
+from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,
                           LinAlgError)
 from scipy.sparse.linalg import aslinearoperator
+from numpy import block as bmat
 
 __all__ = ['lobpcg']
 
 
-def bmat(*args, **kwargs):
-    import warnings
-    with warnings.catch_warnings(record=True):
-        warnings.filterwarnings(
-            'ignore', '.*the matrix subclass is not the recommended way.*')
-        return np.bmat(*args, **kwargs)
-
-
-def _save(ar, fileName):
-    # Used only when verbosity level > 10.
-    np.savetxt(fileName, ar)
-
-
 def _report_nonhermitian(M, name):
     """
-    Report if `M` is not a hermitian matrix given its type.
+    Report if `M` is not a Hermitian matrix given its type.
     """
     from scipy.linalg import norm
 
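The main substantive change in this hunk replaces the local `bmat` wrapper, which only existed to silence the `np.matrix` deprecation warning around `np.bmat`, with an aliased import of `numpy.block`. A minimal sketch (values are illustrative, not from the commit) of why the two are interchangeable here:

```python
import numpy as np

# numpy.block assembles a block matrix from a nested list, the same layout
# np.bmat produced, but it returns a plain ndarray instead of the deprecated
# np.matrix subclass -- so no warning needs to be suppressed.
A = np.eye(2)
Z = np.zeros((2, 2))
G = np.block([[A, Z],
              [Z, A]])  # 4x4 ndarray with identity blocks on the diagonal

assert isinstance(G, np.ndarray) and G.shape == (4, 4)
```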
@@ -118,7 +105,7 @@ def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
         else:
             blockVectorBV = None
     except LinAlgError:
-        # raise ValueError('Cholesky has failed')
+        #raise ValueError('Cholesky has failed')
         blockVectorV = None
         blockVectorBV = None
         VBV = None
@@ -142,7 +129,7 @@ def _get_indx(_lambda, num, largest):
 
 def lobpcg(A, X,
            B=None, M=None, Y=None,
-           tol=None, maxiter=20,
+           tol=None, maxiter=None,
            largest=True, verbosityLevel=0,
            retLambdaHistory=False, retResidualNormsHistory=False):
     """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
@@ -172,7 +159,7 @@ def lobpcg(A, X,
         Solver tolerance (stopping criterion).
         The default is ``tol=n*sqrt(eps)``.
     maxiter : int, optional
-        Maximum number of iterations. The default is ``maxiter=min(n, 20)``.
+        Maximum number of iterations. The default is ``maxiter = 20``.
     largest : bool, optional
         When True, solve for the largest eigenvalues, otherwise the smallest.
     verbosityLevel : int, optional
@@ -213,8 +200,7 @@ def lobpcg(A, X,
     It is not that ``n`` should be large for the LOBPCG to work, but rather the
     ratio ``n / m`` should be large. It you call LOBPCG with ``m=1``
     and ``n=10``, it works though ``n`` is small. The method is intended
-    for extremely large ``n / m``, see e.g., reference [28] in
-    https://arxiv.org/abs/0705.2626
+    for extremely large ``n / m`` [4]_.
 
     The convergence speed depends basically on two factors:
 
@@ -234,15 +220,21 @@ def lobpcg(A, X,
            Toward the Optimal Preconditioned Eigensolver: Locally Optimal
            Block Preconditioned Conjugate Gradient Method.
            SIAM Journal on Scientific Computing 23, no. 2,
-           pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
+           pp. 517-541. :doi:`10.1137/S1064827500366124`
 
     .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
            (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
-           (BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626
+           (BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
 
     .. [3] A. V. Knyazev's C and MATLAB implementations:
            https://bitbucket.org/joseroman/blopex
 
+    .. [4] S. Yamada, T. Imamura, T. Kano, and M. Machida (2006),
+           High-performance computing for exact numerical approaches to
+           quantum many-body problems on the earth simulator. In Proceedings
+           of the 2006 ACM/IEEE Conference on Supercomputing.
+           :doi:`10.1145/1188455.1188504`
+
     Examples
     --------
 
@@ -270,7 +262,8 @@ def lobpcg(A, X,
     Initial guess for eigenvectors, should have linearly independent
     columns. Column dimension = number of requested eigenvalues.
 
-    >>> X = np.random.rand(n, 3)
+    >>> rng = np.random.default_rng()
+    >>> X = rng.random((n, 3))
 
     Preconditioner in the inverse of A in this example:
 
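The docstring example now seeds `X` with NumPy's Generator API rather than the legacy `np.random.rand`. A runnable sketch in the spirit of that example, using the public `scipy.sparse.linalg.lobpcg`; the matrix size, seed, and `maxiter` value are illustrative assumptions, not from the diff:

```python
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import lobpcg

n = 100
A = spdiags(np.arange(1, n + 1, dtype=float), 0, n, n)  # eigenvalues 1..n

rng = np.random.default_rng(0)   # explicit, seedable Generator
X = rng.random((n, 3))           # initial guess for 3 requested eigenpairs

# Solve for the three largest eigenvalues of A.
eigenvalues, eigenvectors = lobpcg(A, X, largest=True, maxiter=80)
print(eigenvalues)  # should approximate [100., 99., 98.]
```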
@@ -302,7 +295,8 @@ def lobpcg(A, X,
     blockVectorX = X
     blockVectorY = Y
     residualTolerance = tol
-    maxIterations = maxiter
+    if maxiter is None:
+        maxiter = 20
 
     if blockVectorY is not None:
         sizeY = blockVectorY.shape[1]
@@ -429,7 +423,7 @@ def lobpcg(A, X,
     iterationNumber = -1
     restart = True
     explicitGramFlag = False
-    while iterationNumber < maxIterations:
+    while iterationNumber < maxiter:
         iterationNumber += 1
         if verbosityLevel > 0:
             print('iteration %d' % iterationNumber)
@@ -487,15 +481,13 @@ def lobpcg(A, X,
         ##
         # B-orthogonalize the preconditioned residuals to X.
         if B is not None:
-            activeBlockVectorR = activeBlockVectorR - \
-                np.matmul(blockVectorX,
-                          np.matmul(blockVectorBX.T.conj(),
-                                    activeBlockVectorR))
+            activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
+                                    np.matmul(blockVectorBX.T.conj(),
+                                              activeBlockVectorR))
         else:
-            activeBlockVectorR = activeBlockVectorR - \
-                np.matmul(blockVectorX,
-                          np.matmul(blockVectorX.T.conj(),
-                                    activeBlockVectorR))
+            activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
+                                    np.matmul(blockVectorX.T.conj(),
+                                              activeBlockVectorR))
 
         ##
         # B-orthonormalize the preconditioned residuals.
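This hunk only reflows the line continuations to match upstream; the computation is the projection R ← R − X((BX)ᴴR), which strips the components of the residual block lying in the span of X. A standalone sketch of the `B is None` branch (sizes arbitrary, for illustration) showing the updated residuals end up orthogonal to `X`:

```python
import numpy as np

rng = np.random.default_rng(0)
X, _ = np.linalg.qr(rng.random((50, 3)))  # orthonormal block: X^H X = I
R = rng.random((50, 3))                   # stand-in residual block

# Subtract the component of R lying in span(X), as in the `else` branch.
R = R - np.matmul(X, np.matmul(X.T.conj(), R))

assert np.allclose(X.T.conj() @ R, 0)     # R is now orthogonal to X
```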
