Fixed database typo and removed unnecessary class identifier.
This commit is contained in:
parent
00ad49a143
commit
45fb349a7d
5098 changed files with 952558 additions and 85 deletions
@@ -0,0 +1,16 @@
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)

LOBPCG is a preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.

Call the function lobpcg - see help for lobpcg.lobpcg.

"""
from .lobpcg import *

__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
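
A minimal smoke test of the exported entry point, per the docstring's pointer to lobpcg.lobpcg (a sketch, not part of the commit; the 10x10 diagonal system is illustrative only):

    import numpy as np
    from scipy.sparse.linalg import lobpcg

    # Illustrative 10x10 diagonal system: the largest eigenvalue is 10.
    np.random.seed(0)
    A = np.diag(np.arange(1.0, 11.0))
    X = np.random.rand(10, 1)  # initial guess for one eigenpair
    w, v = lobpcg(A, X, largest=True, maxiter=100)
    print(w)  # approximately [10.]
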
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,710 @@
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).

References
----------
.. [1] A. V. Knyazev (2001),
       Toward the Optimal Preconditioned Eigensolver: Locally Optimal
       Block Preconditioned Conjugate Gradient Method.
       SIAM Journal on Scientific Computing 23, no. 2,
       pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124

.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
       Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
       in hypre and PETSc. https://arxiv.org/abs/0705.2626

.. [3] A. V. Knyazev's C and MATLAB implementations:
       https://github.com/lobpcg/blopex
"""

import numpy as np
from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,
                          LinAlgError)
from scipy.sparse.linalg import aslinearoperator
from numpy import block as bmat

__all__ = ['lobpcg']

def _report_nonhermitian(M, name):
    """
    Report if `M` is not a hermitian matrix given its type.
    """
    from scipy.linalg import norm

    md = M - M.T.conj()

    nmd = norm(md, 1)
    tol = 10 * np.finfo(M.dtype).eps
    tol = max(tol, tol * norm(M, 1))
    if nmd > tol:
        print('matrix %s of the type %s is not sufficiently Hermitian:'
              % (name, M.dtype))
        print('condition: %.e < %e' % (nmd, tol))

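
A sketch (not part of the file) of the check above: the matrix is flagged when the 1-norm of M - M.H exceeds a tolerance scaled by the 1-norm of M and the dtype's machine epsilon:

    import numpy as np
    from scipy.linalg import norm

    # A slightly non-symmetric matrix trips the Hermitian check.
    M = np.array([[1.0, 2.0], [2.0 + 1e-6, 1.0]])
    nmd = norm(M - M.T.conj(), 1)
    tol = 10 * np.finfo(M.dtype).eps
    tol = max(tol, tol * norm(M, 1))
    print(nmd > tol)  # True: M is not sufficiently Hermitian
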
def _as2d(ar):
    """
    If the input array is 2D return it, if it is 1D, append a dimension,
    making it a column vector.
    """
    if ar.ndim == 2:
        return ar
    else:  # Assume 1!
        aux = np.array(ar, copy=False)
        aux.shape = (ar.shape[0], 1)
        return aux

def _makeOperator(operatorInput, expectedShape):
    """Takes a dense numpy array or a sparse matrix or
    a function and makes an operator performing matrix * blockvector
    products."""
    if operatorInput is None:
        return None
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    return operator

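
For reference, a sketch (not part of the file) of what aslinearoperator does with the accepted input kinds; scipy.sparse.identity is used here only as a convenient sparse example:

    import numpy as np
    from scipy.sparse import identity
    from scipy.sparse.linalg import aslinearoperator

    # Dense arrays, sparse matrices, and LinearOperators are all normalized
    # to a LinearOperator supporting operator * blockvector products.
    dense_op = aslinearoperator(np.eye(4))
    sparse_op = aslinearoperator(identity(4))
    block = np.ones((4, 2))
    print(dense_op.matmat(block).shape, sparse_op.shape)  # (4, 2) (4, 4)
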
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
    """Changes blockVectorV in place."""
    YBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
    tmp = cho_solve(factYBY, YBV)
    blockVectorV -= np.dot(blockVectorY, tmp)

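
A sketch (not part of the file) of the projection _applyConstraints performs, specialized to B = I: after the update, Y.T @ V vanishes, i.e. V lies in the orthogonal complement of span(Y):

    import numpy as np
    from scipy.linalg import cho_factor, cho_solve

    rng = np.random.RandomState(0)
    Y = rng.rand(10, 2)
    V = rng.rand(10, 3)
    factYBY = cho_factor(Y.T @ Y)           # plays the role of factYBY
    V -= Y @ cho_solve(factYBY, Y.T @ V)    # same update as _applyConstraints
    print(np.max(np.abs(Y.T @ V)))          # ~1e-16
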
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
    """B-orthonormalize the given block vector using Cholesky."""
    normalization = blockVectorV.max(axis=0)+np.finfo(blockVectorV.dtype).eps
    blockVectorV = blockVectorV / normalization
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    else:
        blockVectorBV = blockVectorBV / normalization
    VBV = np.matmul(blockVectorV.T.conj(), blockVectorBV)
    try:
        # VBV is a Cholesky factor from now on...
        VBV = cholesky(VBV, overwrite_a=True)
        VBV = inv(VBV, overwrite_a=True)
        blockVectorV = np.matmul(blockVectorV, VBV)
        # blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T
        if B is not None:
            blockVectorBV = np.matmul(blockVectorBV, VBV)
            # blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T
        else:
            blockVectorBV = None
    except LinAlgError:
        # raise ValueError('Cholesky has failed')
        blockVectorV = None
        blockVectorBV = None
        VBV = None

    if retInvR:
        return blockVectorV, blockVectorBV, VBV, normalization
    else:
        return blockVectorV, blockVectorBV

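
A sketch (not part of the file) of the Cholesky trick used above, for B = I: with R = cholesky(V.T @ V) upper triangular, V @ inv(R) has orthonormal columns:

    import numpy as np
    from scipy.linalg import cholesky, inv

    rng = np.random.RandomState(0)
    V = rng.rand(10, 3)
    R = cholesky(V.T @ V)       # upper triangular by default, V.T@V = R.T@R
    Q = V @ inv(R)
    print(np.allclose(Q.T @ Q, np.eye(3)))  # True
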
def _get_indx(_lambda, num, largest):
    """Get `num` indices into `_lambda` depending on `largest` option."""
    ii = np.argsort(_lambda)
    if largest:
        ii = ii[:-num-1:-1]
    else:
        ii = ii[:num]

    return ii

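
A quick illustration (not part of the file) of the slice semantics: ii[:-num-1:-1] walks the argsort result backwards, so the indices come out ordered from the largest value down:

    import numpy as np

    lam = np.array([0.3, 2.0, -1.0, 5.0])
    order = np.argsort(lam)     # [2, 0, 1, 3]
    print(order[:-2-1:-1])      # [3, 1] -> picks 5.0 then 2.0
    print(order[:2])            # [2, 0] -> picks -1.0 then 0.3
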
def lobpcg(A, X,
           B=None, M=None, Y=None,
           tol=None, maxiter=None,
           largest=True, verbosityLevel=0,
           retLambdaHistory=False, retResidualNormsHistory=False):
    """Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)

    LOBPCG is a preconditioned eigensolver for large symmetric positive
    definite (SPD) generalized eigenproblems.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The symmetric linear operator of the problem, usually a
        sparse matrix. Often called the "stiffness matrix".
    X : ndarray, float32 or float64
        Initial approximation to the ``k`` eigenvectors (non-sparse). If `A`
        has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``.
    B : {dense matrix, sparse matrix, LinearOperator}, optional
        The right hand side operator in a generalized eigenproblem.
        By default, ``B = Identity``. Often called the "mass matrix".
    M : {dense matrix, sparse matrix, LinearOperator}, optional
        Preconditioner to `A`; by default ``M = Identity``.
        `M` should approximate the inverse of `A`.
    Y : ndarray, float32 or float64, optional
        n-by-sizeY matrix of constraints (non-sparse), sizeY < n
        The iterations will be performed in the B-orthogonal complement
        of the column-space of Y. Y must be full rank.
    tol : scalar, optional
        Solver tolerance (stopping criterion).
        The default is ``tol=n*sqrt(eps)``.
    maxiter : int, optional
        Maximum number of iterations. The default is ``maxiter = 20``.
    largest : bool, optional
        When True, solve for the largest eigenvalues, otherwise the smallest.
    verbosityLevel : int, optional
        Controls solver output. The default is ``verbosityLevel=0``.
    retLambdaHistory : bool, optional
        Whether to return eigenvalue history. Default is False.
    retResidualNormsHistory : bool, optional
        Whether to return history of residual norms. Default is False.

    Returns
    -------
    w : ndarray
        Array of ``k`` eigenvalues
    v : ndarray
        An array of ``k`` eigenvectors. `v` has the same shape as `X`.
    lambdas : list of ndarray, optional
        The eigenvalue history, if `retLambdaHistory` is True.
    rnorms : list of ndarray, optional
        The history of residual norms, if `retResidualNormsHistory` is True.

    Notes
    -----
    If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
    the return tuple has the following format
    ``(lambda, V, lambda history, residual norms history)``.

    In the following ``n`` denotes the matrix size and ``m`` the number
    of required eigenvalues (smallest or largest).

    The LOBPCG code internally solves eigenproblems of the size ``3m`` on every
    iteration by calling the "standard" dense eigensolver, so if ``m`` is not
    small enough compared to ``n``, it does not make sense to call the LOBPCG
    code, but rather one should use the "standard" eigensolver, e.g. numpy or
    scipy function in this case.
    If one calls the LOBPCG algorithm for ``5m > n``, it will most likely break
    internally, so the code tries to call the standard function instead.

    It is not that ``n`` should be large for the LOBPCG to work, but rather the
    ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
    and ``n=10``, it works though ``n`` is small. The method is intended
    for extremely large ``n / m``, see e.g., reference [28] in
    https://arxiv.org/abs/0705.2626

    The convergence speed depends basically on two factors:

    1. How well relatively separated the sought eigenvalues are from the rest
       of the eigenvalues. One can try to vary ``m`` to make this better.

    2. How well conditioned the problem is. This can be changed by using proper
       preconditioning. For example, a rod vibration test problem (under tests
       directory) is ill-conditioned for large ``n``, so convergence will be
       slow, unless efficient preconditioning is used. For this specific
       problem, a good simple preconditioner function would be a linear solve
       for `A`, which is easy to code since `A` is tridiagonal.

    References
    ----------
    .. [1] A. V. Knyazev (2001),
           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
           Block Preconditioned Conjugate Gradient Method.
           SIAM Journal on Scientific Computing 23, no. 2,
           pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124

    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
           (BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626

    .. [3] A. V. Knyazev's C and MATLAB implementations:
           https://github.com/lobpcg/blopex

    Examples
    --------

    Solve ``A x = lambda x`` with constraints and preconditioning.

    >>> import numpy as np
    >>> from scipy.sparse import spdiags, issparse
    >>> from scipy.sparse.linalg import lobpcg, LinearOperator
    >>> n = 100
    >>> vals = np.arange(1, n + 1)
    >>> A = spdiags(vals, 0, n, n)
    >>> A.toarray()
    array([[  1.,   0.,   0., ...,   0.,   0.,   0.],
           [  0.,   2.,   0., ...,   0.,   0.,   0.],
           [  0.,   0.,   3., ...,   0.,   0.,   0.],
           ...,
           [  0.,   0.,   0., ...,  98.,   0.,   0.],
           [  0.,   0.,   0., ...,   0.,  99.,   0.],
           [  0.,   0.,   0., ...,   0.,   0., 100.]])

    Constraints:

    >>> Y = np.eye(n, 3)

    Initial guess for eigenvectors, should have linearly independent
    columns. Column dimension = number of requested eigenvalues.

    >>> X = np.random.rand(n, 3)

    Preconditioner is the inverse of `A` in this example:

    >>> invA = spdiags([1./vals], 0, n, n)

    The preconditioner must be defined by a function:

    >>> def precond(x):
    ...     return invA @ x

    The argument x of the preconditioner function is a matrix inside `lobpcg`,
    thus the use of matrix-matrix product ``@``.

    The preconditioner function is passed to lobpcg as a `LinearOperator`:

    >>> M = LinearOperator(matvec=precond, matmat=precond,
    ...                    shape=(n, n), dtype=float)

    Let us now solve the eigenvalue problem for the matrix A:

    >>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
    >>> eigenvalues
    array([4., 5., 6.])

    Note that the vectors passed in Y are the eigenvectors of the 3 smallest
    eigenvalues. The results returned are orthogonal to those.

    """
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
    if maxiter is None:
        maxiter = 20

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    if len(blockVectorX.shape) != 2:
        raise ValueError('expected rank-2 array for argument X')

    n, sizeX = blockVectorX.shape

    if verbosityLevel:
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    A = _makeOperator(A, (n, n))
    B = _makeOperator(B, (n, n))
    M = _makeOperator(M, (n, n))

    if (n - sizeY) < (5 * sizeX):
        # warn('The problem size is small compared to the block size.'
        #      ' Using dense eigensolver instead of LOBPCG.')

        sizeX = min(sizeX, n)

        if blockVectorY is not None:
            raise NotImplementedError('The dense eigensolver '
                                      'does not support constraints.')

        # Define the closed range of indices of eigenvalues to return.
        if largest:
            eigvals = (n - sizeX, n-1)
        else:
            eigvals = (0, sizeX-1)

        A_dense = A(np.eye(n, dtype=A.dtype))
        B_dense = None if B is None else B(np.eye(n, dtype=B.dtype))

        vals, vecs = eigh(A_dense, B_dense, eigvals=eigvals,
                          check_finite=False)
        if largest:
            # Reverse order to be compatible with eigs() in 'LM' mode.
            vals = vals[::-1]
            vecs = vecs[:, ::-1]

        return vals, vecs

    if (residualTolerance is None) or (residualTolerance <= 0.0):
        residualTolerance = np.sqrt(1e-15) * n

    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B(blockVectorY)
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = cho_factor(gramYBY)
        except LinAlgError:
            raise ValueError('cannot handle linearly dependent constraints')

        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A(blockVectorX)
    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)

    _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
    ii = _get_indx(_lambda, sizeX, largest)
    _lambda = _lambda[ii]

    eigBlockVector = np.asarray(eigBlockVector[:, ii])
    blockVectorX = np.dot(blockVectorX, eigBlockVector)
    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
    if B is not None:
        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)

    ##
    # Active index set.
    activeMask = np.ones((sizeX,), dtype=bool)

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = np.eye(sizeX, dtype=A.dtype)
    ident0 = np.eye(sizeX, dtype=A.dtype)

    ##
    # Main iteration loop.

    blockVectorP = None  # set during iteration
    blockVectorAP = None
    blockVectorBP = None

    iterationNumber = -1
    restart = True
    explicitGramFlag = False
    while iterationNumber < maxiter:
        iterationNumber += 1
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)

        if B is not None:
            aux = blockVectorBX * _lambda[np.newaxis, :]
        else:
            aux = blockVectorX * _lambda[np.newaxis, :]

        blockVectorR = blockVectorAX - aux

        aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
        residualNorms = np.sqrt(aux)

        residualNormsHistory.append(residualNorms)

        ii = np.where(residualNorms > residualTolerance, True, False)
        activeMask = activeMask & ii
        if verbosityLevel > 2:
            print(activeMask)

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = np.eye(currentBlockSize, dtype=A.dtype)

        if currentBlockSize == 0:
            break

        if verbosityLevel > 0:
            print('current block size:', currentBlockSize)
            print('eigenvalue:', _lambda)
            print('residual norms:', residualNorms)
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = _as2d(blockVectorR[:, activeMask])

        if iterationNumber > 0:
            activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
            activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
            if B is not None:
                activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M(activeBlockVectorR)

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            _applyConstraints(activeBlockVectorR,
                              gramYBY, blockVectorBY, blockVectorY)

        ##
        # B-orthogonalize the preconditioned residuals to X.
        if B is not None:
            activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
                                     np.matmul(blockVectorBX.T.conj(),
                                               activeBlockVectorR))
        else:
            activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
                                     np.matmul(blockVectorX.T.conj(),
                                               activeBlockVectorR))

        ##
        # B-orthonormalize the preconditioned residuals.
        aux = _b_orthonormalize(B, activeBlockVectorR)
        activeBlockVectorR, activeBlockVectorBR = aux

        activeBlockVectorAR = A(activeBlockVectorR)

        if iterationNumber > 0:
            if B is not None:
                aux = _b_orthonormalize(B, activeBlockVectorP,
                                        activeBlockVectorBP, retInvR=True)
                activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
            else:
                aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)
                activeBlockVectorP, _, invR, normal = aux
            # Function _b_orthonormalize returns None if Cholesky fails
            if activeBlockVectorP is not None:
                activeBlockVectorAP = activeBlockVectorAP / normal
                activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
                restart = False
            else:
                restart = True

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:

        if activeBlockVectorAR.dtype == 'float32':
            myeps = 1
        elif activeBlockVectorR.dtype == 'float32':
            myeps = 1e-4
        else:
            myeps = 1e-8

        if residualNorms.max() > myeps and not explicitGramFlag:
            explicitGramFlag = False
        else:
            # Once explicitGramFlag, forever explicitGramFlag.
            explicitGramFlag = True

        # Shared memory assignments to simplify the code
        if B is None:
            blockVectorBX = blockVectorX
            activeBlockVectorBR = activeBlockVectorR
            if not restart:
                activeBlockVectorBP = activeBlockVectorP

        # Common submatrices:
        gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
        gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)

        if explicitGramFlag:
            gramRAR = (gramRAR + gramRAR.T.conj())/2
            gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
            gramXAX = (gramXAX + gramXAX.T.conj())/2
            gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
            gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
            gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
        else:
            gramXAX = np.diag(_lambda)
            gramXBX = ident0
            gramRBR = ident
            gramXBR = np.zeros((sizeX, currentBlockSize), dtype=A.dtype)

        def _handle_gramA_gramB_verbosity(gramA, gramB):
            if verbosityLevel > 0:
                _report_nonhermitian(gramA, 'gramA')
                _report_nonhermitian(gramB, 'gramB')
            if verbosityLevel > 10:
                # Note: not documented, but leave it in here for now
                np.savetxt('gramA.txt', gramA)
                np.savetxt('gramB.txt', gramB)

        if not restart:
            gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
            gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
            gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
            gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
            gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
            if explicitGramFlag:
                gramPAP = (gramPAP + gramPAP.T.conj())/2
                gramPBP = np.dot(activeBlockVectorP.T.conj(),
                                 activeBlockVectorBP)
            else:
                gramPBP = ident

            gramA = bmat([[gramXAX, gramXAR, gramXAP],
                          [gramXAR.T.conj(), gramRAR, gramRAP],
                          [gramXAP.T.conj(), gramRAP.T.conj(), gramPAP]])
            gramB = bmat([[gramXBX, gramXBR, gramXBP],
                          [gramXBR.T.conj(), gramRBR, gramRBP],
                          [gramXBP.T.conj(), gramRBP.T.conj(), gramPBP]])

            _handle_gramA_gramB_verbosity(gramA, gramB)

            try:
                _lambda, eigBlockVector = eigh(gramA, gramB,
                                               check_finite=False)
            except LinAlgError:
                # try again after dropping the direction vectors P from RR
                restart = True

        if restart:
            gramA = bmat([[gramXAX, gramXAR],
                          [gramXAR.T.conj(), gramRAR]])
            gramB = bmat([[gramXBX, gramXBR],
                          [gramXBR.T.conj(), gramRBR]])

            _handle_gramA_gramB_verbosity(gramA, gramB)

            try:
                _lambda, eigBlockVector = eigh(gramA, gramB,
                                               check_finite=False)
            except LinAlgError:
                raise ValueError('eigh has failed in lobpcg iterations')

        ii = _get_indx(_lambda, sizeX, largest)
        if verbosityLevel > 10:
            print(ii)
            print(_lambda)

        _lambda = _lambda[ii]
        eigBlockVector = eigBlockVector[:, ii]

        lambdaHistory.append(_lambda)

        if verbosityLevel > 10:
            print('lambda:', _lambda)
        # # Normalize eigenvectors!
        # aux = np.sum( eigBlockVector.conj() * eigBlockVector, 0 )
        # eigVecNorms = np.sqrt( aux )
        # eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]
        # eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )

        if verbosityLevel > 10:
            print(eigBlockVector)

        # Compute Ritz vectors.
        if B is not None:
            if not restart:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                pp += np.dot(activeBlockVectorP, eigBlockVectorP)

                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                app += np.dot(activeBlockVectorAP, eigBlockVectorP)

                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
                bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)
                print(bpp)

            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
            blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp

            blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

        else:
            if not restart:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
                eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                pp += np.dot(activeBlockVectorP, eigBlockVectorP)

                app = np.dot(activeBlockVectorAR, eigBlockVectorR)
                app += np.dot(activeBlockVectorAP, eigBlockVectorP)
            else:
                eigBlockVectorX = eigBlockVector[:sizeX]
                eigBlockVectorR = eigBlockVector[sizeX:]

                pp = np.dot(activeBlockVectorR, eigBlockVectorR)
                app = np.dot(activeBlockVectorAR, eigBlockVectorR)

            if verbosityLevel > 10:
                print(pp)
                print(app)

            blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
            blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app

            blockVectorP, blockVectorAP = pp, app

    if B is not None:
        aux = blockVectorBX * _lambda[np.newaxis, :]

    else:
        aux = blockVectorX * _lambda[np.newaxis, :]

    blockVectorR = blockVectorAX - aux

    aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
    residualNorms = np.sqrt(aux)

    # Future work: Need to add Postprocessing here:
    # Making sure eigenvectors "exactly" satisfy the blockVectorY constraints?
    # Making sure eigenvectors are "exactly" orthonormalized by final "exact" RR
    # Computing the actual true residuals

    if verbosityLevel > 0:
        print('final eigenvalue:', _lambda)
        print('final residual norms:', residualNorms)

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
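
A sketch (not part of the file) of the optional history outputs described in the Notes section; with both flags set, the return tuple is (lambda, V, lambda history, residual norms history):

    import numpy as np
    from scipy.sparse import spdiags
    from scipy.sparse.linalg import lobpcg

    np.random.seed(0)
    n, k = 100, 3
    A = spdiags(np.arange(1, n + 1, dtype=float), 0, n, n)
    X = np.random.rand(n, k)
    w, v, lambdas, rnorms = lobpcg(A, X, tol=1e-8, maxiter=100, largest=False,
                                   retLambdaHistory=True,
                                   retResidualNormsHistory=True)
    print(w)            # approximately [1. 2. 3.]
    print(len(rnorms))  # one residual-norm vector per iteration
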
@@ -0,0 +1,13 @@

def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration

    config = Configuration('lobpcg', parent_package, top_path)
    config.add_data_dir('tests')

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,405 @@
""" Test functions for the sparse.linalg.eigen.lobpcg module
"""
import itertools
import platform

import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
                           assert_allclose, assert_array_less)

import pytest

from numpy import ones, r_, diag
from numpy.random import rand
from scipy.linalg import eig, eigh, toeplitz, orth
from scipy.sparse import spdiags, diags, eye
from scipy.sparse.linalg import eigs, LinearOperator
from scipy.sparse.linalg.eigen.lobpcg import lobpcg

def ElasticRod(n):
    """Build the matrices for the generalized eigenvalue problem of the
    fixed-free elastic rod vibration model.
    """
    L = 1.0
    le = L/n
    rho = 7.85e3
    S = 1.e-4
    E = 2.1e11
    mass = rho*S*le/6.
    k = E*S/le
    A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
    B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
    return A, B


def MikotaPair(n):
    """Build a pair of full diagonal matrices for the generalized eigenvalue
    problem. The Mikota pair acts as a nice test since the eigenvalues are the
    squares of the integers n, n=1,2,...
    """
    x = np.arange(1, n+1)
    B = diag(1./x)
    y = np.arange(n-1, 0, -1)
    z = np.arange(2*n-1, 0, -2)
    A = diag(z)-diag(y, -1)-diag(y, 1)
    return A, B

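
The MikotaPair docstring claims the eigenvalues are the squares of the integers; a quick standalone check with scipy.linalg.eigh (a sketch, not part of the test file):

    import numpy as np
    from scipy.linalg import eigh

    n = 10
    x = np.arange(1, n+1)
    B = np.diag(1./x)
    y = np.arange(n-1, 0, -1)
    z = np.arange(2*n-1, 0, -2)
    A = np.diag(z) - np.diag(y, -1) - np.diag(y, 1)
    w = eigh(A, B, eigvals_only=True)
    print(np.round(w))  # [1. 4. 9. ... 100.], i.e. 1**2, 2**2, ..., n**2
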
def compare_solutions(A, B, m):
    """Check eig vs. lobpcg consistency.
    """
    n = A.shape[0]
    np.random.seed(0)
    V = rand(n, m)
    X = orth(V)
    eigvals, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False)
    eigvals.sort()
    w, _ = eig(A, b=B)
    w.sort()
    assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)


def test_Small():
    A, B = ElasticRod(10)
    compare_solutions(A, B, 10)
    A, B = MikotaPair(10)
    compare_solutions(A, B, 10)


def test_ElasticRod():
    A, B = ElasticRod(100)
    compare_solutions(A, B, 20)


def test_MikotaPair():
    A, B = MikotaPair(100)
    compare_solutions(A, B, 20)


def test_regression():
    """Check the eigenvalue of the identity matrix is one.
    """
    # https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
    n = 10
    X = np.ones((n, 1))
    A = np.identity(n)
    w, _ = lobpcg(A, X)
    assert_allclose(w, [1])

def test_diagonal():
    """Check for diagonal matrices.
    """
    # This test was moved from '__main__' in lobpcg.py.
    # Coincidentally or not, this is the same eigensystem
    # required to reproduce arpack bug
    # https://forge.scilab.org/p/arpack-ng/issues/1397/
    # even using the same n=100.

    np.random.seed(1234)

    # The system of interest is of size n x n.
    n = 100

    # We care about only m eigenpairs.
    m = 4

    # Define the generalized eigenvalue problem Av = cBv
    # where (c, v) is a generalized eigenpair,
    # and where we choose A to be the diagonal matrix whose entries are 1..n
    # and where B is chosen to be the identity matrix.
    vals = np.arange(1, n+1, dtype=float)
    A = diags([vals], [0], (n, n))
    B = eye(n)

    # Let the preconditioner M be the inverse of A.
    M = diags([1./vals], [0], (n, n))

    # Pick random initial vectors.
    X = np.random.rand(n, m)

    # Require that the returned eigenvectors be in the orthogonal complement
    # of the first few standard basis vectors.
    m_excluded = 3
    Y = np.eye(n, m_excluded)

    eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y, tol=1e-4, maxiter=40,
                           largest=False)

    assert_allclose(eigvals, np.arange(1+m_excluded, 1+m_excluded+m))
    _check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)


def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
    """Check if the eigenvalue residual is small.
    """
    mult_wV = np.multiply(w, V)
    dot_MV = M.dot(V)
    assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)

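
A side note on the broadcasting in _check_eigen (a sketch, not part of the test file): np.multiply(w, V) scales column j of V by w[j], which equals V @ diag(w), so the assertion checks M @ V against V @ diag(w):

    import numpy as np

    w = np.array([2.0, 3.0])
    V = np.arange(6.0).reshape(3, 2)
    print(np.allclose(np.multiply(w, V), V @ np.diag(w)))  # True
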
def _check_fiedler(n, p):
    """Check the Fiedler vector computation.
    """
    # This is not necessarily the recommended way to find the Fiedler vector.
    np.random.seed(1234)
    col = np.zeros(n)
    col[1] = 1
    A = toeplitz(col)
    D = np.diag(A.sum(axis=1))
    L = D - A
    # Compute the full eigendecomposition using tricks, e.g.
    # http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
    tmp = np.pi * np.arange(n) / n
    analytic_w = 2 * (1 - np.cos(tmp))
    analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
    _check_eigen(L, analytic_w, analytic_V)
    # Compute the full eigendecomposition using eigh.
    eigh_w, eigh_V = eigh(L)
    _check_eigen(L, eigh_w, eigh_V)
    # Check that the first eigenvalue is near zero and that the rest agree.
    assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
    assert_allclose(eigh_w[1:], analytic_w[1:])

    # Check small lobpcg eigenvalues.
    X = analytic_V[:, :p]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
    assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])

    # Check large lobpcg eigenvalues.
    X = analytic_V[:, -p:]
    lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
    assert_equal(lobpcg_w.shape, (p,))
    assert_equal(lobpcg_V.shape, (n, p))
    _check_eigen(L, lobpcg_w, lobpcg_V)
    assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])

    # Look for the Fiedler vector using good but not exactly correct guesses.
    fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
    X = np.vstack((np.ones(n), fiedler_guess)).T
    lobpcg_w, _ = lobpcg(L, X, largest=False)
    # Mathematically, the smaller eigenvalue should be zero
    # and the larger should be the algebraic connectivity.
    lobpcg_w = np.sort(lobpcg_w)
    assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)

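
As a standalone check of the analytic spectrum used above (a sketch, not part of the test file): the Laplacian of the n-path graph has eigenvalues 2*(1 - cos(pi*k/n)) for k = 0..n-1:

    import numpy as np
    from scipy.linalg import toeplitz, eigh

    n = 8
    col = np.zeros(n)
    col[1] = 1
    A = toeplitz(col)
    L = np.diag(A.sum(axis=1)) - A
    w = eigh(L, eigvals_only=True)
    print(np.allclose(np.sort(w),
                      2 * (1 - np.cos(np.pi * np.arange(n) / n))))  # True
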
def test_fiedler_small_8():
    """Check the dense workaround path for small matrices.
    """
    # This triggers the dense path because 8 < 2*5.
    _check_fiedler(8, 2)


def test_fiedler_large_12():
    """Check that the dense workaround path is avoided for non-small matrices.
    """
    # This does not trigger the dense path, because 2*5 <= 12.
    _check_fiedler(12, 2)

def test_hermitian():
    """Check complex-value Hermitian cases.
    """
    np.random.seed(1234)

    sizes = [3, 10, 50]
    ks = [1, 3, 10, 50]
    gens = [True, False]

    for size, k, gen in itertools.product(sizes, ks, gens):
        if k > size:
            continue

        H = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
        H = 10 * np.eye(size) + H + H.T.conj()

        X = np.random.rand(size, k)

        if not gen:
            B = np.eye(size)
            w, v = lobpcg(H, X, maxiter=5000)
            w0, _ = eigh(H)
        else:
            B = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
            B = 10 * np.eye(size) + B.dot(B.T.conj())
            w, v = lobpcg(H, X, B, maxiter=5000, largest=False)
            w0, _ = eigh(H, B)

        for wx, vx in zip(w, v.T):
            # Check eigenvector
            assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
                            / np.linalg.norm(H.dot(vx)),
                            0, atol=5e-4, rtol=0)

            # Compare eigenvalues
            j = np.argmin(abs(w0 - wx))
            assert_allclose(wx, w0[j], rtol=1e-4)

# The n=5 case tests the alternative small matrix code path that uses eigh().
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
def test_eigs_consistency(n, atol):
    """Check eigs vs. lobpcg consistency.
    """
    vals = np.arange(1, n+1, dtype=np.float64)
    A = spdiags(vals, 0, n, n)
    np.random.seed(345678)
    X = np.random.rand(n, 2)
    lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
    vals, _ = eigs(A, k=2)

    _check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
    assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)

def test_verbosity(tmpdir):
    """Check that nonzero verbosity level code runs.
    """
    A, B = ElasticRod(100)
    n = A.shape[0]
    m = 20
    np.random.seed(0)
    V = rand(n, m)
    X = orth(V)
    _, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False,
                  verbosityLevel=9)


@pytest.mark.xfail(platform.machine() == 'ppc64le',
                   reason="fails on ppc64le")
def test_tolerance_float32():
    """Check lobpcg for attainable tolerance in float32.
    """
    np.random.seed(1234)
    n = 50
    m = 3
    vals = -np.arange(1, n + 1)
    A = diags([vals], [0], (n, n))
    A = A.astype(np.float32)
    X = np.random.randn(n, m)
    X = X.astype(np.float32)
    eigvals, _ = lobpcg(A, X, tol=1e-9, maxiter=50, verbosityLevel=0)
    assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-5)

def test_random_initial_float32():
    """Check lobpcg in float32 for specific initial.
    """
    np.random.seed(3)
    n = 50
    m = 4
    vals = -np.arange(1, n + 1)
    A = diags([vals], [0], (n, n))
    A = A.astype(np.float32)
    X = np.random.rand(n, m)
    X = X.astype(np.float32)
    eigvals, _ = lobpcg(A, X, tol=1e-3, maxiter=50, verbosityLevel=1)
    assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-2)


def test_maxit_None():
    """Check lobpcg if maxit=None runs 20 iterations (the default)
    by checking the size of the iteration history output, which should
    be the number of iterations plus 2 (initial and final values).
    """
    np.random.seed(1566950023)
    n = 50
    m = 4
    vals = -np.arange(1, n + 1)
    A = diags([vals], [0], (n, n))
    A = A.astype(np.float32)
    X = np.random.randn(n, m)
    X = X.astype(np.float32)
    _, _, l_h = lobpcg(A, X, tol=1e-8, maxiter=20, retLambdaHistory=True)
    assert_allclose(np.shape(l_h)[0], 20+2)

@pytest.mark.slow
def test_diagonal_data_types():
    """Check lobpcg for diagonal matrices for all matrix types.
    """
    np.random.seed(1234)
    n = 40
    m = 4
    # Define the generalized eigenvalue problem Av = cBv
    # where (c, v) is a generalized eigenpair,
    # and where we choose A and B to be diagonal.
    vals = np.arange(1, n + 1)

    list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
    sparse_formats = len(list_sparse_format)
    for s_f_i, s_f in enumerate(list_sparse_format):

        As64 = diags([vals * vals], [0], (n, n), format=s_f)
        As32 = As64.astype(np.float32)
        Af64 = As64.toarray()
        Af32 = Af64.astype(np.float32)
        listA = [Af64, As64, Af32, As32]

        Bs64 = diags([vals], [0], (n, n), format=s_f)
        Bf64 = Bs64.toarray()
        listB = [Bf64, Bs64]

        # Define the preconditioner function as LinearOperator.
        Ms64 = diags([1./vals], [0], (n, n), format=s_f)

        def Ms64precond(x):
            return Ms64 @ x
        Ms64precondLO = LinearOperator(matvec=Ms64precond,
                                       matmat=Ms64precond,
                                       shape=(n, n), dtype=float)
        Mf64 = Ms64.toarray()

        def Mf64precond(x):
            return Mf64 @ x
        Mf64precondLO = LinearOperator(matvec=Mf64precond,
                                       matmat=Mf64precond,
                                       shape=(n, n), dtype=float)
        Ms32 = Ms64.astype(np.float32)

        def Ms32precond(x):
            return Ms32 @ x
        Ms32precondLO = LinearOperator(matvec=Ms32precond,
                                       matmat=Ms32precond,
                                       shape=(n, n), dtype=np.float32)
        Mf32 = Ms32.toarray()

        def Mf32precond(x):
            return Mf32 @ x
        Mf32precondLO = LinearOperator(matvec=Mf32precond,
                                       matmat=Mf32precond,
                                       shape=(n, n), dtype=np.float32)
        listM = [None, Ms64precondLO, Mf64precondLO,
                 Ms32precondLO, Mf32precondLO]

        # Setup matrix of the initial approximation to the eigenvectors
        # (cannot be sparse array).
        Xf64 = np.random.rand(n, m)
        Xf32 = Xf64.astype(np.float32)
        listX = [Xf64, Xf32]

        # Require that the returned eigenvectors be in the orthogonal
        # complement of the first few standard basis vectors
        # (cannot be sparse array).
        m_excluded = 3
        Yf64 = np.eye(n, m_excluded, dtype=float)
        Yf32 = np.eye(n, m_excluded, dtype=np.float32)
        listY = [Yf64, Yf32]

        tests = list(itertools.product(listA, listB, listM, listX, listY))
        # This is one of the slower tests because there are >1,000 configs
        # to test here. Instead of checking the product of all input and
        # output types for every sparse format, test each configuration
        # for the first sparse format, and then for one additional sparse
        # format; this takes 2/7 ~= 30% as long as testing all
        # configurations for all sparse formats.
        if s_f_i > 0:
            tests = tests[s_f_i - 1::sparse_formats-1]

        for A, B, M, X, Y in tests:
            eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
                                maxiter=100, largest=False)
            assert_allclose(eigvals,
                            np.arange(1 + m_excluded, 1 + m_excluded + m))