Uploaded Test files

Batuhan Berk Başoğlu 2020-11-12 11:05:57 -05:00
parent f584ad9d97
commit 2e81cb7d99
16627 changed files with 2065359 additions and 102444 deletions

@@ -0,0 +1,5 @@
"""
External, bundled dependencies.
"""

File diff suppressed because it is too large

@@ -0,0 +1,727 @@
"""
scikit-learn copy of scipy/sparse/linalg/eigen/lobpcg/lobpcg.py v1.3.0
to be deleted after scipy 1.3.0 becomes a dependency in scikit-learn
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
in hypre and PETSc. https://arxiv.org/abs/0705.2626
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://bitbucket.org/joseroman/blopex
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky, orth,
LinAlgError)
from scipy.sparse.linalg import aslinearoperator
__all__ = ['lobpcg']
def bmat(*args, **kwargs):
import warnings
with warnings.catch_warnings(record=True):
warnings.filterwarnings(
'ignore', '.*the matrix subclass is not the recommended way.*')
return np.bmat(*args, **kwargs)
def _save(ar, fileName):
# Used only when verbosity level > 10.
np.savetxt(fileName, ar)
def _report_nonhermitian(M, name):
"""
Report if `M` is not a Hermitian matrix given its type.
"""
from scipy.linalg import norm
md = M - M.T.conj()
nmd = norm(md, 1)
tol = 10 * np.finfo(M.dtype).eps
tol = max(tol, tol * norm(M, 1))
if nmd > tol:
print('matrix %s of the type %s is not sufficiently Hermitian:'
% (name, M.dtype))
print('condition: %e > %e' % (nmd, tol))
def _as2d(ar):
"""
If the input array is 2D return it, if it is 1D, append a dimension,
making it a column vector.
"""
if ar.ndim == 2:
return ar
else:  # Assume 1-D input.
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _makeOperator(operatorInput, expectedShape):
"""Takes a dense numpy array or a sparse matrix or
a function and makes an operator performing matrix * blockvector
products."""
if operatorInput is None:
return None
else:
operator = aslinearoperator(operatorInput)
if operator.shape != expectedShape:
raise ValueError('operator has invalid shape')
return operator
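# Illustration (editor's sketch, not in the original scipy source): dense
# arrays, sparse matrices, and LinearOperator instances are all normalized to
# a LinearOperator that is applied as matrix * blockvector, and a shape
# mismatch is rejected up front:
#
#     >>> import numpy as np
#     >>> op = _makeOperator(np.eye(4), (4, 4))
#     >>> op(np.ones((4, 2))).shape
#     (4, 2)
#     >>> _makeOperator(np.eye(3), (4, 4))
#     Traceback (most recent call last):
#     ...
#     ValueError: operator has invalid shape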
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
"""Changes blockVectorV in place."""
YBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
tmp = cho_solve(factYBY, YBV)
blockVectorV -= np.dot(blockVectorY, tmp)
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
"""B-orthonormalize the given block vector using Cholesky."""
normalization = blockVectorV.max(axis=0)+np.finfo(blockVectorV.dtype).eps
blockVectorV = blockVectorV / normalization
if blockVectorBV is None:
if B is not None:
blockVectorBV = B(blockVectorV)
else:
blockVectorBV = blockVectorV # Shared data!!!
else:
blockVectorBV = blockVectorBV / normalization
VBV = np.matmul(blockVectorV.T.conj(), blockVectorBV)
try:
# VBV is a Cholesky factor from now on...
VBV = cholesky(VBV, overwrite_a=True)
VBV = inv(VBV, overwrite_a=True)
blockVectorV = np.matmul(blockVectorV, VBV)
# blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T
if B is not None:
blockVectorBV = np.matmul(blockVectorBV, VBV)
# blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T
else:
blockVectorBV = None
except LinAlgError:
# raise ValueError('Cholesky has failed')
blockVectorV = None
blockVectorBV = None
VBV = None
if retInvR:
return blockVectorV, blockVectorBV, VBV, normalization
else:
return blockVectorV, blockVectorBV
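# Illustration (editor's sketch, not in the original scipy source): on
# success the returned block satisfies V.conj().T @ B(V) == I up to rounding;
# with B = None this reduces to ordinary orthonormality, and no B-image is
# returned:
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> V, BV = _b_orthonormalize(None, rng.rand(10, 3))
#     >>> BV is None
#     True
#     >>> np.allclose(V.T.conj() @ V, np.eye(3))
#     True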
def _get_indx(_lambda, num, largest):
"""Get `num` indices into `_lambda` depending on `largest` option."""
ii = np.argsort(_lambda)
if largest:
ii = ii[:-num-1:-1]
else:
ii = ii[:num]
return ii
def lobpcg(A, X,
B=None, M=None, Y=None,
tol=None, maxiter=20,
largest=True, verbosityLevel=0,
retLambdaHistory=False, retResidualNormsHistory=False):
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive
definite (SPD) generalized eigenproblems.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The symmetric linear operator of the problem, usually a
sparse matrix. Often called the "stiffness matrix".
X : ndarray, float32 or float64
Initial approximation to the ``k`` eigenvectors (non-sparse). If `A`
has ``shape=(n,n)`` then `X` should have shape ``shape=(n,k)``.
B : {dense matrix, sparse matrix, LinearOperator}, optional
The right hand side operator in a generalized eigenproblem.
By default, ``B = Identity``. Often called the "mass matrix".
M : {dense matrix, sparse matrix, LinearOperator}, optional
Preconditioner to `A`; by default ``M = Identity``.
`M` should approximate the inverse of `A`.
Y : ndarray, float32 or float64, optional
n-by-sizeY matrix of constraints (non-sparse), sizeY < n
The iterations will be performed in the B-orthogonal complement
of the column-space of Y. Y must be full rank.
tol : scalar, optional
Solver tolerance (stopping criterion).
The default is ``tol=n*sqrt(eps)``.
maxiter : int, optional
Maximum number of iterations. The default is ``maxiter=min(n, 20)``.
largest : bool, optional
When True, solve for the largest eigenvalues, otherwise the smallest.
verbosityLevel : int, optional
Controls solver output. The default is ``verbosityLevel=0``.
retLambdaHistory : bool, optional
Whether to return eigenvalue history. Default is False.
retResidualNormsHistory : bool, optional
Whether to return history of residual norms. Default is False.
Returns
-------
w : ndarray
Array of ``k`` eigenvalues
v : ndarray
An array of ``k`` eigenvectors. `v` has the same shape as `X`.
lambdas : list of ndarray, optional
The eigenvalue history, if `retLambdaHistory` is True.
rnorms : list of ndarray, optional
The history of residual norms, if `retResidualNormsHistory` is True.
Notes
-----
If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
the return tuple has the following format
``(lambda, V, lambda history, residual norms history)``.
In the following ``n`` denotes the matrix size and ``m`` the number
of required eigenvalues (smallest or largest).
The LOBPCG code internally solves eigenproblems of the size ``3m`` on every
iteration by calling the "standard" dense eigensolver, so if ``m`` is not
small enough compared to ``n``, it does not make sense to call the LOBPCG
code, but rather one should use the "standard" eigensolver, e.g. numpy or
scipy function in this case.
If one calls the LOBPCG algorithm for ``5m > n``, it will most likely break
internally, so the code tries to call the standard function instead.
It is not that ``n`` should be large for the LOBPCG to work, but rather the
ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
and ``n=10``, it works even though ``n`` is small. The method is intended
for extremely large ``n / m``, see e.g., reference [28] in
https://arxiv.org/abs/0705.2626
The convergence speed depends basically on two factors:
1. How well the sought eigenvalues are separated from the rest of the
eigenvalues. One can try to vary ``m`` to make this better.
2. How well conditioned the problem is. This can be changed by using proper
preconditioning. For example, a rod vibration test problem (under tests
directory) is ill-conditioned for large ``n``, so convergence will be
slow, unless efficient preconditioning is used. For this specific
problem, a good simple preconditioner function would be a linear solve
for `A`, which is easy to code since A is tridiagonal.
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
(2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
(BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://bitbucket.org/joseroman/blopex
Examples
--------
Solve ``A x = lambda x`` with constraints and preconditioning.
>>> import numpy as np
>>> from scipy.sparse import spdiags, issparse
>>> from scipy.sparse.linalg import lobpcg, LinearOperator
>>> n = 100
>>> vals = np.arange(1, n + 1)
>>> A = spdiags(vals, 0, n, n)
>>> A.toarray()
array([[ 1., 0., 0., ..., 0., 0., 0.],
[ 0., 2., 0., ..., 0., 0., 0.],
[ 0., 0., 3., ..., 0., 0., 0.],
...,
[ 0., 0., 0., ..., 98., 0., 0.],
[ 0., 0., 0., ..., 0., 99., 0.],
[ 0., 0., 0., ..., 0., 0., 100.]])
Constraints:
>>> Y = np.eye(n, 3)
Initial guess for eigenvectors, should have linearly independent
columns. Column dimension = number of requested eigenvalues.
>>> X = np.random.rand(n, 3)
The preconditioner is the inverse of A in this example:
>>> invA = spdiags([1./vals], 0, n, n)
The preconditioner must be defined by a function:
>>> def precond( x ):
... return invA @ x
The argument x of the preconditioner function is a matrix inside `lobpcg`,
thus the use of matrix-matrix product ``@``.
The preconditioner function is passed to lobpcg as a `LinearOperator`:
>>> M = LinearOperator(matvec=precond, matmat=precond,
... shape=(n, n), dtype=float)
Let us now solve the eigenvalue problem for the matrix A:
>>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
>>> eigenvalues
array([4., 5., 6.])
Note that the vectors passed in Y are the eigenvectors of the 3 smallest
eigenvalues. The results returned are orthogonal to those.
"""
blockVectorX = X
blockVectorY = Y
residualTolerance = tol
maxIterations = maxiter
if blockVectorY is not None:
sizeY = blockVectorY.shape[1]
else:
sizeY = 0
# Block size.
if len(blockVectorX.shape) != 2:
raise ValueError('expected rank-2 array for argument X')
n, sizeX = blockVectorX.shape
if verbosityLevel:
aux = "Solving "
if B is None:
aux += "standard"
else:
aux += "generalized"
aux += " eigenvalue problem with"
if M is None:
aux += "out"
aux += " preconditioning\n\n"
aux += "matrix size %d\n" % n
aux += "block size %d\n\n" % sizeX
if blockVectorY is None:
aux += "No constraints\n\n"
else:
if sizeY > 1:
aux += "%d constraints\n\n" % sizeY
else:
aux += "%d constraint\n\n" % sizeY
print(aux)
A = _makeOperator(A, (n, n))
B = _makeOperator(B, (n, n))
M = _makeOperator(M, (n, n))
if (n - sizeY) < (5 * sizeX):
# warn('The problem size is small compared to the block size.' \
# ' Using dense eigensolver instead of LOBPCG.')
sizeX = min(sizeX, n)
if blockVectorY is not None:
raise NotImplementedError('The dense eigensolver '
'does not support constraints.')
# Define the closed range of indices of eigenvalues to return.
if largest:
eigvals = (n - sizeX, n-1)
else:
eigvals = (0, sizeX-1)
A_dense = A(np.eye(n, dtype=A.dtype))
B_dense = None if B is None else B(np.eye(n, dtype=B.dtype))
vals, vecs = eigh(A_dense, B_dense, eigvals=eigvals,
check_finite=False)
if largest:
# Reverse order to be compatible with eigs() in 'LM' mode.
vals = vals[::-1]
vecs = vecs[:, ::-1]
return vals, vecs
if (residualTolerance is None) or (residualTolerance <= 0.0):
residualTolerance = np.sqrt(1e-15) * n
# Apply constraints to X.
if blockVectorY is not None:
if B is not None:
blockVectorBY = B(blockVectorY)
else:
blockVectorBY = blockVectorY
# gramYBY is a dense array.
gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
try:
# gramYBY is a Cholesky factor from now on...
gramYBY = cho_factor(gramYBY)
except LinAlgError:
raise ValueError('cannot handle linearly dependent constraints')
_applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
##
# B-orthonormalize X.
blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)
##
# Compute the initial Ritz vectors: solve the eigenproblem.
blockVectorAX = A(blockVectorX)
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
_lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
ii = _get_indx(_lambda, sizeX, largest)
_lambda = _lambda[ii]
eigBlockVector = np.asarray(eigBlockVector[:, ii])
blockVectorX = np.dot(blockVectorX, eigBlockVector)
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
if B is not None:
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
##
# Active index set.
activeMask = np.ones((sizeX,), dtype=bool)
lambdaHistory = [_lambda]
residualNormsHistory = []
previousBlockSize = sizeX
ident = np.eye(sizeX, dtype=A.dtype)
ident0 = np.eye(sizeX, dtype=A.dtype)
##
# Main iteration loop.
blockVectorP = None # set during iteration
blockVectorAP = None
blockVectorBP = None
iterationNumber = -1
restart = True
explicitGramFlag = False
while iterationNumber < maxIterations:
iterationNumber += 1
if verbosityLevel > 0:
print('iteration %d' % iterationNumber)
if B is not None:
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(aux)
residualNormsHistory.append(residualNorms)
ii = np.where(residualNorms > residualTolerance, True, False)
activeMask = activeMask & ii
if verbosityLevel > 2:
print(activeMask)
currentBlockSize = activeMask.sum()
if currentBlockSize != previousBlockSize:
previousBlockSize = currentBlockSize
ident = np.eye(currentBlockSize, dtype=A.dtype)
if currentBlockSize == 0:
break
if verbosityLevel > 0:
print('current block size:', currentBlockSize)
print('eigenvalue:', _lambda)
print('residual norms:', residualNorms)
if verbosityLevel > 10:
print(eigBlockVector)
activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
if iterationNumber > 0:
activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
if B is not None:
activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
if M is not None:
# Apply preconditioner T to the active residuals.
activeBlockVectorR = M(activeBlockVectorR)
##
# Apply constraints to the preconditioned residuals.
if blockVectorY is not None:
_applyConstraints(activeBlockVectorR,
gramYBY, blockVectorBY, blockVectorY)
##
# B-orthogonalize the preconditioned residuals to X.
if B is not None:
activeBlockVectorR = activeBlockVectorR - \
np.matmul(blockVectorX,
np.matmul(blockVectorBX.T.conj(),
activeBlockVectorR))
else:
activeBlockVectorR = activeBlockVectorR - \
np.matmul(blockVectorX,
np.matmul(blockVectorX.T.conj(),
activeBlockVectorR))
##
# B-orthonormalize the preconditioned residuals.
aux = _b_orthonormalize(B, activeBlockVectorR)
activeBlockVectorR, activeBlockVectorBR = aux
activeBlockVectorAR = A(activeBlockVectorR)
if iterationNumber > 0:
if B is not None:
aux = _b_orthonormalize(B, activeBlockVectorP,
activeBlockVectorBP, retInvR=True)
activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
else:
aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)
activeBlockVectorP, _, invR, normal = aux
# Function _b_orthonormalize returns None if Cholesky fails
if activeBlockVectorP is not None:
activeBlockVectorAP = activeBlockVectorAP / normal
activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
restart = False
else:
restart = True
##
# Perform the Rayleigh Ritz Procedure:
# Compute symmetric Gram matrices:
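# The threshold below is dtype-dependent: once the largest residual norm
# drops under it, explicitGramFlag is switched on (permanently) and the
# Gram matrices are recomputed and symmetrized explicitly for numerical
# stability near convergence.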
if activeBlockVectorAR.dtype == 'float32':
myeps = 1
elif activeBlockVectorR.dtype == 'float32':
myeps = 1e-4
else:
myeps = 1e-8
if residualNorms.max() > myeps and not explicitGramFlag:
explicitGramFlag = False
else:
# Once explicitGramFlag, forever explicitGramFlag.
explicitGramFlag = True
# Shared-memory assignments to simplify the code
if B is None:
blockVectorBX = blockVectorX
activeBlockVectorBR = activeBlockVectorR
if not restart:
activeBlockVectorBP = activeBlockVectorP
# Common submatrices:
gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
if explicitGramFlag:
gramRAR = (gramRAR + gramRAR.T.conj())/2
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
gramXAX = (gramXAX + gramXAX.T.conj())/2
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
else:
gramXAX = np.diag(_lambda)
gramXBX = ident0
gramRBR = ident
gramXBR = np.zeros((sizeX, currentBlockSize), dtype=A.dtype)
def _handle_gramA_gramB_verbosity(gramA, gramB):
if verbosityLevel > 0:
_report_nonhermitian(gramA, 'gramA')
_report_nonhermitian(gramB, 'gramB')
if verbosityLevel > 10:
# Note: not documented, but leave it in here for now
np.savetxt('gramA.txt', gramA)
np.savetxt('gramB.txt', gramB)
if not restart:
gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
if explicitGramFlag:
gramPAP = (gramPAP + gramPAP.T.conj())/2
gramPBP = np.dot(activeBlockVectorP.T.conj(),
activeBlockVectorBP)
else:
gramPBP = ident
gramA = bmat([[gramXAX, gramXAR, gramXAP],
[gramXAR.T.conj(), gramRAR, gramRAP],
[gramXAP.T.conj(), gramRAP.T.conj(), gramPAP]])
gramB = bmat([[gramXBX, gramXBR, gramXBP],
[gramXBR.T.conj(), gramRBR, gramRBP],
[gramXBP.T.conj(), gramRBP.T.conj(), gramPBP]])
_handle_gramA_gramB_verbosity(gramA, gramB)
try:
_lambda, eigBlockVector = eigh(gramA, gramB,
check_finite=False)
except LinAlgError:
# try again after dropping the direction vectors P from RR
restart = True
if restart:
gramA = bmat([[gramXAX, gramXAR],
[gramXAR.T.conj(), gramRAR]])
gramB = bmat([[gramXBX, gramXBR],
[gramXBR.T.conj(), gramRBR]])
_handle_gramA_gramB_verbosity(gramA, gramB)
try:
_lambda, eigBlockVector = eigh(gramA, gramB,
check_finite=False)
except LinAlgError:
raise ValueError('eigh has failed in lobpcg iterations')
ii = _get_indx(_lambda, sizeX, largest)
if verbosityLevel > 10:
print(ii)
print(_lambda)
_lambda = _lambda[ii]
eigBlockVector = eigBlockVector[:, ii]
lambdaHistory.append(_lambda)
if verbosityLevel > 10:
print('lambda:', _lambda)
# # Normalize eigenvectors!
# aux = np.sum( eigBlockVector.conj() * eigBlockVector, 0 )
# eigVecNorms = np.sqrt( aux )
# eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]
# eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )
if verbosityLevel > 10:
print(eigBlockVector)
# Compute Ritz vectors.
if B is not None:
if not restart:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
if verbosityLevel > 10:
print(pp)
print(app)
print(bpp)
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
else:
if not restart:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
if verbosityLevel > 10:
print(pp)
print(app)
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorP, blockVectorAP = pp, app
if B is not None:
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(aux)
# Future work: Need to add Postprocessing here:
# Making sure eigenvectors "exactly" satisfy the blockVectorY constraints?
# Making sure eigenvectors are "exactly" orthonormalized by a final "exact" RR
# Computing the actual true residuals
if verbosityLevel > 0:
print('final eigenvalue:', _lambda)
print('final residual norms:', residualNorms)
if retLambdaHistory:
if retResidualNormsHistory:
return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
else:
return _lambda, blockVectorX, lambdaHistory
else:
if retResidualNormsHistory:
return _lambda, blockVectorX, residualNormsHistory
else:
return _lambda, blockVectorX
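# Usage sketch (editor's illustration, not part of the original scipy file):
# with both history flags set, lobpcg returns a 4-tuple, as documented in the
# Notes section of the docstring above. A small diagonal matrix keeps the
# shapes easy to verify:
#
#     >>> import numpy as np
#     >>> A = np.diag(np.arange(1.0, 101.0))
#     >>> X = np.random.rand(100, 4)
#     >>> w, v, lambdas, rnorms = lobpcg(A, X, largest=False,
#     ...                                retLambdaHistory=True,
#     ...                                retResidualNormsHistory=True)
#     >>> w.shape, v.shape
#     ((4,), (100, 4))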

@@ -0,0 +1,58 @@
"""
Backport of PEP 562.
https://pypi.org/search/?q=pep562
Licensed under MIT
Copyright (c) 2018 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
from __future__ import unicode_literals
import sys
__all__ = ('Pep562',)
class Pep562(object):
"""
Backport of PEP 562 <https://pypi.org/search/?q=pep562>.
Wraps the module in a class that exposes the mechanics to override `__dir__` and `__getattr__`.
The given module will be searched for overrides of `__dir__` and `__getattr__` and use them when needed.
"""
def __init__(self, name):
"""Acquire `__getattr__` and `__dir__`, but only replace module for versions less than Python 3.7."""
self._module = sys.modules[name]
self._get_attr = getattr(self._module, '__getattr__', None)
self._get_dir = getattr(self._module, '__dir__', None)
sys.modules[name] = self
def __dir__(self):
"""Return the overridden `dir` if one was provided, else apply `dir` to the module."""
return self._get_dir() if self._get_dir else dir(self._module)
def __getattr__(self, name):
"""Attempt to retrieve the attribute from the module, and if missing, use the overridden function if present."""
try:
return getattr(self._module, name)
except AttributeError:
if self._get_attr:
return self._get_attr(name)
raise
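# Usage sketch (editor's illustration, not part of the original file): a
# module opts in by defining module-level __getattr__/__dir__ and then
# instantiating Pep562 with its own name, typically at the very bottom of the
# module; 'legacy_attr' here is a hypothetical attribute for illustration:
#
#     def __getattr__(name):
#         if name == 'legacy_attr':
#             return 42
#         raise AttributeError("module has no attribute %r" % name)
#
#     Pep562(__name__)
#
# On Python 3.7+ PEP 562 is built in, so the instantiation can be guarded
# with a sys.version_info check.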

@@ -0,0 +1,504 @@
"""
A collection of image utilities using the Python Imaging Library (PIL).
This is a local version of utility functions from scipy that are wrapping PIL
functionality. These functions are deprecated in scipy 1.0.0 and will be
removed in scipy 1.2.0. Therefore, the functionality used in sklearn is copied
here. This file is taken from scipy/misc/pilutil.py in scipy
1.0.0. Modifications include: making this module importable if pillow is not
installed, removal of DeprecationWarning, removal of functions scikit-learn
does not need.
Copyright (c) 2001, 2002 Enthought, Inc.
All rights reserved.
Copyright (c) 2003-2017 SciPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of Enthought nor the names of the SciPy Developers
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division, print_function, absolute_import
import numpy
from numpy import (amin, amax, ravel, asarray, arange, ones, newaxis,
transpose, iscomplexobj, uint8, issubdtype, array)
# Modification of original scipy pilutil.py to make this module importable if
# pillow is not installed. If pillow is not installed, functions will raise
# ImportError when called.
try:
try:
from PIL import Image
except ImportError:
import Image
pillow_installed = True
if not hasattr(Image, 'frombytes'):
Image.frombytes = Image.fromstring
except ImportError:
pillow_installed = False
__all__ = ['bytescale', 'imread', 'imsave', 'fromimage', 'toimage', 'imresize']
PILLOW_ERROR_MESSAGE = (
"The Python Imaging Library (PIL) is required to load data "
"from jpeg files. Please refer to "
"https://pillow.readthedocs.io/en/stable/installation.html "
"for installing PIL."
)
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> import numpy as np
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(uint8)
def imread(name, flatten=False, mode=None):
"""
Read an image from a file as an array.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
name : str or file object
The file name or file object to be read.
flatten : bool, optional
If True, flattens the color layers into a single gray-scale layer.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes for more
details.
Returns
-------
imread : ndarray
The array obtained by reading the image.
Notes
-----
`imread` uses the Python Imaging Library (PIL) to read an image.
The following notes are from the PIL documentation.
`mode` can be one of the following strings:
* 'L' (8-bit pixels, black and white)
* 'P' (8-bit pixels, mapped to any other mode using a color palette)
* 'RGB' (3x8-bit pixels, true color)
* 'RGBA' (4x8-bit pixels, true color with transparency mask)
* 'CMYK' (4x8-bit pixels, color separation)
* 'YCbCr' (3x8-bit pixels, color video format)
* 'I' (32-bit signed integer pixels)
* 'F' (32-bit floating point pixels)
PIL also provides limited support for a few special modes, including
'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
(true color with premultiplied alpha).
When translating a color image to black and white (mode 'L', 'I' or
'F'), the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
When `flatten` is True, the image is converted using mode 'F'.
When `mode` is not None and `flatten` is True, the image is first
converted according to `mode`, and the result is then flattened using
mode 'F'.
"""
if not pillow_installed:
raise ImportError(PILLOW_ERROR_MESSAGE)
im = Image.open(name)
return fromimage(im, flatten=flatten, mode=mode)
def imsave(name, arr, format=None):
"""
Save an array as an image.
This function is only available if Python Imaging Library (PIL) is installed.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
name : str or file object
Output file name or file object.
arr : ndarray, MxN or MxNx3 or MxNx4
Array containing image values. If the shape is ``MxN``, the array
represents a grey-level image. Shape ``MxNx3`` stores the red, green
and blue bands along the last dimension. An alpha layer may be
included, specified as the last colour band of an ``MxNx4`` array.
format : str
Image format. If omitted, the format to use is determined from the
file name extension. If a file object was used instead of a file name,
this parameter should always be used.
Examples
--------
Construct an array of gradient intensity values and save to file:
>>> import numpy as np
>>> from scipy.misc import imsave
>>> x = np.zeros((255, 255))
>>> x = np.zeros((255, 255), dtype=np.uint8)
>>> x[:] = np.arange(255)
>>> imsave('gradient.png', x)
Construct an array with three colour bands (R, G, B) and store to file:
>>> rgb = np.zeros((255, 255, 3), dtype=np.uint8)
>>> rgb[..., 0] = np.arange(255)
>>> rgb[..., 1] = 55
>>> rgb[..., 2] = 1 - np.arange(255)
>>> imsave('rgb_gradient.png', rgb)
"""
im = toimage(arr, channel_axis=2)
if format is None:
im.save(name)
else:
im.save(name, format)
return
def fromimage(im, flatten=False, mode=None):
"""
Return a copy of a PIL image as a numpy array.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
im : PIL image
Input image.
flatten : bool
If true, convert the output to grey-scale.
mode : str, optional
Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
`imread` docstring for more details.
Returns
-------
fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
"""
if not pillow_installed:
raise ImportError(PILLOW_ERROR_MESSAGE)
if not Image.isImageType(im):
raise TypeError("Input is not a PIL image.")
if mode is not None:
if mode != im.mode:
im = im.convert(mode)
elif im.mode == 'P':
# Mode 'P' means there is an indexed "palette". If we leave the mode
# as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
# containing the indices into the palette, and not a 3-D array
# containing the RGB or RGBA values.
if 'transparency' in im.info:
im = im.convert('RGBA')
else:
im = im.convert('RGB')
if flatten:
im = im.convert('F')
elif im.mode == '1':
# Workaround for crash in PIL. When im is 1-bit, the call array(im)
# can cause a seg. fault, or generate garbage. See
# https://github.com/scipy/scipy/issues/2138 and
# https://github.com/python-pillow/Pillow/issues/350.
#
# This converts im from a 1-bit image to an 8-bit image.
im = im.convert('L')
a = array(im)
return a
_errstr = "Mode is unknown or incompatible with input array shape."
def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,
mode=None, channel_axis=None):
"""Takes a numpy array and returns a PIL image.
This function is only available if Python Imaging Library (PIL) is installed.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional.
"""
if not pillow_installed:
raise ImportError(PILLOW_ERROR_MESSAGE)
data = asarray(arr)
if iscomplexobj(data):
raise ValueError("Cannot convert a complex-valued array.")
shape = list(data.shape)
valid = len(shape) == 2 or ((len(shape) == 3) and
((3 in shape) or (4 in shape)))
if not valid:
raise ValueError("'arr' does not have a suitable array shape for "
"any mode.")
if len(shape) == 2:
shape = (shape[1], shape[0]) # columns show up first
if mode == 'F':
data32 = data.astype(numpy.float32)
image = Image.frombytes(mode, shape, data32.tostring())
return image
if mode in [None, 'L', 'P']:
bytedata = bytescale(data, high=high, low=low,
cmin=cmin, cmax=cmax)
image = Image.frombytes('L', shape, bytedata.tostring())
if pal is not None:
image.putpalette(asarray(pal, dtype=uint8).tostring())
# Becomes a mode='P' automagically.
elif mode == 'P': # default gray-scale
pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] *
ones((3,), dtype=uint8)[newaxis, :])
image.putpalette(asarray(pal, dtype=uint8).tostring())
return image
if mode == '1': # high input gives threshold for 1
bytedata = (data > high)
image = Image.frombytes('1', shape, bytedata.tostring())
return image
if cmin is None:
cmin = amin(ravel(data))
if cmax is None:
cmax = amax(ravel(data))
data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low
if mode == 'I':
data32 = data.astype(numpy.uint32)
image = Image.frombytes(mode, shape, data32.tostring())
else:
raise ValueError(_errstr)
return image
# if here then 3-d array with a 3 or a 4 in the shape length.
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
if channel_axis is None:
if (3 in shape):
ca = numpy.flatnonzero(asarray(shape) == 3)[0]
else:
ca = numpy.flatnonzero(asarray(shape) == 4)
if len(ca):
ca = ca[0]
else:
raise ValueError("Could not find channel dimension.")
else:
ca = channel_axis
numch = shape[ca]
if numch not in [3, 4]:
raise ValueError("Channel axis dimension is not valid.")
bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)
if ca == 2:
strdata = bytedata.tostring()
shape = (shape[1], shape[0])
elif ca == 1:
strdata = transpose(bytedata, (0, 2, 1)).tostring()
shape = (shape[2], shape[0])
elif ca == 0:
strdata = transpose(bytedata, (1, 2, 0)).tostring()
shape = (shape[2], shape[1])
if mode is None:
if numch == 3:
mode = 'RGB'
else:
mode = 'RGBA'
if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:
raise ValueError(_errstr)
if mode in ['RGB', 'YCbCr']:
if numch != 3:
raise ValueError("Invalid array shape for mode.")
if mode in ['RGBA', 'CMYK']:
if numch != 4:
raise ValueError("Invalid array shape for mode.")
# Here we know data and mode is correct
image = Image.frombytes(mode, shape, strdata)
return image
def imresize(arr, size, interp='bilinear', mode=None):
"""
Resize an image.
This function is only available if Python Imaging Library (PIL) is installed.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
arr : ndarray
The image array to be resized.
size : int, float or tuple
* int - Percentage of current size.
* float - Fraction of current size.
* tuple - Size of the output image (height, width).
interp : str, optional
Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear',
'bicubic' or 'cubic').
mode : str, optional
The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.
If ``mode=None`` (the default), 2-D images will be treated like
``mode='L'``, i.e. casting to long integer. For 3-D and 4-D arrays,
`mode` will be set to ``'RGB'`` and ``'RGBA'`` respectively.
Returns
-------
imresize : ndarray
The resized image array.
See Also
--------
toimage : Implicitly used to convert `arr` according to `mode`.
scipy.ndimage.zoom : More generic implementation that does not use PIL.
"""
im = toimage(arr, mode=mode)
ts = type(size)
if issubdtype(ts, numpy.signedinteger):
percent = size / 100.0
size = tuple((array(im.size)*percent).astype(int))
elif issubdtype(type(size), numpy.floating):
size = tuple((array(im.size)*size).astype(int))
else:
size = (size[1], size[0])
func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
imnew = im.resize(size, resample=func[interp])
return fromimage(imnew)
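# Usage sketch (editor's illustration, not part of the original file; assumes
# pillow is installed): the three accepted forms of `size` and the resulting
# output shapes:
#
#     >>> import numpy as np
#     >>> img = np.random.randint(0, 255, (40, 20)).astype(np.uint8)
#     >>> imresize(img, 50).shape        # int: percentage of current size
#     (20, 10)
#     >>> imresize(img, 0.5).shape       # float: fraction of current size
#     (20, 10)
#     >>> imresize(img, (10, 30)).shape  # tuple: exact (height, width)
#     (10, 30)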

@@ -0,0 +1,7 @@
# Do not collect any tests in externals. This is more robust than using
# --ignore because --ignore needs a path and it is not convenient to pass in
# the externals path (very long install-dependent path in site-packages) when
# using --pyargs.
def pytest_ignore_collect(path, config):
return True