Fixed database typo and removed unnecessary class identifier.

Batuhan Berk Başoğlu 2020-10-14 10:10:37 -04:00
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions

View file

@@ -0,0 +1,124 @@
"""
Sparse linear algebra (:mod:`scipy.sparse.linalg`)
==================================================
.. currentmodule:: scipy.sparse.linalg
Abstract linear operators
-------------------------
.. autosummary::
:toctree: generated/
LinearOperator -- abstract representation of a linear operator
aslinearoperator -- convert an object to an abstract linear operator
Matrix Operations
-----------------
.. autosummary::
:toctree: generated/
inv -- compute the sparse matrix inverse
expm -- compute the sparse matrix exponential
expm_multiply -- compute the product of a matrix exponential and a matrix
Matrix norms
------------
.. autosummary::
:toctree: generated/
norm -- Norm of a sparse matrix
onenormest -- Estimate the 1-norm of a sparse matrix
Solving linear problems
-----------------------
Direct methods for linear equation systems:
.. autosummary::
:toctree: generated/
spsolve -- Solve the sparse linear system Ax=b
spsolve_triangular -- Solve the sparse linear system Ax=b for a triangular matrix
factorized -- Pre-factorize matrix to a function solving a linear system
MatrixRankWarning -- Warning on exactly singular matrices
use_solver -- Select direct solver to use
Iterative methods for linear equation systems:
.. autosummary::
:toctree: generated/
bicg -- Use BIConjugate Gradient iteration to solve A x = b
bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b
cg -- Use Conjugate Gradient iteration to solve A x = b
cgs -- Use Conjugate Gradient Squared iteration to solve A x = b
gmres -- Use Generalized Minimal RESidual iteration to solve A x = b
lgmres -- Solve a matrix equation using the LGMRES algorithm
minres -- Use MINimum RESidual iteration to solve Ax = b
qmr -- Use Quasi-Minimal Residual iteration to solve A x = b
gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm
Iterative methods for least-squares problems:
.. autosummary::
:toctree: generated/
lsqr -- Find the least-squares solution to a sparse linear equation system
lsmr -- Find the least-squares solution to a sparse linear equation system
Matrix factorizations
---------------------
Eigenvalue problems:
.. autosummary::
:toctree: generated/
eigs -- Find k eigenvalues and eigenvectors of the square matrix A
eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
Singular value problems:
.. autosummary::
:toctree: generated/
svds -- Compute k singular values/vectors for a sparse matrix
Complete or incomplete LU factorizations:
.. autosummary::
:toctree: generated/
splu -- Compute an LU decomposition for a sparse matrix
spilu -- Compute an incomplete LU decomposition for a sparse matrix
SuperLU -- Object representing an LU factorization
Exceptions
----------
.. autosummary::
:toctree: generated/
ArpackNoConvergence
ArpackError
"""
from .isolve import *
from .dsolve import *
from .interface import *
from .eigen import *
from .matfuncs import *
from ._onenormest import *
from ._norm import *
from ._expm_multiply import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
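# A minimal illustrative sketch (not part of the module itself): a round
# trip through the public names re-exported above. The matrix values are
# arbitrary; with a 2x2 operator and the default t=2, onenormest is exact.
#
# >>> import numpy as np
# >>> from scipy.sparse import csc_matrix
# >>> from scipy.sparse.linalg import spsolve, norm, onenormest
# >>> A = csc_matrix([[3., 0.], [1., 2.]])
# >>> np.allclose(spsolve(A, np.array([3., 5.])), [1., 2.])
# True
# >>> norm(A, 1) == onenormest(A)
# True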

View file

@@ -0,0 +1,711 @@
"""Compute the action of the matrix exponential.
"""
import numpy as np
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg import aslinearoperator
from scipy.sparse.sputils import is_pydata_spmatrix
__all__ = ['expm_multiply']
def _exact_inf_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=1).flat)
elif is_pydata_spmatrix(A):
return max(abs(A).sum(axis=1))
else:
return np.linalg.norm(A, np.inf)
def _exact_1_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
elif is_pydata_spmatrix(A):
return max(abs(A).sum(axis=0))
else:
return np.linalg.norm(A, 1)
def _trace(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return A.diagonal().sum()
elif is_pydata_spmatrix(A):
return A.to_scipy_sparse().diagonal().sum()
else:
return np.trace(A)
def _ident_like(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
elif is_pydata_spmatrix(A):
import sparse
return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
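# A small sanity sketch for the compatibility helpers above (illustrative
# only): each wrapper should agree with its dense NumPy counterpart.
#
# >>> M = scipy.sparse.csc_matrix([[1., -2.], [0., 3.]])
# >>> D = M.toarray()
# >>> _exact_inf_norm(M) == np.linalg.norm(D, np.inf)
# True
# >>> _exact_1_norm(M) == np.linalg.norm(D, 1)
# True
# >>> _trace(M) == np.trace(D)
# True
# >>> (_ident_like(M) != scipy.sparse.eye(2, format='csc')).nnz
# 0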
def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None):
"""
Compute the action of the matrix exponential of A on B.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix or vector to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
Returns
-------
expm_A_B : ndarray
The result of the action :math:`e^{t_k A} B`.
Notes
-----
The optional arguments defining the sequence of evenly spaced time points
are compatible with the arguments of `numpy.linspace`.
The output ndarray shape is somewhat complicated, so it is explained here.
The ndim of the output can be either 1, 2, or 3.
It is 1 if you are computing the expm action on a single vector
at a single time point.
It is 2 if you are computing the expm action on a vector
at multiple time points, or if you are computing the expm action
on a matrix at a single time point.
It is 3 if you want the action on a matrix with multiple
columns at multiple time points.
If multiple time points are requested, expm_A_B[0] will always
be the action of the expm at the first time point,
regardless of whether the action is on a vector or a matrix.
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
"Computing the Action of the Matrix Exponential,
with an Application to Exponential Integrators."
SIAM Journal on Scientific Computing,
33 (2). pp. 488-511. ISSN 1064-8275
http://eprints.ma.man.ac.uk/1591/
.. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
"Computing Matrix Functions."
Acta Numerica,
19. 159-208. ISSN 0962-4929
http://eprints.ma.man.ac.uk/1451/
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import expm, expm_multiply
>>> A = csc_matrix([[1, 0], [0, 1]])
>>> A.todense()
matrix([[1, 0],
[0, 1]], dtype=int64)
>>> B = np.array([np.exp(-1.), np.exp(-2.)])
>>> B
array([ 0.36787944, 0.13533528])
>>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
array([[ 1. , 0.36787944],
[ 1.64872127, 0.60653066],
[ 2.71828183, 1. ]])
>>> expm(A).dot(B) # Verify 1st timestep
array([ 1. , 0.36787944])
>>> expm(1.5*A).dot(B) # Verify 2nd timestep
array([ 1.64872127, 0.60653066])
>>> expm(2*A).dot(B) # Verify 3rd timestep
array([ 2.71828183, 1. ])
"""
if all(arg is None for arg in (start, stop, num, endpoint)):
X = _expm_multiply_simple(A, B)
else:
X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint)
return X
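# A shape-focused sketch (illustrative only), matching the Notes in the
# docstring: a vector B at several time points gives ndim 2, while a
# matrix B at several time points gives ndim 3.
#
# >>> A = np.eye(2)
# >>> expm_multiply(A, np.ones(2), start=0, stop=1, num=3).shape
# (3, 2)
# >>> expm_multiply(A, np.ones((2, 2)), start=0, stop=1, num=3).shape
# (3, 2, 2)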
def _expm_multiply_simple(A, B, t=1.0, balance=False):
"""
Compute the action of the matrix exponential at a single time point.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix to be multiplied by the matrix exponential of A.
t : float
A time point.
balance : bool
Indicates whether or not to apply balancing.
Returns
-------
F : ndarray
:math:`e^{t A} B`
Notes
-----
This is algorithm (3.2) in Al-Mohy and Higham (2011).
"""
if balance:
raise NotImplementedError
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if A.shape[1] != B.shape[0]:
raise ValueError('the matrices A and B have incompatible shapes')
ident = _ident_like(A)
n = A.shape[0]
if len(B.shape) == 1:
n0 = 1
elif len(B.shape) == 2:
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = 2**-53
tol = u_d
mu = _trace(A) / float(n)
A = A - mu * ident
A_1_norm = _exact_1_norm(A)
if t*A_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
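# A quick correctness sketch (illustrative only): for a small dense
# generator, the single-point action should match a dense expm.
#
# >>> A = np.array([[0., 1.], [-1., 0.]])   # rotation generator
# >>> b = np.array([1., 0.])
# >>> np.allclose(_expm_multiply_simple(A, b), scipy.linalg.expm(A).dot(b))
# True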
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
"""
A helper function.
"""
if balance:
raise NotImplementedError
if tol is None:
u_d = 2 ** -53
tol = u_d
F = B
eta = np.exp(t*mu / float(s))
for i in range(s):
c1 = _exact_inf_norm(B)
for j in range(m_star):
coeff = t / float(s*(j+1))
B = coeff * A.dot(B)
c2 = _exact_inf_norm(B)
F = F + B
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
F = eta * F
B = F
return F
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
_theta = {
# The first 30 values are from table A.3 of Computing Matrix Functions.
1: 2.29e-16,
2: 2.58e-8,
3: 1.39e-5,
4: 3.40e-4,
5: 2.40e-3,
6: 9.07e-3,
7: 2.38e-2,
8: 5.00e-2,
9: 8.96e-2,
10: 1.44e-1,
# 11
11: 2.14e-1,
12: 3.00e-1,
13: 4.00e-1,
14: 5.14e-1,
15: 6.41e-1,
16: 7.81e-1,
17: 9.31e-1,
18: 1.09,
19: 1.26,
20: 1.44,
# 21
21: 1.62,
22: 1.82,
23: 2.01,
24: 2.22,
25: 2.43,
26: 2.64,
27: 2.86,
28: 3.08,
29: 3.31,
30: 3.54,
# The rest are from table 3.1 of
# Computing the Action of the Matrix Exponential.
35: 4.7,
40: 6.0,
45: 7.2,
50: 8.5,
55: 9.9,
}
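# How the table is used (see _fragment_3_1 below): an m-term truncated
# Taylor series applied to A/s is accurate to roughly unit roundoff
# provided ||A||_1 / s <= _theta[m], so the scheme searches for the pair
# (m, s) with s = ceil(||A||_1 / _theta[m]) that minimizes the work m*s.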
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
#XXX Eventually turn this into an API function in the _onenormest module,
#XXX and remove its underscore,
#XXX but wait until expm_multiply goes into scipy.
return scipy.sparse.linalg.onenormest(aslinearoperator(A) ** p)
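# A tiny sketch (illustrative only): for a 2x2 matrix the estimate is
# exact, since onenormest computes the norm directly when t >= n.
#
# >>> A = np.array([[1., 2.], [3., 4.]])
# >>> _onenormest_matrix_power(A, 2)   # ||A^2||_1, with A^2 = [[7, 10], [15, 22]]
# 32.0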
class LazyOperatorNormInfo:
"""
Information about an operator is lazily computed.
The information includes the exact 1-norm of the operator,
in addition to estimates of 1-norms of powers of the operator.
This uses the notation of Computing the Action (2011).
This class is specialized enough to probably not be of general interest
outside of this module.
"""
def __init__(self, A, A_1_norm=None, ell=2, scale=1):
"""
Provide the operator and some norm-related information.
Parameters
----------
A : linear operator
The operator of interest.
A_1_norm : float, optional
The exact 1-norm of A.
ell : int, optional
A technical parameter controlling norm estimation quality.
scale : int, optional
If specified, return the norms of scale*A instead of A.
"""
self._A = A
self._A_1_norm = A_1_norm
self._ell = ell
self._d = {}
self._scale = scale
def set_scale(self, scale):
"""
Set the scale parameter.
"""
self._scale = scale
def onenorm(self):
"""
Compute the exact 1-norm.
"""
if self._A_1_norm is None:
self._A_1_norm = _exact_1_norm(self._A)
return self._scale*self._A_1_norm
def d(self, p):
"""
Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
"""
if p not in self._d:
est = _onenormest_matrix_power(self._A, p, self._ell)
self._d[p] = est ** (1.0 / p)
return self._scale*self._d[p]
def alpha(self, p):
"""
Lazily compute max(d(p), d(p+1)).
"""
return max(self.d(p), self.d(p+1))
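# A small sketch of the lazy bookkeeping (illustrative only):
#
# >>> info = LazyOperatorNormInfo(np.diag([2.0, 3.0]))
# >>> info.onenorm()        # computed once, then cached
# 3.0
# >>> info.set_scale(2)
# >>> info.onenorm()        # cache reused; only the scale changes
# 6.0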
def _compute_cost_div_m(m, p, norm_info):
"""
A helper function for computing bounds.
This is equation (3.10).
It measures cost in terms of the number of required matrix products.
Parameters
----------
m : int
A valid key of _theta.
p : int
A matrix power.
norm_info : LazyOperatorNormInfo
Information about 1-norms of related operators.
Returns
-------
cost_div_m : int
Required number of matrix products divided by m.
"""
return int(np.ceil(norm_info.alpha(p) / _theta[m]))
def _compute_p_max(m_max):
"""
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
Do this in a slightly dumb way, but safe and not too slow.
Parameters
----------
m_max : int
A count related to bounds.
"""
sqrt_m_max = np.sqrt(m_max)
p_low = int(np.floor(sqrt_m_max))
p_high = int(np.ceil(sqrt_m_max + 1))
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
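# For instance (illustrative check):
#
# >>> _compute_p_max(55)    # largest p with p*(p-1) <= 56
# 8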
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
"""
A helper function for the _expm_multiply_* functions.
Parameters
----------
norm_info : LazyOperatorNormInfo
Information about norms of certain linear operators of interest.
n0 : int
Number of columns in the _expm_multiply_* B matrix.
tol : float
Expected to be
:math:`2^{-24}` for single precision or
:math:`2^{-53}` for double precision.
m_max : int
A value related to a bound.
ell : int
The number of columns used in the 1-norm approximation.
This is usually taken to be small, maybe between 1 and 5.
Returns
-------
best_m : int
Related to bounds for error control.
best_s : int
Amount of scaling.
Notes
-----
This is code fragment (3.1) in Al-Mohy and Higham (2011).
The discussion of default values for m_max and ell
is given between the definitions of equation (3.11)
and the definition of equation (3.12).
"""
if ell < 1:
raise ValueError('expected ell to be a positive integer')
best_m = None
best_s = None
if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
for m, theta in _theta.items():
s = int(np.ceil(norm_info.onenorm() / theta))
if best_m is None or m * s < best_m * best_s:
best_m = m
best_s = s
else:
# Equation (3.11).
for p in range(2, _compute_p_max(m_max) + 1):
for m in range(p*(p-1)-1, m_max+1):
if m in _theta:
s = _compute_cost_div_m(m, p, norm_info)
if best_m is None or m * s < best_m * best_s:
best_m = m
best_s = s
best_s = max(best_s, 1)
return best_m, best_s
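# A worked sketch (illustrative values): for an operator with exact
# 1-norm 4.0 and a single column (n0=1), condition (3.13) holds and the
# scan over _theta picks 35 Taylor terms with no scaling.
#
# >>> info = LazyOperatorNormInfo(np.diag([4.0, -4.0]), A_1_norm=4.0)
# >>> _fragment_3_1(info, n0=1, tol=2**-53)
# (35, 1)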
def _condition_3_13(A_1_norm, n0, m_max, ell):
"""
A helper function for the _expm_multiply_* functions.
Parameters
----------
A_1_norm : float
The precomputed 1-norm of A.
n0 : int
Number of columns in the _expm_multiply_* B matrix.
m_max : int
A value related to a bound.
ell : int
The number of columns used in the 1-norm approximation.
This is usually taken to be small, maybe between 1 and 5.
Returns
-------
value : bool
Indicates whether or not the condition has been met.
Notes
-----
This is condition (3.13) in Al-Mohy and Higham (2011).
"""
# This is the rhs of equation (3.12).
p_max = _compute_p_max(m_max)
a = 2 * ell * p_max * (p_max + 3)
# Evaluate the condition (3.13).
b = _theta[m_max] / float(n0 * m_max)
return A_1_norm <= a * b
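# A worked instance of the threshold (illustrative only): with the
# defaults m_max=55, ell=2, and n0=1, p_max is 8, so the right-hand side
# is 2*2*8*(8+3) * _theta[55]/(1*55) = 352 * 0.18 = 63.36; any 1-norm at
# or below that takes the condition (3.13) branch in _fragment_3_1 above.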
def _expm_multiply_interval(A, B, start=None, stop=None,
num=None, endpoint=None, balance=False, status_only=False):
"""
Compute the action of the matrix exponential at multiple time points.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
balance : bool
Indicates whether or not to apply balancing.
status_only : bool
A flag that is set to True for some debugging and testing operations.
Returns
-------
F : ndarray
:math:`e^{t_k A} B`
status : int
An integer status for testing and debugging.
Notes
-----
This is algorithm (5.2) in Al-Mohy and Higham (2011).
There seems to be a typo, where line 15 of the algorithm should be
moved to line 6.5 (between lines 6 and 7).
"""
if balance:
raise NotImplementedError
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if A.shape[1] != B.shape[0]:
raise ValueError('the matrices A and B have incompatible shapes')
ident = _ident_like(A)
n = A.shape[0]
if len(B.shape) == 1:
n0 = 1
elif len(B.shape) == 2:
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = 2**-53
tol = u_d
mu = _trace(A) / float(n)
# Get the linspace samples, attempting to preserve the linspace defaults.
linspace_kwargs = {'retstep': True}
if num is not None:
linspace_kwargs['num'] = num
if endpoint is not None:
linspace_kwargs['endpoint'] = endpoint
samples, step = np.linspace(start, stop, **linspace_kwargs)
# Convert the linspace output to the notation used by the publication.
nsamples = len(samples)
if nsamples < 2:
raise ValueError('at least two time points are required')
q = nsamples - 1
h = step
t_0 = samples[0]
t_q = samples[q]
# Define the output ndarray.
# Use an ndim=3 shape, such that the last two indices
# are the ones that may be involved in level 3 BLAS operations.
X_shape = (nsamples,) + B.shape
X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
t = t_q - t_0
A = A - mu * ident
A_1_norm = _exact_1_norm(A)
ell = 2
norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
if t*A_1_norm == 0:
m_star, s = 0, 1
else:
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
# Compute the expm action up to the initial time point.
X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)
# Compute the expm action at the rest of the time points.
if q <= s:
if status_only:
return 0
else:
return _expm_multiply_interval_core_0(A, X,
h, mu, q, norm_info, tol, ell, n0)
elif not (q % s):
if status_only:
return 1
else:
return _expm_multiply_interval_core_1(A, X,
h, mu, m_star, s, q, tol)
elif (q % s):
if status_only:
return 2
else:
return _expm_multiply_interval_core_2(A, X,
h, mu, m_star, s, q, tol)
else:
raise Exception('internal error')
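# An end-to-end sketch (illustrative only): on a uniform grid the last
# sample must agree with the dense matrix exponential at the end point.
#
# >>> A = np.diag([-1.0, -2.0])
# >>> F, status = _expm_multiply_interval(A, np.eye(2), 0.0, 1.0, num=5,
# ...                                     endpoint=True)
# >>> np.allclose(F[-1], scipy.linalg.expm(A))
# True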
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
"""
A helper function, for the case q <= s.
"""
# Compute the new values of m_star and s which should be applied
# over intervals of size t/q
if norm_info.onenorm() == 0:
m_star, s = 0, 1
else:
norm_info.set_scale(1./q)
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
norm_info.set_scale(1)
for k in range(q):
X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
return X, 0
def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
"""
A helper function, for the case q > s and q % s == 0.
"""
d = q // s
input_shape = X.shape[1:]
K_shape = (m_star + 1, ) + input_shape
K = np.empty(K_shape, dtype=X.dtype)
for i in range(s):
Z = X[i*d]
K[0] = Z
high_p = 0
for k in range(1, d+1):
F = K[0]
c1 = _exact_inf_norm(F)
for p in range(1, m_star+1):
if p > high_p:
K[p] = h * A.dot(K[p-1]) / float(p)
coeff = float(pow(k, p))
F = F + coeff * K[p]
inf_norm_K_p_1 = _exact_inf_norm(K[p])
c2 = coeff * inf_norm_K_p_1
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
X[k + i*d] = np.exp(k*h*mu) * F
return X, 1
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
"""
A helper function, for the case q > s and q % s > 0.
"""
d = q // s
j = q // d
r = q - d * j
input_shape = X.shape[1:]
K_shape = (m_star + 1, ) + input_shape
K = np.empty(K_shape, dtype=X.dtype)
for i in range(j + 1):
Z = X[i*d]
K[0] = Z
high_p = 0
if i < j:
effective_d = d
else:
effective_d = r
for k in range(1, effective_d+1):
F = K[0]
c1 = _exact_inf_norm(F)
for p in range(1, m_star+1):
if p == high_p + 1:
K[p] = h * A.dot(K[p-1]) / float(p)
high_p = p
coeff = float(pow(k, p))
F = F + coeff * K[p]
inf_norm_K_p_1 = _exact_inf_norm(K[p])
c2 = coeff * inf_norm_K_p_1
if c1 + c2 <= tol * _exact_inf_norm(F):
break
c1 = c2
X[k + i*d] = np.exp(k*h*mu) * F
return X, 2
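# Both cores above reuse the same recurrence (illustrative summary):
# K[p] holds (h*A)**p . Z / p!, built incrementally via
# K[p] = h * A.dot(K[p-1]) / p, so that
#     F = sum_p k**p * K[p]  ~=  expm(k*h*A).dot(Z)
# recovers the action at offset k*h from the segment start Z, and the
# exp(k*h*mu) factor restores the trace shift removed earlier.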

View file

@@ -0,0 +1,182 @@
"""Sparse matrix norms.
"""
import numpy as np
from scipy.sparse import issparse
from numpy import Inf, sqrt, abs
__all__ = ['norm']
def _sparse_frobenius_norm(x):
if np.issubdtype(x.dtype, np.complexfloating):
sqnorm = abs(x).power(2).sum()
else:
sqnorm = x.power(2).sum()
return sqrt(sqnorm)
def norm(x, ord=None, axis=None):
"""
Norm of a sparse matrix
This function is able to return one of seven different matrix norms,
depending on the value of the ``ord`` parameter.
Parameters
----------
x : a sparse matrix
Input sparse matrix.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Notes
-----
Some of the ord values are not implemented because the associated
functions, such as _multi_svd_norm, are not yet available for sparse
matrices.
This docstring is adapted from numpy.linalg.norm.
https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py
The following norms can be calculated:
===== ============================
ord norm for sparse matrices
===== ============================
None Frobenius norm
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
0 abs(x).sum(axis=axis)
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 Not implemented
-2 Not implemented
other Not implemented
===== ============================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from scipy.sparse import *
>>> import numpy as np
>>> from scipy.sparse.linalg import norm
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> b = csr_matrix(b)
>>> norm(b)
7.745966692414834
>>> norm(b, 'fro')
7.745966692414834
>>> norm(b, np.inf)
9
>>> norm(b, -np.inf)
2
>>> norm(b, 1)
7
>>> norm(b, -1)
6
"""
if not issparse(x):
raise TypeError("input is not sparse. use numpy.linalg.norm")
# Check the default case first and handle it immediately.
if axis is None and ord in (None, 'fro', 'f'):
return _sparse_frobenius_norm(x)
# Some norms require functions that are not implemented for all types.
x = x.tocsr()
if axis is None:
axis = (0, 1)
elif not isinstance(axis, tuple):
msg = "'axis' must be None, an integer or a tuple of integers"
try:
int_axis = int(axis)
except TypeError:
raise TypeError(msg)
if axis != int_axis:
raise TypeError(msg)
axis = (int_axis,)
nd = 2
if len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
raise NotImplementedError
#return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
raise NotImplementedError
#return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]
elif ord == Inf:
return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]
elif ord == -1:
return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]
elif ord == -Inf:
return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]
elif ord in (None, 'f', 'fro'):
# The axis order does not matter for this norm.
return _sparse_frobenius_norm(x)
else:
raise ValueError("Invalid norm order for matrices.")
elif len(axis) == 1:
a, = axis
if not (-nd <= a < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if ord == Inf:
M = abs(x).max(axis=a)
elif ord == -Inf:
M = abs(x).min(axis=a)
elif ord == 0:
# Zero norm
M = (x != 0).sum(axis=a)
elif ord == 1:
# special case for speedup
M = abs(x).sum(axis=a)
elif ord in (2, None):
M = sqrt(abs(x).power(2).sum(axis=a))
else:
try:
ord + 1
except TypeError:
raise ValueError('Invalid norm order for vectors.')
M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)
return M.A.ravel()
else:
raise ValueError("Improper number of dimensions to norm.")

View file

@@ -0,0 +1,466 @@
"""Sparse block 1-norm estimator.
"""
import numpy as np
from scipy.sparse.linalg import aslinearoperator
__all__ = ['onenormest']
def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False):
"""
Compute a lower bound of the 1-norm of a sparse matrix.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can be transposed and that can
produce matrix products.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
Notes
-----
This is algorithm 2.4 of [1].
In [2] it is described as follows.
"This algorithm typically requires the evaluation of
about 4t matrix-vector products and almost invariably
produces a norm estimate (which is, in fact, a lower
bound on the norm) correct to within a factor 3."
.. versionadded:: 0.13.0
References
----------
.. [1] Nicholas J. Higham and Francoise Tisseur (2000),
"A Block Algorithm for Matrix 1-Norm Estimation,
with an Application to 1-Norm Pseudospectra."
SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201.
.. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009),
"A new scaling and squaring algorithm for the matrix exponential."
SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import onenormest
>>> A = csc_matrix([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float)
>>> A.todense()
matrix([[ 1., 0., 0.],
[ 5., 8., 2.],
[ 0., -1., 0.]])
>>> onenormest(A)
9.0
>>> np.linalg.norm(A.todense(), ord=1)
9.0
"""
# Check the input.
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError('expected the operator to act like a square matrix')
# If the operator size is small compared to t,
# then it is easier to compute the exact norm.
# Otherwise estimate the norm.
n = A.shape[1]
if t >= n:
A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n)))
if A_explicit.shape != (n, n):
raise Exception('internal error: ',
'unexpected shape ' + str(A_explicit.shape))
col_abs_sums = abs(A_explicit).sum(axis=0)
if col_abs_sums.shape != (n, ):
raise Exception('internal error: ',
'unexpected shape ' + str(col_abs_sums.shape))
argmax_j = np.argmax(col_abs_sums)
v = elementary_vector(n, argmax_j)
w = A_explicit[:, argmax_j]
est = col_abs_sums[argmax_j]
else:
est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax)
# Report the norm estimate along with some certificates of the estimate.
if compute_v or compute_w:
result = (est,)
if compute_v:
result += (v,)
if compute_w:
result += (w,)
return result
else:
return est
def _blocked_elementwise(func):
"""
Decorator for an elementwise function, to apply it blockwise along
first dimension, to avoid excessive memory usage in temporaries.
"""
block_size = 2**20
def wrapper(x):
if x.shape[0] < block_size:
return func(x)
else:
y0 = func(x[:block_size])
y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype)
y[:block_size] = y0
del y0
for j in range(block_size, x.shape[0], block_size):
y[j:j+block_size] = func(x[j:j+block_size])
return y
return wrapper
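# A sketch of the decorator (illustrative only): small inputs take the
# direct path; longer inputs are processed in 2**20-row blocks.
#
# >>> @_blocked_elementwise
# ... def _double(x):
# ...     return 2 * x
# >>> _double(np.arange(4))
# array([0, 2, 4, 6])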
@_blocked_elementwise
def sign_round_up(X):
"""
This should do the right thing for both real and complex matrices.
From Higham and Tisseur:
"Everything in this section remains valid for complex matrices
provided that sign(A) is redefined as the matrix (aij / |aij|)
(and sign(0) = 1) transposes are replaced by conjugate transposes."
"""
Y = X.copy()
Y[Y == 0] = 1
Y /= np.abs(Y)
return Y
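# For example (illustrative check):
#
# >>> sign_round_up(np.array([[-2., 0.], [3., -0.5]]))
# array([[-1.,  1.],
#        [ 1., -1.]])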
@_blocked_elementwise
def _max_abs_axis1(X):
return np.max(np.abs(X), axis=1)
def _sum_abs_axis0(X):
block_size = 2**20
r = None
for j in range(0, X.shape[0], block_size):
y = np.sum(np.abs(X[j:j+block_size]), axis=0)
if r is None:
r = y
else:
r += y
return r
def elementary_vector(n, i):
v = np.zeros(n, dtype=float)
v[i] = 1
return v
def vectors_are_parallel(v, w):
# Columns are considered parallel when they are equal or negative.
# Entries are required to be in {-1, 1},
# which guarantees that the magnitudes of the vectors are identical.
if v.ndim != 1 or v.shape != w.shape:
raise ValueError('expected conformant vectors with entries in {-1,1}')
n = v.shape[0]
return np.dot(v, w) == n
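# With entries restricted to {-1, 1}, np.dot(v, w) == n holds exactly
# when the columns are equal (illustrative check):
#
# >>> v = np.array([1., -1., 1.])
# >>> vectors_are_parallel(v, v), vectors_are_parallel(v, -v)
# (True, False)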
def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y):
for v in X.T:
if not any(vectors_are_parallel(v, w) for w in Y.T):
return False
return True
def column_needs_resampling(i, X, Y=None):
# column i of X needs resampling if either
# it is parallel to a previous column of X or
# it is parallel to a column of Y
n, t = X.shape
v = X[:, i]
if any(vectors_are_parallel(v, X[:, j]) for j in range(i)):
return True
if Y is not None:
if any(vectors_are_parallel(v, w) for w in Y.T):
return True
return False
def resample_column(i, X):
X[:, i] = np.random.randint(0, 2, size=X.shape[0])*2 - 1
def less_than_or_close(a, b):
return np.allclose(a, b) or (a < b)
def _algorithm_2_2(A, AT, t):
"""
This is Algorithm 2.2.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Returns
-------
g : sequence
A non-negative decreasing vector
such that g[j] is a lower bound for the 1-norm
of the column of A of jth largest 1-norm.
The first entry of this vector is therefore a lower bound
on the 1-norm of the linear operator A.
This sequence has length t.
ind : sequence
The ith entry of ind is the index of the column A whose 1-norm
is given by g[i].
This sequence of indices has length t, and its entries are
chosen from range(n), possibly with repetition,
where n is the order of the operator A.
Notes
-----
This algorithm is mainly for testing.
It uses the 'ind' array in a way that is similar to
its usage in algorithm 2.4. This algorithm 2.2 may be easier to test,
so it gives a chance of uncovering bugs related to indexing
which could have propagated less noticeably to algorithm 2.4.
"""
A_linear_operator = aslinearoperator(A)
AT_linear_operator = aslinearoperator(AT)
n = A_linear_operator.shape[0]
# Initialize the X block with columns of unit 1-norm.
X = np.ones((n, t))
if t > 1:
X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1
X /= float(n)
# Iteratively improve the lower bounds.
# Track extra things, to assert invariants for debugging.
g_prev = None
h_prev = None
k = 1
ind = range(t)
while True:
Y = np.asarray(A_linear_operator.matmat(X))
g = _sum_abs_axis0(Y)
best_j = np.argmax(g)
g.sort()
g = g[::-1]
S = sign_round_up(Y)
Z = np.asarray(AT_linear_operator.matmat(S))
h = _max_abs_axis1(Z)
# If this algorithm runs for fewer than two iterations,
# then its return values do not have the properties indicated
# in the description of the algorithm.
# In particular, the entries of g are not 1-norms of any
# column of A until the second iteration.
# Therefore we will require the algorithm to run for at least
# two iterations, even though this requirement is not stated
# in the description of the algorithm.
if k >= 2:
if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])):
break
ind = np.argsort(h)[::-1][:t]
h = h[ind]
for j in range(t):
X[:, j] = elementary_vector(n, ind[j])
# Check invariant (2.2).
if k >= 2:
if not less_than_or_close(g_prev[0], h_prev[0]):
raise Exception('invariant (2.2) is violated')
if not less_than_or_close(h_prev[0], g[0]):
raise Exception('invariant (2.2) is violated')
# Check invariant (2.3).
if k >= 3:
for j in range(t):
if not less_than_or_close(g[j], g_prev[j]):
raise Exception('invariant (2.3) is violated')
# Update for the next iteration.
g_prev = g
h_prev = h
k += 1
# Return the lower bounds and the corresponding column indices.
return g, ind
def _onenormest_core(A, AT, t, itmax):
"""
Compute a lower bound of the 1-norm of a sparse matrix.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
itmax : int, optional
Use at most this many iterations.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
nmults : int, optional
The number of matrix products that were computed.
nresamples : int, optional
The number of times a parallel column was observed,
necessitating a re-randomization of the column.
Notes
-----
This is algorithm 2.4.
"""
# This function is a more or less direct translation
# of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
A_linear_operator = aslinearoperator(A)
AT_linear_operator = aslinearoperator(AT)
if itmax < 2:
raise ValueError('at least two iterations are required')
if t < 1:
raise ValueError('at least one column is required')
n = A.shape[0]
if t >= n:
raise ValueError('t should be smaller than the order of A')
# Track the number of big*small matrix multiplications
# and the number of resamplings.
nmults = 0
nresamples = 0
# "We now explain our choice of starting matrix. We take the first
# column of X to be the vector of 1s [...] This has the advantage that
# for a matrix with nonnegative elements the algorithm converges
# with an exact estimate on the second iteration, and such matrices
# arise in applications [...]"
X = np.ones((n, t), dtype=float)
# "The remaining columns are chosen as rand{-1,1},
# with a check for and correction of parallel columns,
# exactly as for S in the body of the algorithm."
if t > 1:
for i in range(1, t):
# These are technically initial samples, not resamples,
# so the resampling count is not incremented.
resample_column(i, X)
for i in range(t):
while column_needs_resampling(i, X):
resample_column(i, X)
nresamples += 1
# "Choose starting matrix X with columns of unit 1-norm."
X /= float(n)
# "indices of used unit vectors e_j"
ind_hist = np.zeros(0, dtype=np.intp)
est_old = 0
S = np.zeros((n, t), dtype=float)
k = 1
ind = None
while True:
Y = np.asarray(A_linear_operator.matmat(X))
nmults += 1
mags = _sum_abs_axis0(Y)
est = np.max(mags)
best_j = np.argmax(mags)
if est > est_old or k == 2:
if k >= 2:
ind_best = ind[best_j]
w = Y[:, best_j]
# (1)
if k >= 2 and est <= est_old:
est = est_old
break
est_old = est
S_old = S
if k > itmax:
break
S = sign_round_up(Y)
del Y
# (2)
if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
break
if t > 1:
# "Ensure that no column of S is parallel to another column of S
# or to a column of S_old by replacing columns of S by rand{-1,1}."
for i in range(t):
while column_needs_resampling(i, S, S_old):
resample_column(i, S)
nresamples += 1
del S_old
# (3)
Z = np.asarray(AT_linear_operator.matmat(S))
nmults += 1
h = _max_abs_axis1(Z)
del Z
# (4)
if k >= 2 and max(h) == h[ind_best]:
break
# "Sort h so that h_first >= ... >= h_last
# and re-order ind correspondingly."
#
# Later on, we will need at most t+len(ind_hist) largest
# entries, so drop the rest
ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
del h
if t > 1:
# (5)
# Break if the most promising t vectors have been visited already.
if np.in1d(ind[:t], ind_hist).all():
break
# Put the most promising unvisited vectors at the front of the list
# and put the visited vectors at the end of the list.
# Preserve the order of the indices induced by the ordering of h.
seen = np.in1d(ind, ind_hist)
ind = np.concatenate((ind[~seen], ind[seen]))
for j in range(t):
X[:, j] = elementary_vector(n, ind[j])
new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)]
ind_hist = np.concatenate((ind_hist, new_ind))
k += 1
v = elementary_vector(n, ind_best)
return est, v, w, nmults, nresamples

View file

@@ -0,0 +1,29 @@
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
(1) Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
(2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
(3) Neither the name of Lawrence Berkeley National Laboratory, U.S. Dept. of
Energy nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -0,0 +1,66 @@
"""
Linear Solvers
==============
The default solver is SuperLU (included in the scipy distribution),
which can solve real or complex linear systems in both single and
double precisions. It is automatically replaced by UMFPACK, if
available. Note that UMFPACK works in double precision only, so
switch it off by::
>>> use_solver(useUmfpack=False)
to solve in single precision. See also the use_solver documentation.
Example session::
>>> from scipy.sparse import csc_matrix, spdiags
>>> from numpy import array
>>> from scipy.sparse.linalg import spsolve, use_solver
>>>
>>> print("Inverting a sparse linear system:")
>>> print("The sparse matrix (constructed from diagonals):")
>>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
>>> b = array([1, 2, 3, 4, 5])
>>> print("Solve: single precision complex:")
>>> use_solver( useUmfpack = False )
>>> a = a.astype('F')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a*x-b)
>>>
>>> print("Solve: double precision complex:")
>>> use_solver( useUmfpack = True )
>>> a = a.astype('D')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a*x-b)
>>>
>>> print("Solve: double precision:")
>>> a = a.astype('d')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a*x-b)
>>>
>>> print("Solve: single precision:")
>>> use_solver( useUmfpack = False )
>>> a = a.astype('f')
>>> x = spsolve(a, b.astype('f'))
>>> print(x)
>>> print("Error: ", a*x-b)
"""
#import umfpack
#__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) )
#del umfpack
from .linsolve import *
from ._superlu import SuperLU
from . import _add_newdocs
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

View file

@@ -0,0 +1,152 @@
from numpy.lib import add_newdoc
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU',
"""
LU factorization of a sparse matrix.
Factorization is represented as::
Pr * A * Pc = L * U
To construct these `SuperLU` objects, call the `splu` and `spilu`
functions.
Attributes
----------
shape
nnz
perm_c
perm_r
L
U
Methods
-------
solve
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
The LU decomposition can be used to solve matrix equations. Consider:
>>> import numpy as np
>>> from scipy.sparse import csc_matrix, linalg as sla
>>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]])
This can be solved for a given right-hand side:
>>> lu = sla.splu(A)
>>> b = np.array([1, 2, 3, 4])
>>> x = lu.solve(b)
>>> A.dot(x)
array([ 1., 2., 3., 4.])
The ``lu`` object also contains an explicit representation of the
decomposition. The permutations are represented as mappings of
indices:
>>> lu.perm_r
array([0, 2, 1, 3], dtype=int32)
>>> lu.perm_c
array([2, 0, 1, 3], dtype=int32)
The L and U factors are sparse matrices in CSC format:
>>> lu.L.A
array([[ 1. , 0. , 0. , 0. ],
[ 0. , 1. , 0. , 0. ],
[ 0. , 0. , 1. , 0. ],
[ 1. , 0.5, 0.5, 1. ]])
>>> lu.U.A
array([[ 2., 0., 1., 4.],
[ 0., 2., 1., 1.],
[ 0., 0., 1., 1.],
[ 0., 0., 0., -5.]])
The permutation matrices can be constructed:
>>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4))))
>>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c)))
We can reassemble the original matrix:
>>> (Pr.T * (lu.L * lu.U) * Pc.T).A
array([[ 1., 2., 0., 4.],
[ 1., 0., 0., 1.],
[ 1., 0., 2., 1.],
[ 2., 2., 1., 0.]])
""")
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('solve',
"""
solve(rhs[, trans])
Solves linear system of equations with one or several right-hand sides.
Parameters
----------
rhs : ndarray, shape (n,) or (n, k)
Right hand side(s) of equation
trans : {'N', 'T', 'H'}, optional
Type of system to solve::
'N': A * x == rhs (default)
'T': A^T * x == rhs
'H': A^H * x == rhs
i.e., normal, transposed, and Hermitian conjugate.
Returns
-------
x : ndarray, shape ``rhs.shape``
Solution vector(s)
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('L',
"""
Lower triangular factor with unit diagonal as a
`scipy.sparse.csc_matrix`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('U',
"""
Upper triangular factor as a `scipy.sparse.csc_matrix`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('shape',
"""
Shape of the original matrix as a tuple of ints.
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('nnz',
"""
Number of nonzero elements in the matrix.
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_c',
"""
Permutation Pc represented as an array of indices.
The column permutation matrix can be reconstructed via:
>>> Pc = np.zeros((n, n))
>>> Pc[np.arange(n), perm_c] = 1
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_r',
"""
Permutation Pr represented as an array of indices.
The row permutation matrix can be reconstructed via:
>>> Pr = np.zeros((n, n))
>>> Pr[perm_r, np.arange(n)] = 1
"""))

View file

@@ -0,0 +1,626 @@
from warnings import warn
import numpy as np
from numpy import asarray
from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix,
SparseEfficiencyWarning, csc_matrix, csr_matrix)
from scipy.sparse.sputils import is_pydata_spmatrix
from scipy.linalg import LinAlgError
import copy
from . import _superlu
noScikit = False
try:
import scikits.umfpack as umfpack
except ImportError:
noScikit = True
useUmfpack = not noScikit
__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
'MatrixRankWarning', 'spsolve_triangular']
class MatrixRankWarning(UserWarning):
pass
def use_solver(**kwargs):
"""
Select default sparse direct solver to be used.
Parameters
----------
useUmfpack : bool, optional
Use UMFPACK over SuperLU. Has effect only if scikits.umfpack is
installed. Default: True
assumeSortedIndices : bool, optional
Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
Has effect only if useUmfpack is True and scikits.umfpack is installed.
Default: False
Notes
-----
The default sparse solver is umfpack when available
(scikits.umfpack is installed). This can be changed by passing
useUmfpack = False, which then causes the always present SuperLU
based solver to be used.
Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
you are sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
to gain some speed.
"""
if 'useUmfpack' in kwargs:
globals()['useUmfpack'] = kwargs['useUmfpack']
if useUmfpack and 'assumeSortedIndices' in kwargs:
umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
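# A minimal sketch (illustrative only): the call toggles the module-level
# flag that spsolve and factorized consult.
#
# >>> use_solver(useUmfpack=False)   # force the always-available SuperLU path
# >>> useUmfpack
# False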
def _get_umf_family(A):
"""Get umfpack family string given the sparse matrix dtype."""
_families = {
(np.float64, np.int32): 'di',
(np.complex128, np.int32): 'zi',
(np.float64, np.int64): 'dl',
(np.complex128, np.int64): 'zl'
}
f_type = np.sctypeDict[A.dtype.name]
i_type = np.sctypeDict[A.indices.dtype.name]
try:
family = _families[(f_type, i_type)]
except KeyError:
msg = 'only float64 or complex128 matrices with int32 or int64' \
' indices are supported! (got: matrix: %s, indices: %s)' \
% (f_type, i_type)
raise ValueError(msg)
# See gh-8278. Considered converting only if
# A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
# but that didn't always fix the issue.
family = family[0] + "l"
A_new = copy.copy(A)
A_new.indptr = np.array(A.indptr, copy=False, dtype=np.int64)
A_new.indices = np.array(A.indices, copy=False, dtype=np.int64)
return family, A_new
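# A dtype-mapping sketch (illustrative only, assuming the default int32
# index dtype): float64 data maps to a 'd' family, and indices are always
# promoted to 64-bit, hence the trailing 'l'.
#
# >>> A = csc_matrix(np.eye(2))      # float64 data, int32 indices
# >>> _get_umf_family(A)[0]
# 'dl'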
def spsolve(A, b, permc_spec=None, use_umfpack=True):
"""Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse matrix
The matrix or vector representing the right hand side of the equation.
If a vector, b.shape must be (n,) or (n, 1).
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
use_umfpack : bool, optional
if True (default) then use umfpack for the solution. This is
only referenced if b is a vector and ``scikit-umfpack`` is installed.
Returns
-------
x : ndarray or sparse matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[1]
If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
Notes
-----
For solving the matrix expression AX = B, this solver assumes the resulting
matrix X is sparse, as is often the case for very sparse inputs. If the
resulting X is dense, the construction of this sparse result will be
relatively expensive. In that case, consider converting A to a dense
matrix and using scipy.linalg.solve or its variants.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import spsolve
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
>>> x = spsolve(A, B)
>>> np.allclose(A.dot(x).todense(), B.todense())
True
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsc()
if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
A = csc_matrix(A)
warn('spsolve requires A be CSC or CSR matrix format',
SparseEfficiencyWarning)
# b is a vector only if b has shape (n,) or (n, 1)
b_is_sparse = isspmatrix(b) or is_pydata_spmatrix(b)
if not b_is_sparse:
b = asarray(b)
b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A.asfptype() # upcast to a floating point format
result_dtype = np.promote_types(A.dtype, b.dtype)
if A.dtype != result_dtype:
A = A.astype(result_dtype)
if b.dtype != result_dtype:
b = b.astype(result_dtype)
# validate input shapes
M, N = A.shape
if (M != N):
raise ValueError("matrix must be square (has shape %s)" % ((M, N),))
if M != b.shape[0]:
raise ValueError("matrix - rhs dimension mismatch (%s - %s)"
% (A.shape, b.shape[0]))
use_umfpack = use_umfpack and useUmfpack
if b_is_vector and use_umfpack:
if b_is_sparse:
b_vec = b.toarray()
else:
b_vec = b
b_vec = asarray(b_vec, dtype=A.dtype).ravel()
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
umf_family, A = _get_umf_family(A)
umf = umfpack.UmfpackContext(umf_family)
x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
autoTranspose=True)
else:
if b_is_vector and b_is_sparse:
b = b.toarray()
b_is_sparse = False
if not b_is_sparse:
if isspmatrix_csc(A):
flag = 1 # CSC format
else:
flag = 0 # CSR format
options = dict(ColPerm=permc_spec)
x, info = _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr,
b, flag, options=options)
if info != 0:
warn("Matrix is exactly singular", MatrixRankWarning)
x.fill(np.nan)
if b_is_vector:
x = x.ravel()
else:
# b is sparse
Afactsolve = factorized(A)
if not (isspmatrix_csc(b) or is_pydata_spmatrix(b)):
warn('spsolve is more efficient when sparse b '
'is in the CSC matrix format', SparseEfficiencyWarning)
b = csc_matrix(b)
# Create a sparse output matrix by repeatedly applying
# the sparse factorization to solve columns of b.
data_segs = []
row_segs = []
col_segs = []
for j in range(b.shape[1]):
bj = np.asarray(b[:, j].todense()).ravel()
xj = Afactsolve(bj)
w = np.flatnonzero(xj)
segment_length = w.shape[0]
row_segs.append(w)
col_segs.append(np.full(segment_length, j, dtype=int))
data_segs.append(np.asarray(xj[w], dtype=A.dtype))
sparse_data = np.concatenate(data_segs)
sparse_row = np.concatenate(row_segs)
sparse_col = np.concatenate(col_segs)
x = A.__class__((sparse_data, (sparse_row, sparse_col)),
shape=b.shape, dtype=A.dtype)
if is_pydata_spmatrix(b):
x = b.__class__(x)
return x
def splu(A, permc_spec=None, diag_pivot_thresh=None,
relax=None, panel_size=None, options=dict()):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A : sparse matrix
Sparse matrix to factorize. Should be in CSR or CSC format.
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
diag_pivot_thresh : float, optional
Threshold used for a diagonal entry to be an acceptable pivot.
See SuperLU user's guide for details [1]_
relax : int, optional
Expert option for customizing the degree of relaxing supernodes.
See SuperLU user's guide for details [1]_
panel_size : int, optional
Expert option for customizing the panel size.
See SuperLU user's guide for details [1]_
options : dict, optional
Dictionary containing additional expert options to SuperLU.
See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
for more details. For example, you can specify
``options=dict(Equil=False, IterRefine='SINGLE'))``
to turn equilibration off and perform a single iterative refinement.
Returns
-------
invA : scipy.sparse.linalg.SuperLU
Object, which has a ``solve`` method.
See also
--------
spilu : incomplete LU decomposition
Notes
-----
This function uses the SuperLU library.
References
----------
.. [1] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import splu
>>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
>>> B = splu(A)
>>> x = np.array([1., 2., 3.], dtype=float)
>>> B.solve(x)
array([ 1. , -3. , -1.5])
>>> A.dot(B.solve(x))
array([ 1., 2., 3.])
>>> B.solve(A.dot(x))
array([ 1., 2., 3.])
"""
if is_pydata_spmatrix(A):
csc_construct_func = lambda *a, cls=type(A): cls(csc_matrix(*a))
A = A.to_scipy_sparse().tocsc()
else:
csc_construct_func = csc_matrix
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A.asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
_options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
# Ensure that no column permutations are applied
if (_options["ColPerm"] == "NATURAL"):
_options["SymmetricMode"] = True
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
csc_construct_func=csc_construct_func,
ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
"""
Compute an incomplete LU decomposition for a sparse, square matrix.
The resulting object is an approximation to the inverse of `A`.
Parameters
----------
A : (N, N) array_like
Sparse matrix to factorize
drop_tol : float, optional
Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
(default: 1e-4)
fill_factor : float, optional
Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
drop_rule : str, optional
Comma-separated string of drop rules to use.
Available rules: ``basic``, ``prows``, ``column``, ``area``,
``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
See SuperLU documentation for details.
Remaining options
Same as for `splu`
Returns
-------
invA_approx : scipy.sparse.linalg.SuperLU
Object, which has a ``solve`` method.
See also
--------
splu : complete LU decomposition
Notes
-----
To improve the approximation to the inverse, you may need to
increase `fill_factor` AND decrease `drop_tol`.
This function uses the SuperLU library.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import spilu
>>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
>>> B = spilu(A)
>>> x = np.array([1., 2., 3.], dtype=float)
>>> B.solve(x)
array([ 1. , -3. , -1.5])
>>> A.dot(B.solve(x))
array([ 1., 2., 3.])
>>> B.solve(A.dot(x))
array([ 1., 2., 3.])
"""
if is_pydata_spmatrix(A):
csc_construct_func = lambda *a, cls=type(A): cls(csc_matrix(*a))
A = A.to_scipy_sparse().tocsc()
else:
csc_construct_func = csc_matrix
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A.asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
_options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
ILU_FillFactor=fill_factor,
DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
# Ensure that no column permutations are applied
if (_options["ColPerm"] == "NATURAL"):
_options["SymmetricMode"] = True
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
csc_construct_func=csc_construct_func,
ilu=True, options=_options)
def factorized(A):
"""
Return a function for solving a sparse linear system, with A pre-factorized.
Parameters
----------
A : (N, N) array_like
Input.
Returns
-------
solve : callable
To solve the linear system of equations given in `A`, the `solve`
callable should be passed an ndarray of shape (N,).
Examples
--------
>>> from scipy.sparse.linalg import factorized
>>> A = np.array([[ 3. , 2. , -1. ],
... [ 2. , -2. , 4. ],
... [-1. , 0.5, -1. ]])
>>> solve = factorized(A) # Makes LU decomposition.
>>> rhs1 = np.array([1, -2, 0])
>>> solve(rhs1) # Uses the LU factors.
array([ 1., -2., -2.])
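The factors are computed once and reused for each new right-hand side;
a quick consistency check:
>>> rhs2 = np.array([1., 0., 0.])
>>> np.allclose(A @ solve(rhs2), rhs2)
True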
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsc()
if useUmfpack:
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('factorized requires CSC matrix format', SparseEfficiencyWarning)
A = A.asfptype() # upcast to a floating point format
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
umf_family, A = _get_umf_family(A)
umf = umfpack.UmfpackContext(umf_family)
# Make LU decomposition.
umf.numeric(A)
def solve(b):
return umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
return solve
else:
return splu(A).solve
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
unit_diagonal=False):
"""
Solve the equation `A x = b` for `x`, assuming A is a triangular matrix.
Parameters
----------
A : (M, M) sparse matrix
A sparse square triangular matrix. Should be in CSR format.
b : (M,) or (M, N) array_like
Right-hand side matrix in `A x = b`
lower : bool, optional
Whether `A` is a lower or upper triangular matrix.
Default is lower triangular matrix.
overwrite_A : bool, optional
Allow changing `A`. The indices of `A` are going to be sorted and zero
entries are going to be removed.
Enabling gives a performance gain. Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b`.
Enabling gives a performance gain. Default is False.
If `overwrite_b` is True, it should be ensured that
`b` has an appropriate dtype to be able to store the result.
unit_diagonal : bool, optional
If True, diagonal elements of `A` are assumed to be 1 and will not be
referenced.
.. versionadded:: 1.4.0
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system `A x = b`. Shape of return matches shape of `b`.
Raises
------
LinAlgError
If `A` is singular or not triangular.
ValueError
If shape of `A` or shape of `b` do not match the requirements.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from scipy.sparse.linalg import spsolve_triangular
>>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
>>> x = spsolve_triangular(A, B)
>>> np.allclose(A.dot(x), B)
True
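An upper triangular system is solved the same way by passing
``lower=False`` (here simply reusing the transpose of `A`):
>>> x_up = spsolve_triangular(A.T.tocsr(), B, lower=False)
>>> np.allclose(A.T.dot(x_up), B)
True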
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsr()
# Check the input for correct type and format.
if not isspmatrix_csr(A):
warn('CSR matrix format is required. Converting to CSR matrix.',
SparseEfficiencyWarning)
A = csr_matrix(A)
elif not overwrite_A:
A = A.copy()
if A.shape[0] != A.shape[1]:
raise ValueError(
'A must be a square matrix but its shape is {}.'.format(A.shape))
# sum duplicates for non-canonical format
A.sum_duplicates()
b = np.asanyarray(b)
if b.ndim not in [1, 2]:
raise ValueError(
'b must have 1 or 2 dims but its shape is {}.'.format(b.shape))
if A.shape[0] != b.shape[0]:
raise ValueError(
'The size of the dimensions of A must be equal to '
'the size of the first dimension of b but the shape of A is '
'{} and the shape of b is {}.'.format(A.shape, b.shape))
# Init x as (a copy of) b.
x_dtype = np.result_type(A.data, b, np.float64)
if overwrite_b:
if np.can_cast(b.dtype, x_dtype, casting='same_kind'):
x = b
else:
raise ValueError(
'Cannot overwrite b (dtype {}) with result '
'of type {}.'.format(b.dtype, x_dtype))
else:
x = b.astype(x_dtype, copy=True)
# Choose forward or backward order.
if lower:
row_indices = range(len(b))
else:
row_indices = range(len(b) - 1, -1, -1)
# Fill x iteratively by forward (lower) or backward (upper) substitution:
# for each row, subtract the off-diagonal contributions of the entries
# computed so far, then divide by the diagonal.
for i in row_indices:
# Get indices for i-th row.
indptr_start = A.indptr[i]
indptr_stop = A.indptr[i + 1]
if lower:
A_diagonal_index_row_i = indptr_stop - 1
A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1)
else:
A_diagonal_index_row_i = indptr_start
A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop)
# Check regularity and triangularity of A.
if not unit_diagonal and (indptr_stop <= indptr_start
or A.indices[A_diagonal_index_row_i] < i):
raise LinAlgError(
'A is singular: diagonal {} is zero.'.format(i))
if A.indices[A_diagonal_index_row_i] > i:
raise LinAlgError(
'A is not triangular: A[{}, {}] is nonzero.'
''.format(i, A.indices[A_diagonal_index_row_i]))
# Incorporate off-diagonal entries.
A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i]
A_values_in_row_i = A.data[A_off_diagonal_indices_row_i]
x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i)
# Compute i-th entry of x.
if not unit_diagonal:
x[i] /= A.data[A_diagonal_index_row_i]
return x

View file

@ -0,0 +1,53 @@
from os.path import join, dirname
import sys
import glob
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from scipy._build_utils.system_info import get_info
from scipy._build_utils import numpy_nodepr_api
config = Configuration('dsolve',parent_package,top_path)
config.add_data_dir('tests')
lapack_opt = get_info('lapack_opt',notfound_action=2)
if sys.platform == 'win32':
superlu_defs = [('NO_TIMER',1)]
else:
superlu_defs = []
superlu_defs.append(('USE_VENDOR_BLAS',1))
superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')
sources = sorted(glob.glob(join(superlu_src, '*.c')))
headers = list(glob.glob(join(superlu_src, '*.h')))
config.add_library('superlu_src',
sources=sources,
macros=superlu_defs,
include_dirs=[superlu_src],
)
# Extension
ext_sources = ['_superlumodule.c',
'_superlu_utils.c',
'_superluobject.c']
config.add_extension('_superlu',
sources=ext_sources,
libraries=['superlu_src'],
depends=(sources + headers),
extra_info=lapack_opt,
**numpy_nodepr_api
)
# Add license files
config.add_data_files('SuperLU/License.txt')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())

View file

@ -0,0 +1,777 @@
import sys
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot
import numpy.random as random
from numpy.testing import (
assert_array_almost_equal, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose,
assert_warns, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
csr_matrix, identity, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)
from scipy.sparse.linalg import SuperLU
from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu, spsolve_triangular, factorized)
import scipy.sparse
from scipy._lib._testutils import check_free_memory
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
import scikits.umfpack as umfpack
has_umfpack = True
except ImportError:
has_umfpack = False
def toarray(a):
if isspmatrix(a):
return a.toarray()
else:
return a
def setup_bug_8278():
N = 2 ** 6
h = 1/N
Ah1D = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1],
shape=(N-1, N-1))/(h**2)
eyeN = scipy.sparse.eye(N - 1)
A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, Ah1D))
+ scipy.sparse.kron(eyeN, scipy.sparse.kron(Ah1D, eyeN))
+ scipy.sparse.kron(Ah1D, scipy.sparse.kron(eyeN, eyeN)))
b = np.random.rand((N-1)**3)
return A, b
class TestFactorized(object):
def setup_method(self):
n = 5
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
random.seed(1234)
def _check_singular(self):
A = csc_matrix((5,5), dtype='d')
b = ones(5)
assert_array_almost_equal(0. * b, factorized(A)(b))
def _check_non_singular(self):
# Make `a` diagonally dominant, to make sure it is not singular
n = 5
a = csc_matrix(random.rand(n, n))
b = ones(n)
expected = splu(a).solve(b)
assert_array_almost_equal(factorized(a)(b), expected)
def test_singular_without_umfpack(self):
use_solver(useUmfpack=False)
with assert_raises(RuntimeError, match="Factor is exactly singular"):
self._check_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_singular_with_umfpack(self):
use_solver(useUmfpack=True)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
assert_warns(umfpack.UmfpackWarning, self._check_singular)
def test_non_singular_without_umfpack(self):
use_solver(useUmfpack=False)
self._check_non_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_non_singular_with_umfpack(self):
use_solver(useUmfpack=True)
self._check_non_singular()
def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
msg = "can only factor square matrices"
with assert_raises(ValueError, match=msg):
factorized(self.A[:, :4])
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_factorizes_nonsquare_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
# does not raise
factorized(self.A[:,:4])
def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
with assert_raises(ValueError, match="is of incompatible size"):
solve(b)
with assert_raises(ValueError, match="is of incompatible size"):
solve(B)
with assert_raises(ValueError,
match="object too deep for desired array"):
solve(BB)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
# does not raise
solve(b)
msg = "object too deep for desired array"
with assert_raises(ValueError, match=msg):
solve(B)
with assert_raises(ValueError, match=msg):
solve(BB)
def test_call_with_cast_to_complex_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
with assert_raises(TypeError, match="Cannot cast array data"):
solve(b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_cast_to_complex_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_warns(np.ComplexWarning, solve, b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_assume_sorted_indices_flag(self):
# a sparse matrix with unsorted indices
unsorted_inds = np.array([2, 0, 1, 0])
data = np.array([10, 16, 5, 0.4])
indptr = np.array([0, 1, 2, 4])
A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
b = ones(3)
# should raise when incorrectly assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=True)
with assert_raises(RuntimeError,
match="UMFPACK_ERROR_invalid_matrix"):
factorized(A)
# should sort indices and succeed when not assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=False)
expected = splu(A.copy()).solve(b)
assert_equal(A.has_sorted_indices, 0)
assert_array_almost_equal(factorized(A)(b), expected)
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
A = A.tocsc()
f = factorized(A)
x = f(b)
assert_array_almost_equal(A @ x, b)
class TestLinsolve(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_matrix((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_matrix((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
# should either raise a RuntimeError or return a value
# appropriate for singular input
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
except RuntimeError:
pass
def test_twodiags(self):
A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
for format in ['csc','csr']:
Asp = A.astype(t).asformat(format)
x = spsolve(Asp,b)
assert_(norm(b - Asp*x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3)
b = As*x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3, 4)
Bdense = As.dot(x)
Bs = csc_matrix(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.todense())
@sup_sparse_efficiency
def test_non_square(self):
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
@sup_sparse_efficiency
def test_example_comparison(self):
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
M = sM.todense()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
N = sN.todense()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.todense())
@sup_sparse_efficiency
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
use_solver(useUmfpack=True)
A = csc_matrix([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_matrix([[1], [6]]),
csr_matrix([[1], [6]]),
dok_matrix([[1], [6]]),
bsr_matrix([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
# spsolve also interprets these as "vectors"
x = x.ravel()
assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if isspmatrix(b) and x.ndim > 1:
assert_(isspmatrix(x1), repr((b, spmattype, 1)))
assert_(isspmatrix(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_matrix((3, 3))
b = csc_matrix((1, 3))
assert_raises(ValueError, spsolve, A, b)
@sup_sparse_efficiency
def test_ndarray_support(self):
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
for spmatrix in (csc_matrix, csr_matrix):
A = spmatrix(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = "%r %r" % (spmatrix, badop)
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(spmatrix == csc_matrix), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_matrix([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
x = spsolve(A, b)
assert_array_almost_equal(A @ x, b)
class TestSplu(object):
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
random.seed(1234)
def _smoketest(self, spxlu, check, dtype):
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
lu = spxlu(A)
rng = random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = "k=%r" % (k,)
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
@sup_sparse_efficiency
def test_splu_smoketest(self):
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A * x
assert_(abs(r - b).max() < 1e3*eps, msg)
self._smoketest(splu, check, np.float32)
self._smoketest(splu, check, np.float64)
self._smoketest(splu, check, np.complex64)
self._smoketest(splu, check, np.complex128)
@sup_sparse_efficiency
def test_spilu_smoketest(self):
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A * x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
self._smoketest(spilu, check, np.float32)
self._smoketest(spilu, check, np.float64)
self._smoketest(spilu, check, np.complex64)
self._smoketest(spilu, check, np.complex128)
assert_(max(errors) > 1e-5)
@sup_sparse_efficiency
def test_spilu_drop_rule(self):
# Test passing in the drop_rule argument to spilu.
A = identity(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_matrix(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
# Make `a` diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
# Make `a` diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make a symmetric, and test that the two permutation vectors are
# the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_matrix(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
@pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
def test_natural_permc(self, splu_fun, rtol):
# Test that the "NATURAL" permc_spec does not permute the matrix
np.random.seed(42)
n = 500
p = 0.01
A = scipy.sparse.random(n, n, p)
x = np.random.rand(n)
# Make A diagonally dominant to make sure it is not singular
A += (n+1)*scipy.sparse.identity(n)
A_ = csc_matrix(A)
b = A_ @ x
# without permc_spec, permutation is not identity
lu = splu_fun(A_)
assert_(np.any(lu.perm_c != np.arange(n)))
# with permc_spec="NATURAL", permutation is identity
lu = splu_fun(A_, permc_spec="NATURAL")
assert_array_equal(lu.perm_c, np.arange(n))
# Also, lu decomposition is valid
x2 = lu.solve(b)
assert_allclose(x, x2, rtol=rtol)
@pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
# Make `a` diagonally dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = random.rand(42)
B = random.rand(42, 3)
BB = random.rand(self.n, 3, 9)
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
@sup_sparse_efficiency
def test_superlu_dlamch_i386_nan(self):
# SuperLU 4.3 calls some functions returning floats without
# declaring them. On i386@linux call convention, this fails to
# clear floating point registers after call. As a result, NaN
# can appear in the next floating point operation made.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.A
assert_(not np.isnan(B).any())
@sup_sparse_efficiency
def test_lu_attr(self):
def check(dtype, complex_2=False):
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
# Check that the decomposition is as advertized
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L * lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@pytest.mark.slow
@sup_sparse_efficiency
def test_threads_parallel(self):
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except Exception:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
class TestSpsolveTriangular(object):
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
n = 5
A = csr_matrix((n, n))
b = np.arange(n)
for lower in (True, False):
assert_raises(scipy.linalg.LinAlgError, spsolve_triangular, A, b, lower=lower)
@sup_sparse_efficiency
def test_bad_shape(self):
# A is not square.
A = np.zeros((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve_triangular, A, b)
# A2 and b2 have incompatible shapes.
A2 = csr_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve_triangular, A2, b2)
@sup_sparse_efficiency
def test_input_types(self):
A = array([[1., 0.], [1., 2.]])
b = array([[2., 0.], [2., 2.]])
for matrix_type in (array, csc_matrix, csr_matrix):
x = spsolve_triangular(matrix_type(A), b, lower=True)
assert_array_almost_equal(A.dot(x), b)
@pytest.mark.slow
@sup_sparse_efficiency
def test_random(self):
def random_triangle_matrix(n, lower=True):
A = scipy.sparse.random(n, n, density=0.1, format='coo')
if lower:
A = scipy.sparse.tril(A)
else:
A = scipy.sparse.triu(A)
A = A.tocsr(copy=False)
for i in range(n):
A[i, i] = np.random.rand() + 1
return A
np.random.seed(1234)
for lower in (True, False):
for n in (10, 10**2, 10**3):
A = random_triangle_matrix(n, lower=lower)
for m in (1, 10):
for b in (np.random.rand(n, m),
np.random.randint(-9, 9, (n, m)),
np.random.randint(-9, 9, (n, m)) +
np.random.randint(-9, 9, (n, m)) * 1j):
x = spsolve_triangular(A, b, lower=lower)
assert_array_almost_equal(A.dot(x), b)
x = spsolve_triangular(A, b, lower=lower,
unit_diagonal=True)
A.setdiag(1)
assert_array_almost_equal(A.dot(x), b)

View file

@ -0,0 +1,16 @@
"""
Sparse Eigenvalue Solvers
-------------------------
The submodules of sparse.linalg.eigen:
1. arpack: eigenvalue solvers based on the implicitly restarted
   Arnoldi/Lanczos iterations of ARPACK
2. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method
"""
from .arpack import *
from .lobpcg import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

View file

@ -0,0 +1,45 @@
BSD Software License
Pertains to ARPACK and P_ARPACK
Copyright (c) 1996-2008 Rice University.
Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff.
All rights reserved.
Arpack has been renamed to arpack-ng.
Copyright (c) 2001-2011 - Scilab Enterprises
Updated by Allan Cornet, Sylvestre Ledru.
Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch)
Copyright (c) 2007 - Sébastien Fabbro (gentoo patch)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer listed
in this license in the documentation and/or other materials
provided with the distribution.
- Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1,20 @@
"""
Eigenvalue solver using iterative methods.
Find k eigenvectors and eigenvalues of a matrix A using the
Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_.
These methods are most useful for large sparse matrices.
- eigs(A,k)
- eigsh(A,k)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
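Examples
--------
A minimal sketch (a small diagonal matrix, chosen so that the two
largest eigenvalues are known in advance):
>>> import numpy as np
>>> from scipy.sparse.linalg import eigsh
>>> vals, vecs = eigsh(np.diag(np.arange(1., 6.)), k=2)
>>> np.allclose(np.sort(vals), [4., 5.])
True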
"""
from .arpack import *

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,51 @@
from os.path import join
def configuration(parent_package='',top_path=None):
from scipy._build_utils.system_info import get_info
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import (get_g77_abi_wrappers,
gfortran_legacy_flag_hook,
blas_ilp64_pre_build_hook,
uses_blas64, get_f2py_int64_options)
if uses_blas64():
lapack_opt = get_info('lapack_ilp64_opt', 2)
pre_build_hook = (gfortran_legacy_flag_hook,
blas_ilp64_pre_build_hook(lapack_opt))
f2py_options = get_f2py_int64_options()
else:
lapack_opt = get_info('lapack_opt')
pre_build_hook = gfortran_legacy_flag_hook
f2py_options = None
config = Configuration('arpack', parent_package, top_path)
arpack_sources = [join('ARPACK','SRC', '*.f')]
arpack_sources.extend([join('ARPACK','UTIL', '*.f')])
arpack_sources += get_g77_abi_wrappers(lapack_opt)
config.add_library('arpack_scipy', sources=arpack_sources,
include_dirs=[join('ARPACK', 'SRC')],
_pre_build_hook=pre_build_hook)
ext = config.add_extension('_arpack',
sources=['arpack.pyf.src'],
libraries=['arpack_scipy'],
f2py_options=f2py_options,
extra_info=lapack_opt,
depends=arpack_sources)
ext._pre_build_hook = pre_build_hook
config.add_data_dir('tests')
# Add license files
config.add_data_files('ARPACK/COPYING')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,16 @@
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
Call the function lobpcg - see help for lobpcg.lobpcg.
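A minimal sketch of a call (a diagonal matrix is used purely for
illustration):
>>> import numpy as np
>>> from scipy.sparse.linalg import lobpcg
>>> A = np.diag(np.arange(1., 11.))
>>> rng = np.random.RandomState(0)
>>> w, v = lobpcg(A, rng.rand(10, 2), largest=False, maxiter=100)
>>> np.allclose(np.sort(w), [1., 2.], atol=1e-5)
True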
"""
from .lobpcg import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

View file

@ -0,0 +1,710 @@
"""
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
in hypre and PETSc. https://arxiv.org/abs/0705.2626
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://github.com/lobpcg/blopex
"""
import numpy as np
from scipy.linalg import (inv, eigh, cho_factor, cho_solve, cholesky,
LinAlgError)
from scipy.sparse.linalg import aslinearoperator
from numpy import block as bmat
__all__ = ['lobpcg']
def _report_nonhermitian(M, name):
"""
Report if `M` is not a Hermitian matrix given its type.
"""
from scipy.linalg import norm
md = M - M.T.conj()
nmd = norm(md, 1)
tol = 10 * np.finfo(M.dtype).eps
tol = max(tol, tol * norm(M, 1))
if nmd > tol:
print('matrix %s of the type %s is not sufficiently Hermitian:'
% (name, M.dtype))
print('condition: %e < %e' % (nmd, tol))
def _as2d(ar):
"""
If the input array is 2D return it, if it is 1D, append a dimension,
making it a column vector.
"""
if ar.ndim == 2:
return ar
else: # Assume 1D.
aux = np.array(ar, copy=False)
aux.shape = (ar.shape[0], 1)
return aux
def _makeOperator(operatorInput, expectedShape):
"""Takes a dense numpy array or a sparse matrix or
a function and makes an operator performing matrix * blockvector
products."""
if operatorInput is None:
return None
else:
operator = aslinearoperator(operatorInput)
if operator.shape != expectedShape:
raise ValueError('operator has invalid shape')
return operator
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
"""Changes blockVectorV in place."""
YBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
tmp = cho_solve(factYBY, YBV)
blockVectorV -= np.dot(blockVectorY, tmp)
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False):
"""B-orthonormalize the given block vector using Cholesky."""
normalization = blockVectorV.max(axis=0)+np.finfo(blockVectorV.dtype).eps
blockVectorV = blockVectorV / normalization
if blockVectorBV is None:
if B is not None:
blockVectorBV = B(blockVectorV)
else:
blockVectorBV = blockVectorV # Shared data!!!
else:
blockVectorBV = blockVectorBV / normalization
VBV = np.matmul(blockVectorV.T.conj(), blockVectorBV)
try:
# VBV is a Cholesky factor from now on...
VBV = cholesky(VBV, overwrite_a=True)
VBV = inv(VBV, overwrite_a=True)
blockVectorV = np.matmul(blockVectorV, VBV)
# blockVectorV = (cho_solve((VBV.T, True), blockVectorV.T)).T
if B is not None:
blockVectorBV = np.matmul(blockVectorBV, VBV)
# blockVectorBV = (cho_solve((VBV.T, True), blockVectorBV.T)).T
else:
blockVectorBV = None
except LinAlgError:
#raise ValueError('Cholesky has failed')
blockVectorV = None
blockVectorBV = None
VBV = None
if retInvR:
return blockVectorV, blockVectorBV, VBV, normalization
else:
return blockVectorV, blockVectorBV
def _get_indx(_lambda, num, largest):
"""Get `num` indices into `_lambda` depending on `largest` option."""
ii = np.argsort(_lambda)
if largest:
ii = ii[:-num-1:-1]
else:
ii = ii[:num]
return ii
def lobpcg(A, X,
B=None, M=None, Y=None,
tol=None, maxiter=None,
largest=True, verbosityLevel=0,
retLambdaHistory=False, retResidualNormsHistory=False):
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
LOBPCG is a preconditioned eigensolver for large symmetric positive
definite (SPD) generalized eigenproblems.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The symmetric linear operator of the problem, usually a
sparse matrix. Often called the "stiffness matrix".
X : ndarray, float32 or float64
Initial approximation to the ``k`` eigenvectors (non-sparse). If `A`
has ``shape=(n,n)`` then `X` should have shape ``(n, k)``.
B : {dense matrix, sparse matrix, LinearOperator}, optional
The right hand side operator in a generalized eigenproblem.
By default, ``B = Identity``. Often called the "mass matrix".
M : {dense matrix, sparse matrix, LinearOperator}, optional
Preconditioner to `A`; by default ``M = Identity``.
`M` should approximate the inverse of `A`.
Y : ndarray, float32 or float64, optional
n-by-sizeY matrix of constraints (non-sparse), sizeY < n
The iterations will be performed in the B-orthogonal complement
of the column-space of Y. Y must be full rank.
tol : scalar, optional
Solver tolerance (stopping criterion).
The default is ``tol=n*sqrt(eps)``.
maxiter : int, optional
Maximum number of iterations. The default is ``maxiter = 20``.
largest : bool, optional
When True, solve for the largest eigenvalues, otherwise the smallest.
verbosityLevel : int, optional
Controls solver output. The default is ``verbosityLevel=0``.
retLambdaHistory : bool, optional
Whether to return eigenvalue history. Default is False.
retResidualNormsHistory : bool, optional
Whether to return history of residual norms. Default is False.
Returns
-------
w : ndarray
Array of ``k`` eigenvalues
v : ndarray
An array of ``k`` eigenvectors. `v` has the same shape as `X`.
lambdas : list of ndarray, optional
The eigenvalue history, if `retLambdaHistory` is True.
rnorms : list of ndarray, optional
The history of residual norms, if `retResidualNormsHistory` is True.
Notes
-----
If both ``retLambdaHistory`` and ``retResidualNormsHistory`` are True,
the return tuple has the following format
``(lambda, V, lambda history, residual norms history)``.
In the following ``n`` denotes the matrix size and ``m`` the number
of required eigenvalues (smallest or largest).
The LOBPCG code internally solves eigenproblems of the size ``3m`` on every
iteration by calling the "standard" dense eigensolver, so if ``m`` is not
small enough compared to ``n``, it does not make sense to call the LOBPCG
code; one should rather use a "standard" dense eigensolver from NumPy or
SciPy in this case.
If one calls the LOBPCG algorithm for ``5m > n``, it will most likely break
internally, so the code tries to call the standard function instead.
It is not that ``n`` should be large for the LOBPCG to work, but rather the
ratio ``n / m`` should be large. If you call LOBPCG with ``m=1``
and ``n=10``, it works, though ``n`` is small. The method is intended
for extremely large ``n / m``, see e.g., reference [28] in
https://arxiv.org/abs/0705.2626
The convergence speed depends basically on two factors:
1. How well the sought eigenvalues are separated from the rest of the
eigenvalues. One can try to vary ``m`` to make this better.
2. How well conditioned the problem is. This can be changed by using proper
preconditioning. For example, a rod vibration test problem (under tests
directory) is ill-conditioned for large ``n``, so convergence will be
slow, unless efficient preconditioning is used. For this specific
problem, a good simple preconditioner function would be a linear solve
for `A`, which is easy to code since A is tridiagonal.
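As a sketch, such a preconditioner can be wrapped for `lobpcg` as follows
(an exact sparse LU solve via `splu` is used purely for illustration; any
approximate solver for `A` plays the same role)::

    from scipy.sparse.linalg import LinearOperator, splu
    lu = splu(A.tocsc())
    M = LinearOperator(A.shape, lu.solve)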
References
----------
.. [1] A. V. Knyazev (2001),
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method.
SIAM Journal on Scientific Computing 23, no. 2,
pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
(2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
(BLOPEX) in hypre and PETSc. https://arxiv.org/abs/0705.2626
.. [3] A. V. Knyazev's C and MATLAB implementations:
https://bitbucket.org/joseroman/blopex
Examples
--------
Solve ``A x = lambda x`` with constraints and preconditioning.
>>> import numpy as np
>>> from scipy.sparse import spdiags, issparse
>>> from scipy.sparse.linalg import lobpcg, LinearOperator
>>> n = 100
>>> vals = np.arange(1, n + 1)
>>> A = spdiags(vals, 0, n, n)
>>> A.toarray()
array([[ 1., 0., 0., ..., 0., 0., 0.],
[ 0., 2., 0., ..., 0., 0., 0.],
[ 0., 0., 3., ..., 0., 0., 0.],
...,
[ 0., 0., 0., ..., 98., 0., 0.],
[ 0., 0., 0., ..., 0., 99., 0.],
[ 0., 0., 0., ..., 0., 0., 100.]])
Constraints:
>>> Y = np.eye(n, 3)
Initial guess for eigenvectors, should have linearly independent
columns. Column dimension = number of requested eigenvalues.
>>> X = np.random.rand(n, 3)
The preconditioner is the inverse of A in this example:
>>> invA = spdiags([1./vals], 0, n, n)
The preconditioner must be defined by a function:
>>> def precond(x):
... return invA @ x
The argument x of the preconditioner function is a matrix inside `lobpcg`,
thus the use of matrix-matrix product ``@``.
The preconditioner function is passed to lobpcg as a `LinearOperator`:
>>> M = LinearOperator(matvec=precond, matmat=precond,
... shape=(n, n), dtype=float)
Let us now solve the eigenvalue problem for the matrix A:
>>> eigenvalues, _ = lobpcg(A, X, Y=Y, M=M, largest=False)
>>> eigenvalues
array([4., 5., 6.])
Note that the vectors passed in Y are the eigenvectors of the 3 smallest
eigenvalues. The results returned are orthogonal to those.
"""
blockVectorX = X
blockVectorY = Y
residualTolerance = tol
if maxiter is None:
maxiter = 20
if blockVectorY is not None:
sizeY = blockVectorY.shape[1]
else:
sizeY = 0
# Block size.
if len(blockVectorX.shape) != 2:
raise ValueError('expected rank-2 array for argument X')
n, sizeX = blockVectorX.shape
if verbosityLevel:
aux = "Solving "
if B is None:
aux += "standard"
else:
aux += "generalized"
aux += " eigenvalue problem with"
if M is None:
aux += "out"
aux += " preconditioning\n\n"
aux += "matrix size %d\n" % n
aux += "block size %d\n\n" % sizeX
if blockVectorY is None:
aux += "No constraints\n\n"
else:
if sizeY > 1:
aux += "%d constraints\n\n" % sizeY
else:
aux += "%d constraint\n\n" % sizeY
print(aux)
A = _makeOperator(A, (n, n))
B = _makeOperator(B, (n, n))
M = _makeOperator(M, (n, n))
if (n - sizeY) < (5 * sizeX):
# warn('The problem size is small compared to the block size.' \
# ' Using dense eigensolver instead of LOBPCG.')
sizeX = min(sizeX, n)
if blockVectorY is not None:
raise NotImplementedError('The dense eigensolver '
'does not support constraints.')
# Define the closed range of indices of eigenvalues to return.
if largest:
eigvals = (n - sizeX, n-1)
else:
eigvals = (0, sizeX-1)
A_dense = A(np.eye(n, dtype=A.dtype))
B_dense = None if B is None else B(np.eye(n, dtype=B.dtype))
vals, vecs = eigh(A_dense, B_dense, eigvals=eigvals,
check_finite=False)
if largest:
# Reverse order to be compatible with eigs() in 'LM' mode.
vals = vals[::-1]
vecs = vecs[:, ::-1]
return vals, vecs
if (residualTolerance is None) or (residualTolerance <= 0.0):
residualTolerance = np.sqrt(1e-15) * n
# Apply constraints to X.
if blockVectorY is not None:
if B is not None:
blockVectorBY = B(blockVectorY)
else:
blockVectorBY = blockVectorY
# gramYBY is a dense array.
gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
try:
# gramYBY is a Cholesky factor from now on...
gramYBY = cho_factor(gramYBY)
except LinAlgError:
raise ValueError('cannot handle linearly dependent constraints')
_applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
##
# B-orthonormalize X.
blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)
##
# Compute the initial Ritz vectors: solve the eigenproblem.
blockVectorAX = A(blockVectorX)
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
_lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
ii = _get_indx(_lambda, sizeX, largest)
_lambda = _lambda[ii]
eigBlockVector = np.asarray(eigBlockVector[:, ii])
blockVectorX = np.dot(blockVectorX, eigBlockVector)
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
if B is not None:
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
##
# Active index set.
activeMask = np.ones((sizeX,), dtype=bool)
lambdaHistory = [_lambda]
residualNormsHistory = []
previousBlockSize = sizeX
ident = np.eye(sizeX, dtype=A.dtype)
ident0 = np.eye(sizeX, dtype=A.dtype)
##
# Main iteration loop.
blockVectorP = None # set during iteration
blockVectorAP = None
blockVectorBP = None
iterationNumber = -1
restart = True
explicitGramFlag = False
while iterationNumber < maxiter:
iterationNumber += 1
if verbosityLevel > 0:
print('iteration %d' % iterationNumber)
if B is not None:
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(aux)
residualNormsHistory.append(residualNorms)
ii = np.where(residualNorms > residualTolerance, True, False)
activeMask = activeMask & ii
if verbosityLevel > 2:
print(activeMask)
currentBlockSize = activeMask.sum()
if currentBlockSize != previousBlockSize:
previousBlockSize = currentBlockSize
ident = np.eye(currentBlockSize, dtype=A.dtype)
if currentBlockSize == 0:
break
if verbosityLevel > 0:
print('current block size:', currentBlockSize)
print('eigenvalue:', _lambda)
print('residual norms:', residualNorms)
if verbosityLevel > 10:
print(eigBlockVector)
activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
if iterationNumber > 0:
activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
if B is not None:
activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
if M is not None:
# Apply preconditioner T to the active residuals.
activeBlockVectorR = M(activeBlockVectorR)
##
# Apply constraints to the preconditioned residuals.
if blockVectorY is not None:
_applyConstraints(activeBlockVectorR,
gramYBY, blockVectorBY, blockVectorY)
##
# B-orthogonalize the preconditioned residuals to X.
if B is not None:
activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
np.matmul(blockVectorBX.T.conj(),
activeBlockVectorR))
else:
activeBlockVectorR = activeBlockVectorR - np.matmul(blockVectorX,
np.matmul(blockVectorX.T.conj(),
activeBlockVectorR))
##
# B-orthonormalize the preconditioned residuals.
aux = _b_orthonormalize(B, activeBlockVectorR)
activeBlockVectorR, activeBlockVectorBR = aux
activeBlockVectorAR = A(activeBlockVectorR)
if iterationNumber > 0:
if B is not None:
aux = _b_orthonormalize(B, activeBlockVectorP,
activeBlockVectorBP, retInvR=True)
activeBlockVectorP, activeBlockVectorBP, invR, normal = aux
else:
aux = _b_orthonormalize(B, activeBlockVectorP, retInvR=True)
activeBlockVectorP, _, invR, normal = aux
# Function _b_orthonormalize returns None if Cholesky fails
if activeBlockVectorP is not None:
activeBlockVectorAP = activeBlockVectorAP / normal
activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)
restart = False
else:
restart = True
##
# Perform the Rayleigh Ritz Procedure:
# Compute symmetric Gram matrices:
if activeBlockVectorAR.dtype == 'float32':
myeps = 1
elif activeBlockVectorR.dtype == 'float32':
myeps = 1e-4
else:
myeps = 1e-8
if residualNorms.max() > myeps and not explicitGramFlag:
explicitGramFlag = False
else:
# Once explicitGramFlag, forever explicitGramFlag.
explicitGramFlag = True
# Shared memory assignments to simplify the code
if B is None:
blockVectorBX = blockVectorX
activeBlockVectorBR = activeBlockVectorR
if not restart:
activeBlockVectorBP = activeBlockVectorP
# Common submatrices:
gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
if explicitGramFlag:
gramRAR = (gramRAR + gramRAR.T.conj())/2
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
gramXAX = (gramXAX + gramXAX.T.conj())/2
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
else:
gramXAX = np.diag(_lambda)
gramXBX = ident0
gramRBR = ident
gramXBR = np.zeros((sizeX, currentBlockSize), dtype=A.dtype)
def _handle_gramA_gramB_verbosity(gramA, gramB):
if verbosityLevel > 0:
_report_nonhermitian(gramA, 'gramA')
_report_nonhermitian(gramB, 'gramB')
if verbosityLevel > 10:
# Note: not documented, but leave it in here for now
np.savetxt('gramA.txt', gramA)
np.savetxt('gramB.txt', gramB)
if not restart:
gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
if explicitGramFlag:
gramPAP = (gramPAP + gramPAP.T.conj())/2
gramPBP = np.dot(activeBlockVectorP.T.conj(),
activeBlockVectorBP)
else:
gramPBP = ident
gramA = bmat([[gramXAX, gramXAR, gramXAP],
[gramXAR.T.conj(), gramRAR, gramRAP],
[gramXAP.T.conj(), gramRAP.T.conj(), gramPAP]])
gramB = bmat([[gramXBX, gramXBR, gramXBP],
[gramXBR.T.conj(), gramRBR, gramRBP],
[gramXBP.T.conj(), gramRBP.T.conj(), gramPBP]])
_handle_gramA_gramB_verbosity(gramA, gramB)
try:
_lambda, eigBlockVector = eigh(gramA, gramB,
check_finite=False)
except LinAlgError:
# try again after dropping the direction vectors P from RR
restart = True
if restart:
gramA = bmat([[gramXAX, gramXAR],
[gramXAR.T.conj(), gramRAR]])
gramB = bmat([[gramXBX, gramXBR],
[gramXBR.T.conj(), gramRBR]])
_handle_gramA_gramB_verbosity(gramA, gramB)
try:
_lambda, eigBlockVector = eigh(gramA, gramB,
check_finite=False)
except LinAlgError:
raise ValueError('eigh has failed in lobpcg iterations')
ii = _get_indx(_lambda, sizeX, largest)
if verbosityLevel > 10:
print(ii)
print(_lambda)
_lambda = _lambda[ii]
eigBlockVector = eigBlockVector[:, ii]
lambdaHistory.append(_lambda)
if verbosityLevel > 10:
print('lambda:', _lambda)
# # Normalize eigenvectors!
# aux = np.sum( eigBlockVector.conj() * eigBlockVector, 0 )
# eigVecNorms = np.sqrt( aux )
# eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]
# eigBlockVector, aux = _b_orthonormalize( B, eigBlockVector )
if verbosityLevel > 10:
print(eigBlockVector)
# Compute Ritz vectors.
if B is not None:
if not restart:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
if verbosityLevel > 10:
print(pp)
print(app)
print(bpp)
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
else:
if not restart:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
else:
eigBlockVectorX = eigBlockVector[:sizeX]
eigBlockVectorR = eigBlockVector[sizeX:]
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
if verbosityLevel > 10:
print(pp)
print(app)
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
blockVectorP, blockVectorAP = pp, app
if B is not None:
aux = blockVectorBX * _lambda[np.newaxis, :]
else:
aux = blockVectorX * _lambda[np.newaxis, :]
blockVectorR = blockVectorAX - aux
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
residualNorms = np.sqrt(aux)
# Future work: Need to add Postprocessing here:
# Making sure eigenvectors "exactly" satisfy the blockVectorY constraints?
# Making sure eigenvectors are "exactly" orthonormalized by a final "exact" RR
# Computing the actual true residuals
if verbosityLevel > 0:
print('final eigenvalue:', _lambda)
print('final residual norms:', residualNorms)
if retLambdaHistory:
if retResidualNormsHistory:
return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
else:
return _lambda, blockVectorX, lambdaHistory
else:
if retResidualNormsHistory:
return _lambda, blockVectorX, residualNormsHistory
else:
return _lambda, blockVectorX

View file

@ -0,0 +1,13 @@
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('lobpcg',parent_package,top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())

View file

@ -0,0 +1,405 @@
""" Test functions for the sparse.linalg.eigen.lobpcg module
"""
import itertools
import platform
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
import pytest
from numpy import ones, r_, diag
from numpy.random import rand
from scipy.linalg import eig, eigh, toeplitz, orth
from scipy.sparse import spdiags, diags, eye
from scipy.sparse.linalg import eigs, LinearOperator
from scipy.sparse.linalg.eigen.lobpcg import lobpcg
def ElasticRod(n):
"""Build the matrices for the generalized eigenvalue problem of the
fixed-free elastic rod vibration model.
"""
L = 1.0
le = L/n
rho = 7.85e3
S = 1.e-4
E = 2.1e11
mass = rho*S*le/6.
k = E*S/le
A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
return A, B
def MikotaPair(n):
"""Build a pair of full diagonal matrices for the generalized eigenvalue
problem. The Mikota pair acts as a nice test since the eigenvalues are the
squares of the integers n, n=1,2,...
"""
x = np.arange(1, n+1)
B = diag(1./x)
y = np.arange(n-1, 0, -1)
z = np.arange(2*n-1, 0, -2)
A = diag(z)-diag(y, -1)-diag(y, 1)
return A, B
def compare_solutions(A, B, m):
"""Check eig vs. lobpcg consistency.
"""
n = A.shape[0]
np.random.seed(0)
V = rand(n, m)
X = orth(V)
eigvals, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False)
eigvals.sort()
w, _ = eig(A, b=B)
w.sort()
assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
def test_Small():
A, B = ElasticRod(10)
compare_solutions(A, B, 10)
A, B = MikotaPair(10)
compare_solutions(A, B, 10)
def test_ElasticRod():
A, B = ElasticRod(100)
compare_solutions(A, B, 20)
def test_MikotaPair():
A, B = MikotaPair(100)
compare_solutions(A, B, 20)
def test_regression():
"""Check the eigenvalue of the identity matrix is one.
"""
# https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
n = 10
X = np.ones((n, 1))
A = np.identity(n)
w, _ = lobpcg(A, X)
assert_allclose(w, [1])
def test_diagonal():
"""Check for diagonal matrices.
"""
# This test was moved from '__main__' in lobpcg.py.
# Coincidentally or not, this is the same eigensystem
# required to reproduce arpack bug
# https://forge.scilab.org/p/arpack-ng/issues/1397/
# even using the same n=100.
np.random.seed(1234)
# The system of interest is of size n x n.
n = 100
# We care about only m eigenpairs.
m = 4
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A to be the diagonal matrix whose entries are 1..n
# and where B is chosen to be the identity matrix.
vals = np.arange(1, n+1, dtype=float)
A = diags([vals], [0], (n, n))
B = eye(n)
# Let the preconditioner M be the inverse of A.
M = diags([1./vals], [0], (n, n))
# Pick random initial vectors.
X = np.random.rand(n, m)
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors.
m_excluded = 3
Y = np.eye(n, m_excluded)
eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y, tol=1e-4, maxiter=40, largest=False)
assert_allclose(eigvals, np.arange(1+m_excluded, 1+m_excluded+m))
_check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
"""Check if the eigenvalue residual is small.
"""
mult_wV = np.multiply(w, V)
dot_MV = M.dot(V)
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
def _check_fiedler(n, p):
"""Check the Fiedler vector computation.
"""
# This is not necessarily the recommended way to find the Fiedler vector.
np.random.seed(1234)
col = np.zeros(n)
col[1] = 1
A = toeplitz(col)
D = np.diag(A.sum(axis=1))
L = D - A
# Compute the full eigendecomposition using tricks, e.g.
# http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
tmp = np.pi * np.arange(n) / n
analytic_w = 2 * (1 - np.cos(tmp))
analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
_check_eigen(L, analytic_w, analytic_V)
# Compute the full eigendecomposition using eigh.
eigh_w, eigh_V = eigh(L)
_check_eigen(L, eigh_w, eigh_V)
# Check that the first eigenvalue is near zero and that the rest agree.
assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
assert_allclose(eigh_w[1:], analytic_w[1:])
# Check small lobpcg eigenvalues.
X = analytic_V[:, :p]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
# Check large lobpcg eigenvalues.
X = analytic_V[:, -p:]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
# Look for the Fiedler vector using good but not exactly correct guesses.
fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
X = np.vstack((np.ones(n), fiedler_guess)).T
lobpcg_w, _ = lobpcg(L, X, largest=False)
# Mathematically, the smaller eigenvalue should be zero
# and the larger should be the algebraic connectivity.
lobpcg_w = np.sort(lobpcg_w)
assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
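# A hedged side-check (not one of the original tests): for the path-graph
# Laplacian built in _check_fiedler above, the algebraic connectivity
# (second-smallest eigenvalue) has the closed form 2*(1 - cos(pi/n)).
def _example_algebraic_connectivity(n=12):
    col = np.zeros(n)
    col[1] = 1
    A = toeplitz(col)
    L = np.diag(A.sum(axis=1)) - A
    w = eigh(L, eigvals_only=True)
    assert_allclose(w[1], 2 * (1 - np.cos(np.pi / n)))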
def test_fiedler_small_8():
"""Check the dense workaround path for small matrices.
"""
# This triggers the dense path because 8 < 2*5.
_check_fiedler(8, 2)
def test_fiedler_large_12():
"""Check the dense workaround path avoided for non-small matrices.
"""
# This does not trigger the dense path, because 2*5 <= 12.
_check_fiedler(12, 2)
def test_hermitian():
"""Check complex-value Hermitian cases.
"""
np.random.seed(1234)
sizes = [3, 10, 50]
ks = [1, 3, 10, 50]
gens = [True, False]
for size, k, gen in itertools.product(sizes, ks, gens):
if k > size:
continue
H = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
H = 10 * np.eye(size) + H + H.T.conj()
X = np.random.rand(size, k)
if not gen:
B = np.eye(size)
w, v = lobpcg(H, X, maxiter=5000)
w0, _ = eigh(H)
else:
B = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
B = 10 * np.eye(size) + B.dot(B.T.conj())
w, v = lobpcg(H, X, B, maxiter=5000, largest=False)
w0, _ = eigh(H, B)
for wx, vx in zip(w, v.T):
# Check eigenvector
assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
/ np.linalg.norm(H.dot(vx)),
0, atol=5e-4, rtol=0)
# Compare eigenvalues
j = np.argmin(abs(w0 - wx))
assert_allclose(wx, w0[j], rtol=1e-4)
# The n=5 case tests the alternative small matrix code path that uses eigh().
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
def test_eigs_consistency(n, atol):
"""Check eigs vs. lobpcg consistency.
"""
vals = np.arange(1, n+1, dtype=np.float64)
A = spdiags(vals, 0, n, n)
np.random.seed(345678)
X = np.random.rand(n, 2)
lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
vals, _ = eigs(A, k=2)
_check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
def test_verbosity(tmpdir):
"""Check that nonzero verbosity level code runs.
"""
A, B = ElasticRod(100)
n = A.shape[0]
m = 20
np.random.seed(0)
V = rand(n, m)
X = orth(V)
_, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False,
verbosityLevel=9)
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
def test_tolerance_float32():
"""Check lobpcg for attainable tolerance in float32.
"""
np.random.seed(1234)
n = 50
m = 3
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.randn(n, m)
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1e-9, maxiter=50, verbosityLevel=0)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-5)
def test_random_initial_float32():
"""Check lobpcg in float32 for specific initial.
"""
np.random.seed(3)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.rand(n, m)
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1e-3, maxiter=50, verbosityLevel=1)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-2)
def test_maxit_None():
"""Check lobpcg if maxit=None runs 20 iterations (the default)
by checking the size of the iteration history output, which should
be the number of iterations plus 2 (initial and final values).
"""
np.random.seed(1566950023)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.randn(n, m)
X = X.astype(np.float32)
_, _, l_h = lobpcg(A, X, tol=1e-8, maxiter=20, retLambdaHistory=True)
assert_allclose(np.shape(l_h)[0], 20+2)
@pytest.mark.slow
def test_diagonal_data_types():
"""Check lobpcg for diagonal matrices for all matrix types.
"""
np.random.seed(1234)
n = 40
m = 4
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A and B to be diagonal.
vals = np.arange(1, n + 1)
list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
sparse_formats = len(list_sparse_format)
for s_f_i, s_f in enumerate(list_sparse_format):
As64 = diags([vals * vals], [0], (n, n), format=s_f)
As32 = As64.astype(np.float32)
Af64 = As64.toarray()
Af32 = Af64.astype(np.float32)
listA = [Af64, As64, Af32, As32]
Bs64 = diags([vals], [0], (n, n), format=s_f)
Bf64 = Bs64.toarray()
listB = [Bf64, Bs64]
# Define the preconditioner function as LinearOperator.
Ms64 = diags([1./vals], [0], (n, n), format=s_f)
def Ms64precond(x):
return Ms64 @ x
Ms64precondLO = LinearOperator(matvec=Ms64precond,
matmat=Ms64precond,
shape=(n, n), dtype=float)
Mf64 = Ms64.toarray()
def Mf64precond(x):
return Mf64 @ x
Mf64precondLO = LinearOperator(matvec=Mf64precond,
matmat=Mf64precond,
shape=(n, n), dtype=float)
Ms32 = Ms64.astype(np.float32)
def Ms32precond(x):
return Ms32 @ x
Ms32precondLO = LinearOperator(matvec=Ms32precond,
matmat=Ms32precond,
shape=(n, n), dtype=np.float32)
Mf32 = Ms32.toarray()
def Mf32precond(x):
return Mf32 @ x
Mf32precondLO = LinearOperator(matvec=Mf32precond,
matmat=Mf32precond,
shape=(n, n), dtype=np.float32)
listM = [None, Ms64precondLO, Mf64precondLO,
Ms32precondLO, Mf32precondLO]
# Setup matrix of the initial approximation to the eigenvectors
# (cannot be sparse array).
Xf64 = np.random.rand(n, m)
Xf32 = Xf64.astype(np.float32)
listX = [Xf64, Xf32]
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors (cannot be sparse array).
m_excluded = 3
Yf64 = np.eye(n, m_excluded, dtype=float)
Yf32 = np.eye(n, m_excluded, dtype=np.float32)
listY = [Yf64, Yf32]
tests = list(itertools.product(listA, listB, listM, listX, listY))
        # This is one of the slower tests because there are >1,000 configs
        # to test here. Instead of checking the product of all input/output
        # types for every sparse format, test each configuration for the
        # first sparse format, and then each for one additional sparse
        # format; this takes 2/7 ~= 30% as long as testing all
        # configurations for all sparse formats.
if s_f_i > 0:
tests = tests[s_f_i - 1::sparse_formats-1]
for A, B, M, X, Y in tests:
eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
maxiter=100, largest=False)
assert_allclose(eigvals,
np.arange(1 + m_excluded, 1 + m_excluded + m))

View file

@ -0,0 +1,15 @@
def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('eigen', parent_package, top_path)
    config.add_subpackage('arpack')
    config.add_subpackage('lobpcg')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())

View file

@ -0,0 +1,826 @@
"""Abstract linear algebra library.
This module defines a class hierarchy that implements a kind of "lazy"
matrix representation, called the ``LinearOperator``. It can be used to do
linear algebra with extremely large sparse or structured matrices, without
representing those explicitly in memory. Such matrices can be added,
multiplied, transposed, etc.
As a motivating example, suppose you have a matrix where almost all of
the elements have the value one. The standard sparse matrix representation
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
able to represent such matrices efficiently. First, we need a compact way to
represent an all-ones matrix::
>>> import numpy as np
>>> class Ones(LinearOperator):
... def __init__(self, shape):
... super(Ones, self).__init__(dtype=None, shape=shape)
... def _matvec(self, x):
... return np.repeat(x.sum(), self.shape[0])
Instances of this class emulate ``np.ones(shape)``, but using a constant
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
how this linear operator multiplies with (operates on) a vector. We can now
add this operator to a sparse matrix that stores only offsets from one::
>>> from scipy.sparse import csr_matrix
>>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
>>> A.dot([1, 2, 3])
array([13, 4, 15])
The result is the same as that given by its dense, explicitly-stored
counterpart::
>>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
array([13, 4, 15])
Several algorithms in the ``scipy.sparse`` library are able to operate on
``LinearOperator`` instances.
"""
import warnings
import numpy as np
from scipy.sparse import isspmatrix
from scipy.sparse.sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix
__all__ = ['LinearOperator', 'aslinearoperator']
class LinearOperator(object):
"""Common interface for performing matrix vector products
Many iterative methods (e.g. cg, gmres) do not need to know the
individual entries of a matrix to solve a linear system A*x=b.
Such solvers only require the computation of matrix vector
products, A*v where v is a dense vector. This class serves as
an abstract interface between iterative solvers and matrix-like
objects.
To construct a concrete LinearOperator, either pass appropriate
callables to the constructor of this class, or subclass it.
A subclass must implement either one of the methods ``_matvec``
and ``_matmat``, and the attributes/properties ``shape`` (pair of
integers) and ``dtype`` (may be None). It may call the ``__init__``
on this class to have these attributes validated. Implementing
``_matvec`` automatically implements ``_matmat`` (using a naive
algorithm) and vice-versa.
Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
to implement the Hermitian adjoint (conjugate transpose). As with
``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
``_adjoint`` implements the other automatically. Implementing
``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
backwards compatibility.
Parameters
----------
shape : tuple
Matrix dimensions (M, N).
matvec : callable f(v)
        Returns A * v.
rmatvec : callable f(v)
Returns A^H * v, where A^H is the conjugate transpose of A.
matmat : callable f(V)
Returns A * V, where V is a dense matrix with dimensions (N, K).
dtype : dtype
Data type of the matrix.
rmatmat : callable f(V)
Returns A^H * V, where V is a dense matrix with dimensions (M, K).
Attributes
----------
args : tuple
For linear operators describing products etc. of other linear
operators, the operands of the binary operation.
ndim : int
Number of dimensions (this is always 2)
See Also
--------
aslinearoperator : Construct LinearOperators
Notes
-----
The user-defined matvec() function must properly handle the case
where v has shape (N,) as well as the (N,1) case. The shape of
the return type is handled internally by LinearOperator.
LinearOperator instances can also be multiplied, added with each
other and exponentiated, all lazily: the result of these operations
is always a new, composite LinearOperator, that defers linear
operations to the original operators and combines the results.
More details regarding how to subclass a LinearOperator and several
examples of concrete LinearOperator instances can be found in the
external project `PyLops <https://pylops.readthedocs.io>`_.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import LinearOperator
>>> def mv(v):
... return np.array([2*v[0], 3*v[1]])
...
>>> A = LinearOperator((2,2), matvec=mv)
>>> A
<2x2 _CustomLinearOperator with dtype=float64>
>>> A.matvec(np.ones(2))
array([ 2., 3.])
>>> A * np.ones(2)
array([ 2., 3.])
"""
ndim = 2
def __new__(cls, *args, **kwargs):
if cls is LinearOperator:
# Operate as _CustomLinearOperator factory.
return super(LinearOperator, cls).__new__(_CustomLinearOperator)
else:
obj = super(LinearOperator, cls).__new__(cls)
if (type(obj)._matvec == LinearOperator._matvec
and type(obj)._matmat == LinearOperator._matmat):
warnings.warn("LinearOperator subclass should implement"
" at least one of _matvec and _matmat.",
category=RuntimeWarning, stacklevel=2)
return obj
def __init__(self, dtype, shape):
"""Initialize this LinearOperator.
To be called by subclasses. ``dtype`` may be None; ``shape`` should
be convertible to a length-2 tuple.
"""
if dtype is not None:
dtype = np.dtype(dtype)
shape = tuple(shape)
if not isshape(shape):
raise ValueError("invalid shape %r (must be 2-d)" % (shape,))
self.dtype = dtype
self.shape = shape
def _init_dtype(self):
"""Called from subclasses at the end of the __init__ routine.
"""
if self.dtype is None:
v = np.zeros(self.shape[-1])
self.dtype = np.asarray(self.matvec(v)).dtype
def _matmat(self, X):
"""Default matrix-matrix multiplication handler.
Falls back on the user-defined _matvec method, so defining that will
define matrix multiplication (though in a very suboptimal way).
"""
return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])
def _matvec(self, x):
"""Default matrix-vector multiplication handler.
If self is a linear operator of shape (M, N), then this method will
be called on a shape (N,) or (N, 1) ndarray, and should return a
shape (M,) or (M, 1) ndarray.
This default implementation falls back on _matmat, so defining that
will define matrix-vector multiplication as well.
"""
return self.matmat(x.reshape(-1, 1))
def matvec(self, x):
"""Matrix-vector multiplication.
Performs the operation y=A*x where A is an MxN linear
operator and x is a column vector or 1-d array.
Parameters
----------
x : {matrix, ndarray}
An array with shape (N,) or (N,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (M,) or (M,1) depending
on the type and shape of the x argument.
Notes
-----
This matvec wraps the user-specified matvec routine or overridden
_matvec method to ensure that y has the correct shape and type.
"""
x = np.asanyarray(x)
M,N = self.shape
if x.shape != (N,) and x.shape != (N,1):
raise ValueError('dimension mismatch')
y = self._matvec(x)
if isinstance(x, np.matrix):
y = asmatrix(y)
else:
y = np.asarray(y)
if x.ndim == 1:
y = y.reshape(M)
elif x.ndim == 2:
y = y.reshape(M,1)
else:
raise ValueError('invalid shape returned by user-defined matvec()')
return y
def rmatvec(self, x):
"""Adjoint matrix-vector multiplication.
Performs the operation y = A^H * x where A is an MxN linear
operator and x is a column vector or 1-d array.
Parameters
----------
x : {matrix, ndarray}
An array with shape (M,) or (M,1).
Returns
-------
y : {matrix, ndarray}
A matrix or ndarray with shape (N,) or (N,1) depending
on the type and shape of the x argument.
Notes
-----
This rmatvec wraps the user-specified rmatvec routine or overridden
_rmatvec method to ensure that y has the correct shape and type.
"""
x = np.asanyarray(x)
M,N = self.shape
if x.shape != (M,) and x.shape != (M,1):
raise ValueError('dimension mismatch')
y = self._rmatvec(x)
if isinstance(x, np.matrix):
y = asmatrix(y)
else:
y = np.asarray(y)
if x.ndim == 1:
y = y.reshape(N)
elif x.ndim == 2:
y = y.reshape(N,1)
else:
raise ValueError('invalid shape returned by user-defined rmatvec()')
return y
def _rmatvec(self, x):
"""Default implementation of _rmatvec; defers to adjoint."""
if type(self)._adjoint == LinearOperator._adjoint:
# _adjoint not overridden, prevent infinite recursion
raise NotImplementedError
else:
return self.H.matvec(x)
def matmat(self, X):
"""Matrix-matrix multiplication.
Performs the operation y=A*X where A is an MxN linear
        operator and X is a dense N*K matrix or ndarray.
Parameters
----------
X : {matrix, ndarray}
An array with shape (N,K).
Returns
-------
Y : {matrix, ndarray}
A matrix or ndarray with shape (M,K) depending on
the type of the X argument.
Notes
-----
This matmat wraps any user-specified matmat routine or overridden
_matmat method to ensure that y has the correct type.
"""
X = np.asanyarray(X)
if X.ndim != 2:
raise ValueError('expected 2-d ndarray or matrix, not %d-d'
% X.ndim)
if X.shape[0] != self.shape[1]:
raise ValueError('dimension mismatch: %r, %r'
% (self.shape, X.shape))
Y = self._matmat(X)
if isinstance(Y, np.matrix):
Y = asmatrix(Y)
return Y
def rmatmat(self, X):
"""Adjoint matrix-matrix multiplication.
        Performs the operation Y = A^H * X where A is an MxN linear
        operator and X is a dense M*K matrix or 2-d array.
The default implementation defers to the adjoint.
Parameters
----------
X : {matrix, ndarray}
A matrix or 2D array.
Returns
-------
Y : {matrix, ndarray}
A matrix or 2D array depending on the type of the input.
Notes
-----
This rmatmat wraps the user-specified rmatmat routine.
"""
X = np.asanyarray(X)
if X.ndim != 2:
raise ValueError('expected 2-d ndarray or matrix, not %d-d'
% X.ndim)
if X.shape[0] != self.shape[0]:
raise ValueError('dimension mismatch: %r, %r'
% (self.shape, X.shape))
Y = self._rmatmat(X)
if isinstance(Y, np.matrix):
Y = asmatrix(Y)
return Y
def _rmatmat(self, X):
"""Default implementation of _rmatmat defers to rmatvec or adjoint."""
if type(self)._adjoint == LinearOperator._adjoint:
return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])
else:
return self.H.matmat(X)
def __call__(self, x):
return self*x
def __mul__(self, x):
return self.dot(x)
def dot(self, x):
"""Matrix-matrix or matrix-vector multiplication.
Parameters
----------
x : array_like
1-d or 2-d array, representing a vector or matrix.
Returns
-------
Ax : array
1-d or 2-d array (depending on the shape of x) that represents
the result of applying this linear operator on x.
"""
if isinstance(x, LinearOperator):
return _ProductLinearOperator(self, x)
elif np.isscalar(x):
return _ScaledLinearOperator(self, x)
else:
x = np.asarray(x)
            if x.ndim == 1 or (x.ndim == 2 and x.shape[1] == 1):
return self.matvec(x)
elif x.ndim == 2:
return self.matmat(x)
else:
raise ValueError('expected 1-d or 2-d array or matrix, got %r'
% x)
def __matmul__(self, other):
if np.isscalar(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__mul__(other)
def __rmatmul__(self, other):
if np.isscalar(other):
raise ValueError("Scalar operands are not allowed, "
"use '*' instead")
return self.__rmul__(other)
def __rmul__(self, x):
if np.isscalar(x):
return _ScaledLinearOperator(self, x)
else:
return NotImplemented
def __pow__(self, p):
if np.isscalar(p):
return _PowerLinearOperator(self, p)
else:
return NotImplemented
def __add__(self, x):
if isinstance(x, LinearOperator):
return _SumLinearOperator(self, x)
else:
return NotImplemented
def __neg__(self):
return _ScaledLinearOperator(self, -1)
def __sub__(self, x):
return self.__add__(-x)
def __repr__(self):
M,N = self.shape
if self.dtype is None:
dt = 'unspecified dtype'
else:
dt = 'dtype=' + str(self.dtype)
return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)
def adjoint(self):
"""Hermitian adjoint.
Returns the Hermitian adjoint of self, aka the Hermitian
conjugate or Hermitian transpose. For a complex matrix, the
Hermitian adjoint is equal to the conjugate transpose.
Can be abbreviated self.H instead of self.adjoint().
Returns
-------
A_H : LinearOperator
Hermitian adjoint of self.
"""
return self._adjoint()
H = property(adjoint)
def transpose(self):
"""Transpose this linear operator.
Returns a LinearOperator that represents the transpose of this one.
Can be abbreviated self.T instead of self.transpose().
"""
return self._transpose()
T = property(transpose)
def _adjoint(self):
"""Default implementation of _adjoint; defers to rmatvec."""
return _AdjointLinearOperator(self)
def _transpose(self):
""" Default implementation of _transpose; defers to rmatvec + conj"""
return _TransposedLinearOperator(self)
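# A hedged illustration (not part of the original file) of the subclassing
# contract described in the class docstring above: implementing just
# _matvec and _adjoint is enough for matvec, matmat, rmatvec and .H to
# work through the machinery defined in LinearOperator.
class _ExampleDiagonalOperator(LinearOperator):
    """Lazy diagonal operator, e.g. _ExampleDiagonalOperator([1j, 2.])."""
    def __init__(self, d):
        self.d = np.asarray(d)
        super(_ExampleDiagonalOperator, self).__init__(
            dtype=self.d.dtype, shape=(self.d.size, self.d.size))
    def _matvec(self, x):
        # Accepts x of shape (N,) or (N, 1); the matvec wrapper restores
        # the expected output shape.
        return self.d * x.ravel()
    def _adjoint(self):
        # .H applies the conjugated diagonal, lazily.
        return _ExampleDiagonalOperator(np.conj(self.d))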
class _CustomLinearOperator(LinearOperator):
"""Linear operator defined in terms of user-specified operations."""
def __init__(self, shape, matvec, rmatvec=None, matmat=None,
dtype=None, rmatmat=None):
super(_CustomLinearOperator, self).__init__(dtype, shape)
self.args = ()
self.__matvec_impl = matvec
self.__rmatvec_impl = rmatvec
self.__rmatmat_impl = rmatmat
self.__matmat_impl = matmat
self._init_dtype()
def _matmat(self, X):
if self.__matmat_impl is not None:
return self.__matmat_impl(X)
else:
return super(_CustomLinearOperator, self)._matmat(X)
def _matvec(self, x):
return self.__matvec_impl(x)
def _rmatvec(self, x):
func = self.__rmatvec_impl
if func is None:
raise NotImplementedError("rmatvec is not defined")
return self.__rmatvec_impl(x)
def _rmatmat(self, X):
if self.__rmatmat_impl is not None:
return self.__rmatmat_impl(X)
else:
return super(_CustomLinearOperator, self)._rmatmat(X)
def _adjoint(self):
return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
matvec=self.__rmatvec_impl,
rmatvec=self.__matvec_impl,
matmat=self.__rmatmat_impl,
rmatmat=self.__matmat_impl,
dtype=self.dtype)
class _AdjointLinearOperator(LinearOperator):
"""Adjoint of arbitrary Linear Operator"""
def __init__(self, A):
shape = (A.shape[1], A.shape[0])
super(_AdjointLinearOperator, self).__init__(dtype=A.dtype, shape=shape)
self.A = A
self.args = (A,)
def _matvec(self, x):
return self.A._rmatvec(x)
def _rmatvec(self, x):
return self.A._matvec(x)
def _matmat(self, x):
return self.A._rmatmat(x)
def _rmatmat(self, x):
return self.A._matmat(x)
class _TransposedLinearOperator(LinearOperator):
"""Transposition of arbitrary Linear Operator"""
def __init__(self, A):
shape = (A.shape[1], A.shape[0])
super(_TransposedLinearOperator, self).__init__(dtype=A.dtype, shape=shape)
self.A = A
self.args = (A,)
def _matvec(self, x):
# NB. np.conj works also on sparse matrices
return np.conj(self.A._rmatvec(np.conj(x)))
def _rmatvec(self, x):
return np.conj(self.A._matvec(np.conj(x)))
def _matmat(self, x):
# NB. np.conj works also on sparse matrices
return np.conj(self.A._rmatmat(np.conj(x)))
def _rmatmat(self, x):
return np.conj(self.A._matmat(np.conj(x)))
def _get_dtype(operators, dtypes=None):
if dtypes is None:
dtypes = []
for obj in operators:
if obj is not None and hasattr(obj, 'dtype'):
dtypes.append(obj.dtype)
return np.find_common_type(dtypes, [])
class _SumLinearOperator(LinearOperator):
def __init__(self, A, B):
if not isinstance(A, LinearOperator) or \
not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape != B.shape:
raise ValueError('cannot add %r and %r: shape mismatch'
% (A, B))
self.args = (A, B)
super(_SumLinearOperator, self).__init__(_get_dtype([A, B]), A.shape)
def _matvec(self, x):
return self.args[0].matvec(x) + self.args[1].matvec(x)
def _rmatvec(self, x):
return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)
def _rmatmat(self, x):
return self.args[0].rmatmat(x) + self.args[1].rmatmat(x)
def _matmat(self, x):
return self.args[0].matmat(x) + self.args[1].matmat(x)
def _adjoint(self):
A, B = self.args
return A.H + B.H
class _ProductLinearOperator(LinearOperator):
def __init__(self, A, B):
if not isinstance(A, LinearOperator) or \
not isinstance(B, LinearOperator):
raise ValueError('both operands have to be a LinearOperator')
if A.shape[1] != B.shape[0]:
raise ValueError('cannot multiply %r and %r: shape mismatch'
% (A, B))
super(_ProductLinearOperator, self).__init__(_get_dtype([A, B]),
(A.shape[0], B.shape[1]))
self.args = (A, B)
def _matvec(self, x):
return self.args[0].matvec(self.args[1].matvec(x))
def _rmatvec(self, x):
return self.args[1].rmatvec(self.args[0].rmatvec(x))
def _rmatmat(self, x):
return self.args[1].rmatmat(self.args[0].rmatmat(x))
def _matmat(self, x):
return self.args[0].matmat(self.args[1].matmat(x))
def _adjoint(self):
A, B = self.args
return B.H * A.H
class _ScaledLinearOperator(LinearOperator):
def __init__(self, A, alpha):
if not isinstance(A, LinearOperator):
raise ValueError('LinearOperator expected as A')
if not np.isscalar(alpha):
raise ValueError('scalar expected as alpha')
dtype = _get_dtype([A], [type(alpha)])
super(_ScaledLinearOperator, self).__init__(dtype, A.shape)
self.args = (A, alpha)
def _matvec(self, x):
return self.args[1] * self.args[0].matvec(x)
def _rmatvec(self, x):
return np.conj(self.args[1]) * self.args[0].rmatvec(x)
def _rmatmat(self, x):
return np.conj(self.args[1]) * self.args[0].rmatmat(x)
def _matmat(self, x):
return self.args[1] * self.args[0].matmat(x)
def _adjoint(self):
A, alpha = self.args
return A.H * np.conj(alpha)
class _PowerLinearOperator(LinearOperator):
def __init__(self, A, p):
if not isinstance(A, LinearOperator):
raise ValueError('LinearOperator expected as A')
if A.shape[0] != A.shape[1]:
raise ValueError('square LinearOperator expected, got %r' % A)
if not isintlike(p) or p < 0:
raise ValueError('non-negative integer expected as p')
super(_PowerLinearOperator, self).__init__(_get_dtype([A]), A.shape)
self.args = (A, p)
def _power(self, fun, x):
res = np.array(x, copy=True)
for i in range(self.args[1]):
res = fun(res)
return res
def _matvec(self, x):
return self._power(self.args[0].matvec, x)
def _rmatvec(self, x):
return self._power(self.args[0].rmatvec, x)
def _rmatmat(self, x):
return self._power(self.args[0].rmatmat, x)
def _matmat(self, x):
return self._power(self.args[0].matmat, x)
def _adjoint(self):
A, p = self.args
return A.H ** p
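# Hedged sketch (not part of the original file): the operator classes above
# compose lazily. For instance, with ``aslinearoperator`` (defined below):
#
#     A = aslinearoperator(np.array([[0., 1.], [0., 0.]]))
#     (A ** 2).matvec([1., 1.])  # -> array([0., 0.]); A applied twice
#     (2 * A).matvec([1., 1.])   # -> array([2., 0.]); scaled on the fly
#
# No dense power or product is ever formed; each composite defers to the
# wrapped operator's matvec.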
class MatrixLinearOperator(LinearOperator):
def __init__(self, A):
super(MatrixLinearOperator, self).__init__(A.dtype, A.shape)
self.A = A
self.__adj = None
self.args = (A,)
def _matmat(self, X):
return self.A.dot(X)
def _adjoint(self):
if self.__adj is None:
self.__adj = _AdjointMatrixOperator(self)
return self.__adj
class _AdjointMatrixOperator(MatrixLinearOperator):
def __init__(self, adjoint):
self.A = adjoint.A.T.conj()
self.__adjoint = adjoint
self.args = (adjoint,)
self.shape = adjoint.shape[1], adjoint.shape[0]
@property
def dtype(self):
return self.__adjoint.dtype
def _adjoint(self):
return self.__adjoint
class IdentityOperator(LinearOperator):
def __init__(self, shape, dtype=None):
super(IdentityOperator, self).__init__(dtype, shape)
def _matvec(self, x):
return x
def _rmatvec(self, x):
return x
def _rmatmat(self, x):
return x
def _matmat(self, x):
return x
def _adjoint(self):
return self
def aslinearoperator(A):
"""Return A as a LinearOperator.
'A' may be any of the following types:
- ndarray
- matrix
- sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
- LinearOperator
- An object with .shape and .matvec attributes
See the LinearOperator documentation for additional information.
Notes
-----
If 'A' has no .dtype attribute, the data type is determined by calling
:func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
call upon the linear operator creation.
Examples
--------
>>> from scipy.sparse.linalg import aslinearoperator
>>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
>>> aslinearoperator(M)
<2x3 MatrixLinearOperator with dtype=int32>
"""
if isinstance(A, LinearOperator):
return A
elif isinstance(A, np.ndarray) or isinstance(A, np.matrix):
if A.ndim > 2:
raise ValueError('array must have ndim <= 2')
A = np.atleast_2d(np.asarray(A))
return MatrixLinearOperator(A)
elif isspmatrix(A) or is_pydata_spmatrix(A):
return MatrixLinearOperator(A)
else:
if hasattr(A, 'shape') and hasattr(A, 'matvec'):
rmatvec = None
rmatmat = None
dtype = None
if hasattr(A, 'rmatvec'):
rmatvec = A.rmatvec
if hasattr(A, 'rmatmat'):
rmatmat = A.rmatmat
if hasattr(A, 'dtype'):
dtype = A.dtype
return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
rmatmat=rmatmat, dtype=dtype)
else:
raise TypeError('type not understood')

View file

@ -0,0 +1,15 @@
"Iterative Solvers for Sparse Linear Systems"
#from info import __doc__
from .iterative import *
from .minres import minres
from .lgmres import lgmres
from .lsqr import lsqr
from .lsmr import lsmr
from ._gcrotmk import gcrotmk
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester

View file

@ -0,0 +1,487 @@
# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
from scipy.sparse.linalg.isolve.utils import make_system
__all__ = ['gcrotmk']
def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
prepend_outer_v=False):
"""
FGMRES Arnoldi process, with optional projection or augmentation
Parameters
----------
matvec : callable
Operation A*x
v0 : ndarray
Initial vector, normalized to nrm2(v0) == 1
m : int
Number of GMRES rounds
atol : float
Absolute tolerance for early exit
lpsolve : callable
Left preconditioner L
rpsolve : callable
Right preconditioner R
    cs : list of ndarray
        Columns of the matrix C in GCROT
outer_v : list of ndarrays
Augmentation vectors in LGMRES
prepend_outer_v : bool, optional
Whether augmentation vectors come before or after
Krylov iterates
Raises
------
LinAlgError
If nans encountered
Returns
-------
Q, R : ndarray
QR decomposition of the upper Hessenberg H=QR
B : ndarray
Projections corresponding to matrix C
vs : list of ndarray
Columns of matrix V
zs : list of ndarray
Columns of matrix Z
y : ndarray
Solution to ||H y - e_1||_2 = min!
res : float
The final (preconditioned) residual norm
"""
if lpsolve is None:
lpsolve = lambda x: x
if rpsolve is None:
rpsolve = lambda x: x
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
vs = [v0]
zs = []
y = None
res = np.nan
m = m + len(outer_v)
# Orthogonal projection coefficients
B = np.zeros((len(cs), m), dtype=v0.dtype)
# H is stored in QR factorized form
Q = np.ones((1, 1), dtype=v0.dtype)
R = np.zeros((1, 0), dtype=v0.dtype)
eps = np.finfo(v0.dtype).eps
breakdown = False
# FGMRES Arnoldi process
for j in range(m):
# L A Z = C B + V H
if prepend_outer_v and j < len(outer_v):
z, w = outer_v[j]
elif prepend_outer_v and j == len(outer_v):
z = rpsolve(v0)
w = None
elif not prepend_outer_v and j >= m - len(outer_v):
z, w = outer_v[j - (m - len(outer_v))]
else:
z = rpsolve(vs[-1])
w = None
if w is None:
w = lpsolve(matvec(z))
else:
# w is clobbered below
w = w.copy()
w_norm = nrm2(w)
# GCROT projection: L A -> (1 - C C^H) L A
# i.e. orthogonalize against C
for i, c in enumerate(cs):
alpha = dot(c, w)
B[i,j] = alpha
w = axpy(c, w, c.shape[0], -alpha) # w -= alpha*c
# Orthogonalize against V
hcur = np.zeros(j+2, dtype=Q.dtype)
for i, v in enumerate(vs):
alpha = dot(v, w)
hcur[i] = alpha
w = axpy(v, w, v.shape[0], -alpha) # w -= alpha*v
hcur[i+1] = nrm2(w)
with np.errstate(over='ignore', divide='ignore'):
# Careful with denormals
alpha = 1/hcur[-1]
if np.isfinite(alpha):
w = scal(alpha, w)
if not (hcur[-1] > eps * w_norm):
# w essentially in the span of previous vectors,
# or we have nans. Bail out after updating the QR
# solution.
breakdown = True
vs.append(w)
zs.append(z)
# Arnoldi LSQ problem
# Add new column to H=Q*R, padding other columns with zeros
Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
Q2[:j+1,:j+1] = Q
Q2[j+1,j+1] = 1
R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
R2[:j+1,:] = R
Q, R = qr_insert(Q2, R2, hcur, j, which='col',
overwrite_qru=True, check_finite=False)
# Transformed least squares problem
# || Q R y - inner_res_0 * e_1 ||_2 = min!
# Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
# Residual is immediately known
res = abs(Q[0,-1])
# Check for termination
if res < atol or breakdown:
break
if not np.isfinite(R[j,j]):
# nans encountered, bail out
raise LinAlgError()
# -- Get the LSQ problem solution
# The problem is triangular, but the condition number may be
# bad (or in case of breakdown the last diagonal entry may be
# zero), so use lstsq instead of trtrs.
    y, _, _, _ = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
B = B[:,:j+1]
return Q, R, B, vs, zs, y, res
def gcrotmk(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
m=20, k=None, CU=None, discard_C=False, truncate='oldest',
atol=None):
"""
Solve a matrix equation using flexible GCROT(m,k) algorithm.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : {array, matrix}
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is `tol`.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}, optional
Preconditioner for A. The preconditioner should approximate the
inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
can vary from iteration to iteration. Effective preconditioning
dramatically improves the rate of convergence, which implies that
fewer iterations are needed to reach a given error tolerance.
callback : function, optional
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
m : int, optional
        Number of inner FGMRES iterations per outer iteration.
Default: 20
k : int, optional
Number of vectors to carry between inner FGMRES iterations.
According to [2]_, good values are around m.
Default: m
CU : list of tuples, optional
List of tuples ``(c, u)`` which contain the columns of the matrices
C and U in the GCROT(m,k) algorithm. For details, see [2]_.
The list given and vectors contained in it are modified in-place.
If not given, start from empty matrices. The ``c`` elements in the
tuples can be ``None``, in which case the vectors are recomputed
via ``c = A u`` on start and orthogonalized as described in [3]_.
discard_C : bool, optional
Discard the C-vectors at the end. Useful if recycling Krylov subspaces
for different linear systems.
truncate : {'oldest', 'smallest'}, optional
        Truncation scheme to use: drop either the oldest vectors, or the
        vectors with the smallest singular values, using the scheme
        discussed in [1]_ and [2]_.
See [2]_ for detailed comparison.
Default: 'oldest'
Returns
-------
x : array or matrix
The solution found.
info : int
Provides convergence information:
* 0 : successful exit
* >0 : convergence to tolerance not achieved, number of iterations
References
----------
.. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
methods'', SIAM J. Numer. Anal. 36, 864 (1999).
.. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
of GCROT for solving nonsymmetric linear systems'',
SIAM J. Sci. Comput. 32, 172 (2010).
.. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
''Recycling Krylov subspaces for sequences of linear systems'',
SIAM J. Sci. Comput. 28, 1651 (2006).
"""
    A, M, x, b, postprocess = make_system(A, M, x0, b)
if not np.isfinite(b).all():
raise ValueError("RHS must contain only finite numbers")
if truncate not in ('oldest', 'smallest'):
raise ValueError("Invalid value for 'truncate': %r" % (truncate,))
if atol is None:
warnings.warn("scipy.sparse.linalg.gcrotmk called without specifying `atol`. "
"The default value will change in the future. To preserve "
"current behavior, set ``atol=tol``.",
category=DeprecationWarning, stacklevel=2)
atol = tol
matvec = A.matvec
psolve = M.matvec
if CU is None:
CU = []
if k is None:
k = m
axpy, dot, scal = None, None, None
r = b - matvec(x)
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))
b_norm = nrm2(b)
if discard_C:
CU[:] = [(None, u) for c, u in CU]
# Reorthogonalize old vectors
if CU:
# Sort already existing vectors to the front
CU.sort(key=lambda cu: cu[0] is not None)
# Fill-in missing ones
C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
us = []
j = 0
while CU:
# More memory-efficient: throw away old vectors as we go
c, u = CU.pop(0)
if c is None:
c = matvec(u)
C[:,j] = c
j += 1
us.append(u)
# Orthogonalize
Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
del C
# C := Q
cs = list(Q.T)
# U := U P R^-1, back-substitution
new_us = []
for j in range(len(cs)):
u = us[P[j]]
for i in range(j):
u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
# discard rest of the vectors
break
u = scal(1.0/R[j,j], u)
new_us.append(u)
# Form the new CU lists
CU[:] = list(zip(cs, new_us))[::-1]
if CU:
axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))
# Solve first the projection operation with respect to the CU
# vectors. This corresponds to modifying the initial guess to
# be
#
# x' = x + U y
# y = argmin_y || b - A (x + U y) ||^2
#
# The solution is y = C^H (b - A x)
for c, u in CU:
yc = dot(c, r)
x = axpy(u, x, x.shape[0], yc)
r = axpy(c, r, r.shape[0], -yc)
# GCROT main iteration
for j_outer in range(maxiter):
# -- callback
if callback is not None:
callback(x)
beta = nrm2(r)
# -- check stopping condition
beta_tol = max(atol, tol * b_norm)
if beta <= beta_tol and (j_outer > 0 or CU):
# recompute residual to avoid rounding error
r = b - matvec(x)
beta = nrm2(r)
if beta <= beta_tol:
j_outer = -1
break
ml = m + max(k - len(CU), 0)
cs = [c for c, u in CU]
try:
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
r/beta,
ml,
rpsolve=psolve,
atol=max(atol, tol*b_norm)/beta,
cs=cs)
y *= beta
except LinAlgError:
# Floating point over/underflow, non-finite result from
# matmul etc. -- report failure.
break
#
# At this point,
#
# [A U, A Z] = [C, V] G; G = [ I B ]
# [ 0 H ]
#
# where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
#
# || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
#
# from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
#
#
# GCROT(m,k) update
#
# Define new outer vectors
# ux := (Z - U B) y
ux = zs[0]*y[0]
for z, yc in zip(zs[1:], y[1:]):
ux = axpy(z, ux, ux.shape[0], yc) # ux += z*yc
by = B.dot(y)
for cu, byc in zip(CU, by):
c, u = cu
ux = axpy(u, ux, ux.shape[0], -byc) # ux -= u*byc
# cx := V H y
hy = Q.dot(R.dot(y))
cx = vs[0] * hy[0]
for v, hyc in zip(vs[1:], hy[1:]):
cx = axpy(v, cx, cx.shape[0], hyc) # cx += v*hyc
# Normalize cx, maintaining cx = A ux
# This new cx is orthogonal to the previous C, by construction
try:
alpha = 1/nrm2(cx)
if not np.isfinite(alpha):
raise FloatingPointError()
except (FloatingPointError, ZeroDivisionError):
# Cannot update, so skip it
continue
cx = scal(alpha, cx)
ux = scal(alpha, ux)
# Update residual and solution
gamma = dot(cx, r)
r = axpy(cx, r, r.shape[0], -gamma) # r -= gamma*cx
x = axpy(ux, x, x.shape[0], gamma) # x += gamma*ux
# Truncate CU
if truncate == 'oldest':
while len(CU) >= k and CU:
del CU[0]
elif truncate == 'smallest':
if len(CU) >= k and CU:
# cf. [1,2]
D = solve(R[:-1,:].T, B.T).T
W, sigma, V = svd(D)
# C := C W[:,:k-1], U := U W[:,:k-1]
new_CU = []
for j, w in enumerate(W[:,:k-1].T):
c, u = CU[0]
c = c * w[0]
u = u * w[0]
for cup, wp in zip(CU[1:], w[1:]):
cp, up = cup
c = axpy(cp, c, c.shape[0], wp)
u = axpy(up, u, u.shape[0], wp)
# Reorthogonalize at the same time; not necessary
# in exact arithmetic, but floating point error
# tends to accumulate here
for cp, up in new_CU:
alpha = dot(cp, c)
c = axpy(cp, c, c.shape[0], -alpha)
u = axpy(up, u, u.shape[0], -alpha)
alpha = nrm2(c)
c = scal(1.0/alpha, c)
u = scal(1.0/alpha, u)
new_CU.append((c, u))
CU[:] = new_CU
# Add new vector to CU
CU.append((cx, ux))
# Include the solution vector to the span
CU.append((None, x.copy()))
if discard_C:
CU[:] = [(None, uz) for cz, uz in CU]
return postprocess(x), j_outer + 1
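# Hedged usage sketch (not part of the original file): recycle the CU
# subspace across related right-hand sides, as described for the ``CU``
# argument above (assumes scipy.sparse.diags):
#
#     import numpy as np
#     from scipy.sparse import diags
#     A = diags([2., -1., -1.], [0, -1, 1], shape=(100, 100), format='csr')
#     b = np.ones(100)
#     CU = []
#     x1, info1 = gcrotmk(A, b, CU=CU, atol=1e-8)      # fills CU in place
#     x2, info2 = gcrotmk(A, 2 * b, CU=CU, atol=1e-8)  # reuses the subspace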

View file

@ -0,0 +1,816 @@
"""Iterative methods for solving linear systems"""
__all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr']
import warnings
import numpy as np
from . import _iterative
from scipy.sparse.linalg.interface import LinearOperator
from .utils import make_system
from scipy._lib._util import _aligned_zeros
from scipy._lib._threadsafety import non_reentrant
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'}
# Part of the docstring common to all iterative solvers
common_doc1 = \
"""
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}"""
common_doc2 = \
"""b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
"""
def _stoptest(residual, atol):
"""
Successful termination condition for the solvers.
"""
resid = np.linalg.norm(residual)
if resid <= atol:
return resid, 1
else:
return resid, 0
def _get_atol(tol, atol, bnrm2, get_residual, routine_name):
"""
Parse arguments for absolute tolerance in termination condition.
Parameters
----------
tol, atol : object
The arguments passed into the solver routine by user.
bnrm2 : float
2-norm of the rhs vector.
get_residual : callable
Callable ``get_residual()`` that returns the initial value of
the residual.
routine_name : str
Name of the routine.
"""
if atol is None:
warnings.warn("scipy.sparse.linalg.{name} called without specifying `atol`. "
"The default value will be changed in a future release. "
"For compatibility, specify a value for `atol` explicitly, e.g., "
"``{name}(..., atol=0)``, or to retain the old behavior "
"``{name}(..., atol='legacy')``".format(name=routine_name),
category=DeprecationWarning, stacklevel=4)
atol = 'legacy'
tol = float(tol)
if atol == 'legacy':
# emulate old legacy behavior
resid = get_residual()
if resid <= tol:
return 'exit'
if bnrm2 == 0:
return tol
else:
return tol * float(bnrm2)
else:
return max(float(atol), tol * float(bnrm2))
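# Hedged illustration of the rule encoded above for a non-'legacy' atol:
# the solvers stop once norm(residual) <= max(atol, tol * norm(b)), e.g.
#
#     # tol=1e-5, atol=1e-8, norm(b)=2.0  ->  threshold 2e-5
#     # _get_atol(1e-5, 1e-8, 2.0, None, 'cg') == max(1e-8, 1e-5 * 2.0)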
def set_docstring(header, Ainfo, footer='', atol_default='0'):
def combine(fn):
fn.__doc__ = '\n'.join((header, common_doc1,
' ' + Ainfo.replace('\n', '\n '),
common_doc2, footer))
return fn
return combine
@set_docstring('Use BIConjugate Gradient iteration to solve ``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.\n'
'Alternatively, ``A`` can be a linear operator which can\n'
'produce ``Ax`` and ``A^T x`` using, e.g.,\n'
'``scipy.sparse.linalg.LinearOperator``.',
footer="""
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import bicg
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = bicg(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
)
@non_reentrant()
def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A,M,x,b,postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec, rmatvec = A.matvec, A.rmatvec
psolve, rpsolve = M.matvec, M.rmatvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'bicgrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicg')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(6*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice2] *= sclr2
work[slice2] += sclr1*rmatvec(work[slice1])
elif (ijob == 3):
work[slice1] = psolve(work[slice2])
elif (ijob == 4):
work[slice1] = rpsolve(work[slice2])
elif (ijob == 5):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 6):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
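# Hedged note (not part of the original file): the loop above is a
# reverse-communication protocol. The Fortran `revcom` routine returns an
# `ijob` code asking Python to apply A (ijob=1), A^H (2), the
# preconditioner M (3), M^H (4), recompute A@x (5), or run the stopping
# test (6), and is then re-entered until it signals completion (ijob=-1).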
@set_docstring('Use BIConjugate Gradient STABilized iteration to solve '
'``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.\n'
'Alternatively, ``A`` can be a linear operator which can\n'
'produce ``Ax`` using, e.g.,\n'
'``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'bicgstabrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicgstab')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(7*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@set_docstring('Use Conjugate Gradient iteration to solve ``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.\n'
'``A`` must represent a hermitian, positive definite matrix.\n'
'Alternatively, ``A`` can be a linear operator which can\n'
'produce ``Ax`` using, e.g.,\n'
'``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'cgrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cg')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(4*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
if info == 1 and iter_ > 1:
# recompute residual and recheck, to avoid
# accumulating rounding error
work[slice1] = b - matvec(x)
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@set_docstring('Use Conjugate Gradient Squared iteration to solve ``Ax = b``.',
'The real-valued N-by-N matrix of the linear system.\n'
'Alternatively, ``A`` can be a linear operator which can\n'
'produce ``Ax`` using, e.g.,\n'
'``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'cgsrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cgs')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(7*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
if info == 1 and iter_ > 1:
# recompute residual and recheck, to avoid
# accumulating rounding error
work[slice1] = b - matvec(x)
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info == -10:
# termination due to breakdown: check for convergence
resid, ok = _stoptest(b - matvec(x), atol)
if ok:
info = 0
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@non_reentrant()
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None,
restrt=None, atol=None, callback_type=None):
"""
Use Generalized Minimal RESidual iteration to solve ``Ax = b``.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : int
Provides convergence information:
* 0 : successful exit
* >0 : convergence to tolerance not achieved, number of iterations
* <0 : illegal input or breakdown
    Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution (a vector of zeros by default).
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
restart : int, optional
Number of iterations between restarts. Larger values increase
iteration cost, but may be necessary for convergence.
Default is 20.
maxiter : int, optional
Maximum number of iterations (restart cycles). Iteration will stop
after maxiter steps even if the specified tolerance has not been
achieved.
M : {sparse matrix, dense matrix, LinearOperator}
Inverse of the preconditioner of A. M should approximate the
inverse of A and be easy to solve for (see Notes). Effective
preconditioning dramatically improves the rate of convergence,
which implies that fewer iterations are needed to reach a given
error tolerance. By default, no preconditioner is used.
callback : function
User-supplied function to call after each iteration. It is called
as `callback(args)`, where `args` are selected by `callback_type`.
callback_type : {'x', 'pr_norm', 'legacy'}, optional
Callback function argument requested:
- ``x``: current iterate (ndarray), called on every restart
- ``pr_norm``: relative (preconditioned) residual norm (float),
called on every inner iteration
- ``legacy`` (default): same as ``pr_norm``, but also changes the
meaning of 'maxiter' to count inner iterations instead of restart
cycles.
restrt : int, optional
DEPRECATED - use `restart` instead.
See Also
--------
LinearOperator
Notes
-----
A preconditioner, P, is chosen such that P is close to A but easy to solve
for. The preconditioner parameter required by this routine is
``M = P^-1``. The inverse should preferably not be calculated
explicitly. Rather, use the following template to produce M::
# Construct a linear operator that computes P^-1 * x.
import scipy.sparse.linalg as spla
M_x = lambda x: spla.spsolve(P, x)
M = spla.LinearOperator((n, n), M_x)
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import gmres
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = gmres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
# Change 'restrt' keyword to 'restart'
if restrt is None:
restrt = restart
elif restart is not None:
raise ValueError("Cannot specify both restart and restrt keywords. "
"Preferably use 'restart' only.")
if callback is not None and callback_type is None:
# Warn about 'callback_type' semantic changes.
# Probably should be removed only in far future, Scipy 2.0 or so.
warnings.warn("scipy.sparse.linalg.gmres called without specifying `callback_type`. "
"The default value will be changed in a future release. "
"For compatibility, specify a value for `callback_type` explicitly, e.g., "
"``{name}(..., callback_type='pr_norm')``, or to retain the old behavior "
"``{name}(..., callback_type='legacy')``",
category=DeprecationWarning, stacklevel=3)
if callback_type is None:
callback_type = 'legacy'
if callback_type not in ('x', 'pr_norm', 'legacy'):
raise ValueError("Unknown callback_type: {!r}".format(callback_type))
if callback is None:
callback_type = 'none'
    A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
if restrt is None:
restrt = 20
restrt = min(restrt, n)
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'gmresrevcom')
bnrm2 = np.linalg.norm(b)
Mb_nrm2 = np.linalg.norm(psolve(b))
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, bnrm2, get_residual, 'gmres')
if atol == 'exit':
return postprocess(x), 0
if bnrm2 == 0:
return postprocess(b), 0
# Tolerance passed to GMRESREVCOM applies to the inner iteration
# and deals with the left-preconditioned residual.
ptol_max_factor = 1.0
ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2)
resid = np.nan
presid = np.nan
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros((6+restrt)*n,dtype=x.dtype)
work2 = _aligned_zeros((restrt+1)*(2*restrt+2),dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
old_ijob = ijob
first_pass = True
resid_ready = False
iter_num = 1
while True:
olditer = iter_
x, iter_, presid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, restrt, work, work2, iter_, presid, info, ndx1, ndx2, ijob, ptol)
if callback_type == 'x' and iter_ != olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1): # gmres success, update last residual
if callback_type in ('pr_norm', 'legacy'):
if resid_ready:
callback(presid / bnrm2)
elif callback_type == 'x':
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
if not first_pass and old_ijob == 3:
resid_ready = True
first_pass = False
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
if resid_ready:
if callback_type in ('pr_norm', 'legacy'):
callback(presid / bnrm2)
resid_ready = False
iter_num = iter_num+1
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
# Inner loop tolerance control
if info or presid > ptol:
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
else:
# Inner loop tolerance OK, but outer loop not.
ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
if resid != 0:
ptol = presid * min(ptol_max_factor, atol / resid)
else:
ptol = presid * ptol_max_factor
old_ijob = ijob
ijob = 2
if callback_type == 'legacy':
# Legacy behavior
if iter_num > maxiter:
info = maxiter
break
if info >= 0 and not (resid <= atol):
# info isn't set appropriately otherwise
info = maxiter
return postprocess(x), info
@non_reentrant()
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None,
atol=None):
"""Use Quasi-Minimal Residual iteration to solve ``Ax = b``.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real-valued N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` and ``A^T x`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M1 : {sparse matrix, dense matrix, LinearOperator}
Left preconditioner for A.
M2 : {sparse matrix, dense matrix, LinearOperator}
Right preconditioner for A. Used together with the left
        preconditioner M1. The matrix M1*A*M2 should be better
        conditioned than A alone.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
See Also
--------
LinearOperator
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import qmr
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = qmr(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
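    Explicit preconditioners can be passed as linear operators providing both
    ``matvec`` and ``rmatvec`` (a sketch; identity operators are used here,
    which leave the system unchanged):
    >>> from scipy.sparse.linalg import LinearOperator
    >>> ident = LinearOperator(A.shape, matvec=lambda v: v, rmatvec=lambda v: v)
    >>> x, exitCode = qmr(A, b, M1=ident, M2=ident)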
"""
A_ = A
A, M, x, b, postprocess = make_system(A, None, x0, b)
if M1 is None and M2 is None:
if hasattr(A_,'psolve'):
def left_psolve(b):
return A_.psolve(b,'left')
def right_psolve(b):
return A_.psolve(b,'right')
def left_rpsolve(b):
return A_.rpsolve(b,'left')
def right_rpsolve(b):
return A_.rpsolve(b,'right')
M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
else:
def id(b):
return b
M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)
n = len(b)
if maxiter is None:
maxiter = n*10
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'qmrrevcom')
get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(11*n, x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*A.matvec(work[slice1])
elif (ijob == 2):
work[slice2] *= sclr2
work[slice2] += sclr1*A.rmatvec(work[slice1])
elif (ijob == 3):
work[slice1] = M1.matvec(work[slice2])
elif (ijob == 4):
work[slice1] = M2.matvec(work[slice2])
elif (ijob == 5):
work[slice1] = M1.rmatvec(work[slice2])
elif (ijob == 6):
work[slice1] = M2.rmatvec(work[slice2])
elif (ijob == 7):
work[slice2] *= sclr2
work[slice2] += sclr1*A.matvec(x)
elif (ijob == 8):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info

View file

@ -0,0 +1,232 @@
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import get_blas_funcs
from .utils import make_system
from ._gcrotmk import _fgmres
__all__ = ['lgmres']
def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True,
prepend_outer_v=False, atol=None):
"""
Solve a matrix equation using the LGMRES algorithm.
The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
in the convergence in restarted GMRES, and often converges in fewer
iterations.
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : {array, matrix}
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is `tol`.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}, optional
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function, optional
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
inner_m : int, optional
Number of inner GMRES iterations per each outer iteration.
outer_k : int, optional
Number of vectors to carry between inner GMRES iterations.
According to [1]_, good values are in the range of 1...3.
However, note that if you want to use the additional vectors to
accelerate solving multiple similar problems, larger values may
be beneficial.
outer_v : list of tuples, optional
List containing tuples ``(v, Av)`` of vectors and corresponding
matrix-vector products, used to augment the Krylov subspace, and
carried between inner GMRES iterations. The element ``Av`` can
be `None` if the matrix-vector product should be re-evaluated.
This parameter is modified in-place by `lgmres`, and can be used
to pass "guess" vectors in and out of the algorithm when solving
similar problems.
    store_outer_Av : bool, optional
        Whether LGMRES should also store ``A*v`` in addition to the vectors
        `v` in the `outer_v` list. Default is True.
prepend_outer_v : bool, optional
Whether to put outer_v augmentation vectors before Krylov iterates.
In standard LGMRES, prepend_outer_v=False.
Returns
-------
x : array or matrix
The converged solution.
info : int
Provides convergence information:
- 0 : successful exit
- >0 : convergence to tolerance not achieved, number of iterations
- <0 : illegal input or breakdown
Notes
-----
The LGMRES algorithm [1]_ [2]_ is designed to avoid the
slowing of convergence in restarted GMRES, due to alternating
    residual vectors. It typically outperforms GMRES(m) with
    comparable memory requirements by some measure, or at least is not
    much worse.
Another advantage in this algorithm is that you can supply it with
'guess' vectors in the `outer_v` argument that augment the Krylov
subspace. If the solution lies close to the span of these vectors,
the algorithm converges faster. This can be useful if several very
similar matrices need to be inverted one after another, such as in
Newton-Krylov iteration where the Jacobian matrix often changes
little in the nonlinear steps.
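    A sketch of reusing the augmentation vectors across two nearby systems
    (``A1``, ``b1``, ``A2``, ``b2`` are assumed to be given)::
        outer_v = []
        x1, info1 = lgmres(A1, b1, outer_v=outer_v, store_outer_Av=True)
        # outer_v now holds (v, Av) pairs that seed the second solve
        x2, info2 = lgmres(A2, b2, outer_v=outer_v)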
References
----------
.. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
Anal. Appl. 26, 962 (2005).
.. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
restarted GMRES", PhD thesis, University of Colorado (2003).
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lgmres
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = lgmres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
    A, M, x, b, postprocess = make_system(A, M, x0, b)
if not np.isfinite(b).all():
raise ValueError("RHS must contain only finite numbers")
if atol is None:
warnings.warn("scipy.sparse.linalg.lgmres called without specifying `atol`. "
"The default value will change in the future. To preserve "
"current behavior, set ``atol=tol``.",
category=DeprecationWarning, stacklevel=2)
atol = tol
matvec = A.matvec
psolve = M.matvec
if outer_v is None:
outer_v = []
axpy, dot, scal = None, None, None
nrm2 = get_blas_funcs('nrm2', [b])
b_norm = nrm2(b)
ptol_max_factor = 1.0
for k_outer in range(maxiter):
r_outer = matvec(x) - b
# -- callback
if callback is not None:
callback(x)
# -- determine input type routines
if axpy is None:
if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
x = x.astype(r_outer.dtype)
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
(x, r_outer))
# -- check stopping condition
r_norm = nrm2(r_outer)
if r_norm <= max(atol, tol * b_norm):
break
# -- inner LGMRES iteration
v0 = -psolve(r_outer)
inner_res_0 = nrm2(v0)
if inner_res_0 == 0:
rnorm = nrm2(r_outer)
raise RuntimeError("Preconditioner returned a zero vector; "
"|v| ~ %.1g, |M v| = 0" % rnorm)
v0 = scal(1.0/inner_res_0, v0)
ptol = min(ptol_max_factor, max(atol, tol*b_norm)/r_norm)
try:
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
v0,
inner_m,
lpsolve=psolve,
atol=ptol,
outer_v=outer_v,
prepend_outer_v=prepend_outer_v)
y *= inner_res_0
if not np.isfinite(y).all():
# Overflow etc. in computation. There's no way to
# recover from this, so we have to bail out.
raise LinAlgError()
except LinAlgError:
# Floating point over/underflow, non-finite result from
# matmul etc. -- report failure.
return postprocess(x), k_outer + 1
# Inner loop tolerance control
if pres > ptol:
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
else:
ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
# -- GMRES terminated: eval solution
dx = zs[0]*y[0]
for w, yc in zip(zs[1:], y[1:]):
dx = axpy(w, dx, dx.shape[0], yc) # dx += w*yc
# -- Store LGMRES augmentation vectors
nx = nrm2(dx)
if nx > 0:
if store_outer_Av:
q = Q.dot(R.dot(y))
ax = vs[0]*q[0]
for v, qc in zip(vs[1:], q[1:]):
ax = axpy(v, ax, ax.shape[0], qc)
outer_v.append((dx/nx, ax/nx))
else:
outer_v.append((dx/nx, None))
# -- Retain only a finite number of augmentation vectors
while len(outer_v) > outer_k:
del outer_v[0]
# -- Apply step
x += dx
else:
# didn't converge ...
return postprocess(x), maxiter
return postprocess(x), 0

View file

@ -0,0 +1,480 @@
"""
Copyright (C) 2010 David Fong and Michael Saunders
LSMR uses an iterative method.
07 Jun 2010: Documentation updated
03 Jun 2010: First release version in Python
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
__all__ = ['lsmr']
from numpy import zeros, infty, atleast_1d, result_type
from numpy.linalg import norm
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
from .lsqr import _sym_ortho
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
maxiter=None, show=False, x0=None):
"""Iterative solver for least-squares problems.
lsmr solves the system of linear equations ``Ax = b``. If the system
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
    A is a rectangular matrix of dimension m-by-n, where all cases are
    allowed: m = n, m > n, or m < n. ``b`` is a vector of length m.
The matrix A may be dense or sparse (usually sparse).
Parameters
----------
A : {matrix, sparse matrix, ndarray, LinearOperator}
Matrix A in the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` and ``A^H x`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : array_like, shape (m,)
Vector b in the linear system.
damp : float
Damping factor for regularized least-squares. `lsmr` solves
the regularized least-squares problem::
min ||(b) - ( A )x||
||(0) (damp*I) ||_2
where damp is a scalar. If damp is None or 0, the system
is solved without regularization.
atol, btol : float, optional
Stopping tolerances. `lsmr` continues iterations until a
certain backward error estimate is smaller than some quantity
depending on atol and btol. Let ``r = b - Ax`` be the
residual vector for the current approximate solution ``x``.
If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
Otherwise, lsmr terminates when ``norm(A^H r) <=
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),
the final ``norm(r)`` should be accurate to about 6
        digits. (The final x will usually have fewer correct digits,
        depending on ``cond(A)`` and the size of `damp`.) If `atol`
or `btol` is None, a default value of 1.0e-6 will be used.
Ideally, they should be estimates of the relative error in the
entries of A and B respectively. For example, if the entries
of `A` have 7 correct digits, set atol = 1e-7. This prevents
the algorithm from doing unnecessary work beyond the
uncertainty of the input data.
conlim : float, optional
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
`conlim`. For compatible systems ``Ax = b``, conlim could be
as large as 1.0e+12 (say). For least-squares problems,
`conlim` should be less than 1.0e+8. If `conlim` is None, the
default value is 1e+8. Maximum precision can be obtained by
setting ``atol = btol = conlim = 0``, but the number of
iterations may then be excessive.
maxiter : int, optional
`lsmr` terminates if the number of iterations reaches
`maxiter`. The default is ``maxiter = min(m, n)``. For
ill-conditioned systems, a larger value of `maxiter` may be
needed.
show : bool, optional
Print iterations logs if ``show=True``.
x0 : array_like, shape (n,), optional
        Initial guess of x; if None, zeros are used.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
Least-square solution returned.
istop : int
istop gives the reason for stopping::
istop = 0 means x=0 is a solution. If x0 was given, then x=x0 is a
solution.
= 1 means x is an approximate solution to A*x = B,
according to atol and btol.
= 2 means x approximately solves the least-squares problem
according to atol.
= 3 means COND(A) seems to be greater than CONLIM.
= 4 is the same as 1 with atol = btol = eps (machine
precision)
= 5 is the same as 2 with atol = eps.
= 6 is the same as 3 with CONLIM = 1/eps.
= 7 means ITN reached maxiter before the other stopping
conditions were satisfied.
itn : int
Number of iterations used.
normr : float
``norm(b-Ax)``
normar : float
``norm(A^H (b - Ax))``
norma : float
``norm(A)``
conda : float
Condition number of A.
normx : float
``norm(x)``
Notes
-----
.. versionadded:: 0.11.0
References
----------
.. [1] D. C.-L. Fong and M. A. Saunders,
"LSMR: An iterative algorithm for sparse least-squares problems",
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
https://arxiv.org/abs/1006.0758
.. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lsmr
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution `[0, 0]`
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
0
>>> x
array([ 0., 0.])
    The stopping code `istop=0` indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> normr
4.440892098500627e-16
    As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
    limits. The given solution `[1., -1.]` obviously solves the equation. The
    remaining return values include information about the number of iterations
    (`itn=1`) and the norm of the residual, i.e., the remaining difference
    between the left- and right-hand sides of the solved equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, normr = lsmr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> normr
0.005773502691896255
    `istop` indicates that the system is inconsistent and thus `x` is instead
    an approximate solution to the corresponding least-squares problem.
    `normr` contains the minimal residual norm that was found.
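    The `damp` parameter regularizes ill-conditioned problems by solving the
    augmented system shown above (a sketch; the damping value is arbitrary):
    >>> x_damped = lsmr(A, b, damp=0.1)[0]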
"""
A = aslinearoperator(A)
b = atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
msg = ('The exact solution is x = 0, or x = x0, if x0 was given ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
hdg1 = ' itn x(1) norm r norm Ar'
hdg2 = ' compatible LS norm A cond A'
pfreq = 20 # print frequency (for repeating the heading)
pcount = 0 # print counter
m, n = A.shape
# stores the num of singular values
minDim = min([m, n])
if maxiter is None:
maxiter = minDim
if x0 is None:
dtype = result_type(A, b, float)
else:
dtype = result_type(A, b, x0, float)
if show:
print(' ')
print('LSMR Least-squares solution of Ax = b\n')
print(f'The matrix A has {m} rows and {n} columns')
print('damp = %20.14e\n' % (damp))
print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim))
print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter))
u = b
normb = norm(b)
if x0 is None:
x = zeros(n, dtype)
beta = normb.copy()
else:
x = atleast_1d(x0)
u = u - A.matvec(x)
beta = norm(u)
if beta > 0:
u = (1 / beta) * u
v = A.rmatvec(u)
alpha = norm(v)
else:
v = zeros(n, dtype)
alpha = 0
if alpha > 0:
v = (1 / alpha) * v
# Initialize variables for 1st iteration.
itn = 0
zetabar = alpha * beta
alphabar = alpha
rho = 1
rhobar = 1
cbar = 1
sbar = 0
h = v.copy()
hbar = zeros(n, dtype)
# Initialize variables for estimation of ||r||.
betadd = beta
betad = 0
rhodold = 1
tautildeold = 0
thetatilde = 0
zeta = 0
d = 0
# Initialize variables for estimation of ||A|| and cond(A)
normA2 = alpha * alpha
maxrbar = 0
minrbar = 1e+100
normA = sqrt(normA2)
condA = 1
normx = 0
# Items for use in stopping rules, normb set earlier
istop = 0
ctol = 0
if conlim > 0:
ctol = 1 / conlim
normr = beta
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
normar = alpha * beta
if normar == 0:
if show:
print(msg[0])
return x, istop, itn, normr, normar, normA, condA, normx
if show:
print(' ')
print(hdg1, hdg2)
test1 = 1
test2 = alpha / beta
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (normr, normar)
str3 = ' %8.1e %8.1e' % (test1, test2)
print(''.join([str1, str2, str3]))
# Main iteration loop.
while itn < maxiter:
itn = itn + 1
# Perform the next step of the bidiagonalization to obtain the
# next beta, u, alpha, v. These satisfy the relations
# beta*u = a*v - alpha*u,
# alpha*v = A'*u - beta*v.
u *= -alpha
u += A.matvec(v)
beta = norm(u)
if beta > 0:
u *= (1 / beta)
v *= -beta
v += A.rmatvec(u)
alpha = norm(v)
if alpha > 0:
v *= (1 / alpha)
# At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.
# Construct rotation Qhat_{k,2k+1}.
chat, shat, alphahat = _sym_ortho(alphabar, damp)
# Use a plane rotation (Q_i) to turn B_i to R_i
rhoold = rho
c, s, rho = _sym_ortho(alphahat, beta)
thetanew = s*alpha
alphabar = c*alpha
# Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
rhobarold = rhobar
zetaold = zeta
thetabar = sbar * rho
rhotemp = cbar * rho
cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
zeta = cbar * zetabar
zetabar = - sbar * zetabar
# Update h, h_hat, x.
hbar *= - (thetabar * rho / (rhoold * rhobarold))
hbar += h
x += (zeta / (rho * rhobar)) * hbar
h *= - (thetanew / rho)
h += v
# Estimate of ||r||.
# Apply rotation Qhat_{k,2k+1}.
betaacute = chat * betadd
betacheck = -shat * betadd
# Apply rotation Q_{k,k+1}.
betahat = c * betaacute
betadd = -s * betaacute
# Apply rotation Qtilde_{k-1}.
# betad = betad_{k-1} here.
thetatildeold = thetatilde
ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
thetatilde = stildeold * rhobar
rhodold = ctildeold * rhobar
betad = - stildeold * betad + ctildeold * betahat
# betad = betad_k here.
# rhodold = rhod_k here.
tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
taud = (zeta - thetatilde * tautildeold) / rhodold
d = d + betacheck * betacheck
normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
# Estimate ||A||.
normA2 = normA2 + beta * beta
normA = sqrt(normA2)
normA2 = normA2 + alpha * alpha
# Estimate cond(A).
maxrbar = max(maxrbar, rhobarold)
if itn > 1:
minrbar = min(minrbar, rhobarold)
condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)
# Test for convergence.
# Compute norms for convergence testing.
normar = abs(zetabar)
normx = norm(x)
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = normr / normb
if (normA * normr) != 0:
test2 = normar / (normA * normr)
else:
test2 = infty
test3 = 1 / condA
t1 = test1 / (1 + normA * normx / normb)
rtol = btol + atol * normA * normx / normb
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
        # The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= maxiter:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
if show:
if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
(itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
(test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
(istop != 0):
if pcount >= pfreq:
pcount = 0
print(' ')
print(hdg1, hdg2)
pcount = pcount + 1
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (normr, normar)
str3 = ' %8.1e %8.1e' % (test1, test2)
str4 = ' %8.1e %8.1e' % (normA, condA)
print(''.join([str1, str2, str3, str4]))
if istop > 0:
break
# Print the stopping condition.
if show:
print(' ')
print('LSMR finished')
print(msg[istop])
print('istop =%8g normr =%8.1e' % (istop, normr))
print(' normA =%8.1e normAr =%8.1e' % (normA, normar))
print('itn =%8g condA =%8.1e' % (itn, condA))
print(' normx =%8.1e' % (normx))
print(str1, str2)
print(str3, str4)
return x, istop, itn, normr, normar, normA, condA, normx

View file

@ -0,0 +1,568 @@
"""Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
iter_lim=None, show=False, calc_var=False, x0=None):
"""Find the least-squares solution to a large, sparse, linear system
of equations.
The function solves ``Ax = b`` or ``min ||Ax - b||^2`` or
``min ||Ax - b||^2 + d^2 ||x||^2``.
The matrix A may be square or rectangular (over-determined or
under-determined), and may have any rank.
::
1. Unsymmetric equations -- solve A*x = b
2. Linear least squares -- solve A*x = b
in the least-squares sense
3. Damped least squares -- solve ( A )*x = ( b )
( damp*I ) ( 0 )
in the least-squares sense
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
Representation of an m-by-n matrix.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` and ``A^T x`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : array_like, shape (m,)
Right-hand side vector ``b``.
damp : float
Damping coefficient.
atol, btol : float, optional
Stopping tolerances. If both are 1.0e-9 (say), the final
residual norm should be accurate to about 9 digits. (The
final x will usually have fewer correct digits, depending on
cond(A) and the size of damp.)
conlim : float, optional
Another stopping tolerance. lsqr terminates if an estimate of
``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
b``, `conlim` could be as large as 1.0e+12 (say). For
least-squares problems, conlim should be less than 1.0e+8.
Maximum precision can be obtained by setting ``atol = btol =
conlim = zero``, but the number of iterations may then be
excessive.
iter_lim : int, optional
Explicit limitation on number of iterations (for safety).
show : bool, optional
Display an iteration log.
calc_var : bool, optional
Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
x0 : array_like, shape (n,), optional
        Initial guess of x; if None, zeros are used.
.. versionadded:: 1.0.0
Returns
-------
x : ndarray of float
The final solution.
istop : int
Gives the reason for termination.
1 means x is an approximate solution to Ax = b.
2 means x approximately solves the least-squares problem.
itn : int
Iteration number upon termination.
r1norm : float
``norm(r)``, where ``r = b - Ax``.
r2norm : float
``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if
``damp == 0``.
anorm : float
Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
acond : float
Estimate of ``cond(Abar)``.
arnorm : float
Estimate of ``norm(A'*r - damp^2*x)``.
xnorm : float
``norm(x)``
var : ndarray of float
If ``calc_var`` is True, estimates all diagonals of
``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
damp^2*I)^{-1}``. This is well defined if A has full column
rank or ``damp > 0``. (Not sure what var means if ``rank(A)
< n`` and ``damp = 0.``)
Notes
-----
LSQR uses an iterative method to approximate the solution. The
number of iterations required to reach a certain accuracy depends
strongly on the scaling of the problem. Poor scaling of the rows
or columns of A should therefore be avoided where possible.
For example, in problem 1 the solution is unaltered by
row-scaling. If a row of A is very small or large compared to
the other rows of A, the corresponding row of ( A b ) should be
scaled up or down.
In problems 1 and 2, the solution x is easily recovered
following column-scaling. Unless better information is known,
the nonzero columns of A should be scaled so that they all have
the same Euclidean norm (e.g., 1.0).
In problem 3, there is no freedom to re-scale if damp is
nonzero. However, the value of damp should be assigned only
after attention has been paid to the scaling of A.
The parameter damp is intended to help regularize
ill-conditioned systems, by preventing the true solution from
being very large. Another aid to regularization is provided by
the parameter acond, which may be used to terminate iterations
before the computed solution becomes very large.
If some initial estimate ``x0`` is known and if ``damp == 0``,
one could proceed as follows:
1. Compute a residual vector ``r0 = b - A*x0``.
2. Use LSQR to solve the system ``A*dx = r0``.
3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
This requires that ``x0`` be available before and after the call
to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
to solve A*x = b and k2 iterations to solve A*dx = r0.
If x0 is "good", norm(r0) will be smaller than norm(b).
If the same stopping tolerances atol and btol are used for each
system, k1 and k2 will be similar, but the final solution x0 + dx
should be more accurate. The only way to reduce the total work
is to use a larger stopping tolerance for the second system.
If some value btol is suitable for A*x = b, the larger value
btol*norm(b)/norm(r0) should be suitable for A*dx = r0.
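    In code, this correction procedure reads (a sketch; ``x0``, ``atol`` and
    ``btol`` are assumed to be given)::
        r0 = b - A.dot(x0)
        scale = np.linalg.norm(b) / np.linalg.norm(r0)
        dx = lsqr(A, r0, atol=atol, btol=btol*scale)[0]
        x = x0 + dx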
Preconditioning is another way to reduce the number of iterations.
If it is possible to solve a related system ``M*x = b``
efficiently, where M approximates A in some helpful way (e.g. M -
A has low rank or its elements are small relative to those of A),
LSQR may converge more rapidly on the system ``A*M(inverse)*z =
b``, after which x can be recovered by solving M*x = z.
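    A right-preconditioned solve might look like (a sketch; ``Msolve`` is an
    assumed routine applying ``M^{-1}``, and `LinearOperator` comes from
    ``scipy.sparse.linalg``)::
        AMinv = LinearOperator(A.shape, matvec=lambda z: A.dot(Msolve(z)))
        z = lsqr(AMinv, b)[0]
        x = Msolve(z)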
If A is symmetric, LSQR should not be used!
Alternatives are the symmetric conjugate-gradient method (cg)
and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
applies to any symmetric A and will converge more rapidly than
LSQR. If A is positive definite, there are other implementations
of symmetric cg that require slightly less work per iteration than
SYMMLQ (but will take the same number of iterations).
References
----------
.. [1] C. C. Paige and M. A. Saunders (1982a).
"LSQR: An algorithm for sparse linear equations and
sparse least squares", ACM TOMS 8(1), 43-71.
.. [2] C. C. Paige and M. A. Saunders (1982b).
"Algorithm 583. LSQR: Sparse linear equations and least
squares problems", ACM TOMS 8(2), 195-209.
.. [3] M. A. Saunders (1995). "Solution of sparse rectangular
systems using LSQR and CRAIG", BIT 35, 588-604.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import lsqr
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
The first example has the trivial solution `[0, 0]`
>>> b = np.array([0., 0., 0.], dtype=float)
>>> x, istop, itn, normr = lsqr(A, b)[:4]
The exact solution is x = 0
>>> istop
0
>>> x
array([ 0., 0.])
    The stopping code `istop=0` indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
The next example has a non-trivial solution:
>>> b = np.array([1., 0., -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
1
>>> x
array([ 1., -1.])
>>> itn
1
>>> r1norm
4.440892098500627e-16
    As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance
    limits. The given solution `[1., -1.]` obviously solves the equation. The
    remaining return values include information about the number of iterations
    (`itn=1`) and the norm of the residual, i.e., the remaining difference
    between the left- and right-hand sides of the solved equation.
The final example demonstrates the behavior in the case where there is no
solution for the equation:
>>> b = np.array([1., 0.01, -1.], dtype=float)
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
>>> istop
2
>>> x
array([ 1.00333333, -0.99666667])
>>> A.dot(x)-b
array([ 0.00333333, -0.00333333, 0.00333333])
>>> r1norm
0.005773502691896255
    `istop` indicates that the system is inconsistent and thus `x` is instead
    an approximate solution to the corresponding least-squares problem. `r1norm`
contains the norm of the minimal residual that was found.
"""
A = aslinearoperator(A)
b = np.atleast_1d(b)
if b.ndim > 1:
b = b.squeeze()
m, n = A.shape
if iter_lim is None:
iter_lim = 2 * n
var = np.zeros(n)
msg = ('The exact solution is x = 0 ',
'Ax - b is small enough, given atol, btol ',
'The least-squares solution is good enough, given atol ',
'The estimate of cond(Abar) has exceeded conlim ',
'Ax - b is small enough for this machine ',
'The least-squares solution is good enough for this machine',
'Cond(Abar) seems to be too large for this machine ',
'The iteration limit has been reached ')
if show:
print(' ')
print('LSQR Least-squares solution of Ax = b')
str1 = f'The matrix A has {m} rows and {n} columns'
str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var)
str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)
str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim)
print(str1)
print(str2)
print(str3)
print(str4)
itn = 0
istop = 0
ctol = 0
if conlim > 0:
ctol = 1/conlim
anorm = 0
acond = 0
dampsq = damp**2
ddnorm = 0
res2 = 0
xnorm = 0
xxnorm = 0
z = 0
cs2 = -1
sn2 = 0
"""
Set up the first vectors u and v for the bidiagonalization.
These satisfy beta*u = b - A*x, alfa*v = A'*u.
"""
u = b
bnorm = np.linalg.norm(b)
if x0 is None:
x = np.zeros(n)
beta = bnorm.copy()
else:
x = np.asarray(x0)
u = u - A.matvec(x)
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
v = A.rmatvec(u)
alfa = np.linalg.norm(v)
else:
v = x.copy()
alfa = 0
if alfa > 0:
v = (1/alfa) * v
w = v.copy()
rhobar = alfa
phibar = beta
rnorm = beta
r1norm = rnorm
r2norm = rnorm
# Reverse the order here from the original matlab code because
# there was an error on return when arnorm==0
arnorm = alfa * beta
if arnorm == 0:
print(msg[0])
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
head1 = ' Itn x[0] r1norm r2norm '
head2 = ' Compatible LS Norm A Cond A'
if show:
print(' ')
print(head1, head2)
test1 = 1
test2 = alfa / beta
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
str3 = ' %8.1e %8.1e' % (test1, test2)
print(str1, str2, str3)
# Main iteration loop.
while itn < iter_lim:
itn = itn + 1
"""
% Perform the next step of the bidiagonalization to obtain the
% next beta, u, alfa, v. These satisfy the relations
% beta*u = a*v - alfa*u,
% alfa*v = A'*u - beta*v.
"""
u = A.matvec(v) - alfa * u
beta = np.linalg.norm(u)
if beta > 0:
u = (1/beta) * u
anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
v = A.rmatvec(u) - beta * v
alfa = np.linalg.norm(v)
if alfa > 0:
v = (1 / alfa) * v
# Use a plane rotation to eliminate the damping parameter.
# This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
rhobar1 = sqrt(rhobar**2 + damp**2)
cs1 = rhobar / rhobar1
sn1 = damp / rhobar1
psi = sn1 * phibar
phibar = cs1 * phibar
# Use a plane rotation to eliminate the subdiagonal element (beta)
# of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
cs, sn, rho = _sym_ortho(rhobar1, beta)
theta = sn * alfa
rhobar = -cs * alfa
phi = cs * phibar
phibar = sn * phibar
tau = sn * phi
# Update x and w.
t1 = phi / rho
t2 = -theta / rho
dk = (1 / rho) * w
x = x + t1 * w
w = v + t2 * w
ddnorm = ddnorm + np.linalg.norm(dk)**2
if calc_var:
var = var + dk**2
# Use a plane rotation on the right to eliminate the
# super-diagonal element (theta) of the upper-bidiagonal matrix.
# Then use the result to estimate norm(x).
delta = sn2 * rho
gambar = -cs2 * rho
rhs = phi - delta * z
zbar = rhs / gambar
xnorm = sqrt(xxnorm + zbar**2)
gamma = sqrt(gambar**2 + theta**2)
cs2 = gambar / gamma
sn2 = theta / gamma
z = rhs / gamma
xxnorm = xxnorm + z**2
# Test for convergence.
# First, estimate the condition of the matrix Abar,
# and the norms of rbar and Abar'rbar.
acond = anorm * sqrt(ddnorm)
res1 = phibar**2
res2 = res2 + psi**2
rnorm = sqrt(res1 + res2)
arnorm = alfa * abs(tau)
# Distinguish between
# r1norm = ||b - Ax|| and
# r2norm = rnorm in current code
# = sqrt(r1norm^2 + damp^2*||x||^2).
# Estimate r1norm from
# r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
# Although there is cancellation, it might be accurate enough.
r1sq = rnorm**2 - dampsq * xxnorm
r1norm = sqrt(abs(r1sq))
if r1sq < 0:
r1norm = -r1norm
r2norm = rnorm
# Now use these norms to estimate certain other quantities,
# some of which will be small near a solution.
test1 = rnorm / bnorm
test2 = arnorm / (anorm * rnorm + eps)
test3 = 1 / (acond + eps)
t1 = test1 / (1 + anorm * xnorm / bnorm)
rtol = btol + atol * anorm * xnorm / bnorm
# The following tests guard against extremely small values of
# atol, btol or ctol. (The user may have set any or all of
# the parameters atol, btol, conlim to 0.)
# The effect is equivalent to the normal tests using
# atol = eps, btol = eps, conlim = 1/eps.
if itn >= iter_lim:
istop = 7
if 1 + test3 <= 1:
istop = 6
if 1 + test2 <= 1:
istop = 5
if 1 + t1 <= 1:
istop = 4
# Allow for tolerances set by the user.
if test3 <= ctol:
istop = 3
if test2 <= atol:
istop = 2
if test1 <= rtol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= iter_lim-10:
prnt = True
# if itn%10 == 0: prnt = True
if test3 <= 2*ctol:
prnt = True
if test2 <= 10*atol:
prnt = True
if test1 <= 10*rtol:
prnt = True
if istop != 0:
prnt = True
if prnt:
if show:
str1 = '%6g %12.5e' % (itn, x[0])
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
str3 = ' %8.1e %8.1e' % (test1, test2)
str4 = ' %8.1e %8.1e' % (anorm, acond)
print(str1, str2, str3, str4)
if istop != 0:
break
# End of iteration loop.
# Print the stopping condition.
if show:
print(' ')
print('LSQR finished')
print(msg[istop])
print(' ')
str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm)
str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm)
str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm)
str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm)
print(str1 + ' ' + str2)
print(str3 + ' ' + str4)
print(' ')
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

View file

@ -0,0 +1,375 @@
from numpy import sqrt, inner, zeros, inf, finfo
from numpy.linalg import norm
from .utils import make_system
__all__ = ['minres']
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
M=None, callback=None, show=False, check=False):
"""
Use MINimum RESidual iteration to solve Ax=b
MINRES minimizes norm(A*x - b) for a real symmetric matrix A. Unlike
the Conjugate Gradient method, A can be indefinite or singular.
If shift != 0 then the method solves (A - shift*I)x = b
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real symmetric N-by-N matrix of the linear system
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol : float
Tolerance to achieve. The algorithm terminates when the relative
residual is below `tol`.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
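    show : bool, optional
        If ``True``, print the iteration log to stdout.
    check : bool, optional
        If ``True``, run an (approximate) symmetry check on ``A`` and the
        preconditioner before iterating.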
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import minres
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> A = A + A.T
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = minres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
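    The ``shift`` parameter makes the routine solve ``(A - shift*I)x = b``
    instead (a sketch; the shift value is arbitrary):
    >>> x_s, exitCode = minres(A, b, shift=0.5)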
References
----------
Solution of sparse indefinite systems of linear equations,
C. C. Paige and M. A. Saunders (1975),
SIAM J. Numer. Anal. 12(4), pp. 617-629.
https://web.stanford.edu/group/SOL/software/minres/
This file is a translation of the following MATLAB implementation:
https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
matvec = A.matvec
psolve = M.matvec
first = 'Enter minres. '
last = 'Exit minres. '
n = A.shape[0]
if maxiter is None:
maxiter = 5 * n
msg = [' beta2 = 0. If M = I, b and x are eigenvectors ', # -1
' beta1 = 0. The exact solution is x0 ', # 0
' A solution to Ax = b was found, given rtol ', # 1
' A least-squares solution was found, given rtol ', # 2
' Reasonable accuracy achieved, given eps ', # 3
' x has converged to an eigenvector ', # 4
' acond has exceeded 0.1/eps ', # 5
' The iteration limit was reached ', # 6
' A does not define a symmetric matrix ', # 7
' M does not define a symmetric matrix ', # 8
' M does not define a pos-def preconditioner '] # 9
if show:
print(first + 'Solution of symmetric Ax = b')
print(first + 'n = %3g shift = %23.14e' % (n,shift))
print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol))
print()
istop = 0
itn = 0
Anorm = 0
Acond = 0
rnorm = 0
ynorm = 0
xtype = x.dtype
eps = finfo(xtype).eps
# Set up y and v for the first Lanczos vector v1.
# y = beta1 P' v1, where P = C**(-1).
# v is really P' v1.
r1 = b - A*x
y = psolve(r1)
beta1 = inner(r1, y)
if beta1 < 0:
raise ValueError('indefinite preconditioner')
elif beta1 == 0:
return (postprocess(x), 0)
beta1 = sqrt(beta1)
if check:
# are these too strict?
# see if A is symmetric
w = matvec(y)
r2 = matvec(w)
s = inner(w,w)
t = inner(y,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric matrix')
# see if M is symmetric
r2 = psolve(y)
s = inner(y,y)
t = inner(r1,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric preconditioner')
# Initialize other quantities
oldb = 0
beta = beta1
dbar = 0
epsln = 0
qrnorm = beta1
phibar = beta1
rhs1 = beta1
rhs2 = 0
tnorm2 = 0
gmax = 0
gmin = finfo(xtype).max
cs = -1
sn = 0
w = zeros(n, dtype=xtype)
w2 = zeros(n, dtype=xtype)
r2 = r1
if show:
print()
print()
print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|')
while itn < maxiter:
itn += 1
s = 1.0/beta
v = s*y
y = matvec(v)
y = y - shift * v
if itn >= 2:
y = y - (beta/oldb)*r1
alfa = inner(v,y)
y = y - (alfa/beta)*r2
r1 = r2
r2 = y
y = psolve(r2)
oldb = beta
beta = inner(r2,y)
if beta < 0:
raise ValueError('non-symmetric matrix')
beta = sqrt(beta)
tnorm2 += alfa**2 + oldb**2 + beta**2
if itn == 1:
if beta/beta1 <= 10*eps:
istop = -1 # Terminate later
# Apply previous rotation Qk-1 to get
# [deltak epslnk+1] = [cs sn][dbark 0 ]
# [gbar k dbar k+1] [sn -cs][alfak betak+1].
oldeps = epsln
delta = cs * dbar + sn * alfa # delta1 = 0 deltak
gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k
epsln = sn * beta # epsln2 = 0 epslnk+1
dbar = - cs * beta # dbar 2 = beta2 dbar k+1
root = norm([gbar, dbar])
Arnorm = phibar * root
# Compute the next plane rotation Qk
gamma = norm([gbar, beta]) # gammak
gamma = max(gamma, eps)
cs = gbar / gamma # ck
sn = beta / gamma # sk
phi = cs * phibar # phik
phibar = sn * phibar # phibark+1
# Update x.
denom = 1.0/gamma
w1 = w2
w2 = w
w = (v - oldeps*w1 - delta*w2) * denom
x = x + phi*w
# Go round again.
gmax = max(gmax, gamma)
gmin = min(gmin, gamma)
z = rhs1 / gamma
rhs1 = rhs2 - delta*z
rhs2 = - epsln*z
# Estimate various norms and test for convergence.
Anorm = sqrt(tnorm2)
ynorm = norm(x)
epsa = Anorm * eps
epsx = Anorm * ynorm * eps
epsr = Anorm * ynorm * tol
diag = gbar
if diag == 0:
diag = epsa
qrnorm = phibar
rnorm = qrnorm
if ynorm == 0 or Anorm == 0:
test1 = inf
else:
test1 = rnorm / (Anorm*ynorm) # ||r|| / (||A|| ||x||)
if Anorm == 0:
test2 = inf
else:
test2 = root / Anorm # ||Ar|| / (||A|| ||r||)
# Estimate cond(A).
# In this version we look at the diagonals of R in the
# factorization of the lower Hessenberg matrix, Q * H = R,
# where H is the tridiagonal matrix from Lanczos with one
# extra row, beta(k+1) e_k^T.
Acond = gmax/gmin
# See if any of the stopping criteria are satisfied.
# In rare cases, istop is already -1 from above (Abar = const*I).
if istop == 0:
t1 = 1 + test1 # These tests work if tol < eps
t2 = 1 + test2
if t2 <= 1:
istop = 2
if t1 <= 1:
istop = 1
if itn >= maxiter:
istop = 6
if Acond >= 0.1/eps:
istop = 4
if epsx >= beta1:
istop = 3
# if rnorm <= epsx : istop = 2
# if rnorm <= epsr : istop = 1
if test2 <= tol:
istop = 2
if test1 <= tol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= maxiter-10:
prnt = True
if itn % 10 == 0:
prnt = True
if qrnorm <= 10*epsx:
prnt = True
if qrnorm <= 10*epsr:
prnt = True
if Acond <= 1e-2/eps:
prnt = True
if istop != 0:
prnt = True
if show and prnt:
str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
str2 = ' %10.3e' % (test2,)
str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
print(str1 + str2 + str3)
if itn % 10 == 0:
print()
if callback is not None:
callback(x)
if istop != 0:
break # TODO check this
if show:
print()
print(last + ' istop = %3g itn =%5g' % (istop,itn))
print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond))
print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm))
print(last + ' Arnorm = %12.4e' % (Arnorm,))
print(last + msg[istop+1])
if istop == 6:
info = maxiter
else:
info = 0
return (postprocess(x),info)
if __name__ == '__main__':
from numpy import arange
from scipy.sparse import spdiags
n = 10
residuals = []
def cb(x):
residuals.append(norm(b - A*x))
# A = poisson((10,),format='csr')
A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
A.psolve = M.matvec
b = zeros(A.shape[0])
    x, info = minres(A, b, tol=1e-12, maxiter=None, callback=cb)
# x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]

View file

@ -0,0 +1,52 @@
from os.path import join
def configuration(parent_package='',top_path=None):
from scipy._build_utils.system_info import get_info
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import (get_g77_abi_wrappers, uses_blas64,
blas_ilp64_pre_build_hook, get_f2py_int64_options)
config = Configuration('isolve',parent_package,top_path)
if uses_blas64():
lapack_opt = get_info('lapack_ilp64_opt')
f2py_options = get_f2py_int64_options()
pre_build_hook = blas_ilp64_pre_build_hook(lapack_opt)
else:
lapack_opt = get_info('lapack_opt')
f2py_options = None
pre_build_hook = None
# iterative methods
methods = ['BiCGREVCOM.f.src',
'BiCGSTABREVCOM.f.src',
'CGREVCOM.f.src',
'CGSREVCOM.f.src',
# 'ChebyREVCOM.f.src',
'GMRESREVCOM.f.src',
# 'JacobiREVCOM.f.src',
'QMRREVCOM.f.src',
# 'SORREVCOM.f.src'
]
Util = ['getbreak.f.src']
sources = Util + methods + ['_iterative.pyf.src']
sources = [join('iterative', x) for x in sources]
sources += get_g77_abi_wrappers(lapack_opt)
ext = config.add_extension('_iterative',
sources=sources,
f2py_options=f2py_options,
extra_info=lapack_opt)
ext._pre_build_hook = pre_build_hook
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())

View file

@ -0,0 +1,60 @@
import scipy.sparse.linalg as la
import scipy.io as io
import numpy as np
import sys
#problem = "SPARSKIT/drivcav/e05r0100"
problem = "SPARSKIT/drivcav/e05r0200"
#problem = "Harwell-Boeing/sherman/sherman1"
#problem = "misc/hamm/add32"
mm = np.lib._datasource.Repository('ftp://math.nist.gov/pub/MatrixMarket2/')
f = mm.open('%s.mtx.gz' % problem)
Am = io.mmread(f).tocsr()
f.close()
f = mm.open('%s_rhs1.mtx.gz' % problem)
b = np.array(io.mmread(f)).ravel()
f.close()
count = [0]
def matvec(v):
count[0] += 1
sys.stderr.write('%d\r' % count[0])
return Am*v
A = la.LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
M = 100
print("MatrixMarket problem %s" % problem)
print("Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz))
count[0] = 0
x0, info = la.gmres(A, b, restrt=M, tol=1e-14)
count_0 = count[0]
err0 = np.linalg.norm(Am*x0 - b) / np.linalg.norm(b)
print("GMRES(%d):" % M, count_0, "matvecs, residual", err0)
if info != 0:
print("Didn't converge")
count[0] = 0
x1, info = la.lgmres(A, b, inner_m=M-6*2, outer_k=6, tol=1e-14)
count_1 = count[0]
err1 = np.linalg.norm(Am*x1 - b) / np.linalg.norm(b)
print("LGMRES(%d,6) [same memory req.]:" % (M-2*6), count_1,
"matvecs, residual:", err1)
if info != 0:
print("Didn't converge")
count[0] = 0
x2, info = la.lgmres(A, b, inner_m=M-6, outer_k=6, tol=1e-14)
count_2 = count[0]
err2 = np.linalg.norm(Am*x2 - b) / np.linalg.norm(b)
print("LGMRES(%d,6) [same subspace size]:" % (M-6), count_2,
"matvecs, residual:", err2)
if info != 0:
print("Didn't converge")

View file

@ -0,0 +1,165 @@
#!/usr/bin/env python
"""Tests for the linalg.isolve.gcrotmk module
"""
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import gcrotmk, gmres
Am = csr_matrix(array([[-2,1,0,0,0,9],
[1,-2,1,0,5,0],
[0,1,-2,1,0,0],
[0,0,1,-2,1,0],
[0,3,0,1,-2,1],
[1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
count = [0]
def matvec(v):
count[0] += 1
return Am*v
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
count[0] = 0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
count_0 = count[0]
assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
return x0, count_0
class TestGCROTMK(object):
def test_preconditioner(self):
# Check that preconditioning works
pc = splu(Am.tocsc())
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
x0, count_0 = do_solve()
x1, count_1 = do_solve(M=M)
assert_equal(count_1, 3)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
def test_arnoldi(self):
np.random.seed(1)
A = eye(2000) + rand(2000, 2000, density=5e-4)
b = np.random.rand(2000)
# The inner arnoldi should be equivalent to gmres
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)
assert_equal(flag0, 1)
assert_equal(flag1, 1)
assert np.linalg.norm(A.dot(x0) - b) > 1e-3
assert_allclose(x0, x1)
def test_cornercase(self):
np.random.seed(1234)
# Rounding error may prevent convergence with tol=0 --- ensure
# that the return values in this case are correct, and no
# exceptions are raised
for n in [3, 5, 10, 100]:
A = 2*eye(n)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
b = np.ones(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
b = np.random.rand(n)
x, info = gcrotmk(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = gcrotmk(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_nans(self):
A = eye(3, format='lil')
A[1,1] = np.nan
b = np.ones(3)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, tol=0, maxiter=10)
assert_equal(info, 1)
def test_truncate(self):
np.random.seed(1234)
A = np.random.rand(30, 30) + np.eye(30)
b = np.random.rand(30)
for truncate in ['oldest', 'smallest']:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,
maxiter=200)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-3)
def test_CU(self):
for discard_C in (True, False):
# Check that C,U behave as expected
CU = []
x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
assert_(len(CU) > 0)
assert_(len(CU) <= 6)
if discard_C:
for c, u in CU:
assert_(c is None)
# should converge immediately
x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
if discard_C:
assert_equal(count_1, 2 + len(CU))
else:
assert_equal(count_1, 3)
assert_(count_1 <= count_0/2)
assert_allclose(x1, x0, atol=1e-14)
def test_denormals(self):
# Check that no warnings are emitted if the matrix contains
# numbers for which 1/x has no float representation, and that
# the solver behaves properly.
A = np.array([[1, 2], [3, 4]], dtype=float)
A *= 100 * np.nextafter(0, 1)
b = np.array([1, 1])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = gcrotmk(A, b)
if info == 0:
assert_allclose(A.dot(xp), b)

View file

@ -0,0 +1,726 @@
""" Test functions for the sparse.linalg.isolve module
"""
import itertools
import platform
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal,
assert_, assert_allclose, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from numpy import zeros, arange, array, ones, eye, iscomplexobj
from scipy.linalg import norm
from scipy.sparse import spdiags, csr_matrix, SparseEfficiencyWarning
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk
# TODO check that method preserve shape and type
# TODO test both preconditioner methods
class Case(object):
def __init__(self, name, A, b=None, skip=None, nonconvergence=None):
self.name = name
self.A = A
if b is None:
self.b = arange(A.shape[0], dtype=float)
else:
self.b = b
if skip is None:
self.skip = []
else:
self.skip = skip
if nonconvergence is None:
self.nonconvergence = []
else:
self.nonconvergence = nonconvergence
def __repr__(self):
return "<%s>" % self.name
class IterativeParams(object):
def __init__(self):
        # solvers under test, plus the subsets that assume symmetry,
        # positive definiteness, or real-valued data
solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk]
sym_solvers = [minres, cg]
posdef_solvers = [cg]
real_solvers = [minres]
self.solvers = solvers
        # list of test Cases
self.cases = []
# Symmetric and Positive Definite
N = 40
data = ones((3,N))
data[0,:] = 2
data[1,:] = -1
data[2,:] = -1
Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr')
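        # 1-D Laplacian stencil: 2 on the diagonal, -1 on the sub- and
        # super-diagonals; symmetric positive definite.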
self.Poisson1D = Case("poisson1d", Poisson1D)
self.cases.append(Case("poisson1d", Poisson1D))
# note: minres fails for single precision
self.cases.append(Case("poisson1d", Poisson1D.astype('f'),
skip=[minres]))
# Symmetric and Negative Definite
self.cases.append(Case("neg-poisson1d", -Poisson1D,
skip=posdef_solvers))
# note: minres fails for single precision
self.cases.append(Case("neg-poisson1d", (-Poisson1D).astype('f'),
skip=posdef_solvers + [minres]))
# Symmetric and Indefinite
data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d')
RandDiag = spdiags(data, [0], 10, 10, format='csr')
self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
self.cases.append(Case("rand-diag", RandDiag.astype('f'),
skip=posdef_solvers))
# Random real-valued
np.random.seed(1234)
data = np.random.rand(4, 4)
self.cases.append(Case("rand", data, skip=posdef_solvers+sym_solvers))
self.cases.append(Case("rand", data.astype('f'),
skip=posdef_solvers+sym_solvers))
# Random symmetric real-valued
np.random.seed(1234)
data = np.random.rand(4, 4)
data = data + data.T
self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
self.cases.append(Case("rand-sym", data.astype('f'),
skip=posdef_solvers))
# Random pos-def symmetric real
np.random.seed(1234)
data = np.random.rand(9, 9)
data = np.dot(data.conj(), data.T)
self.cases.append(Case("rand-sym-pd", data))
# note: minres fails for single precision
self.cases.append(Case("rand-sym-pd", data.astype('f'),
skip=[minres]))
# Random complex-valued
np.random.seed(1234)
data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
self.cases.append(Case("rand-cmplx", data,
skip=posdef_solvers+sym_solvers+real_solvers))
self.cases.append(Case("rand-cmplx", data.astype('F'),
skip=posdef_solvers+sym_solvers+real_solvers))
# Random hermitian complex-valued
np.random.seed(1234)
data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
data = data + data.T.conj()
self.cases.append(Case("rand-cmplx-herm", data,
skip=posdef_solvers+real_solvers))
self.cases.append(Case("rand-cmplx-herm", data.astype('F'),
skip=posdef_solvers+real_solvers))
# Random pos-def hermitian complex-valued
np.random.seed(1234)
data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9)
data = np.dot(data.conj(), data.T)
self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
self.cases.append(Case("rand-cmplx-sym-pd", data.astype('F'),
skip=real_solvers))
# Non-symmetric and Positive Definite
#
# cgs, qmr, and bicg fail to converge on this one
# -- algorithmic limitation apparently
data = ones((2,10))
data[0,:] = 2
data[1,:] = -1
A = spdiags(data, [0,-1], 10, 10, format='csr')
self.cases.append(Case("nonsymposdef", A,
skip=sym_solvers+[cgs, qmr, bicg]))
self.cases.append(Case("nonsymposdef", A.astype('F'),
skip=sym_solvers+[cgs, qmr, bicg]))
# Symmetric, non-pd, hitting cgs/bicg/bicgstab/qmr breakdown
A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0],
[0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0],
[0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1],
[1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0],
[-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0],
[0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0],
[0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0],
[0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0],
[0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype=float)
b = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float)
assert (A == A.T).all()
self.cases.append(Case("sym-nonpd", A, b,
skip=posdef_solvers,
nonconvergence=[cgs,bicg,bicgstab,qmr]))
params = IterativeParams()
def check_maxiter(solver, case):
A = case.A
tol = 1e-12
b = case.b
x0 = 0*b
residuals = []
def callback(x):
residuals.append(norm(b - case.A*x))
x, info = solver(A, b, x0=x0, tol=tol, maxiter=1, callback=callback)
assert_equal(len(residuals), 1)
assert_equal(info, 1)
def test_maxiter():
case = params.Poisson1D
for solver in params.solvers:
if solver in case.skip:
continue
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
check_maxiter(solver, case)
def assert_normclose(a, b, tol=1e-8):
residual = norm(a - b)
tolerance = tol*norm(b)
msg = "residual (%g) not smaller than tolerance %g" % (residual, tolerance)
assert_(residual < tolerance, msg=msg)
def check_convergence(solver, case):
A = case.A
if A.dtype.char in "dD":
tol = 1e-8
else:
tol = 1e-2
b = case.b
x0 = 0*b
x, info = solver(A, b, x0=x0, tol=tol)
assert_array_equal(x0, 0*b) # ensure that x0 is not overwritten
if solver not in case.nonconvergence:
assert_equal(info,0)
assert_normclose(A.dot(x), b, tol=tol)
else:
assert_(info != 0)
assert_(np.linalg.norm(A.dot(x) - b) <= np.linalg.norm(b))
def test_convergence():
for solver in params.solvers:
for case in params.cases:
if solver in case.skip:
continue
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
check_convergence(solver, case)
def check_precond_dummy(solver, case):
tol = 1e-8
def identity(b,which=None):
"""trivial preconditioner"""
return b
A = case.A
b = case.b
x0 = 0*b
precond = LinearOperator(A.shape, identity, rmatvec=identity)
if solver is qmr:
x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol)
else:
x, info = solver(A, b, M=precond, x0=x0, tol=tol)
assert_equal(info,0)
assert_normclose(A.dot(x), b, tol)
A = aslinearoperator(A)
A.psolve = identity
A.rpsolve = identity
x, info = solver(A, b, x0=x0, tol=tol)
assert_equal(info,0)
assert_normclose(A*x, b, tol=tol)
def test_precond_dummy():
case = params.Poisson1D
for solver in params.solvers:
if solver in case.skip:
continue
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
check_precond_dummy(solver, case)
def check_precond_inverse(solver, case):
tol = 1e-8
def inverse(b,which=None):
"""inverse preconditioner"""
A = case.A
if not isinstance(A, np.ndarray):
A = A.todense()
return np.linalg.solve(A, b)
def rinverse(b,which=None):
"""inverse preconditioner"""
A = case.A
if not isinstance(A, np.ndarray):
A = A.todense()
return np.linalg.solve(A.T, b)
matvec_count = [0]
def matvec(b):
matvec_count[0] += 1
return case.A.dot(b)
def rmatvec(b):
matvec_count[0] += 1
return case.A.T.dot(b)
b = case.b
x0 = 0*b
A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec)
precond = LinearOperator(case.A.shape, inverse, rmatvec=rinverse)
# Solve with preconditioner
matvec_count = [0]
x, info = solver(A, b, M=precond, x0=x0, tol=tol)
assert_equal(info, 0)
assert_normclose(case.A.dot(x), b, tol)
    # With an exact inverse as preconditioner, only a few matvecs are needed
assert_(matvec_count[0] <= 3, repr(matvec_count))
def test_precond_inverse():
case = params.Poisson1D
for solver in params.solvers:
if solver in case.skip:
continue
if solver is qmr:
continue
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
check_precond_inverse(solver, case)
def test_gmres_basic():
A = np.vander(np.arange(10) + 1)[:, ::-1]
b = np.zeros(10)
b[0] = 1
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x_gm, err = gmres(A, b, restart=5, maxiter=1)
assert_allclose(x_gm[0], 0.359, rtol=1e-2)
def test_reentrancy():
non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr]
reentrant = [lgmres, minres, gcrotmk]
for solver in reentrant + non_reentrant:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
_check_reentrancy(solver, solver in reentrant)
def _check_reentrancy(solver, is_reentrant):
def matvec(x):
A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
y, info = solver(A, x)
assert_equal(info, 0)
return y
b = np.array([1, 1./2, 1./3])
op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
dtype=b.dtype)
if not is_reentrant:
assert_raises(RuntimeError, solver, op, b)
else:
y, info = solver(op, b)
assert_equal(info, 0)
assert_allclose(y, [1, 1, 1])
@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, lgmres, gcrotmk])
def test_atol(solver):
# TODO: minres. It didn't historically use absolute tolerances, so
# fixing it is less urgent.
np.random.seed(1234)
A = np.random.rand(10, 10)
A = A.dot(A.T) + 10 * np.eye(10)
b = 1e3 * np.random.rand(10)
b_norm = np.linalg.norm(b)
tols = np.r_[0, np.logspace(np.log10(1e-10), np.log10(1e2), 7), np.inf]
# Check effect of badly scaled preconditioners
M0 = np.random.randn(10, 10)
M0 = M0.dot(M0.T)
Ms = [None, 1e-6 * M0, 1e6 * M0]
for M, tol, atol in itertools.product(Ms, tols, tols):
if tol == 0 and atol == 0:
continue
if solver is qmr:
if M is not None:
M = aslinearoperator(M)
M2 = aslinearoperator(np.eye(10))
else:
M2 = None
x, info = solver(A, b, M1=M, M2=M2, tol=tol, atol=atol)
else:
x, info = solver(A, b, M=M, tol=tol, atol=atol)
assert_equal(info, 0)
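        # The solvers declare convergence when ||b - A x|| <= max(atol, tol*||b||);
        # check that the returned solution actually satisfies that bound.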
residual = A.dot(x) - b
err = np.linalg.norm(residual)
atol2 = tol * b_norm
assert_(err <= max(atol, atol2))
@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk])
def test_zero_rhs(solver):
np.random.seed(1234)
A = np.random.rand(10, 10)
A = A.dot(A.T) + 10 * np.eye(10)
b = np.zeros(10)
tols = np.r_[np.logspace(np.log10(1e-10), np.log10(1e2), 7)]
for tol in tols:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = solver(A, b, tol=tol)
assert_equal(info, 0)
assert_allclose(x, 0, atol=1e-15)
x, info = solver(A, b, tol=tol, x0=ones(10))
assert_equal(info, 0)
assert_allclose(x, 0, atol=tol)
if solver is not minres:
x, info = solver(A, b, tol=tol, atol=0, x0=ones(10))
if info == 0:
assert_allclose(x, 0)
x, info = solver(A, b, tol=tol, atol=tol)
assert_equal(info, 0)
assert_allclose(x, 0, atol=1e-300)
x, info = solver(A, b, tol=tol, atol=0)
assert_equal(info, 0)
assert_allclose(x, 0, atol=1e-300)
@pytest.mark.parametrize("solver", [
gmres, qmr,
pytest.param(lgmres, marks=pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")),
pytest.param(cgs, marks=pytest.mark.xfail),
pytest.param(bicg, marks=pytest.mark.xfail),
pytest.param(bicgstab, marks=pytest.mark.xfail),
pytest.param(gcrotmk, marks=pytest.mark.xfail)])
def test_maxiter_worsening(solver):
# Check error does not grow (boundlessly) with increasing maxiter.
# This can occur due to the solvers hitting close to breakdown,
# which they should detect and halt as necessary.
# cf. gh-9100
# Singular matrix, rhs numerically not in range
A = np.array([[-0.1112795288033378, 0, 0, 0.16127952880333685],
[0, -0.13627952880333782+6.283185307179586j, 0, 0],
[0, 0, -0.13627952880333782-6.283185307179586j, 0],
[0.1112795288033368, 0j, 0j, -0.16127952880333785]])
v = np.ones(4)
best_error = np.inf
tol = 7 if platform.machine() == 'aarch64' else 5
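    # Slack factor: after reaching its best value, the residual may wobble,
    # but it must stay within this multiple of the best error seen so far.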
for maxiter in range(1, 20):
x, info = solver(A, v, maxiter=maxiter, tol=1e-8, atol=0)
if info == 0:
assert_(np.linalg.norm(A.dot(x) - v) <= 1e-8*np.linalg.norm(v))
error = np.linalg.norm(A.dot(x) - v)
best_error = min(best_error, error)
# Check with slack
assert_(error <= tol*best_error)
@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk])
def test_x0_working(solver):
# Easy problem
np.random.seed(1)
n = 10
A = np.random.rand(n, n)
A = A.dot(A.T)
b = np.random.rand(n)
x0 = np.random.rand(n)
if solver is minres:
kw = dict(tol=1e-6)
else:
kw = dict(atol=0, tol=1e-6)
x, info = solver(A, b, **kw)
assert_equal(info, 0)
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-6*np.linalg.norm(b))
x, info = solver(A, b, x0=x0, **kw)
assert_equal(info, 0)
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-6*np.linalg.norm(b))
#------------------------------------------------------------------------------
class TestQMR(object):
def test_leftright_precond(self):
"""Check that QMR works with left and right preconditioners"""
from scipy.sparse.linalg.dsolve import splu
from scipy.sparse.linalg.interface import LinearOperator
n = 100
dat = ones(n)
A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1],n,n)
b = arange(n,dtype='d')
L = spdiags([-dat/2, dat], [-1,0], n, n)
U = spdiags([4*dat, -dat], [0,1], n, n)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
L_solver = splu(L)
U_solver = splu(U)
def L_solve(b):
return L_solver.solve(b)
def U_solve(b):
return U_solver.solve(b)
def LT_solve(b):
return L_solver.solve(b,'T')
def UT_solve(b):
return U_solver.solve(b,'T')
M1 = LinearOperator((n,n), matvec=L_solve, rmatvec=LT_solve)
M2 = LinearOperator((n,n), matvec=U_solve, rmatvec=UT_solve)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)
assert_equal(info,0)
assert_normclose(A*x, b, tol=1e-8)
class TestGMRES(object):
def test_callback(self):
def store_residual(r, rvec):
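            # Store r in the slot after the last nonzero entry; rvec[0] is
            # pre-seeded with 1.0, so residuals fill successive slots.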
rvec[rvec.nonzero()[0].max()+1] = r
        # Define A, b
        A = csr_matrix(array([[-2, 1, 0, 0, 0, 0],
                              [1, -2, 1, 0, 0, 0],
                              [0, 1, -2, 1, 0, 0],
                              [0, 0, 1, -2, 1, 0],
                              [0, 0, 0, 1, -2, 1],
                              [0, 0, 0, 0, 1, -2]]))
b = ones((A.shape[0],))
maxiter = 1
rvec = zeros(maxiter+1)
rvec[0] = 1.0
callback = lambda r:store_residual(r, rvec)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x,flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback)
# Expected output from SciPy 1.0.0
assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)
# Test preconditioned callback
M = 1e-3 * np.eye(A.shape[0])
rvec = zeros(maxiter+1)
rvec[0] = 1.0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter, callback=callback)
# Expected output from SciPy 1.0.0 (callback has preconditioned residual!)
assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]), rtol=1e-10)
def test_abi(self):
# Check we don't segfault on gmres with complex argument
A = eye(2)
b = ones(2)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
r_x, r_info = gmres(A, b)
r_x = r_x.astype(complex)
x, info = gmres(A.astype(complex), b.astype(complex))
assert_(iscomplexobj(x))
assert_allclose(r_x, x)
assert_(r_info == info)
def test_atol_legacy(self):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
# Check the strange legacy behavior: the tolerance is interpreted
# as atol, but only for the initial residual
A = eye(2)
b = 1e-6 * ones(2)
x, info = gmres(A, b, tol=1e-5)
assert_array_equal(x, np.zeros(2))
A = eye(2)
b = ones(2)
x, info = gmres(A, b, tol=1e-5)
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-5*np.linalg.norm(b))
assert_allclose(x, b, atol=0, rtol=1e-8)
rndm = np.random.RandomState(12345)
A = rndm.rand(30, 30)
b = 1e-6 * ones(30)
x, info = gmres(A, b, tol=1e-7, restart=20)
assert_(np.linalg.norm(A.dot(x) - b) > 1e-7)
A = eye(2)
b = 1e-10 * ones(2)
x, info = gmres(A, b, tol=1e-8, atol=0)
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))
def test_defective_precond_breakdown(self):
# Breakdown due to defective preconditioner
M = np.eye(3)
M[2,2] = 0
b = np.array([0, 1, 1])
x = np.array([1, 0, 0])
A = np.diag([2, 3, 4])
x, info = gmres(A, b, x0=x, M=M, tol=1e-15, atol=0)
# Should not return nans, nor terminate with false success
assert_(not np.isnan(x).any())
if info == 0:
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-15*np.linalg.norm(b))
# The solution should be OK outside null space of M
assert_allclose(M.dot(A.dot(x)), M.dot(b))
def test_defective_matrix_breakdown(self):
# Breakdown due to defective matrix
A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
b = np.array([1, 0, 1])
x, info = gmres(A, b, tol=1e-8, atol=0)
# Should not return nans, nor terminate with false success
assert_(not np.isnan(x).any())
if info == 0:
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))
# The solution should be OK outside null space of A
assert_allclose(A.dot(A.dot(x)), A.dot(b))
def test_callback_type(self):
# The legacy callback type changes meaning of 'maxiter'
np.random.seed(1)
A = np.random.rand(20, 20)
b = np.random.rand(20)
cb_count = [0]
def pr_norm_cb(r):
cb_count[0] += 1
assert_(isinstance(r, float))
def x_cb(x):
cb_count[0] += 1
assert_(isinstance(x, np.ndarray))
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
# 2 iterations is not enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50)
assert info == 2
assert cb_count[0] == 2
# With `callback_type` specified, no warning should be raised
cb_count = [0]
x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50,
callback_type='legacy')
assert info == 2
assert cb_count[0] == 2
# 2 restart cycles is enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50,
callback_type='pr_norm')
assert info == 0
assert cb_count[0] > 2
# 2 restart cycles is enough to solve the problem
cb_count = [0]
x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=2, restart=50,
callback_type='x')
assert info == 0
assert cb_count[0] == 2
def test_callback_x_monotonic(self):
# Check that callback_type='x' gives monotonic norm decrease
np.random.seed(1)
A = np.random.rand(20, 20) + np.eye(20)
b = np.random.rand(20)
prev_r = [np.inf]
count = [0]
def x_cb(x):
r = np.linalg.norm(A.dot(x) - b)
assert r <= prev_r[0]
prev_r[0] = r
count[0] += 1
x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=20, restart=10,
callback_type='x')
assert info == 20
assert count[0] == 21
x_cb(x)


@ -0,0 +1,212 @@
"""Tests for the linalg.isolve.lgmres module
"""
from numpy.testing import (assert_, assert_allclose, assert_equal,
suppress_warnings)
import pytest
from platform import python_implementation
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import lgmres, gmres
Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
[1, -2, 1, 0, 5, 0],
[0, 1, -2, 1, 0, 0],
[0, 0, 1, -2, 1, 0],
[0, 3, 0, 1, -2, 1],
[1, 0, 0, 0, 1, -2]]))
b = array([1, 2, 3, 4, 5, 6])
count = [0]
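# matvec wraps Am and counts matrix-vector products; the tests below use
# count[0] as a proxy for the work performed by an iterative solve.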
def matvec(v):
count[0] += 1
return Am*v
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
count[0] = 0
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
inner_m=6, tol=1e-14, **kw)
count_0 = count[0]
assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
return x0, count_0
class TestLGMRES(object):
def test_preconditioner(self):
# Check that preconditioning works
pc = splu(Am.tocsc())
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
x0, count_0 = do_solve()
x1, count_1 = do_solve(M=M)
assert_(count_1 == 3)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
def test_outer_v(self):
# Check that the augmentation vectors behave as expected
outer_v = []
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
assert_(len(outer_v) > 0)
assert_(len(outer_v) <= 6)
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
prepend_outer_v=True)
assert_(count_1 == 2, count_1)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
# ---
outer_v = []
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v,
store_outer_Av=False)
assert_(array([v[1] is None for v in outer_v]).all())
assert_(len(outer_v) > 0)
assert_(len(outer_v) <= 6)
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
prepend_outer_v=True)
assert_(count_1 == 3, count_1)
assert_(count_1 < count_0/2)
assert_(allclose(x1, x0, rtol=1e-14))
@pytest.mark.skipif(python_implementation() == 'PyPy',
reason="Fails on PyPy CI runs. See #9507")
def test_arnoldi(self):
        np.random.seed(1234)
A = eye(2000) + rand(2000, 2000, density=5e-4)
b = np.random.rand(2000)
        # The inner Arnoldi iteration should be equivalent to GMRES
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]),
inner_m=15, maxiter=1)
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]),
restart=15, maxiter=1)
assert_equal(flag0, 1)
assert_equal(flag1, 1)
assert_(np.linalg.norm(A.dot(x0) - b) > 4e-4)
assert_allclose(x0, x1)
def test_cornercase(self):
np.random.seed(1234)
# Rounding error may prevent convergence with tol=0 --- ensure
# that the return values in this case are correct, and no
# exceptions are raised
for n in [3, 5, 10, 100]:
A = 2*eye(n)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
b = np.ones(n)
x, info = lgmres(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = lgmres(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
b = np.random.rand(n)
x, info = lgmres(A, b, maxiter=10)
assert_equal(info, 0)
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
x, info = lgmres(A, b, tol=0, maxiter=10)
if info == 0:
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
def test_nans(self):
A = eye(3, format='lil')
A[1, 1] = np.nan
b = np.ones(3)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
x, info = lgmres(A, b, tol=0, maxiter=10)
assert_equal(info, 1)
def test_breakdown_with_outer_v(self):
A = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([1, 2])
x = np.linalg.solve(A, b)
v0 = np.array([1, 0])
# The inner iteration should converge to the correct solution,
# since it's in the outer vector list
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1)
assert_allclose(xp, x, atol=1e-12)
def test_breakdown_underdetermined(self):
# Should find LSQ solution in the Krylov span in one inner
# iteration, despite solver breakdown from nilpotent A.
A = np.array([[0, 1, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 1],
[0, 0, 0, 0]], dtype=float)
bs = [
np.array([1, 1, 1, 1]),
np.array([1, 1, 1, 0]),
np.array([1, 1, 0, 0]),
np.array([1, 0, 0, 0]),
]
for b in bs:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b, maxiter=1)
resp = np.linalg.norm(A.dot(xp) - b)
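            # K spans the Krylov space {b, Ab, A^2 b, A^3 b}; the least-squares
            # optimum over that span is the best a single inner sweep can do.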
K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
x = K.dot(y)
res = np.linalg.norm(A.dot(x) - b)
assert_allclose(resp, res, err_msg=repr(b))
def test_denormals(self):
# Check that no warnings are emitted if the matrix contains
# numbers for which 1/x has no float representation, and that
# the solver behaves properly.
A = np.array([[1, 2], [3, 4]], dtype=float)
A *= 100 * np.nextafter(0, 1)
b = np.array([1, 1])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, ".*called without specifying.*")
xp, info = lgmres(A, b)
if info == 0:
assert_allclose(A.dot(xp), b)


@ -0,0 +1,203 @@
"""
Copyright (C) 2010 David Fong and Michael Saunders
Distributed under the same license as SciPy
Testing Code for LSMR.
03 Jun 2010: First version release with lsmr.py
David Chin-lung Fong clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University
Michael Saunders saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.
"""
from numpy import array, arange, eye, zeros, ones, sqrt, transpose, hstack
from numpy.linalg import norm
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from scipy.sparse import coo_matrix
from scipy.sparse.linalg.interface import aslinearoperator
from scipy.sparse.linalg import lsmr
from .test_lsqr import G, b
class TestLSMR:
def setup_method(self):
self.n = 10
self.m = 10
def assertCompatibleSystem(self, A, xtrue):
Afun = aslinearoperator(A)
b = Afun.matvec(xtrue)
x = lsmr(A, b)[0]
assert_almost_equal(norm(x - xtrue), 0, decimal=5)
def testIdentityACase1(self):
A = eye(self.n)
xtrue = zeros((self.n, 1))
self.assertCompatibleSystem(A, xtrue)
def testIdentityACase2(self):
A = eye(self.n)
xtrue = ones((self.n,1))
self.assertCompatibleSystem(A, xtrue)
def testIdentityACase3(self):
A = eye(self.n)
xtrue = transpose(arange(self.n,0,-1))
self.assertCompatibleSystem(A, xtrue)
def testBidiagonalA(self):
A = lowerBidiagonalMatrix(20,self.n)
xtrue = transpose(arange(self.n,0,-1))
self.assertCompatibleSystem(A,xtrue)
def testScalarB(self):
A = array([[1.0, 2.0]])
b = 3.0
x = lsmr(A, b)[0]
assert_almost_equal(norm(A.dot(x) - b), 0)
def testComplexX(self):
A = eye(self.n)
xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
self.assertCompatibleSystem(A, xtrue)
def testComplexX0(self):
A = 4 * eye(self.n) + ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1))
b = aslinearoperator(A).matvec(xtrue)
x0 = zeros(self.n, dtype=complex)
x = lsmr(A, b, x0=x0)[0]
assert_almost_equal(norm(x - xtrue), 0, decimal=5)
def testComplexA(self):
A = 4 * eye(self.n) + 1j * ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1).astype(complex))
self.assertCompatibleSystem(A, xtrue)
def testComplexB(self):
A = 4 * eye(self.n) + ones((self.n, self.n))
xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
b = aslinearoperator(A).matvec(xtrue)
x = lsmr(A, b)[0]
assert_almost_equal(norm(x - xtrue), 0, decimal=5)
def testColumnB(self):
A = eye(self.n)
b = ones((self.n, 1))
x = lsmr(A, b)[0]
assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)
def testInitialization(self):
# Test that the default setting is not modified
x_ref = lsmr(G, b)[0]
x0 = zeros(b.shape)
x = lsmr(G, b, x0=x0)[0]
assert_array_almost_equal(x_ref, x)
# Test warm-start with single iteration
x0 = lsmr(G, b, maxiter=1)[0]
x = lsmr(G, b, x0=x0)[0]
assert_array_almost_equal(x_ref, x)
class TestLSMRReturns:
def setup_method(self):
self.n = 10
self.A = lowerBidiagonalMatrix(20,self.n)
self.xtrue = transpose(arange(self.n,0,-1))
self.Afun = aslinearoperator(self.A)
self.b = self.Afun.matvec(self.xtrue)
self.returnValues = lsmr(self.A,self.b)
def testNormr(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert_almost_equal(normr, norm(self.b - self.Afun.matvec(x)))
def testNormar(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert_almost_equal(normar,
norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x))))
def testNormx(self):
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
assert_almost_equal(normx, norm(x))
def lowerBidiagonalMatrix(m, n):
# This is a simple example for testing LSMR.
# It uses the leading m*n submatrix from
# A = [ 1
# 1 2
# 2 3
# 3 4
# ...
# n ]
# suitably padded by zeros.
#
# 04 Jun 2010: First version for distribution with lsmr.py
if m <= n:
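        # m <= n: m diagonal entries 1..m plus m-1 subdiagonal entries 1..m-1.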
row = hstack((arange(m, dtype=int),
arange(1, m, dtype=int)))
col = hstack((arange(m, dtype=int),
arange(m-1, dtype=int)))
data = hstack((arange(1, m+1, dtype=float),
arange(1,m, dtype=float)))
return coo_matrix((data, (row, col)), shape=(m,n))
else:
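        # m > n: n diagonal entries 1..n plus a subdiagonal of 1..n that
        # spills onto row n; the remaining rows are zero padding.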
row = hstack((arange(n, dtype=int),
arange(1, n+1, dtype=int)))
col = hstack((arange(n, dtype=int),
arange(n, dtype=int)))
data = hstack((arange(1, n+1, dtype=float),
arange(1,n+1, dtype=float)))
return coo_matrix((data,(row, col)), shape=(m,n))
def lsmrtest(m, n, damp):
"""Verbose testing of lsmr"""
A = lowerBidiagonalMatrix(m,n)
xtrue = arange(n,0,-1, dtype=float)
Afun = aslinearoperator(A)
b = Afun.matvec(xtrue)
atol = 1.0e-7
btol = 1.0e-7
conlim = 1.0e+10
itnlim = 10*n
show = 1
x, istop, itn, normr, normar, norma, conda, normx \
= lsmr(A, b, damp, atol, btol, conlim, itnlim, show)
j1 = min(n,5)
j2 = max(n-4,1)
print(' ')
print('First elements of x:')
    fmt = ['%10.4f' % (xi) for xi in x[0:j1]]
    print(''.join(fmt))
print(' ')
print('Last elements of x:')
    fmt = ['%10.4f' % (xi) for xi in x[j2-1:]]
    print(''.join(fmt))
r = b - Afun.matvec(x)
r2 = sqrt(norm(r)**2 + (damp*norm(x))**2)
print(' ')
    msg = 'normr (est.) %17.10e' % (normr)
    msg2 = 'normr (true) %17.10e' % (r2)
    print(msg)
    print(msg2)
print(' ')
if __name__ == "__main__":
lsmrtest(20,10,0)


@ -0,0 +1,136 @@
import numpy as np
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal)
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import lsqr
from time import time
# Set up a test problem
n = 35
G = np.eye(n)
normal = np.random.normal
norm = np.linalg.norm
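# Perturb the identity with a few random updates to obtain a dense,
# generally non-symmetric test matrix.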
for jj in range(5):
gg = normal(size=n)
hh = gg * gg.T
G += (hh + hh.T) * 0.5
G += normal(size=n) * normal(size=n)
b = normal(size=n)
tol = 1e-10
show = False
maxit = None
def test_basic():
b_copy = b.copy()
X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
assert_(np.all(b_copy == b))
svx = np.linalg.solve(G, b)
xo = X[0]
assert_(norm(svx - xo) < 1e-5)
def test_gh_2466():
row = np.array([0, 0])
col = np.array([0, 1])
val = np.array([1, -1])
A = scipy.sparse.coo_matrix((val, (row, col)), shape=(1, 2))
b = np.asarray([4])
lsqr(A, b)
def test_well_conditioned_problems():
    # Test that the sparse lsqr solver returns the right solution
# on various problems with different random seeds.
# This is a non-regression test for a potential ZeroDivisionError
# raised when computing the `test2` & `test3` convergence conditions.
n = 10
A_sparse = scipy.sparse.eye(n, n)
A_dense = A_sparse.toarray()
with np.errstate(invalid='raise'):
for seed in range(30):
rng = np.random.RandomState(seed + 10)
beta = rng.rand(n)
            beta[beta == 0] = 0.00001  # ensure that all entries of beta are nonzero
b = A_sparse * beta[:, np.newaxis]
output = lsqr(A_sparse, b, show=show)
# Check that the termination condition corresponds to an approximate
# solution to Ax = b
assert_equal(output[1], 1)
solution = output[0]
# Check that we recover the ground truth solution
assert_array_almost_equal(solution, beta)
# Sanity check: compare to the dense array solver
reference_solution = np.linalg.solve(A_dense, b).ravel()
assert_array_almost_equal(solution, reference_solution)
def test_b_shapes():
# Test b being a scalar.
A = np.array([[1.0, 2.0]])
b = 3.0
x = lsqr(A, b)[0]
assert_almost_equal(norm(A.dot(x) - b), 0)
# Test b being a column vector.
A = np.eye(10)
b = np.ones((10, 1))
x = lsqr(A, b)[0]
assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)
def test_initialization():
# Test the default setting is the same as zeros
b_copy = b.copy()
x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
x0 = np.zeros(x_ref[0].shape)
x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
assert_(np.all(b_copy == b))
assert_array_almost_equal(x_ref[0], x[0])
# Test warm-start with single iteration
x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
assert_array_almost_equal(x_ref[0], x[0])
assert_(np.all(b_copy == b))
if __name__ == "__main__":
svx = np.linalg.solve(G, b)
tic = time()
X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
xo = X[0]
phio = X[3]
psio = X[7]
k = X[2]
chio = X[8]
mg = np.amax(G - G.T)
if mg > 1e-14:
sym = 'No'
else:
sym = 'Yes'
print('LSQR')
print("Is linear operator symmetric? " + sym)
print("n: %3g iterations: %3g" % (n, k))
print("Norms computed in %.2fs by LSQR" % (time() - tic))
print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e " % (chio, phio, psio))
print("Residual norms computed directly:")
print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e" % (norm(xo),
norm(G*xo - b),
norm(G.T*(G*xo-b))))
print("Direct solution norms:")
print(" ||x|| %9.4e ||r|| %9.4e " % (norm(svx), norm(G*svx - b)))
print("")
print(" || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo))
print("")


@ -0,0 +1,98 @@
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_
from scipy.sparse.linalg.isolve import minres
from scipy.linalg import norm
from pytest import raises as assert_raises
from .test_iterative import assert_normclose
def get_sample_problem():
# A random 10 x 10 symmetric matrix
np.random.seed(1234)
matrix = np.random.rand(10, 10)
matrix = matrix + matrix.T
# A random vector of length 10
vector = np.random.rand(10)
return matrix, vector
def test_singular():
A, b = get_sample_problem()
    A[0, :] = 0
b[0] = 0
xp, info = minres(A, b)
assert_equal(info, 0)
assert_normclose(A.dot(xp), b, tol=1e-5)
def test_x0_is_used_by():
A, b = get_sample_problem()
# Random x0 to feed minres
np.random.seed(12345)
x0 = np.random.rand(10)
trace = []
def trace_iterates(xk):
trace.append(xk)
minres(A, b, x0=x0, callback=trace_iterates)
trace_with_x0 = trace
trace = []
minres(A, b, callback=trace_iterates)
assert_(not np.array_equal(trace_with_x0[0], trace[0]))
def test_shift():
A, b = get_sample_problem()
shift = 0.5
shifted_A = A - shift * np.eye(10)
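    # minres's `shift` solves (A - shift*I) x = b, so it must agree with
    # running minres on the explicitly shifted matrix.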
x1, info1 = minres(A, b, shift=shift)
x2, info2 = minres(shifted_A, b)
assert_equal(info1, 0)
assert_allclose(x1, x2, rtol=1e-5)
def test_asymmetric_fail():
"""Asymmetric matrix should raise `ValueError` when check=True"""
A, b = get_sample_problem()
A[1, 2] = 1
A[2, 1] = 2
with assert_raises(ValueError):
xp, info = minres(A, b, check=True)
def test_minres_non_default_x0():
np.random.seed(1234)
tol = 10**(-6)
a = np.random.randn(5, 5)
a = np.dot(a, a.T)
b = np.random.randn(5)
c = np.random.randn(5)
x = minres(a, b, x0=c, tol=tol)[0]
assert norm(a.dot(x) - b) < tol
def test_minres_precond_non_default_x0():
np.random.seed(12345)
tol = 10**(-6)
a = np.random.randn(5, 5)
a = np.dot(a, a.T)
b = np.random.randn(5)
c = np.random.randn(5)
m = np.random.randn(5, 5)
m = np.dot(m, m.T)
x = minres(a, b, M=m, x0=c, tol=tol)[0]
assert norm(a.dot(x) - b) < tol
def test_minres_precond_exact_x0():
np.random.seed(1234)
tol = 10**(-6)
a = np.eye(10)
b = np.ones(10)
c = np.ones(10)
m = np.random.randn(10, 10)
m = np.dot(m, m.T)
x = minres(a, b, M=m, x0=c, tol=tol)[0]
assert norm(a.dot(x) - b) < tol


@ -0,0 +1,8 @@
import numpy as np
from pytest import raises as assert_raises
from scipy.sparse.linalg import utils
def test_make_system_bad_shape():
assert_raises(ValueError, utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4))


@ -0,0 +1,121 @@
__docformat__ = "restructuredtext en"
__all__ = []
from numpy import asanyarray, asarray, array, matrix, zeros
from scipy.sparse.sputils import asmatrix
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \
IdentityOperator
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
('D','D'):'D'}
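# Keys are pairs of single-character dtype codes (f=float32, d=float64,
# F=complex64, D=complex128); the value is the common dtype for the pair.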
def coerce(x,y):
if x not in 'fdFD':
x = 'd'
if y not in 'fdFD':
y = 'd'
return _coerce_rules[x,y]
def _id(x):
    return x
def make_system(A, M, x0, b):
"""Make a linear system Ax=b
Parameters
----------
A : LinearOperator
sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, None}
preconditioner
sparse or dense matrix (or any valid input to aslinearoperator)
x0 : {array_like, None}
initial guess to iterative method
b : array_like
right hand side
Returns
-------
(A, M, x, b, postprocess)
A : LinearOperator
matrix of the linear system
M : LinearOperator
preconditioner
x : rank 1 ndarray
initial guess
b : rank 1 ndarray
right hand side
postprocess : function
converts the solution vector to the appropriate
type and dimensions (e.g. (N,1) matrix)
"""
A_ = A
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix, but got shape=%s' % (A.shape,))
N = A.shape[0]
b = asanyarray(b)
if not (b.shape == (N,1) or b.shape == (N,)):
raise ValueError('A and b have incompatible dimensions')
if b.dtype.char not in 'fdFD':
b = b.astype('d') # upcast non-FP types to double
def postprocess(x):
if isinstance(b,matrix):
x = asmatrix(x)
return x.reshape(b.shape)
if hasattr(A,'dtype'):
xtype = A.dtype.char
else:
xtype = A.matvec(b).dtype.char
xtype = coerce(xtype, b.dtype.char)
b = asarray(b,dtype=xtype) # make b the same type as x
b = b.ravel()
if x0 is None:
x = zeros(N, dtype=xtype)
else:
x = array(x0, dtype=xtype)
if not (x.shape == (N,1) or x.shape == (N,)):
raise ValueError('A and x have incompatible dimensions')
x = x.ravel()
# process preconditioner
if M is None:
        if hasattr(A_,'psolve'):
            psolve = A_.psolve
        else:
            psolve = _id
        if hasattr(A_,'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = _id
        if psolve is _id and rpsolve is _id:
M = IdentityOperator(shape=A.shape, dtype=A.dtype)
else:
M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
dtype=A.dtype)
else:
M = aslinearoperator(M)
if A.shape != M.shape:
raise ValueError('matrix and preconditioner have different shapes')
return A, M, x, b, postprocess
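# Illustrative usage (not part of the module): the iterative solvers call
# make_system first, e.g.
#
#     A, M, x, b, postprocess = make_system(A_in, None, None, rhs)
#
# after which M is an IdentityOperator (absent a preconditioner or
# psolve/rpsolve hooks on A_in), x is the zero initial guess, and
# postprocess restores the caller's shape/type on the final solution.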


@ -0,0 +1,860 @@
"""
Sparse matrix functions
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
__all__ = ['expm', 'inv']
import math
import numpy as np
import scipy.special
from scipy.linalg.basic import solve, solve_triangular
from scipy.sparse.base import isspmatrix
from scipy.sparse.linalg import spsolve
from scipy.sparse.sputils import is_pydata_spmatrix
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg.interface import LinearOperator
from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
UPPER_TRIANGULAR = 'upper_triangular'
def inv(A):
"""
Compute the inverse of a sparse matrix
Parameters
----------
A : (M,M) ndarray or sparse matrix
square matrix to be inverted
Returns
-------
Ainv : (M,M) ndarray or sparse matrix
inverse of `A`
Notes
-----
This computes the sparse inverse of `A`. If the inverse of `A` is expected
to be non-sparse, it will likely be faster to convert `A` to dense and use
scipy.linalg.inv.
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import inv
>>> A = csc_matrix([[1., 0.], [1., 2.]])
>>> Ainv = inv(A)
>>> Ainv
<2x2 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Column format>
>>> A.dot(Ainv)
<2x2 sparse matrix of type '<class 'numpy.float64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> A.dot(Ainv).todense()
matrix([[ 1., 0.],
[ 0., 1.]])
.. versionadded:: 0.12.0
"""
#check input
if not (scipy.sparse.isspmatrix(A) or is_pydata_spmatrix(A)):
raise TypeError('Input must be a sparse matrix')
I = _ident_like(A)
Ainv = spsolve(A, I)
return Ainv
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = np.ones((A.shape[0], 1), dtype=float)
M = A.T
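    # For entrywise non-negative A, ||A^p||_1 is the largest column sum of
    # A^p, i.e. max((A.T)^p @ ones); iterating with A.T avoids forming A^p.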
for i in range(p):
v = M.dot(v)
return np.max(v)
def _is_upper_triangular(A):
# This function could possibly be of wider interest.
if isspmatrix(A):
lower_part = scipy.sparse.tril(A, -1)
# Check structural upper triangularity,
# then coincidental upper triangularity if needed.
return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
elif is_pydata_spmatrix(A):
import sparse
lower_part = sparse.tril(A, -1)
return lower_part.nnz == 0
else:
return not np.tril(A, -1).any()
def _smart_matrix_product(A, B, alpha=None, structure=None):
"""
A matrix product that knows about sparse and structured matrices.
Parameters
----------
A : 2d ndarray
First matrix.
B : 2d ndarray
Second matrix.
alpha : float
The matrix product will be scaled by this constant.
structure : str, optional
A string describing the structure of both matrices `A` and `B`.
Only `upper_triangular` is currently supported.
Returns
-------
M : 2d ndarray
Matrix product of A and B.
"""
if len(A.shape) != 2:
raise ValueError('expected A to be a rectangular matrix')
if len(B.shape) != 2:
raise ValueError('expected B to be a rectangular matrix')
f = None
if structure == UPPER_TRIANGULAR:
if (not isspmatrix(A) and not isspmatrix(B)
and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)):
f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))
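            # BLAS trmm computes alpha * A @ B in one call when A is triangular.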
if f is not None:
if alpha is None:
alpha = 1.
out = f(alpha, A, B)
else:
if alpha is None:
out = A.dot(B)
else:
out = alpha * A.dot(B)
return out
class MatrixPowerOperator(LinearOperator):
def __init__(self, A, p, structure=None):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0:
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self._structure = structure
self.dtype = A.dtype
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x)
return x
def _rmatvec(self, x):
A_T = self._A.T
x = x.ravel()
for i in range(self._p):
x = A_T.dot(x)
return x
def _matmat(self, X):
for i in range(self._p):
X = _smart_matrix_product(self._A, X, structure=self._structure)
return X
@property
def T(self):
return MatrixPowerOperator(self._A.T, self._p)
class ProductOperator(LinearOperator):
"""
For now, this is limited to products of multiple square matrices.
"""
def __init__(self, *args, **kwargs):
self._structure = kwargs.get('structure', None)
for A in args:
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError(
'For now, the ProductOperator implementation is '
'limited to the product of multiple square matrices.')
if args:
n = args[0].shape[0]
for A in args:
for d in A.shape:
if d != n:
raise ValueError(
'The square matrices of the ProductOperator '
'must all have the same shape.')
self.shape = (n, n)
self.ndim = len(self.shape)
self.dtype = np.find_common_type([x.dtype for x in args], [])
self._operator_sequence = args
def _matvec(self, x):
for A in reversed(self._operator_sequence):
x = A.dot(x)
return x
def _rmatvec(self, x):
x = x.ravel()
for A in self._operator_sequence:
x = A.T.dot(x)
return x
def _matmat(self, X):
for A in reversed(self._operator_sequence):
X = _smart_matrix_product(A, X, structure=self._structure)
return X
@property
def T(self):
T_args = [A.T for A in reversed(self._operator_sequence)]
return ProductOperator(*T_args)
def _onenormest_matrix_power(A, p,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of A^p.
Parameters
----------
A : ndarray
Matrix whose 1-norm of a power is to be computed.
p : int
Non-negative integer power.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
MatrixPowerOperator(A, p, structure=structure))
def _onenormest_product(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
structure : str, optional
A string describing the structure of all operators.
Only `upper_triangular` is currently supported.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
ProductOperator(*operator_seq, structure=structure))
class _ExpmPadeHelper(object):
"""
Help lazily evaluate a matrix exponential.
The idea is to not do more work than we need for high expm precision,
so we lazily compute matrix powers and store or precompute
other properties of the matrix.
"""
def __init__(self, A, structure=None, use_exact_onenorm=False):
"""
Initialize the object.
Parameters
----------
A : a dense or sparse square numpy matrix or ndarray
The matrix to be exponentiated.
structure : str, optional
A string describing the structure of matrix `A`.
Only `upper_triangular` is currently supported.
use_exact_onenorm : bool, optional
If True then only the exact one-norm of matrix powers and products
will be used. Otherwise, the one-norm of powers and products
may initially be estimated.
"""
self.A = A
self._A2 = None
self._A4 = None
self._A6 = None
self._A8 = None
self._A10 = None
self._d4_exact = None
self._d6_exact = None
self._d8_exact = None
self._d10_exact = None
self._d4_approx = None
self._d6_approx = None
self._d8_approx = None
self._d10_approx = None
self.ident = _ident_like(A)
self.structure = structure
self.use_exact_onenorm = use_exact_onenorm
@property
def A2(self):
if self._A2 is None:
self._A2 = _smart_matrix_product(
self.A, self.A, structure=self.structure)
return self._A2
@property
def A4(self):
if self._A4 is None:
self._A4 = _smart_matrix_product(
self.A2, self.A2, structure=self.structure)
return self._A4
@property
def A6(self):
if self._A6 is None:
self._A6 = _smart_matrix_product(
self.A4, self.A2, structure=self.structure)
return self._A6
@property
def A8(self):
if self._A8 is None:
self._A8 = _smart_matrix_product(
self.A6, self.A2, structure=self.structure)
return self._A8
@property
def A10(self):
if self._A10 is None:
self._A10 = _smart_matrix_product(
self.A4, self.A6, structure=self.structure)
return self._A10
@property
def d4_tight(self):
if self._d4_exact is None:
self._d4_exact = _onenorm(self.A4)**(1/4.)
return self._d4_exact
@property
def d6_tight(self):
if self._d6_exact is None:
self._d6_exact = _onenorm(self.A6)**(1/6.)
return self._d6_exact
@property
def d8_tight(self):
if self._d8_exact is None:
self._d8_exact = _onenorm(self.A8)**(1/8.)
return self._d8_exact
@property
def d10_tight(self):
if self._d10_exact is None:
self._d10_exact = _onenorm(self.A10)**(1/10.)
return self._d10_exact
@property
def d4_loose(self):
if self.use_exact_onenorm:
return self.d4_tight
if self._d4_exact is not None:
return self._d4_exact
else:
if self._d4_approx is None:
self._d4_approx = _onenormest_matrix_power(self.A2, 2,
structure=self.structure)**(1/4.)
return self._d4_approx
@property
def d6_loose(self):
if self.use_exact_onenorm:
return self.d6_tight
if self._d6_exact is not None:
return self._d6_exact
else:
if self._d6_approx is None:
self._d6_approx = _onenormest_matrix_power(self.A2, 3,
structure=self.structure)**(1/6.)
return self._d6_approx
@property
def d8_loose(self):
if self.use_exact_onenorm:
return self.d8_tight
if self._d8_exact is not None:
return self._d8_exact
else:
if self._d8_approx is None:
self._d8_approx = _onenormest_matrix_power(self.A4, 2,
structure=self.structure)**(1/8.)
return self._d8_approx
@property
def d10_loose(self):
if self.use_exact_onenorm:
return self.d10_tight
if self._d10_exact is not None:
return self._d10_exact
else:
if self._d10_approx is None:
self._d10_approx = _onenormest_product((self.A4, self.A6),
structure=self.structure)**(1/10.)
return self._d10_approx
def pade3(self):
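        # Degree-3 Pade coefficients: U collects the odd powers of A and V
        # the even ones; _solve_P_Q later forms r_3(A) = (V - U)^{-1} (U + V).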
b = (120., 60., 12., 1.)
U = _smart_matrix_product(self.A,
b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[2]*self.A2 + b[0]*self.ident
return U, V
def pade5(self):
b = (30240., 15120., 3360., 420., 30., 1.)
U = _smart_matrix_product(self.A,
b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade7(self):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
U = _smart_matrix_product(self.A,
b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
structure=self.structure)
V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade9(self):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
U = _smart_matrix_product(self.A,
(b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
b[3]*self.A2 + b[1]*self.ident),
structure=self.structure)
V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
b[2]*self.A2 + b[0]*self.ident)
return U, V
def pade13_scaled(self, s):
b = (64764752532480000., 32382376266240000., 7771770303897600.,
1187353796428800., 129060195264000., 10559470521600.,
670442572800., 33522128640., 1323241920., 40840800., 960960.,
16380., 182., 1.)
B = self.A * 2**-s
B2 = self.A2 * 2**(-2*s)
B4 = self.A4 * 2**(-4*s)
B6 = self.A6 * 2**(-6*s)
U2 = _smart_matrix_product(B6,
b[13]*B6 + b[11]*B4 + b[9]*B2,
structure=self.structure)
U = _smart_matrix_product(B,
(U2 + b[7]*B6 + b[5]*B4 +
b[3]*B2 + b[1]*self.ident),
structure=self.structure)
V2 = _smart_matrix_product(B6,
b[12]*B6 + b[10]*B4 + b[8]*B2,
structure=self.structure)
V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
return U, V
def expm(A):
"""
Compute the matrix exponential using Pade approximation.
Parameters
----------
A : (M,M) array_like or sparse matrix
2D Array or Matrix (sparse or dense) to be exponentiated
Returns
-------
expA : (M,M) ndarray
Matrix exponential of `A`
Notes
-----
This is algorithm (6.1) which is a simplification of algorithm (5.1).
.. versionadded:: 0.12.0
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
"A New Scaling and Squaring Algorithm for the Matrix Exponential."
SIAM Journal on Matrix Analysis and Applications.
31 (3). pp. 970-989. ISSN 1095-7162
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import expm
>>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
>>> A.todense()
matrix([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]], dtype=int64)
>>> Aexp = expm(A)
>>> Aexp
<3x3 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Column format>
>>> Aexp.todense()
matrix([[ 2.71828183, 0. , 0. ],
[ 0. , 7.3890561 , 0. ],
[ 0. , 0. , 20.08553692]])
"""
return _expm(A, use_exact_onenorm='auto')
def _expm(A, use_exact_onenorm):
# Core of expm, separated to allow testing exact and approximate
# algorithms.
# Avoid indiscriminate asarray() to allow sparse or other strange arrays.
if isinstance(A, (list, tuple, np.matrix)):
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
    # Gracefully handle size-0 input, taking care to preserve the sparse type.
if A.shape == (0, 0):
out = np.zeros([0, 0], dtype=A.dtype)
if isspmatrix(A) or is_pydata_spmatrix(A):
return A.__class__(out)
return out
# Trivial case
if A.shape == (1, 1):
out = [[np.exp(A[0, 0])]]
# Avoid indiscriminate casting to ndarray to
# allow for sparse or other strange arrays
if isspmatrix(A) or is_pydata_spmatrix(A):
return A.__class__(out)
return np.array(out)
# Ensure input is of float type, to avoid integer overflows etc.
if ((isinstance(A, np.ndarray) or isspmatrix(A) or is_pydata_spmatrix(A))
and not np.issubdtype(A.dtype, np.inexact)):
A = A.astype(float)
# Detect upper triangularity.
structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None
if use_exact_onenorm == "auto":
# Hardcode a matrix order threshold for exact vs. estimated one-norms.
use_exact_onenorm = A.shape[0] < 200
# Track functions of A to help compute the matrix exponential.
h = _ExpmPadeHelper(
A, structure=structure, use_exact_onenorm=use_exact_onenorm)
# Try Pade order 3.
eta_1 = max(h.d4_loose, h.d6_loose)
if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
U, V = h.pade3()
return _solve_P_Q(U, V, structure=structure)
# Try Pade order 5.
eta_2 = max(h.d4_tight, h.d6_loose)
if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
U, V = h.pade5()
return _solve_P_Q(U, V, structure=structure)
# Try Pade orders 7 and 9.
eta_3 = max(h.d6_tight, h.d8_loose)
if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
U, V = h.pade7()
return _solve_P_Q(U, V, structure=structure)
if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
U, V = h.pade9()
return _solve_P_Q(U, V, structure=structure)
# Use Pade order 13.
eta_4 = max(h.d8_loose, h.d10_loose)
eta_5 = min(eta_3, eta_4)
theta_13 = 4.25
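    # Order-13 threshold from the Al-Mohy & Higham (2009) reference above.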
# Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
if eta_5 == 0:
# Nilpotent special case
s = 0
else:
s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
s = s + _ell(2**-s * h.A, 13)
U, V = h.pade13_scaled(s)
X = _solve_P_Q(U, V, structure=structure)
if structure == UPPER_TRIANGULAR:
# Invoke Code Fragment 2.1.
X = _fragment_2_1(X, h.A, s)
else:
# X = r_13(A)^(2^s) by repeated squaring.
for i in range(s):
X = X.dot(X)
return X
def _solve_P_Q(U, V, structure=None):
"""
A helper function for expm_2009.
Parameters
----------
U : ndarray
Pade numerator.
V : ndarray
Pade denominator.
structure : str, optional
A string describing the structure of both matrices `U` and `V`.
Only `upper_triangular` is currently supported.
Notes
-----
The `structure` argument is inspired by similar args
for theano and cvxopt functions.
"""
P = U + V
Q = -U + V
if isspmatrix(U) or is_pydata_spmatrix(U):
return spsolve(Q, P)
elif structure is None:
return solve(Q, P)
elif structure == UPPER_TRIANGULAR:
return solve_triangular(Q, P)
else:
raise ValueError('unsupported matrix structure: ' + str(structure))
def _exp_sinch(a, x):
"""
Stably evaluate exp(a)*sinh(x)/x
Notes
-----
    The strategy of falling back to a sixth order Taylor expansion
    was suggested by the Spallation Neutron Source docs,
    which were found on the internet by a Google search.
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
The details of the cutoff point and the Horner-like evaluation
was picked without reference to anything in particular.
Note that sinch is not currently implemented in scipy.special,
whereas the "engineer's" definition of sinc is implemented.
The implementation of sinc involves a scaling factor of pi
that distinguishes it from the "mathematician's" version of sinc.
"""
# If x is small then use sixth order Taylor expansion.
# How small is small? I am using the point where the relative error
# of the approximation is less than 1e-14.
# If x is large then directly evaluate sinh(x) / x.
if abs(x) < 0.0135:
x2 = x*x
return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
else:
return (np.exp(a + x) - np.exp(a - x)) / (2*x)
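# Editorial sketch (hypothetical helper, not part of scipy): at the 0.0135
# cutoff the two branches of _exp_sinch agree to roughly 1e-14 relative
# error, which is what motivates the choice of cutoff.
def _sketch_exp_sinch_branches():
    import numpy as np
    x = 0.0135
    x2 = x * x
    taylor = 1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))
    direct = (np.exp(x) - np.exp(-x)) / (2*x)  # the a = 0 case
    assert np.isclose(taylor, direct, rtol=1e-13)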
def _eq_10_42(lam_1, lam_2, t_12):
"""
Equation (10.42) of Functions of Matrices: Theory and Computation.
Notes
-----
This is a helper function for _fragment_2_1 of expm_2009.
Equation (10.42) is on page 251 in the section on Schur algorithms.
In particular, section 10.4.3 explains the Schur-Parlett algorithm.
expm([[lam_1, t_12], [0, lam_2]])
=
[[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
[0, exp(lam_2)]]
"""
# The plain formula t_12 * (exp(lam_2) - exp(lam_1)) / (lam_2 - lam_1)
# suffers from cancellation, according to Higham's textbook.
# A careful implementation of sinch, defined as sinh(x)/x,
# works around the cancellation.
a = 0.5 * (lam_1 + lam_2)
b = 0.5 * (lam_1 - lam_2)
return t_12 * _exp_sinch(a, b)
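# Editorial sketch (hypothetical helper, not part of scipy): Eq. (10.42)
# reproduces the off-diagonal entry of expm for a 2x2 upper triangular
# matrix; the numeric values here are arbitrary.
def _sketch_eq_10_42():
    import numpy as np
    from scipy.linalg import expm
    lam_1, lam_2, t_12 = 0.3, 0.7, 2.0
    T = np.array([[lam_1, t_12], [0.0, lam_2]])
    # The closed form should match the (0, 1) entry of the true exponential.
    assert np.allclose(expm(T)[0, 1], _eq_10_42(lam_1, lam_2, t_12))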
def _fragment_2_1(X, T, s):
"""
A helper function for expm_2009.
Notes
-----
The argument X is modified in place, but the returned value is not
necessarily the same object: the squaring steps rebind X, so callers
must use the return value rather than rely on the mutation.
This function also takes pains to do things in ways that are compatible
with sparse matrices, for example by avoiding fancy indexing
and by using methods of the matrices whenever possible instead of
using functions of the numpy or scipy libraries themselves.
"""
# Form X = r_m(2^-s T)
n = X.shape[0]
diag_T = np.ravel(T.diagonal().copy())
# Replace diag(X) by exp(2^-s diag(T)).
scale = 2 ** -s
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
for i in range(s-1, -1, -1):
X = X.dot(X)
# Replace diag(X) by exp(2^-i diag(T)).
scale = 2 ** -i
exp_diag = np.exp(scale * diag_T)
for k in range(n):
X[k, k] = exp_diag[k]
# Replace (first) superdiagonal of X by explicit formula
# for superdiagonal of exp(2^-i T) from Eq (10.42) of
# the author's 2008 textbook
# Functions of Matrices: Theory and Computation.
for k in range(n-1):
lam_1 = scale * diag_T[k]
lam_2 = scale * diag_T[k+1]
t_12 = scale * T[k, k+1]
value = _eq_10_42(lam_1, lam_2, t_12)
X[k, k+1] = value
# Return the updated X matrix.
return X
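# Editorial sketch (hypothetical helper, not part of scipy): the diagonal
# overwriting above is exact because, for a triangular T, the diagonal of
# expm(T) is exactly exp(diag(T)).
def _sketch_triangular_diagonal():
    import numpy as np
    from scipy.linalg import expm
    T = np.triu(np.random.default_rng(0).standard_normal((4, 4)))
    assert np.allclose(np.diag(expm(T)), np.exp(np.diag(T)))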
def _ell(A, m):
"""
A helper function for expm_2009.
Parameters
----------
A : linear operator
A linear operator whose matrix powers' one-norms enter the bound.
m : int
The order of the Pade approximant under consideration.
Returns
-------
value : int
A non-negative integer giving the extra scaling required for the
backward error bound to hold; the candidate Pade order is used
directly only when this is zero.
"""
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
# They are coefficients of terms of a generating function series expansion.
choose_2m_m = scipy.special.comb(2*m, m, exact=True)
abs_c_recip = float(choose_2m_m * math.factorial(2*m + 1))
# This is explained after Eq. (1.2) of the 2009 expm paper.
# It is the "unit roundoff" of IEEE double precision arithmetic.
u = 2**-53
# Compute the one-norm of the (2*m + 1)-th power of abs(A).
A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)
# Treat zero norm as a special case.
if not A_abs_onenorm:
return 0
alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
log2_alpha_div_u = np.log2(alpha/u)
value = int(np.ceil(log2_alpha_div_u / (2 * m)))
return max(value, 0)
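# Editorial aside (not part of scipy): the unit roundoff u = 2**-53 used
# above is half of the machine epsilon that NumPy reports for IEEE doubles.
def _sketch_unit_roundoff():
    import numpy as np
    assert 2.0**-53 == np.finfo(np.float64).eps / 2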

View file

@ -0,0 +1,18 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linalg', parent_package, top_path)
config.add_subpackage('isolve')
config.add_subpackage('dsolve')
config.add_subpackage('eigen')
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
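# Editorial note (assumption about the usual numpy.distutils workflow):
# this configuration is normally consumed by the parent scipy setup.py,
# while the __main__ block above also lets the subpackage be built on
# its own with `python setup.py build`.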

View file

@ -0,0 +1,251 @@
"""Test functions for the sparse.linalg._expm_multiply module
"""
import numpy as np
from numpy.testing import (assert_allclose, assert_, assert_equal,
suppress_warnings)
from scipy.sparse import SparseEfficiencyWarning
import scipy.sparse
import scipy.linalg
from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max,
_onenormest_matrix_power, expm_multiply, _expm_multiply_simple,
_expm_multiply_interval)
def less_than_or_close(a, b):
return np.allclose(a, b) or (a < b)
class TestExpmActionSimple:
"""
These tests do not consider the case of multiple time steps in one call.
"""
def test_theta_monotonicity(self):
pairs = sorted(_theta.items())
for (m_a, theta_a), (m_b, theta_b) in zip(pairs[:-1], pairs[1:]):
assert_(theta_a < theta_b)
def test_p_max_default(self):
m_max = 55
expected_p_max = 8
observed_p_max = _compute_p_max(m_max)
assert_equal(observed_p_max, expected_p_max)
def test_p_max_range(self):
for m_max in range(1, 55+1):
p_max = _compute_p_max(m_max)
assert_(p_max*(p_max - 1) <= m_max + 1)
p_too_big = p_max + 1
assert_(p_too_big*(p_too_big - 1) > m_max + 1)
def test_onenormest_matrix_power(self):
np.random.seed(1234)
n = 40
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
for p in range(4):
if not p:
M = np.identity(n)
else:
M = np.dot(M, A)
estimated = _onenormest_matrix_power(A, p)
exact = np.linalg.norm(M, 1)
assert_(less_than_or_close(estimated, exact))
assert_(less_than_or_close(exact, 3*estimated))
def test_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
observed = expm_multiply(A, B)
expected = np.dot(scipy.linalg.expm(A), B)
assert_allclose(observed, expected)
def test_matrix_vector_multiply(self):
np.random.seed(1234)
n = 40
nsamples = 10
for i in range(nsamples):
A = scipy.linalg.inv(np.random.randn(n, n))
v = np.random.randn(n)
observed = expm_multiply(A, v)
expected = np.dot(scipy.linalg.expm(A), v)
assert_allclose(observed, expected)
def test_scaled_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
for t in (0.2, 1.0, 1.5):
with np.errstate(invalid='ignore'):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
observed = _expm_multiply_simple(A, B, t=t)
expected = np.dot(scipy.linalg.expm(t*A), B)
assert_allclose(observed, expected)
def test_scaled_expm_multiply_single_timepoint(self):
np.random.seed(1234)
t = 0.1
n = 5
k = 2
A = np.random.randn(n, n)
B = np.random.randn(n, k)
observed = _expm_multiply_simple(A, B, t=t)
expected = scipy.linalg.expm(t*A).dot(B)
assert_allclose(observed, expected)
def test_sparse_expm_multiply(self):
np.random.seed(1234)
n = 40
k = 3
nsamples = 10
for i in range(nsamples):
A = scipy.sparse.rand(n, n, density=0.05)
B = np.random.randn(n, k)
observed = expm_multiply(A, B)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
expected = scipy.linalg.expm(A).dot(B)
assert_allclose(observed, expected)
def test_complex(self):
A = np.array([
[1j, 1j],
[0, 1j]], dtype=complex)
B = np.array([1j, 1j])
observed = expm_multiply(A, B)
expected = np.array([
1j * np.exp(1j) + 1j * (1j*np.cos(1) - np.sin(1)),
1j * np.exp(1j)], dtype=complex)
assert_allclose(observed, expected)
class TestExpmActionInterval:
def test_sparse_expm_multiply_interval(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
n = 40
k = 3
endpoint = True
for num in (14, 13, 2):
A = scipy.sparse.rand(n, n, density=0.05)
B = np.random.randn(n, k)
v = np.random.randn(n)
for target in (B, v):
X = expm_multiply(A, target,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning,
"splu requires CSC matrix format")
sup.filter(SparseEfficiencyWarning,
"spsolve is more efficient when sparse b is in the CSC matrix format")
for solution, t in zip(X, samples):
assert_allclose(solution,
scipy.linalg.expm(t*A).dot(target))
def test_expm_multiply_interval_vector(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
for num in (14, 13, 2):
for n in (1, 2, 5, 20, 40):
A = scipy.linalg.inv(np.random.randn(n, n))
v = np.random.randn(n)
X = expm_multiply(A, v,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(v))
def test_expm_multiply_interval_matrix(self):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
for num in (14, 13, 2):
for n in (1, 2, 5, 20, 40):
for k in (1, 2):
A = scipy.linalg.inv(np.random.randn(n, n))
B = np.random.randn(n, k)
X = expm_multiply(A, B,
start=start, stop=stop, num=num, endpoint=endpoint)
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
def test_sparse_expm_multiply_interval_dtypes(self):
# Test A & B int
A = scipy.sparse.diags(np.arange(5), format='csr', dtype=int)
B = np.ones(5, dtype=int)
Aexpm = scipy.sparse.diags(np.exp(np.arange(5)), format='csr')
assert_allclose(expm_multiply(A, B, 0, 1)[-1], Aexpm.dot(B))
# Test A complex, B int
A = scipy.sparse.diags(-1j*np.arange(5), format='csr', dtype=complex)
B = np.ones(5, dtype=int)
Aexpm = scipy.sparse.diags(np.exp(-1j*np.arange(5)), format='csr')
assert_allclose(expm_multiply(A, B, 0, 1)[-1], Aexpm.dot(B))
# Test A int, B complex
A = scipy.sparse.diags(np.arange(5), format='csr', dtype=int)
B = np.full(5, 1j, dtype=complex)
Aexpm = scipy.sparse.diags(np.exp(np.arange(5)), format='csr')
assert_allclose(expm_multiply(A, B, 0, 1)[-1], Aexpm.dot(B))
def test_expm_multiply_interval_status_0(self):
self._help_test_specific_expm_interval_status(0)
def test_expm_multiply_interval_status_1(self):
self._help_test_specific_expm_interval_status(1)
def test_expm_multiply_interval_status_2(self):
self._help_test_specific_expm_interval_status(2)
def _help_test_specific_expm_interval_status(self, target_status):
np.random.seed(1234)
start = 0.1
stop = 3.2
endpoint = True
n = 5
k = 2
nrepeats = 10
nsuccesses = 0
for num in [14, 13, 2] * nrepeats:
A = np.random.randn(n, n)
B = np.random.randn(n, k)
status = _expm_multiply_interval(A, B,
start=start, stop=stop, num=num, endpoint=endpoint,
status_only=True)
if status == target_status:
X, status = _expm_multiply_interval(A, B,
start=start, stop=stop, num=num, endpoint=endpoint,
status_only=False)
assert_equal(X.shape, (num, n, k))
samples = np.linspace(start=start, stop=stop,
num=num, endpoint=endpoint)
for solution, t in zip(X, samples):
assert_allclose(solution, scipy.linalg.expm(t*A).dot(B))
nsuccesses += 1
if not nsuccesses:
msg = f'failed to find a status-{target_status} interval'
raise Exception(msg)

Some files were not shown because too many files have changed in this diff