Fixed database typo and removed unnecessary class identifier.
This commit is contained in:
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions
@@ -0,0 +1,15 @@
"Iterative Solvers for Sparse Linear Systems"

#from info import __doc__
from .iterative import *
from .minres import minres
from .lgmres import lgmres
from .lsqr import lsqr
from .lsmr import lsmr
from ._gcrotmk import gcrotmk

__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
487 venv/Lib/site-packages/scipy/sparse/linalg/isolve/_gcrotmk.py Normal file
@@ -0,0 +1,487 @@
# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi>
|
||||
# Distributed under the same license as SciPy.
|
||||
|
||||
import warnings
|
||||
import numpy as np
|
||||
from numpy.linalg import LinAlgError
|
||||
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
|
||||
from scipy.sparse.linalg.isolve.utils import make_system
|
||||
|
||||
|
||||
__all__ = ['gcrotmk']
|
||||
|
||||
|
||||
def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
|
||||
prepend_outer_v=False):
|
||||
"""
|
||||
FGMRES Arnoldi process, with optional projection or augmentation
|
||||
|
||||
Parameters
|
||||
----------
|
||||
matvec : callable
|
||||
Operation A*x
|
||||
v0 : ndarray
|
||||
Initial vector, normalized to nrm2(v0) == 1
|
||||
m : int
|
||||
Number of GMRES rounds
|
||||
atol : float
|
||||
Absolute tolerance for early exit
|
||||
lpsolve : callable
|
||||
Left preconditioner L
|
||||
rpsolve : callable
|
||||
Right preconditioner R
|
||||
CU : list of (ndarray, ndarray)
|
||||
Columns of matrices C and U in GCROT
|
||||
outer_v : list of ndarrays
|
||||
Augmentation vectors in LGMRES
|
||||
prepend_outer_v : bool, optional
|
||||
Whether augmentation vectors come before or after
|
||||
Krylov iterates
|
||||
|
||||
Raises
|
||||
------
|
||||
LinAlgError
|
||||
If nans encountered
|
||||
|
||||
Returns
|
||||
-------
|
||||
Q, R : ndarray
|
||||
QR decomposition of the upper Hessenberg H=QR
|
||||
B : ndarray
|
||||
Projections corresponding to matrix C
|
||||
vs : list of ndarray
|
||||
Columns of matrix V
|
||||
zs : list of ndarray
|
||||
Columns of matrix Z
|
||||
y : ndarray
|
||||
Solution to ||H y - e_1||_2 = min!
|
||||
res : float
|
||||
The final (preconditioned) residual norm
|
||||
|
||||
"""
|
||||
|
||||
if lpsolve is None:
|
||||
lpsolve = lambda x: x
|
||||
if rpsolve is None:
|
||||
rpsolve = lambda x: x
|
||||
|
||||
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
|
||||
|
||||
vs = [v0]
|
||||
zs = []
|
||||
y = None
|
||||
res = np.nan
|
||||
|
||||
m = m + len(outer_v)
|
||||
|
||||
# Orthogonal projection coefficients
|
||||
B = np.zeros((len(cs), m), dtype=v0.dtype)
|
||||
|
||||
# H is stored in QR factorized form
|
||||
Q = np.ones((1, 1), dtype=v0.dtype)
|
||||
R = np.zeros((1, 0), dtype=v0.dtype)
|
||||
|
||||
eps = np.finfo(v0.dtype).eps
|
||||
|
||||
breakdown = False
|
||||
|
||||
# FGMRES Arnoldi process
|
||||
for j in range(m):
|
||||
# L A Z = C B + V H
|
||||
|
||||
if prepend_outer_v and j < len(outer_v):
|
||||
z, w = outer_v[j]
|
||||
elif prepend_outer_v and j == len(outer_v):
|
||||
z = rpsolve(v0)
|
||||
w = None
|
||||
elif not prepend_outer_v and j >= m - len(outer_v):
|
||||
z, w = outer_v[j - (m - len(outer_v))]
|
||||
else:
|
||||
z = rpsolve(vs[-1])
|
||||
w = None
|
||||
|
||||
if w is None:
|
||||
w = lpsolve(matvec(z))
|
||||
else:
|
||||
# w is clobbered below
|
||||
w = w.copy()
|
||||
|
||||
w_norm = nrm2(w)
|
||||
|
||||
# GCROT projection: L A -> (1 - C C^H) L A
|
||||
# i.e. orthogonalize against C
|
||||
for i, c in enumerate(cs):
|
||||
alpha = dot(c, w)
|
||||
B[i,j] = alpha
|
||||
w = axpy(c, w, c.shape[0], -alpha) # w -= alpha*c
|
||||
|
||||
# Orthogonalize against V
|
||||
hcur = np.zeros(j+2, dtype=Q.dtype)
|
||||
for i, v in enumerate(vs):
|
||||
alpha = dot(v, w)
|
||||
hcur[i] = alpha
|
||||
w = axpy(v, w, v.shape[0], -alpha) # w -= alpha*v
|
||||
hcur[i+1] = nrm2(w)
|
||||
|
||||
with np.errstate(over='ignore', divide='ignore'):
|
||||
# Careful with denormals
|
||||
alpha = 1/hcur[-1]
|
||||
|
||||
if np.isfinite(alpha):
|
||||
w = scal(alpha, w)
|
||||
|
||||
if not (hcur[-1] > eps * w_norm):
|
||||
# w essentially in the span of previous vectors,
|
||||
# or we have nans. Bail out after updating the QR
|
||||
# solution.
|
||||
breakdown = True
|
||||
|
||||
vs.append(w)
|
||||
zs.append(z)
|
||||
|
||||
# Arnoldi LSQ problem
|
||||
|
||||
# Add new column to H=Q*R, padding other columns with zeros
|
||||
Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
|
||||
Q2[:j+1,:j+1] = Q
|
||||
Q2[j+1,j+1] = 1
|
||||
|
||||
R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
|
||||
R2[:j+1,:] = R
|
||||
|
||||
Q, R = qr_insert(Q2, R2, hcur, j, which='col',
|
||||
overwrite_qru=True, check_finite=False)
|
||||
|
||||
# Transformed least squares problem
|
||||
# || Q R y - inner_res_0 * e_1 ||_2 = min!
|
||||
# Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
|
||||
|
||||
# Residual is immediately known
|
||||
res = abs(Q[0,-1])
|
||||
|
||||
# Check for termination
|
||||
if res < atol or breakdown:
|
||||
break
|
||||
|
||||
if not np.isfinite(R[j,j]):
|
||||
# nans encountered, bail out
|
||||
raise LinAlgError()
|
||||
|
||||
# -- Get the LSQ problem solution
|
||||
|
||||
# The problem is triangular, but the condition number may be
|
||||
# bad (or in case of breakdown the last diagonal entry may be
|
||||
# zero), so use lstsq instead of trtrs.
|
||||
y, _, _, _, = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
|
||||
|
||||
B = B[:,:j+1]
|
||||
|
||||
return Q, R, B, vs, zs, y, res
|
||||
|
||||
|
||||
def gcrotmk(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
|
||||
m=20, k=None, CU=None, discard_C=False, truncate='oldest',
|
||||
atol=None):
|
||||
"""
|
||||
Solve a matrix equation using flexible GCROT(m,k) algorithm.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : {sparse matrix, dense matrix, LinearOperator}
|
||||
The real or complex N-by-N matrix of the linear system.
|
||||
Alternatively, ``A`` can be a linear operator which can
|
||||
produce ``Ax`` using, e.g.,
|
||||
``scipy.sparse.linalg.LinearOperator``.
|
||||
b : {array, matrix}
|
||||
Right hand side of the linear system. Has shape (N,) or (N,1).
|
||||
x0 : {array, matrix}
|
||||
Starting guess for the solution.
|
||||
tol, atol : float, optional
|
||||
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
|
||||
The default for ``atol`` is `tol`.
|
||||
|
||||
.. warning::
|
||||
|
||||
The default value for `atol` will be changed in a future release.
|
||||
For future compatibility, specify `atol` explicitly.
|
||||
maxiter : int, optional
|
||||
Maximum number of iterations. Iteration will stop after maxiter
|
||||
steps even if the specified tolerance has not been achieved.
|
||||
M : {sparse matrix, dense matrix, LinearOperator}, optional
|
||||
Preconditioner for A. The preconditioner should approximate the
|
||||
inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
|
||||
can vary from iteration to iteration. Effective preconditioning
|
||||
dramatically improves the rate of convergence, which implies that
|
||||
fewer iterations are needed to reach a given error tolerance.
|
||||
callback : function, optional
|
||||
User-supplied function to call after each iteration. It is called
|
||||
as callback(xk), where xk is the current solution vector.
|
||||
m : int, optional
|
||||
Number of inner FGMRES iterations per each outer iteration.
|
||||
Default: 20
|
||||
k : int, optional
|
||||
Number of vectors to carry between inner FGMRES iterations.
|
||||
According to [2]_, good values are around m.
|
||||
Default: m
|
||||
CU : list of tuples, optional
|
||||
List of tuples ``(c, u)`` which contain the columns of the matrices
|
||||
C and U in the GCROT(m,k) algorithm. For details, see [2]_.
|
||||
The list given and vectors contained in it are modified in-place.
|
||||
If not given, start from empty matrices. The ``c`` elements in the
|
||||
tuples can be ``None``, in which case the vectors are recomputed
|
||||
via ``c = A u`` on start and orthogonalized as described in [3]_.
|
||||
discard_C : bool, optional
|
||||
Discard the C-vectors at the end. Useful if recycling Krylov subspaces
|
||||
for different linear systems.
|
||||
truncate : {'oldest', 'smallest'}, optional
|
||||
Truncation scheme to use. Drop: oldest vectors, or vectors with
|
||||
smallest singular values using the scheme discussed in [1,2].
|
||||
See [2]_ for detailed comparison.
|
||||
Default: 'oldest'
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : array or matrix
|
||||
The solution found.
|
||||
info : int
|
||||
Provides convergence information:
|
||||
|
||||
* 0 : successful exit
|
||||
* >0 : convergence to tolerance not achieved, number of iterations
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
|
||||
methods'', SIAM J. Numer. Anal. 36, 864 (1999).
|
||||
.. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
|
||||
of GCROT for solving nonsymmetric linear systems'',
|
||||
SIAM J. Sci. Comput. 32, 172 (2010).
|
||||
.. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
|
||||
''Recycling Krylov subspaces for sequences of linear systems'',
|
||||
SIAM J. Sci. Comput. 28, 1651 (2006).
|
||||
|
||||
"""
|
||||
A,M,x,b,postprocess = make_system(A,M,x0,b)
|
||||
|
||||
if not np.isfinite(b).all():
|
||||
raise ValueError("RHS must contain only finite numbers")
|
||||
|
||||
if truncate not in ('oldest', 'smallest'):
|
||||
raise ValueError("Invalid value for 'truncate': %r" % (truncate,))
|
||||
|
||||
if atol is None:
|
||||
warnings.warn("scipy.sparse.linalg.gcrotmk called without specifying `atol`. "
|
||||
"The default value will change in the future. To preserve "
|
||||
"current behavior, set ``atol=tol``.",
|
||||
category=DeprecationWarning, stacklevel=2)
|
||||
atol = tol
|
||||
|
||||
matvec = A.matvec
|
||||
psolve = M.matvec
|
||||
|
||||
if CU is None:
|
||||
CU = []
|
||||
|
||||
if k is None:
|
||||
k = m
|
||||
|
||||
axpy, dot, scal = None, None, None
|
||||
|
||||
r = b - matvec(x)
|
||||
|
||||
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))
|
||||
|
||||
b_norm = nrm2(b)
|
||||
|
||||
if discard_C:
|
||||
CU[:] = [(None, u) for c, u in CU]
|
||||
|
||||
# Reorthogonalize old vectors
|
||||
if CU:
|
||||
# Sort already existing vectors to the front
|
||||
CU.sort(key=lambda cu: cu[0] is not None)
|
||||
|
||||
# Fill-in missing ones
|
||||
C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
|
||||
us = []
|
||||
j = 0
|
||||
while CU:
|
||||
# More memory-efficient: throw away old vectors as we go
|
||||
c, u = CU.pop(0)
|
||||
if c is None:
|
||||
c = matvec(u)
|
||||
C[:,j] = c
|
||||
j += 1
|
||||
us.append(u)
|
||||
|
||||
# Orthogonalize
|
||||
Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
|
||||
del C
|
||||
|
||||
# C := Q
|
||||
cs = list(Q.T)
|
||||
|
||||
# U := U P R^-1, back-substitution
|
||||
new_us = []
|
||||
for j in range(len(cs)):
|
||||
u = us[P[j]]
|
||||
for i in range(j):
|
||||
u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
|
||||
if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
|
||||
# discard rest of the vectors
|
||||
break
|
||||
u = scal(1.0/R[j,j], u)
|
||||
new_us.append(u)
|
||||
|
||||
# Form the new CU lists
|
||||
CU[:] = list(zip(cs, new_us))[::-1]
|
||||
|
||||
if CU:
|
||||
axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))
|
||||
|
||||
# Solve first the projection operation with respect to the CU
|
||||
# vectors. This corresponds to modifying the initial guess to
|
||||
# be
|
||||
#
|
||||
# x' = x + U y
|
||||
# y = argmin_y || b - A (x + U y) ||^2
|
||||
#
|
||||
# The solution is y = C^H (b - A x)
|
||||
for c, u in CU:
|
||||
yc = dot(c, r)
|
||||
x = axpy(u, x, x.shape[0], yc)
|
||||
r = axpy(c, r, r.shape[0], -yc)
|
||||
|
||||
# GCROT main iteration
|
||||
for j_outer in range(maxiter):
|
||||
# -- callback
|
||||
if callback is not None:
|
||||
callback(x)
|
||||
|
||||
beta = nrm2(r)
|
||||
|
||||
# -- check stopping condition
|
||||
beta_tol = max(atol, tol * b_norm)
|
||||
|
||||
if beta <= beta_tol and (j_outer > 0 or CU):
|
||||
# recompute residual to avoid rounding error
|
||||
r = b - matvec(x)
|
||||
beta = nrm2(r)
|
||||
|
||||
if beta <= beta_tol:
|
||||
j_outer = -1
|
||||
break
|
||||
|
||||
ml = m + max(k - len(CU), 0)
|
||||
|
||||
cs = [c for c, u in CU]
|
||||
|
||||
try:
|
||||
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
|
||||
r/beta,
|
||||
ml,
|
||||
rpsolve=psolve,
|
||||
atol=max(atol, tol*b_norm)/beta,
|
||||
cs=cs)
|
||||
y *= beta
|
||||
except LinAlgError:
|
||||
# Floating point over/underflow, non-finite result from
|
||||
# matmul etc. -- report failure.
|
||||
break
|
||||
|
||||
#
|
||||
# At this point,
|
||||
#
|
||||
# [A U, A Z] = [C, V] G; G = [ I B ]
|
||||
# [ 0 H ]
|
||||
#
|
||||
# where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
|
||||
#
|
||||
# || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
|
||||
#
|
||||
# from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
|
||||
#
|
||||
|
||||
#
|
||||
# GCROT(m,k) update
|
||||
#
|
||||
|
||||
# Define new outer vectors
|
||||
|
||||
# ux := (Z - U B) y
|
||||
ux = zs[0]*y[0]
|
||||
for z, yc in zip(zs[1:], y[1:]):
|
||||
ux = axpy(z, ux, ux.shape[0], yc) # ux += z*yc
|
||||
by = B.dot(y)
|
||||
for cu, byc in zip(CU, by):
|
||||
c, u = cu
|
||||
ux = axpy(u, ux, ux.shape[0], -byc) # ux -= u*byc
|
||||
|
||||
# cx := V H y
|
||||
hy = Q.dot(R.dot(y))
|
||||
cx = vs[0] * hy[0]
|
||||
for v, hyc in zip(vs[1:], hy[1:]):
|
||||
cx = axpy(v, cx, cx.shape[0], hyc) # cx += v*hyc
|
||||
|
||||
# Normalize cx, maintaining cx = A ux
|
||||
# This new cx is orthogonal to the previous C, by construction
|
||||
try:
|
||||
alpha = 1/nrm2(cx)
|
||||
if not np.isfinite(alpha):
|
||||
raise FloatingPointError()
|
||||
except (FloatingPointError, ZeroDivisionError):
|
||||
# Cannot update, so skip it
|
||||
continue
|
||||
|
||||
cx = scal(alpha, cx)
|
||||
ux = scal(alpha, ux)
|
||||
|
||||
# Update residual and solution
|
||||
gamma = dot(cx, r)
|
||||
r = axpy(cx, r, r.shape[0], -gamma) # r -= gamma*cx
|
||||
x = axpy(ux, x, x.shape[0], gamma) # x += gamma*ux
|
||||
|
||||
# Truncate CU
|
||||
if truncate == 'oldest':
|
||||
while len(CU) >= k and CU:
|
||||
del CU[0]
|
||||
elif truncate == 'smallest':
|
||||
if len(CU) >= k and CU:
|
||||
# cf. [1,2]
|
||||
D = solve(R[:-1,:].T, B.T).T
|
||||
W, sigma, V = svd(D)
|
||||
|
||||
# C := C W[:,:k-1], U := U W[:,:k-1]
|
||||
new_CU = []
|
||||
for j, w in enumerate(W[:,:k-1].T):
|
||||
c, u = CU[0]
|
||||
c = c * w[0]
|
||||
u = u * w[0]
|
||||
for cup, wp in zip(CU[1:], w[1:]):
|
||||
cp, up = cup
|
||||
c = axpy(cp, c, c.shape[0], wp)
|
||||
u = axpy(up, u, u.shape[0], wp)
|
||||
|
||||
# Reorthogonalize at the same time; not necessary
|
||||
# in exact arithmetic, but floating point error
|
||||
# tends to accumulate here
|
||||
for cp, up in new_CU:
|
||||
alpha = dot(cp, c)
|
||||
c = axpy(cp, c, c.shape[0], -alpha)
|
||||
u = axpy(up, u, u.shape[0], -alpha)
|
||||
alpha = nrm2(c)
|
||||
c = scal(1.0/alpha, c)
|
||||
u = scal(1.0/alpha, u)
|
||||
|
||||
new_CU.append((c, u))
|
||||
CU[:] = new_CU
|
||||
|
||||
# Add new vector to CU
|
||||
CU.append((cx, ux))
|
||||
|
||||
# Include the solution vector to the span
|
||||
CU.append((None, x.copy()))
|
||||
if discard_C:
|
||||
CU[:] = [(None, uz) for cz, uz in CU]
|
||||
|
||||
return postprocess(x), j_outer + 1
|
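The ``gcrotmk`` routine added above exposes the ``CU`` recycling that its docstring describes for sequences of similar systems. A minimal usage sketch, assuming an illustrative random, diagonally dominant test system (the matrix, tolerances, and ``m``/``k`` values below are assumptions, not part of this commit)::

    import numpy as np
    from scipy.sparse import eye, random as sparse_random
    from scipy.sparse.linalg import gcrotmk

    # Illustrative, diagonally dominant test system (an assumption, not from the commit)
    n = 200
    A = sparse_random(n, n, density=0.01, format='csr') + 4.0 * eye(n)
    b = np.ones(n)

    # CU carries the recycled C/U vectors between solves, as the docstring describes;
    # it is modified in place and can be handed to the next, similar system.
    CU = []
    x, info = gcrotmk(A, b, tol=1e-8, atol=1e-8, m=20, k=10, CU=CU)
    x2, info2 = gcrotmk(A, b + 0.01, tol=1e-8, atol=1e-8, CU=CU)
    # info == 0 / info2 == 0 signal convergence to the requested tolerance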
Binary file not shown.
816 venv/Lib/site-packages/scipy/sparse/linalg/isolve/iterative.py Normal file
@@ -0,0 +1,816 @@
"""Iterative methods for solving linear systems"""
|
||||
|
||||
__all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr']
|
||||
|
||||
import warnings
|
||||
import numpy as np
|
||||
|
||||
from . import _iterative
|
||||
|
||||
from scipy.sparse.linalg.interface import LinearOperator
|
||||
from .utils import make_system
|
||||
from scipy._lib._util import _aligned_zeros
|
||||
from scipy._lib._threadsafety import non_reentrant
|
||||
|
||||
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'}
|
||||
|
||||
|
||||
# Part of the docstring common to all iterative solvers
|
||||
common_doc1 = \
|
||||
"""
|
||||
Parameters
|
||||
----------
|
||||
A : {sparse matrix, dense matrix, LinearOperator}"""
|
||||
|
||||
common_doc2 = \
|
||||
"""b : {array, matrix}
|
||||
Right hand side of the linear system. Has shape (N,) or (N,1).
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : {array, matrix}
|
||||
The converged solution.
|
||||
info : integer
|
||||
Provides convergence information:
|
||||
0 : successful exit
|
||||
>0 : convergence to tolerance not achieved, number of iterations
|
||||
<0 : illegal input or breakdown
|
||||
|
||||
Other Parameters
|
||||
----------------
|
||||
x0 : {array, matrix}
|
||||
Starting guess for the solution.
|
||||
tol, atol : float, optional
|
||||
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
|
||||
The default for ``atol`` is ``'legacy'``, which emulates
|
||||
a different legacy behavior.
|
||||
|
||||
.. warning::
|
||||
|
||||
The default value for `atol` will be changed in a future release.
|
||||
For future compatibility, specify `atol` explicitly.
|
||||
maxiter : integer
|
||||
Maximum number of iterations. Iteration will stop after maxiter
|
||||
steps even if the specified tolerance has not been achieved.
|
||||
M : {sparse matrix, dense matrix, LinearOperator}
|
||||
Preconditioner for A. The preconditioner should approximate the
|
||||
inverse of A. Effective preconditioning dramatically improves the
|
||||
rate of convergence, which implies that fewer iterations are needed
|
||||
to reach a given error tolerance.
|
||||
callback : function
|
||||
User-supplied function to call after each iteration. It is called
|
||||
as callback(xk), where xk is the current solution vector.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
def _stoptest(residual, atol):
|
||||
"""
|
||||
Successful termination condition for the solvers.
|
||||
"""
|
||||
resid = np.linalg.norm(residual)
|
||||
if resid <= atol:
|
||||
return resid, 1
|
||||
else:
|
||||
return resid, 0
|
||||
|
||||
|
||||
def _get_atol(tol, atol, bnrm2, get_residual, routine_name):
|
||||
"""
|
||||
Parse arguments for absolute tolerance in termination condition.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
tol, atol : object
|
||||
The arguments passed into the solver routine by the user.
|
||||
bnrm2 : float
|
||||
2-norm of the rhs vector.
|
||||
get_residual : callable
|
||||
Callable ``get_residual()`` that returns the initial value of
|
||||
the residual.
|
||||
routine_name : str
|
||||
Name of the routine.
|
||||
"""
|
||||
|
||||
if atol is None:
|
||||
warnings.warn("scipy.sparse.linalg.{name} called without specifying `atol`. "
|
||||
"The default value will be changed in a future release. "
|
||||
"For compatibility, specify a value for `atol` explicitly, e.g., "
|
||||
"``{name}(..., atol=0)``, or to retain the old behavior "
|
||||
"``{name}(..., atol='legacy')``".format(name=routine_name),
|
||||
category=DeprecationWarning, stacklevel=4)
|
||||
atol = 'legacy'
|
||||
|
||||
tol = float(tol)
|
||||
|
||||
if atol == 'legacy':
|
||||
# emulate old legacy behavior
|
||||
resid = get_residual()
|
||||
if resid <= tol:
|
||||
return 'exit'
|
||||
if bnrm2 == 0:
|
||||
return tol
|
||||
else:
|
||||
return tol * float(bnrm2)
|
||||
else:
|
||||
return max(float(atol), tol * float(bnrm2))
|
||||
|
||||
|
||||
def set_docstring(header, Ainfo, footer='', atol_default='0'):
|
||||
def combine(fn):
|
||||
fn.__doc__ = '\n'.join((header, common_doc1,
|
||||
' ' + Ainfo.replace('\n', '\n '),
|
||||
common_doc2, footer))
|
||||
return fn
|
||||
return combine
|
||||
|
||||
|
||||
@set_docstring('Use BIConjugate Gradient iteration to solve ``Ax = b``.',
|
||||
'The real or complex N-by-N matrix of the linear system.\n'
|
||||
'Alternatively, ``A`` can be a linear operator which can\n'
|
||||
'produce ``Ax`` and ``A^T x`` using, e.g.,\n'
|
||||
'``scipy.sparse.linalg.LinearOperator``.',
|
||||
footer="""
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import csc_matrix
|
||||
>>> from scipy.sparse.linalg import bicg
|
||||
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
|
||||
>>> b = np.array([2, 4, -1], dtype=float)
|
||||
>>> x, exitCode = bicg(A, b)
|
||||
>>> print(exitCode) # 0 indicates successful convergence
|
||||
0
|
||||
>>> np.allclose(A.dot(x), b)
|
||||
True
|
||||
|
||||
"""
|
||||
)
|
||||
@non_reentrant()
|
||||
def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
|
||||
A,M,x,b,postprocess = make_system(A, M, x0, b)
|
||||
|
||||
n = len(b)
|
||||
if maxiter is None:
|
||||
maxiter = n*10
|
||||
|
||||
matvec, rmatvec = A.matvec, A.rmatvec
|
||||
psolve, rpsolve = M.matvec, M.rmatvec
|
||||
ltr = _type_conv[x.dtype.char]
|
||||
revcom = getattr(_iterative, ltr + 'bicgrevcom')
|
||||
|
||||
get_residual = lambda: np.linalg.norm(matvec(x) - b)
|
||||
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicg')
|
||||
if atol == 'exit':
|
||||
return postprocess(x), 0
|
||||
|
||||
resid = atol
|
||||
ndx1 = 1
|
||||
ndx2 = -1
|
||||
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
|
||||
work = _aligned_zeros(6*n,dtype=x.dtype)
|
||||
ijob = 1
|
||||
info = 0
|
||||
ftflag = True
|
||||
iter_ = maxiter
|
||||
while True:
|
||||
olditer = iter_
|
||||
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
|
||||
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
|
||||
if callback is not None and iter_ > olditer:
|
||||
callback(x)
|
||||
slice1 = slice(ndx1-1, ndx1-1+n)
|
||||
slice2 = slice(ndx2-1, ndx2-1+n)
|
||||
if (ijob == -1):
|
||||
if callback is not None:
|
||||
callback(x)
|
||||
break
|
||||
elif (ijob == 1):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(work[slice1])
|
||||
elif (ijob == 2):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*rmatvec(work[slice1])
|
||||
elif (ijob == 3):
|
||||
work[slice1] = psolve(work[slice2])
|
||||
elif (ijob == 4):
|
||||
work[slice1] = rpsolve(work[slice2])
|
||||
elif (ijob == 5):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(x)
|
||||
elif (ijob == 6):
|
||||
if ftflag:
|
||||
info = -1
|
||||
ftflag = False
|
||||
resid, info = _stoptest(work[slice1], atol)
|
||||
ijob = 2
|
||||
|
||||
if info > 0 and iter_ == maxiter and not (resid <= atol):
|
||||
# info isn't set appropriately otherwise
|
||||
info = iter_
|
||||
|
||||
return postprocess(x), info
|
||||
|
||||
|
||||
@set_docstring('Use BIConjugate Gradient STABilized iteration to solve '
|
||||
'``Ax = b``.',
|
||||
'The real or complex N-by-N matrix of the linear system.\n'
|
||||
'Alternatively, ``A`` can be a linear operator which can\n'
|
||||
'produce ``Ax`` using, e.g.,\n'
|
||||
'``scipy.sparse.linalg.LinearOperator``.')
|
||||
@non_reentrant()
|
||||
def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
|
||||
A, M, x, b, postprocess = make_system(A, M, x0, b)
|
||||
|
||||
n = len(b)
|
||||
if maxiter is None:
|
||||
maxiter = n*10
|
||||
|
||||
matvec = A.matvec
|
||||
psolve = M.matvec
|
||||
ltr = _type_conv[x.dtype.char]
|
||||
revcom = getattr(_iterative, ltr + 'bicgstabrevcom')
|
||||
|
||||
get_residual = lambda: np.linalg.norm(matvec(x) - b)
|
||||
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicgstab')
|
||||
if atol == 'exit':
|
||||
return postprocess(x), 0
|
||||
|
||||
resid = atol
|
||||
ndx1 = 1
|
||||
ndx2 = -1
|
||||
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
|
||||
work = _aligned_zeros(7*n,dtype=x.dtype)
|
||||
ijob = 1
|
||||
info = 0
|
||||
ftflag = True
|
||||
iter_ = maxiter
|
||||
while True:
|
||||
olditer = iter_
|
||||
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
|
||||
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
|
||||
if callback is not None and iter_ > olditer:
|
||||
callback(x)
|
||||
slice1 = slice(ndx1-1, ndx1-1+n)
|
||||
slice2 = slice(ndx2-1, ndx2-1+n)
|
||||
if (ijob == -1):
|
||||
if callback is not None:
|
||||
callback(x)
|
||||
break
|
||||
elif (ijob == 1):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(work[slice1])
|
||||
elif (ijob == 2):
|
||||
work[slice1] = psolve(work[slice2])
|
||||
elif (ijob == 3):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(x)
|
||||
elif (ijob == 4):
|
||||
if ftflag:
|
||||
info = -1
|
||||
ftflag = False
|
||||
resid, info = _stoptest(work[slice1], atol)
|
||||
ijob = 2
|
||||
|
||||
if info > 0 and iter_ == maxiter and not (resid <= atol):
|
||||
# info isn't set appropriately otherwise
|
||||
info = iter_
|
||||
|
||||
return postprocess(x), info
|
||||
|
||||
|
||||
@set_docstring('Use Conjugate Gradient iteration to solve ``Ax = b``.',
|
||||
'The real or complex N-by-N matrix of the linear system.\n'
|
||||
'``A`` must represent a hermitian, positive definite matrix.\n'
|
||||
'Alternatively, ``A`` can be a linear operator which can\n'
|
||||
'produce ``Ax`` using, e.g.,\n'
|
||||
'``scipy.sparse.linalg.LinearOperator``.')
|
||||
@non_reentrant()
|
||||
def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
|
||||
A, M, x, b, postprocess = make_system(A, M, x0, b)
|
||||
|
||||
n = len(b)
|
||||
if maxiter is None:
|
||||
maxiter = n*10
|
||||
|
||||
matvec = A.matvec
|
||||
psolve = M.matvec
|
||||
ltr = _type_conv[x.dtype.char]
|
||||
revcom = getattr(_iterative, ltr + 'cgrevcom')
|
||||
|
||||
get_residual = lambda: np.linalg.norm(matvec(x) - b)
|
||||
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cg')
|
||||
if atol == 'exit':
|
||||
return postprocess(x), 0
|
||||
|
||||
resid = atol
|
||||
ndx1 = 1
|
||||
ndx2 = -1
|
||||
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
|
||||
work = _aligned_zeros(4*n,dtype=x.dtype)
|
||||
ijob = 1
|
||||
info = 0
|
||||
ftflag = True
|
||||
iter_ = maxiter
|
||||
while True:
|
||||
olditer = iter_
|
||||
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
|
||||
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
|
||||
if callback is not None and iter_ > olditer:
|
||||
callback(x)
|
||||
slice1 = slice(ndx1-1, ndx1-1+n)
|
||||
slice2 = slice(ndx2-1, ndx2-1+n)
|
||||
if (ijob == -1):
|
||||
if callback is not None:
|
||||
callback(x)
|
||||
break
|
||||
elif (ijob == 1):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(work[slice1])
|
||||
elif (ijob == 2):
|
||||
work[slice1] = psolve(work[slice2])
|
||||
elif (ijob == 3):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(x)
|
||||
elif (ijob == 4):
|
||||
if ftflag:
|
||||
info = -1
|
||||
ftflag = False
|
||||
resid, info = _stoptest(work[slice1], atol)
|
||||
if info == 1 and iter_ > 1:
|
||||
# recompute residual and recheck, to avoid
|
||||
# accumulating rounding error
|
||||
work[slice1] = b - matvec(x)
|
||||
resid, info = _stoptest(work[slice1], atol)
|
||||
ijob = 2
|
||||
|
||||
if info > 0 and iter_ == maxiter and not (resid <= atol):
|
||||
# info isn't set appropriately otherwise
|
||||
info = iter_
|
||||
|
||||
return postprocess(x), info
|
||||
|
||||
|
||||
@set_docstring('Use Conjugate Gradient Squared iteration to solve ``Ax = b``.',
|
||||
'The real-valued N-by-N matrix of the linear system.\n'
|
||||
'Alternatively, ``A`` can be a linear operator which can\n'
|
||||
'produce ``Ax`` using, e.g.,\n'
|
||||
'``scipy.sparse.linalg.LinearOperator``.')
|
||||
@non_reentrant()
|
||||
def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
|
||||
A, M, x, b, postprocess = make_system(A, M, x0, b)
|
||||
|
||||
n = len(b)
|
||||
if maxiter is None:
|
||||
maxiter = n*10
|
||||
|
||||
matvec = A.matvec
|
||||
psolve = M.matvec
|
||||
ltr = _type_conv[x.dtype.char]
|
||||
revcom = getattr(_iterative, ltr + 'cgsrevcom')
|
||||
|
||||
get_residual = lambda: np.linalg.norm(matvec(x) - b)
|
||||
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cgs')
|
||||
if atol == 'exit':
|
||||
return postprocess(x), 0
|
||||
|
||||
resid = atol
|
||||
ndx1 = 1
|
||||
ndx2 = -1
|
||||
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
|
||||
work = _aligned_zeros(7*n,dtype=x.dtype)
|
||||
ijob = 1
|
||||
info = 0
|
||||
ftflag = True
|
||||
iter_ = maxiter
|
||||
while True:
|
||||
olditer = iter_
|
||||
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
|
||||
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
|
||||
if callback is not None and iter_ > olditer:
|
||||
callback(x)
|
||||
slice1 = slice(ndx1-1, ndx1-1+n)
|
||||
slice2 = slice(ndx2-1, ndx2-1+n)
|
||||
if (ijob == -1):
|
||||
if callback is not None:
|
||||
callback(x)
|
||||
break
|
||||
elif (ijob == 1):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(work[slice1])
|
||||
elif (ijob == 2):
|
||||
work[slice1] = psolve(work[slice2])
|
||||
elif (ijob == 3):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(x)
|
||||
elif (ijob == 4):
|
||||
if ftflag:
|
||||
info = -1
|
||||
ftflag = False
|
||||
resid, info = _stoptest(work[slice1], atol)
|
||||
if info == 1 and iter_ > 1:
|
||||
# recompute residual and recheck, to avoid
|
||||
# accumulating rounding error
|
||||
work[slice1] = b - matvec(x)
|
||||
resid, info = _stoptest(work[slice1], atol)
|
||||
ijob = 2
|
||||
|
||||
if info == -10:
|
||||
# termination due to breakdown: check for convergence
|
||||
resid, ok = _stoptest(b - matvec(x), atol)
|
||||
if ok:
|
||||
info = 0
|
||||
|
||||
if info > 0 and iter_ == maxiter and not (resid <= atol):
|
||||
# info isn't set appropriately otherwise
|
||||
info = iter_
|
||||
|
||||
return postprocess(x), info
|
||||
|
||||
|
||||
@non_reentrant()
|
||||
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None,
|
||||
restrt=None, atol=None, callback_type=None):
|
||||
"""
|
||||
Use Generalized Minimal RESidual iteration to solve ``Ax = b``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : {sparse matrix, dense matrix, LinearOperator}
|
||||
The real or complex N-by-N matrix of the linear system.
|
||||
Alternatively, ``A`` can be a linear operator which can
|
||||
produce ``Ax`` using, e.g.,
|
||||
``scipy.sparse.linalg.LinearOperator``.
|
||||
b : {array, matrix}
|
||||
Right hand side of the linear system. Has shape (N,) or (N,1).
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : {array, matrix}
|
||||
The converged solution.
|
||||
info : int
|
||||
Provides convergence information:
|
||||
* 0 : successful exit
|
||||
* >0 : convergence to tolerance not achieved, number of iterations
|
||||
* <0 : illegal input or breakdown
|
||||
|
||||
Other parameters
|
||||
----------------
|
||||
x0 : {array, matrix}
|
||||
Starting guess for the solution (a vector of zeros by default).
|
||||
tol, atol : float, optional
|
||||
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
|
||||
The default for ``atol`` is ``'legacy'``, which emulates
|
||||
a different legacy behavior.
|
||||
|
||||
.. warning::
|
||||
|
||||
The default value for `atol` will be changed in a future release.
|
||||
For future compatibility, specify `atol` explicitly.
|
||||
restart : int, optional
|
||||
Number of iterations between restarts. Larger values increase
|
||||
iteration cost, but may be necessary for convergence.
|
||||
Default is 20.
|
||||
maxiter : int, optional
|
||||
Maximum number of iterations (restart cycles). Iteration will stop
|
||||
after maxiter steps even if the specified tolerance has not been
|
||||
achieved.
|
||||
M : {sparse matrix, dense matrix, LinearOperator}
|
||||
Inverse of the preconditioner of A. M should approximate the
|
||||
inverse of A and be easy to solve for (see Notes). Effective
|
||||
preconditioning dramatically improves the rate of convergence,
|
||||
which implies that fewer iterations are needed to reach a given
|
||||
error tolerance. By default, no preconditioner is used.
|
||||
callback : function
|
||||
User-supplied function to call after each iteration. It is called
|
||||
as `callback(args)`, where `args` are selected by `callback_type`.
|
||||
callback_type : {'x', 'pr_norm', 'legacy'}, optional
|
||||
Callback function argument requested:
|
||||
- ``x``: current iterate (ndarray), called on every restart
|
||||
- ``pr_norm``: relative (preconditioned) residual norm (float),
|
||||
called on every inner iteration
|
||||
- ``legacy`` (default): same as ``pr_norm``, but also changes the
|
||||
meaning of 'maxiter' to count inner iterations instead of restart
|
||||
cycles.
|
||||
restrt : int, optional
|
||||
DEPRECATED - use `restart` instead.
|
||||
|
||||
See Also
|
||||
--------
|
||||
LinearOperator
|
||||
|
||||
Notes
|
||||
-----
|
||||
A preconditioner, P, is chosen such that P is close to A but easy to solve
|
||||
for. The preconditioner parameter required by this routine is
|
||||
``M = P^-1``. The inverse should preferably not be calculated
|
||||
explicitly. Rather, use the following template to produce M::
|
||||
|
||||
# Construct a linear operator that computes P^-1 * x.
|
||||
import scipy.sparse.linalg as spla
|
||||
M_x = lambda x: spla.spsolve(P, x)
|
||||
M = spla.LinearOperator((n, n), M_x)
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import csc_matrix
|
||||
>>> from scipy.sparse.linalg import gmres
|
||||
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
|
||||
>>> b = np.array([2, 4, -1], dtype=float)
|
||||
>>> x, exitCode = gmres(A, b)
|
||||
>>> print(exitCode) # 0 indicates successful convergence
|
||||
0
|
||||
>>> np.allclose(A.dot(x), b)
|
||||
True
|
||||
"""
|
||||
|
||||
# Change 'restrt' keyword to 'restart'
|
||||
if restrt is None:
|
||||
restrt = restart
|
||||
elif restart is not None:
|
||||
raise ValueError("Cannot specify both restart and restrt keywords. "
|
||||
"Preferably use 'restart' only.")
|
||||
|
||||
if callback is not None and callback_type is None:
|
||||
# Warn about 'callback_type' semantic changes.
|
||||
# Probably should be removed only in far future, Scipy 2.0 or so.
|
||||
warnings.warn("scipy.sparse.linalg.gmres called without specifying `callback_type`. "
|
||||
"The default value will be changed in a future release. "
|
||||
"For compatibility, specify a value for `callback_type` explicitly, e.g., "
|
||||
"``{name}(..., callback_type='pr_norm')``, or to retain the old behavior "
|
||||
"``{name}(..., callback_type='legacy')``",
|
||||
category=DeprecationWarning, stacklevel=3)
|
||||
|
||||
if callback_type is None:
|
||||
callback_type = 'legacy'
|
||||
|
||||
if callback_type not in ('x', 'pr_norm', 'legacy'):
|
||||
raise ValueError("Unknown callback_type: {!r}".format(callback_type))
|
||||
|
||||
if callback is None:
|
||||
callback_type = 'none'
|
||||
|
||||
A, M, x, b,postprocess = make_system(A, M, x0, b)
|
||||
|
||||
n = len(b)
|
||||
if maxiter is None:
|
||||
maxiter = n*10
|
||||
|
||||
if restrt is None:
|
||||
restrt = 20
|
||||
restrt = min(restrt, n)
|
||||
|
||||
matvec = A.matvec
|
||||
psolve = M.matvec
|
||||
ltr = _type_conv[x.dtype.char]
|
||||
revcom = getattr(_iterative, ltr + 'gmresrevcom')
|
||||
|
||||
bnrm2 = np.linalg.norm(b)
|
||||
Mb_nrm2 = np.linalg.norm(psolve(b))
|
||||
get_residual = lambda: np.linalg.norm(matvec(x) - b)
|
||||
atol = _get_atol(tol, atol, bnrm2, get_residual, 'gmres')
|
||||
if atol == 'exit':
|
||||
return postprocess(x), 0
|
||||
|
||||
if bnrm2 == 0:
|
||||
return postprocess(b), 0
|
||||
|
||||
# Tolerance passed to GMRESREVCOM applies to the inner iteration
|
||||
# and deals with the left-preconditioned residual.
|
||||
ptol_max_factor = 1.0
|
||||
ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2)
|
||||
resid = np.nan
|
||||
presid = np.nan
|
||||
ndx1 = 1
|
||||
ndx2 = -1
|
||||
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
|
||||
work = _aligned_zeros((6+restrt)*n,dtype=x.dtype)
|
||||
work2 = _aligned_zeros((restrt+1)*(2*restrt+2),dtype=x.dtype)
|
||||
ijob = 1
|
||||
info = 0
|
||||
ftflag = True
|
||||
iter_ = maxiter
|
||||
old_ijob = ijob
|
||||
first_pass = True
|
||||
resid_ready = False
|
||||
iter_num = 1
|
||||
while True:
|
||||
olditer = iter_
|
||||
x, iter_, presid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
|
||||
revcom(b, x, restrt, work, work2, iter_, presid, info, ndx1, ndx2, ijob, ptol)
|
||||
if callback_type == 'x' and iter_ != olditer:
|
||||
callback(x)
|
||||
slice1 = slice(ndx1-1, ndx1-1+n)
|
||||
slice2 = slice(ndx2-1, ndx2-1+n)
|
||||
if (ijob == -1): # gmres success, update last residual
|
||||
if callback_type in ('pr_norm', 'legacy'):
|
||||
if resid_ready:
|
||||
callback(presid / bnrm2)
|
||||
elif callback_type == 'x':
|
||||
callback(x)
|
||||
break
|
||||
elif (ijob == 1):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(x)
|
||||
elif (ijob == 2):
|
||||
work[slice1] = psolve(work[slice2])
|
||||
if not first_pass and old_ijob == 3:
|
||||
resid_ready = True
|
||||
|
||||
first_pass = False
|
||||
elif (ijob == 3):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*matvec(work[slice1])
|
||||
if resid_ready:
|
||||
if callback_type in ('pr_norm', 'legacy'):
|
||||
callback(presid / bnrm2)
|
||||
resid_ready = False
|
||||
iter_num = iter_num+1
|
||||
|
||||
elif (ijob == 4):
|
||||
if ftflag:
|
||||
info = -1
|
||||
ftflag = False
|
||||
resid, info = _stoptest(work[slice1], atol)
|
||||
|
||||
# Inner loop tolerance control
|
||||
if info or presid > ptol:
|
||||
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
|
||||
else:
|
||||
# Inner loop tolerance OK, but outer loop not.
|
||||
ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
|
||||
|
||||
if resid != 0:
|
||||
ptol = presid * min(ptol_max_factor, atol / resid)
|
||||
else:
|
||||
ptol = presid * ptol_max_factor
|
||||
|
||||
old_ijob = ijob
|
||||
ijob = 2
|
||||
|
||||
if callback_type == 'legacy':
|
||||
# Legacy behavior
|
||||
if iter_num > maxiter:
|
||||
info = maxiter
|
||||
break
|
||||
|
||||
if info >= 0 and not (resid <= atol):
|
||||
# info isn't set appropriately otherwise
|
||||
info = maxiter
|
||||
|
||||
return postprocess(x), info
|
||||
|
||||
|
||||
@non_reentrant()
|
||||
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None,
|
||||
atol=None):
|
||||
"""Use Quasi-Minimal Residual iteration to solve ``Ax = b``.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : {sparse matrix, dense matrix, LinearOperator}
|
||||
The real-valued N-by-N matrix of the linear system.
|
||||
Alternatively, ``A`` can be a linear operator which can
|
||||
produce ``Ax`` and ``A^T x`` using, e.g.,
|
||||
``scipy.sparse.linalg.LinearOperator``.
|
||||
b : {array, matrix}
|
||||
Right hand side of the linear system. Has shape (N,) or (N,1).
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : {array, matrix}
|
||||
The converged solution.
|
||||
info : integer
|
||||
Provides convergence information:
|
||||
0 : successful exit
|
||||
>0 : convergence to tolerance not achieved, number of iterations
|
||||
<0 : illegal input or breakdown
|
||||
|
||||
Other Parameters
|
||||
----------------
|
||||
x0 : {array, matrix}
|
||||
Starting guess for the solution.
|
||||
tol, atol : float, optional
|
||||
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
|
||||
The default for ``atol`` is ``'legacy'``, which emulates
|
||||
a different legacy behavior.
|
||||
|
||||
.. warning::
|
||||
|
||||
The default value for `atol` will be changed in a future release.
|
||||
For future compatibility, specify `atol` explicitly.
|
||||
maxiter : integer
|
||||
Maximum number of iterations. Iteration will stop after maxiter
|
||||
steps even if the specified tolerance has not been achieved.
|
||||
M1 : {sparse matrix, dense matrix, LinearOperator}
|
||||
Left preconditioner for A.
|
||||
M2 : {sparse matrix, dense matrix, LinearOperator}
|
||||
Right preconditioner for A. Used together with the left
|
||||
preconditioner M1. The matrix M1*A*M2 should be better
conditioned than A alone.
|
||||
callback : function
|
||||
User-supplied function to call after each iteration. It is called
|
||||
as callback(xk), where xk is the current solution vector.
|
||||
|
||||
See Also
|
||||
--------
|
||||
LinearOperator
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import csc_matrix
|
||||
>>> from scipy.sparse.linalg import qmr
|
||||
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
|
||||
>>> b = np.array([2, 4, -1], dtype=float)
|
||||
>>> x, exitCode = qmr(A, b)
|
||||
>>> print(exitCode) # 0 indicates successful convergence
|
||||
0
|
||||
>>> np.allclose(A.dot(x), b)
|
||||
True
|
||||
"""
|
||||
A_ = A
|
||||
A, M, x, b, postprocess = make_system(A, None, x0, b)
|
||||
|
||||
if M1 is None and M2 is None:
|
||||
if hasattr(A_,'psolve'):
|
||||
def left_psolve(b):
|
||||
return A_.psolve(b,'left')
|
||||
|
||||
def right_psolve(b):
|
||||
return A_.psolve(b,'right')
|
||||
|
||||
def left_rpsolve(b):
|
||||
return A_.rpsolve(b,'left')
|
||||
|
||||
def right_rpsolve(b):
|
||||
return A_.rpsolve(b,'right')
|
||||
M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
|
||||
M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
|
||||
else:
|
||||
def id(b):
|
||||
return b
|
||||
M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
|
||||
M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)
|
||||
|
||||
n = len(b)
|
||||
if maxiter is None:
|
||||
maxiter = n*10
|
||||
|
||||
ltr = _type_conv[x.dtype.char]
|
||||
revcom = getattr(_iterative, ltr + 'qmrrevcom')
|
||||
|
||||
get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
|
||||
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
|
||||
if atol == 'exit':
|
||||
return postprocess(x), 0
|
||||
|
||||
resid = atol
|
||||
ndx1 = 1
|
||||
ndx2 = -1
|
||||
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
|
||||
work = _aligned_zeros(11*n,x.dtype)
|
||||
ijob = 1
|
||||
info = 0
|
||||
ftflag = True
|
||||
iter_ = maxiter
|
||||
while True:
|
||||
olditer = iter_
|
||||
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
|
||||
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
|
||||
if callback is not None and iter_ > olditer:
|
||||
callback(x)
|
||||
slice1 = slice(ndx1-1, ndx1-1+n)
|
||||
slice2 = slice(ndx2-1, ndx2-1+n)
|
||||
if (ijob == -1):
|
||||
if callback is not None:
|
||||
callback(x)
|
||||
break
|
||||
elif (ijob == 1):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*A.matvec(work[slice1])
|
||||
elif (ijob == 2):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*A.rmatvec(work[slice1])
|
||||
elif (ijob == 3):
|
||||
work[slice1] = M1.matvec(work[slice2])
|
||||
elif (ijob == 4):
|
||||
work[slice1] = M2.matvec(work[slice2])
|
||||
elif (ijob == 5):
|
||||
work[slice1] = M1.rmatvec(work[slice2])
|
||||
elif (ijob == 6):
|
||||
work[slice1] = M2.rmatvec(work[slice2])
|
||||
elif (ijob == 7):
|
||||
work[slice2] *= sclr2
|
||||
work[slice2] += sclr1*A.matvec(x)
|
||||
elif (ijob == 8):
|
||||
if ftflag:
|
||||
info = -1
|
||||
ftflag = False
|
||||
resid, info = _stoptest(work[slice1], atol)
|
||||
ijob = 2
|
||||
|
||||
if info > 0 and iter_ == maxiter and not (resid <= atol):
|
||||
# info isn't set appropriately otherwise
|
||||
info = iter_
|
||||
|
||||
return postprocess(x), info
|
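The ``gmres`` docstring above sketches a preconditioner template with ``M = P^-1``. One concrete, hedged reading of that template builds M from an incomplete LU factorization via ``scipy.sparse.linalg.spilu``; the test matrix and ``drop_tol`` value below are illustrative assumptions, not part of this commit::

    import numpy as np
    from scipy.sparse import eye, random as sparse_random
    from scipy.sparse.linalg import LinearOperator, gmres, spilu

    n = 500
    A = (sparse_random(n, n, density=0.005) + 10.0 * eye(n)).tocsc()  # assumed test matrix
    b = np.ones(n)

    ilu = spilu(A, drop_tol=1e-5)                 # approximate factorization P of A
    M = LinearOperator((n, n), matvec=ilu.solve)  # applies P^-1, i.e. the docstring's M
    x, info = gmres(A, b, M=M, restart=30, atol=1e-10)
    # info == 0 indicates convergence; an unpreconditioned call usually needs more iterations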
232 venv/Lib/site-packages/scipy/sparse/linalg/isolve/lgmres.py Normal file
@@ -0,0 +1,232 @@
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
|
||||
# Distributed under the same license as SciPy.
|
||||
|
||||
import warnings
|
||||
import numpy as np
|
||||
from numpy.linalg import LinAlgError
|
||||
from scipy.linalg import get_blas_funcs
|
||||
from .utils import make_system
|
||||
|
||||
from ._gcrotmk import _fgmres
|
||||
|
||||
__all__ = ['lgmres']
|
||||
|
||||
|
||||
def lgmres(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
|
||||
inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True,
|
||||
prepend_outer_v=False, atol=None):
|
||||
"""
|
||||
Solve a matrix equation using the LGMRES algorithm.
|
||||
|
||||
The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
|
||||
in the convergence in restarted GMRES, and often converges in fewer
|
||||
iterations.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : {sparse matrix, dense matrix, LinearOperator}
|
||||
The real or complex N-by-N matrix of the linear system.
|
||||
Alternatively, ``A`` can be a linear operator which can
|
||||
produce ``Ax`` using, e.g.,
|
||||
``scipy.sparse.linalg.LinearOperator``.
|
||||
b : {array, matrix}
|
||||
Right hand side of the linear system. Has shape (N,) or (N,1).
|
||||
x0 : {array, matrix}
|
||||
Starting guess for the solution.
|
||||
tol, atol : float, optional
|
||||
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
|
||||
The default for ``atol`` is `tol`.
|
||||
|
||||
.. warning::
|
||||
|
||||
The default value for `atol` will be changed in a future release.
|
||||
For future compatibility, specify `atol` explicitly.
|
||||
maxiter : int, optional
|
||||
Maximum number of iterations. Iteration will stop after maxiter
|
||||
steps even if the specified tolerance has not been achieved.
|
||||
M : {sparse matrix, dense matrix, LinearOperator}, optional
|
||||
Preconditioner for A. The preconditioner should approximate the
|
||||
inverse of A. Effective preconditioning dramatically improves the
|
||||
rate of convergence, which implies that fewer iterations are needed
|
||||
to reach a given error tolerance.
|
||||
callback : function, optional
|
||||
User-supplied function to call after each iteration. It is called
|
||||
as callback(xk), where xk is the current solution vector.
|
||||
inner_m : int, optional
|
||||
Number of inner GMRES iterations per each outer iteration.
|
||||
outer_k : int, optional
|
||||
Number of vectors to carry between inner GMRES iterations.
|
||||
According to [1]_, good values are in the range of 1...3.
|
||||
However, note that if you want to use the additional vectors to
|
||||
accelerate solving multiple similar problems, larger values may
|
||||
be beneficial.
|
||||
outer_v : list of tuples, optional
|
||||
List containing tuples ``(v, Av)`` of vectors and corresponding
|
||||
matrix-vector products, used to augment the Krylov subspace, and
|
||||
carried between inner GMRES iterations. The element ``Av`` can
|
||||
be `None` if the matrix-vector product should be re-evaluated.
|
||||
This parameter is modified in-place by `lgmres`, and can be used
|
||||
to pass "guess" vectors in and out of the algorithm when solving
|
||||
similar problems.
|
||||
store_outer_Av : bool, optional
|
||||
Whether LGMRES should store also A*v in addition to vectors `v`
|
||||
in the `outer_v` list. Default is True.
|
||||
prepend_outer_v : bool, optional
|
||||
Whether to put outer_v augmentation vectors before Krylov iterates.
|
||||
In standard LGMRES, prepend_outer_v=False.
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : array or matrix
|
||||
The converged solution.
|
||||
info : int
|
||||
Provides convergence information:
|
||||
|
||||
- 0 : successful exit
|
||||
- >0 : convergence to tolerance not achieved, number of iterations
|
||||
- <0 : illegal input or breakdown
|
||||
|
||||
Notes
|
||||
-----
|
||||
The LGMRES algorithm [1]_ [2]_ is designed to avoid the
|
||||
slowing of convergence in restarted GMRES, due to alternating
|
||||
residual vectors. Typically, it often outperforms GMRES(m) of
|
||||
comparable memory requirements by some measure, or at least is not
|
||||
much worse.
|
||||
|
||||
Another advantage in this algorithm is that you can supply it with
|
||||
'guess' vectors in the `outer_v` argument that augment the Krylov
|
||||
subspace. If the solution lies close to the span of these vectors,
|
||||
the algorithm converges faster. This can be useful if several very
|
||||
similar matrices need to be inverted one after another, such as in
|
||||
Newton-Krylov iteration where the Jacobian matrix often changes
|
||||
little in the nonlinear steps.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
|
||||
Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
|
||||
Anal. Appl. 26, 962 (2005).
|
||||
.. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
|
||||
restarted GMRES", PhD thesis, University of Colorado (2003).
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import csc_matrix
|
||||
>>> from scipy.sparse.linalg import lgmres
|
||||
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
|
||||
>>> b = np.array([2, 4, -1], dtype=float)
|
||||
>>> x, exitCode = lgmres(A, b)
|
||||
>>> print(exitCode) # 0 indicates successful convergence
|
||||
0
|
||||
>>> np.allclose(A.dot(x), b)
|
||||
True
|
||||
"""
|
||||
A,M,x,b,postprocess = make_system(A,M,x0,b)
|
||||
|
||||
if not np.isfinite(b).all():
|
||||
raise ValueError("RHS must contain only finite numbers")
|
||||
|
||||
if atol is None:
|
||||
warnings.warn("scipy.sparse.linalg.lgmres called without specifying `atol`. "
|
||||
"The default value will change in the future. To preserve "
|
||||
"current behavior, set ``atol=tol``.",
|
||||
category=DeprecationWarning, stacklevel=2)
|
||||
atol = tol
|
||||
|
||||
matvec = A.matvec
|
||||
psolve = M.matvec
|
||||
|
||||
if outer_v is None:
|
||||
outer_v = []
|
||||
|
||||
axpy, dot, scal = None, None, None
|
||||
nrm2 = get_blas_funcs('nrm2', [b])
|
||||
|
||||
b_norm = nrm2(b)
|
||||
ptol_max_factor = 1.0
|
||||
|
||||
for k_outer in range(maxiter):
|
||||
r_outer = matvec(x) - b
|
||||
|
||||
# -- callback
|
||||
if callback is not None:
|
||||
callback(x)
|
||||
|
||||
# -- determine input type routines
|
||||
if axpy is None:
|
||||
if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
|
||||
x = x.astype(r_outer.dtype)
|
||||
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
|
||||
(x, r_outer))
|
||||
|
||||
# -- check stopping condition
|
||||
r_norm = nrm2(r_outer)
|
||||
if r_norm <= max(atol, tol * b_norm):
|
||||
break
|
||||
|
||||
# -- inner LGMRES iteration
|
||||
v0 = -psolve(r_outer)
|
||||
inner_res_0 = nrm2(v0)
|
||||
|
||||
if inner_res_0 == 0:
|
||||
rnorm = nrm2(r_outer)
|
||||
raise RuntimeError("Preconditioner returned a zero vector; "
|
||||
"|v| ~ %.1g, |M v| = 0" % rnorm)
|
||||
|
||||
v0 = scal(1.0/inner_res_0, v0)
|
||||
|
||||
ptol = min(ptol_max_factor, max(atol, tol*b_norm)/r_norm)
|
||||
|
||||
try:
|
||||
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
|
||||
v0,
|
||||
inner_m,
|
||||
lpsolve=psolve,
|
||||
atol=ptol,
|
||||
outer_v=outer_v,
|
||||
prepend_outer_v=prepend_outer_v)
|
||||
y *= inner_res_0
|
||||
if not np.isfinite(y).all():
|
||||
# Overflow etc. in computation. There's no way to
|
||||
# recover from this, so we have to bail out.
|
||||
raise LinAlgError()
|
||||
except LinAlgError:
|
||||
# Floating point over/underflow, non-finite result from
|
||||
# matmul etc. -- report failure.
|
||||
return postprocess(x), k_outer + 1
|
||||
|
||||
# Inner loop tolerance control
|
||||
if pres > ptol:
|
||||
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
|
||||
else:
|
||||
ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
|
||||
|
||||
# -- GMRES terminated: eval solution
|
||||
dx = zs[0]*y[0]
|
||||
for w, yc in zip(zs[1:], y[1:]):
|
||||
dx = axpy(w, dx, dx.shape[0], yc) # dx += w*yc
|
||||
|
||||
# -- Store LGMRES augmentation vectors
|
||||
nx = nrm2(dx)
|
||||
if nx > 0:
|
||||
if store_outer_Av:
|
||||
q = Q.dot(R.dot(y))
|
||||
ax = vs[0]*q[0]
|
||||
for v, qc in zip(vs[1:], q[1:]):
|
||||
ax = axpy(v, ax, ax.shape[0], qc)
|
||||
outer_v.append((dx/nx, ax/nx))
|
||||
else:
|
||||
outer_v.append((dx/nx, None))
|
||||
|
||||
# -- Retain only a finite number of augmentation vectors
|
||||
while len(outer_v) > outer_k:
|
||||
del outer_v[0]
|
||||
|
||||
# -- Apply step
|
||||
x += dx
|
||||
else:
|
||||
# didn't converge ...
|
||||
return postprocess(x), maxiter
|
||||
|
||||
return postprocess(x), 0
|
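The ``lgmres`` docstring above describes passing "guess" vectors through ``outer_v`` when solving a sequence of similar systems (e.g. Newton-Krylov steps). A small sketch of that reuse, with an assumed test matrix that is not part of the commit::

    import numpy as np
    from scipy.sparse import eye, random as sparse_random
    from scipy.sparse.linalg import lgmres

    n = 300
    A = sparse_random(n, n, density=0.01) + 5.0 * eye(n)  # assumed test matrix
    b = np.ones(n)

    outer_v = []  # filled in place by lgmres with (v, Av) augmentation pairs
    x1, info1 = lgmres(A, b, atol=1e-8, outer_v=outer_v, store_outer_Av=True)
    # A nearby system can reuse the collected augmentation vectors and typically
    # converges in fewer outer iterations.
    x2, info2 = lgmres(A + 0.01 * eye(n), b, atol=1e-8, outer_v=outer_v)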
480 venv/Lib/site-packages/scipy/sparse/linalg/isolve/lsmr.py Normal file
@@ -0,0 +1,480 @@
"""
|
||||
Copyright (C) 2010 David Fong and Michael Saunders
|
||||
|
||||
LSMR uses an iterative method.
|
||||
|
||||
07 Jun 2010: Documentation updated
|
||||
03 Jun 2010: First release version in Python
|
||||
|
||||
David Chin-lung Fong clfong@stanford.edu
|
||||
Institute for Computational and Mathematical Engineering
|
||||
Stanford University
|
||||
|
||||
Michael Saunders saunders@stanford.edu
|
||||
Systems Optimization Laboratory
|
||||
Dept of MS&E, Stanford University.
|
||||
|
||||
"""
|
||||
|
||||
__all__ = ['lsmr']
|
||||
|
||||
from numpy import zeros, infty, atleast_1d, result_type
|
||||
from numpy.linalg import norm
|
||||
from math import sqrt
|
||||
from scipy.sparse.linalg.interface import aslinearoperator
|
||||
|
||||
from .lsqr import _sym_ortho
|
||||
|
||||
|
||||
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
|
||||
maxiter=None, show=False, x0=None):
|
||||
"""Iterative solver for least-squares problems.
|
||||
|
||||
lsmr solves the system of linear equations ``Ax = b``. If the system
|
||||
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
|
||||
A is a rectangular matrix of dimension m-by-n, where all cases are
|
||||
allowed: m = n, m > n, or m < n. B is a vector of length m.
|
||||
The matrix A may be dense or sparse (usually sparse).
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : {matrix, sparse matrix, ndarray, LinearOperator}
|
||||
Matrix A in the linear system.
|
||||
Alternatively, ``A`` can be a linear operator which can
|
||||
produce ``Ax`` and ``A^H x`` using, e.g.,
|
||||
``scipy.sparse.linalg.LinearOperator``.
|
||||
b : array_like, shape (m,)
|
||||
Vector b in the linear system.
|
||||
damp : float
|
||||
Damping factor for regularized least-squares. `lsmr` solves
|
||||
the regularized least-squares problem::
|
||||
|
||||
min ||(b) - ( A )x||
|
||||
||(0) (damp*I) ||_2
|
||||
|
||||
where damp is a scalar. If damp is None or 0, the system
|
||||
is solved without regularization.
|
||||
atol, btol : float, optional
|
||||
Stopping tolerances. `lsmr` continues iterations until a
|
||||
certain backward error estimate is smaller than some quantity
|
||||
depending on atol and btol. Let ``r = b - Ax`` be the
|
||||
residual vector for the current approximate solution ``x``.
|
||||
If ``Ax = b`` seems to be consistent, ``lsmr`` terminates
|
||||
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
|
||||
Otherwise, lsmr terminates when ``norm(A^H r) <=
|
||||
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (say),
|
||||
the final ``norm(r)`` should be accurate to about 6
|
||||
digits. (The final x will usually have fewer correct digits,
|
||||
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
|
||||
or `btol` is None, a default value of 1.0e-6 will be used.
|
||||
Ideally, they should be estimates of the relative error in the
|
||||
entries of A and B respectively. For example, if the entries
|
||||
of `A` have 7 correct digits, set atol = 1e-7. This prevents
|
||||
the algorithm from doing unnecessary work beyond the
|
||||
uncertainty of the input data.
|
||||
conlim : float, optional
|
||||
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
|
||||
`conlim`. For compatible systems ``Ax = b``, conlim could be
|
||||
as large as 1.0e+12 (say). For least-squares problems,
|
||||
`conlim` should be less than 1.0e+8. If `conlim` is None, the
|
||||
default value is 1e+8. Maximum precision can be obtained by
|
||||
setting ``atol = btol = conlim = 0``, but the number of
|
||||
iterations may then be excessive.
|
||||
maxiter : int, optional
|
||||
`lsmr` terminates if the number of iterations reaches
|
||||
`maxiter`. The default is ``maxiter = min(m, n)``. For
|
||||
ill-conditioned systems, a larger value of `maxiter` may be
|
||||
needed.
|
||||
show : bool, optional
|
||||
Print iteration logs if ``show=True``.
|
||||
x0 : array_like, shape (n,), optional
|
||||
Initial guess of x, if None zeros are used.
|
||||
|
||||
.. versionadded:: 1.0.0
|
||||
Returns
|
||||
-------
|
||||
x : ndarray of float
|
||||
Least-squares solution returned.
|
||||
istop : int
|
||||
istop gives the reason for stopping::
|
||||
|
||||
istop = 0 means x=0 is a solution. If x0 was given, then x=x0 is a
|
||||
solution.
|
||||
= 1 means x is an approximate solution to A*x = B,
|
||||
according to atol and btol.
|
||||
= 2 means x approximately solves the least-squares problem
|
||||
according to atol.
|
||||
= 3 means COND(A) seems to be greater than CONLIM.
|
||||
= 4 is the same as 1 with atol = btol = eps (machine
|
||||
precision)
|
||||
= 5 is the same as 2 with atol = eps.
|
||||
= 6 is the same as 3 with CONLIM = 1/eps.
|
||||
= 7 means ITN reached maxiter before the other stopping
|
||||
conditions were satisfied.
|
||||
|
||||
itn : int
|
||||
Number of iterations used.
|
||||
normr : float
|
||||
``norm(b-Ax)``
|
||||
normar : float
|
||||
``norm(A^H (b - Ax))``
|
||||
norma : float
|
||||
``norm(A)``
|
||||
conda : float
|
||||
Condition number of A.
|
||||
normx : float
|
||||
``norm(x)``
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
.. versionadded:: 0.11.0
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] D. C.-L. Fong and M. A. Saunders,
|
||||
"LSMR: An iterative algorithm for sparse least-squares problems",
|
||||
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
|
||||
https://arxiv.org/abs/1006.0758
|
||||
.. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import csc_matrix
|
||||
>>> from scipy.sparse.linalg import lsmr
|
||||
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
|
||||
|
||||
The first example has the trivial solution `[0, 0]`
|
||||
|
||||
>>> b = np.array([0., 0., 0.], dtype=float)
|
||||
>>> x, istop, itn, normr = lsmr(A, b)[:4]
|
||||
>>> istop
|
||||
0
|
||||
>>> x
|
||||
array([ 0., 0.])
|
||||
|
||||
The stopping code `istop=0` indicates that a vector of zeros was
|
||||
found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
|
||||
The next example has a non-trivial solution:
|
||||
|
||||
>>> b = np.array([1., 0., -1.], dtype=float)
|
||||
>>> x, istop, itn, normr = lsmr(A, b)[:4]
|
||||
>>> istop
|
||||
1
|
||||
>>> x
|
||||
array([ 1., -1.])
|
||||
>>> itn
|
||||
1
|
||||
>>> normr
|
||||
4.440892098500627e-16
|
||||
|
||||
As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
|
||||
limits. The given solution `[1., -1.]` obviously solves the equation. The
|
||||
remaining return values include information about the number of iterations
|
||||
(`itn=1`) and the remaining difference between the left and right sides of the solved
|
||||
equation.
|
||||
The final example demonstrates the behavior in the case where there is no
|
||||
solution for the equation:
|
||||
|
||||
>>> b = np.array([1., 0.01, -1.], dtype=float)
|
||||
>>> x, istop, itn, normr = lsmr(A, b)[:4]
|
||||
>>> istop
|
||||
2
|
||||
>>> x
|
||||
array([ 1.00333333, -0.99666667])
|
||||
>>> A.dot(x)-b
|
||||
array([ 0.00333333, -0.00333333, 0.00333333])
|
||||
>>> normr
|
||||
0.005773502691896255
|
||||
|
||||
`istop` indicates that the system is inconsistent and thus `x` is rather an
|
||||
approximate solution to the corresponding least-squares problem. `normr`
|
||||
contains the minimal distance that was found.
|
||||
"""
|
||||
|
||||
A = aslinearoperator(A)
|
||||
b = atleast_1d(b)
|
||||
if b.ndim > 1:
|
||||
b = b.squeeze()
|
||||
|
||||
msg = ('The exact solution is x = 0, or x = x0, if x0 was given ',
|
||||
'Ax - b is small enough, given atol, btol ',
|
||||
'The least-squares solution is good enough, given atol ',
|
||||
'The estimate of cond(Abar) has exceeded conlim ',
|
||||
'Ax - b is small enough for this machine ',
|
||||
'The least-squares solution is good enough for this machine',
|
||||
'Cond(Abar) seems to be too large for this machine ',
|
||||
'The iteration limit has been reached ')
|
||||
|
||||
hdg1 = ' itn x(1) norm r norm Ar'
|
||||
hdg2 = ' compatible LS norm A cond A'
|
||||
pfreq = 20 # print frequency (for repeating the heading)
|
||||
pcount = 0 # print counter
|
||||
|
||||
m, n = A.shape
|
||||
|
||||
# stores the num of singular values
|
||||
minDim = min([m, n])
|
||||
|
||||
if maxiter is None:
|
||||
maxiter = minDim
|
||||
|
||||
if x0 is None:
|
||||
dtype = result_type(A, b, float)
|
||||
else:
|
||||
dtype = result_type(A, b, x0, float)
|
||||
|
||||
if show:
|
||||
print(' ')
|
||||
print('LSMR Least-squares solution of Ax = b\n')
|
||||
print(f'The matrix A has {m} rows and {n} columns')
|
||||
print('damp = %20.14e\n' % (damp))
|
||||
print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim))
|
||||
print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter))
|
||||
|
||||
u = b
|
||||
normb = norm(b)
|
||||
if x0 is None:
|
||||
x = zeros(n, dtype)
|
||||
beta = normb.copy()
|
||||
else:
|
||||
x = atleast_1d(x0)
|
||||
u = u - A.matvec(x)
|
||||
beta = norm(u)
|
||||
|
||||
if beta > 0:
|
||||
u = (1 / beta) * u
|
||||
v = A.rmatvec(u)
|
||||
alpha = norm(v)
|
||||
else:
|
||||
v = zeros(n, dtype)
|
||||
alpha = 0
|
||||
|
||||
if alpha > 0:
|
||||
v = (1 / alpha) * v
|
||||
|
||||
# Initialize variables for 1st iteration.
|
||||
|
||||
itn = 0
|
||||
zetabar = alpha * beta
|
||||
alphabar = alpha
|
||||
rho = 1
|
||||
rhobar = 1
|
||||
cbar = 1
|
||||
sbar = 0
|
||||
|
||||
h = v.copy()
|
||||
hbar = zeros(n, dtype)
|
||||
|
||||
# Initialize variables for estimation of ||r||.
|
||||
|
||||
betadd = beta
|
||||
betad = 0
|
||||
rhodold = 1
|
||||
tautildeold = 0
|
||||
thetatilde = 0
|
||||
zeta = 0
|
||||
d = 0
|
||||
|
||||
# Initialize variables for estimation of ||A|| and cond(A)
|
||||
|
||||
normA2 = alpha * alpha
|
||||
maxrbar = 0
|
||||
minrbar = 1e+100
|
||||
normA = sqrt(normA2)
|
||||
condA = 1
|
||||
normx = 0
|
||||
|
||||
# Items for use in stopping rules, normb set earlier
|
||||
istop = 0
|
||||
ctol = 0
|
||||
if conlim > 0:
|
||||
ctol = 1 / conlim
|
||||
normr = beta
|
||||
|
||||
# Reverse the order here from the original matlab code because
|
||||
# there was an error on return when arnorm==0
|
||||
normar = alpha * beta
|
||||
if normar == 0:
|
||||
if show:
|
||||
print(msg[0])
|
||||
return x, istop, itn, normr, normar, normA, condA, normx
|
||||
|
||||
if show:
|
||||
print(' ')
|
||||
print(hdg1, hdg2)
|
||||
test1 = 1
|
||||
test2 = alpha / beta
|
||||
str1 = '%6g %12.5e' % (itn, x[0])
|
||||
str2 = ' %10.3e %10.3e' % (normr, normar)
|
||||
str3 = ' %8.1e %8.1e' % (test1, test2)
|
||||
print(''.join([str1, str2, str3]))
|
||||
|
||||
# Main iteration loop.
|
||||
while itn < maxiter:
|
||||
itn = itn + 1
|
||||
|
||||
# Perform the next step of the bidiagonalization to obtain the
|
||||
# next beta, u, alpha, v. These satisfy the relations
|
||||
# beta*u = a*v - alpha*u,
|
||||
# alpha*v = A'*u - beta*v.
|
||||
|
||||
u *= -alpha
|
||||
u += A.matvec(v)
|
||||
beta = norm(u)
|
||||
|
||||
if beta > 0:
|
||||
u *= (1 / beta)
|
||||
v *= -beta
|
||||
v += A.rmatvec(u)
|
||||
alpha = norm(v)
|
||||
if alpha > 0:
|
||||
v *= (1 / alpha)
|
||||
|
||||
# At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.
|
||||
|
||||
# Construct rotation Qhat_{k,2k+1}.
|
||||
|
||||
chat, shat, alphahat = _sym_ortho(alphabar, damp)
|
||||
|
||||
# Use a plane rotation (Q_i) to turn B_i to R_i
|
||||
|
||||
rhoold = rho
|
||||
c, s, rho = _sym_ortho(alphahat, beta)
|
||||
thetanew = s*alpha
|
||||
alphabar = c*alpha
|
||||
|
||||
# Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
|
||||
|
||||
rhobarold = rhobar
|
||||
zetaold = zeta
|
||||
thetabar = sbar * rho
|
||||
rhotemp = cbar * rho
|
||||
cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
|
||||
zeta = cbar * zetabar
|
||||
zetabar = - sbar * zetabar
|
||||
|
||||
# Update h, h_hat, x.
|
||||
|
||||
hbar *= - (thetabar * rho / (rhoold * rhobarold))
|
||||
hbar += h
|
||||
x += (zeta / (rho * rhobar)) * hbar
|
||||
h *= - (thetanew / rho)
|
||||
h += v
|
||||
|
||||
# Estimate of ||r||.
|
||||
|
||||
# Apply rotation Qhat_{k,2k+1}.
|
||||
betaacute = chat * betadd
|
||||
betacheck = -shat * betadd
|
||||
|
||||
# Apply rotation Q_{k,k+1}.
|
||||
betahat = c * betaacute
|
||||
betadd = -s * betaacute
|
||||
|
||||
# Apply rotation Qtilde_{k-1}.
|
||||
# betad = betad_{k-1} here.
|
||||
|
||||
thetatildeold = thetatilde
|
||||
ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
|
||||
thetatilde = stildeold * rhobar
|
||||
rhodold = ctildeold * rhobar
|
||||
betad = - stildeold * betad + ctildeold * betahat
|
||||
|
||||
# betad = betad_k here.
|
||||
# rhodold = rhod_k here.
|
||||
|
||||
tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
|
||||
taud = (zeta - thetatilde * tautildeold) / rhodold
|
||||
d = d + betacheck * betacheck
|
||||
normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
|
||||
|
||||
# Estimate ||A||.
|
||||
normA2 = normA2 + beta * beta
|
||||
normA = sqrt(normA2)
|
||||
normA2 = normA2 + alpha * alpha
|
||||
|
||||
# Estimate cond(A).
|
||||
maxrbar = max(maxrbar, rhobarold)
|
||||
if itn > 1:
|
||||
minrbar = min(minrbar, rhobarold)
|
||||
condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)
|
||||
|
||||
# Test for convergence.
|
||||
|
||||
# Compute norms for convergence testing.
|
||||
normar = abs(zetabar)
|
||||
normx = norm(x)
|
||||
|
||||
# Now use these norms to estimate certain other quantities,
|
||||
# some of which will be small near a solution.
|
||||
|
||||
test1 = normr / normb
|
||||
if (normA * normr) != 0:
|
||||
test2 = normar / (normA * normr)
|
||||
else:
|
||||
test2 = infty
|
||||
test3 = 1 / condA
|
||||
t1 = test1 / (1 + normA * normx / normb)
|
||||
rtol = btol + atol * normA * normx / normb
|
||||
|
||||
# The following tests guard against extremely small values of
|
||||
# atol, btol or ctol. (The user may have set any or all of
|
||||
# the parameters atol, btol, conlim to 0.)
|
||||
# The effect is equivalent to the normal tests using
|
||||
# atol = eps, btol = eps, conlim = 1/eps.
|
||||
|
||||
if itn >= maxiter:
|
||||
istop = 7
|
||||
if 1 + test3 <= 1:
|
||||
istop = 6
|
||||
if 1 + test2 <= 1:
|
||||
istop = 5
|
||||
if 1 + t1 <= 1:
|
||||
istop = 4
|
||||
|
||||
# Allow for tolerances set by the user.
|
||||
|
||||
if test3 <= ctol:
|
||||
istop = 3
|
||||
if test2 <= atol:
|
||||
istop = 2
|
||||
if test1 <= rtol:
|
||||
istop = 1
|
||||
|
||||
# See if it is time to print something.
|
||||
|
||||
if show:
|
||||
if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
|
||||
(itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
|
||||
(test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
|
||||
(istop != 0):
|
||||
|
||||
if pcount >= pfreq:
|
||||
pcount = 0
|
||||
print(' ')
|
||||
print(hdg1, hdg2)
|
||||
pcount = pcount + 1
|
||||
str1 = '%6g %12.5e' % (itn, x[0])
|
||||
str2 = ' %10.3e %10.3e' % (normr, normar)
|
||||
str3 = ' %8.1e %8.1e' % (test1, test2)
|
||||
str4 = ' %8.1e %8.1e' % (normA, condA)
|
||||
print(''.join([str1, str2, str3, str4]))
|
||||
|
||||
if istop > 0:
|
||||
break
|
||||
|
||||
# Print the stopping condition.
|
||||
|
||||
if show:
|
||||
print(' ')
|
||||
print('LSMR finished')
|
||||
print(msg[istop])
|
||||
print('istop =%8g normr =%8.1e' % (istop, normr))
|
||||
print(' normA =%8.1e normAr =%8.1e' % (normA, normar))
|
||||
print('itn =%8g condA =%8.1e' % (itn, condA))
|
||||
print(' normx =%8.1e' % (normx))
|
||||
print(str1, str2)
|
||||
print(str3, str4)
|
||||
|
||||
return x, istop, itn, normr, normar, normA, condA, normx
|
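The damped least-squares formulation documented in the lsmr docstring above can be cross-checked against a dense solve of the stacked system ``[[A], [damp*I]]``. A minimal sketch, not part of the committed file; the random test problem and tolerances are invented for illustration:

# Sketch: lsmr with damp > 0 should match a dense least-squares solve of the
# augmented system [[A], [damp*I]] x ~= [[b], [0]].  Illustrative only.
import numpy as np
from scipy.sparse.linalg import lsmr

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 5))
b = rng.standard_normal(20)
damp = 0.5

x_lsmr = lsmr(A, b, damp=damp, atol=1e-12, btol=1e-12)[0]

# Equivalent dense formulation: stack damp*I under A and zeros under b.
A_aug = np.vstack([A, damp * np.eye(5)])
b_aug = np.concatenate([b, np.zeros(5)])
x_dense = np.linalg.lstsq(A_aug, b_aug, rcond=None)[0]

assert np.allclose(x_lsmr, x_dense)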
568
venv/Lib/site-packages/scipy/sparse/linalg/isolve/lsqr.py
Normal file
568
venv/Lib/site-packages/scipy/sparse/linalg/isolve/lsqr.py
Normal file
|
@@ -0,0 +1,568 @@
|
|||
"""Sparse Equations and Least Squares.
|
||||
|
||||
The original Fortran code was written by C. C. Paige and M. A. Saunders as
|
||||
described in
|
||||
|
||||
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
|
||||
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
|
||||
|
||||
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
|
||||
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
|
||||
|
||||
It is licensed under the following BSD license:
|
||||
|
||||
Copyright (c) 2006, Systems Optimization Laboratory
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following
|
||||
disclaimer in the documentation and/or other materials provided
|
||||
with the distribution.
|
||||
|
||||
* Neither the name of Stanford University nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
The Fortran code was translated to Python for use in CVXOPT by Jeffery
|
||||
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
|
||||
|
||||
Adapted for SciPy by Stefan van der Walt.
|
||||
|
||||
"""
|
||||
|
||||
__all__ = ['lsqr']
|
||||
|
||||
import numpy as np
|
||||
from math import sqrt
|
||||
from scipy.sparse.linalg.interface import aslinearoperator
|
||||
|
||||
eps = np.finfo(np.float64).eps
|
||||
|
||||
|
||||
def _sym_ortho(a, b):
|
||||
"""
|
||||
Stable implementation of Givens rotation.
|
||||
|
||||
Notes
|
||||
-----
|
||||
The routine 'SymOrtho' was added for numerical stability. This is
|
||||
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
|
||||
``1/eps`` in some important places (see, for example text following
|
||||
"Compute the next plane rotation Qk" in minres.py).
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
|
||||
and Least-Squares Problems", Dissertation,
|
||||
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
|
||||
|
||||
"""
|
||||
if b == 0:
|
||||
return np.sign(a), 0, abs(a)
|
||||
elif a == 0:
|
||||
return 0, np.sign(b), abs(b)
|
||||
elif abs(b) > abs(a):
|
||||
tau = a / b
|
||||
s = np.sign(b) / sqrt(1 + tau * tau)
|
||||
c = s * tau
|
||||
r = b / s
|
||||
else:
|
||||
tau = b / a
|
||||
c = np.sign(a) / sqrt(1+tau*tau)
|
||||
s = c * tau
|
||||
r = a / c
|
||||
return c, s, r
|
||||
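# Illustrative note (not part of the original file): the triple (c, s, r)
# returned by _sym_ortho defines a Givens rotation that zeroes the second
# component of (a, b):
#     [ c  s ] [a]   [r]
#     [ s -c ] [b] = [0],   with r = hypot(a, b).
# For example, _sym_ortho(3.0, 4.0) returns (0.6, 0.8, 5.0), and
#     0.6*3.0 + 0.8*4.0 == 5.0    (first component becomes r)
#     0.8*3.0 - 0.6*4.0 == 0.0    (second component is annihilated)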
|
||||
|
||||
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
|
||||
iter_lim=None, show=False, calc_var=False, x0=None):
|
||||
"""Find the least-squares solution to a large, sparse, linear system
|
||||
of equations.
|
||||
|
||||
The function solves ``Ax = b`` or ``min ||Ax - b||^2`` or
|
||||
``min ||Ax - b||^2 + d^2 ||x||^2``.
|
||||
|
||||
The matrix A may be square or rectangular (over-determined or
|
||||
under-determined), and may have any rank.
|
||||
|
||||
::
|
||||
|
||||
1. Unsymmetric equations -- solve A*x = b
|
||||
|
||||
2. Linear least squares -- solve A*x = b
|
||||
in the least-squares sense
|
||||
|
||||
3. Damped least squares -- solve ( A )*x = ( b )
|
||||
( damp*I ) ( 0 )
|
||||
in the least-squares sense
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : {sparse matrix, ndarray, LinearOperator}
|
||||
Representation of an m-by-n matrix.
|
||||
Alternatively, ``A`` can be a linear operator which can
|
||||
produce ``Ax`` and ``A^T x`` using, e.g.,
|
||||
``scipy.sparse.linalg.LinearOperator``.
|
||||
b : array_like, shape (m,)
|
||||
Right-hand side vector ``b``.
|
||||
damp : float
|
||||
Damping coefficient.
|
||||
atol, btol : float, optional
|
||||
Stopping tolerances. If both are 1.0e-9 (say), the final
|
||||
residual norm should be accurate to about 9 digits. (The
|
||||
final x will usually have fewer correct digits, depending on
|
||||
cond(A) and the size of damp.)
|
||||
conlim : float, optional
|
||||
Another stopping tolerance. lsqr terminates if an estimate of
|
||||
``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
|
||||
b``, `conlim` could be as large as 1.0e+12 (say). For
|
||||
least-squares problems, conlim should be less than 1.0e+8.
|
||||
Maximum precision can be obtained by setting ``atol = btol =
|
||||
conlim = zero``, but the number of iterations may then be
|
||||
excessive.
|
||||
iter_lim : int, optional
|
||||
Explicit limitation on number of iterations (for safety).
|
||||
show : bool, optional
|
||||
Display an iteration log.
|
||||
calc_var : bool, optional
|
||||
Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
|
||||
x0 : array_like, shape (n,), optional
|
||||
Initial guess of x, if None zeros are used.
|
||||
|
||||
.. versionadded:: 1.0.0
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : ndarray of float
|
||||
The final solution.
|
||||
istop : int
|
||||
Gives the reason for termination.
|
||||
1 means x is an approximate solution to Ax = b.
|
||||
2 means x approximately solves the least-squares problem.
|
||||
itn : int
|
||||
Iteration number upon termination.
|
||||
r1norm : float
|
||||
``norm(r)``, where ``r = b - Ax``.
|
||||
r2norm : float
|
||||
``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if
|
||||
``damp == 0``.
|
||||
anorm : float
|
||||
Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
|
||||
acond : float
|
||||
Estimate of ``cond(Abar)``.
|
||||
arnorm : float
|
||||
Estimate of ``norm(A'*r - damp^2*x)``.
|
||||
xnorm : float
|
||||
``norm(x)``
|
||||
var : ndarray of float
|
||||
If ``calc_var`` is True, estimates all diagonals of
|
||||
``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
|
||||
damp^2*I)^{-1}``. This is well defined if A has full column
|
||||
rank or ``damp > 0``. (Not sure what var means if ``rank(A)
|
||||
< n`` and ``damp = 0.``)
|
||||
|
||||
Notes
|
||||
-----
|
||||
LSQR uses an iterative method to approximate the solution. The
|
||||
number of iterations required to reach a certain accuracy depends
|
||||
strongly on the scaling of the problem. Poor scaling of the rows
|
||||
or columns of A should therefore be avoided where possible.
|
||||
|
||||
For example, in problem 1 the solution is unaltered by
|
||||
row-scaling. If a row of A is very small or large compared to
|
||||
the other rows of A, the corresponding row of ( A b ) should be
|
||||
scaled up or down.
|
||||
|
||||
In problems 1 and 2, the solution x is easily recovered
|
||||
following column-scaling. Unless better information is known,
|
||||
the nonzero columns of A should be scaled so that they all have
|
||||
the same Euclidean norm (e.g., 1.0).
|
||||
|
||||
In problem 3, there is no freedom to re-scale if damp is
|
||||
nonzero. However, the value of damp should be assigned only
|
||||
after attention has been paid to the scaling of A.
|
||||
|
||||
The parameter damp is intended to help regularize
|
||||
ill-conditioned systems, by preventing the true solution from
|
||||
being very large. Another aid to regularization is provided by
|
||||
the parameter acond, which may be used to terminate iterations
|
||||
before the computed solution becomes very large.
|
||||
|
||||
If some initial estimate ``x0`` is known and if ``damp == 0``,
|
||||
one could proceed as follows:
|
||||
|
||||
1. Compute a residual vector ``r0 = b - A*x0``.
|
||||
2. Use LSQR to solve the system ``A*dx = r0``.
|
||||
3. Add the correction dx to obtain a final solution ``x = x0 + dx``.
|
||||
|
||||
This requires that ``x0`` be available before and after the call
|
||||
to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
|
||||
to solve A*x = b and k2 iterations to solve A*dx = r0.
|
||||
If x0 is "good", norm(r0) will be smaller than norm(b).
|
||||
If the same stopping tolerances atol and btol are used for each
|
||||
system, k1 and k2 will be similar, but the final solution x0 + dx
|
||||
should be more accurate. The only way to reduce the total work
|
||||
is to use a larger stopping tolerance for the second system.
|
||||
If some value btol is suitable for A*x = b, the larger value
|
||||
btol*norm(b)/norm(r0) should be suitable for A*dx = r0.
|
||||
|
||||
Preconditioning is another way to reduce the number of iterations.
|
||||
If it is possible to solve a related system ``M*x = b``
|
||||
efficiently, where M approximates A in some helpful way (e.g. M -
|
||||
A has low rank or its elements are small relative to those of A),
|
||||
LSQR may converge more rapidly on the system ``A*M(inverse)*z =
|
||||
b``, after which x can be recovered by solving M*x = z.
|
||||
|
||||
If A is symmetric, LSQR should not be used!
|
||||
|
||||
Alternatives are the symmetric conjugate-gradient method (cg)
|
||||
and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
|
||||
applies to any symmetric A and will converge more rapidly than
|
||||
LSQR. If A is positive definite, there are other implementations
|
||||
of symmetric cg that require slightly less work per iteration than
|
||||
SYMMLQ (but will take the same number of iterations).
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] C. C. Paige and M. A. Saunders (1982a).
|
||||
"LSQR: An algorithm for sparse linear equations and
|
||||
sparse least squares", ACM TOMS 8(1), 43-71.
|
||||
.. [2] C. C. Paige and M. A. Saunders (1982b).
|
||||
"Algorithm 583. LSQR: Sparse linear equations and least
|
||||
squares problems", ACM TOMS 8(2), 195-209.
|
||||
.. [3] M. A. Saunders (1995). "Solution of sparse rectangular
|
||||
systems using LSQR and CRAIG", BIT 35, 588-604.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> from scipy.sparse import csc_matrix
|
||||
>>> from scipy.sparse.linalg import lsqr
|
||||
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
|
||||
|
||||
The first example has the trivial solution `[0, 0]`
|
||||
|
||||
>>> b = np.array([0., 0., 0.], dtype=float)
|
||||
>>> x, istop, itn, normr = lsqr(A, b)[:4]
|
||||
The exact solution is x = 0
|
||||
>>> istop
|
||||
0
|
||||
>>> x
|
||||
array([ 0., 0.])
|
||||
|
||||
The stopping code `istop=0` indicates that a vector of zeros was
|
||||
found as a solution. The returned solution `x` indeed contains `[0., 0.]`.
|
||||
The next example has a non-trivial solution:
|
||||
|
||||
>>> b = np.array([1., 0., -1.], dtype=float)
|
||||
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
|
||||
>>> istop
|
||||
1
|
||||
>>> x
|
||||
array([ 1., -1.])
|
||||
>>> itn
|
||||
1
|
||||
>>> r1norm
|
||||
4.440892098500627e-16
|
||||
|
||||
As indicated by `istop=1`, `lsqr` found a solution obeying the tolerance
|
||||
limits. The given solution `[1., -1.]` obviously solves the equation. The
|
||||
remaining return values include information about the number of iterations
|
||||
(`itn=1`) and the remaining difference between the left and right sides of the solved
|
||||
equation.
|
||||
The final example demonstrates the behavior in the case where there is no
|
||||
solution for the equation:
|
||||
|
||||
>>> b = np.array([1., 0.01, -1.], dtype=float)
|
||||
>>> x, istop, itn, r1norm = lsqr(A, b)[:4]
|
||||
>>> istop
|
||||
2
|
||||
>>> x
|
||||
array([ 1.00333333, -0.99666667])
|
||||
>>> A.dot(x)-b
|
||||
array([ 0.00333333, -0.00333333, 0.00333333])
|
||||
>>> r1norm
|
||||
0.005773502691896255
|
||||
|
||||
`istop` indicates that the system is inconsistent and thus `x` is rather an
|
||||
approximate solution to the corresponding least-squares problem. `r1norm`
|
||||
contains the norm of the minimal residual that was found.
|
||||
"""
|
||||
A = aslinearoperator(A)
|
||||
b = np.atleast_1d(b)
|
||||
if b.ndim > 1:
|
||||
b = b.squeeze()
|
||||
|
||||
m, n = A.shape
|
||||
if iter_lim is None:
|
||||
iter_lim = 2 * n
|
||||
var = np.zeros(n)
|
||||
|
||||
msg = ('The exact solution is x = 0 ',
|
||||
'Ax - b is small enough, given atol, btol ',
|
||||
'The least-squares solution is good enough, given atol ',
|
||||
'The estimate of cond(Abar) has exceeded conlim ',
|
||||
'Ax - b is small enough for this machine ',
|
||||
'The least-squares solution is good enough for this machine',
|
||||
'Cond(Abar) seems to be too large for this machine ',
|
||||
'The iteration limit has been reached ')
|
||||
|
||||
if show:
|
||||
print(' ')
|
||||
print('LSQR Least-squares solution of Ax = b')
|
||||
str1 = f'The matrix A has {m} rows and {n} columns'
|
||||
str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var)
|
||||
str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)
|
||||
str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim)
|
||||
print(str1)
|
||||
print(str2)
|
||||
print(str3)
|
||||
print(str4)
|
||||
|
||||
itn = 0
|
||||
istop = 0
|
||||
ctol = 0
|
||||
if conlim > 0:
|
||||
ctol = 1/conlim
|
||||
anorm = 0
|
||||
acond = 0
|
||||
dampsq = damp**2
|
||||
ddnorm = 0
|
||||
res2 = 0
|
||||
xnorm = 0
|
||||
xxnorm = 0
|
||||
z = 0
|
||||
cs2 = -1
|
||||
sn2 = 0
|
||||
|
||||
"""
|
||||
Set up the first vectors u and v for the bidiagonalization.
|
||||
These satisfy beta*u = b - A*x, alfa*v = A'*u.
|
||||
"""
|
||||
u = b
|
||||
bnorm = np.linalg.norm(b)
|
||||
if x0 is None:
|
||||
x = np.zeros(n)
|
||||
beta = bnorm.copy()
|
||||
else:
|
||||
x = np.asarray(x0)
|
||||
u = u - A.matvec(x)
|
||||
beta = np.linalg.norm(u)
|
||||
|
||||
if beta > 0:
|
||||
u = (1/beta) * u
|
||||
v = A.rmatvec(u)
|
||||
alfa = np.linalg.norm(v)
|
||||
else:
|
||||
v = x.copy()
|
||||
alfa = 0
|
||||
|
||||
if alfa > 0:
|
||||
v = (1/alfa) * v
|
||||
w = v.copy()
|
||||
|
||||
rhobar = alfa
|
||||
phibar = beta
|
||||
rnorm = beta
|
||||
r1norm = rnorm
|
||||
r2norm = rnorm
|
||||
|
||||
# Reverse the order here from the original matlab code because
|
||||
# there was an error on return when arnorm==0
|
||||
arnorm = alfa * beta
|
||||
if arnorm == 0:
|
||||
print(msg[0])
|
||||
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
|
||||
|
||||
head1 = ' Itn x[0] r1norm r2norm '
|
||||
head2 = ' Compatible LS Norm A Cond A'
|
||||
|
||||
if show:
|
||||
print(' ')
|
||||
print(head1, head2)
|
||||
test1 = 1
|
||||
test2 = alfa / beta
|
||||
str1 = '%6g %12.5e' % (itn, x[0])
|
||||
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
|
||||
str3 = ' %8.1e %8.1e' % (test1, test2)
|
||||
print(str1, str2, str3)
|
||||
|
||||
# Main iteration loop.
|
||||
while itn < iter_lim:
|
||||
itn = itn + 1
|
||||
"""
|
||||
% Perform the next step of the bidiagonalization to obtain the
|
||||
% next beta, u, alfa, v. These satisfy the relations
|
||||
% beta*u = a*v - alfa*u,
|
||||
% alfa*v = A'*u - beta*v.
|
||||
"""
|
||||
u = A.matvec(v) - alfa * u
|
||||
beta = np.linalg.norm(u)
|
||||
|
||||
if beta > 0:
|
||||
u = (1/beta) * u
|
||||
anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
|
||||
v = A.rmatvec(u) - beta * v
|
||||
alfa = np.linalg.norm(v)
|
||||
if alfa > 0:
|
||||
v = (1 / alfa) * v
|
||||
|
||||
# Use a plane rotation to eliminate the damping parameter.
|
||||
# This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
|
||||
rhobar1 = sqrt(rhobar**2 + damp**2)
|
||||
cs1 = rhobar / rhobar1
|
||||
sn1 = damp / rhobar1
|
||||
psi = sn1 * phibar
|
||||
phibar = cs1 * phibar
|
||||
|
||||
# Use a plane rotation to eliminate the subdiagonal element (beta)
|
||||
# of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
|
||||
cs, sn, rho = _sym_ortho(rhobar1, beta)
|
||||
|
||||
theta = sn * alfa
|
||||
rhobar = -cs * alfa
|
||||
phi = cs * phibar
|
||||
phibar = sn * phibar
|
||||
tau = sn * phi
|
||||
|
||||
# Update x and w.
|
||||
t1 = phi / rho
|
||||
t2 = -theta / rho
|
||||
dk = (1 / rho) * w
|
||||
|
||||
x = x + t1 * w
|
||||
w = v + t2 * w
|
||||
ddnorm = ddnorm + np.linalg.norm(dk)**2
|
||||
|
||||
if calc_var:
|
||||
var = var + dk**2
|
||||
|
||||
# Use a plane rotation on the right to eliminate the
|
||||
# super-diagonal element (theta) of the upper-bidiagonal matrix.
|
||||
# Then use the result to estimate norm(x).
|
||||
delta = sn2 * rho
|
||||
gambar = -cs2 * rho
|
||||
rhs = phi - delta * z
|
||||
zbar = rhs / gambar
|
||||
xnorm = sqrt(xxnorm + zbar**2)
|
||||
gamma = sqrt(gambar**2 + theta**2)
|
||||
cs2 = gambar / gamma
|
||||
sn2 = theta / gamma
|
||||
z = rhs / gamma
|
||||
xxnorm = xxnorm + z**2
|
||||
|
||||
# Test for convergence.
|
||||
# First, estimate the condition of the matrix Abar,
|
||||
# and the norms of rbar and Abar'rbar.
|
||||
acond = anorm * sqrt(ddnorm)
|
||||
res1 = phibar**2
|
||||
res2 = res2 + psi**2
|
||||
rnorm = sqrt(res1 + res2)
|
||||
arnorm = alfa * abs(tau)
|
||||
|
||||
# Distinguish between
|
||||
# r1norm = ||b - Ax|| and
|
||||
# r2norm = rnorm in current code
|
||||
# = sqrt(r1norm^2 + damp^2*||x||^2).
|
||||
# Estimate r1norm from
|
||||
# r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
|
||||
# Although there is cancellation, it might be accurate enough.
|
||||
r1sq = rnorm**2 - dampsq * xxnorm
|
||||
r1norm = sqrt(abs(r1sq))
|
||||
if r1sq < 0:
|
||||
r1norm = -r1norm
|
||||
r2norm = rnorm
|
||||
|
||||
# Now use these norms to estimate certain other quantities,
|
||||
# some of which will be small near a solution.
|
||||
test1 = rnorm / bnorm
|
||||
test2 = arnorm / (anorm * rnorm + eps)
|
||||
test3 = 1 / (acond + eps)
|
||||
t1 = test1 / (1 + anorm * xnorm / bnorm)
|
||||
rtol = btol + atol * anorm * xnorm / bnorm
|
||||
|
||||
# The following tests guard against extremely small values of
|
||||
# atol, btol or ctol. (The user may have set any or all of
|
||||
# the parameters atol, btol, conlim to 0.)
|
||||
# The effect is equivalent to the normal tests using
|
||||
# atol = eps, btol = eps, conlim = 1/eps.
|
||||
if itn >= iter_lim:
|
||||
istop = 7
|
||||
if 1 + test3 <= 1:
|
||||
istop = 6
|
||||
if 1 + test2 <= 1:
|
||||
istop = 5
|
||||
if 1 + t1 <= 1:
|
||||
istop = 4
|
||||
|
||||
# Allow for tolerances set by the user.
|
||||
if test3 <= ctol:
|
||||
istop = 3
|
||||
if test2 <= atol:
|
||||
istop = 2
|
||||
if test1 <= rtol:
|
||||
istop = 1
|
||||
|
||||
# See if it is time to print something.
|
||||
prnt = False
|
||||
if n <= 40:
|
||||
prnt = True
|
||||
if itn <= 10:
|
||||
prnt = True
|
||||
if itn >= iter_lim-10:
|
||||
prnt = True
|
||||
# if itn%10 == 0: prnt = True
|
||||
if test3 <= 2*ctol:
|
||||
prnt = True
|
||||
if test2 <= 10*atol:
|
||||
prnt = True
|
||||
if test1 <= 10*rtol:
|
||||
prnt = True
|
||||
if istop != 0:
|
||||
prnt = True
|
||||
|
||||
if prnt:
|
||||
if show:
|
||||
str1 = '%6g %12.5e' % (itn, x[0])
|
||||
str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
|
||||
str3 = ' %8.1e %8.1e' % (test1, test2)
|
||||
str4 = ' %8.1e %8.1e' % (anorm, acond)
|
||||
print(str1, str2, str3, str4)
|
||||
|
||||
if istop != 0:
|
||||
break
|
||||
|
||||
# End of iteration loop.
|
||||
# Print the stopping condition.
|
||||
if show:
|
||||
print(' ')
|
||||
print('LSQR finished')
|
||||
print(msg[istop])
|
||||
print(' ')
|
||||
str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm)
|
||||
str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm)
|
||||
str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm)
|
||||
str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm)
|
||||
print(str1 + ' ' + str2)
|
||||
print(str3 + ' ' + str4)
|
||||
print(' ')
|
||||
|
||||
return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
|
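The warm-start recipe in the lsqr Notes above (compute ``r0 = b - A*x0``, solve ``A*dx = r0``, then form ``x = x0 + dx``) can be written out directly. A minimal sketch with an invented dense test problem, not part of the committed file:

# Sketch of the warm-start procedure from the lsqr Notes (damp == 0 case).
import numpy as np
from scipy.sparse.linalg import lsqr

rng = np.random.default_rng(1)
A = rng.standard_normal((50, 10))
b = rng.standard_normal(50)
x0 = np.linalg.lstsq(A, b, rcond=None)[0] + 0.01 * rng.standard_normal(10)  # some initial estimate

r0 = b - A @ x0                               # 1. residual of the initial estimate
dx = lsqr(A, r0, atol=1e-10, btol=1e-10)[0]   # 2. solve A*dx = r0 for the correction
x = x0 + dx                                   # 3. corrected least-squares solution

# At a least-squares solution the normal-equations residual A'(b - A x) vanishes.
assert np.linalg.norm(A.T @ (b - A @ x)) < 1e-6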
375
venv/Lib/site-packages/scipy/sparse/linalg/isolve/minres.py
Normal file
375
venv/Lib/site-packages/scipy/sparse/linalg/isolve/minres.py
Normal file
|
@@ -0,0 +1,375 @@
|
|||
from numpy import sqrt, inner, zeros, inf, finfo
|
||||
from numpy.linalg import norm
|
||||
|
||||
from .utils import make_system
|
||||
|
||||
__all__ = ['minres']
|
||||
|
||||
|
||||
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
|
||||
M=None, callback=None, show=False, check=False):
|
||||
"""
|
||||
Use MINimum RESidual iteration to solve Ax=b
|
||||
|
||||
MINRES minimizes norm(A*x - b) for a real symmetric matrix A. Unlike
|
||||
the Conjugate Gradient method, A can be indefinite or singular.
|
||||
|
||||
If shift != 0 then the method solves (A - shift*I)x = b
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : {sparse matrix, dense matrix, LinearOperator}
|
||||
The real symmetric N-by-N matrix of the linear system
|
||||
Alternatively, ``A`` can be a linear operator which can
|
||||
produce ``Ax`` using, e.g.,
|
||||
``scipy.sparse.linalg.LinearOperator``.
|
||||
b : {array, matrix}
|
||||
Right hand side of the linear system. Has shape (N,) or (N,1).
|
||||
|
||||
Returns
|
||||
-------
|
||||
x : {array, matrix}
|
||||
The converged solution.
|
||||
info : integer
|
||||
Provides convergence information:
|
||||
0 : successful exit
|
||||
>0 : convergence to tolerance not achieved, number of iterations
|
||||
<0 : illegal input or breakdown
|
||||
|
||||
Other Parameters
|
||||
----------------
|
||||
x0 : {array, matrix}
|
||||
Starting guess for the solution.
|
||||
tol : float
|
||||
Tolerance to achieve. The algorithm terminates when the relative
|
||||
residual is below `tol`.
|
||||
maxiter : integer
|
||||
Maximum number of iterations. Iteration will stop after maxiter
|
||||
steps even if the specified tolerance has not been achieved.
|
||||
M : {sparse matrix, dense matrix, LinearOperator}
|
||||
Preconditioner for A. The preconditioner should approximate the
|
||||
inverse of A. Effective preconditioning dramatically improves the
|
||||
rate of convergence, which implies that fewer iterations are needed
|
||||
to reach a given error tolerance.
|
||||
callback : function
|
||||
User-supplied function to call after each iteration. It is called
|
||||
as callback(xk), where xk is the current solution vector.
|
||||
|
||||
Examples
|
||||
--------
|
||||
>>> import numpy as np
|
||||
>>> from scipy.sparse import csc_matrix
|
||||
>>> from scipy.sparse.linalg import minres
|
||||
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
|
||||
>>> A = A + A.T
|
||||
>>> b = np.array([2, 4, -1], dtype=float)
|
||||
>>> x, exitCode = minres(A, b)
|
||||
>>> print(exitCode) # 0 indicates successful convergence
|
||||
0
|
||||
>>> np.allclose(A.dot(x), b)
|
||||
True
|
||||
|
||||
References
|
||||
----------
|
||||
Solution of sparse indefinite systems of linear equations,
|
||||
C. C. Paige and M. A. Saunders (1975),
|
||||
SIAM J. Numer. Anal. 12(4), pp. 617-629.
|
||||
https://web.stanford.edu/group/SOL/software/minres/
|
||||
|
||||
This file is a translation of the following MATLAB implementation:
|
||||
https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip
|
||||
|
||||
"""
|
||||
A, M, x, b, postprocess = make_system(A, M, x0, b)
|
||||
|
||||
matvec = A.matvec
|
||||
psolve = M.matvec
|
||||
|
||||
first = 'Enter minres. '
|
||||
last = 'Exit minres. '
|
||||
|
||||
n = A.shape[0]
|
||||
|
||||
if maxiter is None:
|
||||
maxiter = 5 * n
|
||||
|
||||
msg = [' beta2 = 0. If M = I, b and x are eigenvectors ', # -1
|
||||
' beta1 = 0. The exact solution is x0 ', # 0
|
||||
' A solution to Ax = b was found, given rtol ', # 1
|
||||
' A least-squares solution was found, given rtol ', # 2
|
||||
' Reasonable accuracy achieved, given eps ', # 3
|
||||
' x has converged to an eigenvector ', # 4
|
||||
' acond has exceeded 0.1/eps ', # 5
|
||||
' The iteration limit was reached ', # 6
|
||||
' A does not define a symmetric matrix ', # 7
|
||||
' M does not define a symmetric matrix ', # 8
|
||||
' M does not define a pos-def preconditioner '] # 9
|
||||
|
||||
if show:
|
||||
print(first + 'Solution of symmetric Ax = b')
|
||||
print(first + 'n = %3g shift = %23.14e' % (n,shift))
|
||||
print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol))
|
||||
print()
|
||||
|
||||
istop = 0
|
||||
itn = 0
|
||||
Anorm = 0
|
||||
Acond = 0
|
||||
rnorm = 0
|
||||
ynorm = 0
|
||||
|
||||
xtype = x.dtype
|
||||
|
||||
eps = finfo(xtype).eps
|
||||
|
||||
# Set up y and v for the first Lanczos vector v1.
|
||||
# y = beta1 P' v1, where P = C**(-1).
|
||||
# v is really P' v1.
|
||||
|
||||
r1 = b - A*x
|
||||
y = psolve(r1)
|
||||
|
||||
beta1 = inner(r1, y)
|
||||
|
||||
if beta1 < 0:
|
||||
raise ValueError('indefinite preconditioner')
|
||||
elif beta1 == 0:
|
||||
return (postprocess(x), 0)
|
||||
|
||||
beta1 = sqrt(beta1)
|
||||
|
||||
if check:
|
||||
# are these too strict?
|
||||
|
||||
# see if A is symmetric
|
||||
w = matvec(y)
|
||||
r2 = matvec(w)
|
||||
s = inner(w,w)
|
||||
t = inner(y,r2)
|
||||
z = abs(s - t)
|
||||
epsa = (s + eps) * eps**(1.0/3.0)
|
||||
if z > epsa:
|
||||
raise ValueError('non-symmetric matrix')
|
||||
|
||||
# see if M is symmetric
|
||||
r2 = psolve(y)
|
||||
s = inner(y,y)
|
||||
t = inner(r1,r2)
|
||||
z = abs(s - t)
|
||||
epsa = (s + eps) * eps**(1.0/3.0)
|
||||
if z > epsa:
|
||||
raise ValueError('non-symmetric preconditioner')
|
||||
|
||||
# Initialize other quantities
|
||||
oldb = 0
|
||||
beta = beta1
|
||||
dbar = 0
|
||||
epsln = 0
|
||||
qrnorm = beta1
|
||||
phibar = beta1
|
||||
rhs1 = beta1
|
||||
rhs2 = 0
|
||||
tnorm2 = 0
|
||||
gmax = 0
|
||||
gmin = finfo(xtype).max
|
||||
cs = -1
|
||||
sn = 0
|
||||
w = zeros(n, dtype=xtype)
|
||||
w2 = zeros(n, dtype=xtype)
|
||||
r2 = r1
|
||||
|
||||
if show:
|
||||
print()
|
||||
print()
|
||||
print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|')
|
||||
|
||||
while itn < maxiter:
|
||||
itn += 1
|
||||
|
||||
s = 1.0/beta
|
||||
v = s*y
|
||||
|
||||
y = matvec(v)
|
||||
y = y - shift * v
|
||||
|
||||
if itn >= 2:
|
||||
y = y - (beta/oldb)*r1
|
||||
|
||||
alfa = inner(v,y)
|
||||
y = y - (alfa/beta)*r2
|
||||
r1 = r2
|
||||
r2 = y
|
||||
y = psolve(r2)
|
||||
oldb = beta
|
||||
beta = inner(r2,y)
|
||||
if beta < 0:
|
||||
raise ValueError('non-symmetric matrix')
|
||||
beta = sqrt(beta)
|
||||
tnorm2 += alfa**2 + oldb**2 + beta**2
|
||||
|
||||
if itn == 1:
|
||||
if beta/beta1 <= 10*eps:
|
||||
istop = -1 # Terminate later
|
||||
|
||||
# Apply previous rotation Qk-1 to get
|
||||
# [deltak epslnk+1] = [cs sn][dbark 0 ]
|
||||
# [gbar k dbar k+1] [sn -cs][alfak betak+1].
|
||||
|
||||
oldeps = epsln
|
||||
delta = cs * dbar + sn * alfa # delta1 = 0 deltak
|
||||
gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k
|
||||
epsln = sn * beta # epsln2 = 0 epslnk+1
|
||||
dbar = - cs * beta # dbar 2 = beta2 dbar k+1
|
||||
root = norm([gbar, dbar])
|
||||
Arnorm = phibar * root
|
||||
|
||||
# Compute the next plane rotation Qk
|
||||
|
||||
gamma = norm([gbar, beta]) # gammak
|
||||
gamma = max(gamma, eps)
|
||||
cs = gbar / gamma # ck
|
||||
sn = beta / gamma # sk
|
||||
phi = cs * phibar # phik
|
||||
phibar = sn * phibar # phibark+1
|
||||
|
||||
# Update x.
|
||||
|
||||
denom = 1.0/gamma
|
||||
w1 = w2
|
||||
w2 = w
|
||||
w = (v - oldeps*w1 - delta*w2) * denom
|
||||
x = x + phi*w
|
||||
|
||||
# Go round again.
|
||||
|
||||
gmax = max(gmax, gamma)
|
||||
gmin = min(gmin, gamma)
|
||||
z = rhs1 / gamma
|
||||
rhs1 = rhs2 - delta*z
|
||||
rhs2 = - epsln*z
|
||||
|
||||
# Estimate various norms and test for convergence.
|
||||
|
||||
Anorm = sqrt(tnorm2)
|
||||
ynorm = norm(x)
|
||||
epsa = Anorm * eps
|
||||
epsx = Anorm * ynorm * eps
|
||||
epsr = Anorm * ynorm * tol
|
||||
diag = gbar
|
||||
|
||||
if diag == 0:
|
||||
diag = epsa
|
||||
|
||||
qrnorm = phibar
|
||||
rnorm = qrnorm
|
||||
if ynorm == 0 or Anorm == 0:
|
||||
test1 = inf
|
||||
else:
|
||||
test1 = rnorm / (Anorm*ynorm) # ||r|| / (||A|| ||x||)
|
||||
if Anorm == 0:
|
||||
test2 = inf
|
||||
else:
|
||||
test2 = root / Anorm # ||Ar|| / (||A|| ||r||)
|
||||
|
||||
# Estimate cond(A).
|
||||
# In this version we look at the diagonals of R in the
|
||||
# factorization of the lower Hessenberg matrix, Q * H = R,
|
||||
# where H is the tridiagonal matrix from Lanczos with one
|
||||
# extra row, beta(k+1) e_k^T.
|
||||
|
||||
Acond = gmax/gmin
|
||||
|
||||
# See if any of the stopping criteria are satisfied.
|
||||
# In rare cases, istop is already -1 from above (Abar = const*I).
|
||||
|
||||
if istop == 0:
|
||||
t1 = 1 + test1 # These tests work if tol < eps
|
||||
t2 = 1 + test2
|
||||
if t2 <= 1:
|
||||
istop = 2
|
||||
if t1 <= 1:
|
||||
istop = 1
|
||||
|
||||
if itn >= maxiter:
|
||||
istop = 6
|
||||
if Acond >= 0.1/eps:
|
||||
istop = 4
|
||||
if epsx >= beta1:
|
||||
istop = 3
|
||||
# if rnorm <= epsx : istop = 2
|
||||
# if rnorm <= epsr : istop = 1
|
||||
if test2 <= tol:
|
||||
istop = 2
|
||||
if test1 <= tol:
|
||||
istop = 1
|
||||
|
||||
# See if it is time to print something.
|
||||
|
||||
prnt = False
|
||||
if n <= 40:
|
||||
prnt = True
|
||||
if itn <= 10:
|
||||
prnt = True
|
||||
if itn >= maxiter-10:
|
||||
prnt = True
|
||||
if itn % 10 == 0:
|
||||
prnt = True
|
||||
if qrnorm <= 10*epsx:
|
||||
prnt = True
|
||||
if qrnorm <= 10*epsr:
|
||||
prnt = True
|
||||
if Acond <= 1e-2/eps:
|
||||
prnt = True
|
||||
if istop != 0:
|
||||
prnt = True
|
||||
|
||||
if show and prnt:
|
||||
str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
|
||||
str2 = ' %10.3e' % (test2,)
|
||||
str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
|
||||
|
||||
print(str1 + str2 + str3)
|
||||
|
||||
if itn % 10 == 0:
|
||||
print()
|
||||
|
||||
if callback is not None:
|
||||
callback(x)
|
||||
|
||||
if istop != 0:
|
||||
break # TODO check this
|
||||
|
||||
if show:
|
||||
print()
|
||||
print(last + ' istop = %3g itn =%5g' % (istop,itn))
|
||||
print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond))
|
||||
print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm))
|
||||
print(last + ' Arnorm = %12.4e' % (Arnorm,))
|
||||
print(last + msg[istop+1])
|
||||
|
||||
if istop == 6:
|
||||
info = maxiter
|
||||
else:
|
||||
info = 0
|
||||
|
||||
return (postprocess(x),info)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from numpy import arange
|
||||
from scipy.sparse import spdiags
|
||||
|
||||
n = 10
|
||||
|
||||
residuals = []
|
||||
|
||||
def cb(x):
|
||||
residuals.append(norm(b - A*x))
|
||||
|
||||
# A = poisson((10,),format='csr')
|
||||
A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
|
||||
M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
|
||||
A.psolve = M.matvec
|
||||
b = zeros(A.shape[0])
|
||||
x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)
|
||||
# x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
|
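The ``M`` argument documented in the minres docstring expects an operator that approximates the inverse of ``A``, as in the diagonal example of the ``__main__`` block above. A minimal Jacobi-preconditioning sketch with an invented 1-D Poisson problem, not part of the committed file:

# Sketch: Jacobi (diagonal) preconditioning for minres.  For this constant
# diagonal the preconditioner is only a rescaling, but it shows the M API.
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import minres, LinearOperator

n = 100
A = diags([2.0 * np.ones(n), -np.ones(n - 1), -np.ones(n - 1)], [0, -1, 1], format='csr')
b = A @ np.ones(n)                      # right-hand side with known solution x = 1
d_inv = 1.0 / A.diagonal()              # inverse of diag(A)
M = LinearOperator((n, n), matvec=lambda v: d_inv * v)

x, info = minres(A, b, M=M, tol=1e-10)
assert info == 0
assert np.linalg.norm(A @ x - b) < 1e-6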
52
venv/Lib/site-packages/scipy/sparse/linalg/isolve/setup.py
Normal file
52
venv/Lib/site-packages/scipy/sparse/linalg/isolve/setup.py
Normal file
|
@@ -0,0 +1,52 @@
|
|||
from os.path import join
|
||||
|
||||
|
||||
def configuration(parent_package='',top_path=None):
|
||||
from scipy._build_utils.system_info import get_info
|
||||
from numpy.distutils.misc_util import Configuration
|
||||
from scipy._build_utils import (get_g77_abi_wrappers, uses_blas64,
|
||||
blas_ilp64_pre_build_hook, get_f2py_int64_options)
|
||||
|
||||
config = Configuration('isolve',parent_package,top_path)
|
||||
|
||||
if uses_blas64():
|
||||
lapack_opt = get_info('lapack_ilp64_opt')
|
||||
f2py_options = get_f2py_int64_options()
|
||||
pre_build_hook = blas_ilp64_pre_build_hook(lapack_opt)
|
||||
else:
|
||||
lapack_opt = get_info('lapack_opt')
|
||||
f2py_options = None
|
||||
pre_build_hook = None
|
||||
|
||||
# iterative methods
|
||||
methods = ['BiCGREVCOM.f.src',
|
||||
'BiCGSTABREVCOM.f.src',
|
||||
'CGREVCOM.f.src',
|
||||
'CGSREVCOM.f.src',
|
||||
# 'ChebyREVCOM.f.src',
|
||||
'GMRESREVCOM.f.src',
|
||||
# 'JacobiREVCOM.f.src',
|
||||
'QMRREVCOM.f.src',
|
||||
# 'SORREVCOM.f.src'
|
||||
]
|
||||
|
||||
Util = ['getbreak.f.src']
|
||||
sources = Util + methods + ['_iterative.pyf.src']
|
||||
sources = [join('iterative', x) for x in sources]
|
||||
sources += get_g77_abi_wrappers(lapack_opt)
|
||||
|
||||
ext = config.add_extension('_iterative',
|
||||
sources=sources,
|
||||
f2py_options=f2py_options,
|
||||
extra_info=lapack_opt)
|
||||
ext._pre_build_hook = pre_build_hook
|
||||
|
||||
config.add_data_dir('tests')
|
||||
|
||||
return config
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
from numpy.distutils.core import setup
|
||||
|
||||
setup(**configuration(top_path='').todict())
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@@ -0,0 +1,60 @@
|
|||
import scipy.sparse.linalg as la
|
||||
import scipy.io as io
|
||||
import numpy as np
|
||||
import sys
|
||||
|
||||
#problem = "SPARSKIT/drivcav/e05r0100"
|
||||
problem = "SPARSKIT/drivcav/e05r0200"
|
||||
#problem = "Harwell-Boeing/sherman/sherman1"
|
||||
#problem = "misc/hamm/add32"
|
||||
|
||||
mm = np.lib._datasource.Repository('ftp://math.nist.gov/pub/MatrixMarket2/')
|
||||
f = mm.open('%s.mtx.gz' % problem)
|
||||
Am = io.mmread(f).tocsr()
|
||||
f.close()
|
||||
|
||||
f = mm.open('%s_rhs1.mtx.gz' % problem)
|
||||
b = np.array(io.mmread(f)).ravel()
|
||||
f.close()
|
||||
|
||||
count = [0]
|
||||
|
||||
|
||||
def matvec(v):
|
||||
count[0] += 1
|
||||
sys.stderr.write('%d\r' % count[0])
|
||||
return Am*v
|
||||
|
||||
|
||||
A = la.LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
|
||||
|
||||
M = 100
|
||||
|
||||
print("MatrixMarket problem %s" % problem)
|
||||
print("Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz))
|
||||
|
||||
count[0] = 0
|
||||
x0, info = la.gmres(A, b, restrt=M, tol=1e-14)
|
||||
count_0 = count[0]
|
||||
err0 = np.linalg.norm(Am*x0 - b) / np.linalg.norm(b)
|
||||
print("GMRES(%d):" % M, count_0, "matvecs, residual", err0)
|
||||
if info != 0:
|
||||
print("Didn't converge")
|
||||
|
||||
count[0] = 0
|
||||
x1, info = la.lgmres(A, b, inner_m=M-6*2, outer_k=6, tol=1e-14)
|
||||
count_1 = count[0]
|
||||
err1 = np.linalg.norm(Am*x1 - b) / np.linalg.norm(b)
|
||||
print("LGMRES(%d,6) [same memory req.]:" % (M-2*6), count_1,
|
||||
"matvecs, residual:", err1)
|
||||
if info != 0:
|
||||
print("Didn't converge")
|
||||
|
||||
count[0] = 0
|
||||
x2, info = la.lgmres(A, b, inner_m=M-6, outer_k=6, tol=1e-14)
|
||||
count_2 = count[0]
|
||||
err2 = np.linalg.norm(Am*x2 - b) / np.linalg.norm(b)
|
||||
print("LGMRES(%d,6) [same subspace size]:" % (M-6), count_2,
|
||||
"matvecs, residual:", err2)
|
||||
if info != 0:
|
||||
print("Didn't converge")
|
|
@@ -0,0 +1,165 @@
|
|||
#!/usr/bin/env python
|
||||
"""Tests for the linalg.isolve.gcrotmk module
|
||||
"""
|
||||
|
||||
from numpy.testing import (assert_, assert_allclose, assert_equal,
|
||||
suppress_warnings)
|
||||
|
||||
import numpy as np
|
||||
from numpy import zeros, array, allclose
|
||||
from scipy.linalg import norm
|
||||
from scipy.sparse import csr_matrix, eye, rand
|
||||
|
||||
from scipy.sparse.linalg.interface import LinearOperator
|
||||
from scipy.sparse.linalg import splu
|
||||
from scipy.sparse.linalg.isolve import gcrotmk, gmres
|
||||
|
||||
|
||||
Am = csr_matrix(array([[-2,1,0,0,0,9],
|
||||
[1,-2,1,0,5,0],
|
||||
[0,1,-2,1,0,0],
|
||||
[0,0,1,-2,1,0],
|
||||
[0,3,0,1,-2,1],
|
||||
[1,0,0,0,1,-2]]))
|
||||
b = array([1,2,3,4,5,6])
|
||||
count = [0]
|
||||
|
||||
|
||||
def matvec(v):
|
||||
count[0] += 1
|
||||
return Am*v
|
||||
|
||||
|
||||
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
|
||||
|
||||
|
||||
def do_solve(**kw):
|
||||
count[0] = 0
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
|
||||
count_0 = count[0]
|
||||
assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
|
||||
return x0, count_0
|
||||
|
||||
|
||||
class TestGCROTMK(object):
|
||||
def test_preconditioner(self):
|
||||
# Check that preconditioning works
|
||||
pc = splu(Am.tocsc())
|
||||
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
|
||||
|
||||
x0, count_0 = do_solve()
|
||||
x1, count_1 = do_solve(M=M)
|
||||
|
||||
assert_equal(count_1, 3)
|
||||
assert_(count_1 < count_0/2)
|
||||
assert_(allclose(x1, x0, rtol=1e-14))
|
||||
|
||||
def test_arnoldi(self):
|
||||
np.random.seed(1)
|
||||
|
||||
A = eye(2000) + rand(2000, 2000, density=5e-4)
|
||||
b = np.random.rand(2000)
|
||||
|
||||
# The inner arnoldi should be equivalent to gmres
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
|
||||
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)
|
||||
|
||||
assert_equal(flag0, 1)
|
||||
assert_equal(flag1, 1)
|
||||
assert np.linalg.norm(A.dot(x0) - b) > 1e-3
|
||||
|
||||
assert_allclose(x0, x1)
|
||||
|
||||
def test_cornercase(self):
|
||||
np.random.seed(1234)
|
||||
|
||||
# Rounding error may prevent convergence with tol=0 --- ensure
|
||||
# that the return values in this case are correct, and no
|
||||
# exceptions are raised
|
||||
|
||||
for n in [3, 5, 10, 100]:
|
||||
A = 2*eye(n)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
b = np.ones(n)
|
||||
x, info = gcrotmk(A, b, maxiter=10)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
||||
|
||||
x, info = gcrotmk(A, b, tol=0, maxiter=10)
|
||||
if info == 0:
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
||||
|
||||
b = np.random.rand(n)
|
||||
x, info = gcrotmk(A, b, maxiter=10)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
||||
|
||||
x, info = gcrotmk(A, b, tol=0, maxiter=10)
|
||||
if info == 0:
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
||||
|
||||
def test_nans(self):
|
||||
A = eye(3, format='lil')
|
||||
A[1,1] = np.nan
|
||||
b = np.ones(3)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x, info = gcrotmk(A, b, tol=0, maxiter=10)
|
||||
assert_equal(info, 1)
|
||||
|
||||
def test_truncate(self):
|
||||
np.random.seed(1234)
|
||||
A = np.random.rand(30, 30) + np.eye(30)
|
||||
b = np.random.rand(30)
|
||||
|
||||
for truncate in ['oldest', 'smallest']:
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,
|
||||
maxiter=200)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-3)
|
||||
|
||||
def test_CU(self):
|
||||
for discard_C in (True, False):
|
||||
# Check that C,U behave as expected
|
||||
CU = []
|
||||
x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
|
||||
assert_(len(CU) > 0)
|
||||
assert_(len(CU) <= 6)
|
||||
|
||||
if discard_C:
|
||||
for c, u in CU:
|
||||
assert_(c is None)
|
||||
|
||||
# should converge immediately
|
||||
x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
|
||||
if discard_C:
|
||||
assert_equal(count_1, 2 + len(CU))
|
||||
else:
|
||||
assert_equal(count_1, 3)
|
||||
assert_(count_1 <= count_0/2)
|
||||
assert_allclose(x1, x0, atol=1e-14)
|
||||
|
||||
def test_denormals(self):
|
||||
# Check that no warnings are emitted if the matrix contains
|
||||
# numbers for which 1/x has no float representation, and that
|
||||
# the solver behaves properly.
|
||||
A = np.array([[1, 2], [3, 4]], dtype=float)
|
||||
A *= 100 * np.nextafter(0, 1)
|
||||
|
||||
b = np.array([1, 1])
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
xp, info = gcrotmk(A, b)
|
||||
|
||||
if info == 0:
|
||||
assert_allclose(A.dot(xp), b)
|
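The ``CU`` recycling exercised by ``test_CU`` above can also warm-start a later solve with the same matrix. A hedged sketch with an invented, diagonally dominant test problem, not part of the committed file:

# Sketch: reuse the (c, u) pairs collected by one gcrotmk solve for a second
# right-hand side.  gcrotmk appends to the CU list in place.
import numpy as np
from scipy.sparse import eye, rand
from scipy.sparse.linalg import gcrotmk

np.random.seed(0)
A = eye(300) + 0.1 * rand(300, 300, density=1e-2)   # strongly diagonally dominant
b1 = np.random.rand(300)
b2 = np.random.rand(300)

CU = []                                              # filled in place with (c, u) pairs
x1, info1 = gcrotmk(A, b1, CU=CU, tol=1e-10, atol=1e-10)
x2, info2 = gcrotmk(A, b2, CU=CU, tol=1e-10, atol=1e-10)   # reuses the recycled subspace

assert info1 == 0 and info2 == 0
assert len(CU) > 0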
|
@@ -0,0 +1,726 @@
|
|||
""" Test functions for the sparse.linalg.isolve module
|
||||
"""
|
||||
|
||||
import itertools
|
||||
import platform
|
||||
import numpy as np
|
||||
|
||||
from numpy.testing import (assert_equal, assert_array_equal,
|
||||
assert_, assert_allclose, suppress_warnings)
|
||||
import pytest
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from numpy import zeros, arange, array, ones, eye, iscomplexobj
|
||||
from scipy.linalg import norm
|
||||
from scipy.sparse import spdiags, csr_matrix, SparseEfficiencyWarning
|
||||
|
||||
from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
||||
from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk
|
||||
|
||||
# TODO check that method preserve shape and type
|
||||
# TODO test both preconditioner methods
|
||||
|
||||
|
||||
class Case(object):
|
||||
def __init__(self, name, A, b=None, skip=None, nonconvergence=None):
|
||||
self.name = name
|
||||
self.A = A
|
||||
if b is None:
|
||||
self.b = arange(A.shape[0], dtype=float)
|
||||
else:
|
||||
self.b = b
|
||||
if skip is None:
|
||||
self.skip = []
|
||||
else:
|
||||
self.skip = skip
|
||||
if nonconvergence is None:
|
||||
self.nonconvergence = []
|
||||
else:
|
||||
self.nonconvergence = nonconvergence
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s>" % self.name
|
||||
|
||||
|
||||
class IterativeParams(object):
|
||||
def __init__(self):
|
||||
# list of tuples (solver, symmetric, positive_definite )
|
||||
solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk]
|
||||
sym_solvers = [minres, cg]
|
||||
posdef_solvers = [cg]
|
||||
real_solvers = [minres]
|
||||
|
||||
self.solvers = solvers
|
||||
|
||||
# list of tuples (A, symmetric, positive_definite )
|
||||
self.cases = []
|
||||
|
||||
# Symmetric and Positive Definite
|
||||
N = 40
|
||||
data = ones((3,N))
|
||||
data[0,:] = 2
|
||||
data[1,:] = -1
|
||||
data[2,:] = -1
|
||||
Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr')
|
||||
self.Poisson1D = Case("poisson1d", Poisson1D)
|
||||
self.cases.append(Case("poisson1d", Poisson1D))
|
||||
# note: minres fails for single precision
|
||||
self.cases.append(Case("poisson1d", Poisson1D.astype('f'),
|
||||
skip=[minres]))
|
||||
|
||||
# Symmetric and Negative Definite
|
||||
self.cases.append(Case("neg-poisson1d", -Poisson1D,
|
||||
skip=posdef_solvers))
|
||||
# note: minres fails for single precision
|
||||
self.cases.append(Case("neg-poisson1d", (-Poisson1D).astype('f'),
|
||||
skip=posdef_solvers + [minres]))
|
||||
|
||||
# Symmetric and Indefinite
|
||||
data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d')
|
||||
RandDiag = spdiags(data, [0], 10, 10, format='csr')
|
||||
self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
|
||||
self.cases.append(Case("rand-diag", RandDiag.astype('f'),
|
||||
skip=posdef_solvers))
|
||||
|
||||
# Random real-valued
|
||||
np.random.seed(1234)
|
||||
data = np.random.rand(4, 4)
|
||||
self.cases.append(Case("rand", data, skip=posdef_solvers+sym_solvers))
|
||||
self.cases.append(Case("rand", data.astype('f'),
|
||||
skip=posdef_solvers+sym_solvers))
|
||||
|
||||
# Random symmetric real-valued
|
||||
np.random.seed(1234)
|
||||
data = np.random.rand(4, 4)
|
||||
data = data + data.T
|
||||
self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
|
||||
self.cases.append(Case("rand-sym", data.astype('f'),
|
||||
skip=posdef_solvers))
|
||||
|
||||
# Random pos-def symmetric real
|
||||
np.random.seed(1234)
|
||||
data = np.random.rand(9, 9)
|
||||
data = np.dot(data.conj(), data.T)
|
||||
self.cases.append(Case("rand-sym-pd", data))
|
||||
# note: minres fails for single precision
|
||||
self.cases.append(Case("rand-sym-pd", data.astype('f'),
|
||||
skip=[minres]))
|
||||
|
||||
# Random complex-valued
|
||||
np.random.seed(1234)
|
||||
data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
|
||||
self.cases.append(Case("rand-cmplx", data,
|
||||
skip=posdef_solvers+sym_solvers+real_solvers))
|
||||
self.cases.append(Case("rand-cmplx", data.astype('F'),
|
||||
skip=posdef_solvers+sym_solvers+real_solvers))
|
||||
|
||||
# Random hermitian complex-valued
|
||||
np.random.seed(1234)
|
||||
data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
|
||||
data = data + data.T.conj()
|
||||
self.cases.append(Case("rand-cmplx-herm", data,
|
||||
skip=posdef_solvers+real_solvers))
|
||||
self.cases.append(Case("rand-cmplx-herm", data.astype('F'),
|
||||
skip=posdef_solvers+real_solvers))
|
||||
|
||||
# Random pos-def hermitian complex-valued
|
||||
np.random.seed(1234)
|
||||
data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9)
|
||||
data = np.dot(data.conj(), data.T)
|
||||
self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
|
||||
self.cases.append(Case("rand-cmplx-sym-pd", data.astype('F'),
|
||||
skip=real_solvers))
|
||||
|
||||
# Non-symmetric and Positive Definite
|
||||
#
|
||||
# cgs, qmr, and bicg fail to converge on this one
|
||||
# -- algorithmic limitation apparently
|
||||
data = ones((2,10))
|
||||
data[0,:] = 2
|
||||
data[1,:] = -1
|
||||
A = spdiags(data, [0,-1], 10, 10, format='csr')
|
||||
self.cases.append(Case("nonsymposdef", A,
|
||||
skip=sym_solvers+[cgs, qmr, bicg]))
|
||||
self.cases.append(Case("nonsymposdef", A.astype('F'),
|
||||
skip=sym_solvers+[cgs, qmr, bicg]))
|
||||
|
||||
# Symmetric, non-pd, hitting cgs/bicg/bicgstab/qmr breakdown
|
||||
A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0],
|
||||
[0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0],
|
||||
[0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0],
|
||||
[0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0],
|
||||
[0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1],
|
||||
[1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0],
|
||||
[-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0],
|
||||
[0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0],
|
||||
[0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0],
|
||||
[0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0],
|
||||
[0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype=float)
|
||||
b = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float)
|
||||
assert (A == A.T).all()
|
||||
self.cases.append(Case("sym-nonpd", A, b,
|
||||
skip=posdef_solvers,
|
||||
nonconvergence=[cgs,bicg,bicgstab,qmr]))
|
||||
|
||||
|
||||
params = IterativeParams()
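# Editorial sketch, not part of the upstream test file (names prefixed _demo
# are illustrative): the calling convention every check below relies on.
# Each isolve solver returns a pair ``(x, info)`` with ``info == 0`` on
# convergence.
def _demo_solver_convention():
    case = params.Poisson1D
    x, info = cg(case.A, case.b, x0=0*case.b, tol=1e-10, atol=0)
    assert info == 0
    # the residual should then satisfy ||b - A x|| <= max(atol, tol*||b||)
    return norm(case.b - case.A.dot(x))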
|
||||
|
||||
|
||||
def check_maxiter(solver, case):
|
||||
A = case.A
|
||||
tol = 1e-12
|
||||
|
||||
b = case.b
|
||||
x0 = 0*b
|
||||
|
||||
residuals = []
|
||||
|
||||
def callback(x):
|
||||
residuals.append(norm(b - case.A*x))
|
||||
|
||||
x, info = solver(A, b, x0=x0, tol=tol, maxiter=1, callback=callback)
|
||||
|
||||
assert_equal(len(residuals), 1)
|
||||
assert_equal(info, 1)
|
||||
|
||||
|
||||
def test_maxiter():
|
||||
case = params.Poisson1D
|
||||
for solver in params.solvers:
|
||||
if solver in case.skip:
|
||||
continue
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
check_maxiter(solver, case)
|
||||
|
||||
|
||||
def assert_normclose(a, b, tol=1e-8):
|
||||
residual = norm(a - b)
|
||||
tolerance = tol*norm(b)
|
||||
msg = "residual (%g) not smaller than tolerance %g" % (residual, tolerance)
|
||||
assert_(residual < tolerance, msg=msg)
|
||||
|
||||
|
||||
def check_convergence(solver, case):
|
||||
A = case.A
|
||||
|
||||
if A.dtype.char in "dD":
|
||||
tol = 1e-8
|
||||
else:
|
||||
tol = 1e-2
|
||||
|
||||
b = case.b
|
||||
x0 = 0*b
|
||||
|
||||
x, info = solver(A, b, x0=x0, tol=tol)
|
||||
|
||||
assert_array_equal(x0, 0*b) # ensure that x0 is not overwritten
|
||||
if solver not in case.nonconvergence:
|
||||
assert_equal(info,0)
|
||||
assert_normclose(A.dot(x), b, tol=tol)
|
||||
else:
|
||||
assert_(info != 0)
|
||||
assert_(np.linalg.norm(A.dot(x) - b) <= np.linalg.norm(b))
|
||||
|
||||
|
||||
def test_convergence():
|
||||
for solver in params.solvers:
|
||||
for case in params.cases:
|
||||
if solver in case.skip:
|
||||
continue
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
check_convergence(solver, case)
|
||||
|
||||
|
||||
def check_precond_dummy(solver, case):
|
||||
tol = 1e-8
|
||||
|
||||
def identity(b,which=None):
|
||||
"""trivial preconditioner"""
|
||||
return b
|
||||
|
||||
A = case.A
|
||||
|
||||
M,N = A.shape
|
||||
spdiags([1.0/A.diagonal()], [0], M, N)
|
||||
|
||||
b = case.b
|
||||
x0 = 0*b
|
||||
|
||||
precond = LinearOperator(A.shape, identity, rmatvec=identity)
|
||||
|
||||
if solver is qmr:
|
||||
x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol)
|
||||
else:
|
||||
x, info = solver(A, b, M=precond, x0=x0, tol=tol)
|
||||
assert_equal(info,0)
|
||||
assert_normclose(A.dot(x), b, tol)
|
||||
|
||||
A = aslinearoperator(A)
|
||||
A.psolve = identity
|
||||
A.rpsolve = identity
|
||||
|
||||
x, info = solver(A, b, x0=x0, tol=tol)
|
||||
assert_equal(info,0)
|
||||
assert_normclose(A*x, b, tol=tol)
|
||||
|
||||
|
||||
def test_precond_dummy():
|
||||
case = params.Poisson1D
|
||||
for solver in params.solvers:
|
||||
if solver in case.skip:
|
||||
continue
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
check_precond_dummy(solver, case)
|
||||
|
||||
|
||||
def check_precond_inverse(solver, case):
|
||||
tol = 1e-8
|
||||
|
||||
def inverse(b,which=None):
|
||||
"""inverse preconditioner"""
|
||||
A = case.A
|
||||
if not isinstance(A, np.ndarray):
|
||||
A = A.todense()
|
||||
return np.linalg.solve(A, b)
|
||||
|
||||
def rinverse(b,which=None):
|
||||
"""inverse preconditioner"""
|
||||
A = case.A
|
||||
if not isinstance(A, np.ndarray):
|
||||
A = A.todense()
|
||||
return np.linalg.solve(A.T, b)
|
||||
|
||||
matvec_count = [0]
|
||||
|
||||
def matvec(b):
|
||||
matvec_count[0] += 1
|
||||
return case.A.dot(b)
|
||||
|
||||
def rmatvec(b):
|
||||
matvec_count[0] += 1
|
||||
return case.A.T.dot(b)
|
||||
|
||||
b = case.b
|
||||
x0 = 0*b
|
||||
|
||||
A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec)
|
||||
precond = LinearOperator(case.A.shape, inverse, rmatvec=rinverse)
|
||||
|
||||
# Solve with preconditioner
|
||||
matvec_count = [0]
|
||||
x, info = solver(A, b, M=precond, x0=x0, tol=tol)
|
||||
|
||||
assert_equal(info, 0)
|
||||
assert_normclose(case.A.dot(x), b, tol)
|
||||
|
||||
# Solution should be nearly instant
|
||||
assert_(matvec_count[0] <= 3, repr(matvec_count))
|
||||
|
||||
|
||||
def test_precond_inverse():
|
||||
case = params.Poisson1D
|
||||
for solver in params.solvers:
|
||||
if solver in case.skip:
|
||||
continue
|
||||
if solver is qmr:
|
||||
continue
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
check_precond_inverse(solver, case)
|
||||
|
||||
|
||||
def test_gmres_basic():
|
||||
A = np.vander(np.arange(10) + 1)[:, ::-1]
|
||||
b = np.zeros(10)
|
||||
b[0] = 1
|
||||
np.linalg.solve(A, b)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x_gm, err = gmres(A, b, restart=5, maxiter=1)
|
||||
|
||||
assert_allclose(x_gm[0], 0.359, rtol=1e-2)
|
||||
|
||||
|
||||
def test_reentrancy():
|
||||
non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr]
|
||||
reentrant = [lgmres, minres, gcrotmk]
|
||||
for solver in reentrant + non_reentrant:
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
_check_reentrancy(solver, solver in reentrant)
|
||||
|
||||
|
||||
def _check_reentrancy(solver, is_reentrant):
|
||||
def matvec(x):
|
||||
A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
|
||||
y, info = solver(A, x)
|
||||
assert_equal(info, 0)
|
||||
return y
|
||||
b = np.array([1, 1./2, 1./3])
|
||||
op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
|
||||
dtype=b.dtype)
|
||||
|
||||
if not is_reentrant:
|
||||
assert_raises(RuntimeError, solver, op, b)
|
||||
else:
|
||||
y, info = solver(op, b)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(y, [1, 1, 1])
|
||||
|
||||
|
||||
@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, lgmres, gcrotmk])
|
||||
def test_atol(solver):
|
||||
# TODO: minres. It didn't historically use absolute tolerances, so
|
||||
# fixing it is less urgent.
|
||||
|
||||
np.random.seed(1234)
|
||||
A = np.random.rand(10, 10)
|
||||
A = A.dot(A.T) + 10 * np.eye(10)
|
||||
b = 1e3 * np.random.rand(10)
|
||||
b_norm = np.linalg.norm(b)
|
||||
|
||||
tols = np.r_[0, np.logspace(np.log10(1e-10), np.log10(1e2), 7), np.inf]
|
||||
|
||||
# Check effect of badly scaled preconditioners
|
||||
M0 = np.random.randn(10, 10)
|
||||
M0 = M0.dot(M0.T)
|
||||
Ms = [None, 1e-6 * M0, 1e6 * M0]
|
||||
|
||||
for M, tol, atol in itertools.product(Ms, tols, tols):
|
||||
if tol == 0 and atol == 0:
|
||||
continue
|
||||
|
||||
if solver is qmr:
|
||||
if M is not None:
|
||||
M = aslinearoperator(M)
|
||||
M2 = aslinearoperator(np.eye(10))
|
||||
else:
|
||||
M2 = None
|
||||
x, info = solver(A, b, M1=M, M2=M2, tol=tol, atol=atol)
|
||||
else:
|
||||
x, info = solver(A, b, M=M, tol=tol, atol=atol)
|
||||
assert_equal(info, 0)
|
||||
|
||||
residual = A.dot(x) - b
|
||||
err = np.linalg.norm(residual)
|
||||
atol2 = tol * b_norm
|
||||
assert_(err <= max(atol, atol2))
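# Editorial note, not in the original file: the stopping rule checked above
# is ``norm(b - A @ x) <= max(atol, tol * norm(b))``, so passing ``atol=0``
# turns ``tol`` into a purely relative tolerance on the residual.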
|
||||
|
||||
|
||||
@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk])
|
||||
def test_zero_rhs(solver):
|
||||
np.random.seed(1234)
|
||||
A = np.random.rand(10, 10)
|
||||
A = A.dot(A.T) + 10 * np.eye(10)
|
||||
|
||||
b = np.zeros(10)
|
||||
tols = np.r_[np.logspace(np.log10(1e-10), np.log10(1e2), 7)]
|
||||
|
||||
for tol in tols:
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
|
||||
x, info = solver(A, b, tol=tol)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(x, 0, atol=1e-15)
|
||||
|
||||
x, info = solver(A, b, tol=tol, x0=ones(10))
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(x, 0, atol=tol)
|
||||
|
||||
if solver is not minres:
|
||||
x, info = solver(A, b, tol=tol, atol=0, x0=ones(10))
|
||||
if info == 0:
|
||||
assert_allclose(x, 0)
|
||||
|
||||
x, info = solver(A, b, tol=tol, atol=tol)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(x, 0, atol=1e-300)
|
||||
|
||||
x, info = solver(A, b, tol=tol, atol=0)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(x, 0, atol=1e-300)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("solver", [
|
||||
gmres, qmr,
|
||||
pytest.param(lgmres, marks=pytest.mark.xfail(platform.machine() == 'ppc64le',
|
||||
reason="fails on ppc64le")),
|
||||
pytest.param(cgs, marks=pytest.mark.xfail),
|
||||
pytest.param(bicg, marks=pytest.mark.xfail),
|
||||
pytest.param(bicgstab, marks=pytest.mark.xfail),
|
||||
pytest.param(gcrotmk, marks=pytest.mark.xfail)])
|
||||
def test_maxiter_worsening(solver):
|
||||
# Check error does not grow (boundlessly) with increasing maxiter.
|
||||
# This can occur due to the solvers hitting close to breakdown,
|
||||
# which they should detect and halt as necessary.
|
||||
# cf. gh-9100
|
||||
|
||||
# Singular matrix, rhs numerically not in range
|
||||
A = np.array([[-0.1112795288033378, 0, 0, 0.16127952880333685],
|
||||
[0, -0.13627952880333782+6.283185307179586j, 0, 0],
|
||||
[0, 0, -0.13627952880333782-6.283185307179586j, 0],
|
||||
[0.1112795288033368, 0j, 0j, -0.16127952880333785]])
|
||||
v = np.ones(4)
|
||||
best_error = np.inf
|
||||
tol = 7 if platform.machine() == 'aarch64' else 5
|
||||
|
||||
for maxiter in range(1, 20):
|
||||
x, info = solver(A, v, maxiter=maxiter, tol=1e-8, atol=0)
|
||||
|
||||
if info == 0:
|
||||
assert_(np.linalg.norm(A.dot(x) - v) <= 1e-8*np.linalg.norm(v))
|
||||
|
||||
error = np.linalg.norm(A.dot(x) - v)
|
||||
best_error = min(best_error, error)
|
||||
|
||||
# Check with slack
|
||||
assert_(error <= tol*best_error)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk])
|
||||
def test_x0_working(solver):
|
||||
# Easy problem
|
||||
np.random.seed(1)
|
||||
n = 10
|
||||
A = np.random.rand(n, n)
|
||||
A = A.dot(A.T)
|
||||
b = np.random.rand(n)
|
||||
x0 = np.random.rand(n)
|
||||
|
||||
if solver is minres:
|
||||
kw = dict(tol=1e-6)
|
||||
else:
|
||||
kw = dict(atol=0, tol=1e-6)
|
||||
|
||||
x, info = solver(A, b, **kw)
|
||||
assert_equal(info, 0)
|
||||
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-6*np.linalg.norm(b))
|
||||
|
||||
x, info = solver(A, b, x0=x0, **kw)
|
||||
assert_equal(info, 0)
|
||||
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-6*np.linalg.norm(b))
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
class TestQMR(object):
|
||||
def test_leftright_precond(self):
|
||||
"""Check that QMR works with left and right preconditioners"""
|
||||
|
||||
from scipy.sparse.linalg.dsolve import splu
|
||||
from scipy.sparse.linalg.interface import LinearOperator
|
||||
|
||||
n = 100
|
||||
|
||||
dat = ones(n)
|
||||
A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1],n,n)
|
||||
b = arange(n,dtype='d')
|
||||
|
||||
L = spdiags([-dat/2, dat], [-1,0], n, n)
|
||||
U = spdiags([4*dat, -dat], [0,1], n, n)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
|
||||
L_solver = splu(L)
|
||||
U_solver = splu(U)
|
||||
|
||||
def L_solve(b):
|
||||
return L_solver.solve(b)
|
||||
|
||||
def U_solve(b):
|
||||
return U_solver.solve(b)
|
||||
|
||||
def LT_solve(b):
|
||||
return L_solver.solve(b,'T')
|
||||
|
||||
def UT_solve(b):
|
||||
return U_solver.solve(b,'T')
|
||||
|
||||
M1 = LinearOperator((n,n), matvec=L_solve, rmatvec=LT_solve)
|
||||
M2 = LinearOperator((n,n), matvec=U_solve, rmatvec=UT_solve)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)
|
||||
|
||||
assert_equal(info,0)
|
||||
assert_normclose(A*x, b, tol=1e-8)
|
||||
|
||||
|
||||
class TestGMRES(object):
|
||||
def test_callback(self):
|
||||
|
||||
def store_residual(r, rvec):
|
||||
rvec[rvec.nonzero()[0].max()+1] = r
|
||||
|
||||
# Define, A,b
|
||||
A = csr_matrix(array([[-2,1,0,0,0,0],[1,-2,1,0,0,0],[0,1,-2,1,0,0],[0,0,1,-2,1,0],[0,0,0,1,-2,1],[0,0,0,0,1,-2]]))
|
||||
b = ones((A.shape[0],))
|
||||
maxiter = 1
|
||||
rvec = zeros(maxiter+1)
|
||||
rvec[0] = 1.0
|
||||
callback = lambda r:store_residual(r, rvec)
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x,flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback)
|
||||
|
||||
# Expected output from SciPy 1.0.0
|
||||
assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)
|
||||
|
||||
# Test preconditioned callback
|
||||
M = 1e-3 * np.eye(A.shape[0])
|
||||
rvec = zeros(maxiter+1)
|
||||
rvec[0] = 1.0
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter, callback=callback)
|
||||
|
||||
# Expected output from SciPy 1.0.0 (callback has preconditioned residual!)
|
||||
assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]), rtol=1e-10)
|
||||
|
||||
def test_abi(self):
|
||||
# Check we don't segfault on gmres with complex argument
|
||||
A = eye(2)
|
||||
b = ones(2)
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
r_x, r_info = gmres(A, b)
|
||||
r_x = r_x.astype(complex)
|
||||
|
||||
x, info = gmres(A.astype(complex), b.astype(complex))
|
||||
|
||||
assert_(iscomplexobj(x))
|
||||
assert_allclose(r_x, x)
|
||||
assert_(r_info == info)
|
||||
|
||||
def test_atol_legacy(self):
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
|
||||
# Check the strange legacy behavior: the tolerance is interpreted
|
||||
# as atol, but only for the initial residual
|
||||
A = eye(2)
|
||||
b = 1e-6 * ones(2)
|
||||
x, info = gmres(A, b, tol=1e-5)
|
||||
assert_array_equal(x, np.zeros(2))
|
||||
|
||||
A = eye(2)
|
||||
b = ones(2)
|
||||
x, info = gmres(A, b, tol=1e-5)
|
||||
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-5*np.linalg.norm(b))
|
||||
assert_allclose(x, b, atol=0, rtol=1e-8)
|
||||
|
||||
rndm = np.random.RandomState(12345)
|
||||
A = rndm.rand(30, 30)
|
||||
b = 1e-6 * ones(30)
|
||||
x, info = gmres(A, b, tol=1e-7, restart=20)
|
||||
assert_(np.linalg.norm(A.dot(x) - b) > 1e-7)
|
||||
|
||||
A = eye(2)
|
||||
b = 1e-10 * ones(2)
|
||||
x, info = gmres(A, b, tol=1e-8, atol=0)
|
||||
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))
|
||||
|
||||
def test_defective_precond_breakdown(self):
|
||||
# Breakdown due to defective preconditioner
|
||||
M = np.eye(3)
|
||||
M[2,2] = 0
|
||||
|
||||
b = np.array([0, 1, 1])
|
||||
x = np.array([1, 0, 0])
|
||||
A = np.diag([2, 3, 4])
|
||||
|
||||
x, info = gmres(A, b, x0=x, M=M, tol=1e-15, atol=0)
|
||||
|
||||
# Should not return nans, nor terminate with false success
|
||||
assert_(not np.isnan(x).any())
|
||||
if info == 0:
|
||||
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-15*np.linalg.norm(b))
|
||||
|
||||
# The solution should be OK outside null space of M
|
||||
assert_allclose(M.dot(A.dot(x)), M.dot(b))
|
||||
|
||||
def test_defective_matrix_breakdown(self):
|
||||
# Breakdown due to defective matrix
|
||||
A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
|
||||
b = np.array([1, 0, 1])
|
||||
x, info = gmres(A, b, tol=1e-8, atol=0)
|
||||
|
||||
# Should not return nans, nor terminate with false success
|
||||
assert_(not np.isnan(x).any())
|
||||
if info == 0:
|
||||
assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))
|
||||
|
||||
# The solution should be OK outside null space of A
|
||||
assert_allclose(A.dot(A.dot(x)), A.dot(b))
|
||||
|
||||
def test_callback_type(self):
|
||||
# The legacy callback type changes meaning of 'maxiter'
|
||||
np.random.seed(1)
|
||||
A = np.random.rand(20, 20)
|
||||
b = np.random.rand(20)
|
||||
|
||||
cb_count = [0]
|
||||
|
||||
def pr_norm_cb(r):
|
||||
cb_count[0] += 1
|
||||
assert_(isinstance(r, float))
|
||||
|
||||
def x_cb(x):
|
||||
cb_count[0] += 1
|
||||
assert_(isinstance(x, np.ndarray))
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
# 2 iterations is not enough to solve the problem
|
||||
cb_count = [0]
|
||||
x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50)
|
||||
assert info == 2
|
||||
assert cb_count[0] == 2
|
||||
|
||||
# With `callback_type` specified, no warning should be raised
|
||||
cb_count = [0]
|
||||
x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50,
|
||||
callback_type='legacy')
|
||||
assert info == 2
|
||||
assert cb_count[0] == 2
|
||||
|
||||
# 2 restart cycles is enough to solve the problem
|
||||
cb_count = [0]
|
||||
x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50,
|
||||
callback_type='pr_norm')
|
||||
assert info == 0
|
||||
assert cb_count[0] > 2
|
||||
|
||||
# 2 restart cycles is enough to solve the problem
|
||||
cb_count = [0]
|
||||
x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=2, restart=50,
|
||||
callback_type='x')
|
||||
assert info == 0
|
||||
assert cb_count[0] == 2
|
||||
|
||||
def test_callback_x_monotonic(self):
|
||||
# Check that callback_type='x' gives monotonic norm decrease
|
||||
np.random.seed(1)
|
||||
A = np.random.rand(20, 20) + np.eye(20)
|
||||
b = np.random.rand(20)
|
||||
|
||||
prev_r = [np.inf]
|
||||
count = [0]
|
||||
|
||||
def x_cb(x):
|
||||
r = np.linalg.norm(A.dot(x) - b)
|
||||
assert r <= prev_r[0]
|
||||
prev_r[0] = r
|
||||
count[0] += 1
|
||||
|
||||
x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=20, restart=10,
|
||||
callback_type='x')
|
||||
assert info == 20
|
||||
assert count[0] == 21
|
||||
x_cb(x)
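# Editorial sketch, not part of the upstream file: the two callback
# conventions exercised by TestGMRES above.  With callback_type='pr_norm',
# gmres calls back once per inner iteration with a float (the preconditioned
# residual norm); with callback_type='x', it calls back once per restart
# cycle with the current solution vector.
def _demo_gmres_callback_types():
    np.random.seed(0)
    A = np.random.rand(20, 20) + np.eye(20)
    b = np.random.rand(20)
    norms, iterates = [], []
    gmres(A, b, tol=1e-6, atol=0, restart=10, maxiter=2,
          callback=norms.append, callback_type='pr_norm')
    gmres(A, b, tol=1e-6, atol=0, restart=10, maxiter=2,
          callback=iterates.append, callback_type='x')
    return len(norms), len(iterates)  # inner-iteration count vs. restart-cycle count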
|
|
@@ -0,0 +1,212 @@
|
|||
"""Tests for the linalg.isolve.lgmres module
|
||||
"""
|
||||
|
||||
from numpy.testing import (assert_, assert_allclose, assert_equal,
|
||||
suppress_warnings)
|
||||
|
||||
import pytest
|
||||
from platform import python_implementation
|
||||
|
||||
import numpy as np
|
||||
from numpy import zeros, array, allclose
|
||||
from scipy.linalg import norm
|
||||
from scipy.sparse import csr_matrix, eye, rand
|
||||
|
||||
from scipy.sparse.linalg.interface import LinearOperator
|
||||
from scipy.sparse.linalg import splu
|
||||
from scipy.sparse.linalg.isolve import lgmres, gmres
|
||||
|
||||
|
||||
Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
|
||||
[1, -2, 1, 0, 5, 0],
|
||||
[0, 1, -2, 1, 0, 0],
|
||||
[0, 0, 1, -2, 1, 0],
|
||||
[0, 3, 0, 1, -2, 1],
|
||||
[1, 0, 0, 0, 1, -2]]))
|
||||
b = array([1, 2, 3, 4, 5, 6])
|
||||
count = [0]
|
||||
|
||||
|
||||
def matvec(v):
|
||||
count[0] += 1
|
||||
return Am*v
|
||||
|
||||
|
||||
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
|
||||
|
||||
|
||||
def do_solve(**kw):
|
||||
count[0] = 0
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
|
||||
inner_m=6, tol=1e-14, **kw)
|
||||
count_0 = count[0]
|
||||
assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
|
||||
return x0, count_0
|
||||
|
||||
|
||||
class TestLGMRES(object):
|
||||
def test_preconditioner(self):
|
||||
# Check that preconditioning works
|
||||
pc = splu(Am.tocsc())
|
||||
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
|
||||
|
||||
x0, count_0 = do_solve()
|
||||
x1, count_1 = do_solve(M=M)
|
||||
|
||||
assert_(count_1 == 3)
|
||||
assert_(count_1 < count_0/2)
|
||||
assert_(allclose(x1, x0, rtol=1e-14))
|
||||
|
||||
def test_outer_v(self):
|
||||
# Check that the augmentation vectors behave as expected
|
||||
|
||||
outer_v = []
|
||||
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
|
||||
assert_(len(outer_v) > 0)
|
||||
assert_(len(outer_v) <= 6)
|
||||
|
||||
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
|
||||
prepend_outer_v=True)
|
||||
assert_(count_1 == 2, count_1)
|
||||
assert_(count_1 < count_0/2)
|
||||
assert_(allclose(x1, x0, rtol=1e-14))
|
||||
|
||||
# ---
|
||||
|
||||
outer_v = []
|
||||
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v,
|
||||
store_outer_Av=False)
|
||||
assert_(array([v[1] is None for v in outer_v]).all())
|
||||
assert_(len(outer_v) > 0)
|
||||
assert_(len(outer_v) <= 6)
|
||||
|
||||
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
|
||||
prepend_outer_v=True)
|
||||
assert_(count_1 == 3, count_1)
|
||||
assert_(count_1 < count_0/2)
|
||||
assert_(allclose(x1, x0, rtol=1e-14))
|
||||
|
||||
@pytest.mark.skipif(python_implementation() == 'PyPy',
|
||||
reason="Fails on PyPy CI runs. See #9507")
|
||||
def test_arnoldi(self):
|
||||
np.random.seed(1234)
|
||||
|
||||
A = eye(2000) + rand(2000, 2000, density=5e-4)
|
||||
b = np.random.rand(2000)
|
||||
|
||||
# The inner arnoldi should be equivalent to gmres
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]),
|
||||
inner_m=15, maxiter=1)
|
||||
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]),
|
||||
restart=15, maxiter=1)
|
||||
|
||||
assert_equal(flag0, 1)
|
||||
assert_equal(flag1, 1)
|
||||
assert_(np.linalg.norm(A.dot(x0) - b) > 4e-4)
|
||||
|
||||
assert_allclose(x0, x1)
|
||||
|
||||
def test_cornercase(self):
|
||||
np.random.seed(1234)
|
||||
|
||||
# Rounding error may prevent convergence with tol=0 --- ensure
|
||||
# that the return values in this case are correct, and no
|
||||
# exceptions are raised
|
||||
|
||||
for n in [3, 5, 10, 100]:
|
||||
A = 2*eye(n)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
|
||||
b = np.ones(n)
|
||||
x, info = lgmres(A, b, maxiter=10)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
||||
|
||||
x, info = lgmres(A, b, tol=0, maxiter=10)
|
||||
if info == 0:
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
||||
|
||||
b = np.random.rand(n)
|
||||
x, info = lgmres(A, b, maxiter=10)
|
||||
assert_equal(info, 0)
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
||||
|
||||
x, info = lgmres(A, b, tol=0, maxiter=10)
|
||||
if info == 0:
|
||||
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
||||
|
||||
def test_nans(self):
|
||||
A = eye(3, format='lil')
|
||||
A[1, 1] = np.nan
|
||||
b = np.ones(3)
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
x, info = lgmres(A, b, tol=0, maxiter=10)
|
||||
assert_equal(info, 1)
|
||||
|
||||
def test_breakdown_with_outer_v(self):
|
||||
A = np.array([[1, 2], [3, 4]], dtype=float)
|
||||
b = np.array([1, 2])
|
||||
|
||||
x = np.linalg.solve(A, b)
|
||||
v0 = np.array([1, 0])
|
||||
|
||||
# The inner iteration should converge to the correct solution,
|
||||
# since it's in the outer vector list
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1)
|
||||
|
||||
assert_allclose(xp, x, atol=1e-12)
|
||||
|
||||
def test_breakdown_underdetermined(self):
|
||||
# Should find LSQ solution in the Krylov span in one inner
|
||||
# iteration, despite solver breakdown from nilpotent A.
|
||||
A = np.array([[0, 1, 1, 1],
|
||||
[0, 0, 1, 1],
|
||||
[0, 0, 0, 1],
|
||||
[0, 0, 0, 0]], dtype=float)
|
||||
|
||||
bs = [
|
||||
np.array([1, 1, 1, 1]),
|
||||
np.array([1, 1, 1, 0]),
|
||||
np.array([1, 1, 0, 0]),
|
||||
np.array([1, 0, 0, 0]),
|
||||
]
|
||||
|
||||
for b in bs:
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
xp, info = lgmres(A, b, maxiter=1)
|
||||
resp = np.linalg.norm(A.dot(xp) - b)
|
||||
|
||||
K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
|
||||
y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
|
||||
x = K.dot(y)
|
||||
res = np.linalg.norm(A.dot(x) - b)
|
||||
|
||||
assert_allclose(resp, res, err_msg=repr(b))
|
||||
|
||||
def test_denormals(self):
|
||||
# Check that no warnings are emitted if the matrix contains
|
||||
# numbers for which 1/x has no float representation, and that
|
||||
# the solver behaves properly.
|
||||
A = np.array([[1, 2], [3, 4]], dtype=float)
|
||||
A *= 100 * np.nextafter(0, 1)
|
||||
|
||||
b = np.array([1, 1])
|
||||
|
||||
with suppress_warnings() as sup:
|
||||
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
||||
xp, info = lgmres(A, b)
|
||||
|
||||
if info == 0:
|
||||
assert_allclose(A.dot(xp), b)
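# Editorial sketch, not part of the upstream file: reusing LGMRES
# augmentation vectors between solves, the mechanism test_outer_v checks.
# lgmres fills the supplied list in place with (v, A*v) pairs that can seed
# a later solve against a nearby right-hand side.
def _demo_outer_v_reuse():
    saved = []
    x1, info1 = lgmres(Am, b, tol=1e-10, atol=0, outer_k=3, outer_v=saved)
    x2, info2 = lgmres(Am, b + 0.01, tol=1e-10, atol=0, outer_k=3,
                       outer_v=saved, prepend_outer_v=True)
    return info1, info2, len(saved)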
|
||||
|
|
@@ -0,0 +1,203 @@
|
|||
"""
|
||||
Copyright (C) 2010 David Fong and Michael Saunders
|
||||
Distributed under the same license as SciPy
|
||||
|
||||
Testing Code for LSMR.
|
||||
|
||||
03 Jun 2010: First version release with lsmr.py
|
||||
|
||||
David Chin-lung Fong clfong@stanford.edu
|
||||
Institute for Computational and Mathematical Engineering
|
||||
Stanford University
|
||||
|
||||
Michael Saunders saunders@stanford.edu
|
||||
Systems Optimization Laboratory
|
||||
Dept of MS&E, Stanford University.
|
||||
|
||||
"""
|
||||
|
||||
from numpy import array, arange, eye, zeros, ones, sqrt, transpose, hstack
|
||||
from numpy.linalg import norm
|
||||
from numpy.testing import (assert_almost_equal,
|
||||
assert_array_almost_equal)
|
||||
|
||||
from scipy.sparse import coo_matrix
|
||||
from scipy.sparse.linalg.interface import aslinearoperator
|
||||
from scipy.sparse.linalg import lsmr
|
||||
from .test_lsqr import G, b
|
||||
|
||||
|
||||
class TestLSMR:
|
||||
def setup_method(self):
|
||||
self.n = 10
|
||||
self.m = 10
|
||||
|
||||
def assertCompatibleSystem(self, A, xtrue):
|
||||
Afun = aslinearoperator(A)
|
||||
b = Afun.matvec(xtrue)
|
||||
x = lsmr(A, b)[0]
|
||||
assert_almost_equal(norm(x - xtrue), 0, decimal=5)
|
||||
|
||||
def testIdentityACase1(self):
|
||||
A = eye(self.n)
|
||||
xtrue = zeros((self.n, 1))
|
||||
self.assertCompatibleSystem(A, xtrue)
|
||||
|
||||
def testIdentityACase2(self):
|
||||
A = eye(self.n)
|
||||
xtrue = ones((self.n,1))
|
||||
self.assertCompatibleSystem(A, xtrue)
|
||||
|
||||
def testIdentityACase3(self):
|
||||
A = eye(self.n)
|
||||
xtrue = transpose(arange(self.n,0,-1))
|
||||
self.assertCompatibleSystem(A, xtrue)
|
||||
|
||||
def testBidiagonalA(self):
|
||||
A = lowerBidiagonalMatrix(20,self.n)
|
||||
xtrue = transpose(arange(self.n,0,-1))
|
||||
self.assertCompatibleSystem(A,xtrue)
|
||||
|
||||
def testScalarB(self):
|
||||
A = array([[1.0, 2.0]])
|
||||
b = 3.0
|
||||
x = lsmr(A, b)[0]
|
||||
assert_almost_equal(norm(A.dot(x) - b), 0)
|
||||
|
||||
def testComplexX(self):
|
||||
A = eye(self.n)
|
||||
xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
|
||||
self.assertCompatibleSystem(A, xtrue)
|
||||
|
||||
def testComplexX0(self):
|
||||
A = 4 * eye(self.n) + ones((self.n, self.n))
|
||||
xtrue = transpose(arange(self.n, 0, -1))
|
||||
b = aslinearoperator(A).matvec(xtrue)
|
||||
x0 = zeros(self.n, dtype=complex)
|
||||
x = lsmr(A, b, x0=x0)[0]
|
||||
assert_almost_equal(norm(x - xtrue), 0, decimal=5)
|
||||
|
||||
def testComplexA(self):
|
||||
A = 4 * eye(self.n) + 1j * ones((self.n, self.n))
|
||||
xtrue = transpose(arange(self.n, 0, -1).astype(complex))
|
||||
self.assertCompatibleSystem(A, xtrue)
|
||||
|
||||
def testComplexB(self):
|
||||
A = 4 * eye(self.n) + ones((self.n, self.n))
|
||||
xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
|
||||
b = aslinearoperator(A).matvec(xtrue)
|
||||
x = lsmr(A, b)[0]
|
||||
assert_almost_equal(norm(x - xtrue), 0, decimal=5)
|
||||
|
||||
def testColumnB(self):
|
||||
A = eye(self.n)
|
||||
b = ones((self.n, 1))
|
||||
x = lsmr(A, b)[0]
|
||||
assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)
|
||||
|
||||
def testInitialization(self):
|
||||
# Test that the default setting is not modified
|
||||
x_ref = lsmr(G, b)[0]
|
||||
x0 = zeros(b.shape)
|
||||
x = lsmr(G, b, x0=x0)[0]
|
||||
assert_array_almost_equal(x_ref, x)
|
||||
|
||||
# Test warm-start with single iteration
|
||||
x0 = lsmr(G, b, maxiter=1)[0]
|
||||
x = lsmr(G, b, x0=x0)[0]
|
||||
assert_array_almost_equal(x_ref, x)
|
||||
|
||||
class TestLSMRReturns:
|
||||
def setup_method(self):
|
||||
self.n = 10
|
||||
self.A = lowerBidiagonalMatrix(20,self.n)
|
||||
self.xtrue = transpose(arange(self.n,0,-1))
|
||||
self.Afun = aslinearoperator(self.A)
|
||||
self.b = self.Afun.matvec(self.xtrue)
|
||||
self.returnValues = lsmr(self.A,self.b)
|
||||
|
||||
def testNormr(self):
|
||||
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
|
||||
assert_almost_equal(normr, norm(self.b - self.Afun.matvec(x)))
|
||||
|
||||
def testNormar(self):
|
||||
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
|
||||
assert_almost_equal(normar,
|
||||
norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x))))
|
||||
|
||||
def testNormx(self):
|
||||
x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
|
||||
assert_almost_equal(normx, norm(x))
|
||||
|
||||
|
||||
def lowerBidiagonalMatrix(m, n):
|
||||
# This is a simple example for testing LSMR.
|
||||
# It uses the leading m*n submatrix from
|
||||
# A = [ 1
|
||||
# 1 2
|
||||
# 2 3
|
||||
# 3 4
|
||||
# ...
|
||||
# n ]
|
||||
# suitably padded by zeros.
|
||||
#
|
||||
# 04 Jun 2010: First version for distribution with lsmr.py
|
||||
if m <= n:
|
||||
row = hstack((arange(m, dtype=int),
|
||||
arange(1, m, dtype=int)))
|
||||
col = hstack((arange(m, dtype=int),
|
||||
arange(m-1, dtype=int)))
|
||||
data = hstack((arange(1, m+1, dtype=float),
|
||||
arange(1,m, dtype=float)))
|
||||
return coo_matrix((data, (row, col)), shape=(m,n))
|
||||
else:
|
||||
row = hstack((arange(n, dtype=int),
|
||||
arange(1, n+1, dtype=int)))
|
||||
col = hstack((arange(n, dtype=int),
|
||||
arange(n, dtype=int)))
|
||||
data = hstack((arange(1, n+1, dtype=float),
|
||||
arange(1,n+1, dtype=float)))
|
||||
return coo_matrix((data,(row, col)), shape=(m,n))
|
||||
|
||||
|
||||
def lsmrtest(m, n, damp):
|
||||
"""Verbose testing of lsmr"""
|
||||
|
||||
A = lowerBidiagonalMatrix(m,n)
|
||||
xtrue = arange(n,0,-1, dtype=float)
|
||||
Afun = aslinearoperator(A)
|
||||
|
||||
b = Afun.matvec(xtrue)
|
||||
|
||||
atol = 1.0e-7
|
||||
btol = 1.0e-7
|
||||
conlim = 1.0e+10
|
||||
itnlim = 10*n
|
||||
show = 1
|
||||
|
||||
x, istop, itn, normr, normar, norma, conda, normx \
|
||||
= lsmr(A, b, damp, atol, btol, conlim, itnlim, show)
|
||||
|
||||
j1 = min(n,5)
|
||||
j2 = max(n-4,1)
|
||||
print(' ')
|
||||
print('First elements of x:')
|
||||
str = ['%10.4f' % (xi) for xi in x[0:j1]]
|
||||
print(''.join(str))
|
||||
print(' ')
|
||||
print('Last elements of x:')
|
||||
str = ['%10.4f' % (xi) for xi in x[j2-1:]]
|
||||
print(''.join(str))
|
||||
|
||||
r = b - Afun.matvec(x)
|
||||
r2 = sqrt(norm(r)**2 + (damp*norm(x))**2)
|
||||
print(' ')
|
||||
str = 'normr (est.) %17.10e' % (normr)
|
||||
str2 = 'normr (true) %17.10e' % (r2)
|
||||
print(str)
|
||||
print(str2)
|
||||
print(' ')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
lsmrtest(20,10,0)
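# Editorial sketch, not part of the upstream file: the eight return values
# unpacked in TestLSMRReturns above.
def _demo_lsmr_returns():
    A = lowerBidiagonalMatrix(20, 10)
    rhs = aslinearoperator(A).matvec(arange(10, 0, -1, dtype=float))
    x, istop, itn, normr, normar, normA, condA, normx = lsmr(A, rhs)
    # istop in (1, 2) signals convergence; normr is ||rhs - A x||,
    # normar is ||A.T (rhs - A x)|| and normx is ||x||.
    return istop, normr, normx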
|
|
@@ -0,0 +1,136 @@
|
|||
import numpy as np
|
||||
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
|
||||
assert_array_almost_equal)
|
||||
|
||||
import scipy.sparse
|
||||
import scipy.sparse.linalg
|
||||
from scipy.sparse.linalg import lsqr
|
||||
from time import time
|
||||
|
||||
# Set up a test problem
|
||||
n = 35
|
||||
G = np.eye(n)
|
||||
normal = np.random.normal
|
||||
norm = np.linalg.norm
|
||||
|
||||
for jj in range(5):
|
||||
gg = normal(size=n)
|
||||
hh = gg * gg.T
|
||||
G += (hh + hh.T) * 0.5
|
||||
G += normal(size=n) * normal(size=n)
|
||||
|
||||
b = normal(size=n)
|
||||
|
||||
tol = 1e-10
|
||||
show = False
|
||||
maxit = None
|
||||
|
||||
|
||||
def test_basic():
|
||||
b_copy = b.copy()
|
||||
X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
|
||||
assert_(np.all(b_copy == b))
|
||||
|
||||
svx = np.linalg.solve(G, b)
|
||||
xo = X[0]
|
||||
assert_(norm(svx - xo) < 1e-5)
|
||||
|
||||
def test_gh_2466():
|
||||
row = np.array([0, 0])
|
||||
col = np.array([0, 1])
|
||||
val = np.array([1, -1])
|
||||
A = scipy.sparse.coo_matrix((val, (row, col)), shape=(1, 2))
|
||||
b = np.asarray([4])
|
||||
lsqr(A, b)
|
||||
|
||||
|
||||
def test_well_conditioned_problems():
|
||||
# Test that the sparse lsqr solver returns the right solution
|
||||
# on various problems with different random seeds.
|
||||
# This is a non-regression test for a potential ZeroDivisionError
|
||||
# raised when computing the `test2` & `test3` convergence conditions.
|
||||
n = 10
|
||||
A_sparse = scipy.sparse.eye(n, n)
|
||||
A_dense = A_sparse.toarray()
|
||||
|
||||
with np.errstate(invalid='raise'):
|
||||
for seed in range(30):
|
||||
rng = np.random.RandomState(seed + 10)
|
||||
beta = rng.rand(n)
|
||||
beta[beta == 0] = 0.00001  # ensure that no beta is exactly zero
|
||||
b = A_sparse * beta[:, np.newaxis]
|
||||
output = lsqr(A_sparse, b, show=show)
|
||||
|
||||
# Check that the termination condition corresponds to an approximate
|
||||
# solution to Ax = b
|
||||
assert_equal(output[1], 1)
|
||||
solution = output[0]
|
||||
|
||||
# Check that we recover the ground truth solution
|
||||
assert_array_almost_equal(solution, beta)
|
||||
|
||||
# Sanity check: compare to the dense array solver
|
||||
reference_solution = np.linalg.solve(A_dense, b).ravel()
|
||||
assert_array_almost_equal(solution, reference_solution)
|
||||
|
||||
|
||||
def test_b_shapes():
|
||||
# Test b being a scalar.
|
||||
A = np.array([[1.0, 2.0]])
|
||||
b = 3.0
|
||||
x = lsqr(A, b)[0]
|
||||
assert_almost_equal(norm(A.dot(x) - b), 0)
|
||||
|
||||
# Test b being a column vector.
|
||||
A = np.eye(10)
|
||||
b = np.ones((10, 1))
|
||||
x = lsqr(A, b)[0]
|
||||
assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)
|
||||
|
||||
|
||||
def test_initialization():
|
||||
# Test the default setting is the same as zeros
|
||||
b_copy = b.copy()
|
||||
x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
|
||||
x0 = np.zeros(x_ref[0].shape)
|
||||
x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
|
||||
assert_(np.all(b_copy == b))
|
||||
assert_array_almost_equal(x_ref[0], x[0])
|
||||
|
||||
# Test warm-start with single iteration
|
||||
x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
|
||||
x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
|
||||
assert_array_almost_equal(x_ref[0], x[0])
|
||||
assert_(np.all(b_copy == b))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
svx = np.linalg.solve(G, b)
|
||||
|
||||
tic = time()
|
||||
X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
|
||||
xo = X[0]
|
||||
phio = X[3]
|
||||
psio = X[7]
|
||||
k = X[2]
|
||||
chio = X[8]
|
||||
mg = np.amax(G - G.T)
|
||||
if mg > 1e-14:
|
||||
sym = 'No'
|
||||
else:
|
||||
sym = 'Yes'
|
||||
|
||||
print('LSQR')
|
||||
print("Is linear operator symmetric? " + sym)
|
||||
print("n: %3g iterations: %3g" % (n, k))
|
||||
print("Norms computed in %.2fs by LSQR" % (time() - tic))
|
||||
print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e " % (chio, phio, psio))
|
||||
print("Residual norms computed directly:")
|
||||
print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e" % (norm(xo),
|
||||
norm(G*xo - b),
|
||||
norm(G.T*(G*xo-b))))
|
||||
print("Direct solution norms:")
|
||||
print(" ||x|| %9.4e ||r|| %9.4e " % (norm(svx), norm(G*svx - b)))
|
||||
print("")
|
||||
print(" || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo))
|
||||
print("")
|
|
@@ -0,0 +1,98 @@
|
|||
import numpy as np
|
||||
from numpy.testing import assert_equal, assert_allclose, assert_
|
||||
from scipy.sparse.linalg.isolve import minres
|
||||
from scipy.linalg import norm
|
||||
|
||||
from pytest import raises as assert_raises
|
||||
from .test_iterative import assert_normclose
|
||||
|
||||
|
||||
def get_sample_problem():
|
||||
# A random 10 x 10 symmetric matrix
|
||||
np.random.seed(1234)
|
||||
matrix = np.random.rand(10, 10)
|
||||
matrix = matrix + matrix.T
|
||||
# A random vector of length 10
|
||||
vector = np.random.rand(10)
|
||||
return matrix, vector
|
||||
|
||||
|
||||
def test_singular():
|
||||
A, b = get_sample_problem()
|
||||
A[0, :] = 0
|
||||
b[0] = 0
|
||||
xp, info = minres(A, b)
|
||||
assert_equal(info, 0)
|
||||
assert_normclose(A.dot(xp), b, tol=1e-5)
|
||||
|
||||
|
||||
def test_x0_is_used_by():
|
||||
A, b = get_sample_problem()
|
||||
# Random x0 to feed minres
|
||||
np.random.seed(12345)
|
||||
x0 = np.random.rand(10)
|
||||
trace = []
|
||||
|
||||
def trace_iterates(xk):
|
||||
trace.append(xk)
|
||||
minres(A, b, x0=x0, callback=trace_iterates)
|
||||
trace_with_x0 = trace
|
||||
|
||||
trace = []
|
||||
minres(A, b, callback=trace_iterates)
|
||||
assert_(not np.array_equal(trace_with_x0[0], trace[0]))
|
||||
|
||||
|
||||
def test_shift():
|
||||
A, b = get_sample_problem()
|
||||
shift = 0.5
|
||||
shifted_A = A - shift * np.eye(10)
|
||||
x1, info1 = minres(A, b, shift=shift)
|
||||
x2, info2 = minres(shifted_A, b)
|
||||
assert_equal(info1, 0)
|
||||
assert_allclose(x1, x2, rtol=1e-5)
|
||||
|
||||
|
||||
def test_asymmetric_fail():
|
||||
"""Asymmetric matrix should raise `ValueError` when check=True"""
|
||||
A, b = get_sample_problem()
|
||||
A[1, 2] = 1
|
||||
A[2, 1] = 2
|
||||
with assert_raises(ValueError):
|
||||
xp, info = minres(A, b, check=True)
|
||||
|
||||
|
||||
def test_minres_non_default_x0():
|
||||
np.random.seed(1234)
|
||||
tol = 10**(-6)
|
||||
a = np.random.randn(5, 5)
|
||||
a = np.dot(a, a.T)
|
||||
b = np.random.randn(5)
|
||||
c = np.random.randn(5)
|
||||
x = minres(a, b, x0=c, tol=tol)[0]
|
||||
assert norm(a.dot(x) - b) < tol
|
||||
|
||||
|
||||
def test_minres_precond_non_default_x0():
|
||||
np.random.seed(12345)
|
||||
tol = 10**(-6)
|
||||
a = np.random.randn(5, 5)
|
||||
a = np.dot(a, a.T)
|
||||
b = np.random.randn(5)
|
||||
c = np.random.randn(5)
|
||||
m = np.random.randn(5, 5)
|
||||
m = np.dot(m, m.T)
|
||||
x = minres(a, b, M=m, x0=c, tol=tol)[0]
|
||||
assert norm(a.dot(x) - b) < tol
|
||||
|
||||
|
||||
def test_minres_precond_exact_x0():
|
||||
np.random.seed(1234)
|
||||
tol = 10**(-6)
|
||||
a = np.eye(10)
|
||||
b = np.ones(10)
|
||||
c = np.ones(10)
|
||||
m = np.random.randn(10, 10)
|
||||
m = np.dot(m, m.T)
|
||||
x = minres(a, b, M=m, x0=c, tol=tol)[0]
|
||||
assert norm(a.dot(x) - b) < tol
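# Editorial sketch, not part of the upstream file: the `shift` semantics
# verified by test_shift above -- minres(A, b, shift=s) solves
# (A - s*I) x = b rather than A x = b.
def _demo_shift_semantics():
    A = np.diag([2.0, 3.0, 4.0])
    b = np.ones(3)
    x, info = minres(A, b, shift=1.0, tol=1e-10)
    # equivalent to solving (A - I) x = b, so x ~= [1.0, 0.5, 1/3]
    return x, info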
|
|
@@ -0,0 +1,8 @@
|
|||
import numpy as np
|
||||
from pytest import raises as assert_raises
|
||||
|
||||
from scipy.sparse.linalg import utils
|
||||
|
||||
|
||||
def test_make_system_bad_shape():
|
||||
assert_raises(ValueError, utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4))
|
121 venv/Lib/site-packages/scipy/sparse/linalg/isolve/utils.py Normal file
|
@@ -0,0 +1,121 @@
|
|||
__docformat__ = "restructuredtext en"
|
||||
|
||||
__all__ = []
|
||||
|
||||
|
||||
from numpy import asanyarray, asarray, array, matrix, zeros
|
||||
from scipy.sparse.sputils import asmatrix
|
||||
|
||||
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \
|
||||
IdentityOperator
|
||||
|
||||
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
|
||||
('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
|
||||
('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
|
||||
('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
|
||||
('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
|
||||
('D','D'):'D'}
|
||||
|
||||
|
||||
def coerce(x,y):
|
||||
if x not in 'fdFD':
|
||||
x = 'd'
|
||||
if y not in 'fdFD':
|
||||
y = 'd'
|
||||
return _coerce_rules[x,y]
|
||||
|
||||
|
||||
def id(x):
|
||||
return x
|
||||
|
||||
|
||||
def make_system(A, M, x0, b):
|
||||
"""Make a linear system Ax=b
|
||||
|
||||
Parameters
|
||||
----------
|
||||
A : LinearOperator
|
||||
sparse or dense matrix (or any valid input to aslinearoperator)
|
||||
M : {LinearOperator, None}
|
||||
preconditioner
|
||||
sparse or dense matrix (or any valid input to aslinearoperator)
|
||||
x0 : {array_like, None}
|
||||
initial guess to iterative method
|
||||
b : array_like
|
||||
right hand side
|
||||
|
||||
Returns
|
||||
-------
|
||||
(A, M, x, b, postprocess)
|
||||
A : LinearOperator
|
||||
matrix of the linear system
|
||||
M : LinearOperator
|
||||
preconditioner
|
||||
x : rank 1 ndarray
|
||||
initial guess
|
||||
b : rank 1 ndarray
|
||||
right hand side
|
||||
postprocess : function
|
||||
converts the solution vector to the appropriate
|
||||
type and dimensions (e.g. (N,1) matrix)
|
||||
|
||||
"""
|
||||
A_ = A
|
||||
A = aslinearoperator(A)
|
||||
|
||||
if A.shape[0] != A.shape[1]:
|
||||
raise ValueError('expected square matrix, but got shape=%s' % (A.shape,))
|
||||
|
||||
N = A.shape[0]
|
||||
|
||||
b = asanyarray(b)
|
||||
|
||||
if not (b.shape == (N,1) or b.shape == (N,)):
|
||||
raise ValueError('A and b have incompatible dimensions')
|
||||
|
||||
if b.dtype.char not in 'fdFD':
|
||||
b = b.astype('d') # upcast non-FP types to double
|
||||
|
||||
def postprocess(x):
|
||||
if isinstance(b,matrix):
|
||||
x = asmatrix(x)
|
||||
return x.reshape(b.shape)
|
||||
|
||||
if hasattr(A,'dtype'):
|
||||
xtype = A.dtype.char
|
||||
else:
|
||||
xtype = A.matvec(b).dtype.char
|
||||
xtype = coerce(xtype, b.dtype.char)
|
||||
|
||||
b = asarray(b,dtype=xtype) # make b the same type as x
|
||||
b = b.ravel()
|
||||
|
||||
if x0 is None:
|
||||
x = zeros(N, dtype=xtype)
|
||||
else:
|
||||
x = array(x0, dtype=xtype)
|
||||
if not (x.shape == (N,1) or x.shape == (N,)):
|
||||
raise ValueError('A and x have incompatible dimensions')
|
||||
x = x.ravel()
|
||||
|
||||
# process preconditioner
|
||||
if M is None:
|
||||
if hasattr(A_,'psolve'):
|
||||
psolve = A_.psolve
|
||||
else:
|
||||
psolve = id
|
||||
if hasattr(A_,'rpsolve'):
|
||||
rpsolve = A_.rpsolve
|
||||
else:
|
||||
rpsolve = id
|
||||
if psolve is id and rpsolve is id:
|
||||
M = IdentityOperator(shape=A.shape, dtype=A.dtype)
|
||||
else:
|
||||
M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
|
||||
dtype=A.dtype)
|
||||
else:
|
||||
M = aslinearoperator(M)
|
||||
if A.shape != M.shape:
|
||||
raise ValueError('matrix and preconditioner have different shapes')
|
||||
|
||||
return A, M, x, b, postprocess
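# Editorial sketch, not part of the upstream module (names prefixed _demo
# are illustrative): how callers consume the tuple returned by make_system.
def _demo_make_system():
    import numpy as np
    A, M, x, b, postprocess = make_system(np.eye(3), None, None, np.ones((3, 1)))
    # A and M are LinearOperators (M defaults to an IdentityOperator here),
    # x is the zero initial guess, b has been raveled to shape (3,),
    # and postprocess() restores the right-hand side's original shape.
    return postprocess(x).shape   # (3, 1)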
|