Fixed database typo and removed unnecessary class identifier.
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions
9 binary files not shown.
@@ -0,0 +1,60 @@
import scipy.sparse.linalg as la
import scipy.io as io
import numpy as np
import sys

#problem = "SPARSKIT/drivcav/e05r0100"
problem = "SPARSKIT/drivcav/e05r0200"
#problem = "Harwell-Boeing/sherman/sherman1"
#problem = "misc/hamm/add32"

mm = np.lib._datasource.Repository('ftp://math.nist.gov/pub/MatrixMarket2/')
f = mm.open('%s.mtx.gz' % problem)
Am = io.mmread(f).tocsr()
f.close()

f = mm.open('%s_rhs1.mtx.gz' % problem)
b = np.array(io.mmread(f)).ravel()
f.close()

count = [0]


def matvec(v):
    count[0] += 1
    sys.stderr.write('%d\r' % count[0])
    return Am*v


A = la.LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)

M = 100

print("MatrixMarket problem %s" % problem)
print("Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz))

count[0] = 0
x0, info = la.gmres(A, b, restrt=M, tol=1e-14)
count_0 = count[0]
err0 = np.linalg.norm(Am*x0 - b) / np.linalg.norm(b)
print("GMRES(%d):" % M, count_0, "matvecs, residual", err0)
if info != 0:
    print("Didn't converge")

count[0] = 0
x1, info = la.lgmres(A, b, inner_m=M-6*2, outer_k=6, tol=1e-14)
count_1 = count[0]
err1 = np.linalg.norm(Am*x1 - b) / np.linalg.norm(b)
print("LGMRES(%d,6) [same memory req.]:" % (M-2*6), count_1,
      "matvecs, residual:", err1)
if info != 0:
    print("Didn't converge")

count[0] = 0
x2, info = la.lgmres(A, b, inner_m=M-6, outer_k=6, tol=1e-14)
count_2 = count[0]
err2 = np.linalg.norm(Am*x2 - b) / np.linalg.norm(b)
print("LGMRES(%d,6) [same subspace size]:" % (M-6), count_2,
      "matvecs, residual:", err2)
if info != 0:
    print("Didn't converge")
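The script above needs FTP access to the MatrixMarket collection. As a rough usage sketch (not part of this commit), the same GMRES-versus-LGMRES matvec comparison can be run on a locally generated sparse system through the public scipy.sparse.linalg interface; the matrix size, density, and solver parameters below are arbitrary choices for illustration.

# Illustrative sketch only -- mirrors the comparison above without network access.
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sla

np.random.seed(0)
n = 500
Am = (sp.eye(n) + sp.rand(n, n, density=5e-3, random_state=0)).tocsr()
b = np.random.rand(n)

count = [0]

def matvec(v):
    # Count matrix-vector products, as the demo script does.
    count[0] += 1
    return Am @ v

A = sla.LinearOperator(shape=Am.shape, matvec=matvec, dtype=Am.dtype)

count[0] = 0
x0, info = sla.gmres(A, b, restart=30, tol=1e-10, atol=0)
print("GMRES(30):", count[0], "matvecs, residual",
      np.linalg.norm(Am @ x0 - b) / np.linalg.norm(b))

count[0] = 0
x1, info = sla.lgmres(A, b, inner_m=24, outer_k=3, tol=1e-10, atol=0)
print("LGMRES(24,3):", count[0], "matvecs, residual",
      np.linalg.norm(Am @ x1 - b) / np.linalg.norm(b))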
@@ -0,0 +1,165 @@
#!/usr/bin/env python
"""Tests for the linalg.isolve.gcrotmk module
"""

from numpy.testing import (assert_, assert_allclose, assert_equal,
                           suppress_warnings)

import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand

from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import gcrotmk, gmres


Am = csr_matrix(array([[-2,1,0,0,0,9],
                       [1,-2,1,0,5,0],
                       [0,1,-2,1,0,0],
                       [0,0,1,-2,1,0],
                       [0,3,0,1,-2,1],
                       [1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
count = [0]


def matvec(v):
    count[0] += 1
    return Am*v


A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)


def do_solve(**kw):
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
    return x0, count_0


class TestGCROTMK(object):
    def test_preconditioner(self):
        # Check that preconditioning works
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)

        x0, count_0 = do_solve()
        x1, count_1 = do_solve(M=M)

        assert_equal(count_1, 3)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    def test_arnoldi(self):
        np.random.seed(1)

        A = eye(2000) + rand(2000, 2000, density=5e-4)
        b = np.random.rand(2000)

        # The inner arnoldi should be equivalent to gmres
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)

        assert_equal(flag0, 1)
        assert_equal(flag1, 1)
        assert np.linalg.norm(A.dot(x0) - b) > 1e-3

        assert_allclose(x0, x1)

    def test_cornercase(self):
        np.random.seed(1234)

        # Rounding error may prevent convergence with tol=0 --- ensure
        # that the return values in this case are correct, and no
        # exceptions are raised

        for n in [3, 5, 10, 100]:
            A = 2*eye(n)

            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                b = np.ones(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = gcrotmk(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                b = np.random.rand(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = gcrotmk(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

    def test_nans(self):
        A = eye(3, format='lil')
        A[1,1] = np.nan
        b = np.ones(3)

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = gcrotmk(A, b, tol=0, maxiter=10)
            assert_equal(info, 1)

    def test_truncate(self):
        np.random.seed(1234)
        A = np.random.rand(30, 30) + np.eye(30)
        b = np.random.rand(30)

        for truncate in ['oldest', 'smallest']:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,
                                  maxiter=200)
            assert_equal(info, 0)
            assert_allclose(A.dot(x) - b, 0, atol=1e-3)

    def test_CU(self):
        for discard_C in (True, False):
            # Check that C,U behave as expected
            CU = []
            x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
            assert_(len(CU) > 0)
            assert_(len(CU) <= 6)

            if discard_C:
                for c, u in CU:
                    assert_(c is None)

            # should converge immediately
            x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
            if discard_C:
                assert_equal(count_1, 2 + len(CU))
            else:
                assert_equal(count_1, 3)
            assert_(count_1 <= count_0/2)
            assert_allclose(x1, x0, atol=1e-14)

    def test_denormals(self):
        # Check that no warnings are emitted if the matrix contains
        # numbers for which 1/x has no float representation, and that
        # the solver behaves properly.
        A = np.array([[1, 2], [3, 4]], dtype=float)
        A *= 100 * np.nextafter(0, 1)

        b = np.array([1, 1])

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = gcrotmk(A, b)

        if info == 0:
            assert_allclose(A.dot(xp), b)
@@ -0,0 +1,726 @@
""" Test functions for the sparse.linalg.isolve module
"""

import itertools
import platform
import numpy as np

from numpy.testing import (assert_equal, assert_array_equal,
                           assert_, assert_allclose, suppress_warnings)
import pytest
from pytest import raises as assert_raises

from numpy import zeros, arange, array, ones, eye, iscomplexobj
from scipy.linalg import norm
from scipy.sparse import spdiags, csr_matrix, SparseEfficiencyWarning

from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk

# TODO check that method preserve shape and type
# TODO test both preconditioner methods


class Case(object):
    def __init__(self, name, A, b=None, skip=None, nonconvergence=None):
        self.name = name
        self.A = A
        if b is None:
            self.b = arange(A.shape[0], dtype=float)
        else:
            self.b = b
        if skip is None:
            self.skip = []
        else:
            self.skip = skip
        if nonconvergence is None:
            self.nonconvergence = []
        else:
            self.nonconvergence = nonconvergence

    def __repr__(self):
        return "<%s>" % self.name


class IterativeParams(object):
    def __init__(self):
        # list of tuples (solver, symmetric, positive_definite )
        solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk]
        sym_solvers = [minres, cg]
        posdef_solvers = [cg]
        real_solvers = [minres]

        self.solvers = solvers

        # list of tuples (A, symmetric, positive_definite )
        self.cases = []

        # Symmetric and Positive Definite
        N = 40
        data = ones((3,N))
        data[0,:] = 2
        data[1,:] = -1
        data[2,:] = -1
        Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr')
        self.Poisson1D = Case("poisson1d", Poisson1D)
        self.cases.append(Case("poisson1d", Poisson1D))
        # note: minres fails for single precision
        self.cases.append(Case("poisson1d", Poisson1D.astype('f'),
                               skip=[minres]))

        # Symmetric and Negative Definite
        self.cases.append(Case("neg-poisson1d", -Poisson1D,
                               skip=posdef_solvers))
        # note: minres fails for single precision
        self.cases.append(Case("neg-poisson1d", (-Poisson1D).astype('f'),
                               skip=posdef_solvers + [minres]))

        # Symmetric and Indefinite
        data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d')
        RandDiag = spdiags(data, [0], 10, 10, format='csr')
        self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
        self.cases.append(Case("rand-diag", RandDiag.astype('f'),
                               skip=posdef_solvers))

        # Random real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        self.cases.append(Case("rand", data, skip=posdef_solvers+sym_solvers))
        self.cases.append(Case("rand", data.astype('f'),
                               skip=posdef_solvers+sym_solvers))

        # Random symmetric real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        data = data + data.T
        self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
        self.cases.append(Case("rand-sym", data.astype('f'),
                               skip=posdef_solvers))

        # Random pos-def symmetric real
        np.random.seed(1234)
        data = np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-sym-pd", data))
        # note: minres fails for single precision
        self.cases.append(Case("rand-sym-pd", data.astype('f'),
                               skip=[minres]))

        # Random complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
        self.cases.append(Case("rand-cmplx", data,
                               skip=posdef_solvers+sym_solvers+real_solvers))
        self.cases.append(Case("rand-cmplx", data.astype('F'),
                               skip=posdef_solvers+sym_solvers+real_solvers))

        # Random hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
        data = data + data.T.conj()
        self.cases.append(Case("rand-cmplx-herm", data,
                               skip=posdef_solvers+real_solvers))
        self.cases.append(Case("rand-cmplx-herm", data.astype('F'),
                               skip=posdef_solvers+real_solvers))

        # Random pos-def hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
        self.cases.append(Case("rand-cmplx-sym-pd", data.astype('F'),
                               skip=real_solvers))

        # Non-symmetric and Positive Definite
        #
        # cgs, qmr, and bicg fail to converge on this one
        # -- algorithmic limitation apparently
        data = ones((2,10))
        data[0,:] = 2
        data[1,:] = -1
        A = spdiags(data, [0,-1], 10, 10, format='csr')
        self.cases.append(Case("nonsymposdef", A,
                               skip=sym_solvers+[cgs, qmr, bicg]))
        self.cases.append(Case("nonsymposdef", A.astype('F'),
                               skip=sym_solvers+[cgs, qmr, bicg]))

        # Symmetric, non-pd, hitting cgs/bicg/bicgstab/qmr breakdown
        A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0],
                      [0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1],
                      [1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0],
                      [-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0],
                      [0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0],
                      [0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0],
                      [0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0],
                      [0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype=float)
        b = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float)
        assert (A == A.T).all()
        self.cases.append(Case("sym-nonpd", A, b,
                               skip=posdef_solvers,
                               nonconvergence=[cgs,bicg,bicgstab,qmr]))


params = IterativeParams()


def check_maxiter(solver, case):
    A = case.A
    tol = 1e-12

    b = case.b
    x0 = 0*b

    residuals = []

    def callback(x):
        residuals.append(norm(b - case.A*x))

    x, info = solver(A, b, x0=x0, tol=tol, maxiter=1, callback=callback)

    assert_equal(len(residuals), 1)
    assert_equal(info, 1)


def test_maxiter():
    case = params.Poisson1D
    for solver in params.solvers:
        if solver in case.skip:
            continue
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            check_maxiter(solver, case)


def assert_normclose(a, b, tol=1e-8):
    residual = norm(a - b)
    tolerance = tol*norm(b)
    msg = "residual (%g) not smaller than tolerance %g" % (residual, tolerance)
    assert_(residual < tolerance, msg=msg)


def check_convergence(solver, case):
    A = case.A

    if A.dtype.char in "dD":
        tol = 1e-8
    else:
        tol = 1e-2

    b = case.b
    x0 = 0*b

    x, info = solver(A, b, x0=x0, tol=tol)

    assert_array_equal(x0, 0*b)  # ensure that x0 is not overwritten
    if solver not in case.nonconvergence:
        assert_equal(info,0)
        assert_normclose(A.dot(x), b, tol=tol)
    else:
        assert_(info != 0)
        assert_(np.linalg.norm(A.dot(x) - b) <= np.linalg.norm(b))


def test_convergence():
    for solver in params.solvers:
        for case in params.cases:
            if solver in case.skip:
                continue
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                check_convergence(solver, case)


def check_precond_dummy(solver, case):
    tol = 1e-8

    def identity(b,which=None):
        """trivial preconditioner"""
        return b

    A = case.A

    M,N = A.shape
    spdiags([1.0/A.diagonal()], [0], M, N)

    b = case.b
    x0 = 0*b

    precond = LinearOperator(A.shape, identity, rmatvec=identity)

    if solver is qmr:
        x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol)
    else:
        x, info = solver(A, b, M=precond, x0=x0, tol=tol)
    assert_equal(info,0)
    assert_normclose(A.dot(x), b, tol)

    A = aslinearoperator(A)
    A.psolve = identity
    A.rpsolve = identity

    x, info = solver(A, b, x0=x0, tol=tol)
    assert_equal(info,0)
    assert_normclose(A*x, b, tol=tol)


def test_precond_dummy():
    case = params.Poisson1D
    for solver in params.solvers:
        if solver in case.skip:
            continue
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            check_precond_dummy(solver, case)


def check_precond_inverse(solver, case):
    tol = 1e-8

    def inverse(b,which=None):
        """inverse preconditioner"""
        A = case.A
        if not isinstance(A, np.ndarray):
            A = A.todense()
        return np.linalg.solve(A, b)

    def rinverse(b,which=None):
        """inverse preconditioner"""
        A = case.A
        if not isinstance(A, np.ndarray):
            A = A.todense()
        return np.linalg.solve(A.T, b)

    matvec_count = [0]

    def matvec(b):
        matvec_count[0] += 1
        return case.A.dot(b)

    def rmatvec(b):
        matvec_count[0] += 1
        return case.A.T.dot(b)

    b = case.b
    x0 = 0*b

    A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec)
    precond = LinearOperator(case.A.shape, inverse, rmatvec=rinverse)

    # Solve with preconditioner
    matvec_count = [0]
    x, info = solver(A, b, M=precond, x0=x0, tol=tol)

    assert_equal(info, 0)
    assert_normclose(case.A.dot(x), b, tol)

    # Solution should be nearly instant
    assert_(matvec_count[0] <= 3, repr(matvec_count))


def test_precond_inverse():
    case = params.Poisson1D
    for solver in params.solvers:
        if solver in case.skip:
            continue
        if solver is qmr:
            continue
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            check_precond_inverse(solver, case)


def test_gmres_basic():
    A = np.vander(np.arange(10) + 1)[:, ::-1]
    b = np.zeros(10)
    b[0] = 1
    np.linalg.solve(A, b)

    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x_gm, err = gmres(A, b, restart=5, maxiter=1)

    assert_allclose(x_gm[0], 0.359, rtol=1e-2)


def test_reentrancy():
    non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr]
    reentrant = [lgmres, minres, gcrotmk]
    for solver in reentrant + non_reentrant:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            _check_reentrancy(solver, solver in reentrant)


def _check_reentrancy(solver, is_reentrant):
    def matvec(x):
        A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
        y, info = solver(A, x)
        assert_equal(info, 0)
        return y
    b = np.array([1, 1./2, 1./3])
    op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
                        dtype=b.dtype)

    if not is_reentrant:
        assert_raises(RuntimeError, solver, op, b)
    else:
        y, info = solver(op, b)
        assert_equal(info, 0)
        assert_allclose(y, [1, 1, 1])


@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, lgmres, gcrotmk])
def test_atol(solver):
    # TODO: minres. It didn't historically use absolute tolerances, so
    # fixing it is less urgent.

    np.random.seed(1234)
    A = np.random.rand(10, 10)
    A = A.dot(A.T) + 10 * np.eye(10)
    b = 1e3 * np.random.rand(10)
    b_norm = np.linalg.norm(b)

    tols = np.r_[0, np.logspace(np.log10(1e-10), np.log10(1e2), 7), np.inf]

    # Check effect of badly scaled preconditioners
    M0 = np.random.randn(10, 10)
    M0 = M0.dot(M0.T)
    Ms = [None, 1e-6 * M0, 1e6 * M0]

    for M, tol, atol in itertools.product(Ms, tols, tols):
        if tol == 0 and atol == 0:
            continue

        if solver is qmr:
            if M is not None:
                M = aslinearoperator(M)
                M2 = aslinearoperator(np.eye(10))
            else:
                M2 = None
            x, info = solver(A, b, M1=M, M2=M2, tol=tol, atol=atol)
        else:
            x, info = solver(A, b, M=M, tol=tol, atol=atol)
        assert_equal(info, 0)

        residual = A.dot(x) - b
        err = np.linalg.norm(residual)
        atol2 = tol * b_norm
        assert_(err <= max(atol, atol2))


@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk])
def test_zero_rhs(solver):
    np.random.seed(1234)
    A = np.random.rand(10, 10)
    A = A.dot(A.T) + 10 * np.eye(10)

    b = np.zeros(10)
    tols = np.r_[np.logspace(np.log10(1e-10), np.log10(1e2), 7)]

    for tol in tols:
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")

            x, info = solver(A, b, tol=tol)
            assert_equal(info, 0)
            assert_allclose(x, 0, atol=1e-15)

            x, info = solver(A, b, tol=tol, x0=ones(10))
            assert_equal(info, 0)
            assert_allclose(x, 0, atol=tol)

            if solver is not minres:
                x, info = solver(A, b, tol=tol, atol=0, x0=ones(10))
                if info == 0:
                    assert_allclose(x, 0)

                x, info = solver(A, b, tol=tol, atol=tol)
                assert_equal(info, 0)
                assert_allclose(x, 0, atol=1e-300)

                x, info = solver(A, b, tol=tol, atol=0)
                assert_equal(info, 0)
                assert_allclose(x, 0, atol=1e-300)


@pytest.mark.parametrize("solver", [
    gmres, qmr,
    pytest.param(lgmres, marks=pytest.mark.xfail(platform.machine() == 'ppc64le',
                                                 reason="fails on ppc64le")),
    pytest.param(cgs, marks=pytest.mark.xfail),
    pytest.param(bicg, marks=pytest.mark.xfail),
    pytest.param(bicgstab, marks=pytest.mark.xfail),
    pytest.param(gcrotmk, marks=pytest.mark.xfail)])
def test_maxiter_worsening(solver):
    # Check error does not grow (boundlessly) with increasing maxiter.
    # This can occur due to the solvers hitting close to breakdown,
    # which they should detect and halt as necessary.
    # cf. gh-9100

    # Singular matrix, rhs numerically not in range
    A = np.array([[-0.1112795288033378, 0, 0, 0.16127952880333685],
                  [0, -0.13627952880333782+6.283185307179586j, 0, 0],
                  [0, 0, -0.13627952880333782-6.283185307179586j, 0],
                  [0.1112795288033368, 0j, 0j, -0.16127952880333785]])
    v = np.ones(4)
    best_error = np.inf
    tol = 7 if platform.machine() == 'aarch64' else 5

    for maxiter in range(1, 20):
        x, info = solver(A, v, maxiter=maxiter, tol=1e-8, atol=0)

        if info == 0:
            assert_(np.linalg.norm(A.dot(x) - v) <= 1e-8*np.linalg.norm(v))

        error = np.linalg.norm(A.dot(x) - v)
        best_error = min(best_error, error)

        # Check with slack
        assert_(error <= tol*best_error)


@pytest.mark.parametrize("solver", [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres, gcrotmk])
def test_x0_working(solver):
    # Easy problem
    np.random.seed(1)
    n = 10
    A = np.random.rand(n, n)
    A = A.dot(A.T)
    b = np.random.rand(n)
    x0 = np.random.rand(n)

    if solver is minres:
        kw = dict(tol=1e-6)
    else:
        kw = dict(atol=0, tol=1e-6)

    x, info = solver(A, b, **kw)
    assert_equal(info, 0)
    assert_(np.linalg.norm(A.dot(x) - b) <= 1e-6*np.linalg.norm(b))

    x, info = solver(A, b, x0=x0, **kw)
    assert_equal(info, 0)
    assert_(np.linalg.norm(A.dot(x) - b) <= 1e-6*np.linalg.norm(b))


#------------------------------------------------------------------------------

class TestQMR(object):
    def test_leftright_precond(self):
        """Check that QMR works with left and right preconditioners"""

        from scipy.sparse.linalg.dsolve import splu
        from scipy.sparse.linalg.interface import LinearOperator

        n = 100

        dat = ones(n)
        A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1],n,n)
        b = arange(n,dtype='d')

        L = spdiags([-dat/2, dat], [-1,0], n, n)
        U = spdiags([4*dat, -dat], [0,1], n, n)

        with suppress_warnings() as sup:
            sup.filter(SparseEfficiencyWarning, "splu requires CSC matrix format")
            L_solver = splu(L)
            U_solver = splu(U)

        def L_solve(b):
            return L_solver.solve(b)

        def U_solve(b):
            return U_solver.solve(b)

        def LT_solve(b):
            return L_solver.solve(b,'T')

        def UT_solve(b):
            return U_solver.solve(b,'T')

        M1 = LinearOperator((n,n), matvec=L_solve, rmatvec=LT_solve)
        M2 = LinearOperator((n,n), matvec=U_solve, rmatvec=UT_solve)

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)

        assert_equal(info,0)
        assert_normclose(A*x, b, tol=1e-8)


class TestGMRES(object):
    def test_callback(self):

        def store_residual(r, rvec):
            rvec[rvec.nonzero()[0].max()+1] = r

        # Define, A,b
        A = csr_matrix(array([[-2,1,0,0,0,0],[1,-2,1,0,0,0],[0,1,-2,1,0,0],[0,0,1,-2,1,0],[0,0,0,1,-2,1],[0,0,0,0,1,-2]]))
        b = ones((A.shape[0],))
        maxiter = 1
        rvec = zeros(maxiter+1)
        rvec[0] = 1.0
        callback = lambda r:store_residual(r, rvec)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x,flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback)

        # Expected output from SciPy 1.0.0
        assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)

        # Test preconditioned callback
        M = 1e-3 * np.eye(A.shape[0])
        rvec = zeros(maxiter+1)
        rvec[0] = 1.0
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, flag = gmres(A, b, M=M, tol=1e-16, maxiter=maxiter, callback=callback)

        # Expected output from SciPy 1.0.0 (callback has preconditioned residual!)
        assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]), rtol=1e-10)

    def test_abi(self):
        # Check we don't segfault on gmres with complex argument
        A = eye(2)
        b = ones(2)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            r_x, r_info = gmres(A, b)
            r_x = r_x.astype(complex)

            x, info = gmres(A.astype(complex), b.astype(complex))

        assert_(iscomplexobj(x))
        assert_allclose(r_x, x)
        assert_(r_info == info)

    def test_atol_legacy(self):
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")

            # Check the strange legacy behavior: the tolerance is interpreted
            # as atol, but only for the initial residual
            A = eye(2)
            b = 1e-6 * ones(2)
            x, info = gmres(A, b, tol=1e-5)
            assert_array_equal(x, np.zeros(2))

            A = eye(2)
            b = ones(2)
            x, info = gmres(A, b, tol=1e-5)
            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-5*np.linalg.norm(b))
            assert_allclose(x, b, atol=0, rtol=1e-8)

            rndm = np.random.RandomState(12345)
            A = rndm.rand(30, 30)
            b = 1e-6 * ones(30)
            x, info = gmres(A, b, tol=1e-7, restart=20)
            assert_(np.linalg.norm(A.dot(x) - b) > 1e-7)

        A = eye(2)
        b = 1e-10 * ones(2)
        x, info = gmres(A, b, tol=1e-8, atol=0)
        assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))

    def test_defective_precond_breakdown(self):
        # Breakdown due to defective preconditioner
        M = np.eye(3)
        M[2,2] = 0

        b = np.array([0, 1, 1])
        x = np.array([1, 0, 0])
        A = np.diag([2, 3, 4])

        x, info = gmres(A, b, x0=x, M=M, tol=1e-15, atol=0)

        # Should not return nans, nor terminate with false success
        assert_(not np.isnan(x).any())
        if info == 0:
            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-15*np.linalg.norm(b))

        # The solution should be OK outside null space of M
        assert_allclose(M.dot(A.dot(x)), M.dot(b))

    def test_defective_matrix_breakdown(self):
        # Breakdown due to defective matrix
        A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
        b = np.array([1, 0, 1])
        x, info = gmres(A, b, tol=1e-8, atol=0)

        # Should not return nans, nor terminate with false success
        assert_(not np.isnan(x).any())
        if info == 0:
            assert_(np.linalg.norm(A.dot(x) - b) <= 1e-8*np.linalg.norm(b))

        # The solution should be OK outside null space of A
        assert_allclose(A.dot(A.dot(x)), A.dot(b))

    def test_callback_type(self):
        # The legacy callback type changes meaning of 'maxiter'
        np.random.seed(1)
        A = np.random.rand(20, 20)
        b = np.random.rand(20)

        cb_count = [0]

        def pr_norm_cb(r):
            cb_count[0] += 1
            assert_(isinstance(r, float))

        def x_cb(x):
            cb_count[0] += 1
            assert_(isinstance(x, np.ndarray))

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            # 2 iterations is not enough to solve the problem
            cb_count = [0]
            x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50)
        assert info == 2
        assert cb_count[0] == 2

        # With `callback_type` specified, no warning should be raised
        cb_count = [0]
        x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50,
                        callback_type='legacy')
        assert info == 2
        assert cb_count[0] == 2

        # 2 restart cycles is enough to solve the problem
        cb_count = [0]
        x, info = gmres(A, b, tol=1e-6, atol=0, callback=pr_norm_cb, maxiter=2, restart=50,
                        callback_type='pr_norm')
        assert info == 0
        assert cb_count[0] > 2

        # 2 restart cycles is enough to solve the problem
        cb_count = [0]
        x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=2, restart=50,
                        callback_type='x')
        assert info == 0
        assert cb_count[0] == 2

    def test_callback_x_monotonic(self):
        # Check that callback_type='x' gives monotonic norm decrease
        np.random.seed(1)
        A = np.random.rand(20, 20) + np.eye(20)
        b = np.random.rand(20)

        prev_r = [np.inf]
        count = [0]

        def x_cb(x):
            r = np.linalg.norm(A.dot(x) - b)
            assert r <= prev_r[0]
            prev_r[0] = r
            count[0] += 1

        x, info = gmres(A, b, tol=1e-6, atol=0, callback=x_cb, maxiter=20, restart=10,
                        callback_type='x')
        assert info == 20
        assert count[0] == 21
        x_cb(x)
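As a side note on the criterion test_atol exercises: the solvers are expected to stop once norm(b - A@x) <= max(atol, tol*norm(b)). Below is a minimal sketch (not from this commit) of checking that condition with a single solver; cg, the seed, sizes, and tolerances are arbitrary choices.

# Minimal sketch of the tolerance semantics exercised by test_atol above.
import numpy as np
from scipy.sparse.linalg import cg

np.random.seed(0)
A = np.random.rand(10, 10)
A = A.dot(A.T) + 10 * np.eye(10)   # symmetric positive definite
b = 1e3 * np.random.rand(10)

tol, atol = 1e-8, 1e-3
x, info = cg(A, b, tol=tol, atol=atol)
assert info == 0
residual = np.linalg.norm(A.dot(x) - b)
# Termination target checked by the tests above:
assert residual <= max(atol, tol * np.linalg.norm(b))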
@@ -0,0 +1,212 @@
"""Tests for the linalg.isolve.lgmres module
"""

from numpy.testing import (assert_, assert_allclose, assert_equal,
                           suppress_warnings)

import pytest
from platform import python_implementation

import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand

from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import lgmres, gmres


Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
                       [1, -2, 1, 0, 5, 0],
                       [0, 1, -2, 1, 0, 0],
                       [0, 0, 1, -2, 1, 0],
                       [0, 3, 0, 1, -2, 1],
                       [1, 0, 0, 0, 1, -2]]))
b = array([1, 2, 3, 4, 5, 6])
count = [0]


def matvec(v):
    count[0] += 1
    return Am*v


A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)


def do_solve(**kw):
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
                          inner_m=6, tol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A*x0, b, rtol=1e-12, atol=1e-12), norm(A*x0-b))
    return x0, count_0


class TestLGMRES(object):
    def test_preconditioner(self):
        # Check that preconditioning works
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)

        x0, count_0 = do_solve()
        x1, count_1 = do_solve(M=M)

        assert_(count_1 == 3)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    def test_outer_v(self):
        # Check that the augmentation vectors behave as expected

        outer_v = []
        x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
        assert_(len(outer_v) > 0)
        assert_(len(outer_v) <= 6)

        x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
                               prepend_outer_v=True)
        assert_(count_1 == 2, count_1)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

        # ---

        outer_v = []
        x0, count_0 = do_solve(outer_k=6, outer_v=outer_v,
                               store_outer_Av=False)
        assert_(array([v[1] is None for v in outer_v]).all())
        assert_(len(outer_v) > 0)
        assert_(len(outer_v) <= 6)

        x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
                               prepend_outer_v=True)
        assert_(count_1 == 3, count_1)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    @pytest.mark.skipif(python_implementation() == 'PyPy',
                        reason="Fails on PyPy CI runs. See #9507")
    def test_arnoldi(self):
        np.random.rand(1234)

        A = eye(2000) + rand(2000, 2000, density=5e-4)
        b = np.random.rand(2000)

        # The inner arnoldi should be equivalent to gmres
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]),
                               inner_m=15, maxiter=1)
            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]),
                              restart=15, maxiter=1)

        assert_equal(flag0, 1)
        assert_equal(flag1, 1)
        assert_(np.linalg.norm(A.dot(x0) - b) > 4e-4)

        assert_allclose(x0, x1)

    def test_cornercase(self):
        np.random.seed(1234)

        # Rounding error may prevent convergence with tol=0 --- ensure
        # that the return values in this case are correct, and no
        # exceptions are raised

        for n in [3, 5, 10, 100]:
            A = 2*eye(n)

            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")

                b = np.ones(n)
                x, info = lgmres(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = lgmres(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                b = np.random.rand(n)
                x, info = lgmres(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = lgmres(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

    def test_nans(self):
        A = eye(3, format='lil')
        A[1, 1] = np.nan
        b = np.ones(3)

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = lgmres(A, b, tol=0, maxiter=10)
            assert_equal(info, 1)

    def test_breakdown_with_outer_v(self):
        A = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([1, 2])

        x = np.linalg.solve(A, b)
        v0 = np.array([1, 0])

        # The inner iteration should converge to the correct solution,
        # since it's in the outer vector list
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1)

        assert_allclose(xp, x, atol=1e-12)

    def test_breakdown_underdetermined(self):
        # Should find LSQ solution in the Krylov span in one inner
        # iteration, despite solver breakdown from nilpotent A.
        A = np.array([[0, 1, 1, 1],
                      [0, 0, 1, 1],
                      [0, 0, 0, 1],
                      [0, 0, 0, 0]], dtype=float)

        bs = [
            np.array([1, 1, 1, 1]),
            np.array([1, 1, 1, 0]),
            np.array([1, 1, 0, 0]),
            np.array([1, 0, 0, 0]),
        ]

        for b in bs:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                xp, info = lgmres(A, b, maxiter=1)
            resp = np.linalg.norm(A.dot(xp) - b)

            K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
            y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
            x = K.dot(y)
            res = np.linalg.norm(A.dot(x) - b)

            assert_allclose(resp, res, err_msg=repr(b))

    def test_denormals(self):
        # Check that no warnings are emitted if the matrix contains
        # numbers for which 1/x has no float representation, and that
        # the solver behaves properly.
        A = np.array([[1, 2], [3, 4]], dtype=float)
        A *= 100 * np.nextafter(0, 1)

        b = np.array([1, 1])

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = lgmres(A, b)

        if info == 0:
            assert_allclose(A.dot(xp), b)
@@ -0,0 +1,203 @@
"""
Copyright (C) 2010 David Fong and Michael Saunders
Distributed under the same license as SciPy

Testing Code for LSMR.

03 Jun 2010: First version release with lsmr.py

David Chin-lung Fong            clfong@stanford.edu
Institute for Computational and Mathematical Engineering
Stanford University

Michael Saunders                saunders@stanford.edu
Systems Optimization Laboratory
Dept of MS&E, Stanford University.

"""

from numpy import array, arange, eye, zeros, ones, sqrt, transpose, hstack
from numpy.linalg import norm
from numpy.testing import (assert_almost_equal,
                           assert_array_almost_equal)

from scipy.sparse import coo_matrix
from scipy.sparse.linalg.interface import aslinearoperator
from scipy.sparse.linalg import lsmr
from .test_lsqr import G, b


class TestLSMR:
    def setup_method(self):
        self.n = 10
        self.m = 10

    def assertCompatibleSystem(self, A, xtrue):
        Afun = aslinearoperator(A)
        b = Afun.matvec(xtrue)
        x = lsmr(A, b)[0]
        assert_almost_equal(norm(x - xtrue), 0, decimal=5)

    def testIdentityACase1(self):
        A = eye(self.n)
        xtrue = zeros((self.n, 1))
        self.assertCompatibleSystem(A, xtrue)

    def testIdentityACase2(self):
        A = eye(self.n)
        xtrue = ones((self.n,1))
        self.assertCompatibleSystem(A, xtrue)

    def testIdentityACase3(self):
        A = eye(self.n)
        xtrue = transpose(arange(self.n,0,-1))
        self.assertCompatibleSystem(A, xtrue)

    def testBidiagonalA(self):
        A = lowerBidiagonalMatrix(20,self.n)
        xtrue = transpose(arange(self.n,0,-1))
        self.assertCompatibleSystem(A,xtrue)

    def testScalarB(self):
        A = array([[1.0, 2.0]])
        b = 3.0
        x = lsmr(A, b)[0]
        assert_almost_equal(norm(A.dot(x) - b), 0)

    def testComplexX(self):
        A = eye(self.n)
        xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
        self.assertCompatibleSystem(A, xtrue)

    def testComplexX0(self):
        A = 4 * eye(self.n) + ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1))
        b = aslinearoperator(A).matvec(xtrue)
        x0 = zeros(self.n, dtype=complex)
        x = lsmr(A, b, x0=x0)[0]
        assert_almost_equal(norm(x - xtrue), 0, decimal=5)

    def testComplexA(self):
        A = 4 * eye(self.n) + 1j * ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1).astype(complex))
        self.assertCompatibleSystem(A, xtrue)

    def testComplexB(self):
        A = 4 * eye(self.n) + ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
        b = aslinearoperator(A).matvec(xtrue)
        x = lsmr(A, b)[0]
        assert_almost_equal(norm(x - xtrue), 0, decimal=5)

    def testColumnB(self):
        A = eye(self.n)
        b = ones((self.n, 1))
        x = lsmr(A, b)[0]
        assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)

    def testInitialization(self):
        # Test that the default setting is not modified
        x_ref = lsmr(G, b)[0]
        x0 = zeros(b.shape)
        x = lsmr(G, b, x0=x0)[0]
        assert_array_almost_equal(x_ref, x)

        # Test warm-start with single iteration
        x0 = lsmr(G, b, maxiter=1)[0]
        x = lsmr(G, b, x0=x0)[0]
        assert_array_almost_equal(x_ref, x)


class TestLSMRReturns:
    def setup_method(self):
        self.n = 10
        self.A = lowerBidiagonalMatrix(20,self.n)
        self.xtrue = transpose(arange(self.n,0,-1))
        self.Afun = aslinearoperator(self.A)
        self.b = self.Afun.matvec(self.xtrue)
        self.returnValues = lsmr(self.A,self.b)

    def testNormr(self):
        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
        assert_almost_equal(normr, norm(self.b - self.Afun.matvec(x)))

    def testNormar(self):
        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
        assert_almost_equal(normar,
                            norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x))))

    def testNormx(self):
        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
        assert_almost_equal(normx, norm(x))


def lowerBidiagonalMatrix(m, n):
    # This is a simple example for testing LSMR.
    # It uses the leading m*n submatrix from
    # A = [ 1
    #       1 2
    #         2 3
    #           3 4
    #             ...
    #               n ]
    # suitably padded by zeros.
    #
    # 04 Jun 2010: First version for distribution with lsmr.py
    if m <= n:
        row = hstack((arange(m, dtype=int),
                      arange(1, m, dtype=int)))
        col = hstack((arange(m, dtype=int),
                      arange(m-1, dtype=int)))
        data = hstack((arange(1, m+1, dtype=float),
                       arange(1,m, dtype=float)))
        return coo_matrix((data, (row, col)), shape=(m,n))
    else:
        row = hstack((arange(n, dtype=int),
                      arange(1, n+1, dtype=int)))
        col = hstack((arange(n, dtype=int),
                      arange(n, dtype=int)))
        data = hstack((arange(1, n+1, dtype=float),
                       arange(1,n+1, dtype=float)))
        return coo_matrix((data,(row, col)), shape=(m,n))


def lsmrtest(m, n, damp):
    """Verbose testing of lsmr"""

    A = lowerBidiagonalMatrix(m,n)
    xtrue = arange(n,0,-1, dtype=float)
    Afun = aslinearoperator(A)

    b = Afun.matvec(xtrue)

    atol = 1.0e-7
    btol = 1.0e-7
    conlim = 1.0e+10
    itnlim = 10*n
    show = 1

    x, istop, itn, normr, normar, norma, conda, normx \
        = lsmr(A, b, damp, atol, btol, conlim, itnlim, show)

    j1 = min(n,5)
    j2 = max(n-4,1)
    print(' ')
    print('First elements of x:')
    str = ['%10.4f' % (xi) for xi in x[0:j1]]
    print(''.join(str))
    print(' ')
    print('Last elements of x:')
    str = ['%10.4f' % (xi) for xi in x[j2-1:]]
    print(''.join(str))

    r = b - Afun.matvec(x)
    r2 = sqrt(norm(r)**2 + (damp*norm(x))**2)
    print(' ')
    str = 'normr (est.) %17.10e' % (normr)
    str2 = 'normr (true) %17.10e' % (r2)
    print(str)
    print(str2)
    print(' ')


if __name__ == "__main__":
    lsmrtest(20,10,0)
@@ -0,0 +1,136 @@
import numpy as np
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
                           assert_array_almost_equal)

import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import lsqr
from time import time

# Set up a test problem
n = 35
G = np.eye(n)
normal = np.random.normal
norm = np.linalg.norm

for jj in range(5):
    gg = normal(size=n)
    hh = gg * gg.T
    G += (hh + hh.T) * 0.5
    G += normal(size=n) * normal(size=n)

b = normal(size=n)

tol = 1e-10
show = False
maxit = None


def test_basic():
    b_copy = b.copy()
    X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    assert_(np.all(b_copy == b))

    svx = np.linalg.solve(G, b)
    xo = X[0]
    assert_(norm(svx - xo) < 1e-5)


def test_gh_2466():
    row = np.array([0, 0])
    col = np.array([0, 1])
    val = np.array([1, -1])
    A = scipy.sparse.coo_matrix((val, (row, col)), shape=(1, 2))
    b = np.asarray([4])
    lsqr(A, b)


def test_well_conditioned_problems():
    # Test that the sparse lsqr solver returns the right solution
    # on various problems with different random seeds.
    # This is a non-regression test for a potential ZeroDivisionError
    # raised when computing the `test2` & `test3` convergence conditions.
    n = 10
    A_sparse = scipy.sparse.eye(n, n)
    A_dense = A_sparse.toarray()

    with np.errstate(invalid='raise'):
        for seed in range(30):
            rng = np.random.RandomState(seed + 10)
            beta = rng.rand(n)
            beta[beta == 0] = 0.00001  # ensure that all the betas are not null
            b = A_sparse * beta[:, np.newaxis]
            output = lsqr(A_sparse, b, show=show)

            # Check that the termination condition corresponds to an approximate
            # solution to Ax = b
            assert_equal(output[1], 1)
            solution = output[0]

            # Check that we recover the ground truth solution
            assert_array_almost_equal(solution, beta)

            # Sanity check: compare to the dense array solver
            reference_solution = np.linalg.solve(A_dense, b).ravel()
            assert_array_almost_equal(solution, reference_solution)


def test_b_shapes():
    # Test b being a scalar.
    A = np.array([[1.0, 2.0]])
    b = 3.0
    x = lsqr(A, b)[0]
    assert_almost_equal(norm(A.dot(x) - b), 0)

    # Test b being a column vector.
    A = np.eye(10)
    b = np.ones((10, 1))
    x = lsqr(A, b)[0]
    assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)


def test_initialization():
    # Test the default setting is the same as zeros
    b_copy = b.copy()
    x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    x0 = np.zeros(x_ref[0].shape)
    x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
    assert_(np.all(b_copy == b))
    assert_array_almost_equal(x_ref[0], x[0])

    # Test warm-start with single iteration
    x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
    x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
    assert_array_almost_equal(x_ref[0], x[0])
    assert_(np.all(b_copy == b))


if __name__ == "__main__":
    svx = np.linalg.solve(G, b)

    tic = time()
    X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    xo = X[0]
    phio = X[3]
    psio = X[7]
    k = X[2]
    chio = X[8]
    mg = np.amax(G - G.T)
    if mg > 1e-14:
        sym = 'No'
    else:
        sym = 'Yes'

    print('LSQR')
    print("Is linear operator symmetric? " + sym)
    print("n: %3g  iterations: %3g" % (n, k))
    print("Norms computed in %.2fs by LSQR" % (time() - tic))
    print(" ||x|| %9.4e  ||r|| %9.4e  ||Ar|| %9.4e " % (chio, phio, psio))
    print("Residual norms computed directly:")
    print(" ||x|| %9.4e  ||r|| %9.4e  ||Ar|| %9.4e" % (norm(xo),
                                                       norm(G*xo - b),
                                                       norm(G.T*(G*xo-b))))
    print("Direct solution norms:")
    print(" ||x|| %9.4e  ||r|| %9.4e " % (norm(svx), norm(G*svx - b)))
    print("")
    print(" || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo))
    print("")
@@ -0,0 +1,98 @@
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_
from scipy.sparse.linalg.isolve import minres
from scipy.linalg import norm

from pytest import raises as assert_raises
from .test_iterative import assert_normclose


def get_sample_problem():
    # A random 10 x 10 symmetric matrix
    np.random.seed(1234)
    matrix = np.random.rand(10, 10)
    matrix = matrix + matrix.T
    # A random vector of length 10
    vector = np.random.rand(10)
    return matrix, vector


def test_singular():
    A, b = get_sample_problem()
    A[0, ] = 0
    b[0] = 0
    xp, info = minres(A, b)
    assert_equal(info, 0)
    assert_normclose(A.dot(xp), b, tol=1e-5)


def test_x0_is_used_by():
    A, b = get_sample_problem()
    # Random x0 to feed minres
    np.random.seed(12345)
    x0 = np.random.rand(10)
    trace = []

    def trace_iterates(xk):
        trace.append(xk)
    minres(A, b, x0=x0, callback=trace_iterates)
    trace_with_x0 = trace

    trace = []
    minres(A, b, callback=trace_iterates)
    assert_(not np.array_equal(trace_with_x0[0], trace[0]))


def test_shift():
    A, b = get_sample_problem()
    shift = 0.5
    shifted_A = A - shift * np.eye(10)
    x1, info1 = minres(A, b, shift=shift)
    x2, info2 = minres(shifted_A, b)
    assert_equal(info1, 0)
    assert_allclose(x1, x2, rtol=1e-5)


def test_asymmetric_fail():
    """Asymmetric matrix should raise `ValueError` when check=True"""
    A, b = get_sample_problem()
    A[1, 2] = 1
    A[2, 1] = 2
    with assert_raises(ValueError):
        xp, info = minres(A, b, check=True)


def test_minres_non_default_x0():
    np.random.seed(1234)
    tol = 10**(-6)
    a = np.random.randn(5, 5)
    a = np.dot(a, a.T)
    b = np.random.randn(5)
    c = np.random.randn(5)
    x = minres(a, b, x0=c, tol=tol)[0]
    assert norm(a.dot(x) - b) < tol


def test_minres_precond_non_default_x0():
    np.random.seed(12345)
    tol = 10**(-6)
    a = np.random.randn(5, 5)
    a = np.dot(a, a.T)
    b = np.random.randn(5)
    c = np.random.randn(5)
    m = np.random.randn(5, 5)
    m = np.dot(m, m.T)
    x = minres(a, b, M=m, x0=c, tol=tol)[0]
    assert norm(a.dot(x) - b) < tol


def test_minres_precond_exact_x0():
    np.random.seed(1234)
    tol = 10**(-6)
    a = np.eye(10)
    b = np.ones(10)
    c = np.ones(10)
    m = np.random.randn(10, 10)
    m = np.dot(m, m.T)
    x = minres(a, b, M=m, x0=c, tol=tol)[0]
    assert norm(a.dot(x) - b) < tol
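For context on test_shift above: minres' shift argument is documented to solve the shifted system (A - shift*I) x = b, which is what the test relies on. Below is a small illustrative check under that assumption (not from this commit; the matrix, seed, and shift value are arbitrary).

# Sketch of the shift semantics exercised by test_shift above.
import numpy as np
from scipy.sparse.linalg import minres

np.random.seed(0)
A = np.random.rand(8, 8)
A = A + A.T + 8 * np.eye(8)   # symmetric, well conditioned
b = np.random.rand(8)
s = 0.25

x_shift, _ = minres(A, b, shift=s, tol=1e-10)
x_direct = np.linalg.solve(A - s * np.eye(8), b)
print(np.allclose(x_shift, x_direct, atol=1e-6))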
@@ -0,0 +1,8 @@
import numpy as np
from pytest import raises as assert_raises

from scipy.sparse.linalg import utils


def test_make_system_bad_shape():
    assert_raises(ValueError, utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4))