Fixed database typo and removed unnecessary class identifier.
This commit is contained in:
parent
00ad49a143
commit
45fb349a7d
5098 changed files with 952558 additions and 85 deletions
|
@ -0,0 +1,6 @@
|
|||
"""This module contains the equality constrained SQP solver."""
|
||||
|
||||
|
||||
from .minimize_trustregion_constr import _minimize_trustregion_constr
|
||||
|
||||
__all__ = ['_minimize_trustregion_constr']
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,390 @@
|
|||
import numpy as np
|
||||
import scipy.sparse as sps
|
||||
|
||||
|
||||
class CanonicalConstraint(object):
    """Canonical constraint to use with trust-constr algorithm.

    It represents the set of constraints of the form::

        f_eq(x) = 0
        f_ineq(x) <= 0

    where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see
    below.

    The class is supposed to be instantiated by factory methods, which
    should prepare the parameters listed below.

    Parameters
    ----------
    n_eq, n_ineq : int
        Number of equality and inequality constraints respectively.
    fun : callable
        Function defining the constraints. The signature is
        ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq`
        components and ``c_ineq`` is ndarray with `n_ineq` components.
    jac : callable
        Function to evaluate the Jacobian of the constraint. The signature
        is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are
        either ndarray or csr_matrix of shapes (n_eq, n) and (n_ineq, n),
        respectively.
    hess : callable
        Function to evaluate the Hessian of the constraints multiplied
        by Lagrange multipliers, that is
        ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is
        ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied
        shape (n, n) and provide a matrix-vector product operation
        ``H.dot(p)``.
    keep_feasible : ndarray, shape (n_ineq,)
        Mask indicating which inequality constraints should be kept feasible.
    """
    def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible):
        self.n_eq = n_eq
        self.n_ineq = n_ineq
        self.fun = fun
        self.jac = jac
        self.hess = hess
        self.keep_feasible = keep_feasible

    @classmethod
    def from_PreparedConstraint(cls, constraint):
        """Create an instance from `PreparedConstraint` object."""
        lb, ub = constraint.bounds
        cfun = constraint.fun
        keep_feasible = constraint.keep_feasible

        # Dispatch on the structure of the bounds.  Note: the original code
        # performed the "no bounds at all" check twice in a row; the
        # redundant duplicate has been removed.
        if np.all(lb == -np.inf) and np.all(ub == np.inf):
            # Unbounded in both directions: the constraint is vacuous.
            return cls.empty(cfun.n)
        elif np.all(lb == ub):
            # lb == ub everywhere: pure equality constraint.
            return cls._equal_to_canonical(cfun, lb)
        elif np.all(lb == -np.inf):
            # Only upper bounds: f(x) <= ub.
            return cls._less_to_canonical(cfun, ub, keep_feasible)
        elif np.all(ub == np.inf):
            # Only lower bounds: f(x) >= lb.
            return cls._greater_to_canonical(cfun, lb, keep_feasible)
        else:
            # Mixed case: each component may be equality, one-sided or
            # two-sided interval.
            return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)

    @classmethod
    def empty(cls, n):
        """Create an "empty" instance.

        This "empty" instance is required to allow working with unconstrained
        problems as if they have some constraints.
        """
        empty_fun = np.empty(0)
        empty_jac = np.empty((0, n))
        empty_hess = sps.csr_matrix((n, n))

        def fun(x):
            return empty_fun, empty_fun

        def jac(x):
            return empty_jac, empty_jac

        def hess(x, v_eq, v_ineq):
            return empty_hess

        return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_))

    @classmethod
    def concatenate(cls, canonical_constraints, sparse_jacobian):
        """Concatenate multiple `CanonicalConstraint` into one.

        `sparse_jacobian` (bool) determines the Jacobian format of the
        concatenated constraint. Note that items in `canonical_constraints`
        must have their Jacobians in the same format.
        """
        def fun(x):
            if canonical_constraints:
                eq_all, ineq_all = zip(
                    *[c.fun(x) for c in canonical_constraints])
            else:
                eq_all, ineq_all = [], []

            return np.hstack(eq_all), np.hstack(ineq_all)

        if sparse_jacobian:
            vstack = sps.vstack
        else:
            vstack = np.vstack

        def jac(x):
            if canonical_constraints:
                eq_all, ineq_all = zip(
                    *[c.jac(x) for c in canonical_constraints])
            else:
                eq_all, ineq_all = [], []

            return vstack(eq_all), vstack(ineq_all)

        def hess(x, v_eq, v_ineq):
            # Slice the stacked multiplier vectors back into per-constraint
            # pieces and sum up the individual Hessian contributions lazily
            # through a LinearOperator.
            hess_all = []
            index_eq = 0
            index_ineq = 0
            for c in canonical_constraints:
                vc_eq = v_eq[index_eq:index_eq + c.n_eq]
                vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq]
                hess_all.append(c.hess(x, vc_eq, vc_ineq))
                index_eq += c.n_eq
                index_ineq += c.n_ineq

            def matvec(p):
                result = np.zeros_like(p)
                for h in hess_all:
                    result += h.dot(p)
                return result

            n = x.shape[0]
            return sps.linalg.LinearOperator((n, n), matvec, dtype=float)

        n_eq = sum(c.n_eq for c in canonical_constraints)
        n_ineq = sum(c.n_ineq for c in canonical_constraints)
        keep_feasible = np.hstack([c.keep_feasible for c in
                                   canonical_constraints])

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _equal_to_canonical(cls, cfun, value):
        """Convert an equality constraint ``f(x) == value`` to canonical form."""
        empty_fun = np.empty(0)
        n = cfun.n

        n_eq = value.shape[0]
        n_ineq = 0
        keep_feasible = np.empty(0, dtype=bool)

        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        def fun(x):
            return cfun.fun(x) - value, empty_fun

        def jac(x):
            return cfun.jac(x), empty_jac

        def hess(x, v_eq, v_ineq):
            return cfun.hess(x, v_eq)

        # Note: the original code re-executed the empty_fun / empty_jac
        # setup here a second time with identical values; that dead code
        # has been removed.
        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _less_to_canonical(cls, cfun, ub, keep_feasible):
        """Convert an upper-bound constraint ``f(x) <= ub`` to canonical form."""
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_ub = ub < np.inf
        n_eq = 0
        n_ineq = np.sum(finite_ub)

        if np.all(finite_ub):
            # All upper bounds are finite: no component selection needed.
            def fun(x):
                return empty_fun, cfun.fun(x) - ub

            def jac(x):
                return empty_jac, cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, v_ineq)
        else:
            # Keep only the components with a finite upper bound; the
            # infinite ones impose no restriction.
            finite_ub = np.nonzero(finite_ub)[0]
            keep_feasible = keep_feasible[finite_ub]
            ub = ub[finite_ub]

            def fun(x):
                return empty_fun, cfun.fun(x)[finite_ub] - ub

            def jac(x):
                return empty_jac, cfun.jac(x)[finite_ub]

            def hess(x, v_eq, v_ineq):
                # Scatter the multipliers back into a full-size vector.
                v = np.zeros(cfun.m)
                v[finite_ub] = v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _greater_to_canonical(cls, cfun, lb, keep_feasible):
        """Convert a lower-bound constraint ``f(x) >= lb`` to canonical form.

        The constraint is negated (``lb - f(x) <= 0``) to match the
        canonical ``f_ineq(x) <= 0`` convention.
        """
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_lb = lb > -np.inf
        n_eq = 0
        n_ineq = np.sum(finite_lb)

        if np.all(finite_lb):
            def fun(x):
                return empty_fun, lb - cfun.fun(x)

            def jac(x):
                return empty_jac, -cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, -v_ineq)
        else:
            finite_lb = np.nonzero(finite_lb)[0]
            keep_feasible = keep_feasible[finite_lb]
            lb = lb[finite_lb]

            def fun(x):
                return empty_fun, lb - cfun.fun(x)[finite_lb]

            def jac(x):
                return empty_jac, -cfun.jac(x)[finite_lb]

            def hess(x, v_eq, v_ineq):
                # Scatter the negated multipliers back into a full-size vector.
                v = np.zeros(cfun.m)
                v[finite_lb] = -v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible):
        """Convert a general ``lb <= f(x) <= ub`` constraint to canonical form.

        Components are classified as equality (lb == ub), one-sided
        (only lb or only ub finite) or two-sided intervals; the latter
        contribute two inequality rows each, hence
        ``n_ineq = n_less + n_greater + 2 * n_interval``.
        """
        lb_inf = lb == -np.inf
        ub_inf = ub == np.inf
        equal = lb == ub
        less = lb_inf & ~ub_inf
        greater = ub_inf & ~lb_inf
        interval = ~equal & ~lb_inf & ~ub_inf

        equal = np.nonzero(equal)[0]
        less = np.nonzero(less)[0]
        greater = np.nonzero(greater)[0]
        interval = np.nonzero(interval)[0]
        n_less = less.shape[0]
        n_greater = greater.shape[0]
        n_interval = interval.shape[0]
        n_ineq = n_less + n_greater + 2 * n_interval
        n_eq = equal.shape[0]

        keep_feasible = np.hstack((keep_feasible[less],
                                   keep_feasible[greater],
                                   keep_feasible[interval],
                                   keep_feasible[interval]))

        def fun(x):
            f = cfun.fun(x)
            eq = f[equal] - lb[equal]
            le = f[less] - ub[less]
            ge = lb[greater] - f[greater]
            il = f[interval] - ub[interval]
            ig = lb[interval] - f[interval]
            return eq, np.hstack((le, ge, il, ig))

        def jac(x):
            J = cfun.jac(x)
            eq = J[equal]
            le = J[less]
            ge = -J[greater]
            il = J[interval]
            ig = -il
            if sps.issparse(J):
                ineq = sps.vstack((le, ge, il, ig))
            else:
                ineq = np.vstack((le, ge, il, ig))
            return eq, ineq

        def hess(x, v_eq, v_ineq):
            # Unpack the stacked inequality multipliers in the same order
            # the rows were stacked in `fun`: less, greater, interval-upper,
            # interval-lower.
            n_start = 0
            v_l = v_ineq[n_start:n_start + n_less]
            n_start += n_less
            v_g = v_ineq[n_start:n_start + n_greater]
            n_start += n_greater
            v_il = v_ineq[n_start:n_start + n_interval]
            n_start += n_interval
            v_ig = v_ineq[n_start:n_start + n_interval]

            v = np.zeros_like(lb)
            v[equal] = v_eq
            v[less] = v_l
            v[greater] = -v_g
            v[interval] = v_il - v_ig

            return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
|
||||
|
||||
|
||||
def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian):
    """Convert initial values of the constraints to the canonical format.

    The purpose is to avoid one additional call to the constraints at the
    initial point. It takes the values saved in each `PreparedConstraint`,
    modifies and concatenates them into the canonical constraint format.

    Parameters
    ----------
    n : int
        Number of variables (columns of the Jacobians).
    prepared_constraints : list
        Objects exposing ``fun.f`` (constraint values), ``fun.J``
        (constraint Jacobian) and ``bounds = (lb, ub)``.
    sparse_jacobian : bool
        If True, stack Jacobians with ``sps.vstack``; otherwise with
        ``np.vstack``.

    Returns
    -------
    c_eq, c_ineq : ndarray
        Equality and inequality constraint values in canonical form.
    J_eq, J_ineq : {ndarray, sparse matrix}
        Corresponding Jacobians.
    """
    c_eq = []
    c_ineq = []
    J_eq = []
    J_ineq = []

    for c in prepared_constraints:
        f = c.fun.f
        J = c.fun.J
        lb, ub = c.bounds
        if np.all(lb == ub):
            # Pure equality constraint: f(x) == lb.
            c_eq.append(f - lb)
            J_eq.append(J)
        elif np.all(lb == -np.inf):
            # Only upper bounds; drop components with infinite ub.
            finite_ub = ub < np.inf
            c_ineq.append(f[finite_ub] - ub[finite_ub])
            J_ineq.append(J[finite_ub])
        elif np.all(ub == np.inf):
            # Only lower bounds; negate to the canonical <= 0 form.
            finite_lb = lb > -np.inf
            c_ineq.append(lb[finite_lb] - f[finite_lb])
            J_ineq.append(-J[finite_lb])
        else:
            # Mixed bounds: classify each component. A two-sided interval
            # contributes two inequality rows (upper and lower side).
            lb_inf = lb == -np.inf
            ub_inf = ub == np.inf
            equal = lb == ub
            less = lb_inf & ~ub_inf
            greater = ub_inf & ~lb_inf
            interval = ~equal & ~lb_inf & ~ub_inf

            c_eq.append(f[equal] - lb[equal])
            c_ineq.append(f[less] - ub[less])
            c_ineq.append(lb[greater] - f[greater])
            c_ineq.append(f[interval] - ub[interval])
            c_ineq.append(lb[interval] - f[interval])

            J_eq.append(J[equal])
            J_ineq.append(J[less])
            J_ineq.append(-J[greater])
            J_ineq.append(J[interval])
            J_ineq.append(-J[interval])

    c_eq = np.hstack(c_eq) if c_eq else np.empty(0)
    c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0)

    if sparse_jacobian:
        vstack = sps.vstack
        empty = sps.csr_matrix((0, n))
    else:
        vstack = np.vstack
        empty = np.empty((0, n))

    J_eq = vstack(J_eq) if J_eq else empty
    J_ineq = vstack(J_ineq) if J_ineq else empty

    return c_eq, c_ineq, J_eq, J_ineq
|
|
@ -0,0 +1,217 @@
|
|||
"""Byrd-Omojokun Trust-Region SQP method."""
|
||||
|
||||
from scipy.sparse import eye as speye
|
||||
from .projections import projections
|
||||
from .qp_subproblem import modified_dogleg, projected_cg, box_intersections
|
||||
import numpy as np
|
||||
from numpy.linalg import norm
|
||||
|
||||
__all__ = ['equality_constrained_sqp']
|
||||
|
||||
|
||||
def default_scaling(x):
    """Return the default (identity) scaling: a sparse n x n identity matrix,
    where n is the number of components of `x`."""
    n = np.shape(x)[0]
    return speye(n)
|
||||
|
||||
|
||||
def equality_constrained_sqp(fun_and_constr, grad_and_jac, lagr_hess,
                             x0, fun0, grad0, constr0,
                             jac0, stop_criteria,
                             state,
                             initial_penalty,
                             initial_trust_radius,
                             factorization_method,
                             trust_lb=None,
                             trust_ub=None,
                             scaling=default_scaling):
    """Solve nonlinear equality-constrained problem using trust-region SQP.

    Solve optimization problem:

        minimize fun(x)
        subject to: constr(x) = 0

    using Byrd-Omojokun Trust-Region SQP method described in [1]_. Several
    implementation details are based on [2]_ and [3]_, p. 549.

    `fun_and_constr(x)` returns ``(f, b)`` and `grad_and_jac(x)` returns
    ``(c, A)``; `lagr_hess(x, v)` returns the Lagrangian Hessian (an object
    with a ``dot`` method). `fun0`, `grad0`, `constr0` and `jac0` are the
    corresponding values at `x0`, so no evaluation is needed at the start.
    `stop_criteria` is called once per iteration and ends the loop when it
    returns a truthy value. Returns the tuple ``(x, state)``.

    References
    ----------
    .. [1] Lalee, Marucha, Jorge Nocedal, and Todd Plantenga. "On the
           implementation of an algorithm for large-scale equality
           constrained optimization." SIAM Journal on
           Optimization 8.3 (1998): 682-706.
    .. [2] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
           "An interior point algorithm for large-scale nonlinear
           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
    .. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
           Second Edition (2006).
    """
    PENALTY_FACTOR = 0.3  # Rho from formula (3.51), reference [2]_, p.891.
    LARGE_REDUCTION_RATIO = 0.9
    INTERMEDIARY_REDUCTION_RATIO = 0.3
    SUFFICIENT_REDUCTION_RATIO = 1e-8  # Eta from reference [2]_, p.892.
    TRUST_ENLARGEMENT_FACTOR_L = 7.0
    TRUST_ENLARGEMENT_FACTOR_S = 2.0
    MAX_TRUST_REDUCTION = 0.5
    MIN_TRUST_REDUCTION = 0.1
    SOC_THRESHOLD = 0.1
    TR_FACTOR = 0.8  # Zeta from formula (3.21), reference [2]_, p.885.
    BOX_FACTOR = 0.5

    n, = np.shape(x0)  # Number of parameters

    # Set default lower and upper bounds.
    if trust_lb is None:
        trust_lb = np.full(n, -np.inf)
    if trust_ub is None:
        trust_ub = np.full(n, np.inf)

    # Initial values
    x = np.copy(x0)
    trust_radius = initial_trust_radius
    penalty = initial_penalty
    # Compute Values
    f = fun0
    c = grad0
    b = constr0
    A = jac0
    S = scaling(x)
    # Get projections
    Z, LS, Y = projections(A, factorization_method)
    # Compute least-square lagrange multipliers
    v = -LS.dot(c)
    # Compute Hessian
    H = lagr_hess(x, v)

    # Update state parameters
    optimality = norm(c + A.T.dot(v), np.inf)
    constr_violation = norm(b, np.inf) if len(b) > 0 else 0
    cg_info = {'niter': 0, 'stop_cond': 0,
               'hits_boundary': False}

    last_iteration_failed = False
    while not stop_criteria(state, x, last_iteration_failed,
                            optimality, constr_violation,
                            trust_radius, penalty, cg_info):
        # Normal Step - `dn`
        # minimize 1/2*||A dn + b||^2
        # subject to:
        # ||dn|| <= TR_FACTOR * trust_radius
        # BOX_FACTOR * lb <= dn <= BOX_FACTOR * ub.
        dn = modified_dogleg(A, Y, b,
                             TR_FACTOR*trust_radius,
                             BOX_FACTOR*trust_lb,
                             BOX_FACTOR*trust_ub)

        # Tangential Step - `dt`
        # Solve the QP problem:
        # minimize 1/2 dt.T H dt + dt.T (H dn + c)
        # subject to:
        # A dt = 0
        # ||dt|| <= sqrt(trust_radius**2 - ||dn||**2)
        # lb - dn <= dt <= ub - dn
        c_t = H.dot(dn) + c
        b_t = np.zeros_like(b)
        trust_radius_t = np.sqrt(trust_radius**2 - np.linalg.norm(dn)**2)
        lb_t = trust_lb - dn
        ub_t = trust_ub - dn
        dt, cg_info = projected_cg(H, c_t, Z, Y, b_t,
                                   trust_radius_t,
                                   lb_t, ub_t)

        # Compute update (normal + tangential steps).
        d = dn + dt

        # Compute second order model: 1/2 d H d + c.T d + f.
        quadratic_model = 1/2*(H.dot(d)).dot(d) + c.T.dot(d)
        # Compute linearized constraint: l = A d + b.
        linearized_constr = A.dot(d)+b
        # Compute new penalty parameter according to formula (3.52),
        # reference [2]_, p.891.
        vpred = norm(b) - norm(linearized_constr)
        # Guarantee `vpred` always positive,
        # regardless of roundoff errors.
        vpred = max(1e-16, vpred)
        previous_penalty = penalty
        if quadratic_model > 0:
            new_penalty = quadratic_model / ((1-PENALTY_FACTOR)*vpred)
            penalty = max(penalty, new_penalty)
        # Compute predicted reduction according to formula (3.52),
        # reference [2]_, p.891.
        predicted_reduction = -quadratic_model + penalty*vpred

        # Compute merit function at current point
        merit_function = f + penalty*norm(b)
        # Evaluate function and constraints at trial point
        x_next = x + S.dot(d)
        f_next, b_next = fun_and_constr(x_next)
        # Compute merit function at trial point
        merit_function_next = f_next + penalty*norm(b_next)
        # Compute actual reduction according to formula (3.54),
        # reference [2]_, p.892.
        actual_reduction = merit_function - merit_function_next
        # Compute reduction ratio
        reduction_ratio = actual_reduction / predicted_reduction

        # Second order correction (SOC), reference [2]_, p.892.
        # Applied when the normal step is small relative to the
        # tangential step but the reduction ratio is still poor.
        if reduction_ratio < SUFFICIENT_REDUCTION_RATIO and \
           norm(dn) <= SOC_THRESHOLD * norm(dt):
            # Compute second order correction
            y = -Y.dot(b_next)
            # Make sure increment is inside box constraints
            _, t, intersect = box_intersections(d, y, trust_lb, trust_ub)
            # Compute tentative point
            x_soc = x + S.dot(d + t*y)
            f_soc, b_soc = fun_and_constr(x_soc)
            # Recompute actual reduction
            merit_function_soc = f_soc + penalty*norm(b_soc)
            actual_reduction_soc = merit_function - merit_function_soc
            # Recompute reduction ratio
            reduction_ratio_soc = actual_reduction_soc / predicted_reduction
            if intersect and reduction_ratio_soc >= SUFFICIENT_REDUCTION_RATIO:
                x_next = x_soc
                f_next = f_soc
                b_next = b_soc
                reduction_ratio = reduction_ratio_soc

        # Readjust trust region step, formula (3.55), reference [2]_, p.892.
        if reduction_ratio >= LARGE_REDUCTION_RATIO:
            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_L * norm(d),
                               trust_radius)
        elif reduction_ratio >= INTERMEDIARY_REDUCTION_RATIO:
            trust_radius = max(TRUST_ENLARGEMENT_FACTOR_S * norm(d),
                               trust_radius)
        # Reduce trust region step, according to reference [3]_, p.696.
        elif reduction_ratio < SUFFICIENT_REDUCTION_RATIO:
            trust_reduction = ((1-SUFFICIENT_REDUCTION_RATIO) /
                               (1-reduction_ratio))
            new_trust_radius = trust_reduction * norm(d)
            if new_trust_radius >= MAX_TRUST_REDUCTION * trust_radius:
                trust_radius *= MAX_TRUST_REDUCTION
            elif new_trust_radius >= MIN_TRUST_REDUCTION * trust_radius:
                trust_radius = new_trust_radius
            else:
                trust_radius *= MIN_TRUST_REDUCTION

        # Update iteration
        if reduction_ratio >= SUFFICIENT_REDUCTION_RATIO:
            # Accept the step and refresh all derived quantities.
            x = x_next
            f, b = f_next, b_next
            c, A = grad_and_jac(x)
            S = scaling(x)
            # Get projections
            Z, LS, Y = projections(A, factorization_method)
            # Compute least-square lagrange multipliers
            v = -LS.dot(c)
            # Compute Hessian
            H = lagr_hess(x, v)
            # Set Flag
            last_iteration_failed = False
            # Optimality values
            optimality = norm(c + A.T.dot(v), np.inf)
            constr_violation = norm(b, np.inf) if len(b) > 0 else 0
        else:
            # Reject the step: restore the penalty and flag the failure.
            penalty = previous_penalty
            last_iteration_failed = True

    return x, state
|
|
@ -0,0 +1,545 @@
|
|||
import time
|
||||
import numpy as np
|
||||
from scipy.sparse.linalg import LinearOperator
|
||||
from .._differentiable_functions import VectorFunction
|
||||
from .._constraints import (
|
||||
NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
|
||||
from .._hessian_update_strategy import BFGS
|
||||
from ..optimize import OptimizeResult
|
||||
from .._differentiable_functions import ScalarFunction
|
||||
from .equality_constrained_sqp import equality_constrained_sqp
|
||||
from .canonical_constraint import (CanonicalConstraint,
|
||||
initial_constraints_as_canonical)
|
||||
from .tr_interior_point import tr_interior_point
|
||||
from .report import BasicReport, SQPReport, IPReport
|
||||
|
||||
|
||||
TERMINATION_MESSAGES = {
|
||||
0: "The maximum number of function evaluations is exceeded.",
|
||||
1: "`gtol` termination condition is satisfied.",
|
||||
2: "`xtol` termination condition is satisfied.",
|
||||
3: "`callback` function requested termination."
|
||||
}
|
||||
|
||||
|
||||
class HessianLinearOperator(object):
    """Factory turning a ``hessp`` callable into a `LinearOperator`.

    ``hessp(x, p, *args)`` must return the Hessian-vector product at `x`.
    Calling an instance at a point `x` produces an (n, n) `LinearOperator`
    whose ``matvec`` evaluates that product.
    """
    def __init__(self, hessp, n):
        self.hessp = hessp
        self.n = n

    def __call__(self, x, *args):
        hessp = self.hessp

        def matvec(p):
            # Hessian-vector product at the captured point `x`.
            return hessp(x, p, *args)

        shape = (self.n, self.n)
        return LinearOperator(shape, matvec=matvec)
|
||||
|
||||
|
||||
class LagrangianHessian(object):
    """The Hessian of the Lagrangian as LinearOperator.

    The Lagrangian is computed as the objective function plus all the
    constraints multiplied with some numbers (Lagrange multipliers).
    """
    def __init__(self, n, objective_hess, constraints_hess):
        self.n = n
        self.objective_hess = objective_hess
        self.constraints_hess = constraints_hess

    def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
        # Evaluate both Hessian pieces once, then combine lazily.
        H_obj = self.objective_hess(x)
        H_con = self.constraints_hess(x, v_eq, v_ineq)

        def matvec(p):
            return H_obj.dot(p) + H_con.dot(p)

        return LinearOperator((self.n, self.n), matvec)
|
||||
|
||||
|
||||
def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
                     start_time, tr_radius, constr_penalty, cg_info):
    """Update the solver `state` object after one SQP iteration.

    Always bumps the iteration counter and mirrors the evaluation counters
    from `objective` and from each prepared constraint. Point-dependent
    fields (``x``, ``fun``, ``grad``, multipliers, constraint values,
    optimality and constraint violation) are refreshed only when the last
    iteration succeeded. Returns the mutated `state`.
    """
    state.nit += 1
    # Mirror the objective's evaluation counters.
    state.nfev = objective.nfev
    state.njev = objective.ngev
    state.nhev = objective.nhev
    # Constraints backed by a VectorFunction track their own counters;
    # other constraint kinds (e.g. linear) report 0.
    state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]

    if not last_iteration_failed:
        state.x = x
        state.fun = objective.f
        state.grad = objective.g
        state.v = [c.fun.v for c in prepared_constraints]
        state.constr = [c.fun.f for c in prepared_constraints]
        state.jac = [c.fun.J for c in prepared_constraints]
        # Compute Lagrangian Gradient: grad f + sum_i J_i.T v_i
        state.lagrangian_grad = np.copy(state.grad)
        for c in prepared_constraints:
            state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
        state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
        # Compute maximum constraint violation over all constraints
        # (positive parts of lb - c and c - ub).
        state.constr_violation = 0
        for i in range(len(prepared_constraints)):
            lb, ub = prepared_constraints[i].bounds
            c = state.constr[i]
            state.constr_violation = np.max([state.constr_violation,
                                             np.max(lb - c),
                                             np.max(c - ub)])

    state.execution_time = time.time() - start_time
    state.tr_radius = tr_radius
    state.constr_penalty = constr_penalty
    # CG iterations accumulate across the run; stop condition is per-iteration.
    state.cg_niter += cg_info["niter"]
    state.cg_stop_cond = cg_info["stop_cond"]

    return state
|
||||
|
||||
|
||||
def update_state_ip(state, x, last_iteration_failed, objective,
                    prepared_constraints, start_time,
                    tr_radius, constr_penalty, cg_info,
                    barrier_parameter, barrier_tolerance):
    """Update `state` after an interior-point iteration.

    Delegates the common bookkeeping to `update_state_sqp` and then records
    the current barrier parameter and barrier tolerance on top of it.
    """
    updated = update_state_sqp(state, x, last_iteration_failed, objective,
                               prepared_constraints, start_time, tr_radius,
                               constr_penalty, cg_info)
    updated.barrier_parameter = barrier_parameter
    updated.barrier_tolerance = barrier_tolerance
    return updated
|
||||
|
||||
|
||||
def _minimize_trustregion_constr(fun, x0, args, grad,
|
||||
hess, hessp, bounds, constraints,
|
||||
xtol=1e-8, gtol=1e-8,
|
||||
barrier_tol=1e-8,
|
||||
sparse_jacobian=None,
|
||||
callback=None, maxiter=1000,
|
||||
verbose=0, finite_diff_rel_step=None,
|
||||
initial_constr_penalty=1.0, initial_tr_radius=1.0,
|
||||
initial_barrier_parameter=0.1,
|
||||
initial_barrier_tolerance=0.1,
|
||||
factorization_method=None,
|
||||
disp=False):
|
||||
"""Minimize a scalar function subject to constraints.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
gtol : float, optional
|
||||
Tolerance for termination by the norm of the Lagrangian gradient.
|
||||
The algorithm will terminate when both the infinity norm (i.e., max
|
||||
abs value) of the Lagrangian gradient and the constraint violation
|
||||
are smaller than ``gtol``. Default is 1e-8.
|
||||
xtol : float, optional
|
||||
Tolerance for termination by the change of the independent variable.
|
||||
The algorithm will terminate when ``tr_radius < xtol``, where
|
||||
``tr_radius`` is the radius of the trust region used in the algorithm.
|
||||
Default is 1e-8.
|
||||
barrier_tol : float, optional
|
||||
Threshold on the barrier parameter for the algorithm termination.
|
||||
When inequality constraints are present, the algorithm will terminate
|
||||
only when the barrier parameter is less than `barrier_tol`.
|
||||
Default is 1e-8.
|
||||
sparse_jacobian : {bool, None}, optional
|
||||
Determines how to represent Jacobians of the constraints. If bool,
|
||||
then Jacobians of all the constraints will be converted to the
|
||||
corresponding format. If None (default), then Jacobians won't be
|
||||
converted, but the algorithm can proceed only if they all have the
|
||||
same format.
|
||||
initial_tr_radius: float, optional
|
||||
Initial trust radius. The trust radius gives the maximum distance
|
||||
between solution points in consecutive iterations. It reflects the
|
||||
trust the algorithm puts in the local approximation of the optimization
|
||||
problem. For an accurate local approximation the trust-region should be
|
||||
large and for an approximation valid only close to the current point it
|
||||
should be a small one. The trust radius is automatically updated throughout
|
||||
the optimization process, with ``initial_tr_radius`` being its initial value.
|
||||
Default is 1 (recommended in [1]_, p. 19).
|
||||
initial_constr_penalty : float, optional
|
||||
Initial constraints penalty parameter. The penalty parameter is used for
|
||||
balancing the requirements of decreasing the objective function
|
||||
and satisfying the constraints. It is used for defining the merit function:
|
||||
``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
|
||||
where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
|
||||
the constraints. The merit function is used for accepting or rejecting
|
||||
trial points and ``constr_penalty`` weights the two conflicting goals
|
||||
of reducing objective function and constraints. The penalty is automatically
|
||||
updated throughout the optimization process, with
|
||||
``initial_constr_penalty`` being its initial value. Default is 1
|
||||
(recommended in [1]_, p 19).
|
||||
initial_barrier_parameter, initial_barrier_tolerance: float, optional
|
||||
Initial barrier parameter and initial tolerance for the barrier subproblem.
|
||||
Both are used only when inequality constraints are present. For dealing with
|
||||
optimization problems ``min_x f(x)`` subject to inequality constraints
|
||||
``c(x) <= 0`` the algorithm introduces slack variables, solving the problem
|
||||
``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality
|
||||
constraints ``c(x) + s = 0`` instead of the original problem. This subproblem
|
||||
is solved for decreasing values of ``barrier_parameter`` and with decreasing
|
||||
tolerances for the termination, starting with ``initial_barrier_parameter``
|
||||
for the barrier parameter and ``initial_barrier_tolerance`` for the
|
||||
barrier tolerance. Default is 0.1 for both values (recommended in [1]_ p. 19).
|
||||
Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated
|
||||
with the same prefactor.
|
||||
factorization_method : string or None, optional
|
||||
Method to factorize the Jacobian of the constraints. Use None (default)
|
||||
for the auto selection or one of:
|
||||
|
||||
- 'NormalEquation' (requires scikit-sparse)
|
||||
- 'AugmentedSystem'
|
||||
- 'QRFactorization'
|
||||
- 'SVDFactorization'
|
||||
|
||||
The methods 'NormalEquation' and 'AugmentedSystem' can be used only
|
||||
with sparse constraints. The projections required by the algorithm
|
||||
will be computed using, respectively, the the normal equation and the
|
||||
augmented system approaches explained in [1]_. 'NormalEquation'
|
||||
computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
|
||||
performs the LU factorization of an augmented system. They usually
|
||||
provide similar results. 'AugmentedSystem' is used by default for
|
||||
sparse matrices.
|
||||
|
||||
The methods 'QRFactorization' and 'SVDFactorization' can be used
|
||||
only with dense constraints. They compute the required projections
|
||||
using, respectively, QR and SVD factorizations. The 'SVDFactorization'
|
||||
method can cope with Jacobian matrices with deficient row rank and will
|
||||
be used whenever other factorization methods fail (which may imply the
|
||||
conversion of sparse matrices to a dense format when required).
|
||||
By default, 'QRFactorization' is used for dense matrices.
|
||||
finite_diff_rel_step : None or array_like, optional
|
||||
Relative step size for the finite difference approximation.
|
||||
maxiter : int, optional
|
||||
Maximum number of algorithm iterations. Default is 1000.
|
||||
verbose : {0, 1, 2}, optional
|
||||
Level of algorithm's verbosity:
|
||||
|
||||
* 0 (default) : work silently.
|
||||
* 1 : display a termination report.
|
||||
* 2 : display progress during iterations.
|
||||
* 3 : display progress during iterations (more complete report).
|
||||
|
||||
disp : bool, optional
|
||||
If True (default), then `verbose` will be set to 1 if it was 0.
|
||||
|
||||
Returns
|
||||
-------
|
||||
`OptimizeResult` with the fields documented below. Note the following:
|
||||
|
||||
1. All values corresponding to the constraints are ordered as they
|
||||
were passed to the solver. And values corresponding to `bounds`
|
||||
constraints are put *after* other constraints.
|
||||
2. All numbers of function, Jacobian or Hessian evaluations correspond
|
||||
to numbers of actual Python function calls. It means, for example,
|
||||
that if a Jacobian is estimated by finite differences, then the
|
||||
number of Jacobian evaluations will be zero and the number of
|
||||
function evaluations will be incremented by all calls during the
|
||||
finite difference estimation.
|
||||
|
||||
x : ndarray, shape (n,)
|
||||
Solution found.
|
||||
optimality : float
|
||||
Infinity norm of the Lagrangian gradient at the solution.
|
||||
constr_violation : float
|
||||
Maximum constraint violation at the solution.
|
||||
fun : float
|
||||
Objective function at the solution.
|
||||
grad : ndarray, shape (n,)
|
||||
Gradient of the objective function at the solution.
|
||||
lagrangian_grad : ndarray, shape (n,)
|
||||
Gradient of the Lagrangian function at the solution.
|
||||
nit : int
|
||||
Total number of iterations.
|
||||
nfev : integer
|
||||
Number of the objective function evaluations.
|
||||
njev : integer
|
||||
Number of the objective function gradient evaluations.
|
||||
nhev : integer
|
||||
Number of the objective function Hessian evaluations.
|
||||
cg_niter : int
|
||||
Total number of the conjugate gradient method iterations.
|
||||
method : {'equality_constrained_sqp', 'tr_interior_point'}
|
||||
Optimization method used.
|
||||
constr : list of ndarray
|
||||
List of constraint values at the solution.
|
||||
jac : list of {ndarray, sparse matrix}
|
||||
List of the Jacobian matrices of the constraints at the solution.
|
||||
v : list of ndarray
|
||||
List of the Lagrange multipliers for the constraints at the solution.
|
||||
For an inequality constraint a positive multiplier means that the upper
|
||||
bound is active, a negative multiplier means that the lower bound is
|
||||
active and if a multiplier is zero it means the constraint is not
|
||||
active.
|
||||
constr_nfev : list of int
|
||||
Number of constraint evaluations for each of the constraints.
|
||||
constr_njev : list of int
|
||||
Number of Jacobian matrix evaluations for each of the constraints.
|
||||
constr_nhev : list of int
|
||||
Number of Hessian evaluations for each of the constraints.
|
||||
tr_radius : float
|
||||
Radius of the trust region at the last iteration.
|
||||
constr_penalty : float
|
||||
Penalty parameter at the last iteration, see `initial_constr_penalty`.
|
||||
barrier_tolerance : float
|
||||
Tolerance for the barrier subproblem at the last iteration.
|
||||
Only for problems with inequality constraints.
|
||||
barrier_parameter : float
|
||||
Barrier parameter at the last iteration. Only for problems
|
||||
with inequality constraints.
|
||||
execution_time : float
|
||||
Total execution time.
|
||||
message : str
|
||||
Termination message.
|
||||
status : {0, 1, 2, 3}
|
||||
Termination status:
|
||||
|
||||
* 0 : The maximum number of function evaluations is exceeded.
|
||||
* 1 : `gtol` termination condition is satisfied.
|
||||
* 2 : `xtol` termination condition is satisfied.
|
||||
* 3 : `callback` function requested termination.
|
||||
|
||||
cg_stop_cond : int
|
||||
Reason for CG subproblem termination at the last iteration:
|
||||
|
||||
* 0 : CG subproblem not evaluated.
|
||||
* 1 : Iteration limit was reached.
|
||||
* 2 : Reached the trust-region boundary.
|
||||
* 3 : Negative curvature detected.
|
||||
* 4 : Tolerance was satisfied.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
|
||||
Trust region methods. 2000. Siam. pp. 19.
|
||||
"""
|
||||
x0 = np.atleast_1d(x0).astype(float)
|
||||
n_vars = np.size(x0)
|
||||
if hess is None:
|
||||
if callable(hessp):
|
||||
hess = HessianLinearOperator(hessp, n_vars)
|
||||
else:
|
||||
hess = BFGS()
|
||||
if disp and verbose == 0:
|
||||
verbose = 1
|
||||
|
||||
if bounds is not None:
|
||||
finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
|
||||
bounds.keep_feasible, n_vars)
|
||||
else:
|
||||
finite_diff_bounds = (-np.inf, np.inf)
|
||||
|
||||
# Define Objective Function
|
||||
objective = ScalarFunction(fun, x0, args, grad, hess,
|
||||
finite_diff_rel_step, finite_diff_bounds)
|
||||
|
||||
# Put constraints in list format when needed.
|
||||
if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
|
||||
constraints = [constraints]
|
||||
|
||||
# Prepare constraints.
|
||||
prepared_constraints = [
|
||||
PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
|
||||
for c in constraints]
|
||||
|
||||
# Check that all constraints are either sparse or dense.
|
||||
n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
|
||||
if 0 < n_sparse < len(prepared_constraints):
|
||||
raise ValueError("All constraints must have the same kind of the "
|
||||
"Jacobian --- either all sparse or all dense. "
|
||||
"You can set the sparsity globally by setting "
|
||||
"`sparse_jacobian` to either True of False.")
|
||||
if prepared_constraints:
|
||||
sparse_jacobian = n_sparse > 0
|
||||
|
||||
if bounds is not None:
|
||||
if sparse_jacobian is None:
|
||||
sparse_jacobian = True
|
||||
prepared_constraints.append(PreparedConstraint(bounds, x0,
|
||||
sparse_jacobian))
|
||||
|
||||
# Concatenate initial constraints to the canonical form.
|
||||
c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
|
||||
n_vars, prepared_constraints, sparse_jacobian)
|
||||
|
||||
# Prepare all canonical constraints and concatenate it into one.
|
||||
canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
|
||||
for c in prepared_constraints]
|
||||
|
||||
if len(canonical_all) == 0:
|
||||
canonical = CanonicalConstraint.empty(n_vars)
|
||||
elif len(canonical_all) == 1:
|
||||
canonical = canonical_all[0]
|
||||
else:
|
||||
canonical = CanonicalConstraint.concatenate(canonical_all,
|
||||
sparse_jacobian)
|
||||
|
||||
# Generate the Hessian of the Lagrangian.
|
||||
lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)
|
||||
|
||||
# Choose appropriate method
|
||||
if canonical.n_ineq == 0:
|
||||
method = 'equality_constrained_sqp'
|
||||
else:
|
||||
method = 'tr_interior_point'
|
||||
|
||||
# Construct OptimizeResult
|
||||
state = OptimizeResult(
|
||||
nit=0, nfev=0, njev=0, nhev=0,
|
||||
cg_niter=0, cg_stop_cond=0,
|
||||
fun=objective.f, grad=objective.g,
|
||||
lagrangian_grad=np.copy(objective.g),
|
||||
constr=[c.fun.f for c in prepared_constraints],
|
||||
jac=[c.fun.J for c in prepared_constraints],
|
||||
constr_nfev=[0 for c in prepared_constraints],
|
||||
constr_njev=[0 for c in prepared_constraints],
|
||||
constr_nhev=[0 for c in prepared_constraints],
|
||||
v=[c.fun.v for c in prepared_constraints],
|
||||
method=method)
|
||||
|
||||
# Start counting
|
||||
start_time = time.time()
|
||||
|
||||
# Define stop criteria
|
||||
if method == 'equality_constrained_sqp':
|
||||
def stop_criteria(state, x, last_iteration_failed,
|
||||
optimality, constr_violation,
|
||||
tr_radius, constr_penalty, cg_info):
|
||||
state = update_state_sqp(state, x, last_iteration_failed,
|
||||
objective, prepared_constraints,
|
||||
start_time, tr_radius, constr_penalty,
|
||||
cg_info)
|
||||
if verbose == 2:
|
||||
BasicReport.print_iteration(state.nit,
|
||||
state.nfev,
|
||||
state.cg_niter,
|
||||
state.fun,
|
||||
state.tr_radius,
|
||||
state.optimality,
|
||||
state.constr_violation)
|
||||
elif verbose > 2:
|
||||
SQPReport.print_iteration(state.nit,
|
||||
state.nfev,
|
||||
state.cg_niter,
|
||||
state.fun,
|
||||
state.tr_radius,
|
||||
state.optimality,
|
||||
state.constr_violation,
|
||||
state.constr_penalty,
|
||||
state.cg_stop_cond)
|
||||
state.status = None
|
||||
state.niter = state.nit # Alias for callback (backward-compatibility)
|
||||
if callback is not None and callback(np.copy(state.x), state):
|
||||
state.status = 3
|
||||
elif state.optimality < gtol and state.constr_violation < gtol:
|
||||
state.status = 1
|
||||
elif state.tr_radius < xtol:
|
||||
state.status = 2
|
||||
elif state.nit >= maxiter:
|
||||
state.status = 0
|
||||
return state.status in (0, 1, 2, 3)
|
||||
elif method == 'tr_interior_point':
|
||||
def stop_criteria(state, x, last_iteration_failed, tr_radius,
|
||||
constr_penalty, cg_info, barrier_parameter,
|
||||
barrier_tolerance):
|
||||
state = update_state_ip(state, x, last_iteration_failed,
|
||||
objective, prepared_constraints,
|
||||
start_time, tr_radius, constr_penalty,
|
||||
cg_info, barrier_parameter, barrier_tolerance)
|
||||
if verbose == 2:
|
||||
BasicReport.print_iteration(state.nit,
|
||||
state.nfev,
|
||||
state.cg_niter,
|
||||
state.fun,
|
||||
state.tr_radius,
|
||||
state.optimality,
|
||||
state.constr_violation)
|
||||
elif verbose > 2:
|
||||
IPReport.print_iteration(state.nit,
|
||||
state.nfev,
|
||||
state.cg_niter,
|
||||
state.fun,
|
||||
state.tr_radius,
|
||||
state.optimality,
|
||||
state.constr_violation,
|
||||
state.constr_penalty,
|
||||
state.barrier_parameter,
|
||||
state.cg_stop_cond)
|
||||
state.status = None
|
||||
state.niter = state.nit # Alias for callback (backward compatibility)
|
||||
if callback is not None and callback(np.copy(state.x), state):
|
||||
state.status = 3
|
||||
elif state.optimality < gtol and state.constr_violation < gtol:
|
||||
state.status = 1
|
||||
elif (state.tr_radius < xtol
|
||||
and state.barrier_parameter < barrier_tol):
|
||||
state.status = 2
|
||||
elif state.nit >= maxiter:
|
||||
state.status = 0
|
||||
return state.status in (0, 1, 2, 3)
|
||||
|
||||
if verbose == 2:
|
||||
BasicReport.print_header()
|
||||
elif verbose > 2:
|
||||
if method == 'equality_constrained_sqp':
|
||||
SQPReport.print_header()
|
||||
elif method == 'tr_interior_point':
|
||||
IPReport.print_header()
|
||||
|
||||
# Call inferior function to do the optimization
|
||||
if method == 'equality_constrained_sqp':
|
||||
def fun_and_constr(x):
|
||||
f = objective.fun(x)
|
||||
c_eq, _ = canonical.fun(x)
|
||||
return f, c_eq
|
||||
|
||||
def grad_and_jac(x):
|
||||
g = objective.grad(x)
|
||||
J_eq, _ = canonical.jac(x)
|
||||
return g, J_eq
|
||||
|
||||
_, result = equality_constrained_sqp(
|
||||
fun_and_constr, grad_and_jac, lagrangian_hess,
|
||||
x0, objective.f, objective.g,
|
||||
c_eq0, J_eq0,
|
||||
stop_criteria, state,
|
||||
initial_constr_penalty, initial_tr_radius,
|
||||
factorization_method)
|
||||
|
||||
elif method == 'tr_interior_point':
|
||||
_, result = tr_interior_point(
|
||||
objective.fun, objective.grad, lagrangian_hess,
|
||||
n_vars, canonical.n_ineq, canonical.n_eq,
|
||||
canonical.fun, canonical.jac,
|
||||
x0, objective.f, objective.g,
|
||||
c_ineq0, J_ineq0, c_eq0, J_eq0,
|
||||
stop_criteria,
|
||||
canonical.keep_feasible,
|
||||
xtol, state, initial_barrier_parameter,
|
||||
initial_barrier_tolerance,
|
||||
initial_constr_penalty, initial_tr_radius,
|
||||
factorization_method)
|
||||
|
||||
# Status 3 occurs when the callback function requests termination,
|
||||
# this is assumed to not be a success.
|
||||
result.success = True if result.status in (1, 2) else False
|
||||
result.message = TERMINATION_MESSAGES[result.status]
|
||||
|
||||
# Alias (for backward compatibility with 1.1.0)
|
||||
result.niter = result.nit
|
||||
|
||||
if verbose == 2:
|
||||
BasicReport.print_footer()
|
||||
elif verbose > 2:
|
||||
if method == 'equality_constrained_sqp':
|
||||
SQPReport.print_footer()
|
||||
elif method == 'tr_interior_point':
|
||||
IPReport.print_footer()
|
||||
if verbose >= 1:
|
||||
print(result.message)
|
||||
print("Number of iterations: {}, function evaluations: {}, "
|
||||
"CG iterations: {}, optimality: {:.2e}, "
|
||||
"constraint violation: {:.2e}, execution time: {:4.2} s."
|
||||
.format(result.nit, result.nfev, result.cg_niter,
|
||||
result.optimality, result.constr_violation,
|
||||
result.execution_time))
|
||||
return result
|
|
@ -0,0 +1,405 @@
|
|||
"""Basic linear factorizations needed by the solver."""
|
||||
|
||||
from scipy.sparse import (bmat, csc_matrix, eye, issparse)
|
||||
from scipy.sparse.linalg import LinearOperator
|
||||
import scipy.linalg
|
||||
import scipy.sparse.linalg
|
||||
try:
|
||||
from sksparse.cholmod import cholesky_AAt
|
||||
sksparse_available = True
|
||||
except ImportError:
|
||||
import warnings
|
||||
sksparse_available = False
|
||||
import numpy as np
|
||||
from warnings import warn
|
||||
|
||||
__all__ = [
|
||||
'orthogonality',
|
||||
'projections',
|
||||
]
|
||||
|
||||
|
||||
def orthogonality(A, g):
    """Measure orthogonality between a vector and the null space of a matrix.

    Compute a measure of orthogonality between the null space
    of the (possibly sparse) matrix ``A`` and a given vector ``g``.

    The formula is a simplified (and cheaper) version of formula (3.13)
    from [1]_:
    ``orth = norm(A g, ord=2)/(norm(A, ord='fro')*norm(g, ord=2))``.

    Returns 0 whenever either ``A`` or ``g`` has zero norm.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
        "On the solution of equality constrained quadratic
        programming problems arising in optimization."
        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    g_norm = np.linalg.norm(g)
    # Frobenius norm; the sparse and dense cases need different routines.
    A_norm = (scipy.sparse.linalg.norm(A, ord='fro') if issparse(A)
              else np.linalg.norm(A, ord='fro'))

    # A zero vector (or zero matrix) is trivially orthogonal.
    if g_norm == 0 or A_norm == 0:
        return 0

    return np.linalg.norm(A.dot(g)) / (A_norm * g_norm)
|
||||
|
||||
|
||||
def normal_equation_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``NormalEquation`` approach.

    Builds three closures from a Cholesky factorization of ``A A.T``
    computed once via scikit-sparse's ``cholesky_AAt``:

    - ``null_space(x)``:    z = x - A.T inv(A A.T) A x
    - ``least_squares(x)``: z = inv(A A.T) A x
    - ``row_space(x)``:     z = A.T inv(A A.T) x

    Parameters
    ----------
    A : sparse matrix, shape (m, n)
        Matrix whose projections are required (sparse, since
        ``cholesky_AAt`` operates on sparse input).
    m, n : int
        Number of rows and columns of ``A``.
    orth_tol : float
        Orthogonality threshold that triggers iterative refinement
        inside ``null_space``.
    max_refin : int
        Maximum number of refinement iterations in ``null_space``.
    tol : float
        Unused here; kept so the signature matches the other
        ``*_projections`` factories.
    """
    # Cholesky factorization of A A.T, reused by all three closures.
    factor = cholesky_AAt(A)

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        v = factor(A.dot(x))
        z = x - A.T.dot(v)

        # Iterative refinement to improve roundoff
        # errors described in [2]_, algorithm 5.1.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # z_next = z - A.T inv(A A.T) A z
            v = factor(A.dot(z))
            z = z - A.T.dot(v)
            k += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        return factor(A.dot(x))

    # z = A.T inv(A A.T) x
    def row_space(x):
        return A.T.dot(factor(x))

    return null_space, least_squares, row_space
|
||||
|
||||
|
||||
def augmented_system_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A - ``AugmentedSystem``.

    Builds three closures that apply projections related to ``A`` by
    solving the sparse augmented system::

        [I  A.T] [ z ]   [r1]
        [A   0 ] [aux] = [r2]

    which is LU-factorized once up front. If the factorization raises
    ``RuntimeError`` (singular system), falls back to
    ``svd_factorization_projections`` on a dense copy of ``A``.

    - ``null_space(x)``:    z = x - A.T inv(A A.T) A x
    - ``least_squares(x)``: z = inv(A A.T) A x
    - ``row_space(x)``:     z = A.T inv(A A.T) x

    Parameters are as in ``projections``; ``tol`` is only forwarded to
    the SVD fallback.
    """
    # Form augmented system
    K = csc_matrix(bmat([[eye(n), A.T], [A, None]]))
    # LU factorization
    # TODO: Use a symmetric indefinite factorization
    # to solve the system twice as fast (because
    # of the symmetry).
    try:
        solve = scipy.sparse.linalg.factorized(K)
    except RuntimeError:
        warn("Singular Jacobian matrix. Using dense SVD decomposition to "
             "perform the factorizations.")
        return svd_factorization_projections(A.toarray(),
                                             m, n, orth_tol,
                                             max_refin, tol)

    # z = x - A.T inv(A A.T) A x
    # is computed solving the extended system:
    # [I A.T] * [ z ] = [x]
    # [A O ]   [aux]   [0]
    def null_space(x):
        # v = [x]
        #     [0]
        v = np.hstack([x, np.zeros(m)])
        # lu_sol = [ z ]
        #          [aux]
        lu_sol = solve(v)
        z = lu_sol[:n]

        # Iterative refinement to improve roundoff
        # errors described in [2]_, algorithm 5.2.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # new_v = [x] - [I A.T] * [ z ]
            #         [0]   [A O ]   [aux]
            new_v = v - K.dot(lu_sol)
            # [I A.T] * [delta  z ] = new_v
            # [A O ]   [delta aux]
            lu_update = solve(new_v)
            # [ z ] += [delta  z ]
            # [aux]    [delta aux]
            lu_sol += lu_update
            z = lu_sol[:n]
            k += 1

        # return z = x - A.T inv(A A.T) A x
        return z

    # z = inv(A A.T) A x
    # is computed solving the extended system:
    # [I A.T] * [aux] = [x]
    # [A O ]   [ z ]   [0]
    def least_squares(x):
        # v = [x]
        #     [0]
        v = np.hstack([x, np.zeros(m)])
        # lu_sol = [aux]
        #          [ z ]
        lu_sol = solve(v)
        # return z = inv(A A.T) A x
        return lu_sol[n:m+n]

    # z = A.T inv(A A.T) x
    # is computed solving the extended system:
    # [I A.T] * [ z ] = [0]
    # [A O ]   [aux]   [x]
    def row_space(x):
        # v = [0]
        #     [x]
        v = np.hstack([np.zeros(n), x])
        # lu_sol = [ z ]
        #          [aux]
        lu_sol = solve(v)
        # return z = A.T inv(A A.T) x
        return lu_sol[:n]

    return null_space, least_squares, row_space
|
||||
|
||||
|
||||
def qr_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``QRFactorization`` approach.

    Builds three closures from a pivoted, economy-size QR factorization
    of ``A.T`` (``A.T[:, P] = Q R``):

    - ``null_space(x)``:    z = x - A.T inv(A A.T) A x
    - ``least_squares(x)``: z = inv(A A.T) A x
    - ``row_space(x)``:     z = A.T inv(A A.T) x

    If the last row of ``R`` is negligible (``A`` numerically
    rank-deficient), falls back to ``svd_factorization_projections``.

    Parameters
    ----------
    A : ndarray, shape (m, n)
        Dense matrix whose projections are required.
    m, n : int
        Number of rows and columns of ``A``.
    orth_tol : float
        Orthogonality threshold triggering iterative refinement in
        ``null_space``.
    max_refin : int
        Maximum number of refinement iterations in ``null_space``.
    tol : float
        Threshold below which the last row of ``R`` is considered zero.
    """
    # QRFactorization
    Q, R, P = scipy.linalg.qr(A.T, pivoting=True, mode='economic')

    # If the smallest diagonal block of R vanished, A is rank-deficient
    # and the triangular solves below would be unstable.
    if np.linalg.norm(R[-1, :], np.inf) < tol:
        warn('Singular Jacobian matrix. Using SVD decomposition to ' +
             'perform the factorizations.')
        return svd_factorization_projections(A, m, n,
                                             orth_tol,
                                             max_refin,
                                             tol)

    # z = x - A.T inv(A A.T) A x
    def null_space(x):
        # v = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        # Scatter the solve result back through the column permutation P.
        v = np.zeros(m)
        v[P] = aux2
        z = x - A.T.dot(v)

        # Iterative refinement to improve roundoff
        # errors described in [2]_, algorithm 5.1.
        k = 0
        while orthogonality(A, z) > orth_tol:
            if k >= max_refin:
                break
            # v = P inv(R) Q.T z
            aux1 = Q.T.dot(z)
            aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
            v[P] = aux2
            # z_next = z - A.T v
            z = z - A.T.dot(v)
            k += 1

        return z

    # z = inv(A A.T) A x
    def least_squares(x):
        # z = P inv(R) Q.T x
        aux1 = Q.T.dot(x)
        aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
        z = np.zeros(m)
        z[P] = aux2
        return z

    # z = A.T inv(A A.T) x
    def row_space(x):
        # z = Q inv(R.T) P.T x
        aux1 = x[P]
        aux2 = scipy.linalg.solve_triangular(R, aux1,
                                             lower=False,
                                             trans='T')
        z = Q.dot(aux2)
        return z

    return null_space, least_squares, row_space
|
||||
|
||||
|
||||
def svd_factorization_projections(A, m, n, orth_tol, max_refin, tol):
    """Return linear operators for matrix A using ``SVDFactorization`` approach.

    Builds three closures from the thin SVD ``A = U diag(s) Vt``:

    - ``null_space(x)``:    z = x - A.T inv(A A.T) A x
    - ``least_squares(x)``: z = inv(A A.T) A x
    - ``row_space(x)``:     z = A.T inv(A A.T) x

    Singular values below ``tol`` are discarded, so the operators stay
    well defined for rank-deficient ``A``.
    """
    # Thin (economy-size) SVD.
    U, s, Vt = scipy.linalg.svd(A, full_matrices=False)

    # Drop directions associated with negligible singular values.
    significant = s > tol
    U = U[:, significant]
    Vt = Vt[significant, :]
    s = s[significant]

    # Shared kernel: U diag(1/s) Vt x = inv(A A.T) A x.
    def _apply_pinv(x):
        return U.dot(1/s*Vt.dot(x))

    def null_space(x):
        # Project x onto the null space of A.
        z = x - A.T.dot(_apply_pinv(x))

        # Iterative refinement to reduce roundoff error
        # ([2]_, algorithm 5.1), capped at max_refin passes.
        refinements = 0
        while orthogonality(A, z) > orth_tol and refinements < max_refin:
            z = z - A.T.dot(_apply_pinv(z))
            refinements += 1

        return z

    def least_squares(x):
        return _apply_pinv(x)

    def row_space(x):
        # z = V diag(1/s) U.T x = A.T inv(A A.T) x.
        return Vt.T.dot(1/s*U.T.dot(x))

    return null_space, least_squares, row_space
|
||||
|
||||
|
||||
def projections(A, method=None, orth_tol=1e-12, max_refin=3, tol=1e-15):
    """Return three linear operators related with a given matrix A.

    Parameters
    ----------
    A : sparse matrix (or ndarray), shape (m, n)
        Matrix ``A`` used in the projection.
    method : string, optional
        Method used to compute the given linear
        operators. Should be one of:

        - 'NormalEquation': The operators
          will be computed using the
          so-called normal equation approach
          explained in [1]_. In order to do
          so the Cholesky factorization of
          ``(A A.T)`` is computed. Exclusive
          for sparse matrices.
        - 'AugmentedSystem': The operators
          will be computed using the
          so-called augmented system approach
          explained in [1]_. Exclusive
          for sparse matrices.
        - 'QRFactorization': Compute projections
          using QR factorization. Exclusive for
          dense matrices.
        - 'SVDFactorization': Compute projections
          using SVD factorization. Exclusive for
          dense matrices.

    orth_tol : float, optional
        Tolerance for iterative refinements.
    max_refin : int, optional
        Maximum number of iterative refinements.
    tol : float, optional
        Tolerance for singular values.

    Returns
    -------
    Z : LinearOperator, shape (n, n)
        Null-space operator. For a given vector ``x``,
        the null space operator is equivalent to apply
        a projection matrix ``P = I - A.T inv(A A.T) A``
        to the vector. It can be shown that this is
        equivalent to project ``x`` into the null space
        of A.
    LS : LinearOperator, shape (m, n)
        Least-squares operator. For a given vector ``x``,
        the least-squares operator is equivalent to apply a
        pseudoinverse matrix ``pinv(A.T) = inv(A A.T) A``
        to the vector. It can be shown that this vector
        ``pinv(A.T) x`` is the least_square solution to
        ``A.T y = x``.
    Y : LinearOperator, shape (n, m)
        Row-space operator. For a given vector ``x``,
        the row-space operator is equivalent to apply a
        projection matrix ``Q = A.T inv(A A.T)``
        to the vector. It can be shown that this
        vector ``y = Q x`` the minimum norm solution
        of ``A y = x``.

    Notes
    -----
    Uses iterative refinements described in [1]
    during the computation of ``Z`` in order to
    cope with the possibility of large roundoff errors.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
        "On the solution of equality constrained quadratic
        programming problems arising in optimization."
        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    m, n = np.shape(A)

    # The factorization of an empty matrix
    # only works for the sparse representation.
    if m*n == 0:
        A = csc_matrix(A)

    # Check Argument
    if issparse(A):
        if method is None:
            method = "AugmentedSystem"
        if method not in ("NormalEquation", "AugmentedSystem"):
            raise ValueError("Method not allowed for sparse matrix.")
        if method == "NormalEquation" and not sksparse_available:
            # Use the module-level `warn` (imported unconditionally at the
            # top of this file) rather than `warnings.warn`: the `warnings`
            # name is only bound inside the `except ImportError` fallback
            # branch of the scikit-sparse import and relying on it couples
            # this call to that import having failed.
            warn(("Only accepts 'NormalEquation' option when"
                  " scikit-sparse is available. Using "
                  "'AugmentedSystem' option instead."),
                 ImportWarning)
            method = 'AugmentedSystem'
    else:
        if method is None:
            method = "QRFactorization"
        if method not in ("QRFactorization", "SVDFactorization"):
            raise ValueError("Method not allowed for dense array.")

    # Dispatch to the chosen factorization backend.
    if method == 'NormalEquation':
        null_space, least_squares, row_space \
            = normal_equation_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == 'AugmentedSystem':
        null_space, least_squares, row_space \
            = augmented_system_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == "QRFactorization":
        null_space, least_squares, row_space \
            = qr_factorization_projections(A, m, n, orth_tol, max_refin, tol)
    elif method == "SVDFactorization":
        null_space, least_squares, row_space \
            = svd_factorization_projections(A, m, n, orth_tol, max_refin, tol)

    # Wrap the closures as LinearOperators with the appropriate shapes.
    Z = LinearOperator((n, n), null_space)
    LS = LinearOperator((m, n), least_squares)
    Y = LinearOperator((n, m), row_space)

    return Z, LS, Y
|
|
@ -0,0 +1,637 @@
|
|||
"""Equality-constrained quadratic programming solvers."""
|
||||
|
||||
from scipy.sparse import (linalg, bmat, csc_matrix)
|
||||
from math import copysign
|
||||
import numpy as np
|
||||
from numpy.linalg import norm
|
||||
|
||||
__all__ = [
|
||||
'eqp_kktfact',
|
||||
'sphere_intersections',
|
||||
'box_intersections',
|
||||
'box_sphere_intersections',
|
||||
'inside_box_boundaries',
|
||||
'modified_dogleg',
|
||||
'projected_cg'
|
||||
]
|
||||
|
||||
|
||||
# For comparison with the projected CG
|
||||
def eqp_kktfact(H, c, A, b):
    """Solve equality-constrained quadratic programming (EQP) problem.

    Solve ``min 1/2 x.T H x + x.T c`` subject to ``A x + b = 0``
    using direct factorization of the KKT system.

    Parameters
    ----------
    H : sparse matrix, shape (n, n)
        Hessian matrix of the EQP problem.
    c : array_like, shape (n,)
        Gradient of the quadratic objective function.
    A : sparse matrix
        Jacobian matrix of the EQP problem.
    b : array_like, shape (m,)
        Right-hand side of the constraint equation.

    Returns
    -------
    x : array_like, shape (n,)
        Solution of the KKT problem.
    lagrange_multipliers : ndarray, shape (m,)
        Lagrange multipliers of the KKT problem.
    """
    n, = np.shape(c)  # number of parameters
    m, = np.shape(b)  # number of constraints

    # Assemble the Karush-Kuhn-Tucker system of Nocedal/Wright,
    # "Numerical Optimization", p.452, Eq. (16.4):
    #     [[H, A.T], [A, 0]] [x; y] = [-c; -b]
    kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]]))
    kkt_vec = np.hstack([-c, -b])

    # TODO: Use a symmetric indefinite factorization to solve the
    # system twice as fast (because of the symmetry).
    kkt_sol = linalg.splu(kkt_matrix).solve(kkt_vec)

    # The multipliers carry a flipped sign in this formulation.
    return kkt_sol[:n], -kkt_sol[n:n+m]
|
||||
|
||||
|
||||
def sphere_intersections(z, d, trust_radius,
                         entire_line=False):
    """Find the intersection between segment (or line) and spherical constraints.

    Find the intersection between the segment (or line) defined by the
    parametric equation ``x(t) = z + t*d`` and the ball
    ``||x|| <= trust_radius``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    trust_radius : float
        Ball radius.
    entire_line : bool, optional
        When ``True``, intersect the whole line (any ``t``); when
        ``False``, only the segment ``0 <= t <= 1``.

    Returns
    -------
    ta, tb : float
        The line/segment ``x(t) = z + t*d`` is inside the ball for
        ``ta <= t <= tb``.
    intersect : bool
        ``True`` when the line/segment intersects the ball, ``False``
        otherwise.
    """
    # A zero direction never leaves the starting point.
    if norm(d) == 0:
        return 0, 0, False

    # An infinite radius contains every point of the line/segment.
    if np.isinf(trust_radius):
        return (-np.inf, np.inf, True) if entire_line else (0, 1, True)

    # Coefficients of the quadratic ||z + t d||^2 = trust_radius^2.
    quad_a = np.dot(d, d)
    quad_b = 2 * np.dot(z, d)
    quad_c = np.dot(z, z) - trust_radius**2
    discriminant = quad_b*quad_b - 4*quad_a*quad_c
    if discriminant < 0:
        # The line misses the ball entirely.
        return 0, 0, False
    sqrt_discriminant = np.sqrt(discriminant)

    # Numerically stable root pair: avoids cancellation by computing the
    # larger-magnitude root first (Matrix Computations, p.97).
    aux = quad_b + copysign(sqrt_discriminant, quad_b)
    ta, tb = sorted((-aux / (2*quad_a), -2*quad_c / aux))

    if entire_line:
        return ta, tb, True

    # Clip the interval to the segment 0 <= t <= 1.
    if tb < 0 or ta > 1:
        return 0, 0, False
    return max(0, ta), min(1, tb), True
|
||||
|
||||
|
||||
def box_intersections(z, d, lb, ub,
                      entire_line=False):
    """Find the intersection between segment (or line) and box constraints.

    Find the intersection between the segment (or line) defined by the
    parametric equation ``x(t) = z + t*d`` and the rectangular box
    ``lb <= x <= ub``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    lb : array_like, shape (n,)
        Lower bounds delimiting the rectangular box.
    ub : array_like, shape (n,)
        Upper bounds delimiting the rectangular box.
    entire_line : bool, optional
        When ``True``, intersect the whole line (any ``t``); when
        ``False``, only the segment ``0 <= t <= 1``.

    Returns
    -------
    ta, tb : float
        The line/segment ``x(t) = z + t*d`` is inside the box for
        ``ta <= t <= tb``.
    intersect : bool
        ``True`` when the line/segment intersects the box, ``False``
        otherwise.
    """
    z, d = np.asarray(z), np.asarray(d)
    lb, ub = np.asarray(lb), np.asarray(ub)

    # A zero direction never leaves the starting point.
    if norm(d) == 0:
        return 0, 0, False

    # Coordinates with zero direction never move; if any of them already
    # violates its bound, the line misses the box entirely.
    fixed = (d == 0)
    if (z[fixed] < lb[fixed]).any() or (z[fixed] > ub[fixed]).any():
        return 0, 0, False

    # Keep only the moving coordinates.
    moving = ~fixed
    z, d = z[moving], d[moving]
    lb, ub = lb[moving], ub[moving]

    # Per-coordinate interval in which lb_i <= z_i + t*d_i <= ub_i,
    # then intersect all of them.
    t_lower = (lb - z) / d
    t_upper = (ub - z) / d
    ta = max(np.minimum(t_lower, t_upper))
    tb = min(np.maximum(t_lower, t_upper))
    intersect = bool(ta <= tb)

    if not entire_line:
        # Restrict the interval to the segment 0 <= t <= 1.
        if tb < 0 or ta > 1:
            return 0, 0, False
        ta, tb = max(0, ta), min(1, tb)

    return ta, tb, intersect
|
||||
|
||||
|
||||
def box_sphere_intersections(z, d, lb, ub, trust_radius,
                             entire_line=False,
                             extra_info=False):
    """Find the intersection between segment (or line) and box/sphere constraints.

    Find the intersection between the segment (or line) defined by the
    parametric equation ``x(t) = z + t*d``, the rectangular box
    ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    lb : array_like, shape (n,)
        Lower bounds of each component of ``x``, delimiting the
        rectangular box.
    ub : array_like, shape (n,)
        Upper bounds of each component of ``x``, delimiting the
        rectangular box.
    trust_radius : float
        Ball radius.
    entire_line : bool, optional
        When ``True``, intersect the whole line ``x(t) = z + t*d``
        (``t`` unrestricted) with the constraints. When ``False``,
        intersect only the segment ``0 <= t <= 1``.
    extra_info : bool, optional
        When ``True``, also return ``sphere_info`` and ``box_info``.

    Returns
    -------
    ta, tb : float
        The line/segment ``x(t) = z + t*d`` is inside the rectangular
        box and inside the ball for ``ta <= t <= tb``.
    intersect : bool
        ``True`` when the line (or segment) intersects both constraint
        sets simultaneously; ``False`` otherwise.
    sphere_info : dict, optional
        Dictionary ``{ta, tb, intersect}`` with the interval for which
        the line intercepts the ball, and whether it does at all.
    box_info : dict, optional
        Dictionary ``{ta, tb, intersect}`` with the interval for which
        the line intercepts the box, and whether it does at all.
    """
    box_ta, box_tb, box_hit = box_intersections(z, d, lb, ub, entire_line)
    sph_ta, sph_tb, sph_hit = sphere_intersections(z, d, trust_radius,
                                                   entire_line)
    # The combined feasible range is the overlap of the two intervals.
    ta = np.maximum(box_ta, sph_ta)
    tb = np.minimum(box_tb, sph_tb)
    # A joint intersection requires each constraint to be hit on its own
    # and the overlap interval to be non-empty.
    intersect = bool(box_hit and sph_hit and ta <= tb)

    if not extra_info:
        return ta, tb, intersect
    sphere_info = {'ta': sph_ta, 'tb': sph_tb, 'intersect': sph_hit}
    box_info = {'ta': box_ta, 'tb': box_tb, 'intersect': box_hit}
    return ta, tb, intersect, sphere_info, box_info
|
||||
|
||||
|
||||
def inside_box_boundaries(x, lb, ub):
    """Return whether every component of ``x`` satisfies ``lb <= x <= ub``."""
    return np.all(lb <= x) and np.all(x <= ub)
|
||||
|
||||
|
||||
def reinforce_box_boundaries(x, lb, ub):
    """Project ``x`` componentwise onto the box ``[lb, ub]``.

    Equivalent to ``np.minimum(np.maximum(x, lb), ub)``; ``np.clip``
    applies exactly that max-then-min composition.
    """
    return np.clip(x, lb, ub)
|
||||
|
||||
|
||||
def modified_dogleg(A, Y, b, trust_radius, lb, ub):
    """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region.

    Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2``
    subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification
    of the classical dogleg approach.

    Parameters
    ----------
    A : LinearOperator (or sparse matrix or ndarray), shape (m, n)
        Matrix ``A`` in the minimization problem. It should have
        dimension ``(m, n)`` such that ``m < n``.
    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
        LinearOperator that apply the projection matrix
        ``Q = A.T inv(A A.T)`` to the vector. The obtained vector
        ``y = Q x`` being the minimum norm solution of ``A y = x``.
    b : array_like, shape (m,)
        Vector ``b`` in the minimization problem.
    trust_radius: float
        Trust radius to be considered. Delimits a sphere boundary
        to the problem.
    lb : array_like, shape (n,)
        Lower bounds to each one of the components of ``x``.
        It is expected that ``lb <= 0``, otherwise the algorithm
        may fail. If ``lb[i] = -Inf``, the lower
        bound for the ith component is just ignored.
    ub : array_like, shape (n, )
        Upper bounds to each one of the components of ``x``.
        It is expected that ``ub >= 0``, otherwise the algorithm
        may fail. If ``ub[i] = Inf``, the upper bound for the ith
        component is just ignored.

    Returns
    -------
    x : array_like, shape (n,)
        Solution to the problem.

    Notes
    -----
    Based on implementations described in pp. 885-886 from [1]_.

    References
    ----------
    .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
           "An interior point algorithm for large-scale nonlinear
           programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
    """
    # NOTE(review): ``norm`` is provided by a module-level import not
    # visible in this chunk — presumably numpy.linalg.norm; confirm.

    # Compute minimum norm minimizer of 1/2*|| A x + b ||^2.
    newton_point = -Y.dot(b)
    # If the Newton point is already feasible (inside both the box and
    # the trust region), it is the answer — no dogleg path needed.
    if inside_box_boundaries(newton_point, lb, ub) \
       and norm(newton_point) <= trust_radius:
        x = newton_point
        return x

    # Compute gradient vector ``g = A.T b``
    g = A.T.dot(b)
    # Compute Cauchy point (unconstrained minimizer along -g):
    # `cauchy_point = g.T g / (g.T A.T A g)``.
    A_g = A.dot(g)
    cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
    # Origin
    origin_point = np.zeros_like(cauchy_point)

    # Check the segment between cauchy_point and newton_point
    # for a possible solution (the second dogleg leg).
    z = cauchy_point
    p = newton_point - cauchy_point
    _, alpha, intersect = box_sphere_intersections(z, p, lb, ub,
                                                   trust_radius)
    if intersect:
        x1 = z + alpha*p
    else:
        # Check the segment between the origin and cauchy_point
        # for a possible solution (the first dogleg leg).
        z = origin_point
        p = cauchy_point
        _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
                                               trust_radius)
        x1 = z + alpha*p

    # Check the segment between origin and newton_point
    # for a possible solution (the direct Newton direction).
    z = origin_point
    p = newton_point
    _, alpha, _ = box_sphere_intersections(z, p, lb, ub,
                                           trust_radius)
    x2 = z + alpha*p

    # Return the candidate with the smaller residual ``|| A x + b ||``.
    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
        return x1
    else:
        return x2
|
||||
|
||||
|
||||
def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
                 lb=None, ub=None, tol=None,
                 max_iter=None, max_infeasible_iter=None,
                 return_all=False):
    """Solve EQP problem with projected CG method.

    Solve equality-constrained quadratic programming problem
    ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` and,
    possibly, to trust region constraints ``||x|| < trust_radius``
    and box constraints ``lb <= x <= ub``.

    Parameters
    ----------
    H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
        Operator for computing ``H v``.
    c : array_like, shape (n,)
        Gradient of the quadratic objective function.
    Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
        Operator for projecting ``x`` into the null space of A.
    Y : LinearOperator, sparse matrix, ndarray, shape (n, m)
        Operator that, for a given a vector ``b``, compute smallest
        norm solution of ``A x + b = 0``.
    b : array_like, shape (m,)
        Right-hand side of the constraint equation.
    trust_radius : float, optional
        Trust radius to be considered. By default, uses ``trust_radius=inf``,
        which means no trust radius at all.
    lb : array_like, shape (n,), optional
        Lower bounds to each one of the components of ``x``.
        If ``lb[i] = -Inf`` the lower bound for the i-th
        component is just ignored (default).
    ub : array_like, shape (n, ), optional
        Upper bounds to each one of the components of ``x``.
        If ``ub[i] = Inf`` the upper bound for the i-th
        component is just ignored (default).
    tol : float, optional
        Tolerance used to interrupt the algorithm.
    max_iter : int, optional
        Maximum algorithm iterations. Where ``max_inter <= n-m``.
        By default, uses ``max_iter = n-m``.
    max_infeasible_iter : int, optional
        Maximum infeasible (regarding box constraints) iterations the
        algorithm is allowed to take.
        By default, uses ``max_infeasible_iter = n-m``.
    return_all : bool, optional
        When ``true``, return the list of all vectors through the iterations.

    Returns
    -------
    x : array_like, shape (n,)
        Solution of the EQP problem.
    info : Dict
        Dictionary containing the following:

            - niter : Number of iterations.
            - stop_cond : Reason for algorithm termination:
                1. Iteration limit was reached;
                2. Reached the trust-region boundary;
                3. Negative curvature detected;
                4. Tolerance was satisfied.
            - allvecs : List containing all intermediary vectors (optional).
            - hits_boundary : True if the proposed step is on the boundary
              of the trust region.

    Notes
    -----
    Implementation of Algorithm 6.2 on [1]_.

    In the absence of spherical and box constraints, for sufficient
    iterations, the method returns a truly optimal result.
    In the presence of those constraints, the value returned is only
    an inexpensive approximation of the optimal value.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
           "On the solution of equality constrained quadratic
           programming problems arising in optimization."
           SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    # Threshold below which a quantity is treated as numerically zero.
    CLOSE_TO_ZERO = 1e-25

    n, = np.shape(c)  # Number of parameters
    m, = np.shape(b)  # Number of constraints

    # Initial Values: start from the minimum-norm solution of A x = -b,
    # with the residual projected onto the null space of A.
    x = Y.dot(-b)
    r = Z.dot(H.dot(x) + c)
    g = Z.dot(r)
    p = -g

    # Store ``x`` value
    if return_all:
        allvecs = [x]
    # Values for the first iteration
    H_p = H.dot(p)
    rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)

    # If x > trust-region the problem does not have a solution.
    tr_distance = trust_radius - norm(x)
    if tr_distance < 0:
        raise ValueError("Trust region problem does not have a solution.")
    # If x == trust_radius, then x is the solution
    # to the optimization problem, since x is the
    # minimum norm solution to Ax=b.
    elif tr_distance < CLOSE_TO_ZERO:
        info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True}
        if return_all:
            allvecs.append(x)
            info['allvecs'] = allvecs
        return x, info

    # Set default tolerance
    if tol is None:
        tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO)
    # Set default lower and upper bounds
    if lb is None:
        lb = np.full(n, -np.inf)
    if ub is None:
        ub = np.full(n, np.inf)
    # Set maximum iterations (never more than the null-space dimension).
    if max_iter is None:
        max_iter = n-m
    max_iter = min(max_iter, n-m)
    # Set maximum infeasible iterations
    if max_infeasible_iter is None:
        max_infeasible_iter = n-m

    hits_boundary = False
    stop_cond = 1
    counter = 0  # consecutive iterations with x outside the box
    last_feasible_x = np.zeros_like(x)
    k = 0  # number of CG iterations actually performed
    for i in range(max_iter):
        # Stop criteria - Tolerance : r.T g < tol
        if rt_g < tol:
            stop_cond = 4
            break
        k += 1
        # Compute curvature
        pt_H_p = H_p.dot(p)
        # Stop criteria - Negative curvature
        if pt_H_p <= 0:
            if np.isinf(trust_radius):
                raise ValueError("Negative curvature not allowed "
                                 "for unrestricted problems.")
            else:
                # Find intersection with constraints
                _, alpha, intersect = box_sphere_intersections(
                    x, p, lb, ub, trust_radius, entire_line=True)
                # Update solution
                if intersect:
                    x = x + alpha*p
                # Reinforce variables are inside box constraints.
                # This is only necessary because of roundoff errors.
                x = reinforce_box_boundaries(x, lb, ub)
                # Attribute information
                stop_cond = 3
                hits_boundary = True
                break

        # Get next step
        alpha = rt_g / pt_H_p
        x_next = x + alpha*p

        # Stop criteria - Hits boundary
        if np.linalg.norm(x_next) >= trust_radius:
            # Find intersection with box constraints
            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
                                                           trust_radius)
            # Update solution
            if intersect:
                x = x + theta*alpha*p
            # Reinforce variables are inside box constraints.
            # This is only necessary because of roundoff errors.
            x = reinforce_box_boundaries(x, lb, ub)
            # Attribute information
            stop_cond = 2
            hits_boundary = True
            break

        # Check if ``x`` is inside the box and start counter if it is not.
        if inside_box_boundaries(x_next, lb, ub):
            counter = 0
        else:
            counter += 1
        # Whenever outside box constraints keep looking for intersections.
        if counter > 0:
            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
                                                           trust_radius)
            if intersect:
                last_feasible_x = x + theta*alpha*p
                # Reinforce variables are inside box constraints.
                # This is only necessary because of roundoff errors.
                last_feasible_x = reinforce_box_boundaries(last_feasible_x,
                                                           lb, ub)
                counter = 0
        # Stop after too many infeasible (regarding box constraints) iteration.
        if counter > max_infeasible_iter:
            break
        # Store ``x_next`` value
        if return_all:
            allvecs.append(x_next)

        # Update residual
        r_next = r + alpha*H_p
        # Project residual g+ = Z r+
        g_next = Z.dot(r_next)
        # Compute conjugate direction step d
        rt_g_next = norm(g_next)**2  # g.T g = r.T g (ref [1]_ p.1389)
        beta = rt_g_next / rt_g
        p = - g_next + beta*p
        # Prepare for next iteration
        x = x_next
        g = g_next
        r = g_next
        rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
        H_p = H.dot(p)

    # If the final iterate is infeasible, fall back to the last feasible
    # point recorded along the way.
    if not inside_box_boundaries(x, lb, ub):
        x = last_feasible_x
        hits_boundary = True
    info = {'niter': k, 'stop_cond': stop_cond,
            'hits_boundary': hits_boundary}
    if return_all:
        info['allvecs'] = allvecs
    return x, info
|
|
@ -0,0 +1,56 @@
|
|||
"""Progress report printers."""
|
||||
|
||||
|
||||
class ReportBase(object):
    """Base class for the tabular progress printers.

    Subclasses fill in the three class attributes below; the printing
    classmethods build pipe-delimited rows from them.
    """

    COLUMN_NAMES = NotImplemented
    COLUMN_WIDTHS = NotImplemented
    ITERATION_FORMATS = NotImplemented

    @classmethod
    def print_header(cls):
        """Print the column-name row followed by a dashed separator row."""
        cells = ["{{:^{}}}".format(width) for width in cls.COLUMN_WIDTHS]
        row_fmt = "|{}|".format("|".join(cells))
        dashes = ["-" * width for width in cls.COLUMN_WIDTHS]
        print(row_fmt.format(*cls.COLUMN_NAMES))
        print(row_fmt.format(*dashes))

    @classmethod
    def print_iteration(cls, *args):
        """Print one data row using the subclass's format specifiers."""
        # args[3] is the objective value. It should really be a float,
        # but trust-constr typically provides a length-1 array; coerce it
        # so the numeric format specifier does not fail.
        values = list(args)
        values[3] = float(values[3])

        cells = ["{{:{}}}".format(spec) for spec in cls.ITERATION_FORMATS]
        row_fmt = "|{}|".format("|".join(cells))
        print(row_fmt.format(*values))

    @classmethod
    def print_footer(cls):
        """Terminate the table with a blank line."""
        print()
|
||||
|
||||
|
||||
class BasicReport(ReportBase):
    """Seven-column progress table: iteration/evaluation counts, objective
    value, trust-region radius, optimality and constraint violation."""
    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e",
                         "^10.2e", "^10.2e", "^10.2e"]
|
||||
|
||||
|
||||
class SQPReport(ReportBase):
    """BasicReport columns plus the penalty parameter and the CG stop
    condition code."""
    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol", "penalty", "CG stop"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 7]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
                         "^10.2e", "^10.2e", "^7"]
|
||||
|
||||
|
||||
class IPReport(ReportBase):
    """SQPReport columns plus the interior-point barrier parameter."""
    COLUMN_NAMES = ["niter", "f evals", "CG iter", "obj func", "tr radius",
                    "opt", "c viol", "penalty", "barrier param", "CG stop"]
    COLUMN_WIDTHS = [7, 7, 7, 13, 10, 10, 10, 10, 13, 7]
    ITERATION_FORMATS = ["^7", "^7", "^7", "^+13.4e", "^10.2e", "^10.2e",
                         "^10.2e", "^10.2e", "^13.2e", "^7"]
|
|
@ -0,0 +1,11 @@
|
|||
|
||||
def configuration(parent_package='', top_path=None):
    """Return the numpy.distutils Configuration for this subpackage.

    Registers the ``_trustregion_constr`` subpackage and its ``tests``
    data directory with the build system.
    """
    # Local import keeps numpy.distutils out of module scope when this
    # file is merely imported.
    from numpy.distutils.misc_util import Configuration
    config = Configuration('_trustregion_constr', parent_package, top_path)
    config.add_data_dir('tests')
    return config
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,296 @@
|
|||
import numpy as np
|
||||
from numpy.testing import assert_array_equal, assert_equal
|
||||
from scipy.optimize._constraints import (NonlinearConstraint, Bounds,
|
||||
PreparedConstraint)
|
||||
from scipy.optimize._trustregion_constr.canonical_constraint \
|
||||
import CanonicalConstraint, initial_constraints_as_canonical
|
||||
|
||||
|
||||
def create_quadratic_function(n, m, rng):
    """Build a random vector-valued quadratic map from R^n to R^m.

    Returns ``(fun, jac, hess)`` closures where ``fun(x)`` evaluates the
    quadratic, ``jac(x)`` its Jacobian and ``hess(x, v)`` the Hessian of
    ``dot(fun(x), v)``. The coefficients are drawn from ``rng`` in a
    fixed order, so a seeded generator gives reproducible functions.
    """
    const_term = rng.rand(m)
    lin_term = rng.rand(m, n)
    quad_term = rng.rand(m, n, n)
    # Axes rearranged so that hess(x, v) contracts over the output index.
    quad_term_t = np.transpose(quad_term, (1, 2, 0))

    def fun(x):
        return const_term + lin_term.dot(x) + 0.5 * quad_term.dot(x).dot(x)

    def jac(x):
        return lin_term + quad_term.dot(x)

    def hess(x, v):
        return quad_term_t.dot(v)

    return fun, jac, hess
|
||||
|
||||
|
||||
def test_bounds_cases():
    """Canonicalization of Bounds constraints for the four bound patterns:
    unbounded, one-sided (each side), and mixed interval/equality."""
    # Test 1: no constraints — everything canonicalizes to empty.
    user_constraint = Bounds(-np.inf, np.inf)
    x0 = np.array([-1, 2])
    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

    assert_equal(c.n_eq, 0)
    assert_equal(c.n_ineq, 0)

    c_eq, c_ineq = c.fun(x0)
    assert_array_equal(c_eq, [])
    assert_array_equal(c_ineq, [])

    J_eq, J_ineq = c.jac(x0)
    assert_array_equal(J_eq, np.empty((0, 2)))
    assert_array_equal(J_ineq, np.empty((0, 2)))

    assert_array_equal(c.keep_feasible, [])

    # Test 2: infinite lower bound — only finite upper bounds become
    # inequalities (components 0 and 2 here).
    user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True])
    x0 = np.array([-1, -2, -3], dtype=float)
    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

    assert_equal(c.n_eq, 0)
    assert_equal(c.n_ineq, 2)

    c_eq, c_ineq = c.fun(x0)
    assert_array_equal(c_eq, [])
    assert_array_equal(c_ineq, [-1, -4])

    J_eq, J_ineq = c.jac(x0)
    assert_array_equal(J_eq, np.empty((0, 3)))
    assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]]))

    assert_array_equal(c.keep_feasible, [False, True])

    # Test 3: infinite upper bound — finite lower bounds become
    # inequalities with negated Jacobian rows.
    user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True])
    x0 = np.array([1, 2, 3], dtype=float)
    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

    assert_equal(c.n_eq, 0)
    assert_equal(c.n_ineq, 2)

    c_eq, c_ineq = c.fun(x0)
    assert_array_equal(c_eq, [])
    assert_array_equal(c_ineq, [-1, -1])

    J_eq, J_ineq = c.jac(x0)
    assert_array_equal(J_eq, np.empty((0, 3)))
    assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]]))

    assert_array_equal(c.keep_feasible, [True, False])

    # Test 4: interval constraint — lb == ub on component 3 yields an
    # equality; the finite interval on components 0 and 2 yields paired
    # upper/lower inequalities.
    user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3],
                             [False, True, True, True])
    x0 = np.array([0, 10, 8, 5])
    prepared_constraint = PreparedConstraint(user_constraint, x0, False)
    c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

    assert_equal(c.n_eq, 1)
    assert_equal(c.n_ineq, 4)

    c_eq, c_ineq = c.fun(x0)
    assert_array_equal(c_eq, [2])
    assert_array_equal(c_ineq, [-1, -2, -1, -6])

    J_eq, J_ineq = c.jac(x0)
    assert_array_equal(J_eq, [[0, 0, 0, 1]])
    assert_array_equal(J_ineq, [[1, 0, 0, 0],
                                [0, 0, 1, 0],
                                [-1, 0, 0, 0],
                                [0, 0, -1, 0]])

    assert_array_equal(c.keep_feasible, [False, True, False, True])
|
||||
|
||||
|
||||
def test_nonlinear_constraint():
    """Canonicalization of a NonlinearConstraint (dense and sparse
    Jacobian): component 1 has lb == ub (equality), the rest split into
    inequalities; also checks the Lagrangian Hessian contraction."""
    n = 3
    m = 5
    rng = np.random.RandomState(0)
    x0 = rng.rand(n)

    fun, jac, hess = create_quadratic_function(n, m, rng)
    f = fun(x0)
    J = jac(x0)

    lb = [-10, 3, -np.inf, -np.inf, -5]
    ub = [10, 3, np.inf, 3, np.inf]
    user_constraint = NonlinearConstraint(
        fun, lb, ub, jac, hess, [True, False, False, True, False])

    for sparse_jacobian in [False, True]:
        prepared_constraint = PreparedConstraint(user_constraint, x0,
                                                 sparse_jacobian)
        c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)

        assert_array_equal(c.n_eq, 1)
        assert_array_equal(c.n_ineq, 4)

        c_eq, c_ineq = c.fun(x0)
        assert_array_equal(c_eq, [f[1] - lb[1]])
        assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4],
                                    f[0] - ub[0], lb[0] - f[0]])

        J_eq, J_ineq = c.jac(x0)
        if sparse_jacobian:
            J_eq = J_eq.toarray()
            J_ineq = J_ineq.toarray()

        assert_array_equal(J_eq, J[1, None])
        assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0])))

        # Map the canonical multipliers back to multipliers of the
        # original constraint components and compare Hessians.
        v_eq = rng.rand(c.n_eq)
        v_ineq = rng.rand(c.n_ineq)
        v = np.zeros(m)
        v[1] = v_eq[0]
        v[3] = v_ineq[0]
        v[4] = -v_ineq[1]
        v[0] = v_ineq[2] - v_ineq[3]
        assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v))

        assert_array_equal(c.keep_feasible, [True, False, True, True])
|
||||
|
||||
|
||||
def test_concatenation():
    """Concatenating a Bounds-derived and a nonlinear canonical
    constraint stacks equalities first, then inequalities, preserving
    each source's internal order."""
    rng = np.random.RandomState(0)
    n = 4
    x0 = rng.rand(n)

    # Bounds constraint: identity "function" of x0.
    f1 = x0
    J1 = np.eye(n)
    lb1 = [-1, -np.inf, -2, 3]
    ub1 = [1, np.inf, np.inf, 3]
    bounds = Bounds(lb1, ub1, [False, False, True, False])

    # Nonlinear quadratic constraint with 5 components.
    fun, jac, hess = create_quadratic_function(n, 5, rng)
    f2 = fun(x0)
    J2 = jac(x0)
    lb2 = [-10, 3, -np.inf, -np.inf, -5]
    ub2 = [10, 3, np.inf, 5, np.inf]
    nonlinear = NonlinearConstraint(
        fun, lb2, ub2, jac, hess, [True, False, False, True, False])

    for sparse_jacobian in [False, True]:
        bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
        nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)

        c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared)
        c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared)
        c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian)

        assert_equal(c.n_eq, 2)
        assert_equal(c.n_ineq, 7)

        c_eq, c_ineq = c.fun(x0)
        assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
        assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
                                    lb1[0] - f1[0], f2[3] - ub2[3],
                                    lb2[4] - f2[4], f2[0] - ub2[0],
                                    lb2[0] - f2[0]])

        J_eq, J_ineq = c.jac(x0)
        if sparse_jacobian:
            J_eq = J_eq.toarray()
            J_ineq = J_ineq.toarray()

        assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
        assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
                                              -J2[4], J2[0], -J2[0])))

        # Only the nonlinear part contributes to the Hessian; translate
        # the stacked multipliers back to its original components.
        v_eq = rng.rand(c.n_eq)
        v_ineq = rng.rand(c.n_ineq)
        v = np.zeros(5)
        v[1] = v_eq[1]
        v[3] = v_ineq[3]
        v[4] = -v_ineq[4]
        v[0] = v_ineq[5] - v_ineq[6]
        H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n))
        assert_array_equal(H, hess(x0, v))

        assert_array_equal(c.keep_feasible,
                           [True, False, False, True, False, True, True])
|
||||
|
||||
|
||||
def test_empty():
    """``CanonicalConstraint.empty(n)`` reports no rows and a zero Hessian."""
    point = np.array([1, 2, 3])
    empty_constraint = CanonicalConstraint.empty(3)

    # No equality or inequality components at all.
    assert_equal(empty_constraint.n_eq, 0)
    assert_equal(empty_constraint.n_ineq, 0)

    c_eq, c_ineq = empty_constraint.fun(point)
    assert_array_equal(c_eq, [])
    assert_array_equal(c_ineq, [])

    # Jacobians still carry the variable dimension as their column count.
    J_eq, J_ineq = empty_constraint.jac(point)
    assert_array_equal(J_eq, np.empty((0, 3)))
    assert_array_equal(J_ineq, np.empty((0, 3)))

    # The Hessian contribution of an empty constraint is identically zero.
    H = empty_constraint.hess(point, None, None).toarray()
    assert_array_equal(H, np.zeros((3, 3)))
|
||||
|
||||
|
||||
def test_initial_constraints_as_canonical():
    """initial_constraints_as_canonical must produce the same stacked
    values/Jacobians as canonicalizing and concatenating by hand."""
    # rng is only used to generate the coefficients of the quadratic
    # function that is used by the nonlinear constraint.
    rng = np.random.RandomState(0)

    x0 = np.array([0.5, 0.4, 0.3, 0.2])
    n = len(x0)

    lb1 = [-1, -np.inf, -2, 3]
    ub1 = [1, np.inf, np.inf, 3]
    bounds = Bounds(lb1, ub1, [False, False, True, False])

    fun, jac, hess = create_quadratic_function(n, 5, rng)
    lb2 = [-10, 3, -np.inf, -np.inf, -5]
    ub2 = [10, 3, np.inf, 5, np.inf]
    nonlinear = NonlinearConstraint(
        fun, lb2, ub2, jac, hess, [True, False, False, True, False])

    for sparse_jacobian in [False, True]:
        bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
        nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)

        # Reference values/Jacobians already evaluated at x0 by the
        # PreparedConstraint machinery.
        f1 = bounds_prepared.fun.f
        J1 = bounds_prepared.fun.J
        f2 = nonlinear_prepared.fun.f
        J2 = nonlinear_prepared.fun.J

        c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
            n, [bounds_prepared, nonlinear_prepared], sparse_jacobian)

        assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
        assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
                                    lb1[0] - f1[0], f2[3] - ub2[3],
                                    lb2[4] - f2[4], f2[0] - ub2[0],
                                    lb2[0] - f2[0]])

        if sparse_jacobian:
            J1 = J1.toarray()
            J2 = J2.toarray()
            J_eq = J_eq.toarray()
            J_ineq = J_ineq.toarray()

        assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
        assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
                                              -J2[4], J2[0], -J2[0])))
|
||||
def test_initial_constraints_as_canonical_empty():
    """An empty constraint list canonicalizes to zero-row pieces."""
    dim = 3
    for use_sparse in (False, True):
        c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
            dim, [], use_sparse)

        # No equality or inequality residuals are produced.
        assert_array_equal(c_eq, [])
        assert_array_equal(c_ineq, [])

        # The Jacobians are (0, dim); densify the sparse variant first.
        if use_sparse:
            J_eq = J_eq.toarray()
            J_ineq = J_ineq.toarray()

        assert_array_equal(J_eq, np.empty((0, dim)))
        assert_array_equal(J_ineq, np.empty((0, dim)))
|
|
@ -0,0 +1,214 @@
|
|||
import numpy as np
|
||||
import scipy.linalg
|
||||
from scipy.sparse import csc_matrix
|
||||
from scipy.optimize._trustregion_constr.projections \
|
||||
import projections, orthogonality
|
||||
from numpy.testing import (TestCase, assert_array_almost_equal,
|
||||
assert_equal, assert_allclose)
|
||||
|
||||
# Probe for scikit-sparse: the "NormalEquation" projection method needs
# its CHOLMOD bindings, so only advertise it when the import succeeds.
try:
    from sksparse.cholmod import cholesky_AAt
    sksparse_available = True
    available_sparse_methods = ("NormalEquation", "AugmentedSystem")
except ImportError:
    sksparse_available = False
    available_sparse_methods = ("AugmentedSystem",)
# Dense factorization methods have no optional dependencies.
available_dense_methods = ('QRFactorization', 'SVDFactorization')
|
||||
|
||||
|
||||
class TestProjections(TestCase):
    """Tests for the null-space (Z), least-squares (LS) and row-space (Y)
    operators returned by ``projections``, over every available dense and
    sparse factorization method."""

    def test_nullspace_and_least_squares_sparse(self):
        """Z maps into null(A); LS matches scipy.linalg.lstsq (sparse A)."""
        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                            [0, 8, 7, 0, 1, 5, 9, 0],
                            [1, 0, 0, 0, 0, 1, 2, 3]])
        At_dense = A_dense.T
        A = csc_matrix(A_dense)
        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
                       [1, 10, 3, 0, 1, 6, 7, 8],
                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8])

        for method in available_sparse_methods:
            Z, LS, _ = projections(A, method)
            for z in test_points:
                # Test if x is in the null_space
                x = Z.matvec(z)
                assert_array_almost_equal(A.dot(x), 0)
                # Test orthogonality
                assert_array_almost_equal(orthogonality(A, x), 0)
                # Test if x is the least square solution
                x = LS.matvec(z)
                x2 = scipy.linalg.lstsq(At_dense, z)[0]
                assert_array_almost_equal(x, x2)

    def test_iterative_refinements_sparse(self):
        """With tight orth_tol and many refinements, Z reaches very small
        residual and orthogonality error (sparse A)."""
        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                            [0, 8, 7, 0, 1, 5, 9, 0],
                            [1, 0, 0, 0, 0, 1, 2, 3]])
        A = csc_matrix(A_dense)
        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
                       [1, 10, 3, 0, 1, 6, 7, 8],
                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8],
                       [1, 0, 0, 0, 0, 1, 2, 3+1e-10])

        for method in available_sparse_methods:
            Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100)
            for z in test_points:
                # Test if x is in the null_space
                x = Z.matvec(z)
                # Scale the tolerance by the result's magnitude.
                atol = 1e-13 * abs(x).max()
                assert_allclose(A.dot(x), 0, atol=atol)
                # Test orthogonality
                assert_allclose(orthogonality(A, x), 0, atol=1e-13)

    def test_rowspace_sparse(self):
        """Y.matvec(z) solves A x = z and stays in the row space (sparse)."""
        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                            [0, 8, 7, 0, 1, 5, 9, 0],
                            [1, 0, 0, 0, 0, 1, 2, 3]])
        A = csc_matrix(A_dense)
        test_points = ([1, 2, 3],
                       [1, 10, 3],
                       [1.12, 10, 0])

        for method in available_sparse_methods:
            _, _, Y = projections(A, method)
            for z in test_points:
                # Test if x is solution of A x = z
                x = Y.matvec(z)
                assert_array_almost_equal(A.dot(x), z)
                # Test if x is in the return row space of A:
                # appending x must not increase the rank.
                A_ext = np.vstack((A_dense, x))
                assert_equal(np.linalg.matrix_rank(A_dense),
                             np.linalg.matrix_rank(A_ext))

    def test_nullspace_and_least_squares_dense(self):
        """Z maps into null(A); LS matches scipy.linalg.lstsq (dense A)."""
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        At = A.T
        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
                       [1, 10, 3, 0, 1, 6, 7, 8],
                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8])

        for method in available_dense_methods:
            Z, LS, _ = projections(A, method)
            for z in test_points:
                # Test if x is in the null_space
                x = Z.matvec(z)
                assert_array_almost_equal(A.dot(x), 0)
                # Test orthogonality
                assert_array_almost_equal(orthogonality(A, x), 0)
                # Test if x is the least square solution
                x = LS.matvec(z)
                x2 = scipy.linalg.lstsq(At, z)[0]
                assert_array_almost_equal(x, x2)

    def test_compare_dense_and_sparse(self):
        """Dense and sparse code paths agree on a large block-diagonal A."""
        D = np.diag(range(1, 101))
        A = np.hstack([D, D, D, D])
        A_sparse = csc_matrix(A)
        np.random.seed(0)

        Z, LS, Y = projections(A)
        Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
        for k in range(20):
            z = np.random.normal(size=(400,))
            assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
            assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
            x = np.random.normal(size=(100,))
            assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))

    def test_compare_dense_and_sparse2(self):
        """Dense and sparse code paths agree on a small mixed-sign A."""
        D1 = np.diag([-1.7, 1, 0.5])
        D2 = np.diag([1, -0.6, -0.3])
        D3 = np.diag([-0.3, -1.5, 2])
        A = np.hstack([D1, D2, D3])
        A_sparse = csc_matrix(A)
        np.random.seed(0)

        Z, LS, Y = projections(A)
        Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
        for k in range(1):
            z = np.random.normal(size=(9,))
            assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
            assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
            x = np.random.normal(size=(3,))
            assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))

    def test_iterative_refinements_dense(self):
        """With tight orth_tol and refinements, Z reaches high accuracy
        (dense A)."""
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
                       [1, 10, 3, 0, 1, 6, 7, 8],
                       [1, 0, 0, 0, 0, 1, 2, 3+1e-10])

        for method in available_dense_methods:
            Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10)
            for z in test_points:
                # Test if x is in the null_space
                x = Z.matvec(z)
                assert_array_almost_equal(A.dot(x), 0, decimal=14)
                # Test orthogonality
                assert_array_almost_equal(orthogonality(A, x), 0, decimal=16)

    def test_rowspace_dense(self):
        """Y.matvec(z) solves A x = z and stays in the row space (dense)."""
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        test_points = ([1, 2, 3],
                       [1, 10, 3],
                       [1.12, 10, 0])

        for method in available_dense_methods:
            _, _, Y = projections(A, method)
            for z in test_points:
                # Test if x is solution of A x = z
                x = Y.matvec(z)
                assert_array_almost_equal(A.dot(x), z)
                # Test if x is in the return row space of A:
                # appending x must not increase the rank.
                A_ext = np.vstack((A, x))
                assert_equal(np.linalg.matrix_rank(A),
                             np.linalg.matrix_rank(A_ext))
||||
|
||||
class TestOrthogonality(TestCase):
    """Tests for the ``orthogonality`` measure between a matrix and a vector."""

    # Two sample vectors that lie (numerically) in null(A), so the
    # orthogonality measure against the rows of A must vanish.
    def test_dense_matrix(self):
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        vectors = ([-1.98931144, -1.56363389,
                    -0.84115584, 2.2864762,
                    5.599141, 0.09286976,
                    1.37040802, -0.28145812],
                   [697.92794044, -4091.65114008,
                    -3327.42316335, 836.86906951,
                    99434.98929065, -1285.37653682,
                    -4109.21503806, 2935.29289083])
        expected = (0, 0)
        for vec, orth in zip(vectors, expected):
            assert_array_almost_equal(orthogonality(A, vec), orth)

    def test_sparse_matrix(self):
        # Same fixture as the dense case, exercised through a CSC matrix.
        A = csc_matrix(np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                                 [0, 8, 7, 0, 1, 5, 9, 0],
                                 [1, 0, 0, 0, 0, 1, 2, 3]]))
        vectors = ([-1.98931144, -1.56363389,
                    -0.84115584, 2.2864762,
                    5.599141, 0.09286976,
                    1.37040802, -0.28145812],
                   [697.92794044, -4091.65114008,
                    -3327.42316335, 836.86906951,
                    99434.98929065, -1285.37653682,
                    -4109.21503806, 2935.29289083])
        expected = (0, 0)
        for vec, orth in zip(vectors, expected):
            assert_array_almost_equal(orthogonality(A, vec), orth)
|
@ -0,0 +1,645 @@
|
|||
import numpy as np
|
||||
from scipy.sparse import csc_matrix
|
||||
from scipy.optimize._trustregion_constr.qp_subproblem \
|
||||
import (eqp_kktfact,
|
||||
projected_cg,
|
||||
box_intersections,
|
||||
sphere_intersections,
|
||||
box_sphere_intersections,
|
||||
modified_dogleg)
|
||||
from scipy.optimize._trustregion_constr.projections \
|
||||
import projections
|
||||
from numpy.testing import (TestCase, assert_array_almost_equal, assert_equal)
|
||||
import pytest
|
||||
|
||||
|
||||
class TestEQPDirectFactorization(TestCase):
    """Direct KKT-factorization EQP solver on a known textbook problem."""

    # From Example 16.2 Nocedal/Wright "Numerical
    # Optimization" p.452.
    def test_nocedal_example(self):
        H = csc_matrix([[6, 2, 1],
                        [2, 5, 2],
                        [1, 2, 4]])
        A = csc_matrix([[1, 0, 1],
                        [0, 1, 1]])
        c = np.array([-8, -3, -3])
        b = np.array([-3, 0])
        solution, multipliers = eqp_kktfact(H, c, A, b)
        # Known optimum and Lagrange multipliers from the book.
        assert_array_almost_equal(solution, [2, -1, 1])
        assert_array_almost_equal(multipliers, [3, -2])
||||
class TestSphericalBoundariesIntersections(TestCase):
    """Tests for ``sphere_intersections``.

    Each call returns ``(ta, tb, intersect)``: the parameter interval
    ``[ta, tb]`` along direction ``d`` for which ``z + t*d`` stays inside
    the ball, and whether an intersection exists at all.
    """

    def test_2d_sphere_constraints(self):
        """Segment mode: t is clipped to [0, 1]."""
        # Interior initial point
        ta, tb, intersect = sphere_intersections([0, 0],
                                                 [1, 0], 0.5)
        assert_array_almost_equal([ta, tb], [0, 0.5])
        assert_equal(intersect, True)

        # No intersection between line and circle
        ta, tb, intersect = sphere_intersections([2, 0],
                                                 [0, 1], 1)
        assert_equal(intersect, False)

        # Outside initial point pointing toward outside the circle
        ta, tb, intersect = sphere_intersections([2, 0],
                                                 [1, 0], 1)
        assert_equal(intersect, False)

        # Outside initial point pointing toward inside the circle
        ta, tb, intersect = sphere_intersections([2, 0],
                                                 [-1, 0], 1.5)
        assert_array_almost_equal([ta, tb], [0.5, 1])
        assert_equal(intersect, True)

        # Initial point on the boundary
        ta, tb, intersect = sphere_intersections([2, 0],
                                                 [1, 0], 2)
        assert_array_almost_equal([ta, tb], [0, 0])
        assert_equal(intersect, True)

    def test_2d_sphere_constraints_line_intersections(self):
        """Entire-line mode: t may be negative or exceed 1."""
        # Interior initial point
        ta, tb, intersect = sphere_intersections([0, 0],
                                                 [1, 0], 0.5,
                                                 entire_line=True)
        assert_array_almost_equal([ta, tb], [-0.5, 0.5])
        assert_equal(intersect, True)

        # No intersection between line and circle
        ta, tb, intersect = sphere_intersections([2, 0],
                                                 [0, 1], 1,
                                                 entire_line=True)
        assert_equal(intersect, False)

        # Outside initial point pointing toward outside the circle
        ta, tb, intersect = sphere_intersections([2, 0],
                                                 [1, 0], 1,
                                                 entire_line=True)
        assert_array_almost_equal([ta, tb], [-3, -1])
        assert_equal(intersect, True)

        # Outside initial point pointing toward inside the circle
        ta, tb, intersect = sphere_intersections([2, 0],
                                                 [-1, 0], 1.5,
                                                 entire_line=True)
        assert_array_almost_equal([ta, tb], [0.5, 3.5])
        assert_equal(intersect, True)

        # Initial point on the boundary
        ta, tb, intersect = sphere_intersections([2, 0],
                                                 [1, 0], 2,
                                                 entire_line=True)
        assert_array_almost_equal([ta, tb], [-4, 0])
        assert_equal(intersect, True)
||||
class TestBoxBoundariesIntersections(TestCase):
    """Tests for ``box_intersections``.

    Each call returns ``(ta, tb, intersect)``: the parameter interval for
    which ``z + t*d`` stays inside the box ``[lb, ub]``, and whether the
    line/segment touches the box at all.
    """

    def test_2d_box_constraints(self):
        """Segment mode: t is clipped to [0, 1]."""
        # Box constraint in the direction of vector d
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [1, 1], [3, 3])
        assert_array_almost_equal([ta, tb], [0.5, 1])
        assert_equal(intersect, True)

        # Negative direction
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [1, -3], [3, -1])
        assert_equal(intersect, False)

        # Some constraints are absent (set to +/- inf)
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [-np.inf, 1],
                                              [np.inf, np.inf])
        assert_array_almost_equal([ta, tb], [0.5, 1])
        assert_equal(intersect, True)

        # Intersect on the face of the box
        ta, tb, intersect = box_intersections([1, 0], [0, 1],
                                              [1, 1], [3, 3])
        assert_array_almost_equal([ta, tb], [1, 1])
        assert_equal(intersect, True)

        # Interior initial point
        ta, tb, intersect = box_intersections([0, 0], [4, 4],
                                              [-2, -3], [3, 2])
        assert_array_almost_equal([ta, tb], [0, 0.5])
        assert_equal(intersect, True)

        # No intersection between line and box constraints
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [-3, -3], [-1, -1])
        assert_equal(intersect, False)
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [-3, 3], [-1, 1])
        assert_equal(intersect, False)
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [-3, -np.inf],
                                              [-1, np.inf])
        assert_equal(intersect, False)
        ta, tb, intersect = box_intersections([0, 0], [1, 100],
                                              [1, 1], [3, 3])
        assert_equal(intersect, False)
        ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
                                              [1, 1], [3, 3])
        assert_equal(intersect, False)

        # Initial point on the boundary
        ta, tb, intersect = box_intersections([2, 2], [0, 1],
                                              [-2, -2], [2, 2])
        assert_array_almost_equal([ta, tb], [0, 0])
        assert_equal(intersect, True)

    def test_2d_box_constraints_entire_line(self):
        """Entire-line mode: t is unbounded."""
        # Box constraint in the direction of vector d
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [1, 1], [3, 3],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [0.5, 1.5])
        assert_equal(intersect, True)

        # Negative direction
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [1, -3], [3, -1],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [-1.5, -0.5])
        assert_equal(intersect, True)

        # Some constraints are absent (set to +/- inf)
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [-np.inf, 1],
                                              [np.inf, np.inf],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [0.5, np.inf])
        assert_equal(intersect, True)

        # Intersect on the face of the box
        ta, tb, intersect = box_intersections([1, 0], [0, 1],
                                              [1, 1], [3, 3],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [1, 3])
        assert_equal(intersect, True)

        # Interior initial point
        ta, tb, intersect = box_intersections([0, 0], [4, 4],
                                              [-2, -3], [3, 2],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [-0.5, 0.5])
        assert_equal(intersect, True)

        # No intersection between line and box constraints
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [-3, -3], [-1, -1],
                                              entire_line=True)
        assert_equal(intersect, False)
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [-3, 3], [-1, 1],
                                              entire_line=True)
        assert_equal(intersect, False)
        ta, tb, intersect = box_intersections([2, 0], [0, 2],
                                              [-3, -np.inf],
                                              [-1, np.inf],
                                              entire_line=True)
        assert_equal(intersect, False)
        ta, tb, intersect = box_intersections([0, 0], [1, 100],
                                              [1, 1], [3, 3],
                                              entire_line=True)
        assert_equal(intersect, False)
        ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
                                              [1, 1], [3, 3],
                                              entire_line=True)
        assert_equal(intersect, False)

        # Initial point on the boundary
        ta, tb, intersect = box_intersections([2, 2], [0, 1],
                                              [-2, -2], [2, 2],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [-4, 0])
        assert_equal(intersect, True)

    def test_3d_box_constraints(self):
        """Segment mode in three dimensions."""
        # Simple case
        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
                                              [1, 1, 1], [3, 3, 3])
        assert_array_almost_equal([ta, tb], [1, 1])
        assert_equal(intersect, True)

        # Negative direction
        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
                                              [1, 1, 1], [3, 3, 3])
        assert_equal(intersect, False)

        # Interior point
        ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
                                              [1, 1, 1], [3, 3, 3])
        assert_array_almost_equal([ta, tb], [0, 1])
        assert_equal(intersect, True)

    def test_3d_box_constraints_entire_line(self):
        """Entire-line mode in three dimensions."""
        # Simple case
        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
                                              [1, 1, 1], [3, 3, 3],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [1, 3])
        assert_equal(intersect, True)

        # Negative direction
        ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
                                              [1, 1, 1], [3, 3, 3],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [-3, -1])
        assert_equal(intersect, True)

        # Interior point
        ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
                                              [1, 1, 1], [3, 3, 3],
                                              entire_line=True)
        assert_array_almost_equal([ta, tb], [-1, 1])
        assert_equal(intersect, True)
|
||||
class TestBoxSphereBoundariesIntersections(TestCase):
    """Tests for ``box_sphere_intersections``.

    The feasible t-interval is the intersection of the box interval and
    the sphere interval; ``intersect`` is False when that is empty.
    """

    def test_2d_box_constraints(self):
        """Segment mode (``entire_line=False``)."""
        # Both constraints are active
        ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
                                                     [-1, -2], [1, 2], 2,
                                                     entire_line=False)
        assert_array_almost_equal([ta, tb], [0, 0.5])
        assert_equal(intersect, True)

        # None of the constraints are active
        ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
                                                     [-1, -3], [1, 3], 10,
                                                     entire_line=False)
        assert_array_almost_equal([ta, tb], [0, 1])
        assert_equal(intersect, True)

        # Box constraints are active
        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
                                                     [-1, -3], [1, 3], 10,
                                                     entire_line=False)
        assert_array_almost_equal([ta, tb], [0, 0.5])
        assert_equal(intersect, True)

        # Spherical constraints are active
        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
                                                     [-1, -3], [1, 3], 2,
                                                     entire_line=False)
        assert_array_almost_equal([ta, tb], [0, 0.25])
        assert_equal(intersect, True)

        # Infeasible problems
        ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
                                                     [-1, -3], [1, 3], 2,
                                                     entire_line=False)
        assert_equal(intersect, False)
        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
                                                     [2, 4], [2, 4], 2,
                                                     entire_line=False)
        assert_equal(intersect, False)

    def test_2d_box_constraints_entire_line(self):
        """Entire-line mode (``entire_line=True``)."""
        # Both constraints are active
        ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
                                                     [-1, -2], [1, 2], 2,
                                                     entire_line=True)
        assert_array_almost_equal([ta, tb], [0, 0.5])
        assert_equal(intersect, True)

        # None of the constraints are active
        ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
                                                     [-1, -3], [1, 3], 10,
                                                     entire_line=True)
        assert_array_almost_equal([ta, tb], [0, 2])
        assert_equal(intersect, True)

        # Box constraints are active
        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
                                                     [-1, -3], [1, 3], 10,
                                                     entire_line=True)
        assert_array_almost_equal([ta, tb], [0, 0.5])
        assert_equal(intersect, True)

        # Spherical constraints are active
        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
                                                     [-1, -3], [1, 3], 2,
                                                     entire_line=True)
        assert_array_almost_equal([ta, tb], [0, 0.25])
        assert_equal(intersect, True)

        # Infeasible problems
        ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
                                                     [-1, -3], [1, 3], 2,
                                                     entire_line=True)
        assert_equal(intersect, False)
        ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
                                                     [2, 4], [2, 4], 2,
                                                     entire_line=True)
        assert_equal(intersect, False)
|
||||
class TestModifiedDogleg(TestCase):
    """Tests for ``modified_dogleg`` (dogleg step with box constraints)."""

    def test_cauchypoint_equalsto_newtonpoint(self):
        """With a single constraint row the Cauchy and Newton points coincide."""
        A = np.array([[1, 8]])
        b = np.array([-16])
        _, _, Y = projections(A)
        newton_point = np.array([0.24615385, 1.96923077])

        # Newton point inside boundaries
        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf])
        assert_array_almost_equal(x, newton_point)

        # Spherical constraint active: step is scaled onto the radius-1 ball.
        x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf])
        assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point))

        # Box constraints active: step is scaled onto the x[0] <= 0.1 face.
        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf])
        assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1)

    def test_3d_example(self):
        """Exercise all dogleg path segments on a 3-variable problem."""
        A = np.array([[1, 8, 1],
                      [4, 2, 2]])
        b = np.array([-16, 2])
        Z, LS, Y = projections(A)

        newton_point = np.array([-1.37090909, 2.23272727, -0.49090909])
        cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585])
        origin = np.zeros_like(newton_point)

        # newton_point inside boundaries
        x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf],
                            [np.inf, np.inf, np.inf])
        assert_array_almost_equal(x, newton_point)

        # line between cauchy_point and newton_point contains best point
        # (spherical constraint is active).  The uniform interpolation
        # parameter t confirms x lies on that segment.
        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
                            [np.inf, np.inf, np.inf])
        z = cauchy_point
        d = newton_point-cauchy_point
        t = ((x-z)/(d))
        assert_array_almost_equal(t, np.full(3, 0.40807330))
        assert_array_almost_equal(np.linalg.norm(x), 2)

        # line between cauchy_point and newton_point contains best point
        # (box constraint is active).
        x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf],
                            [np.inf, np.inf, np.inf])
        z = cauchy_point
        d = newton_point-cauchy_point
        t = ((x-z)/(d))
        assert_array_almost_equal(t, np.full(3, 0.7498195))
        assert_array_almost_equal(x[0], -1)

        # line between origin and cauchy_point contains best point
        # (spherical constraint is active).
        x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf],
                            [np.inf, np.inf, np.inf])
        z = origin
        d = cauchy_point
        t = ((x-z)/(d))
        assert_array_almost_equal(t, np.full(3, 0.573936265))
        assert_array_almost_equal(np.linalg.norm(x), 1)

        # line between origin and newton_point contains best point
        # (box constraint is active).
        x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
                            [np.inf, 1, np.inf])
        z = origin
        d = newton_point
        t = ((x-z)/(d))
        assert_array_almost_equal(t, np.full(3, 0.4478827364))
        assert_array_almost_equal(x[1], 1)
||||
class TestProjectCG(TestCase):
    """Tests for ``projected_cg``.

    ``info["stop_cond"]`` codes observed below: 1 = converged to tolerance,
    2 = hit the trust-region/box boundary, 3 = negative curvature detected,
    4 = small problem solved directly — TODO(review): confirm code meanings
    against the ``projected_cg`` implementation.
    """

    # From Example 16.2 Nocedal/Wright "Numerical
    # Optimization" p.452.
    def test_nocedal_example(self):
        H = csc_matrix([[6, 2, 1],
                        [2, 5, 2],
                        [1, 2, 4]])
        A = csc_matrix([[1, 0, 1],
                        [0, 1, 1]])
        c = np.array([-8, -3, -3])
        b = -np.array([3, 0])
        Z, _, Y = projections(A)
        x, info = projected_cg(H, c, Z, Y, b)
        assert_equal(info["stop_cond"], 4)
        assert_equal(info["hits_boundary"], False)
        assert_array_almost_equal(x, [2, -1, 1])

    def test_compare_with_direct_fact(self):
        """CG with tol=0 must match the direct KKT factorization solution."""
        H = csc_matrix([[6, 2, 1, 3],
                        [2, 5, 2, 4],
                        [1, 2, 4, 5],
                        [3, 4, 5, 7]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 1, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        Z, _, Y = projections(A)
        x, info = projected_cg(H, c, Z, Y, b, tol=0)
        x_kkt, _ = eqp_kktfact(H, c, A, b)
        assert_equal(info["stop_cond"], 1)
        assert_equal(info["hits_boundary"], False)
        assert_array_almost_equal(x, x_kkt)

    def test_trust_region_infeasible(self):
        """A radius smaller than the minimum-norm feasible step must raise."""
        H = csc_matrix([[6, 2, 1, 3],
                        [2, 5, 2, 4],
                        [1, 2, 4, 5],
                        [3, 4, 5, 7]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 1, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        trust_radius = 1
        Z, _, Y = projections(A)
        with pytest.raises(ValueError):
            projected_cg(H, c, Z, Y, b, trust_radius=trust_radius)

    def test_trust_region_barely_feasible(self):
        """Radius exactly the norm of the minimum-norm step: x = -Y b."""
        H = csc_matrix([[6, 2, 1, 3],
                        [2, 5, 2, 4],
                        [1, 2, 4, 5],
                        [3, 4, 5, 7]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 1, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        trust_radius = 2.32379000772445021283
        Z, _, Y = projections(A)
        x, info = projected_cg(H, c, Z, Y, b,
                               tol=0,
                               trust_radius=trust_radius)
        assert_equal(info["stop_cond"], 2)
        assert_equal(info["hits_boundary"], True)
        assert_array_almost_equal(np.linalg.norm(x), trust_radius)
        assert_array_almost_equal(x, -Y.dot(b))

    def test_hits_boundary(self):
        """A tight radius makes the iterate stop on the trust-region boundary."""
        H = csc_matrix([[6, 2, 1, 3],
                        [2, 5, 2, 4],
                        [1, 2, 4, 5],
                        [3, 4, 5, 7]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 1, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        trust_radius = 3
        Z, _, Y = projections(A)
        x, info = projected_cg(H, c, Z, Y, b,
                               tol=0,
                               trust_radius=trust_radius)
        assert_equal(info["stop_cond"], 2)
        assert_equal(info["hits_boundary"], True)
        assert_array_almost_equal(np.linalg.norm(x), trust_radius)

    def test_negative_curvature_unconstrained(self):
        """Indefinite H without a trust radius must raise."""
        H = csc_matrix([[1, 2, 1, 3],
                        [2, 0, 2, 4],
                        [1, 2, 0, 2],
                        [3, 4, 2, 0]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 0, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        Z, _, Y = projections(A)
        with pytest.raises(ValueError):
            projected_cg(H, c, Z, Y, b, tol=0)

    def test_negative_curvature(self):
        """Negative curvature + trust radius: follow direction to boundary."""
        H = csc_matrix([[1, 2, 1, 3],
                        [2, 0, 2, 4],
                        [1, 2, 0, 2],
                        [3, 4, 2, 0]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 0, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        Z, _, Y = projections(A)
        trust_radius = 1000
        x, info = projected_cg(H, c, Z, Y, b,
                               tol=0,
                               trust_radius=trust_radius)
        assert_equal(info["stop_cond"], 3)
        assert_equal(info["hits_boundary"], True)
        assert_array_almost_equal(np.linalg.norm(x), trust_radius)

    # The box constraints are inactive at the solution but
    # are active during the iterations.
    def test_inactive_box_constraints(self):
        H = csc_matrix([[6, 2, 1, 3],
                        [2, 5, 2, 4],
                        [1, 2, 4, 5],
                        [3, 4, 5, 7]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 1, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        Z, _, Y = projections(A)
        x, info = projected_cg(H, c, Z, Y, b,
                               tol=0,
                               lb=[0.5, -np.inf,
                                   -np.inf, -np.inf],
                               return_all=True)
        x_kkt, _ = eqp_kktfact(H, c, A, b)
        assert_equal(info["stop_cond"], 1)
        assert_equal(info["hits_boundary"], False)
        assert_array_almost_equal(x, x_kkt)

    # The box constraints are active and the termination is
    # by maximum iterations (infeasible iteration).
    def test_active_box_constraints_maximum_iterations_reached(self):
        H = csc_matrix([[6, 2, 1, 3],
                        [2, 5, 2, 4],
                        [1, 2, 4, 5],
                        [3, 4, 5, 7]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 1, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        Z, _, Y = projections(A)
        x, info = projected_cg(H, c, Z, Y, b,
                               tol=0,
                               lb=[0.8, -np.inf,
                                   -np.inf, -np.inf],
                               return_all=True)
        assert_equal(info["stop_cond"], 1)
        assert_equal(info["hits_boundary"], True)
        assert_array_almost_equal(A.dot(x), -b)
        assert_array_almost_equal(x[0], 0.8)

    # The box constraints are active and the termination is
    # because it hits boundary (without infeasible iteration).
    def test_active_box_constraints_hits_boundaries(self):
        H = csc_matrix([[6, 2, 1, 3],
                        [2, 5, 2, 4],
                        [1, 2, 4, 5],
                        [3, 4, 5, 7]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 1, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        trust_radius = 3
        Z, _, Y = projections(A)
        x, info = projected_cg(H, c, Z, Y, b,
                               tol=0,
                               ub=[np.inf, np.inf, 1.6, np.inf],
                               trust_radius=trust_radius,
                               return_all=True)
        assert_equal(info["stop_cond"], 2)
        assert_equal(info["hits_boundary"], True)
        assert_array_almost_equal(x[2], 1.6)

    # The box constraints are active and the termination is
    # because it hits boundary (infeasible iteration).
    def test_active_box_constraints_hits_boundaries_infeasible_iter(self):
        H = csc_matrix([[6, 2, 1, 3],
                        [2, 5, 2, 4],
                        [1, 2, 4, 5],
                        [3, 4, 5, 7]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 1, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        trust_radius = 4
        Z, _, Y = projections(A)
        x, info = projected_cg(H, c, Z, Y, b,
                               tol=0,
                               ub=[np.inf, 0.1, np.inf, np.inf],
                               trust_radius=trust_radius,
                               return_all=True)
        assert_equal(info["stop_cond"], 2)
        assert_equal(info["hits_boundary"], True)
        assert_array_almost_equal(x[1], 0.1)

    # The box constraints are active and the termination is
    # because it hits boundary (no infeasible iteration).
    def test_active_box_constraints_negative_curvature(self):
        H = csc_matrix([[1, 2, 1, 3],
                        [2, 0, 2, 4],
                        [1, 2, 0, 2],
                        [3, 4, 2, 0]])
        A = csc_matrix([[1, 0, 1, 0],
                        [0, 1, 0, 1]])
        c = np.array([-2, -3, -3, 1])
        b = -np.array([3, 0])
        Z, _, Y = projections(A)
        trust_radius = 1000
        x, info = projected_cg(H, c, Z, Y, b,
                               tol=0,
                               ub=[np.inf, np.inf, 100, np.inf],
                               trust_radius=trust_radius)
        assert_equal(info["stop_cond"], 3)
        assert_equal(info["hits_boundary"], True)
        assert_array_almost_equal(x[2], 100)
|
@ -0,0 +1,10 @@
|
|||
from scipy.optimize import minimize, Bounds
|
||||
|
||||
def test_gh10880():
    """Regression test for gh-10880: verbose trust-constr reporting with bounds.

    Must not raise for either verbosity level; the minimizer's result is
    irrelevant here.
    """
    bounds = Bounds(1, 2)
    for verbosity in (2, 3):
        options = {'maxiter': 1000, 'verbose': verbosity}
        minimize(lambda x: x**2, x0=2., method='trust-constr',
                 bounds=bounds, options=options)
|
@ -0,0 +1,346 @@
|
|||
"""Trust-region interior point method.
|
||||
|
||||
References
|
||||
----------
|
||||
.. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
|
||||
"An interior point algorithm for large-scale nonlinear
|
||||
programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
|
||||
.. [2] Byrd, Richard H., Guanghui Liu, and Jorge Nocedal.
|
||||
"On the local behavior of an interior point method for
|
||||
nonlinear programming." Numerical analysis 1997 (1997): 37-56.
|
||||
.. [3] Nocedal, Jorge, and Stephen J. Wright. "Numerical optimization"
|
||||
Second Edition (2006).
|
||||
"""
|
||||
|
||||
import scipy.sparse as sps
|
||||
import numpy as np
|
||||
from .equality_constrained_sqp import equality_constrained_sqp
|
||||
from scipy.sparse.linalg import LinearOperator
|
||||
|
||||
__all__ = ['tr_interior_point']
|
||||
|
||||
|
||||
class BarrierSubproblem:
|
||||
"""
|
||||
Barrier optimization problem:
|
||||
minimize fun(x) - barrier_parameter*sum(log(s))
|
||||
subject to: constr_eq(x) = 0
|
||||
constr_ineq(x) + s = 0
|
||||
"""
|
||||
|
||||
    def __init__(self, x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
                 constr, jac, barrier_parameter, tolerance,
                 enforce_feasibility, global_stop_criteria,
                 xtol, fun0, grad0, constr_ineq0, jac_ineq0, constr_eq0,
                 jac_eq0):
        """Store problem data and pre-evaluate the barrier problem.

        ``fun0``/``grad0``/``constr_*0``/``jac_*0`` are evaluations of the
        original problem at ``x0``; they are converted here into the
        corresponding barrier-problem quantities at ``z0 = [x0, s0]``.
        """
        # Store parameters
        self.n_vars = n_vars
        self.x0 = x0
        self.s0 = s0
        self.fun = fun
        self.grad = grad
        self.lagr_hess = lagr_hess
        self.constr = constr
        self.jac = jac
        self.barrier_parameter = barrier_parameter
        self.tolerance = tolerance
        self.n_eq = n_eq
        self.n_ineq = n_ineq
        self.enforce_feasibility = enforce_feasibility
        self.global_stop_criteria = global_stop_criteria
        self.xtol = xtol
        # NOTE: _compute_function may modify ``s0`` in place (feasibility
        # enforcement) before constr0/jac0 are assembled from it below.
        self.fun0 = self._compute_function(fun0, constr_ineq0, s0)
        self.grad0 = self._compute_gradient(grad0)
        self.constr0 = self._compute_constr(constr_ineq0, constr_eq0, s0)
        self.jac0 = self._compute_jacobian(jac_eq0, jac_ineq0, s0)
        # Termination flag — presumably flipped by the stop criteria during
        # the solve; TODO(review): confirm against the SQP driver.
        self.terminate = False
|
||||
    def update(self, barrier_parameter, tolerance):
        """Set a new barrier parameter and tolerance for the next outer iteration."""
        self.barrier_parameter = barrier_parameter
        self.tolerance = tolerance
|
||||
def get_slack(self, z):
|
||||
return z[self.n_vars:self.n_vars+self.n_ineq]
|
||||
|
||||
def get_variables(self, z):
|
||||
return z[:self.n_vars]
|
||||
|
||||
def function_and_constraints(self, z):
|
||||
"""Returns barrier function and constraints at given point.
|
||||
|
||||
For z = [x, s], returns barrier function:
|
||||
function(z) = fun(x) - barrier_parameter*sum(log(s))
|
||||
and barrier constraints:
|
||||
constraints(z) = [ constr_eq(x) ]
|
||||
[ constr_ineq(x) + s ]
|
||||
|
||||
"""
|
||||
# Get variables and slack variables
|
||||
x = self.get_variables(z)
|
||||
s = self.get_slack(z)
|
||||
# Compute function and constraints
|
||||
f = self.fun(x)
|
||||
c_eq, c_ineq = self.constr(x)
|
||||
# Return objective function and constraints
|
||||
return (self._compute_function(f, c_ineq, s),
|
||||
self._compute_constr(c_ineq, c_eq, s))
|
||||
|
||||
def _compute_function(self, f, c_ineq, s):
|
||||
# Use technique from Nocedal and Wright book, ref [3]_, p.576,
|
||||
# to guarantee constraints from `enforce_feasibility`
|
||||
# stay feasible along iterations.
|
||||
s[self.enforce_feasibility] = -c_ineq[self.enforce_feasibility]
|
||||
log_s = [np.log(s_i) if s_i > 0 else -np.inf for s_i in s]
|
||||
# Compute barrier objective function
|
||||
return f - self.barrier_parameter*np.sum(log_s)
|
||||
|
||||
def _compute_constr(self, c_ineq, c_eq, s):
|
||||
# Compute barrier constraint
|
||||
return np.hstack((c_eq,
|
||||
c_ineq + s))
|
||||
|
||||
def scaling(self, z):
|
||||
"""Returns scaling vector.
|
||||
Given by:
|
||||
scaling = [ones(n_vars), s]
|
||||
"""
|
||||
s = self.get_slack(z)
|
||||
diag_elements = np.hstack((np.ones(self.n_vars), s))
|
||||
|
||||
# Diagonal matrix
|
||||
def matvec(vec):
|
||||
return diag_elements*vec
|
||||
return LinearOperator((self.n_vars+self.n_ineq,
|
||||
self.n_vars+self.n_ineq),
|
||||
matvec)
|
||||
|
||||
def gradient_and_jacobian(self, z):
|
||||
"""Returns scaled gradient.
|
||||
|
||||
Return scaled gradient:
|
||||
gradient = [ grad(x) ]
|
||||
[ -barrier_parameter*ones(n_ineq) ]
|
||||
and scaled Jacobian matrix:
|
||||
jacobian = [ jac_eq(x) 0 ]
|
||||
[ jac_ineq(x) S ]
|
||||
Both of them scaled by the previously defined scaling factor.
|
||||
"""
|
||||
# Get variables and slack variables
|
||||
x = self.get_variables(z)
|
||||
s = self.get_slack(z)
|
||||
# Compute first derivatives
|
||||
g = self.grad(x)
|
||||
J_eq, J_ineq = self.jac(x)
|
||||
# Return gradient and Jacobian
|
||||
return (self._compute_gradient(g),
|
||||
self._compute_jacobian(J_eq, J_ineq, s))
|
||||
|
||||
def _compute_gradient(self, g):
|
||||
return np.hstack((g, -self.barrier_parameter*np.ones(self.n_ineq)))
|
||||
|
||||
def _compute_jacobian(self, J_eq, J_ineq, s):
|
||||
if self.n_ineq == 0:
|
||||
return J_eq
|
||||
else:
|
||||
if sps.issparse(J_eq) or sps.issparse(J_ineq):
|
||||
# It is expected that J_eq and J_ineq
|
||||
# are already `csr_matrix` because of
|
||||
# the way ``BoxConstraint``, ``NonlinearConstraint``
|
||||
# and ``LinearConstraint`` are defined.
|
||||
J_eq = sps.csr_matrix(J_eq)
|
||||
J_ineq = sps.csr_matrix(J_ineq)
|
||||
return self._assemble_sparse_jacobian(J_eq, J_ineq, s)
|
||||
else:
|
||||
S = np.diag(s)
|
||||
zeros = np.zeros((self.n_eq, self.n_ineq))
|
||||
# Convert to matrix
|
||||
if sps.issparse(J_ineq):
|
||||
J_ineq = J_ineq.toarray()
|
||||
if sps.issparse(J_eq):
|
||||
J_eq = J_eq.toarray()
|
||||
# Concatenate matrices
|
||||
return np.block([[J_eq, zeros],
|
||||
[J_ineq, S]])
|
||||
|
||||
def _assemble_sparse_jacobian(self, J_eq, J_ineq, s):
    """Assemble sparse Jacobian given its components.

    Given ``J_eq``, ``J_ineq`` and ``s`` returns:
        jacobian = [ J_eq,     0     ]
                   [ J_ineq, diag(s) ]

    It is equivalent to:
        sps.bmat([[ J_eq, None ],
                  [ J_ineq, diag(s) ]], "csr")
    but significantly more efficient for this
    given structure.

    NOTE(review): ``_compute_jacobian`` only calls this when
    ``n_ineq > 0``; ``new_indptr[-n_ineq:]`` would misbehave otherwise.
    """
    n_vars, n_ineq, n_eq = self.n_vars, self.n_ineq, self.n_eq
    # Stack the two Jacobians, then patch the resulting CSR arrays by
    # hand to append one extra entry (the slack diagonal) at the end
    # of every inequality row.
    J_aux = sps.vstack([J_eq, J_ineq], "csr")
    indptr, indices, data = J_aux.indptr, J_aux.indices, J_aux.data
    # Equality rows keep their length; each of the n_ineq inequality
    # rows grows by exactly one entry, hence the cumulative offsets
    # 0..n_ineq added to the tail of indptr.
    new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int),
                                     np.arange(n_ineq+1, dtype=int)))
    # Total entries = original nonzeros + one diagonal entry per ineq row.
    size = indices.size+n_ineq
    new_indices = np.empty(size)
    new_data = np.empty(size)
    # ``mask`` marks the last slot of each inequality row, where the
    # appended diagonal entry lives.
    mask = np.full(size, False, bool)
    mask[new_indptr[-n_ineq:]-1] = True
    # Diagonal entries go into the slack columns n_vars..n_vars+n_ineq-1;
    # every other slot keeps the original column index / value.
    new_indices[mask] = n_vars+np.arange(n_ineq)
    new_indices[~mask] = indices
    new_data[mask] = s
    new_data[~mask] = data
    # csr_matrix casts the (float) index arrays to integer indices.
    J = sps.csr_matrix((new_data, new_indices, new_indptr),
                       (n_eq + n_ineq, n_vars + n_ineq))
    return J
def lagrangian_hessian_x(self, z, v):
    """Returns Lagrangian Hessian (in relation to `x`) -> Hx"""
    x = self.get_variables(z)
    # Multiplier layout: equality multipliers first, then the
    # inequality ones.
    n_eq, n_ineq = self.n_eq, self.n_ineq
    multipliers_eq = v[:n_eq]
    multipliers_ineq = v[n_eq:n_eq + n_ineq]
    return self.lagr_hess(x, multipliers_eq, multipliers_ineq)
def lagrangian_hessian_s(self, z, v):
    """Returns scaled Lagrangian Hessian (in relation to`s`) -> S Hs S"""
    s = self.get_slack(z)
    v_ineq = v[-self.n_ineq:]
    # Primal formulation ([1]_ p. 882, formula (3.1)):
    #   S Hs S = diag(s)*diag(barrier_parameter/s**2)*diag(s)
    # which reduces to the constant barrier_parameter.
    primal_term = self.barrier_parameter
    # Primal-dual formulation ([1]_ p. 883, formula (3.11)):
    #   S Hs S = diag(s)*diag(v/s)*diag(s) = diag(v*s)
    primal_dual_term = v_ineq*s
    # Primal-dual where the multiplier is positive, primal otherwise.
    return np.where(v_ineq > 0, primal_dual_term, primal_term)
def lagrangian_hessian(self, z, v):
    """Returns scaled Lagrangian Hessian as a ``LinearOperator``.

    Block-diagonal structure:
        [ Hx   0      ]
        [ 0    S Hs S ]
    """
    # Hessian blocks in relation to x and (when present) s.
    Hx = self.lagrangian_hessian_x(z, v)
    S_Hs_S = self.lagrangian_hessian_s(z, v) if self.n_ineq > 0 else None

    def apply_hessian(vec):
        x_part = self.get_variables(vec)
        top = Hx.dot(x_part)
        if self.n_ineq > 0:
            s_part = self.get_slack(vec)
            return np.hstack((top, S_Hs_S*s_part))
        return top

    size = self.n_vars + self.n_ineq
    return LinearOperator((size, size), apply_hessian)
def stop_criteria(self, state, z, last_iteration_failed,
                  optimality, constr_violation,
                  trust_radius, penalty, cg_info):
    """Stop criteria to the barrier problem.

    The criteria here proposed is similar to formula (2.3)
    from [1]_, p.879.
    """
    x = self.get_variables(z)
    # The global (outer-loop) criteria take precedence: when they fire,
    # flag termination of the whole interior-point iteration.
    globally_done = self.global_stop_criteria(state, x,
                                              last_iteration_failed,
                                              trust_radius, penalty,
                                              cg_info,
                                              self.barrier_parameter,
                                              self.tolerance)
    if globally_done:
        self.terminate = True
        return True
    # Otherwise stop this barrier subproblem on convergence to the
    # subproblem tolerance, or when the trust region has collapsed.
    converged = (optimality < self.tolerance
                 and constr_violation < self.tolerance)
    stalled = trust_radius < self.xtol
    return converged or stalled
def tr_interior_point(fun, grad, lagr_hess, n_vars, n_ineq, n_eq,
                      constr, jac, x0, fun0, grad0,
                      constr_ineq0, jac_ineq0, constr_eq0,
                      jac_eq0, stop_criteria,
                      enforce_feasibility, xtol, state,
                      initial_barrier_parameter,
                      initial_tolerance,
                      initial_penalty,
                      initial_trust_radius,
                      factorization_method):
    """Trust-region interior points method.

    Solve problem:
        minimize fun(x)
        subject to: constr_ineq(x) <= 0
                    constr_eq(x) = 0
    using trust-region interior point method described in [1]_.

    The problem is solved as a sequence of barrier subproblems
    (built by ``BarrierSubproblem``), each handed to
    ``equality_constrained_sqp``; the barrier parameter and subproblem
    tolerance decay by ``BARRIER_DECAY_RATIO`` between subproblems.
    Returns the tuple ``(x, state)`` where ``x`` is the variables part
    of the last iterate (slacks stripped) and ``state`` is the solver
    state returned by the last SQP call.
    """
    # BOUNDARY_PARAMETER controls the decrease on the slack
    # variables. Represents ``tau`` from [1]_ p.885, formula (3.18).
    BOUNDARY_PARAMETER = 0.995
    # BARRIER_DECAY_RATIO controls the decay of the barrier parameter
    # and of the subproblem tolerance. Represents ``theta`` from [1]_ p.879.
    BARRIER_DECAY_RATIO = 0.2
    # TRUST_ENLARGEMENT controls the enlargement on trust radius
    # after each iteration
    TRUST_ENLARGEMENT = 5

    # Default enforce_feasibility: no constraint is forced feasible.
    if enforce_feasibility is None:
        enforce_feasibility = np.zeros(n_ineq, bool)
    # Initial Values
    barrier_parameter = initial_barrier_parameter
    tolerance = initial_tolerance
    trust_radius = initial_trust_radius
    # Define initial value for the slack variables: at least 1, and
    # strictly positive even for violated inequalities.
    s0 = np.maximum(-1.5*constr_ineq0, np.ones(n_ineq))
    # Define barrier subproblem
    subprob = BarrierSubproblem(
        x0, s0, fun, grad, lagr_hess, n_vars, n_ineq, n_eq, constr, jac,
        barrier_parameter, tolerance, enforce_feasibility,
        stop_criteria, xtol, fun0, grad0, constr_ineq0, jac_ineq0,
        constr_eq0, jac_eq0)
    # Define initial parameter for the first iteration.
    # z stacks variables and slacks: z = [x, s].
    z = np.hstack((x0, s0))
    fun0_subprob, constr0_subprob = subprob.fun0, subprob.constr0
    grad0_subprob, jac0_subprob = subprob.grad0, subprob.jac0
    # Define trust region bounds: variables are unbounded; scaled slack
    # steps are bounded below by -BOUNDARY_PARAMETER so slacks stay
    # positive (fraction-to-the-boundary rule).
    trust_lb = np.hstack((np.full(subprob.n_vars, -np.inf),
                          np.full(subprob.n_ineq, -BOUNDARY_PARAMETER)))
    trust_ub = np.full(subprob.n_vars+subprob.n_ineq, np.inf)

    # Solves a sequence of barrier problems
    while True:
        # Solve SQP subproblem
        z, state = equality_constrained_sqp(
            subprob.function_and_constraints,
            subprob.gradient_and_jacobian,
            subprob.lagrangian_hessian,
            z, fun0_subprob, grad0_subprob,
            constr0_subprob, jac0_subprob, subprob.stop_criteria,
            state, initial_penalty, trust_radius,
            factorization_method, trust_lb, trust_ub, subprob.scaling)
        # subprob.terminate is set by subprob.stop_criteria when the
        # global stopping conditions fired inside the SQP solve.
        if subprob.terminate:
            break
        # Update parameters: re-enlarge the trust radius for the next
        # barrier subproblem.
        trust_radius = max(initial_trust_radius,
                           TRUST_ENLARGEMENT*state.tr_radius)
        # TODO: Use more advanced strategies from [2]_
        # to update these parameters.
        barrier_parameter *= BARRIER_DECAY_RATIO
        tolerance *= BARRIER_DECAY_RATIO
        # Update Barrier Problem
        subprob.update(barrier_parameter, tolerance)
        # Compute initial values for next iteration
        fun0_subprob, constr0_subprob = subprob.function_and_constraints(z)
        grad0_subprob, jac0_subprob = subprob.gradient_and_jacobian(z)

    # Get x (drop the slack components of z).
    x = subprob.get_variables(z)
    return x, state
|
Loading…
Add table
Add a link
Reference in a new issue