8000 First implementation of structured quasi-Newton approximation in Auglag. · PythonOptimizers/NLP.py@c3c5f2f · GitHub
[go: up one dir, main page]

Skip to content

Commit c3c5f2f

Browse files
Andrew Lambedpo
authored and committed
First implementation of structured quasi-Newton approximation in Auglag.
1 parent b74b153 commit c3c5f2f

File tree

3 files changed

+54
-3
lines changed

3 files changed

+54
-3
lines changed

nlp/model/augmented_lagrangian.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -176,3 +176,27 @@ class QuasiNewtonAugmentedLagrangian(QuasiNewtonModel, AugmentedLagrangian):
176176
"""
177177

178178
pass # All the work is done by the parent classes.
179+
180+
181+
class StructuredQuasiNewtonAugmentedLagrangian(QuasiNewtonModel, AugmentedLagrangian):
    """Bound-constrained augmented Lagrangian with a structured Hessian.

    Only ``hprod`` is overridden, to provide a structured quasi-Newton
    estimate of the augmented-Lagrangian Hessian in quadratic
    approximations.  All other work is done by the parent classes, so no
    ``__init__`` override is needed.
    """

    def hprod(self, x, z, v, **kwargs):
        """Return a structured Hessian-vector product with `v` at (`x`, `z`).

        The Lagrangian-Hessian term (exactly,
        ``model.hprod(x, self.pi - self.penalty * model.cons(x), v)``) is
        replaced by the quasi-Newton approximation ``self.H``, while the
        first-order penalty curvature ``penalty * J(x)' J(x) v`` is
        computed exactly from the constraint Jacobian operator.  A
        proximal regularization term ``prox * v`` is added when active.
        """
        model = self.model
        # Quasi-Newton approximation of the Lagrangian Hessian applied to v.
        w = self.H * v
        # Exact penalty curvature: penalty * J' (J v).
        J = model.jop(x)
        Hv = w + self.penalty * J.T * J * v
        if self.prox > 0:
            Hv += self.prox * v
        return Hv

nlp/optimize/auglag.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
import logging
1212
from nlp.model.augmented_lagrangian import AugmentedLagrangian
1313
from nlp.model.augmented_lagrangian import QuasiNewtonAugmentedLagrangian
14+
from nlp.model.augmented_lagrangian import StructuredQuasiNewtonAugmentedLagrangian
1415
from nlp.optimize.pcg import TruncatedCG
1516
from nlp.tools.exceptions import UserExitRequest
1617
from nlp.tools.utils import project, where
@@ -77,10 +78,12 @@ def __init__(self, model, bc_solver, **kwargs):
7778
:stal: Problem converged to an infeasible point
7879
:time: Time limit exceeded
7980
"""
80-
self.qn = kwargs.get("qn",False)
81+
self.qn = kwargs.get("qn","none")
8182

82-
if self.qn:
83+
if self.qn == "full":
8384
self.model = QuasiNewtonAugmentedLagrangian(model, **kwargs)
85+
elif self.qn == "struct":
86+
self.model = StructuredQuasiNewtonAugmentedLagrangian(model, **kwargs)
8487
else:
8588
self.model = AugmentedLagrangian(model, **kwargs)
8689

nlp/optimize/tron.py

Lines changed: 25 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@ def __init__(self, model, tr_solver, **kwargs):
5959
self.iter = 0 # Iteration counter
6060
self.total_cgiter = 0
6161
self.x = kwargs.get("x0", self.model.x0.copy())
62+
self.x_old = None
6263
self.f = None
6364
self.f0 = None
6465
self.g = None
@@ -391,6 +392,7 @@ def solve(self):
391392
self.f0 = self.f
392393
self.g = model.grad(self.x) # Current gradient
393394
self.g_old = self.g.copy()
395+
self.x_old = self.x.copy()
394396
pgnorm = projected_gradient_norm2(self.x, self.g,
395397
model.Lvar, model.Uvar)
396398
self.pg0 = pgnorm
@@ -424,6 +426,7 @@ def solve(self):
424426
self.step_accepted = False
425427
if self.save_g:
426428
self.g_old = self.g.copy()
429+
self.x_old = self.x.copy()
427430

428431
# Wrap Hessian into an operator.
429432
H = model.hop(self.x.copy())
@@ -585,6 +588,27 @@ def __init__(self, *args, **kwargs):
585588

586589
def post_iteration(self, **kwargs):
    """Feed the latest accepted step into the quasi-Newton approximation."""
    if not self.step_accepted:
        return
    # Record the (step, gradient-change) pair in the Hessian approximation.
    self.model.H.store(self.dvars, self.dgrad)
593+
594+
595+
class StructQNTRON(QNTRON):
    """A variant of TRON with a structured Hessian approximation.

    This solver is designed for use within Auglag.  The quasi-Newton
    pairs are corrected so that the approximation targets only the
    Lagrangian part of the augmented-Lagrangian Hessian; the exact
    penalty curvature is supplied separately by the model's ``hprod``.
    No ``__init__`` override is needed; construction is inherited.
    """

    def post_iteration(self, **kwargs):
        """Update the structured quasi-Newton approximation after an accepted step."""
        if not self.step_accepted:
            return

        # Correct the gradient difference to account for structure:
        # remove the change due to the penalty-gradient term
        # penalty * J(x_old)' c(.) between x_old and the new x.
        cons_new = self.model.model.cons(self.x)
        cons_old = self.model.model.cons(self.x_old)
        J_old = self.model.model.jop(self.x_old)
        penalty = self.model.penalty

        dgrad_mod = self.dgrad + penalty * J_old.T * (cons_old - cons_new)

        # NOTE(review): the three-argument `store` presumably lets the
        # structured approximation use both the raw and the modified
        # gradient differences — confirm against the Hessian class API.
        self.model.H.store(self.dvars, self.dgrad, dgrad_mod)

0 commit comments

Comments (0)