From 5a757a457d9cae7c45a7382d42f512c61bd86750 Mon Sep 17 00:00:00 2001
From: dcluo
Date: Thu, 7 Mar 2024 13:52:09 -0600
Subject: [PATCH 1/2] Changed where setLinearizationPoint is called to avoid
 re-factorizing solves during repeated Hessian evaluations

---
 soupy/modeling/controlCostFunctional.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/soupy/modeling/controlCostFunctional.py b/soupy/modeling/controlCostFunctional.py
index bdb04dd..7011c01 100644
--- a/soupy/modeling/controlCostFunctional.py
+++ b/soupy/modeling/controlCostFunctional.py
@@ -159,6 +159,8 @@ def computeComponents(self, z, order=0):
             self.model.evalGradientControl(self.x, self.grad_objective)
             self.has_adjoint_solve = True
 
+        self.model.setLinearizationPoint(self.x)
+
         self.has_forward_solve = True
 
 
@@ -182,6 +184,7 @@ def cost(self, z, order=0, **kwargs):
             penalization = self.penalization.cost(z)
             return objective + penalization
 
+
     def grad(self, g):
         """
         Computes the gradient of the cost functional
@@ -212,7 +215,7 @@ def hessian(self, zhat, Hzhat):
 
         .. note:: Assumes :code:`self.cost` has been called with :code:`order >= 2`
         """
-        self.model.setLinearizationPoint(self.x)
+        # self.model.setLinearizationPoint(self.x)
         self.model.applyCz(zhat, self.rhs_fwd)
         self.model.solveFwdIncremental(self.uhat, self.rhs_fwd)
         self.model.applyWuu(self.uhat, self.rhs_adj)

From 8ee61e02853e60bb2fedb2817454b8790aa5b57a Mon Sep 17 00:00:00 2001
From: dcluo
Date: Tue, 7 May 2024 12:04:41 -0500
Subject: [PATCH 2/2] Updated finite difference checkers to return the
 estimated derivatives

---
 soupy/utils/stochasticCostFiniteDifference.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/soupy/utils/stochasticCostFiniteDifference.py b/soupy/utils/stochasticCostFiniteDifference.py
index 018c92c..9f248eb 100644
--- a/soupy/utils/stochasticCostFiniteDifference.py
+++ b/soupy/utils/stochasticCostFiniteDifference.py
@@ -28,6 +28,8 @@ def stochasticCostFiniteDifference(pde_cost, z, dz, delta=1e-3, sample_size=1):
     :param dz: Direction for finite difference derivative
     :param delta: Step size
     :param sample_size: sample size for expectation computations
+
+    :Returns: The adjoint directional derivative and the FD directional derivative
     """
 
     gz = pde_cost.generate_vector(CONTROL)
@@ -56,7 +58,7 @@
 
     print("Analytic gradient: %g" %ad_grad)
     print("Finite diff gradient: %g" %fd_grad)
-
+    return ad_grad, fd_grad
 
 def SAACostFiniteDifference(pde_cost, z, dz, delta=1e-3):
     """
@@ -67,6 +69,8 @@ def SAACostFiniteDifference(pde_cost, z, dz, delta=1e-3):
     :param dz: Direction for finite difference derivative
     :param delta: Step size
     :param sample_size: sample size for expectation computations
+
+    :Returns: The adjoint directional derivative and the FD directional derivative
     """
 
    gz = pde_cost.generate_vector(CONTROL)
@@ -86,7 +90,6 @@
     pde_cost.grad(gz)
 
     rng = hp.Random()
-    delta = 1e-3
     z1.axpy(delta, dz)
 
     c1 = pde_cost.cost(z1, order=0)
     fd_grad = (c1 - c0)/delta
     ad_grad = gz.inner(dz)
 
     print("Analytic gradient: %g" %ad_grad)
     print("Finite diff gradient: %g" %fd_grad)
-
+    return ad_grad, fd_grad
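
Note on the first patch: with setLinearizationPoint now called once inside computeComponents, repeated applications of the Hessian through hessian() reuse the same linearization point instead of re-setting it on every apply. A minimal calling sketch, assuming cost_functional is an already-constructed control cost functional and z, zhat, Hzhat are control-space vectors (all hypothetical names, not part of the patches):

    # Sketch only: cost_functional, z, zhat, Hzhat are assumed to exist elsewhere.
    # A single call with order >= 2 performs the forward/adjoint solves and, after
    # patch 1, also sets the model linearization point.
    cost_functional.cost(z, order=2)

    # Repeated Hessian actions (e.g., inside a Newton-CG loop) no longer call
    # setLinearizationPoint themselves, avoiding redundant re-factorization.
    for k in range(10):
        cost_functional.hessian(zhat, Hzhat)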
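
Note on the second patch: both finite difference checkers now return the two directional derivative estimates in addition to printing them, so callers such as unit tests can compare them programmatically. A minimal usage sketch, assuming pde_cost, z, and dz are already set up as described in the checkers' docstrings; the tolerance below is illustrative only:

    # Sketch only: pde_cost is an assumed SOUPy cost object, z a control vector,
    # dz a nonzero finite difference direction.
    ad_grad, fd_grad = SAACostFiniteDifference(pde_cost, z, dz, delta=1e-3)

    # The agreement can now be asserted instead of read off the printed output;
    # the tolerance is illustrative, not prescribed by the patch.
    rel_err = abs(ad_grad - fd_grad) / max(abs(ad_grad), 1e-12)
    assert rel_err < 1e-2

stochasticCostFiniteDifference(pde_cost, z, dz, delta, sample_size) returns the same pair for the sampled stochastic cost.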