diff --git a/doc/manual/source/the-model-file.rst b/doc/manual/source/the-model-file.rst
index b8bd7d89e363fbccc67fc757b03ebc08a588b18a..8419124101b1cb5b80548b01b8c2d14bad41a73d 100644
--- a/doc/manual/source/the-model-file.rst
+++ b/doc/manual/source/the-model-file.rst
@@ -4220,7 +4220,8 @@ Computing the stochastic solution
     variance-covariance of the endogenous variables. Contains
     theoretical variance if the ``periods`` option is not present and simulated variance
     otherwise. Only available for ``order<4``. At ``order=2`` it will be
-    a second-order accurate approximation. At ``order=3``, theoretical moments
+    a second-order accurate approximation (i.e. ignoring terms of order 3 and 4 that would
+    arise when using the full second-order policy function). At ``order=3``, theoretical moments
     are only available with ``pruning``. The variables are arranged in declaration order.
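+
+    *Example*
+
+        Assuming a variable was declared as ``c``, its variance can be read from the
+        corresponding diagonal entry (the index follows declaration order)::
+
+            i_c = strmatch('c', M_.endo_names, 'exact');
+            var_c = oo_.var(i_c, i_c);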
 
 .. matvar:: oo_.var_list
@@ -10229,7 +10230,8 @@ with ``discretionary_policy`` or for optimal simple rules with ``osr``
 
     At the current stage, the stochastic context does not support the ``pruning`` option.
     At ``order>3``, only the computation of conditional welfare with steady state Lagrange 
-    multipliers is supported.
+    multipliers is supported. Note that at ``order=2``, the unconditional welfare measure is based
+    on the second-order accurate approximation of the variance stored in ``oo_.var``.
     
     *Example (stochastic context)*
 
diff --git a/matlab/evaluate_planner_objective.m b/matlab/evaluate_planner_objective.m
index ff18eef734eb70f2db9c0611e933229d97fc5fca..f59562ccd923187b2a94ca81813ca5836a783939 100644
--- a/matlab/evaluate_planner_objective.m
+++ b/matlab/evaluate_planner_objective.m
@@ -43,7 +43,8 @@ function planner_objective_value = evaluate_planner_objective(M_,options_,oo_)
 
 % Similarly, taking the unconditional expectation of a second-order approximation of utility around the non-stochastic steady state yields a second-order approximation of unconditional welfare
 % E(W) = (1 - beta)^{-1} ( Ubar + U_x h_y E(yhat) + 0.5 ( (U_xx h_y^2 + U_x h_yy) E(yhat^2) + (U_xx h_u^2 + U_x h_uu) E(u^2) + U_x h_ss )
-% where E(yhat), E(yhat^2) and E(u^2) can be derived from oo_.mean and oo_.var
+% where E(yhat), E(yhat^2) and E(u^2) can be derived from oo_.mean and oo_.var.
+% Importantly, E(yhat) and E(yhat^2) are second-order accurate approximations. They are not the moments
+% one would obtain by exploiting all the information contained in the second-order decision rules:
+% doing so would add terms of order 3 and 4 to the approximation of E(yhat^2), which we exclude here.
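+% A sketch of the order counting behind this: E(yhat*yhat') = Var(yhat) + E(yhat)*E(yhat)'. As E(yhat)
+% is itself the (second-order) risk correction of the mean, i.e. of order sigma^2, the term
+% E(yhat)*E(yhat)' is of order sigma^4 and is therefore dropped; only the second-order accurate
+% variance stored in oo_.var is kept.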
 
 % As for conditional welfare, the second-order approximation of welfare around the non-stochastic steady state leads to
 % W(y_{t-1}, u_t, sigma) = Wbar + W_y yhat_{t-1} + W_u u_t + W_yu yhat_{t-1} ⊗ u_t + 0.5 ( W_yy yhat_{t-1}^2 + W_uu u_t^2 + W_ss )
@@ -173,8 +174,7 @@ if options_.ramsey_policy
             Ey = oo_.mean;
             Eyhat = Ey - ys(dr.order_var(nstatic+(1:nspred)));
             
-            var_corr = Eyhat*Eyhat';
-            Eyhatyhat = oo_.var(:) + var_corr(:);
+            Eyhatyhat = oo_.var(:);
             Euu = M_.Sigma_e(:);
             
             EU = U + Uy*gy*Eyhat + 0.5*((Uyygygy + Uy*gyy)*Eyhatyhat + (Uyygugu + Uy*guu)*Euu + Uy*gss);
@@ -262,8 +262,7 @@ elseif options_.discretionary_policy
     Ey = oo_.mean;
     Eyhat = Ey - ys(dr.order_var(nstatic+(1:nspred)));
     
-    var_corr = Eyhat*Eyhat';
-    Eyhatyhat = oo_.var(:) + var_corr(:);
+    Eyhatyhat = oo_.var(:);
     Euu = M_.Sigma_e(:);
     
     EU = U + Uy*gy*Eyhat + 0.5*(Uyygygy*Eyhatyhat + Uyygugu*Euu);
diff --git a/tests/optimal_policy/neo_growth_common.inc b/tests/optimal_policy/neo_growth_common.inc
index 0d5d1d6b32adb20964046a4243361facea37b5af..e9ef8f0cc1f84f0b93f53de6d17a177d30dc557d 100644
--- a/tests/optimal_policy/neo_growth_common.inc
+++ b/tests/optimal_policy/neo_growth_common.inc
@@ -9,7 +9,7 @@ gamma = 1;
 delta = 0.012;
 alpha = 0.4;
 rho = 0.95;
-s = 0.007;
+s = 0.07; % increased by a factor of 10 from 0.007 to increase the size of the risk correction
 
 model;
 c^(-gamma)=beta*c(+1)^(-gamma)*(alpha*exp(z(+1))*k^(alpha-1)+1-delta);
diff --git a/tests/optimal_policy/neo_growth_ramsey.mod b/tests/optimal_policy/neo_growth_ramsey.mod
index 09ef3b9e46a1fdea7df87293ca3a9d79072d7910..6201589b9a68ccacaa44a71f31abaebb851a57ad 100644
--- a/tests/optimal_policy/neo_growth_ramsey.mod
+++ b/tests/optimal_policy/neo_growth_ramsey.mod
@@ -54,6 +54,15 @@ if abs(cond_W_hand_L_SS - planner_objective_value.conditional.steady_initial_mul
    error('Inaccurate conditional welfare with Lagrange multiplier set to its steady-state value');
 end;
 
+if abs(oo_.mean(strmatch('U',M_.endo_names,'exact')) - oo1.oo_.mean(strmatch('U',M1.M_.endo_names,'exact'))) > 1e-6
+   error('Mean utility is inconsistent across the two runs');
+end;
+
+if abs(planner_objective_value.unconditional - oo_.mean(strmatch('U',M_.endo_names,'exact'))/(1-beta)) > 1e-6
+   error('Unconditional welfare does not match the discounted mean utility');
+end;
+
 initial_condition_states = zeros(M1.M_.endo_nbr,M1.M_.maximum_lag);
 initial_condition_states(1:M1.M_.orig_endo_nbr,:) = repmat(oo1.oo_.dr.ys(1:M1.M_.orig_endo_nbr),1,M1.M_.maximum_lag);
 shock_matrix = zeros(1,M1.M_.exo_nbr);
diff --git a/tests/optimal_policy/neo_growth_ramsey_common.inc b/tests/optimal_policy/neo_growth_ramsey_common.inc
index 01e4a6d39a40e94efe3b99ea60d1fcddb6db1f1e..dd08a476218c2c9a0956a69e79ae6493c025baaf 100644
--- a/tests/optimal_policy/neo_growth_ramsey_common.inc
+++ b/tests/optimal_policy/neo_growth_ramsey_common.inc
@@ -1,4 +1,4 @@
-var k z c;
+var k z c U;
 
 varexo e;
 
@@ -9,15 +9,17 @@ gamma = 1;
 delta = 0.012;
 alpha = 0.4;
 rho = 0.95;
-s = 0.007;
+s = 0.07; % increased by a factor of 10 from 0.007 to increase the size of the risk correction
 
 model;
 k=exp(z)*k(-1)^(alpha)-c+(1-delta)*k(-1);
 z=rho*z(-1)+s*e;
+U=ln(c);
 end;
 
 steady_state_model;
 z = 0;
+U = ln(c);
 end;
 
 planner_objective ln(c);