diff --git a/matlab/cli/prior.m b/matlab/cli/prior.m
index 9a4ead84905e78885e8c1cdb22825d6089fae4af..f25c4a9fa24604b3f803f071c1c4ff85b5cf50fd 100644
--- a/matlab/cli/prior.m
+++ b/matlab/cli/prior.m
@@ -40,7 +40,7 @@ if isempty(varargin) || ( isequal(length(varargin), 1) && isequal(varargin{1},'h
     return
 end
 
-global options_ M_ estim_params_ bayestopt_ oo_ objective_function_penalty_base
+global options_ M_ estim_params_ bayestopt_ oo_
 
 donesomething = false;
 
diff --git a/matlab/dsge_likelihood.m b/matlab/dsge_likelihood.m
index d5dfaefdf37284f892dc18a1047b1215c9fad72e..5dac526810216968f8c828c5c54d356225948f50 100644
--- a/matlab/dsge_likelihood.m
+++ b/matlab/dsge_likelihood.m
@@ -55,9 +55,9 @@ function [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff,Model,DynareOpti
 %! @item info==19
 %! The steadystate routine thrown an exception (inconsistent deep parameters).
 %! @item info==20
-%! Cannot find the steady state, info(2) contains the sum of square residuals (of the static equations).
+%! Cannot find the steady state, info(4) contains the sum of square residuals (of the static equations).
 %! @item info==21
-%! The steady state is complex, info(2) contains the sum of square of imaginary parts of the steady state.
+%! The steady state is complex, info(4) contains the sum of square of imaginary parts of the steady state.
 %! @item info==22
 %! The steady has NaNs.
 %! @item info==23
@@ -134,14 +134,12 @@ function [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff,Model,DynareOpti
 
 % AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT FR
 
-global objective_function_penalty_base
-
 % Initialization of the returned variables and others...
 fval        = [];
 SteadyState = [];
 trend_coeff = [];
 exit_flag   = 1;
-info        = 0;
+info        = zeros(4,1);
 DLIK        = [];
 Hess       = [];
 
@@ -181,9 +179,10 @@ end
 % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
 if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BoundsInfo.lb)
     k = find(xparam1<BoundsInfo.lb);
-    fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 41;
+    info(1) = 41;
+    info(4)= sum((BoundsInfo.lb(k)-xparam1(k)).^2);
     if analytic_derivation,
         DLIK=ones(length(xparam1),1);
     end
@@ -193,9 +192,10 @@ end
 % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
 if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BoundsInfo.ub)
     k = find(xparam1>BoundsInfo.ub);
-    fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 42;
+    info(1) = 42;
+    info(4)= sum((xparam1(k)-BoundsInfo.ub(k)).^2);
     if analytic_derivation,
         DLIK=ones(length(xparam1),1);
     end
@@ -212,18 +212,20 @@ H = Model.H;
 if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances')
     [Q_is_positive_definite, penalty] = ispd(Q(EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness,EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness));
     if ~Q_is_positive_definite
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 43;
+        info(1) = 43;
+        info(4) = penalty;
         return
     end
     if isfield(EstimatedParameters,'calibrated_covariances')
         correct_flag=check_consistency_covariances(Q);
         if ~correct_flag
             penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 71;
+            info(1) = 71;
+            info(4) = penalty;
             return
         end
     end
@@ -233,18 +235,20 @@ end
 if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME')
     [H_is_positive_definite, penalty] = ispd(H(EstimatedParameters.H_entries_to_check_for_positive_definiteness,EstimatedParameters.H_entries_to_check_for_positive_definiteness));
     if ~H_is_positive_definite
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
+        info(1) = 44;
+        info(4) = penalty;
         exit_flag = 0;
-        info = 44;
         return
     end
     if isfield(EstimatedParameters,'calibrated_covariances_ME')
         correct_flag=check_consistency_covariances(H);
         if ~correct_flag
             penalty = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 72;
+            info(1) = 72;
+            info(4) = penalty;
             return
         end
     end
@@ -259,30 +263,36 @@ end
 [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
 
 % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || info(1) == 9 || ...
-            info(1) == 22 || info(1) == 24 || info(1) == 19 || info(1) == 25 || info(1) == 10
-    fval = objective_function_penalty_base+1;
-    info = info(1);
-    exit_flag = 0;
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
-    end
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21  || info(1) == 23 || info(1)==26
-    fval = objective_function_penalty_base+info(2);
-    info = info(1);
-    exit_flag = 0;
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 ||info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 ||  info(1) == 85
+        %meaningful second entry of output that can be used
+        fval = Inf;
+        info(4) = info(2);
+        exit_flag = 0;
+        if analytic_derivation,
+            DLIK=ones(length(xparam1),1);
+        end
+        
+        return
+    else
+        fval = Inf;
+        info(4) = 0.1;
+        exit_flag = 0;
+        if analytic_derivation,
+            DLIK=ones(length(xparam1),1);
+        end
+        
+        return
     end
-    return
 end
 
 % check endogenous prior restrictions
 info=endogenous_prior_restrictions(T,R,Model,DynareOptions,DynareResults);
 if info(1),
-    fval = objective_function_penalty_base+info(2);
-    info = info(1);
+    fval = Inf;
+    info(4)=info(2);
     exit_flag = 0;
     if analytic_derivation,
         DLIK=ones(length(xparam1),1);
@@ -458,8 +468,9 @@ switch DynareOptions.lik_init
         diffuse_periods = size(dlik,1);
     end
     if isnan(dLIK),
-        info = 45;
-        fval = objective_function_penalty_base + 100;
+        fval = Inf;
+        info(1) = 45;
+        info(4) = 0.1;
         exit_flag = 0;
         return
     end
@@ -698,8 +709,9 @@ if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter
             end
         else
             if isinf(LIK)
-                info = 66;
-                fval = objective_function_penalty_base+1;
+                fval = Inf;
+                info(1) = 66;
+                info(4) = 0.1;
                 exit_flag = 0;
                 return
             end
@@ -707,7 +719,7 @@ if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter
     else
         if DynareOptions.lik_init==3
             LIK = LIK + dLIK;
-            if analytic_derivation==0 && nargout==2,
+            if analytic_derivation==0 && nargout>3,
                 if ~singular_diffuse_filter,
                     lik = [dlik; lik];
                 else
@@ -771,7 +783,7 @@ if (kalman_algo==2) || (kalman_algo==4)
     end
     if DynareOptions.lik_init==3
         LIK = LIK+dLIK;
-        if analytic_derivation==0 && nargout==2,
+        if analytic_derivation==0 && nargout>3,
             lik = [dlik; lik];
         end
     end
@@ -797,15 +809,17 @@ if analytic_derivation
 end
 
 if isnan(LIK)
-    info = 45;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 45;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
 
 if imag(LIK)~=0
-    info = 46;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 46;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
@@ -851,15 +865,17 @@ if DynareOptions.prior_restrictions.status
 end
 
 if isnan(fval)
-    info = 47;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 47;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
 
 if imag(fval)~=0
-    info = 48;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 48;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
@@ -867,7 +883,7 @@ end
 % Update DynareOptions.kalman_algo.
 DynareOptions.kalman_algo = kalman_algo;
 
-if analytic_derivation==0 && nargout==2,
+if analytic_derivation==0 && nargout>3,
     lik=lik(start:end,:);
     DLIK=[-lnprior; lik(:)];
 end
\ No newline at end of file
diff --git a/matlab/dsge_var_likelihood.m b/matlab/dsge_var_likelihood.m
index 1317f7e6c8b20924aff6824b51915f7617908578..2dc670e5747ac0be5fff3e431459ac9c6d6cc2a2 100644
--- a/matlab/dsge_var_likelihood.m
+++ b/matlab/dsge_var_likelihood.m
@@ -42,13 +42,12 @@ function [fval,info,exit_flag,grad,hess,SteadyState,trend_coeff,PHI,SIGMAu,iXX,p
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global objective_function_penalty_base
 persistent dsge_prior_weight_idx
 
 grad=[];
 hess=[];
 exit_flag = [];
-info = [];
+info = zeros(4,1);
 PHI = [];
 SIGMAu = [];
 iXX = [];
@@ -92,18 +91,20 @@ exit_flag = 1;
 % Return, with endogenous penalty, if some dsge-parameters are smaller than the lower bound of the prior domain.
 if DynareOptions.mode_compute ~= 1 && any(xparam1 < BoundsInfo.lb)
     k = find(xparam1 < BoundsInfo.lb);
-    fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 41;
+    info(1) = 41;
+    info(4)= sum((BoundsInfo.lb(k)-xparam1(k)).^2);
     return;
 end
 
 % Return, with endogenous penalty, if some dsge-parameters are greater than the upper bound of the prior domain.
 if DynareOptions.mode_compute ~= 1 && any(xparam1 > BoundsInfo.ub)
     k = find(xparam1 > BoundsInfo.ub);
-    fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 42;
+    info(1) = 42;
+    info(4) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
     return;
 end
 
@@ -124,11 +125,12 @@ dsge_prior_weight = Model.params(dsge_prior_weight_idx);
 
 % Is the dsge prior proper?
 if dsge_prior_weight<(NumberOfParameters+NumberOfObservedVariables)/NumberOfObservations;
-    fval = objective_function_penalty_base+abs(NumberOfObservations*dsge_prior_weight-(NumberOfParameters+NumberOfObservedVariables));
+    fval = Inf;
     exit_flag = 0;
-    info = 51;
+    info(1) = 51;
     info(2)=dsge_prior_weight;
     info(3)=(NumberOfParameters+NumberOfObservedVariables)/DynareDataset.nobs;
+    info(4)=abs(NumberOfObservations*dsge_prior_weight-(NumberOfParameters+NumberOfObservedVariables));
     return
 end
 
@@ -141,17 +143,21 @@ end
 [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
 
 % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || ...
-            info(1) == 22 || info(1) == 24 || info(1) == 25 || info(1) == 10
-    fval = objective_function_penalty_base+1;
-    info = info(1);
-    exit_flag = 0;
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1) == 6 || info(1) == 19 || info(1) == 20 || info(1) == 21
-    fval = objective_function_penalty_base+info(2);
-    info = info(1);
-    exit_flag = 0;
-    return
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 ||info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 ||  info(1) == 85
+        %meaningful second entry of output that can be used
+        fval = Inf;
+        info(4) = info(2);
+        exit_flag = 0;
+        return
+    else
+        fval = Inf;
+        info(4) = 0.1;
+        exit_flag = 0;
+        return
+    end
 end
 
 % Define the mean/steady state vector.
@@ -218,8 +224,9 @@ if ~isinf(dsge_prior_weight)% Evaluation of the likelihood of the dsge-var model
     SIGMAu = tmp0 - tmp1*tmp2*tmp1'; clear('tmp0');
     [SIGMAu_is_positive_definite, penalty] = ispd(SIGMAu);
     if ~SIGMAu_is_positive_definite
-        fval = objective_function_penalty_base + penalty;
-        info = 52;
+        fval = Inf;
+        info(1) = 52;
+        info(4) = penalty;
         exit_flag = 0;
         return;
     end
@@ -249,15 +256,17 @@ else% Evaluation of the likelihood of the dsge-var model when the dsge prior wei
 end
 
 if isnan(lik)
-    info = 45;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 45;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
 
 if imag(lik)~=0
-    info = 46;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 46;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
@@ -267,15 +276,17 @@ lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo
 fval = (lik-lnprior);
 
 if isnan(fval)
-    info = 47;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 47;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
 
 if imag(fval)~=0
-    info = 48;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 48;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
diff --git a/matlab/dynare_estimation_init.m b/matlab/dynare_estimation_init.m
index 9de2de7e640b0272ecc02077c076b09f01d81c4c..6c2e360384f71680c096f048d268b1f672576b69 100644
--- a/matlab/dynare_estimation_init.m
+++ b/matlab/dynare_estimation_init.m
@@ -49,8 +49,6 @@ function [dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_,
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global objective_function_penalty_base
-
 hh = [];
 
 if isempty(gsa_flag)
@@ -384,9 +382,6 @@ else% Yes!
     end
 end
 
-% Set the "size" of penalty.
-objective_function_penalty_base = 1e8;
-
 % Get informations about the variables of the model.
 dr = set_state_space(oo_.dr,M_,options_);
 oo_.dr = dr;
diff --git a/matlab/minus_logged_prior_density.m b/matlab/minus_logged_prior_density.m
index 0b965b5f93bdef21febafed00939ef03c851fdf3..38662541e1bfb192cbc8f0456b33ca0e1086b9c3 100644
--- a/matlab/minus_logged_prior_density.m
+++ b/matlab/minus_logged_prior_density.m
@@ -1,4 +1,4 @@
-function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams,pshape,p6,p7,p3,p4,DynareOptions,DynareModel,EstimatedParams,DynareResults)
+function [fval,info,exit_flag,fake_1,fake_2] = minus_logged_prior_density(xparams,pshape,p6,p7,p3,p4,DynareOptions,DynareModel,EstimatedParams,DynareResults)
 % Evaluates minus the logged prior density.
 % 
 % INPUTS 
@@ -11,8 +11,9 @@ function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams,
 %
 % OUTPUTS 
 %   f          [double]  value of minus the logged prior density.
-
-% Copyright (C) 2009-2012 Dynare Team
+%   info       [double]  vector: fourth entry stores penalty, first entry the error code.
+%
+% Copyright (C) 2009-2016 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -29,8 +30,6 @@ function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams,
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global objective_function_penalty_base
-
 fake_1 = 1;
 fake_2 = 1;
 
@@ -44,18 +43,20 @@ info = 0;
 % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
 if ~isequal(DynareOptions.mode_compute,1) && any(xparams<p3)
     k = find(xparams<p3);
-    fval = objective_function_penalty_base+sum((p3(k)-xparams(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 41;
+    info(1) = 41;
+    info(4) = sum((p3(k)-xparams(k)).^2);
     return
 end
 
 % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
 if ~isequal(DynareOptions.mode_compute,1) && any(xparams>p4)
     k = find(xparams>p4);
-    fval = objective_function_penalty_base+sum((xparams(k)-p4(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 42;
+    info(1) = 42;
+    info(4) = sum((xparams(k)-p4(k)).^2);
     return
 end
 
@@ -71,18 +72,20 @@ if ~issquare(Q) || EstimatedParams.ncx || isfield(EstimatedParams,'calibrated_co
     [Q_is_positive_definite, penalty] = ispd(Q);
     if ~Q_is_positive_definite
         % The variance-covariance matrix of the structural innovations is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty.
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 43;
+        info(1) = 43;
+        info(4) = penalty;
         return
     end
     if isfield(EstimatedParams,'calibrated_covariances')
         correct_flag=check_consistency_covariances(Q);
         if ~correct_flag
             penalty = sum(Q(EstimatedParams.calibrated_covariances.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 71;
+            info(1) = 71;
+            info(4) = penalty;
             return
         end
     end
@@ -94,18 +97,20 @@ if ~issquare(H) || EstimatedParams.ncn || isfield(EstimatedParams,'calibrated_co
     [H_is_positive_definite, penalty] = ispd(H);
     if ~H_is_positive_definite
         % The variance-covariance matrix of the measurement errors is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty.
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 44;
+        info(1) = 44;
+        info(4) = penalty;
         return
     end
     if isfield(EstimatedParams,'calibrated_covariances_ME')
         correct_flag=check_consistency_covariances(H);
         if ~correct_flag
             penalty = sum(H(EstimatedParams.calibrated_covariances_ME.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 72;
+            info(1) = 72;
+            info(4) = penalty;
             return
         end
     end
@@ -120,18 +125,24 @@ M_ = set_all_parameters(xparams,EstimatedParams,DynareModel);
 [dr,info,DynareModel,DynareOptions,DynareResults] = resol(0,DynareModel,DynareOptions,DynareResults);
 
 % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) ...
-            == 8 || info(1) == 22 || info(1) == 24 || info(1) == 19
-    fval = objective_function_penalty_base+1;
-    info = info(1);
-    exit_flag = 0;
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21  || info(1) == 23
-    fval = objective_function_penalty_base+info(2);
-    info = info(1);
-    exit_flag = 0;
-    return
+% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 ||info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 ||  info(1) == 85
+        %meaningful second entry of output that can be used
+        fval = Inf;
+        info(4) = info(2);
+        exit_flag = 0;
+        return
+    else
+        fval = Inf;
+        info(4) = 0.1;
+        exit_flag = 0;        
+        return
+    end
 end
 
 
+
 fval = - priordens(xparams,pshape,p6,p7,p3,p4);
\ No newline at end of file
diff --git a/matlab/non_linear_dsge_likelihood.m b/matlab/non_linear_dsge_likelihood.m
index 35685ed2a47540f703303244573f60543af7f4c6..034f708ab77bc7f8f67b954766a4a880fa14822e 100644
--- a/matlab/non_linear_dsge_likelihood.m
+++ b/matlab/non_linear_dsge_likelihood.m
@@ -32,7 +32,7 @@ function [fval,info,exit_flag,DLIK,Hess,ys,trend_coeff,Model,DynareOptions,Bayes
 %! @item fval
 %! Double scalar, value of (minus) the likelihood.
 %! @item info
-%! Double vector, second entry stores penalty, first entry the error code.
+%! Double vector, fourth entry stores penalty, first entry the error code.
 %! @table @ @code
 %! @item info==0
 %! No error.
@@ -127,7 +127,6 @@ function [fval,info,exit_flag,DLIK,Hess,ys,trend_coeff,Model,DynareOptions,Bayes
 % AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT fr
 %           frederic DOT karame AT univ DASH lemans DOT fr
 
-global objective_function_penalty_base
 % Declaration of the penalty as a persistent variable.
 persistent init_flag
 persistent restrict_variables_idx observed_variables_idx state_variables_idx mf0 mf1
@@ -153,18 +152,20 @@ end
 % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
 if (DynareOptions.mode_compute~=1) && any(xparam1<BoundsInfo.lb)
     k = find(xparam1(:) < BoundsInfo.lb);
-    fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 41;
+    info(1) = 41;
+    info(4) = sum((BoundsInfo.lb(k)-xparam1(k)).^2);
     return
 end
 
 % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
 if (DynareOptions.mode_compute~=1) && any(xparam1>BoundsInfo.ub)
     k = find(xparam1(:)>BoundsInfo.ub);
-    fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 42;
+    info(1) = 42;
+    info(4) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
     return
 end
 
@@ -176,18 +177,20 @@ H = Model.H;
 if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances')
     [Q_is_positive_definite, penalty] = ispd(Q(EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness,EstimatedParameters.Sigma_e_entries_to_check_for_positive_definiteness));
     if ~Q_is_positive_definite
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 43;
+        info(1) = 43;
+        info(4) = penalty;
         return
     end
     if isfield(EstimatedParameters,'calibrated_covariances')
         correct_flag=check_consistency_covariances(Q);
         if ~correct_flag
             penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 71;
+            info(1) = 71;
+            info(4) = penalty;
             return
         end
     end
@@ -197,18 +200,20 @@ end
 if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME')
     [H_is_positive_definite, penalty] = ispd(H(EstimatedParameters.H_entries_to_check_for_positive_definiteness,EstimatedParameters.H_entries_to_check_for_positive_definiteness));
     if ~H_is_positive_definite
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 44;
+        info(1) = 44;
+        info(4) = penalty;
         return
     end
     if isfield(EstimatedParameters,'calibrated_covariances_ME')
         correct_flag=check_consistency_covariances(H);
         if ~correct_flag
             penalty = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 72;
+            info(1) = 72;
+            info(4) = penalty;
             return
         end
     end
@@ -222,14 +227,21 @@ end
 % Linearize the model around the deterministic sdteadystate and extract the matrices of the state equation (T and R).
 [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
 
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 25 || info(1) == 10 || info(1) == 7 || info(1) == 9
-    fval = objective_function_penalty_base+1;
-    exit_flag = 0;
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1)==6 ||info(1) == 19 || info(1) == 20 || info(1) == 21
-    fval = objective_function_penalty_base+info(2);
-    exit_flag = 0;
-    return
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 ||info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 ||  info(1) == 85
+        %meaningful second entry of output that can be used
+        fval = Inf;
+        info(4) = info(2);
+        exit_flag = 0;
+        return
+    else
+        fval = Inf;
+        info(4) = 0.1;
+        exit_flag = 0;
+        return
+    end
 end
 
 % Define a vector of indices for the observed variables. Is this really usefull?...
@@ -321,13 +333,15 @@ DynareOptions.warning_for_steadystate = 0;
 LIK = feval(DynareOptions.particle.algorithm,ReducedForm,Y,start,DynareOptions.particle,DynareOptions.threads);
 set_dynare_random_generator_state(s1,s2);
 if imag(LIK)
-    info = 46;
-    likelihood = objective_function_penalty_base;
-    exit_flag  = 0;
+    likelihood = Inf;
+    info(1) = 46;
+    info(4) = 0.1;
+    exit_flag = 0;
 elseif isnan(LIK)
-    info = 45;
-    likelihood = objective_function_penalty_base;
-    exit_flag  = 0;
+    likelihood = Inf;
+    info(1) = 45;
+    info(4) = 0.1;
+    exit_flag = 0;
 else
     likelihood = LIK;
 end
@@ -339,15 +353,17 @@ lnprior = priordens(xparam1(:),BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesI
 fval    = (likelihood-lnprior);
 
 if isnan(fval)
-    info = 47;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 47;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
 
 if imag(fval)~=0
-    info = 48;
-    fval = objective_function_penalty_base + 100;
+    fval = Inf;
+    info(1) = 48;
+    info(4) = 0.1;
     exit_flag = 0;
     return
 end
diff --git a/matlab/optimization/csminit1.m b/matlab/optimization/csminit1.m
index 428c704c60bb7470dc19ede6a4ad308b21fb2e1c..1a07951957269e1ec3426f0d5e1fa5468f660965 100644
--- a/matlab/optimization/csminit1.m
+++ b/matlab/optimization/csminit1.m
@@ -1,24 +1,28 @@
-function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,varargin)
-% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
+function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,penalty,f0,g0,badg,H0,Verbose,varargin)
+% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,penalty,f0,g0,badg,H0,Verbose,varargin)
 % 
 % Inputs: 
-%   fcn:    [string]        string naming the objective function to be minimized
-%   x0:     [npar by 1]     initial value of the parameter vector
-%   g0:     [npar by 1]     initial value of the gradient vector
-%   H0:     [npar by npar]  initial value for the inverse Hessian.  Must be positive definite.
-%   varargin:               Optional additional inputs that get handed off to fcn each
-%                           time it is called.
+%   fcn:        [string]        string naming the objective function to be minimized
+%   x0:         [npar by 1]     initial value of the parameter vector
+%   penalty:    [scalar]        variable penalty in case of failure of objective function
+%   f0:         [scalar]        initial value of the function
+%   g0:         [npar by 1]     initial value of the gradient vector
+%   badg        [scalar]        indicator for problem in gradient computation
+%   H0:         [npar by npar]  initial value for the inverse Hessian.  Must be positive definite.
+%   Verbose:    [scalar]        indicator for verbose computations
+%   varargin:                   Optional additional inputs that get handed off to fcn each
+%                               time it is called.
 
 % Outputs:
-%   fhat:   [scalar]        function value at minimum
-%   xhat:   [npar by 1]     parameter vector at minimum
-%   fcount  [scalar]        function iteration count upon termination
-%   retcode    [scalar]    0: normal step
-%                           1: zero gradient.
-%                           5: largest step still improves too fast.
-%                           2,4: back and forth adjustment of stepsize didn't finish.  
-%                           3: smallest stepsize still improves too slow
-%                           6: no improvement found
+%   fhat:       [scalar]        function value at minimum
+%   xhat:       [npar by 1]     parameter vector at minimum
+%   fcount      [scalar]        function iteration count upon termination
+%   retcode     [scalar]        0: normal step
+%                               1: zero gradient.
+%                               5: largest step still improves too fast.
+%                               2,4: back and forth adjustment of stepsize didn't finish.  
+%                               3: smallest stepsize still improves too slow
+%                               6: no improvement found
 %---------------------
 % Modified 7/22/96 to omit variable-length P list, for efficiency and compilation.
 % Places where the number of P's need to be altered or the code could be returned to
@@ -34,7 +38,7 @@ function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,vara
 % http://sims.princeton.edu/yftp/optimize/mfiles/csminit.m
 % 
 % Copyright (C) 1993-2007 Christopher Sims
-% Copyright (C) 2008-2015 Dynare Team
+% Copyright (C) 2008-2016 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -137,7 +141,7 @@ else
             dxtest=x0+dx*lambda;
         end
         % home
-        f = feval(fcn,dxtest,varargin{:});
+        f = penalty_objective_function(dxtest,fcn,penalty,varargin{:});
         %ARGLIST
         %f = feval(fcn,dxtest,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13);
         % f = feval(fcn,x0+dx*lambda,P1,P2,P3,P4,P5,P6,P7,P8);
diff --git a/matlab/optimization/csminwel1.m b/matlab/optimization/csminwel1.m
index 5c641ba9e3da0d166f09a61c66d5ee915cb9e3d2..4d197c0285c8d05c885441228d295138479d17c0 100644
--- a/matlab/optimization/csminwel1.m
+++ b/matlab/optimization/csminwel1.m
@@ -59,8 +59,8 @@ function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,m
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global objective_function_penalty_base
-
+% initialize variable penalty    
+penalty = 1e8;
 fh = [];
 xh = [];
 [nx,no]=size(x0);
@@ -72,6 +72,14 @@ fcount=0;
 gh = [];
 H = [];
 retcodeh = [];
+
+% force fcn, grad to function handle
+if ischar(fcn)
+    fcn = str2func(fcn);
+end
+if ischar(grad)
+    grad = str2func(grad);
+end
 %tailstr = ')';
 %stailstr = [];
 % Lines below make the number of Pi's optional.  This is inefficient, though, and precludes
@@ -83,7 +91,7 @@ retcodeh = [];
 %   stailstr=[' P' num2str(i) stailstr];
 %end
 
-[f0,junk1,junk2,cost_flag] = feval(fcn,x0,varargin{:});
+[f0,cost_flag,arg1] = penalty_objective_function(x0,fcn,penalty,varargin{:});
 
 if ~cost_flag
     disp_verbose('Bad initial parameter.',Verbose)
@@ -91,11 +99,11 @@ if ~cost_flag
 end
 
 if NumGrad
-    [g, badg]=get_num_grad(method,fcn,f0,x0,epsilon,varargin{:});
+    [g, badg]=get_num_grad(method,fcn,penalty,f0,x0,epsilon,varargin{:});
 elseif ischar(grad)
-    [g,badg] = feval(grad,x0,varargin{:});
+    [g,badg] = grad(x0,varargin{:});
 else
-    g=junk1;
+    g=arg1;
     badg=0;
 end
 retcode3=101;
@@ -105,7 +113,7 @@ H=H0;
 cliff=0;
 while ~done
     % penalty for dsge_likelihood and dsge_var_likelihood
-    objective_function_penalty_base = f;
+    penalty = f;
 
     g1=[]; g2=[]; g3=[];
     %addition fj. 7/6/94 for control
@@ -115,7 +123,7 @@ while ~done
     %   disp_verbose([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]);
     %-------------------------
     itct=itct+1;
-    [f1, x1, fc, retcode1] = csminit1(fcn,x,f,g,badg,H,Verbose,varargin{:});
+    [f1, x1, fc, retcode1] = csminit1(fcn,x,penalty,f,g,badg,H,Verbose,varargin{:});
     fcount = fcount+fc;
     % erased on 8/4/94
     % if (retcode == 1) || (abs(f1-f) < crit)
@@ -130,17 +138,17 @@ while ~done
             wall1=1; badg1=1;
         else
             if NumGrad
-                [g1, badg1]=get_num_grad(method,fcn,f1,x1,epsilon,varargin{:});
+                [g1, badg1]=get_num_grad(method,fcn,penalty,f1,x1,epsilon,varargin{:});
             elseif ischar(grad),
-                [g1, badg1] = feval(grad,x1,varargin{:});
+                [g1, badg1] = grad(x1,varargin{:});
             else
-                [junk1,g1,junk2, cost_flag] = feval(fcn,x1,varargin{:});
+                [junk1,cost_flag,g1] = penalty_objective_function(x1,fcn,penalty,varargin{:});
                 badg1 = ~cost_flag;
             end
             wall1=badg1;
             % g1
             if Save_files
-                save g1.mat g1 x1 f1 varargin;
+                save('g1.mat','g1','x1','f1','varargin');
             end
         end
         if wall1 % && (~done) by Jinill
@@ -150,18 +158,18 @@ while ~done
                  %fcliff=fh;xcliff=xh;
             Hcliff=H+diag(diag(H).*rand(nx,1));
             disp_verbose('Cliff.  Perturbing search direction.',Verbose)
-            [f2, x2, fc, retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,Verbose,varargin{:});
+            [f2, x2, fc, retcode2] = csminit1(fcn,x,penalty,f,g,badg,Hcliff,Verbose,varargin{:});
             fcount = fcount+fc; % put by Jinill
             if  f2 < f
                 if retcode2==2 || retcode2==4
                     wall2=1; badg2=1;
                 else
                     if NumGrad
-                        [g2, badg2]=get_num_grad(method,fcn,f2,x2,epsilon,varargin{:});
+                        [g2, badg2]=get_num_grad(method,fcn,penalty,f2,x2,epsilon,varargin{:});
                     elseif ischar(grad),
-                        [g2, badg2] = feval(grad,x2,varargin{:});
+                        [g2, badg2] = grad(x2,varargin{:});
                     else
-                        [junk1,g2,junk2, cost_flag] = feval(fcn,x1,varargin{:});
+                        [junk2,cost_flag,g2] = penalty_objective_function(x2,fcn,penalty,varargin{:});
                         badg2 = ~cost_flag;
                     end
                     wall2=badg2;
@@ -170,7 +178,7 @@ while ~done
                         badg2
                     end
                     if Save_files
-                        save g2.mat g2 x2 f2 varargin
+                        save('g2.mat','g2','x2','f2','varargin');
                     end
                 end
                 if wall2
@@ -182,24 +190,24 @@ while ~done
                         if(size(x0,2)>1)
                             gcliff=gcliff';
                         end
-                        [f3, x3, fc, retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),Verbose,varargin{:});
+                        [f3, x3, fc, retcode3] = csminit1(fcn,x,penalty,f,gcliff,0,eye(nx),Verbose,varargin{:});
                         fcount = fcount+fc; % put by Jinill
                         if retcode3==2 || retcode3==4
                             wall3=1; 
                             badg3=1;
                         else
                             if NumGrad
-                                [g3, badg3]=get_num_grad(method,fcn,f3,x3,epsilon,varargin{:});
+                                [g3, badg3]=get_num_grad(method,fcn,penalty,f3,x3,epsilon,varargin{:});
                             elseif ischar(grad),
-                                [g3, badg3] = feval(grad,x3,varargin{:});
+                                [g3, badg3] = grad(x3,varargin{:});
                             else
-                                [junk1,g3,junk2, cost_flag] = feval(fcn,x1,varargin{:});
+                                [junk3,cost_flag,g3] = penalty_objective_function(x3,fcn,penalty,varargin{:});
                                 badg3 = ~cost_flag;
                             end
                             wall3=badg3;
                             % g3
                             if Save_files
-                                save g3.mat g3 x3 f3 varargin;
+                                save('g3.mat','g3','x3','f3','varargin');
                             end
                         end
                     end
@@ -249,11 +257,11 @@ while ~done
         end
         if nogh
             if NumGrad
-                [gh, badgh]=get_num_grad(method,fcn,fh,xh,epsilon,varargin{:});
+                [gh, badgh]=get_num_grad(method,fcn,penalty,fh,xh,epsilon,varargin{:});
             elseif ischar(grad),
-                [gh, badgh] = feval(grad, xh,varargin{:});
+                [gh, badgh] = grad(xh,varargin{:});
             else
-                [junk1,gh,junk2, cost_flag] = feval(fcn,x1,varargin{:});
+                [junkh,cost_flag,gh] = penalty_objective_function(xh,fcn,penalty,varargin{:});
                 badgh = ~cost_flag;
             end
         end
@@ -303,18 +311,18 @@ end
 
 end
 
-function [g, badg]=get_num_grad(method,fcn,f0,x0,epsilon,varargin)
+function [g, badg]=get_num_grad(method,fcn,penalty,f0,x0,epsilon,varargin)
     switch method
       case 2
-        [g,badg] = numgrad2(fcn, f0, x0, epsilon, varargin{:});
+        [g,badg] = numgrad2(fcn, f0, x0, penalty, epsilon, varargin{:});
       case 3
-        [g,badg] = numgrad3(fcn, f0, x0, epsilon, varargin{:});
+        [g,badg] = numgrad3(fcn, f0, x0, penalty, epsilon, varargin{:});
       case 5
-        [g,badg] = numgrad5(fcn, f0, x0, epsilon, varargin{:});
+        [g,badg] = numgrad5(fcn, f0, x0, penalty, epsilon, varargin{:});
       case 13
-        [g,badg] = numgrad3_(fcn, f0, x0, epsilon, varargin{:});
+        [g,badg] = numgrad3_(fcn, f0, x0, penalty, epsilon, varargin{:});
       case 15
-        [g,badg] = numgrad5_(fcn, f0, x0, epsilon, varargin{:});
+        [g,badg] = numgrad5_(fcn, f0, x0, penalty, epsilon, varargin{:});
       otherwise
         error('csminwel1: Unknown method for gradient evaluation!')
     end
diff --git a/matlab/optimization/mr_gstep.m b/matlab/optimization/mr_gstep.m
index a2335e1b89394539e49df0956bb8bf97fc5e051d..605508d2c0d53186f0431c251d2dd9be2f964849 100644
--- a/matlab/optimization/mr_gstep.m
+++ b/matlab/optimization/mr_gstep.m
@@ -1,5 +1,5 @@
-function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,htol0,Verbose,Save_files,varargin)
-% function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin)
+function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,varargin)
+% [f0, x, ig] = mr_gstep(h1,x,bounds,func0,penalty,htol0,Verbose,Save_files,varargin)
 %
 % Gibbs type step in optimisation
 %
@@ -11,7 +11,7 @@ function [f0, x, ig] = mr_gstep(h1,x,bounds,func0,htol0,Verbose,Save_files,varar
 % varargin{6} --> BayesInfo
 % varargin{1} --> DynareResults
 
-% Copyright (C) 2006-2014 Dynare Team
+% Copyright (C) 2006-2016 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -42,7 +42,7 @@ end
 if length(htol)==1,
     htol=htol*ones(n,1);
 end
-f0=feval(func0,x,varargin{:});
+f0=penalty_objective_function(x,func0,penalty,varargin{:});
 
 xh1=x;
 f1=zeros(size(f0,1),n);
@@ -56,10 +56,10 @@ while i<n
     hcheck=0;
     dx=[];
     xh1(i)=x(i)+h1(i);
-    fx = feval(func0,xh1,varargin{:});
+    fx = penalty_objective_function(xh1,func0,penalty,varargin{:});
     f1(:,i)=fx;
     xh1(i)=x(i)-h1(i);
-    fx = feval(func0,xh1,varargin{:});
+    fx = penalty_objective_function(xh1,func0,penalty,varargin{:});
     f_1(:,i)=fx;
     if hcheck && htol(i)<1
         htol(i)=min(1,max(min(abs(dx))*2,htol(i)*10));
@@ -72,7 +72,7 @@ while i<n
         gg(i)=(f1(i)'-f_1(i)')./(2.*h1(i));
         hh(i) = 1/max(1.e-9,abs( (f1(i)+f_1(i)-2*f0)./(h1(i)*h1(i)) ));
         if gg(i)*(hh(i)*gg(i))/2 > htol(i)
-            [f0 x fc retcode] = csminit1(func0,x,f0,gg,0,diag(hh),Verbose,varargin{:});
+            [f0 x fc retcode] = csminit1(func0,x,penalty,f0,gg,0,diag(hh),Verbose,varargin{:});
             ig(i)=1;
             if Verbose
                 fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i))
@@ -82,11 +82,11 @@ while i<n
     end
     x = check_bounds(x,bounds);
     if Save_files
-        save gstep.mat x h1 f0
+        save('gstep.mat','x','h1','f0')
     end
 end
 if Save_files
-    save gstep.mat x h1 f0
+    save('gstep.mat','x','h1','f0')
 end
 
 return
diff --git a/matlab/optimization/mr_hessian.m b/matlab/optimization/mr_hessian.m
index 040472437e9f6d20b102dcff405b21e09b5afe31..096584e90b34c18fbfa7bbc75064879c6a221c71 100644
--- a/matlab/optimization/mr_hessian.m
+++ b/matlab/optimization/mr_hessian.m
@@ -1,5 +1,5 @@
-function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hflag,htol0,varargin)
-%  [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hflag,htol0,varargin)
+function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,penalty,hflag,htol0,varargin)
+%  [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,penalty,hflag,htol0,varargin)
 %
 %  numerical gradient and Hessian, with 'automatic' check of numerical
 %  error
@@ -33,7 +33,7 @@ function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hf
 
 
 
-% Copyright (C) 2004-2014 Dynare Team
+% Copyright (C) 2004-2016 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -60,7 +60,7 @@ if init
     return
 end
 
-[f0, ff0]=feval(func,x,varargin{:});
+[f0,exit_flag, ff0]=penalty_objective_function(x,func,penalty,varargin{:});
 h2=varargin{7}.ub-varargin{7}.lb;
 hmax=varargin{7}.ub-x;
 hmax=min(hmax,x-varargin{7}.lb);
@@ -94,7 +94,7 @@ while i<n
     hcheck=0;
     xh1(i)=x(i)+h1(i);
     try
-        [fx, ffx]=feval(func,xh1,varargin{:});
+        [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
     catch
         fx=1.e8;
     end
@@ -115,7 +115,7 @@ while i<n
             h1(i) = max(h1(i),1.e-10);
             xh1(i)=x(i)+h1(i);
             try
-                [fx, ffx]=feval(func,xh1,varargin{:});
+                [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
             catch
                 fx=1.e8;
             end
@@ -124,14 +124,14 @@ while i<n
             h1(i)= htol/abs(dx(it))*h1(i);
             xh1(i)=x(i)+h1(i);
             try
-                [fx, ffx]=feval(func,xh1,varargin{:});
+                [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
             catch
                 fx=1.e8;
             end
             while (fx-f0)==0
                 h1(i)= h1(i)*2;
                 xh1(i)=x(i)+h1(i);
-                [fx, ffx]=feval(func,xh1,varargin{:});
+                [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
                 ic=1;
             end
         end
@@ -152,7 +152,7 @@ while i<n
         end
     end
     xh1(i)=x(i)-h1(i);
-    [fx, ffx]=feval(func,xh1,varargin{:});
+    [fx,exit_flag,ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
     f_1(:,i)=fx;
     if outer_product_gradient,
         if any(isnan(ffx)) || isempty(ffx),
@@ -193,8 +193,8 @@ if outer_product_gradient,
                 xh1(j)=x(j)+h_1(j);
                 xh_1(i)=x(i)-h1(i);
                 xh_1(j)=x(j)-h_1(j);
-                temp1 = feval(func,xh1,varargin{:});
-                temp2 = feval(func,xh_1,varargin{:});
+                temp1 = penalty_objective_function(xh1,func,penalty,varargin{:});
+                temp2 = penalty_objective_function(xh_1,func,penalty,varargin{:});
                 hessian_mat(:,(i-1)*n+j)=-(-temp1 -temp2+temp(:,i)+temp(:,j))./(2*h1(i)*h_1(j));
                 xh1(i)=x(i);
                 xh1(j)=x(j);
diff --git a/matlab/optimization/newrat.m b/matlab/optimization/newrat.m
index 2b1f7cb2654dac836d6077af0ec1e92d6fc98ba0..d21ddd87e0985228849c8e928ff998477402126e 100644
--- a/matlab/optimization/newrat.m
+++ b/matlab/optimization/newrat.m
@@ -1,5 +1,5 @@
 function [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, varargin)
-%  [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, varargin)
+%  [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, varargin)
 %
 %  Optimiser with outer product gradient and with sequences of univariate steps
 %  uses Chris Sims subroutine for line search
@@ -30,7 +30,7 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_deriva
 %  varargin{1} --> DynareResults
 
 
-% Copyright (C) 2004-2014 Dynare Team
+% Copyright (C) 2004-2016 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -47,8 +47,8 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, bounds, analytic_deriva
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global objective_function_penalty_base
-
+% initialize variable penalty    
+penalty = 1e8;
 
 icount=0;
 nx=length(x);
@@ -62,17 +62,22 @@ htol=htol_base;
 htol0=htol_base;
 gibbstol=length(varargin{6}.pshape)/50; %25;
 
+% force fcn, grad to function handle
+if ischar(func0)
+    func0 = str2func(func0);
+end
+
 % func0 = str2func([func2str(func0),'_hh']);
 % func0 = func0;
-[fval0,gg,hh]=feval(func0,x,varargin{:});
+[fval0,exit_flag,gg,hh]=penalty_objective_function(x,func0,penalty,varargin{:});
 fval=fval0;
 
 % initialize mr_gstep and mr_hessian
 
 outer_product_gradient=1;
 if isempty(hh)
-    mr_hessian(1,x,[],[],[],varargin{:});
-    [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,x,func0,flagit,htol,varargin{:});
+    mr_hessian(1,x,[],[],[],[],varargin{:});
+    [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,x,func0,penalty,flagit,htol,varargin{:});
     if isempty(dum),
         outer_product_gradient=0;
         igg = 1e-4*eye(nx);
@@ -110,7 +115,7 @@ if max(eig(hh))<0
     pause
 end
 if Save_files
-    save m1.mat x hh g hhg igg fval0
+    save('m1.mat','x','hh','g','hhg','igg','fval0')
 end
 
 igrad=1;
@@ -124,12 +129,12 @@ while norm(gg)>gtol && check==0 && jit<nit
     jit=jit+1;
     tic1 = tic;
     icount=icount+1;
-    objective_function_penalty_base = fval0(icount);
+    penalty = fval0(icount);
     disp_verbose([' '],Verbose)
     disp_verbose(['Iteration ',num2str(icount)],Verbose)
-    [fval,x0,fc,retcode] = csminit1(func0,xparam1,fval0(icount),gg,0,H,Verbose,varargin{:});
+    [fval,x0,fc,retcode] = csminit1(func0,xparam1,penalty,fval0(icount),gg,0,H,Verbose,varargin{:});
     if igrad
-        [fval1,x01,fc,retcode1] = csminit1(func0,x0,fval,gg,0,inx,Verbose,varargin{:});
+        [fval1,x01,fc,retcode1] = csminit1(func0,x0,penalty,fval,gg,0,inx,Verbose,varargin{:});
         if (fval-fval1)>1
             disp_verbose('Gradient step!!',Verbose)
         else
@@ -148,10 +153,10 @@ while norm(gg)>gtol && check==0 && jit<nit
         end
         iggx=eye(length(gg));
         iggx(find(ig),find(ig)) = inv( hhx(find(ig),find(ig)) );
-        [fvala,x0,fc,retcode] = csminit1(func0,x0,fval,ggx,0,iggx,Verbose,varargin{:});
+        [fvala,x0,fc,retcode] = csminit1(func0,x0,penalty,fval,ggx,0,iggx,Verbose,varargin{:});
     end
     x0 = check_bounds(x0,bounds);
-    [fvala, x0, ig] = mr_gstep(h1,x0,bounds,func0,htol0,Verbose,Save_files,varargin{:});
+    [fvala, x0, ig] = mr_gstep(h1,x0,bounds,func0,penalty,htol0,Verbose,Save_files,varargin{:});
     x0 = check_bounds(x0,bounds);
     nig=[nig ig];
     disp_verbose('Sequence of univariate steps!!',Verbose)
@@ -159,7 +164,7 @@ while norm(gg)>gtol && check==0 && jit<nit
     if (fval0(icount)-fval)<ftol && flagit==0
         disp_verbose('Try diagonal Hessian',Verbose)
         ihh=diag(1./(diag(hhg)));
-        [fval2,x0,fc,retcode2] = csminit1(func0,x0,fval,gg,0,ihh,Verbose,varargin{:});
+        [fval2,x0,fc,retcode2] = csminit1(func0,x0,penalty,fval,gg,0,ihh,Verbose,varargin{:});
         x0 = check_bounds(x0,bounds);
         if (fval-fval2)>=ftol
             disp_verbose('Diagonal Hessian successful',Verbose)
@@ -169,7 +174,7 @@ while norm(gg)>gtol && check==0 && jit<nit
     if (fval0(icount)-fval)<ftol && flagit==0
         disp_verbose('Try gradient direction',Verbose)
         ihh0=inx.*1.e-4;
-        [fval3,x0,fc,retcode3] = csminit1(func0,x0,fval,gg,0,ihh0,Verbose,varargin{:});
+        [fval3,x0,fc,retcode3] = csminit1(func0,x0,penalty,fval,gg,0,ihh0,Verbose,varargin{:});
         x0 = check_bounds(x0,bounds);
         if (fval-fval3)>=ftol
             disp_verbose('Gradient direction successful',Verbose)
@@ -183,14 +188,14 @@ while norm(gg)>gtol && check==0 && jit<nit
         disp_verbose('No further improvement is possible!',Verbose)
         check=1;
         if analytic_derivation,
-            [fvalx,gg,hh]=feval(func0,xparam1,varargin{:});
+            [fvalx,exit_flag,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
             hhg=hh;
             H = inv(hh);
         else
         if flagit==2
             hh=hh0;
         elseif flagg>0
-            [dum, gg, htol0, igg, hhg,h1]=mr_hessian(0,xparam1,func0,flagg,ftol0,varargin{:});
+            [dum, gg, htol0, igg, hhg,h1]=mr_hessian(0,xparam1,func0,penalty,flagg,ftol0,varargin{:});
             if flagg==2
                 hh = reshape(dum,nx,nx);
                 ee=eig(hh);
@@ -223,14 +228,14 @@ while norm(gg)>gtol && check==0 && jit<nit
         if norm(x(:,icount)-xparam1)>1.e-12 && analytic_derivation==0,
             try
                 if Save_files
-                    save m1.mat x fval0 nig -append
+                    save('m1.mat','x','fval0','nig','-append')
                 end
             catch
                 if Save_files
-                    save m1.mat x fval0 nig
+                    save('m1.mat','x','fval0','nig')
                 end
             end
-            [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func0,flagit,htol,varargin{:});
+            [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func0,penalty,flagit,htol,varargin{:});
             if isempty(dum),
                 outer_product_gradient=0;
             end
@@ -258,7 +263,7 @@ while norm(gg)>gtol && check==0 && jit<nit
                 H = igg;
             end
         elseif analytic_derivation,
-            [fvalx,gg,hh]=feval(func0,xparam1,varargin{:});
+            [fvalx,exit_flag,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
             hhg=hh;
             H = inv(hh);
         end
@@ -274,12 +279,12 @@ while norm(gg)>gtol && check==0 && jit<nit
         disp_verbose(['Elapsed time for iteration ',num2str(t),' s.'],Verbose)
         g(:,icount+1)=gg;
         if Save_files
-            save m1.mat x hh g hhg igg fval0 nig H
+            save('m1.mat','x','hh','g','hhg','igg','fval0','nig','H')
         end
     end
 end
 if Save_files
-    save m1.mat x hh g hhg igg fval0 nig
+    save('m1.mat','x','hh','g','hhg','igg','fval0','nig')
 end
 if ftol>ftol0
     skipline()
diff --git a/matlab/optimization/numgrad2.m b/matlab/optimization/numgrad2.m
index f8057bb94e807e259dfd1cfedf8da538c715c5a5..a0f2552aacef890c8199f6d0ff8d251f808b7938 100644
--- a/matlab/optimization/numgrad2.m
+++ b/matlab/optimization/numgrad2.m
@@ -1,10 +1,10 @@
-function [g, badg] = numgrad2(fcn,f0,x,epsilon,varargin)
+function [g, badg] = numgrad2(fcn,f0,x,penalty,epsilon,varargin)
 
 % Original file downloaded from:
 % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m
 
 % Copyright (C) 1993-2007 Christopher Sims
-% Copyright (C) 2008-2014 Dynare Team
+% Copyright (C) 2008-2016 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -33,11 +33,11 @@ for i=1:n
     xiold = x(i);
     if right_derivative
         x(i) = x(i)+h;
-        fh = feval(fcn, x, varargin{:});
+        fh = penalty_objective_function(x, fcn, penalty, varargin{:});
         g0 = (fh-f0)/h;
     else
         x(i) = x(i)-h;
-        fh = feval(fcn, x, varargin{:});
+        fh = penalty_objective_function(x, fcn, penalty, varargin{:});
         g0 = (f0-fh)/h;
     end
     if abs(g0)< 1e15
diff --git a/matlab/optimization/numgrad3.m b/matlab/optimization/numgrad3.m
index b11c1b03936d718f5eea63f2a54fa532b50b9bd5..2ff94bc072c09c343366726186d1e8244022c447 100644
--- a/matlab/optimization/numgrad3.m
+++ b/matlab/optimization/numgrad3.m
@@ -1,4 +1,4 @@
-function [g, badg] = numgrad3(fcn,f0,x,epsilon,varargin)
+function [g, badg] = numgrad3(fcn,f0,x,penalty,epsilon,varargin)
 % Computes the gradient of the objective function fcn using a three points
 % formula if possible.
 %
@@ -11,7 +11,7 @@ function [g, badg] = numgrad3(fcn,f0,x,epsilon,varargin)
 % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m
 
 % Copyright (C) 1993-2007 Christopher Sims
-% Copyright (C) 2008-2014 Dynare Team
+% Copyright (C) 2008-2016 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -38,9 +38,9 @@ badg=0;
 for i=1:n
     xiold = x(i);
     x(i) = xiold+h;
-    f1 = feval(fcn, x, varargin{:});
+    f1 = penalty_objective_function(x, fcn, penalty, varargin{:});
     x(i) = xiold-h;
-    f2 = feval(fcn, x, varargin{:});
+    f2 = penalty_objective_function(x, fcn, penalty, varargin{:});
     g0 = (f1-f2)/H;
     if abs(g0)< 1e15
         g(i)=g0;
diff --git a/matlab/optimization/numgrad3_.m b/matlab/optimization/numgrad3_.m
index 61e8710c3bf4fb4bd54a8ac97141c38a07f37c7d..6358138335d3170af2d72e77e9b3332579e15021 100644
--- a/matlab/optimization/numgrad3_.m
+++ b/matlab/optimization/numgrad3_.m
@@ -1,4 +1,4 @@
-function [g, badg] = numgrad3_(fcn,f0,x,epsilon,varargin)
+function [g, badg] = numgrad3_(fcn,f0,x,penalty,epsilon,varargin)
 % Computes the gradient of the objective function fcn using a three points
 % formula if possible.
 %
@@ -46,12 +46,12 @@ for i=1:n
     xiold = x(i);
     h = step_length_correction(xiold,scale,i)*delta;
     x(i) = xiold + h;
-    [f1,junk1,junk2,cost_flag1] = feval(fcn, x, varargin{:});
+    [f1,cost_flag1] = penalty_objective_function(x, fcn, penalty, varargin{:});
     if ~cost_flag1
         fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the right!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h))
     end
     x(i) = xiold - h;
-    [f2,junk2,junk2,cost_flag2] = feval(fcn, x, varargin{:});
+    [f2,cost_flag2] = penalty_objective_function(x, fcn, penalty, varargin{:});
     if ~cost_flag2
         fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the left!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h))
     end
diff --git a/matlab/optimization/numgrad5.m b/matlab/optimization/numgrad5.m
index c31936790b43f52a8027c761591a75a1f6a3c4ee..f8b8d33e0387e70de323ba59821e182eecafc48a 100644
--- a/matlab/optimization/numgrad5.m
+++ b/matlab/optimization/numgrad5.m
@@ -1,4 +1,4 @@
-function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin)
+function [g, badg] = numgrad5(fcn,f0,x,penalty,epsilon,varargin)
 % Computes the gradient of the objective function fcn using a five points
 % formula if possible.
 %
@@ -13,7 +13,7 @@ function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin)
 % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m
 
 % Copyright (C) 1993-2007 Christopher Sims
-% Copyright (C) 2008-2014 Dynare Team
+% Copyright (C) 2008-2016 Dynare Team
 
 % This file is part of Dynare.
 %
@@ -40,13 +40,13 @@ badg = 0;
 for i=1:n
     xiold = x(i);
     x(i) = xiold+h;
-    f1 = feval(fcn, x, varargin{:});
+    f1 = penalty_objective_function(x, fcn, penalty, varargin{:});
     x(i) = xiold-h;
-    f2 = feval(fcn, x, varargin{:});
+    f2 = penalty_objective_function(x, fcn, penalty, varargin{:});
     x(i) = xiold+2*h;
-    f3 = feval(fcn, x, varargin{:});
+    f3 = penalty_objective_function(x, fcn, penalty, varargin{:});
     x(i) = xiold-2*h;
-    f4 = feval(fcn, x, varargin{:});
+    f4 = penalty_objective_function(x, fcn, penalty, varargin{:});
     g0 = (8*(f1-f2)+f4-f3)/H;
     if abs(g0)< 1e15
         g(i) = g0;
diff --git a/matlab/optimization/numgrad5_.m b/matlab/optimization/numgrad5_.m
index d8395092e12a0f14f615c82da7482294f623f030..bbce6fd0efce3d17ec094be7e49e4ed63ef13b25 100644
--- a/matlab/optimization/numgrad5_.m
+++ b/matlab/optimization/numgrad5_.m
@@ -1,4 +1,4 @@
-function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin)
+function [g, badg] = numgrad5_(fcn,f0,x,penalty,epsilon,varargin)
 % Computes the gradient of the objective function fcn using a five points
 % formula if possible.
 %
@@ -13,7 +13,7 @@ function [g, badg] = numgrad5(fcn,f0,x,epsilon,varargin)
 % http://sims.princeton.edu/yftp/optimize/mfiles/numgrad.m
 
 % Copyright (C) 1993-2007 Christopher Sims
-% Copyright (C) 2008-2014 Dynare Team
+% Copyright (C) 2008-2016 Dynare Team
 %
 % This file is part of Dynare.
 %
@@ -48,13 +48,13 @@ for i=1:n
     xiold = x(i);
     h = step_length_correction(xiold,scale,i)*delta;
     x(i) = xiold+h;
-    [f1,junk1,junk2,cost_flag1] = feval(fcn, x, varargin{:});
+    [f1,cost_flag1] = penalty_objective_function(x, fcn, penalty, varargin{:});
     x(i) = xiold-h;
-    [f2,junk1,junk2,cost_flag2] = feval(fcn, x, varargin{:});
+    [f2,cost_flag2] = penalty_objective_function(x, fcn, penalty, varargin{:});
     x(i) = xiold+2*h;
-    [f3,junk1,junk2,cost_flag3] = feval(fcn, x, varargin{:});
+    [f3,cost_flag3] = penalty_objective_function(x, fcn, penalty, varargin{:});
     x(i) = xiold-2*h;
-    [f4,junk1,junk2,cost_flag4] = feval(fcn, x, varargin{:});
+    [f4,cost_flag4] = penalty_objective_function(x, fcn, penalty, varargin{:});
     if f0<f1 && f1<f3 && f0<f2 && f2<f4
         g0 = 0;
     else
diff --git a/matlab/optimization/penalty_objective_function.m b/matlab/optimization/penalty_objective_function.m
index f35e56686d58364b5b3dccc81eb597bb75475bd8..32ef71ebe57e9cda81fe50a8b1bcbe8152819b1a 100644
--- a/matlab/optimization/penalty_objective_function.m
+++ b/matlab/optimization/penalty_objective_function.m
@@ -2,6 +2,6 @@ function [fval,exit_flag,arg1,arg2] = penalty_objective_function(x0,fcn,penalty,
     [fval,info,exit_flag,arg1,arg2] = fcn(x0,varargin{:});
     
     if info(1) ~= 0
-        fval = penalty + info(2);
+        fval = penalty + info(4);
     end
 end
\ No newline at end of file
diff --git a/matlab/optimize_prior.m b/matlab/optimize_prior.m
index d5b5e0ac121a8a66011c7a4c2293160dd5981ab3..cdb03ecff2f5e659787683c5e2407e43f0094044 100644
--- a/matlab/optimize_prior.m
+++ b/matlab/optimize_prior.m
@@ -43,13 +43,6 @@ while look_for_admissible_initial_condition
     end
 end
 
-% Evaluate the prior density at the initial condition.
-objective_function_penalty_base = minus_logged_prior_density(xinit, BayesInfo.pshape, ...
-                               BayesInfo.p6, ...
-                               BayesInfo.p7, ...
-                               BayesInfo.p3, ...
-                               BayesInfo.p4,DynareOptions,ModelInfo,EstimationInfo,DynareResults);
-
 % Maximization of the prior density
 [xparams, lpd, hessian_mat] = ...
     maximize_prior_density(xinit, BayesInfo.pshape, ...
diff --git a/matlab/osr_obj.m b/matlab/osr_obj.m
index 7c3e7035312a51cb724a863390de6dc7ff50f672..599deb8e7c3fa5c8061678c7fbfd36add4d2dedb 100644
--- a/matlab/osr_obj.m
+++ b/matlab/osr_obj.m
@@ -40,7 +40,7 @@ global M_ oo_ options_ optimal_Q_ it_
 junk = [];
 exit_flag = 1;
 vx = [];
-info=0;
+info=zeros(4,1);
 loss=[];
 % set parameters of the policiy rule
 M_.params(i_params) = x;
@@ -49,42 +49,18 @@ M_.params(i_params) = x;
 it_ = M_.maximum_lag+1;
 [dr,info,M_,options_,oo_] = resol(0,M_,options_,oo_);
 
-switch info(1)
-  case 1
-    loss = 1e8;
-    return
-  case 2
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 3
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 4
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 5
-    loss = 1e8;
-    return
-  case 6
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 7
-    loss = 1e8*min(1e3);
-    return
-  case 8
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 9
-    loss = 1e8*min(1e3,info(2));
-    return   
-  case 20
-    loss = 1e8*min(1e3,info(2));
-    return
-  otherwise
-    if info(1)~=0
-      loss = 1e8;
-      return;
-    end  
+if info(1)
+    if info(1) == 3 || info(1) == 4 || info(1) == 5 || info(1)==6 || info(1) == 19 || ...
+            info(1) == 20 || info(1) == 21 || info(1) == 23 || info(1) == 26 || ...
+            info(1) == 81 || info(1) == 84 ||  info(1) == 85
+        loss = 1e8;
+        info(4)=info(2);
+        return
+    else
+        loss = 1e8;
+        info(4)=0.1;        
+        return
+    end
 end
 
 vx = get_variance_of_endogenous_variables(dr,i_var);
diff --git a/matlab/parallel/masterParallel.m b/matlab/parallel/masterParallel.m
index 75728bb0f7746afce6ed955b674683505084cb7a..7fe687dceed640ccf6726fc51b987d1de921dfb2 100644
--- a/matlab/parallel/masterParallel.m
+++ b/matlab/parallel/masterParallel.m
@@ -28,8 +28,7 @@ function [fOutVar,nBlockPerCPU, totCPU] = masterParallel(Parallel,fBlock,nBlock,
 % The number of parallelized threads will be equal to (nBlock-fBlock+1).
 %
 % Treatment of global variables:
-%   Global variables used within the called function (e.g.
-%   objective_function_penalty_base) are wrapped and passed by storing their 
+%   Global variables used within the called function are wrapped and passed by storing their 
 %   values at the start of the parallel computation in a file via
 %   storeGlobalVars.m. This file is then loaded in the separate,
 %   independent slave Matlab sessions. By keeping them separate, no
diff --git a/matlab/posterior_sampler.m b/matlab/posterior_sampler.m
index 9efabb4d89b00fac5a65608aee9e4e4436e09d74..d1f36918f806fc4bfa31ce9b956fa1a0c81667e9 100644
--- a/matlab/posterior_sampler.m
+++ b/matlab/posterior_sampler.m
@@ -54,11 +54,6 @@ function posterior_sampler(TargetFun,ProposalFun,xparam1,sampler_options,mh_boun
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-
-% In Metropolis, we set penalty to Inf so as to reject all parameter sets triggering an error during target density computation
-global objective_function_penalty_base
-objective_function_penalty_base = Inf;
-
 vv = sampler_options.invhess;
 % Initialization of the sampler
 [ ix2, ilogpo2, ModelName, MetropolisFolder, fblck, fline, npar, nblck, nruns, NewFile, MAX_nruns, d ] = ...
diff --git a/matlab/posterior_sampler_iteration.m b/matlab/posterior_sampler_iteration.m
index 5b09680ee4fd199abaea3a1beeba571c0ea2ce74..09e5d8851060d425cacd040ff90e1b19232b0de9 100644
--- a/matlab/posterior_sampler_iteration.m
+++ b/matlab/posterior_sampler_iteration.m
@@ -68,7 +68,6 @@ switch posterior_sampling_method
         bayestopt_=varargin{6};
         npar=length(last_draw);
         %% randomize indices for blocking in this iteration
-        global objective_function_penalty_base
         indices=randperm(npar)';
         blocks=[1; (1+cumsum((rand(length(indices)-1,1)>(1-sampler_options.new_block_probability))))];
         nblocks=blocks(end,1); %get number of blocks this iteration
@@ -82,7 +81,6 @@ switch posterior_sampling_method
             [xopt_current_block, fval, exitflag, hess_mat_optimizer, options_, Scale] = dynare_minimize_objective(@TaRB_optimizer_wrapper,par_start_current_block,sampler_options.mode_compute,options_,[mh_bounds.lb(indices(blocks==block_iter,1),1) mh_bounds.ub(indices(blocks==block_iter,1),1)],bayestopt_.name,bayestopt_,[],...
                 current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
                 varargin{:}); %inputs for objective           
-            objective_function_penalty_base=Inf; %reset penalty that may have been changed by optimizer
             %% covariance for proposal density
             hessian_mat = reshape(hessian('TaRB_optimizer_wrapper',xopt_current_block, ...
                     options_.gstep,...