diff --git a/matlab/TaRB_metropolis_hastings_core.m b/matlab/TaRB_metropolis_hastings_core.m
index 7259c8ff8def2e99da6a224c484f1a0c090d2f1b..5fb1374118d1ad804c5e6357aa18544309052edf 100644
--- a/matlab/TaRB_metropolis_hastings_core.m
+++ b/matlab/TaRB_metropolis_hastings_core.m
@@ -59,7 +59,6 @@ function myoutput = TaRB_metropolis_hastings_core(myinputs,fblck,nblck,whoiam, T
 %
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
-global objective_function_penalty_base;
 
 if nargin<4,
     whoiam=0;
@@ -163,7 +162,6 @@ for curr_chain = fblck:nblck,
             [xopt_current_block, fval, exitflag, hess_mat_optimizer, options_, Scale] = dynare_minimize_objective(@TaRB_optimizer_wrapper,par_start_current_block,options_.TaRB.mode_compute,options_,[mh_bounds.lb(indices(blocks==block_iter,1),1) mh_bounds.ub(indices(blocks==block_iter,1),1)],bayestopt_.name,bayestopt_,[],...
                 current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
                 dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_); %inputs for objective           
-            objective_function_penalty_base=Inf; %reset penalty that may have been changed by optimizer
             %% covariance for proposal density
             hessian_mat = reshape(hessian('TaRB_optimizer_wrapper',xopt_current_block, ...
                     options_.gstep,...
diff --git a/matlab/cli/prior.m b/matlab/cli/prior.m
index d00e0555b5191228f1b5a4306905dffa6ececa4d..b409c9fc13ca241c458d267935f4fb12b519fd20 100644
--- a/matlab/cli/prior.m
+++ b/matlab/cli/prior.m
@@ -40,8 +40,6 @@ if isempty(varargin) || ( isequal(length(varargin), 1) && isequal(varargin{1},'h
     return
 end
 
-global options_ M_ estim_params_ bayestopt_ oo_ objective_function_penalty_base
-
 donesomething = false;
 
 % Temporarly change qz_criterium option value
diff --git a/matlab/dynare_estimation_init.m b/matlab/dynare_estimation_init.m
index 9ef77b87caf38dea103d2d30f1e85638111ef6bc..db1129d441277153729cfca8467bca98dad00e84 100644
--- a/matlab/dynare_estimation_init.m
+++ b/matlab/dynare_estimation_init.m
@@ -49,8 +49,6 @@ function [dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_,
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global objective_function_penalty_base
-
 hh = [];
 
 if isempty(gsa_flag)
@@ -386,9 +384,6 @@ else% Yes!
     end
 end
 
-% Set the "size" of penalty.
-objective_function_penalty_base = 1e8;
-
 % Get informations about the variables of the model.
 dr = set_state_space(oo_.dr,M_,options_);
 oo_.dr = dr;
diff --git a/matlab/independent_metropolis_hastings_core.m b/matlab/independent_metropolis_hastings_core.m
index 3e2ae2e42f6a95dcadd2af38e9c846d614f8a107..6821090165945c5edd0cb4394d50dc0d0bc81187 100644
--- a/matlab/independent_metropolis_hastings_core.m
+++ b/matlab/independent_metropolis_hastings_core.m
@@ -37,7 +37,7 @@ if nargin<4,
     whoiam=0;
 end
 
-global bayestopt_ estim_params_ options_  M_ oo_ objective_function_penalty_base
+global bayestopt_ estim_params_ options_  M_ oo_
 
 % Reshape 'myinputs' for local computation.
 % In order to avoid confusion in the name space, the instruction struct2local(myinputs) is replaced by:
@@ -67,9 +67,6 @@ if whoiam
               bayestopt_.p3,bayestopt_.p4,1);
 end
 
-% (re)Set the penalty.
-objective_function_penalty_base = Inf;
-
 MetropolisFolder = CheckPath('metropolis',M_.dname);
 BaseName = [MetropolisFolder filesep ModelName];
 
diff --git a/matlab/minus_logged_prior_density.m b/matlab/minus_logged_prior_density.m
index 0b965b5f93bdef21febafed00939ef03c851fdf3..ceb7c0302ec2839d92715c19b6069ebc446fc2d6 100644
--- a/matlab/minus_logged_prior_density.m
+++ b/matlab/minus_logged_prior_density.m
@@ -29,8 +29,6 @@ function [fval,fake_1, fake_2, exit_flag ] = minus_logged_prior_density(xparams,
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global objective_function_penalty_base
-
 fake_1 = 1;
 fake_2 = 1;
 
@@ -44,18 +42,20 @@ info = 0;
 % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
 if ~isequal(DynareOptions.mode_compute,1) && any(xparams<p3)
     k = find(xparams<p3);
-    fval = objective_function_penalty_base+sum((p3(k)-xparams(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 41;
+    info(1) = 41;
+    info(2) = sum((p3(k)-xparams(k)).^2);
     return
 end
 
 % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
 if ~isequal(DynareOptions.mode_compute,1) && any(xparams>p4)
     k = find(xparams>p4);
-    fval = objective_function_penalty_base+sum((xparams(k)-p4(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 42;
+    info(1) = 42;
+    info(2) = sum((xparams(k)-p4(k)).^2);
     return
 end
 
@@ -71,18 +71,20 @@ if ~issquare(Q) || EstimatedParams.ncx || isfield(EstimatedParams,'calibrated_co
     [Q_is_positive_definite, penalty] = ispd(Q);
     if ~Q_is_positive_definite
         % The variance-covariance matrix of the structural innovations is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty.
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 43;
+        info(1) = 43;
+        info(2) = penalty;
         return
     end
     if isfield(EstimatedParams,'calibrated_covariances')
         correct_flag=check_consistency_covariances(Q);
         if ~correct_flag
             penalty = sum(Q(EstimatedParams.calibrated_covariances.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 71;
+            info(1) = 71;
+            info(2) = penalty;
             return
         end
     end
@@ -94,18 +96,20 @@ if ~issquare(H) || EstimatedParams.ncn || isfield(EstimatedParams,'calibrated_co
     [H_is_positive_definite, penalty] = ispd(H);
     if ~H_is_positive_definite
         % The variance-covariance matrix of the measurement errors is not definite positive. We have to compute the eigenvalues of this matrix in order to build the endogenous penalty.
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 44;
+        info(1) = 44;
+        info(2) = penalty;
         return
     end
     if isfield(EstimatedParams,'calibrated_covariances_ME')
         correct_flag=check_consistency_covariances(H);
         if ~correct_flag
             penalty = sum(H(EstimatedParams.calibrated_covariances_ME.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 72;
+            info(1) = 72;
+            info(2) = penalty;
             return
         end
     end
@@ -122,13 +126,13 @@ M_ = set_all_parameters(xparams,EstimatedParams,DynareModel);
 % Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
 if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) ...
             == 8 || info(1) == 22 || info(1) == 24 || info(1) == 19
-    fval = objective_function_penalty_base+1;
-    info = info(1);
+    fval = Inf;
+    info(1) = info(1);
+    info(2) = 0.1;
     exit_flag = 0;
     return
 elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21  || info(1) == 23
-    fval = objective_function_penalty_base+info(2);
-    info = info(1);
+    fval = Inf;
     exit_flag = 0;
     return
 end
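Note on the convention illustrated above: instead of returning a finite
objective_function_penalty_base-plus-penalty value, the objective now returns
fval = Inf together with an error code in info(1) and the penalty magnitude in
info(2). A caller that still wants a finite, monotone value (for example a
gradient-based optimizer) can rebuild one from those outputs. A minimal sketch,
assuming an objective with the non_linear_dsge_likelihood-style output ordering
and a hypothetical wrapper name and base penalty, none of which is defined by
this patch:

    function fval = penalized_objective(objfun, base_penalty, varargin)
    % Illustrative only: map the (Inf, info) convention back to a finite penalty.
    [fval, ~, ~, exit_flag, info] = objfun(varargin{:});
    if ~exit_flag && isinf(fval) && numel(info) > 1
        fval = base_penalty + info(2);  % info(2) carries the endogenous penalty term
    end
    end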
diff --git a/matlab/non_linear_dsge_likelihood.m b/matlab/non_linear_dsge_likelihood.m
index 43bf42575259828bd9408c9dd660f6c62a44869a..9e5770c1cfc8ae5fc9a6d87549a09ed4984e69c8 100644
--- a/matlab/non_linear_dsge_likelihood.m
+++ b/matlab/non_linear_dsge_likelihood.m
@@ -121,8 +121,6 @@ function [fval,ys,trend_coeff,exit_flag,info,Model,DynareOptions,BayesInfo,Dynar
 % AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT fr
 %           frederic DOT karame AT univ DASH lemans DOT fr
 
-global objective_function_penalty_base
-% Declaration of the penalty as a persistent variable.
 persistent init_flag
 persistent restrict_variables_idx observed_variables_idx state_variables_idx mf0 mf1
 persistent sample_size number_of_state_variables number_of_observed_variables number_of_structural_innovations
@@ -145,18 +143,20 @@ end
 % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
 if (DynareOptions.mode_compute~=1) && any(xparam1<BoundsInfo.lb)
     k = find(xparam1(:) < BoundsInfo.lb);
-    fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 41;
+    info(1) = 41;
+    info(2) = sum((BoundsInfo.lb(k)-xparam1(k)).^2);
     return
 end
 
 % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
 if (DynareOptions.mode_compute~=1) && any(xparam1>BoundsInfo.ub)
     k = find(xparam1(:)>BoundsInfo.ub);
-    fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2);
+    fval = Inf;
     exit_flag = 0;
-    info = 42;
+    info(1) = 42;
+    info(2) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
     return
 end
 
@@ -168,18 +168,20 @@ H = Model.H;
 if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances')
     [Q_is_positive_definite, penalty] = ispd(Q);
     if ~Q_is_positive_definite
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 43;
+        info(1) = 43;
+        info(2) = penalty;
         return
     end
     if isfield(EstimatedParameters,'calibrated_covariances')
         correct_flag=check_consistency_covariances(Q);
         if ~correct_flag
             penalty = sum(Q(EstimatedParameters.calibrated_covariances.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 71;
+            info(1) = 71;
+            info(2) = penalty;
             return
         end
     end
@@ -189,18 +191,20 @@ end
 if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME')
     [H_is_positive_definite, penalty] = ispd(H);
     if ~H_is_positive_definite
-        fval = objective_function_penalty_base+penalty;
+        fval = Inf;
         exit_flag = 0;
-        info = 44;
+        info(1) = 44;
+        info(2) = penalty;
         return
     end
     if isfield(EstimatedParameters,'calibrated_covariances_ME')
         correct_flag=check_consistency_covariances(H);
         if ~correct_flag
             penalty = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2);
-            fval = objective_function_penalty_base+penalty;
+            fval = Inf;
             exit_flag = 0;
-            info = 72;
+            info(1) = 72;
+            info(2) = penalty;
             return
         end
     end
@@ -215,11 +219,12 @@ end
 [T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
 
 if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 25 || info(1) == 10 || info(1) == 7
-    fval = objective_function_penalty_base+1;
+    fval = Inf;
     exit_flag = 0;
+    info(2) = 0.1;
     return
 elseif info(1) == 3 || info(1) == 4 || info(1)==6 ||info(1) == 19 || info(1) == 20 || info(1) == 21
-    fval = objective_function_penalty_base+info(2);
+    fval = Inf;
     exit_flag = 0;
     return
 end
@@ -298,12 +303,14 @@ DynareOptions.warning_for_steadystate = 0;
 LIK = feval(DynareOptions.particle.algorithm,ReducedForm,Y,start,DynareOptions.particle,DynareOptions.threads);
 set_dynare_random_generator_state(s1,s2);
 if imag(LIK)
-    info = 46;
-    likelihood = objective_function_penalty_base;
+    info(1) = 46;
+    info(2) = 0.1;
+    likelihood = Inf;
     exit_flag  = 0;
 elseif isnan(LIK)
-    info = 45;
-    likelihood = objective_function_penalty_base;
+    info(1) = 45;
+    info(2) = 0.1;
+    likelihood = Inf;
     exit_flag  = 0;
 else
     likelihood = LIK;
@@ -316,15 +323,17 @@ lnprior = priordens(xparam1(:),BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesI
 fval    = (likelihood-lnprior);
 
 if isnan(fval)
-    info = 47;
-    fval = objective_function_penalty_base + 100;
+    info(1) = 47;
+    info(2) = 0.1;
+    fval = Inf;
     exit_flag = 0;
     return
 end
 
 if imag(fval)~=0
-    info = 48;
-    fval = objective_function_penalty_base + 100;
+    info(1) = 48;
+    info(2) = 0.1;
+    fval = Inf;
     exit_flag = 0;
     return
 end
diff --git a/matlab/optimization/newrat.m b/matlab/optimization/newrat.m
index a1a2bb0dfc72f774543c75e7ca5bfa6f43f458b7..94309aa0f871ee0c0d4285390f83ecfb28b9abbf 100644
--- a/matlab/optimization/newrat.m
+++ b/matlab/optimization/newrat.m
@@ -47,8 +47,6 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, analytic_derivation, ft
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global objective_function_penalty_base
-
 penalty = 1e8;
 
 icount=0;
diff --git a/matlab/optimize_prior.m b/matlab/optimize_prior.m
index d5b5e0ac121a8a66011c7a4c2293160dd5981ab3..cdb03ecff2f5e659787683c5e2407e43f0094044 100644
--- a/matlab/optimize_prior.m
+++ b/matlab/optimize_prior.m
@@ -43,13 +43,6 @@ while look_for_admissible_initial_condition
     end
 end
 
-% Evaluate the prior density at the initial condition.
-objective_function_penalty_base = minus_logged_prior_density(xinit, BayesInfo.pshape, ...
-                               BayesInfo.p6, ...
-                               BayesInfo.p7, ...
-                               BayesInfo.p3, ...
-                               BayesInfo.p4,DynareOptions,ModelInfo,EstimationInfo,DynareResults);
-
 % Maximization of the prior density
 [xparams, lpd, hessian_mat] = ...
     maximize_prior_density(xinit, BayesInfo.pshape, ...
diff --git a/matlab/parallel/masterParallel.m b/matlab/parallel/masterParallel.m
index 75728bb0f7746afce6ed955b674683505084cb7a..db15443789c3ab66fcaa737acea1a40209062c90 100644
--- a/matlab/parallel/masterParallel.m
+++ b/matlab/parallel/masterParallel.m
@@ -28,8 +28,8 @@ function [fOutVar,nBlockPerCPU, totCPU] = masterParallel(Parallel,fBlock,nBlock,
 % The number of parallelized threads will be equal to (nBlock-fBlock+1).
 %
 % Treatment of global variables:
-%   Global variables used within the called function (e.g.
-%   objective_function_penalty_base) are wrapped and passed by storing their 
+%   Global variables used within the called function are wrapped
+%   and passed by storing their
 %   values at the start of the parallel computation in a file via
 %   storeGlobalVars.m. This file is then loaded in the separate,
 %   independent slave Matlab sessions. By keeping them separate, no
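The comment above describes the file-based hand-off used for globals in parallel
runs. A rough sketch of the general pattern (illustrative only; the actual
interface of storeGlobalVars.m and the slave bootstrap code may differ):

    % --- master session: snapshot the globals before dispatching the slaves ---
    global options_ M_ oo_
    save('global_vars.mat', 'options_', 'M_', 'oo_');

    % --- slave session: restore the snapshot before calling the core routine ---
    global options_ M_ oo_
    tmp = load('global_vars.mat');
    options_ = tmp.options_;  M_ = tmp.M_;  oo_ = tmp.oo_;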
diff --git a/matlab/random_walk_metropolis_hastings.m b/matlab/random_walk_metropolis_hastings.m
index 8f18d1d0f400bf3b69080e67313a43ab7007e5fa..aba7af23c9d35d2656953e3ea98560e495bb043f 100644
--- a/matlab/random_walk_metropolis_hastings.m
+++ b/matlab/random_walk_metropolis_hastings.m
@@ -57,10 +57,6 @@ function random_walk_metropolis_hastings(TargetFun,ProposalFun,xparam1,vv,mh_bou
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
 
-% In Metropolis, we set penalty to Inf so as to reject all parameter sets triggering an error during target density computation
-global objective_function_penalty_base
-objective_function_penalty_base = Inf;
-
 % Initialization of the random walk metropolis-hastings chains.
 [ ix2, ilogpo2, ModelName, MetropolisFolder, fblck, fline, npar, nblck, nruns, NewFile, MAX_nruns, d ] = ...
     metropolis_hastings_initialization(TargetFun, xparam1, vv, mh_bounds,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);