diff --git a/matlab/PosteriorIRF_core1.m b/matlab/PosteriorIRF_core1.m
index 0d0a1a752e4ba9921bc39a74ffb61344b9e02167..891703f51dbb97a9d07c738c8e7ca6946b2931d5 100644
--- a/matlab/PosteriorIRF_core1.m
+++ b/matlab/PosteriorIRF_core1.m
@@ -77,6 +77,7 @@ if options_.dsge_var
     NumberOfLagsTimesNvobs = myinputs.NumberOfLagsTimesNvobs;
     Companion_matrix = myinputs.Companion_matrix;
     stock_irf_bvardsge = zeros(options_.irf,nvobs,M_.exo_nbr,MAX_nirfs_dsgevar);
+    bounds = prior_bounds(bayestopt_,options_);
 end
 
 
@@ -189,7 +190,7 @@ while fpar<B
     end
     if MAX_nirfs_dsgevar
         IRUN = IRUN+1;
-        [fval,junk1,junk2,cost_flag,info,PHI,SIGMAu,iXX] =  dsge_var_likelihood(deep',dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+        [fval,junk1,junk2,cost_flag,info,PHI,SIGMAu,iXX] =  dsge_var_likelihood(deep',dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
         dsge_prior_weight = M_.params(strmatch('dsge_prior_weight',M_.param_names));
         DSGE_PRIOR_WEIGHT = floor(dataset_.nobs*(1+dsge_prior_weight));
         SIGMA_inv_upper_chol = chol(inv(SIGMAu*dataset_.nobs*(dsge_prior_weight+1)));
diff --git a/matlab/check_prior_bounds.m b/matlab/check_prior_bounds.m
index 5e39fb2f0b8dce9575623677f9f19197b5780bbe..5214ce010c0a02f2891eec0763ef3ca284bc736c 100644
--- a/matlab/check_prior_bounds.m
+++ b/matlab/check_prior_bounds.m
@@ -27,7 +27,7 @@ function check_prior_bounds(xparam1,bounds,M_,estim_params_,options_,bayestopt_)
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-outside_bound_pars=find(xparam1 < bounds(:,1) | xparam1 > bounds(:,2));
+outside_bound_pars=find(xparam1 < bounds.lb | xparam1 > bounds.ub);
 if ~isempty(outside_bound_pars)
     for ii=1:length(outside_bound_pars)
         outside_bound_par_names{ii,1}=get_the_name(outside_bound_pars(ii),0,M_,estim_params_,options_);
diff --git a/matlab/dsge_likelihood.m b/matlab/dsge_likelihood.m
index 7869000536a093f76cd93d9f5d6da5d14642f32c..585b7e97a2bf01bd74225b996e133a7d23ce9f84 100644
--- a/matlab/dsge_likelihood.m
+++ b/matlab/dsge_likelihood.m
@@ -1,4 +1,4 @@
-function [fval,DLIK,Hess,exit_flag,SteadyState,trend_coeff,info,Model,DynareOptions,BayesInfo,DynareResults] = dsge_likelihood(xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults,derivatives_info)
+function [fval,DLIK,Hess,exit_flag,SteadyState,trend_coeff,info,Model,DynareOptions,BayesInfo,DynareResults] = dsge_likelihood(xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults,derivatives_info)
 % Evaluates the posterior kernel of a dsge model using the specified
 % kalman_algo; the resulting posterior includes the 2*pi constant of the
 % likelihood function
@@ -177,9 +177,9 @@ end
 %------------------------------------------------------------------------------
 
 % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
-if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BayesInfo.lb)
-    k = find(xparam1<BayesInfo.lb);
-    fval = objective_function_penalty_base+sum((BayesInfo.lb(k)-xparam1(k)).^2);
+if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BoundsInfo.lb)
+    k = find(xparam1<BoundsInfo.lb);
+    fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2);
     exit_flag = 0;
     info = 41;
     if analytic_derivation,
@@ -189,9 +189,9 @@ if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BayesInfo.lb)
 end
 
 % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
-if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BayesInfo.ub)
-    k = find(xparam1>BayesInfo.ub);
-    fval = objective_function_penalty_base+sum((xparam1(k)-BayesInfo.ub(k)).^2);
+if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BoundsInfo.ub)
+    k = find(xparam1>BoundsInfo.ub);
+    fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2);
     exit_flag = 0;
     info = 42;
     if analytic_derivation,
@@ -523,7 +523,7 @@ if analytic_derivation,
     DLIK = [];
     AHess = [];
     iv = DynareResults.dr.restrict_var_list;
-    if nargin<9 || isempty(derivatives_info)
+    if nargin<10 || isempty(derivatives_info)
         [A,B,nou,nou,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults);
         if ~isempty(EstimatedParameters.var_exo)
             indexo=EstimatedParameters.var_exo(:,1);
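Note: the hunks above only rename the bound fields (BayesInfo.lb/ub becomes BoundsInfo.lb/ub); the endogenous penalty itself is unchanged. For readers unfamiliar with that mechanism, a minimal MATLAB sketch with made-up values (objective_function_penalty_base and the bounds are illustrative, not taken from the source):

    % Quadratic penalty added when a parameter leaves the prior domain.
    objective_function_penalty_base = 1e8;
    lb = [0; 0.5];  ub = [1; 0.99];      % stand-ins for BoundsInfo.lb / BoundsInfo.ub
    xparam1 = [-0.2; 0.7];               % first parameter violates its lower bound
    k = find(xparam1 < lb);
    fval = objective_function_penalty_base + sum((lb(k) - xparam1(k)).^2);   % 1e8 + 0.04
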
diff --git a/matlab/dsge_var_likelihood.m b/matlab/dsge_var_likelihood.m
index 75bb5541c5f54b07e4c062876a95cbab0ebca865..07b5d1b735ec3048f5d5afc4e3d9cb6f1573a8e6 100644
--- a/matlab/dsge_var_likelihood.m
+++ b/matlab/dsge_var_likelihood.m
@@ -1,4 +1,4 @@
-function [fval,grad,hess,exit_flag,info,PHI,SIGMAu,iXX,prior] = dsge_var_likelihood(xparam1,DynareDataset,DynareInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults)
+function [fval,grad,hess,exit_flag,info,PHI,SIGMAu,iXX,prior] = dsge_var_likelihood(xparam1,DynareDataset,DynareInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults)
 % Evaluates the posterior kernel of the bvar-dsge model.
 %
 % INPUTS
@@ -81,18 +81,18 @@ fval = [];
 exit_flag = 1;
 
 % Return, with endogenous penalty, if some dsge-parameters are smaller than the lower bound of the prior domain.
-if DynareOptions.mode_compute ~= 1 && any(xparam1 < BayesInfo.lb)
-    k = find(xparam1 < BayesInfo.lb);
-    fval = objective_function_penalty_base+sum((BayesInfo.lb(k)-xparam1(k)).^2);
+if DynareOptions.mode_compute ~= 1 && any(xparam1 < BoundsInfo.lb)
+    k = find(xparam1 < BoundsInfo.lb);
+    fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2);
     exit_flag = 0;
     info = 41;
     return;
 end
 
 % Return, with endogenous penalty, if some dsge-parameters are greater than the upper bound of the prior domain.
-if DynareOptions.mode_compute ~= 1 && any(xparam1 > BayesInfo.ub)
-    k = find(xparam1 > BayesInfo.ub);
-    fval = objective_function_penalty_base+sum((xparam1(k)-BayesInfo.ub(k)).^2);
+if DynareOptions.mode_compute ~= 1 && any(xparam1 > BoundsInfo.ub)
+    k = find(xparam1 > BoundsInfo.ub);
+    fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2);
     exit_flag = 0;
     info = 42;
     return;
diff --git a/matlab/dynare_estimation_1.m b/matlab/dynare_estimation_1.m
index 03992f2f1224b84bf4e941ecd193bef79a2c99d7..59ef2090d2e95199a926a43a5b49fec22934a244 100644
--- a/matlab/dynare_estimation_1.m
+++ b/matlab/dynare_estimation_1.m
@@ -99,7 +99,7 @@ else
     objective_function = str2func('dsge_var_likelihood');
 end
 
-[dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_, bayestopt_] = ...
+[dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_, bayestopt_, bounds] = ...
     dynare_estimation_init(var_list_, dname, [], M_, options_, oo_, estim_params_, bayestopt_);
 
 if options_.dsge_var
@@ -128,9 +128,9 @@ else
     full_calibration_detected=0;
 end
 if options_.use_calibration_initialization %set calibration as starting values
-    [xparam1,estim_params_]=do_parameter_initialization(estim_params_,xparam1_calib,xparam1); %get explicitly initialized parameters that have precedence to calibrated values
+    [xparam1,estim_params_]=do_parameter_initialization(estim_params_,xparam1_calib,xparam1);   %get explicitly initialized parameters that have precedence to calibrated values
     try
-        check_prior_bounds(xparam1,[bayestopt_.lb bayestopt_.ub],M_,estim_params_,options_,bayestopt_); %check whether calibration satisfies prior bounds
+        check_prior_bounds(xparam1,bounds,M_,estim_params_,options_,bayestopt_); %check whether calibration satisfies prior bounds
     catch
         e = lasterror();
         fprintf('Cannot use parameter values from calibration as they violate the prior bounds.')
@@ -162,9 +162,6 @@ np  = estim_params_.np ;  % Number of deep parameters.
 nx  = nvx+nvn+ncx+ncn+np; % Total number of parameters to be estimated.
 %% Set the names of the priors.
 pnames = ['     ';'beta ';'gamm ';'norm ';'invg ';'unif ';'invg2'];
-%% Set parameters bounds
-lb = bayestopt_.lb;
-ub = bayestopt_.ub;
 
 dr = oo_.dr;
 
@@ -188,7 +185,7 @@ end
 
 %% perform initial estimation checks;
 try
-    oo_ = initial_estimation_checks(objective_function,xparam1,dataset_,dataset_info,M_,estim_params_,options_,bayestopt_,oo_);
+    oo_ = initial_estimation_checks(objective_function,xparam1,dataset_,dataset_info,M_,estim_params_,options_,bayestopt_,bounds,oo_);
 catch % if check fails, provide info on using calibration if present
     e = lasterror();
     if full_calibration_detected %calibrated model present and no explicit starting values
@@ -258,7 +255,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
             optim_options = optimset(optim_options,'GradObj','on','TolX',1e-7);
         end
         [xparam1,fval,exitflag,output,lamdba,grad,hessian_fmincon] = ...
-            fmincon(objective_function,xparam1,[],[],[],[],lb,ub,[],optim_options,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+            fmincon(objective_function,xparam1,[],[],[],[],bounds.lb,bounds.ub,[],optim_options,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
       case 2
         error('ESTIMATION: mode_compute=2 option (Lester Ingber''s Adaptive Simulated Annealing) is no longer available')
       case 3
@@ -276,10 +273,10 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
             optim_options = optimset(optim_options,'GradObj','on');
         end
         if ~isoctave
-            [xparam1,fval,exitflag] = fminunc(objective_function,xparam1,optim_options,dataset_,dataset_info_,options_,M_,estim_params_,bayestopt_,oo_);
+            [xparam1,fval,exitflag] = fminunc(objective_function,xparam1,optim_options,dataset_,dataset_info_,options_,M_,estim_params_,bayestopt_,bounds,oo_);
         else
             % Under Octave, use a wrapper, since fminunc() does not have a 4th arg
-            func = @(x) objective_function(x, dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+            func = @(x) objective_function(x, dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
             [xparam1,fval,exitflag] = fminunc(func,xparam1,optim_options);
         end
       case 4
@@ -317,7 +314,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
         end
         % Call csminwell.
         [fval,xparam1,grad,hessian_csminwel,itct,fcount,retcodehat] = ...
-            csminwel1(objective_function, xparam1, H0, analytic_grad, crit, nit, numgrad, epsilon, dataset_, dataset_info, options_, M_, estim_params_, bayestopt_, oo_);
+            csminwel1(objective_function, xparam1, H0, analytic_grad, crit, nit, numgrad, epsilon, dataset_, dataset_info, options_, M_, estim_params_, bayestopt_,bounds, oo_);
         % Disp value at the mode.
         disp(sprintf('Objective function at mode: %f',fval))
       case 5
@@ -345,7 +342,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
         else
             nit=1000;
         end
-        [xparam1,hh,gg,fval,invhess] = newrat(objective_function,xparam1,analytic_grad,crit,nit,0,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,oo_);
+        [xparam1,hh,gg,fval,invhess] = newrat(objective_function,xparam1,analytic_grad,crit,nit,0,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,bounds,oo_);
         if options_.analytic_derivation,
             options_.analytic_derivation = ana_deriv;
         else
@@ -358,7 +355,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
                         options_.kalman_algo=4;
                     end
                 end
-                hh = reshape(mr_hessian(0,xparam1,objective_function,1,crit,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,oo_), nx, nx);
+                hh = reshape(mr_hessian(0,xparam1,objective_function,1,crit,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,bounds,oo_), nx, nx);
                 options_.kalman_algo = kalman_algo0;
             end
         end
@@ -408,7 +405,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
             end
         end
         % Evaluate the objective function.
-        fval = feval(objective_function,xparam1,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,oo_);
+        fval = feval(objective_function,xparam1,dataset_, dataset_info, options_,M_,estim_params_,bayestopt_,bounds,oo_);
         OldMode = fval;
         if ~exist('MeanPar','var')
             MeanPar = xparam1;
@@ -440,8 +437,8 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
                     flag = 'LastCall';
                 end
                 [xparam1,PostVar,Scale,PostMean] = ...
-                    gmhmaxlik(objective_function,xparam1,[lb ub],gmhmaxlikOptions,Scale,flag,MeanPar,CovJump,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
-                fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+                    gmhmaxlik(objective_function,xparam1,[bounds.lb bounds.ub],gmhmaxlikOptions,Scale,flag,MeanPar,CovJump,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
+                fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
                 options_.mh_jscale = Scale;
                 mouvement = max(max(abs(PostVar-OldPostVar)));
                 skipline()
@@ -459,12 +456,12 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
                     flag = 'LastCall';
                 end
                 [xparam1,PostVar,Scale,PostMean] = ...
-                    gmhmaxlik(objective_function,xparam1,[lb ub],...
-                              gmhmaxlikOptions,Scale,flag,PostMean,PostVar,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
-                fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+                    gmhmaxlik(objective_function,xparam1,[bounds.lb bounds.ub],...
+                              gmhmaxlikOptions,Scale,flag,PostMean,PostVar,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
+                fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
                 options_.mh_jscale = Scale;
                 mouvement = max(max(abs(PostVar-OldPostVar)));
-                fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+                fval = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
                 skipline()
                 disp('========================================================== ')
                 disp(['   Change in the covariance matrix = ' num2str(mouvement) '.'])
@@ -497,7 +494,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
         if isfield(options_,'optim_opt')
             eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
         end
-        [xparam1,fval,exitflag] = fminsearch(objective_function,xparam1,optim_options,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+        [xparam1,fval,exitflag] = fminsearch(objective_function,xparam1,optim_options,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
       case 8
         % Dynare implementation of the simplex algorithm.
         simplexOptions = options_.simplex;
@@ -522,7 +519,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
                 end
             end
         end
-        [xparam1,fval,exitflag] = simplex_optimization_routine(objective_function,xparam1,simplexOptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+        [xparam1,fval,exitflag] = simplex_optimization_routine(objective_function,xparam1,simplexOptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
       case 9
         % Set defaults
         H0 = 1e-4*ones(nx,1);
@@ -547,7 +544,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
         end
         warning('off','CMAES:NonfinitenessRange');
         warning('off','CMAES:InitialSigma');
-        [x, fval, COUNTEVAL, STOPFLAG, OUT, BESTEVER] = cmaes(func2str(objective_function),xparam1,H0,cmaesOptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+        [x, fval, COUNTEVAL, STOPFLAG, OUT, BESTEVER] = cmaes(func2str(objective_function),xparam1,H0,cmaesOptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
         xparam1=BESTEVER.x;
         disp(sprintf('\n Objective function at mode: %f',fval))
       case 10
@@ -578,16 +575,16 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
         end
         simpsaOptionsList = options2cell(simpsaOptions);
         simpsaOptions = simpsaset(simpsaOptionsList{:});
-        [xparam1, fval, exitflag] = simpsa(func2str(objective_function),xparam1,lb,ub,simpsaOptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+        [xparam1, fval, exitflag] = simpsa(func2str(objective_function),xparam1,bounds.lb,bounds.ub,simpsaOptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
       case 11
          options_.cova_compute = 0 ;
-         [xparam1,stdh,lb_95,ub_95,med_param] = online_auxiliary_filter(xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_) ;
+         [xparam1,stdh,lb_95,ub_95,med_param] = online_auxiliary_filter(xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_) ;
       case 101
         myoptions=soptions;
         myoptions(2)=1e-6; %accuracy of argument
         myoptions(3)=1e-6; %accuracy of function (see Solvopt p.29)
         myoptions(5)= 1.0;
-        [xparam1,fval]=solvopt(xparam1,objective_function,[],myoptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+        [xparam1,fval]=solvopt(xparam1,objective_function,[],myoptions,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
       case 102
         %simulating annealing
         %        LB=zeros(size(xparam1))-20;
@@ -624,10 +621,10 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
         disp(['c vector   ' num2str(c')]);
 
         [xparam1, fval, nacc, nfcnev, nobds, ier, t, vm] = sa(objective_function,xparam1,maxy,rt_,epsilon,ns,nt ...
-                                                              ,neps,maxevl,LB,UB,c,idisp ,t,vm,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+                                                              ,neps,maxevl,LB,UB,c,idisp ,t,vm,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
       otherwise
         if ischar(options_.mode_compute)
-            [xparam1, fval, retcode ] = feval(options_.mode_compute,objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+            [xparam1, fval, retcode ] = feval(options_.mode_compute,objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
         else
             error(['dynare_estimation:: mode_compute = ' int2str(options_.mode_compute) ' option is unknown!'])
         end
@@ -638,13 +635,13 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
                 ana_deriv = options_.analytic_derivation;
                 options_.analytic_derivation = 2;
                 [junk1, junk2, hh] = feval(objective_function,xparam1, ...
-                    dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+                    dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
                 options_.analytic_derivation = ana_deriv;
             elseif ~(isequal(options_.mode_compute,5) && flag==0), 
                 % with flag==0, we force to use the hessian from outer
                 % product gradient of optimizer 5
                 hh = reshape(hessian(objective_function,xparam1, ...
-                    options_.gstep,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_),nx,nx);
+                    options_.gstep,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_),nx,nx);
             end
         end
     end
@@ -699,7 +696,7 @@ if ~options_.mh_posterior_mode_estimation && options_.cova_compute
         disp('=> posterior variance of the estimated parameters are not positive.')
         disp('You should try to change the initial values of the parameters using')
         disp('the estimated_params_init block, or use another optimization routine.')
-        params_at_bound=find(xparam1==ub | xparam1==lb);
+        params_at_bound=find(xparam1==bounds.ub | xparam1==bounds.lb);
         if ~isempty(params_at_bound)
             for ii=1:length(params_at_bound)
             params_at_bound_name{ii,1}=get_the_name(params_at_bound(ii),0,M_,estim_params_,options_);
@@ -713,6 +710,7 @@ if ~options_.mh_posterior_mode_estimation && options_.cova_compute
             fprintf('   - Check your model for mistakes.\n')
             fprintf('   - Check whether model and data are consistent (correct observation equation).\n')
             fprintf('   - Shut off prior_trunc.\n')
+            fprintf('   - Change the optimization bounds.\n')
             fprintf('   - Use a different mode_compute like 6 or 9.\n')
             fprintf('   - Check whether the parameters estimated are identified.\n')
             fprintf('   - Increase the informativeness of the prior.\n')
@@ -724,7 +722,7 @@ end
 if options_.mode_check.status == 1 && ~options_.mh_posterior_mode_estimation
     ana_deriv = options_.analytic_derivation;
     options_.analytic_derivation = 0;
-    mode_check(objective_function,xparam1,hh,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+    mode_check(objective_function,xparam1,hh,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
     options_.analytic_derivation = ana_deriv;
 end
 
@@ -758,7 +756,7 @@ if any(bayestopt_.pshape > 0) && ~options_.mh_posterior_mode_estimation
         estim_params_nbr = size(xparam1,1);
         scale_factor = -sum(log10(diag(invhess)));
         log_det_invhess = -estim_params_nbr*log(scale_factor)+log(det(scale_factor*invhess));
-        likelihood = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+        likelihood = feval(objective_function,xparam1,dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_);
         oo_.MarginalDensity.LaplaceApproximation = .5*estim_params_nbr*log(2*pi) + .5*log_det_invhess - likelihood;
         skipline()
         disp(sprintf('Log data density [Laplace approximation] is %f.',oo_.MarginalDensity.LaplaceApproximation))
@@ -778,11 +776,7 @@ end
 if (any(bayestopt_.pshape  >0 ) && options_.mh_replic) || ...
         (any(bayestopt_.pshape >0 ) && options_.load_mh_file)  %% not ML estimation
     bounds = prior_bounds(bayestopt_,options_);
-    bounds(:,1)=max(bounds(:,1),lb);
-    bounds(:,2)=min(bounds(:,2),ub);
-    bayestopt_.lb = bounds(:,1);
-    bayestopt_.ub = bounds(:,2);
-    outside_bound_pars=find(xparam1 < bounds(:,1) | xparam1 > bounds(:,2));
+    outside_bound_pars=find(xparam1 < bounds.lb | xparam1 > bounds.ub);
     if ~isempty(outside_bound_pars)
         for ii=1:length(outside_bound_pars)
             outside_bound_par_names{ii,1}=get_the_name(ii,0,M_,estim_params_,options_);
@@ -822,8 +816,7 @@ if (any(bayestopt_.pshape  >0 ) && options_.mh_replic) || ...
         CutSample(M_, options_, estim_params_);
         %% Estimation of the marginal density from the Mh draws:
         if options_.mh_replic
-            [marginal,oo_] = marginal_density(M_, options_, estim_params_, ...
-                                              oo_);
+            [marginal,oo_] = marginal_density(M_, options_, estim_params_, oo_);
             % Store posterior statistics by parameter name
             oo_ = GetPosteriorParametersStatistics(estim_params_, M_, options_, bayestopt_, oo_);
             if ~options_.nograph
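Every objective-function call in dynare_estimation_1.m now passes the new bounds structure between bayestopt_ and oo_, so user-supplied mode_compute routines must forward it in the same position. A minimal sketch of the new calling convention (the anonymous wrapper is illustrative, mirroring the Octave branch of mode_compute=3):

    % Hypothetical wrapper reflecting the new argument order (..., bayestopt_, bounds, oo_).
    func = @(x) objective_function(x, dataset_, dataset_info, options_, M_, ...
                                   estim_params_, bayestopt_, bounds, oo_);
    fval = func(xparam1);
    % gmhmaxlik is the exception: it additionally receives the bounds as the matrix
    % [bounds.lb bounds.ub], while still passing the structure on to the objective.
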
diff --git a/matlab/dynare_estimation_init.m b/matlab/dynare_estimation_init.m
index 71b791220d0ef5d385a259011471ae3cb3e3e0fc..99534d9f9d32e561c4bf4bc18dbc3d5ef429751e 100644
--- a/matlab/dynare_estimation_init.m
+++ b/matlab/dynare_estimation_init.m
@@ -1,4 +1,4 @@
-function [dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_,bayestopt_] = dynare_estimation_init(var_list_, dname, gsa_flag, M_, options_, oo_, estim_params_, bayestopt_)
+function [dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_,bayestopt_, bounds] = dynare_estimation_init(var_list_, dname, gsa_flag, M_, options_, oo_, estim_params_, bayestopt_)
 
 % function dynare_estimation_init(var_list_, gsa_flag)
 % performs initialization tasks before estimation or
@@ -14,7 +14,8 @@ function [dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_,
 %   estim_params_:  structure storing information about estimated
 %                   parameters
 %   bayestopt_:     structure storing information about priors
-
+%   bounds:         structure containing the estimation bounds (fields lb and ub)
+
 % OUTPUTS
 %   dataset_:       the dataset after required transformation
 %   dataset_info:   Various informations about the dataset (descriptive statistics and missing observations).
@@ -288,22 +289,17 @@ if ~isempty(estim_params_)
         end
         % Set prior bounds
         bounds = prior_bounds(bayestopt_,options_);
-        bounds(:,1)=max(bounds(:,1),lb);
-        bounds(:,2)=min(bounds(:,2),ub);
+        bounds.lb = max(bounds.lb,lb);
+        bounds.ub = min(bounds.ub,ub);
     else  % estimated parameters but no declared priors
         % No priors are declared so Dynare will estimate the model by
         % maximum likelihood with inequality constraints for the parameters.
         options_.mh_replic = 0;% No metropolis.
-        bounds(:,1) = lb;
-        bounds(:,2) = ub;
+        bounds.lb = lb;
+        bounds.ub = ub;
     end
     % Test if initial values of the estimated parameters are all between the prior lower and upper bounds.
     check_prior_bounds(xparam1,bounds,M_,estim_params_,options_,bayestopt_)
-
-    lb = bounds(:,1);
-    ub = bounds(:,2);
-    bayestopt_.lb = lb;
-    bayestopt_.ub = ub;
 end
 
 if isempty(estim_params_)% If estim_params_ is empty (e.g. when running the smoother on a calibrated model)
@@ -311,8 +307,6 @@ if isempty(estim_params_)% If estim_params_ is empty (e.g. when running the smoo
         error('Estimation: the ''estimated_params'' block is mandatory (unless you are running a smoother)')
     end
     xparam1 = [];
-    bayestopt_.lb = [];
-    bayestopt_.ub = [];
     bayestopt_.jscale = [];
     bayestopt_.pshape = [];
     bayestopt_.p1 = [];
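When priors are declared, dynare_estimation_init.m now intersects the prior bounds returned by prior_bounds() with any bounds given in the estimated_params block, instead of writing the result back into bayestopt_.lb/ub. A small sketch of that intersection with made-up numbers:

    % Illustrative intersection of prior bounds with declared bounds (lb/ub from estimated_params).
    bounds = struct('lb', [0; -Inf], 'ub', [1; Inf]);   % as returned by prior_bounds()
    lb = [0.1; -10];  ub = [Inf; 10];                   % declared in estimated_params
    bounds.lb = max(bounds.lb, lb);                     % -> [0.1; -10]
    bounds.ub = min(bounds.ub, ub);                     % -> [1;    10]
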
diff --git a/matlab/evaluate_likelihood.m b/matlab/evaluate_likelihood.m
index 14abbd0d4b8fbab6dac06d59c0bd637dd4228075..7e6c1f087b71c646c5ec5a0585b02213385f8324 100644
--- a/matlab/evaluate_likelihood.m
+++ b/matlab/evaluate_likelihood.m
@@ -70,6 +70,6 @@ if isempty(dataset)
     [dataset, dataset_info] = makedataset(options_);
 end
 
-llik = -dsge_likelihood(parameters,dataset,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+llik = -dsge_likelihood(parameters,dataset,dataset_info,options_,M_,estim_params_,bayestopt_,prior_bounds(bayestopt_,options_),oo_);
 ldens = evaluate_prior(parameters);
 llik = llik - ldens;
\ No newline at end of file
diff --git a/matlab/get_prior_info.m b/matlab/get_prior_info.m
index d2ec1810e1bf3a60336c7df6ce59229f3c219c45..cea8ffe4658378f40365231662e93b1cd2735c6c 100644
--- a/matlab/get_prior_info.m
+++ b/matlab/get_prior_info.m
@@ -144,8 +144,8 @@ if size(M_.param_names,1)==size(M_.param_names_tex,1)% All the parameters have a
                 PriorStandardDeviation, ...
                 LowerBound, ...
                 UpperBound, ...
-                PriorIntervals(i,1), ...
-                PriorIntervals(i,2) );
+                PriorIntervals.lb(i), ...
+                PriorIntervals.ub(i) );
     end
     fprintf(fidTeX,'\\end{longtable}\n ');    
     fprintf(fidTeX,'\\end{center}\n');
diff --git a/matlab/gsa/prior_draw_gsa.m b/matlab/gsa/prior_draw_gsa.m
index a1277e3461edbb60017d55c32e8c1a1b77e79bfa..9c6b2cb16797e4237aa6e2e1da6198fcde9dd176 100644
--- a/matlab/gsa/prior_draw_gsa.m
+++ b/matlab/gsa/prior_draw_gsa.m
@@ -42,8 +42,7 @@ function pdraw = prior_draw_gsa(init,rdraw)
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-% global M_ options_ estim_params_  bayestopt_
-global bayestopt_
+global bayestopt_ options_
 persistent npar pshape p6 p7 p3 p4 lbcum ubcum
   
 if init
@@ -56,30 +55,30 @@ if init
     pdraw = zeros(npar,1);
     lbcum = zeros(npar,1);
     ubcum = ones(npar,1);
-    
+    bounds = prior_bounds(bayestopt_,options_);
     % set bounds for cumulative probabilities
     for i = 1:npar
       switch pshape(i)
         case 5% Uniform prior.
-          p4(i) = min(p4(i),bayestopt_.ub(i));
-          p3(i) = max(p3(i),bayestopt_.lb(i));
+          p4(i) = min(p4(i),bounds.ub(i));
+          p3(i) = max(p3(i),bounds.lb(i));
         case 3% Gaussian prior.
-          lbcum(i) = 0.5 * erfc(-(bayestopt_.lb(i)-p6(i))/p7(i) ./ sqrt(2));;
-          ubcum(i) = 0.5 * erfc(-(bayestopt_.ub(i)-p6(i))/p7(i) ./ sqrt(2));;
+          lbcum(i) = 0.5 * erfc(-(bounds.lb(i)-p6(i))/p7(i) ./ sqrt(2));;
+          ubcum(i) = 0.5 * erfc(-(bounds.ub(i)-p6(i))/p7(i) ./ sqrt(2));;
         case 2% Gamma prior.
-          lbcum(i) = gamcdf(bayestopt_.lb(i)-p3(i),p6(i),p7(i));
-          ubcum(i) = gamcdf(bayestopt_.ub(i)-p3(i),p6(i),p7(i));
+          lbcum(i) = gamcdf(bounds.lb(i)-p3(i),p6(i),p7(i));
+          ubcum(i) = gamcdf(bounds.ub(i)-p3(i),p6(i),p7(i));
         case 1% Beta distribution (TODO: generalized beta distribution)
-          lbcum(i) = betainc((bayestopt_.lb(i)-p3(i))./(p4(i)-p3(i)),p6(i),p7(i));
-          ubcum(i) = betainc((bayestopt_.ub(i)-p3(i))./(p4(i)-p3(i)),p6(i),p7(i));
+          lbcum(i) = betainc((bounds.lb(i)-p3(i))./(p4(i)-p3(i)),p6(i),p7(i));
+          ubcum(i) = betainc((bounds.ub(i)-p3(i))./(p4(i)-p3(i)),p6(i),p7(i));
         case 4% INV-GAMMA1 distribution
           % TO BE CHECKED
-          lbcum(i) = gamcdf(1/(bayestopt_.ub(i)-p3(i))^2,p7(i)/2,2/p6(i));
-          ubcum(i) = gamcdf(1/(bayestopt_.lb(i)-p3(i))^2,p7(i)/2,2/p6(i));
+          lbcum(i) = gamcdf(1/(bounds.ub(i)-p3(i))^2,p7(i)/2,2/p6(i));
+          ubcum(i) = gamcdf(1/(bounds.lb(i)-p3(i))^2,p7(i)/2,2/p6(i));
         case 6% INV-GAMMA2 distribution
           % TO BE CHECKED
-          lbcum(i) = gamcdf(1/(bayestopt_.ub(i)-p3(i)),p7(i)/2,2/p6(i));
-          ubcum(i) = gamcdf(1/(bayestopt_.lb(i)-p3(i)),p7(i)/2,2/p6(i));
+          lbcum(i) = gamcdf(1/(bounds.ub(i)-p3(i)),p7(i)/2,2/p6(i));
+          ubcum(i) = gamcdf(1/(bounds.lb(i)-p3(i)),p7(i)/2,2/p6(i));
         otherwise
           % Nothing to do here.
       end
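The lbcum/ubcum values computed above are the prior CDF evaluated at the bounds; prior_draw_gsa later maps uniform numbers into [lbcum, ubcum] and inverts the CDF so that every draw respects the bounds. A minimal sketch for the Gaussian branch, with illustrative numbers (not the literal code of the non-init branch):

    % Truncated Gaussian draws via the CDF, assuming a N(p6, p7) prior.
    p6 = 0.5; p7 = 0.2;                          % prior mean and standard deviation
    lb = 0.1; ub = 0.9;                          % bounds.lb(i), bounds.ub(i)
    lbcum = 0.5*erfc(-(lb - p6)/p7/sqrt(2));     % Phi((lb-p6)/p7)
    ubcum = 0.5*erfc(-(ub - p6)/p7/sqrt(2));
    u = rand(5,1);                               % raw uniform draws
    pdraw = norminv(lbcum + u.*(ubcum - lbcum), p6, p7);   % all values lie in [lb, ub]
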
diff --git a/matlab/gsa/redform_map.m b/matlab/gsa/redform_map.m
index 2a2e9b02791ef75f3d5ab07fff31f962509b1e3e..7421e1c923d6efdd079a4223876e4955ebd0dc57 100644
--- a/matlab/gsa/redform_map.m
+++ b/matlab/gsa/redform_map.m
@@ -54,6 +54,8 @@ alpha2=0;
 pvalue_ks = options_gsa_.ksstat_redform;
 pvalue_corr = options_gsa_.alpha2_redform;
 
+bounds = prior_bounds(bayestopt_,options_);
+
 pnames = M_.param_names(estim_params_.param_vals(:,1),:);
 if nargin==0,
     dirname='';
@@ -98,7 +100,7 @@ end
 offset = length(bayestopt_.pshape)-np;
 if options_gsa_.prior_range,
     pshape=5*(ones(np,1));
-    pd =  [NaN(np,1) NaN(np,1) bayestopt_.lb(offset+1:end) bayestopt_.ub(offset+1:end)];
+    pd =  [NaN(np,1) NaN(np,1) bounds.lb(offset+1:end) bounds.ub(offset+1:end)];
 else
     pshape = bayestopt_.pshape(offset+1:end);
     pd =  [bayestopt_.p6(offset+1:end) bayestopt_.p7(offset+1:end) bayestopt_.p3(offset+1:end) bayestopt_.p4(offset+1:end)];
diff --git a/matlab/gsa/stab_map_.m b/matlab/gsa/stab_map_.m
index 83ae6e10b4392b1ae00aeb5c87b52b4381476f5f..92016f031d9a0a9de7b188605cd12f1282b7fb79 100644
--- a/matlab/gsa/stab_map_.m
+++ b/matlab/gsa/stab_map_.m
@@ -96,6 +96,8 @@ p2 = bayestopt_.p2(nshock+1:end);
 p3 = bayestopt_.p3(nshock+1:end);
 p4 = bayestopt_.p4(nshock+1:end);
 
+bounds = prior_bounds(bayestopt_,options_);
+
 if nargin==0,
     OutputDirectoryName='';
 end
@@ -157,7 +159,7 @@ if fload==0,
                 lpmat0(:,j) = randperm(Nsam)'./(Nsam+1); %latin hypercube
             end
             if opt_gsa.prior_range
-                lpmat0(:,j)=lpmat0(:,j).*(bayestopt_.ub(j)-bayestopt_.lb(j))+bayestopt_.lb(j);
+                lpmat0(:,j)=lpmat0(:,j).*(bounds.ub(j)-bounds.lb(j))+bounds.lb(j);
             end
         end
         if opt_gsa.prior_range
@@ -168,7 +170,7 @@ if fload==0,
             %         end
             %       end
             for j=1:np,
-                lpmat(:,j)=lpmat(:,j).*(bayestopt_.ub(j+nshock)-bayestopt_.lb(j+nshock))+bayestopt_.lb(j+nshock);
+                lpmat(:,j)=lpmat(:,j).*(bounds.ub(j+nshock)-bounds.lb(j+nshock))+bounds.lb(j+nshock);
             end
         else
             xx=prior_draw_gsa(0,[lpmat0 lpmat]);
@@ -221,20 +223,20 @@ if fload==0,
         if neighborhood_width>0,
             for j=1:nshock,
                 lpmat0(:,j) = randperm(Nsam)'./(Nsam+1); %latin hypercube
-                ub=min([bayestopt_.ub(j) xparam1(j)*(1+neighborhood_width)]);
-                lb=max([bayestopt_.lb(j) xparam1(j)*(1-neighborhood_width)]);
+                ub=min([bounds.ub(j) xparam1(j)*(1+neighborhood_width)]);
+                lb=max([bounds.lb(j) xparam1(j)*(1-neighborhood_width)]);
                 lpmat0(:,j)=lpmat0(:,j).*(ub-lb)+lb;
             end
             for j=1:np,
-                ub=min([bayestopt_.ub(j+nshock) xparam1(j+nshock)*(1+neighborhood_width)]);
-                lb=max([bayestopt_.lb(j+nshock) xparam1(j+nshock)*(1-neighborhood_width)]);
+                ub=min([bounds.ub(j+nshock) xparam1(j+nshock)*(1+neighborhood_width)]);
+                lb=max([bounds.lb(j+nshock) xparam1(j+nshock)*(1-neighborhood_width)]);
                 lpmat(:,j)=lpmat(:,j).*(ub-lb)+lb;
             end
         else
             d = chol(inv(hh));
             lp=randn(Nsam*2,nshock+np)*d+kron(ones(Nsam*2,1),xparam1');
             for j=1:Nsam*2,
-                lnprior(j) = any(lp(j,:)'<=bayestopt_.lb | lp(j,:)'>=bayestopt_.ub);
+                lnprior(j) = any(lp(j,:)'<=bounds.lb | lp(j,:)'>=bounds.ub);
             end
             ireal=[1:2*Nsam];
             ireal=ireal(find(lnprior==0));
@@ -620,7 +622,7 @@ if length(iunstable)>0 ,
             stab_map_2(lpmat(iwrong,:),alpha2, pvalue_corr, awrongname, OutputDirectoryName,xparam1,awrongtitle);
         end
         
-        x0=0.5.*(bayestopt_.ub(1:nshock)-bayestopt_.lb(1:nshock))+bayestopt_.lb(1:nshock);
+        x0=0.5.*(bounds.ub(1:nshock)-bounds.lb(1:nshock))+bounds.lb(1:nshock);
         x0 = [x0; lpmat(istable(1),:)'];
         if istable(end)~=Nsam
             M_.params(estim_params_.param_vals(:,1)) = lpmat(istable(1),:)';
@@ -633,7 +635,7 @@ if length(iunstable)>0 ,
     end
 else
     disp('All parameter values in the specified ranges give unique saddle-path solution!')
-        x0=0.5.*(bayestopt_.ub(1:nshock)-bayestopt_.lb(1:nshock))+bayestopt_.lb(1:nshock);
+        x0=0.5.*(bounds.ub(1:nshock)-bounds.lb(1:nshock))+bounds.lb(1:nshock);
         x0 = [x0; lpmat(istable(1),:)'];
 
 end
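In stab_map_.m the bounds now also drive the prior_range sampling: Latin-hypercube numbers in (0,1) are mapped onto [lb, ub] by the affine transformation used in the hunks above. A one-line sketch with illustrative values:

    % Affine mapping of a Latin-hypercube column onto [lb, ub] (Nsam and bounds are made up).
    Nsam = 10;
    u  = randperm(Nsam)'./(Nsam+1);      % evenly stratified points in (0,1)
    lb = 0.2; ub = 0.8;
    draws = u.*(ub - lb) + lb;           % spread uniformly over [0.2, 0.8]
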
diff --git a/matlab/independent_metropolis_hastings_core.m b/matlab/independent_metropolis_hastings_core.m
index 7a9a25c7998de36b7e361afc0b2c2d55f8362554..3e2ae2e42f6a95dcadd2af38e9c846d614f8a107 100644
--- a/matlab/independent_metropolis_hastings_core.m
+++ b/matlab/independent_metropolis_hastings_core.m
@@ -150,7 +150,7 @@ for b = fblck:nblck,
     j = 1;
     while j <= nruns(b)
         par = feval(ProposalFun, xparam1, proposal_covariance, n);
-        if all( par(:) > mh_bounds(:,1) ) && all( par(:) < mh_bounds(:,2) )
+        if all( par(:) > mh_bounds.lb ) && all( par(:) < mh_bounds.ub )
             try
                 logpost = - feval(TargetFun, par(:),varargin{:});
             catch,
diff --git a/matlab/initial_estimation_checks.m b/matlab/initial_estimation_checks.m
index ea8fd96eb1ed70d3f8575a984ca3b2266d9d1417..45189b0b18aac5da90e219d662bcd1fb4fc965f4 100644
--- a/matlab/initial_estimation_checks.m
+++ b/matlab/initial_estimation_checks.m
@@ -1,4 +1,4 @@
-function DynareResults = initial_estimation_checks(objective_function,xparam1,DynareDataset,DatasetInfo,Model,EstimatedParameters,DynareOptions,BayesInfo,DynareResults)
+function DynareResults = initial_estimation_checks(objective_function,xparam1,DynareDataset,DatasetInfo,Model,EstimatedParameters,DynareOptions,BayesInfo,BoundsInfo,DynareResults)
 % function initial_estimation_checks(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
 % Checks data (complex values, ML evaluation, initial values, BK conditions,..)
 %
@@ -75,7 +75,7 @@ test_for_deep_parameters_calibration(Model);
 % Evaluate the likelihood.
 ana_deriv = DynareOptions.analytic_derivation;
 DynareOptions.analytic_derivation=0;
-[fval,junk1,junk2,a,b,c,d] = feval(objective_function,xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+[fval,junk1,junk2,a,b,c,d] = feval(objective_function,xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults);
 DynareOptions.analytic_derivation=ana_deriv;
 
 if DynareOptions.dsge_var || strcmp(func2str(objective_function),'non_linear_dsge_likelihood')
diff --git a/matlab/metropolis_hastings_initialization.m b/matlab/metropolis_hastings_initialization.m
index 7fb056553842a8387c9c1a110d22ea7bc32dfd25..e8f4d0ef156fd80d62af87b542539d077bd07f62 100644
--- a/matlab/metropolis_hastings_initialization.m
+++ b/matlab/metropolis_hastings_initialization.m
@@ -108,9 +108,9 @@ if ~options_.load_mh_file && ~options_.mh_recover
             trial = 1;
             while validate == 0 && trial <= 10
                 candidate = rand_multivariate_normal( transpose(xparam1), d * options_.mh_init_scale, npar);
-                if all(candidate(:) > mh_bounds(:,1)) && all(candidate(:) < mh_bounds(:,2)) 
+                if all(candidate(:) > mh_bounds.lb) && all(candidate(:) < mh_bounds.ub) 
                     ix2(j,:) = candidate;
-                    ilogpo2(j) = - feval(TargetFun,ix2(j,:)',dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+                    ilogpo2(j) = - feval(TargetFun,ix2(j,:)',dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_);
                     if ~isfinite(ilogpo2(j)) % if returned log-density is
                                              % Inf or Nan (penalized value)
                         validate = 0;
@@ -150,9 +150,9 @@ if ~options_.load_mh_file && ~options_.mh_recover
     else% Case 2: one chain (we start from the posterior mode)
         fprintf(fidlog,['  Initial values of the parameters:\n']);
         candidate = transpose(xparam1(:));%
-        if all(candidate(:) > mh_bounds(:,1)) && all(candidate(:) < mh_bounds(:,2)) 
+        if all(candidate(:) > mh_bounds.lb) && all(candidate(:) < mh_bounds.ub) 
             ix2 = candidate;
-            ilogpo2 = - feval(TargetFun,ix2',dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+            ilogpo2 = - feval(TargetFun,ix2',dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_);
             disp('Estimation::mcmc: Initialization at the posterior mode.')
             skipline()
             fprintf(fidlog,['    Blck ' int2str(1) 'params:\n']);
diff --git a/matlab/mode_check.m b/matlab/mode_check.m
index 2fa6f4a9d145b1d8bcbd35fd8e538fab02f25e00..fd80782d7a123fd95d5102f50a535bac03466aed 100644
--- a/matlab/mode_check.m
+++ b/matlab/mode_check.m
@@ -1,4 +1,4 @@
-function mode_check(fun,x,hessian,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults)
+function mode_check(fun,x,hessian,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults)
 % Checks the estimated ML mode or Posterior mode.
 
 %@info:
@@ -62,7 +62,7 @@ if ~isempty(hessian);
     [ s_min, k ] = min(diag(hessian));
 end
 
-fval = feval(fun,x,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+fval = feval(fun,x,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults);
 
 if ~isempty(hessian);
     skipline()
@@ -109,23 +109,23 @@ for plt = 1:nbplt,
             end
         end
         xx = x;
-        l1 = max(BayesInfo.lb(kk),(1-sign(x(kk))*ll)*x(kk)); m1 = 0; %lower bound
-        l2 = min(BayesInfo.ub(kk),(1+sign(x(kk))*ll)*x(kk)); %upper bound
+        l1 = max(BoundsInfo.lb(kk),(1-sign(x(kk))*ll)*x(kk)); m1 = 0; %lower bound
+        l2 = min(BoundsInfo.ub(kk),(1+sign(x(kk))*ll)*x(kk)); %upper bound
         binding_lower_bound=0;
         binding_upper_bound=0;
-        if isequal(x(kk),BayesInfo.lb(kk))
+        if isequal(x(kk),BoundsInfo.lb(kk))
             binding_lower_bound=1;
-            bound_value=BayesInfo.lb(kk);
-        elseif isequal(x(kk),BayesInfo.ub(kk))
+            bound_value=BoundsInfo.lb(kk);
+        elseif isequal(x(kk),BoundsInfo.ub(kk))
             binding_upper_bound=1;
-            bound_value=BayesInfo.ub(kk);           
+            bound_value=BoundsInfo.ub(kk);
         end      
         if DynareOptions.mode_check.symmetric_plots && ~binding_lower_bound && ~binding_upper_bound
             if l2<(1+ll)*x(kk) %test whether upper bound is too small due to prior binding
                 l1 = x(kk) - (l2-x(kk)); %adjust lower bound to become closer
                 m1 = 1;
             end
-            if ~m1 && (l1>(1-ll)*x(kk)) && (x(kk)+(x(kk)-l1)<BayesInfo.ub(kk)) % if lower bound was truncated and using difference from lower bound does not violate upper bound
+            if ~m1 && (l1>(1-ll)*x(kk)) && (x(kk)+(x(kk)-l1)<BoundsInfo.ub(kk)) % if lower bound was truncated and using difference from lower bound does not violate upper bound
                 l2 = x(kk) + (x(kk)-l1); %set upper bound to same distance as lower bound
             end
         end
@@ -138,7 +138,7 @@ for plt = 1:nbplt,
         end
         for i=1:length(z)
             xx(kk) = z(i);
-            [fval, junk1, junk2, exit_flag] = feval(fun,xx,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults);
+            [fval, junk1, junk2, exit_flag] = feval(fun,xx,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults);
             if exit_flag
                 y(i,1) = fval;
             else
diff --git a/matlab/mr_hessian.m b/matlab/mr_hessian.m
index 1cdbb05b1736ca95f885e1d228044a13317f3dbd..109c0147cf302b00c0844e08b2c921e516ece44d 100644
--- a/matlab/mr_hessian.m
+++ b/matlab/mr_hessian.m
@@ -28,7 +28,8 @@ function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1] = mr_hessian(init,x,func,hf
 %  varargin{4} --> Model
 %  varargin{5} --> EstimatedParameters
 %  varargin{6} --> BayesInfo
-%  varargin{1} --> DynareResults
+%  varargin{7} --> BoundsInfo
+%  varargin{8} --> DynareResults
 
 
 
@@ -60,9 +61,9 @@ if init
 end
 
 [f0, ff0]=feval(func,x,varargin{:});
-h2=varargin{6}.ub-varargin{6}.lb;
-hmax=varargin{6}.ub-x;
-hmax=min(hmax,x-varargin{6}.lb);
+h2=varargin{7}.ub-varargin{7}.lb;
+hmax=varargin{7}.ub-x;
+hmax=min(hmax,x-varargin{7}.lb);
 if isempty(ff0),
     outer_product_gradient=0;
 else
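mr_hessian.m now takes the bounds from varargin{7} (the new BoundsInfo argument) and uses them to cap the finite-difference step sizes. A short sketch of what h2 and hmax represent, with hypothetical numbers:

    % Step-size caps derived from the bounds (varargin{7} in the new calling convention).
    x  = [0.3; 0.95];
    lb = [0; 0.5];  ub = [1; 0.99];
    h2   = ub - lb;                      % admissible range per parameter: [1;   0.49]
    hmax = min(ub - x, x - lb);          % largest step staying inside the bounds: [0.3; 0.04]
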
diff --git a/matlab/non_linear_dsge_likelihood.m b/matlab/non_linear_dsge_likelihood.m
index 2f39016754e64b2b88f60b88ae14272b48be1afa..f3122ec739a76926ecb3f199e92280ffc94b2c4e 100644
--- a/matlab/non_linear_dsge_likelihood.m
+++ b/matlab/non_linear_dsge_likelihood.m
@@ -1,4 +1,4 @@
-function [fval,ys,trend_coeff,exit_flag,info,Model,DynareOptions,BayesInfo,DynareResults] = non_linear_dsge_likelihood(xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults)
+function [fval,ys,trend_coeff,exit_flag,info,Model,DynareOptions,BayesInfo,DynareResults] = non_linear_dsge_likelihood(xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults)
 % Evaluates the posterior kernel of a dsge model using a non linear filter.
 
 %@info:
@@ -143,18 +143,18 @@ end
 %------------------------------------------------------------------------------
 
 % Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
-if (DynareOptions.mode_compute~=1) && any(xparam1<BayesInfo.lb)
-    k = find(xparam1(:) < BayesInfo.lb);
-    fval = objective_function_penalty_base+sum((BayesInfo.lb(k)-xparam1(k)).^2);
+if (DynareOptions.mode_compute~=1) && any(xparam1<BoundsInfo.lb)
+    k = find(xparam1(:) < BoundsInfo.lb);
+    fval = objective_function_penalty_base+sum((BoundsInfo.lb(k)-xparam1(k)).^2);
     exit_flag = 0;
     info = 41;
     return
 end
 
 % Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
-if (DynareOptions.mode_compute~=1) && any(xparam1>BayesInfo.ub)
-    k = find(xparam1(:)>BayesInfo.ub);
-    fval = objective_function_penalty_base+sum((xparam1(k)-BayesInfo.ub(k)).^2);
+if (DynareOptions.mode_compute~=1) && any(xparam1>BoundsInfo.ub)
+    k = find(xparam1(:)>BoundsInfo.ub);
+    fval = objective_function_penalty_base+sum((xparam1(k)-BoundsInfo.ub(k)).^2);
     exit_flag = 0;
     info = 42;
     return
diff --git a/matlab/prior_bounds.m b/matlab/prior_bounds.m
index 4eb290e57ddcd9597d34f2360542eef6f22d5970..a224c69a0a184e774e8ca4bd2ba9a21144750947 100644
--- a/matlab/prior_bounds.m
+++ b/matlab/prior_bounds.m
@@ -21,8 +21,7 @@ function bounds = prior_bounds(bayestopt,options)
 %! @sp 1
 %! @table @ @var
 %! @item bounds
-%! p*2 matrix of doubles, where p is the number of estimated parameters. The first and second columns report
-%! respectively the lower and upper bounds.
+%! A structure with two fields, lb and ub (p*1 vectors of doubles, where p is the number of estimated parameters), holding respectively the lower and upper bounds.
 %! @end table
 %! @sp 2
 %! @strong{This function is called by:}
@@ -43,7 +42,7 @@ function bounds = prior_bounds(bayestopt,options)
 %    bayestopt  [structure]  characterizing priors (shape, mean, p1..p4)
 %    
 % OUTPUTS
-%    bounds     [double]      matrix specifying prior bounds (row= parameter, column=lower&upper bound)
+%    bounds     [structure]   prior bounds with fields lb and ub
 %    
 % SPECIAL REQUIREMENTS
 %    none
@@ -72,27 +71,27 @@ p6 = bayestopt.p6;
 p7 = bayestopt.p7;
 prior_trunc = options.prior_trunc;
 
-bounds = zeros(length(p6),2);
+bounds.lb = zeros(length(p6),1);
+bounds.ub = zeros(length(p6),1);
 
 for i=1:length(p6)
     switch pshape(i)
       case 1
         if prior_trunc == 0
-            bounds(i,1) = p3(i);
-            bounds(i,2) = p4(i);
+            bounds.lb(i) = p3(i);
+            bounds.ub(i) = p4(i);
         else
-            bounds(i,1) = betainv(prior_trunc,p6(i),p7(i))*(p4(i)-p3(i))+p3(i);
-            bounds(i,2) = betainv(1-prior_trunc,p6(i),p7(i))* ...
-                (p4(i)-p3(i))+p3(i);
+            bounds.lb(i) = betainv(prior_trunc,p6(i),p7(i))*(p4(i)-p3(i))+p3(i);
+            bounds.ub(i) = betainv(1-prior_trunc,p6(i),p7(i))*(p4(i)-p3(i))+p3(i);
         end
       case 2
         if prior_trunc == 0
-            bounds(i,1) = p3(i);
-            bounds(i,2) = Inf;
+            bounds.lb(i) = p3(i);
+            bounds.ub(i) = Inf;
         else
             try
-                bounds(i,1) = gaminv(prior_trunc,p6(i),p7(i))+p3(i);
-                bounds(i,2) = gaminv(1-prior_trunc,p6(i),p7(i))+p3(i);
+                bounds.lb(i) = gaminv(prior_trunc,p6(i),p7(i))+p3(i);
+                bounds.ub(i) = gaminv(1-prior_trunc,p6(i),p7(i))+p3(i);
             catch
                 % Workaround for ticket #161
                 if isoctave
@@ -104,21 +103,20 @@ for i=1:length(p6)
         end
       case 3
         if prior_trunc == 0
-            bounds(i,1) = -Inf;
-            bounds(i,2) = Inf;
+            bounds.lb(i) = -Inf;
+            bounds.ub(i) = Inf;
         else
-            bounds(i,1) = norminv(prior_trunc,p6(i),p7(i));
-            bounds(i,2) = norminv(1-prior_trunc,p6(i),p7(i));
+            bounds.lb(i) = norminv(prior_trunc,p6(i),p7(i));
+            bounds.ub(i) = norminv(1-prior_trunc,p6(i),p7(i));
         end
       case 4
         if prior_trunc == 0
-            bounds(i,1) = p3(i);
-            bounds(i,2) = Inf;
+            bounds.lb(i) = p3(i);
+            bounds.ub(i) = Inf;
         else
             try
-                bounds(i,1) = 1/sqrt(gaminv(1-prior_trunc, p7(i)/2, 2/p6(i)))+p3(i);
-                bounds(i,2) = 1/sqrt(gaminv(prior_trunc, p7(i)/2, ...
-                                            2/p6(i)))+p3(i);
+                bounds.lb(i) = 1/sqrt(gaminv(1-prior_trunc, p7(i)/2, 2/p6(i)))+p3(i);
+                bounds.ub(i) = 1/sqrt(gaminv(prior_trunc, p7(i)/2, 2/p6(i)))+p3(i);
             catch
                 % Workaround for ticket #161
                 if isoctave
@@ -130,20 +128,20 @@ for i=1:length(p6)
         end
       case 5
         if prior_trunc == 0
-            bounds(i,1) = p6(i);
-            bounds(i,2) = p7(i);
+            bounds.lb(i) = p6(i);
+            bounds.ub(i) = p7(i);
         else
-            bounds(i,1) = p6(i)+(p7(i)-p6(i))*prior_trunc;
-            bounds(i,2) = p7(i)-(p7(i)-p6(i))*prior_trunc;
+            bounds.lb(i) = p6(i)+(p7(i)-p6(i))*prior_trunc;
+            bounds.ub(i) = p7(i)-(p7(i)-p6(i))*prior_trunc;
         end
       case 6
         if prior_trunc == 0
-            bounds(i,1) = p3(i);
-            bounds(i,2) = Inf;
+            bounds.lb(i) = p3(i);
+            bounds.ub(i) = Inf;
         else
             try
-                bounds(i,1) = 1/gaminv(1-prior_trunc, p7(i)/2, 2/p6(i))+p3(i);
-                bounds(i,2) = 1/gaminv(prior_trunc, p7(i)/2, 2/p6(i))+ p3(i);
+                bounds.lb(i) = 1/gaminv(1-prior_trunc, p7(i)/2, 2/p6(i))+p3(i);
+                bounds.ub(i) = 1/gaminv(prior_trunc, p7(i)/2, 2/p6(i))+ p3(i);
             catch
                 % Workaround for ticket #161
                 if isoctave
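prior_bounds() now returns a structure instead of the former p*2 matrix, which is why every caller above switches from column indexing to the lb/ub fields. A minimal sketch of the new access pattern (the structure below is a stand-in; in Dynare it comes from prior_bounds(bayestopt_, options_)):

    % Old code indexed a p-by-2 matrix, bounds(:,1) and bounds(:,2);
    % the struct fields replace those columns one for one.
    bounds  = struct('lb', [0; -1; 0.5], 'ub', [1; 1; 0.99]);
    xparam1 = [0.4; 1.2; 0.6];
    outside_bound_pars = find(xparam1 < bounds.lb | xparam1 > bounds.ub);   % -> 2
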
diff --git a/matlab/prior_draw.m b/matlab/prior_draw.m
index 8edf9954e3088e231f051d524349a0db779cc4e1..8923be87c531441714f3445a028f8438d9831200 100644
--- a/matlab/prior_draw.m
+++ b/matlab/prior_draw.m
@@ -42,8 +42,9 @@ if nargin>0 && init
     p7 = evalin('base', 'bayestopt_.p7');
     p3 = evalin('base', 'bayestopt_.p3');
     p4 = evalin('base', 'bayestopt_.p4');
-    lb = evalin('base', 'bayestopt_.lb');
-    ub = evalin('base', 'bayestopt_.ub');
+    bounds = evalin('base', 'prior_bounds(bayestopt_,options_)');
+    lb = bounds.lb;
+    ub = bounds.ub;
     number_of_estimated_parameters = length(p6);
     if nargin>1 && uniform
         prior_shape = repmat(5,number_of_estimated_parameters,1);
diff --git a/matlab/random_walk_metropolis_hastings_core.m b/matlab/random_walk_metropolis_hastings_core.m
index 62e1728eb9790d3bbe01b00fdc069ec74c867948..90c89e871d91aaf7fdbe76dc78fa55b82b9a562b 100644
--- a/matlab/random_walk_metropolis_hastings_core.m
+++ b/matlab/random_walk_metropolis_hastings_core.m
@@ -163,9 +163,9 @@ for b = fblck:nblck,
     j = 1;
     while j <= nruns(b)
         par = feval(ProposalFun, ix2(b,:), proposal_covariance_Cholesky_decomposition, n);
-        if all( par(:) > mh_bounds(:,1) ) && all( par(:) < mh_bounds(:,2) )
+        if all( par(:) > mh_bounds.lb ) && all( par(:) < mh_bounds.ub )
             try
-                logpost = - feval(TargetFun, par(:),dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,oo_);
+                logpost = - feval(TargetFun, par(:),dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_);
             catch
                 logpost = -inf;
             end
diff --git a/matlab/set_prior.m b/matlab/set_prior.m
index 64fbfd3a5290fd57a335697754f337596f27c987..9fcbd10b3e99aefdd90812b545598862538c3da4 100644
--- a/matlab/set_prior.m
+++ b/matlab/set_prior.m
@@ -152,9 +152,6 @@ if np
     bayestopt_.name = [bayestopt_.name; cellstr(M_.param_names(estim_params_.param_vals(:,1),:))];
 end
 
-bayestopt_.ub = ub;
-bayestopt_.lb = lb;
-
 bayestopt_.p6 = NaN(size(bayestopt_.p1)) ;
 bayestopt_.p7 = bayestopt_.p6 ;