diff --git a/doc/dynare.texi b/doc/dynare.texi
index 19d1c4e1de01c40e822b28f06f0fc4fa128a7822..3a2bcaf5e8595556f6ffc0b7cde488a45b1586bd 100644
--- a/doc/dynare.texi
+++ b/doc/dynare.texi
@@ -4797,7 +4797,7 @@ Default value is @code{1}. For advanced use only.
 For internal use and testing only.
 
 @item conf_sig = @var{DOUBLE}
-Confidence interval used for classical forecasting after estimation. See @xref{conf_sig}.
+Confidence interval used for classical forecasting after estimation. @xref{conf_sig}.
 
 @item mh_conf_sig = @var{DOUBLE}
 @anchor{mh_conf_sig} 
@@ -4949,6 +4949,18 @@ value of that function as the posterior mode.
 @noindent
 Default value is @code{4}.
 
+@item silent_optimizer
+@anchor{silent_optimizer}
+Instructs Dynare to run mode computation/optimization silently, without displaying
+results or saving intermediate files. Useful when running estimation in a loop.
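+
+For example, to suppress all optimizer output while estimating a model
+repeatedly inside a loop, a hypothetical call would be:
+
+@example
+estimation(datafile=mydata, mode_compute=4, silent_optimizer);
+@end example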
+
 @item mcmc_jumping_covariance = hessian|prior_variance|identity_matrix|@var{FILENAME}
 Tells Dynare which covariance to use for the proposal density of the MCMC sampler. @code{mcmc_jumping_covariance} can be one of the following:
 
@@ -5074,6 +5079,20 @@ Size of the perturbation used to compute numerically the gradient of the objecti
 @item 'TolFun'
 Stopping criteria. Default: @code{1e-7}
 
+@item 'verbosity'
+Controls the verbosity of the display during optimization. Set to 0 to run silently. Default: @code{1}
+
+@item 'SaveFiles'
+Controls the saving of intermediate results during optimization. Set to 0 to disable saving; see the example after this table. Default: @code{1}
+
 @end table
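+
+For example, to run @code{mode_compute=4} silently and without saving
+intermediate files, a hypothetical call would be:
+
+@example
+estimation(datafile=mydata, mode_compute=4,
+           optim=('verbosity',0,'SaveFiles',0));
+@end example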
 
 @item 5
@@ -5093,6 +5104,12 @@ Maximum number of iterations. Default: @code{1000}
 @item 'TolFun'
 Stopping criteria. Default: @code{1e-5} for numerical derivatives @code{1e-7} for analytic derivatives.
 
+@item 'verbosity'
+Controls the verbosity of the display during optimization. Set to 0 to run silently. Default: @code{1}
+
+@item 'SaveFiles'
+Controls the saving of intermediate results during optimization. Set to 0 to disable saving. Default: @code{1}
+
 @end table
 
 @item 6
@@ -5143,6 +5160,9 @@ Tolerance parameter (w.r.t the objective function). Default: @code{1e-4}
 @item 'TolX'
 Tolerance parameter (w.r.t the instruments). Default: @code{1e-4}
 
+@item 'verbosity'
+Controls the verbosity of the display during optimization. Set to 0 to run silently. Default: @code{1}
+
 @end table
 
 @item 9
@@ -5162,6 +5182,12 @@ Tolerance parameter (w.r.t the objective function). Default: @code{1e-7}
 @item 'TolX'
 Tolerance parameter (w.r.t the instruments). Default: @code{1e-7}
 
+@item 'verbosity'
+Controls the verbosity of the display during optimization. Set to 0 to run silently. Default: @code{1}
+
+@item 'SaveFiles'
+Controls the saving of intermediate results during optimization. Set to 0 to disable saving. Default: @code{1}
+
 @end table
 
 @item 10
@@ -5184,6 +5210,9 @@ Tolerance parameter (w.r.t the objective function). Default: @code{1e-4}
 @item 'TolX'
 Tolerance parameter (w.r.t the instruments). Default: @code{1e-4}
 
+@item 'verbosity'
+Controls the verbosity of the display during optimization. Set to 0 to run silently. Default: @code{1}
+
 @end table
 
 @item 101
@@ -5206,6 +5235,9 @@ Tolerance parameter (w.r.t the objective function). Default: @code{1e-6}
 @item 'TolX'
 Tolerance parameter (w.r.t the instruments). Default: @code{1e-6}
 
+@item 'verbosity'
+Controls the verbosity of the display during optimization. Set to 0 to run silently. Default: @code{1}
+
 @end table
 
 @item 102
@@ -5250,6 +5282,41 @@ deprecated and will be removed in a future release of Dynare.
 @anchor{dsge_varlag} The number of lags used to estimate a DSGE-VAR
 model. Default: @code{4}.
 
+
+@item use_tarb
+Instructs Dynare to use the Tailored Randomized Block (TaRB) Metropolis-Hastings algorithm
+proposed by @cite{Chib and Ramamurthy (2010)} instead of the standard Random-Walk Metropolis-Hastings.
+In this algorithm, the estimated parameters are randomly assigned to different
+blocks at each iteration. For each of these blocks, a mode-finding step is conducted. The inverse Hessian at this mode
+is then used as the covariance of the proposal density for a Random-Walk Metropolis-Hastings step.
+If the numerical Hessian is not positive definite, the generalized Cholesky decomposition of
+@cite{Schnabel and Eskow (1990)} is used, but without pivoting. The TaRB-MH algorithm massively reduces
+the autocorrelation in the MH draws and thus the number of draws required to sample
+representatively from the posterior. However, this comes at a computational cost, as the
+algorithm takes more time to run.
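+
+As an illustration, a hypothetical estimation run using the TaRB sampler
+could look as follows:
+
+@example
+estimation(datafile=mydata, mh_replic=5000, mh_nblocks=2,
+           use_tarb, tarb_new_block_probability=0.3,
+           tarb_mode_compute=4);
+@end example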
+
+@item tarb_new_block_probability = @var{DOUBLE}
+Specifies the probability that the next parameter starts a new block when the random blocking of the TaRB
+Metropolis-Hastings algorithm is conducted. The higher this probability, the smaller the average block size and the more
+blocks are formed in each parameter sweep; with the default of @code{0.25}, blocks contain four parameters on average. Default: @code{0.25}.
+
+@item tarb_mode_compute = @var{INTEGER}
+Specifies the mode-finder run in every iteration for every block of the 
+TaRB Metropolis-Hastings algorithm. @xref{mode_compute}. Default: @code{4}. 
+
+@item tarb_optim = (@var{NAME}, @var{VALUE}, ...)
+Specifies the options for the mode-finder used by the TaRB
+Metropolis-Hastings algorithm. @xref{optim}.
+
 @item moments_varendo
 @anchor{moments_varendo} Triggers the computation of the posterior
 distribution of the theoretical moments of the endogenous
@@ -6666,6 +6724,9 @@ Specifies the optimizer for minimizing the objective function. The same solvers
 @item optim = (@var{NAME}, @var{VALUE}, ...)
 A list of @var{NAME} and @var{VALUE} pairs. Can be used to set options for the optimization routines. The set of available options depends on the selected optimization routine (i.e. on the value of option @ref{opt_algo}). @xref{optim}.
 
+@item silent_optimizer
+@pxref{silent_optimizer}
+
 @item huge_number = @var{DOUBLE}
 Value for replacing the infinite bounds on parameters by finite numbers. Used by some optimizers for numerical reasons (@pxref{huge_number}).
 Users need to make sure that the optimal parameters are not larger than this value. Default: @code{1e7}
@@ -9366,7 +9427,7 @@ Note that creating the configuration file is not enough in order to
 trigger parallelization of the computations: you also need to specify
 the @code{parallel} option to the @code{dynare} command. For more
 details, and for other options related to the parallelization engine,
-see @pxref{Dynare invocation}.
+@pxref{Dynare invocation}.
 
 You also need to verify that the following requirements are met by
 your cluster (which is composed of a master and of one or more
@@ -12552,6 +12613,17 @@ computational and graphical statistics}, 7, pp. 434--455
 @item
 Cardoso, Margarida F., R. L. Salcedo and S. Feyo de Azevedo (1996): ``The simplex simulated annealing approach to continuous non-linear optimization,'' @i{Computers chem. Engng}, 20(9), 1065-1080
 
+@item
+Chib, Siddhartha and Srikanth Ramamurthy (2010):
+``Tailored randomized block MCMC methods with application to DSGE models,'' 
+@i{Journal of Econometrics}, 155, 19--38
+
+@item
+Christiano, Lawrence J., Mathias Trabandt and Karl Walentin (2011):
+``Introducing financial frictions and unemployment into a small open
+economy model,'' @i{Journal of Economic Dynamics and Control}, 35(12),
+1999--2041
+
 @item
 Collard, Fabrice (2001): ``Stochastic simulations with Dynare: A practical guide''
 
@@ -12571,12 +12643,6 @@ Corona, Angelo,  M. Marchesi, Claudio Martini, and Sandro Ridella (1987):
 ``Minimizing multimodal functions of continuous variables with the ``simulated annealing'' algorithm'',
 @i{ACM Transactions on Mathematical Software}, 13(3), 262--280
 
-@item
-Christiano, Lawrence J., Mathias Trabandt and Karl Walentin (2011):
-``Introducing financial frictions and unemployment into a small open
-economy model,'' @i{Journal of Economic Dynamics and Control}, 35(12),
-1999--2041
-
 @item
 Del Negro, Marco and Franck Schorfheide (2004): ``Priors from General Equilibrium Models for VARS'',
 @i{International Economic Review}, 45(2), 643--673
@@ -12716,6 +12782,10 @@ General Equilibrium Models Using a Second-Order Approximation to the
 Policy Function,'' @i{Journal of Economic Dynamics and Control},
 28(4), 755--775
 
+@item
+Schnabel, Robert B. and Elizabeth Eskow (1990): ``A new modified Cholesky factorization,''
+@i{SIAM Journal on Scientific and Statistical Computing}, 11(6), 1136--1158
+
 @item
 Sims, Christopher A., Daniel F. Waggoner and Tao Zha (2008): ``Methods for
 inference in large multiple-equation Markov-switching models,''
diff --git a/matlab/TaRB_metropolis_hastings_core.m b/matlab/TaRB_metropolis_hastings_core.m
new file mode 100644
index 0000000000000000000000000000000000000000..28716c4273f143f29275c93a57fa31b8107f7ab2
--- /dev/null
+++ b/matlab/TaRB_metropolis_hastings_core.m
@@ -0,0 +1,296 @@
+function myoutput = TaRB_metropolis_hastings_core(myinputs,fblck,nblck,whoiam, ThisMatlab)
+% function myoutput = TaRB_metropolis_hastings_core(myinputs,fblck,nblck,whoiam, ThisMatlab)
+% Contains the most computationally intensive portion of code in
+% random_walk_metropolis_hastings (the 'for xxx = fblck:nblck' loop), using the TaRB algorithm.
+% The branches in that 'for'
+% cycle are completely independent and hence suitable for parallel execution.
+%
+% INPUTS
+%   o myinputs           [struct]    The mandatory variables for local/remote
+%                                    parallel computing obtained from the
+%                                    random_walk_metropolis_hastings.m function.
+%   o fblck and nblck    [integer]   The Metropolis-Hastings chains.
+%   o whoiam             [integer]   In concurrent programming, a way to refer to the different
+%                                    threads running in parallel is needed. The integer whoiam
+%                                    serves this purpose: it is the index number of this CPU among
+%                                    all CPUs in the cluster.
+%   o ThisMatlab         [integer]   Allows us to distinguish between the
+%                                    'main' Matlab, the slave Matlab worker, local Matlab, remote Matlab,
+%                                    ...; it is the index number of this slave machine in the cluster.
+% OUTPUTS
+%   o myoutput  [struct]
+%               If not executed in parallel, this is the original output of 'for b =
+%               fblck:nblck'. Otherwise, it is the portion of it computed on a specific core or
+%               remote machine. In this case:
+%                               record;
+%                               irun;
+%                               NewFile;
+%                               OutputFileName
+%
+% ALGORITHM
+%   Portion of Tailored Randomized Block Metropolis-Hastings proposed in
+%   Chib/Ramamurthy (2010): Tailored randomized block MCMC methods with
+%   application to DSGE models, Journal of Econometrics 155, pp. 19-38
+% 
+%   This implementation differs from the originally proposed one in the
+%   treatment of non-positive definite Hessians: it uses the generalized
+%   Cholesky decomposition of Schnabel/Eskow (1990), without pivoting (see chol_SE.m)
+%
+% SPECIAL REQUIREMENTS.
+%   None.
+% 
+% PARALLEL CONTEXT
+% See the comments in the random_walk_metropolis_hastings.m function.
+
+
+% Copyright (C) 2006-2015 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+global objective_function_penalty_base;
+
+if nargin<4,
+    whoiam=0;
+end
+
+% reshape 'myinputs' for local computation.
+% In order to avoid confusion in the name space, the instruction struct2local(myinputs) is replaced by:
+
+TargetFun=myinputs.TargetFun;
+ProposalFun=myinputs.ProposalFun;
+xparam1=myinputs.xparam1;
+mh_bounds=myinputs.mh_bounds;
+last_draw=myinputs.ix2;
+last_posterior=myinputs.ilogpo2;
+fline=myinputs.fline;
+npar=myinputs.npar;
+nruns=myinputs.nruns;
+NewFile=myinputs.NewFile;
+MAX_nruns=myinputs.MAX_nruns;
+d=myinputs.d;
+InitSizeArray=myinputs.InitSizeArray;
+record=myinputs.record;
+dataset_ = myinputs.dataset_;
+dataset_info = myinputs.dataset_info;
+bayestopt_ = myinputs.bayestopt_;
+estim_params_ = myinputs.estim_params_;
+options_ = myinputs.options_;
+M_ = myinputs.M_;
+oo_ = myinputs.oo_;
+
+% Necessary only for remote computing!
+if whoiam
+    % initialize persistent variables in priordens()
+    priordens(xparam1,bayestopt_.pshape,bayestopt_.p6,bayestopt_.p7, bayestopt_.p3,bayestopt_.p4,1);
+end
+
+MetropolisFolder = CheckPath('metropolis',M_.dname);
+ModelName = M_.fname;
+BaseName = [MetropolisFolder filesep ModelName];
+
+options_.lik_algo = 1;
+OpenOldFile = ones(nblck,1);
+
+%
+% Now I run the (nblck-fblck+1) Metropolis-Hastings chains
+%
+
+chain_index=0; %index of the chain currently processed on this core
+
+for curr_chain = fblck:nblck,
+    chain_index=chain_index+1;
+    try
+        % This will not work if the master uses a random number generator not
+        % available in the slave (different Matlab version or
+        % Matlab/Octave cluster). Therefore the trap.
+        %
+        % Set the random number generator type (the seed is useless but needed by the function)
+        set_dynare_seed(options_.DynareRandomStreams.algo, options_.DynareRandomStreams.seed);
+        % Set the state of the RNG
+        set_dynare_random_generator_state(record.InitialSeeds(curr_chain).Unifor, record.InitialSeeds(curr_chain).Normal);
+    catch
+        % If the state set by master is incompatible with the slave, we only reseed 
+        set_dynare_seed(options_.DynareRandomStreams.seed+curr_chain);
+    end
+    if (options_.load_mh_file~=0) && (fline(curr_chain)>1) && OpenOldFile(curr_chain) %load previous draws and likelihood
+        load([BaseName '_mh' int2str(NewFile(curr_chain)) '_blck' int2str(curr_chain) '.mat'])
+        x2 = [x2;zeros(InitSizeArray(curr_chain)-fline(curr_chain)+1,npar)];
+        logpo2 = [logpo2;zeros(InitSizeArray(curr_chain)-fline(curr_chain)+1,1)];
+        OpenOldFile(curr_chain) = 0;
+    else
+        x2 = zeros(InitSizeArray(curr_chain),npar);
+        logpo2 = zeros(InitSizeArray(curr_chain),1);
+    end
+    %Prepare waiting bars
+    if whoiam
+        prc0=(curr_chain-fblck)/(nblck-fblck+1)*(isoctave || options_.console_mode);
+        hh = dyn_waitbar({prc0,whoiam,options_.parallel(ThisMatlab)},['MH (' int2str(curr_chain) '/' int2str(options_.mh_nblck) ')...']);
+    else
+        hh = dyn_waitbar(0,['Metropolis-Hastings (' int2str(curr_chain) '/' int2str(options_.mh_nblck) ')...']);
+        set(hh,'Name','Metropolis-Hastings');
+    end
+    accepted_draws_this_chain = 0;
+    accepted_draws_this_file = 0;
+    blocked_draws_counter_this_chain=0;
+    blocked_draws_counter_this_chain_this_file=0;
+    draw_index_current_file = fline(curr_chain); %get location of first draw in current block
+    draw_iter = 1;
+    while draw_iter <= nruns(curr_chain)
+        
+        %% randomize indices for blocking in this iteration
+        indices=randperm(npar)';
+        blocks=[1; (1+cumsum((rand(length(indices)-1,1)>(1-options_.TaRB.new_block_probability))))];
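+        % each parameter after the first starts a new block with probability
+        % options_.TaRB.new_block_probability; the cumulative sum turns these
+        % break points into consecutive block numbers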
+        nblocks=blocks(end,1); %get number of blocks this iteration
+        current_draw=last_draw(curr_chain,:)'; %get starting point for current draw for updating
+        
+        for block_iter=1:nblocks
+            blocked_draws_counter_this_chain=blocked_draws_counter_this_chain+1;
+            blocked_draws_counter_this_chain_this_file=blocked_draws_counter_this_chain_this_file+1;
+            nxopt=length(indices(blocks==block_iter,1)); %get size of current block
+            par_start_current_block=current_draw(indices(blocks==block_iter,1));
+            [xopt_current_block, fval, exitflag, hess_mat_optimizer, options_, Scale] = dynare_minimize_objective(@TaRB_optimizer_wrapper,par_start_current_block,options_.TaRB.mode_compute,options_,[mh_bounds.lb(indices(blocks==block_iter,1),1) mh_bounds.ub(indices(blocks==block_iter,1),1)],bayestopt_.name,bayestopt_,[],...
+                current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
+                dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_); %inputs for objective           
+            objective_function_penalty_base=Inf; %reset penalty that may have been changed by optimizer
+            %% covariance for proposal density
+            hessian_mat = reshape(hessian('TaRB_optimizer_wrapper',xopt_current_block, ...
+                    options_.gstep,...
+                    current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
+                dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_),nxopt,nxopt);
+            
+            if any(any(isnan(hessian_mat))) || any(any(isinf(hessian_mat)))
+                inverse_hessian_mat=eye(nxopt)*1e-4; %use diagonal
+            else
+                inverse_hessian_mat=inv(hessian_mat); %get inverse Hessian
+                if any(any((isnan(inverse_hessian_mat)))) || any(any((isinf(inverse_hessian_mat))))
+                    inverse_hessian_mat=eye(nxopt)*1e-4; %use diagonal
+                end               
+            end
+            [proposal_covariance_Cholesky_decomposition_upper,negeigenvalues]=cholcov(inverse_hessian_mat,0);
+            %if not positive definite, use the generalized Cholesky
+            %decomposition of Schnabel/Eskow (1990)
+            if negeigenvalues~=0 
+                proposal_covariance_Cholesky_decomposition_upper=chol_SE(inverse_hessian_mat,0);
+            end
+            proposal_covariance_Cholesky_decomposition_upper=proposal_covariance_Cholesky_decomposition_upper*diag(bayestopt_.jscale(indices(blocks==block_iter,1),:));
+            %get proposal draw
+            if strcmpi(ProposalFun,'rand_multivariate_normal')
+                n = nxopt;
+            elseif strcmpi(ProposalFun,'rand_multivariate_student')
+                n = options_.student_degrees_of_freedom;
+            end
+    
+            proposed_par = feval(ProposalFun, xopt_current_block', proposal_covariance_Cholesky_decomposition_upper, n);
+            % check whether the draw is valid and compute the posterior
+            if all( proposed_par(:) > mh_bounds.lb(indices(blocks==block_iter,1),:) ) && all( proposed_par(:) < mh_bounds.ub(indices(blocks==block_iter,1),:) )
+                try
+                    logpost = - feval('TaRB_optimizer_wrapper', proposed_par(:),...
+                        current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
+                        dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_);
+                catch
+                    logpost = -inf;
+                end
+            else
+                logpost = -inf;
+            end
+            %get ratio of proposal densities, required because proposal depends
+            %on current mode via Hessian and is thus not symmetric anymore
+            if strcmpi(ProposalFun,'rand_multivariate_normal')
+                proposal_density_proposed_move_forward=multivariate_normal_pdf(proposed_par,xopt_current_block',proposal_covariance_Cholesky_decomposition_upper,n);
+                proposal_density_proposed_move_backward=multivariate_normal_pdf(par_start_current_block',xopt_current_block',proposal_covariance_Cholesky_decomposition_upper,n);
+            elseif strcmpi(ProposalFun,'rand_multivariate_student')
+                proposal_density_proposed_move_forward=multivariate_student_pdf(proposed_par,xopt_current_block',proposal_covariance_Cholesky_decomposition_upper,n);
+                proposal_density_proposed_move_backward=multivariate_student_pdf(par_start_current_block',xopt_current_block',proposal_covariance_Cholesky_decomposition_upper,n);
+            end
+            accprob=logpost-last_posterior(curr_chain)+ log(proposal_density_proposed_move_backward)-log(proposal_density_proposed_move_forward); %Formula (6), Chib/Ramamurthy
+
+            if (logpost > -inf) && (log(rand) < accprob)
+                current_draw(indices(blocks==block_iter,1))=proposed_par;
+                last_posterior(curr_chain)=logpost;
+                accepted_draws_this_chain =accepted_draws_this_chain +1;
+                accepted_draws_this_file = accepted_draws_this_file + 1;
+            else %no updating 
+                %do nothing, keep old value
+            end
+        end
+        %save draws and update stored last values
+        x2(draw_index_current_file,:) = current_draw;
+        last_draw(curr_chain,:) = current_draw;
+        %save posterior after full run through all blocks        
+        logpo2(draw_index_current_file) = last_posterior(curr_chain);
+
+        prtfrc = draw_iter/nruns(curr_chain);
+        if (mod(draw_iter, 3)==0 && ~whoiam) || (mod(draw_iter,50)==0 && whoiam)
+            dyn_waitbar(prtfrc,hh,[ 'MH (' int2str(curr_chain) '/' int2str(options_.mh_nblck) ') ' sprintf('Current acceptance ratio %4.3f', accepted_draws_this_chain/blocked_draws_counter_this_chain)]);
+        end
+        if (draw_index_current_file == InitSizeArray(curr_chain)) || (draw_iter == nruns(curr_chain)) % Now I save the simulations, either because the current file is full or the chain is done
+            save([BaseName '_mh' int2str(NewFile(curr_chain)) '_blck' int2str(curr_chain) '.mat'],'x2','logpo2');
+            fidlog = fopen([MetropolisFolder '/metropolis.log'],'a');
+            fprintf(fidlog,['\n']);
+            fprintf(fidlog,['%% Mh' int2str(NewFile(curr_chain)) 'Blck' int2str(curr_chain) ' (' datestr(now,0) ')\n']);
+            fprintf(fidlog,' \n');
+            fprintf(fidlog,['  Number of simulations.: ' int2str(length(logpo2)) '\n']);
+            fprintf(fidlog,['  Acceptance ratio......: ' num2str(accepted_draws_this_file /blocked_draws_counter_this_chain_this_file) '\n']);
+            fprintf(fidlog,['  Posterior mean........:\n']);
+            for i=1:length(x2(1,:))
+                fprintf(fidlog,['    params:' int2str(i) ': ' num2str(mean(x2(:,i))) '\n']);
+            end
+            fprintf(fidlog,['    log2po:' num2str(mean(logpo2)) '\n']);
+            fprintf(fidlog,['  Minimum value.........:\n']);
+            for i=1:length(x2(1,:))
+                fprintf(fidlog,['    params:' int2str(i) ': ' num2str(min(x2(:,i))) '\n']);
+            end
+            fprintf(fidlog,['    log2po:' num2str(min(logpo2)) '\n']);
+            fprintf(fidlog,['  Maximum value.........:\n']);
+            for i=1:length(x2(1,:))
+                fprintf(fidlog,['    params:' int2str(i) ': ' num2str(max(x2(:,i))) '\n']);
+            end
+            fprintf(fidlog,['    log2po:' num2str(max(logpo2)) '\n']);
+            fprintf(fidlog,' \n');
+            fclose(fidlog);
+            %reset counters;
+            accepted_draws_this_file = 0;
+            blocked_draws_counter_this_chain_this_file=0;
+            if draw_iter == nruns(curr_chain) % I record the last draw...
+                record.LastParameters(curr_chain,:) = x2(end,:);
+                record.LastLogPost(curr_chain) = logpo2(end);
+            end
+            % size of next file in chain curr_chain
+            InitSizeArray(curr_chain) = min(nruns(curr_chain)-draw_iter,MAX_nruns);
+            % initialization of next file if necessary
+            if InitSizeArray(curr_chain)
+                x2 = zeros(InitSizeArray(curr_chain),npar);
+                logpo2 = zeros(InitSizeArray(curr_chain),1);
+                NewFile(curr_chain) = NewFile(curr_chain) + 1;
+                draw_index_current_file = 0;
+            end
+        end
+        draw_iter=draw_iter+1;
+        draw_index_current_file = draw_index_current_file + 1;
+    end% End of the simulations for one mh-block.
+    record.AcceptanceRatio(curr_chain) = accepted_draws_this_chain/blocked_draws_counter_this_chain;
+    dyn_waitbar_close(hh);
+    [record.LastSeeds(curr_chain).Unifor, record.LastSeeds(curr_chain).Normal] = get_dynare_random_generator_state();
+    OutputFileName(chain_index,:) = {[MetropolisFolder,filesep], [ModelName '_mh*_blck' int2str(curr_chain) '.mat']};
+end% End of the loop over the mh-blocks.
+
+
+myoutput.record = record;
+myoutput.irun = draw_index_current_file;
+myoutput.NewFile = NewFile;
+myoutput.OutputFileName = OutputFileName;
\ No newline at end of file
diff --git a/matlab/TaRB_optimizer_wrapper.m b/matlab/TaRB_optimizer_wrapper.m
new file mode 100644
index 0000000000000000000000000000000000000000..9c7ed4cccbdde7ea5079275d9d6d08737ebc7044
--- /dev/null
+++ b/matlab/TaRB_optimizer_wrapper.m
@@ -0,0 +1,44 @@
+function [fval,DLIK,Hess,exit_flag] = TaRB_optimizer_wrapper(optpar,par_vector,parameterindices,TargetFun,varargin)
+% function [fval,DLIK,Hess,exit_flag] = TaRB_optimizer_wrapper(optpar,par_vector,parameterindices,TargetFun,varargin)
+% Wrapper function for target function used in TaRB algorithm; reassembles
+% full parameter vector before calling target function
+%
+% INPUTS 
+%   o optpar            [double]   (p_opt*1) vector of subset of parameters to be considered 
+%   o par_vector        [double]   (p*1) full vector of parameters 
+%   o parameterindices  [double]   (p_opt*1) index of optpar entries in
+%                                   par_vector
+%   o TargetFun         [char]      string specifying the name of the objective
+%                                   function (posterior kernel).
+%   o varargin          [structure] other inputs of target function
+% 
+% OUTPUTS
+%   o fval       [scalar]   value of minus the objective function (posterior kernel).
+%   o DLIK       [double]  (p*1) score vector of the likelihood.
+%   o Hess       [double]  (p*p) asymptotic Hessian matrix.
+%   o exit_flag  [scalar]   equal to zero if the routine returns with a penalty (one otherwise).
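+% 
+% Example call (hypothetical sketch, with 'dsge_likelihood' as the target function):
+%   fval = TaRB_optimizer_wrapper(xopt_block, full_parameter_vector, block_indices, 'dsge_likelihood', ...
+%       dataset_, dataset_info, options_, M_, estim_params_, bayestopt_, mh_bounds, oo_);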
+% 
+% Copyright (C) 2015 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+par_vector(parameterindices,:)=optpar; %reassemble parameter
+[fval,DLIK,Hess,exit_flag] = feval(TargetFun,par_vector,varargin{:}); %call target function
+
diff --git a/matlab/chol_SE.m b/matlab/chol_SE.m
new file mode 100644
index 0000000000000000000000000000000000000000..0ec5a900bf4a23fb44f5f20a852b1d8220f8ede2
--- /dev/null
+++ b/matlab/chol_SE.m
@@ -0,0 +1,317 @@
+function [R,indef, E, P]=chol_SE(A,pivoting)
+% [R,indef, E, P]=chol_SE(A,pivoting)
+% Performs a (modified) Cholesky factorization of the form 
+% 
+%     P'*A*P  + E = R'*R
+% 
+% As detailed in Schnabel/Eskow (1990), the factorization has 2 phases:
+%   Phase 1: While A can still be positive definite, pivot on the maximum diagonal element.
+%            Check that the standard Cholesky update would result in a positive diagonal
+%            at the current iteration. If so, continue with the normal Cholesky update. 
+%            Otherwise switch to phase 2. 
+%            If A is safely positive definite, stage 1 is never left, resulting in 
+%            the standard Cholesky decomposition. 
+%            
+%    Phase 2: Pivot on the minimum of the negatives of the lower Gershgorin bound
+%            estimates. To prevent negative diagonals, compute the amount to add to the
+%            pivot element and add this. Then, do the Cholesky update and update the estimates of the
+%            Gershgorin bounds.
+%
+% Notes:
+%   -   During factorization, L=R' is stored in the lower triangle of the original matrix A,
+%       minimizing the memory requirements
+%   -   Conforming with the original Schnabel/Eskow (1990) algorithm:
+%            - at each iteration the updated Gershgorin bounds are estimated instead of recomputed, 
+%              reducing the computational requirements from O(n^3) to O(n^2)
+%           -  For the last 2 by 2 submatrix, an eigenvalue-based decomposition is used
+%   -   While pivoting is not necessary, it reduces the size of E, the add-on to the diagonal. But this comes at 
+%       the cost of introducing a permutation.
+% 
+%
+% Inputs  
+%   A           [n*n]     Matrix to be factorized
+%   pivoting    [scalar]  dummy whether pivoting is used
+% 
+% Outputs  
+%   R           [n*n]     upper triangular Cholesky factor (computed in the lower
+%                         triangular portion of A, including the main diagonal)
+%   indef       [scalar]  equal to 1 if A was found not to be positive definite (phase 2 was entered), 0 otherwise
+%   E           [n*1]     elements added to the diagonal of A
+%   P           [n*n]     permutation matrix recording how the rows and columns were permuted during the decomposition
+%
+% REFERENCES:
+%   This implementation is based on 
+%
+%       Robert B. Schnabel and Elizabeth Eskow (1990): "A New Modified Cholesky
+%       Factorization," SIAM Journal on Scientific and Statistical Computing,
+%       11(6): 1136--1158
+% 
+%       Elizabeth Eskow and Robert B. Schnabel (1991): "Algorithm 695 - Software for a New Modified
+%       Cholesky Factorization," ACM Transactions on Mathematical Software, 17(3): 306--312
+% 
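+% Usage example (a minimal, hypothetical sketch):
+%   A = [2 1 1; 1 2 1; 1 1 -3];        % symmetric but indefinite matrix
+%   [R, indef, E, P] = chol_SE(A, 1);  % factorize with pivoting
+%   indef                              % equals 1, since phase 2 was entered
+%   % R is upper triangular and satisfies P'*A*P + E = R'*R as above (E added to the diagonal)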
+% 
+% Author: Johannes Pfeifer based on Eskow/Schnabel (1991)
+% 
+% Copyright (C) 2015 Johannes Pfeifer
+% Copyright (C) 2015 Dynare Team
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+if sum(sum(abs(A-A'))) > 0
+    error('A is not symmetric')
+end
+
+if nargin==1
+    pivoting=0;
+end
+
+
+n=size(A,1);
+
+tau1=eps^(1/3); %tolerance parameter for determining when to switch to phase 2
+tau2=eps^(1/3); %tolerance used for determining the maximum condition number of the final 2X2 submatrix
+
+phase1 = 1;
+delta = 0;
+
+P=1:n;
+g=zeros(n,1);
+E=zeros(n,1);
+
+% Find the maximum magnitude of the diagonal elements. If any diagonal element is negative, then phase1 is false.
+gammma=max(diag(A));
+if any(diag(A) < 0)
+    phase1 = 0;
+end
+
+taugam = tau1*gammma;
+% If not in phase1, then calculate the initial Gershgorin bounds needed for the start of phase2.
+if ~phase1
+    g=gersh_nested(A,1,n);
+end
+
+% check for n=1
+if n==1
+    delta = tau2*abs(A(1,1)) - A(1,1);
+    if delta > 0
+        E(1) = delta;
+    end
+    if A(1,1) == 0
+        E(1) = tau2;
+    end
+    A(1,1)=sqrt(A(1,1)+E(1));
+end
+
+for j = 1:n-1
+    % PHASE 1
+    if phase1
+        if pivoting==1
+            % Find index of maximum diagonal element A(i,i) where i>=j
+            [tmp,imaxd] = max(diag(A(j:n,j:n)));
+            imaxd=imaxd+j-1;
+            % Pivot to the top the row and column with the max diag
+            if (imaxd ~= j)
+                % Swap row j with row of max diag
+                for ii = 1:j-1
+                    temp = A(j,ii);
+                    A(j,ii) = A(imaxd,ii);
+                    A(imaxd,ii) = temp;
+                end
+                % Swap column j and row imaxd between positions j and imaxd
+                for ii = j+1:imaxd-1
+                    temp = A(ii,j);
+                    A(ii,j) = A(imaxd,ii);
+                    A(imaxd,ii) = temp;
+                end
+                % Swap column j with column of max diag
+                for ii = imaxd+1:n
+                    temp = A(ii,j);
+                    A(ii,j) = A(ii,imaxd);
+                    A(ii,imaxd) = temp;
+                end
+                % Swap diag elements
+                temp = A(j,j);
+                A(j,j) = A(imaxd,imaxd);
+                A(imaxd,imaxd) = temp;
+                % Swap elements of the permutation vector
+                itemp = P(j);
+                P(j) = P(imaxd);
+                P(imaxd) = itemp;          
+            end
+        end
+        % check to see whether the normal Cholesky update for this
+        % iteration would result in a positive diagonal,
+        % and if not then switch to phase 2.
+        jp1 = j+1;
+        if A(j,j)>0
+            if min((diag(A(j+1:n,j+1:n)) - A(j+1:n,j).^2/A(j,j))) < taugam %test whether stage 2 is required
+                phase1=0;
+            end
+        else
+            phase1 = 0;
+        end
+        if phase1
+            % Do the normal cholesky update if still in phase 1
+            A(j,j) = sqrt(A(j,j));
+            tempjj = A(j,j);
+            for ii = jp1: n
+                A(ii,j) = A(ii,j)/tempjj;
+            end
+            for ii=jp1:n
+                temp=A(ii,j);
+                for k = jp1:ii
+                    A(ii,k) = A(ii,k)-(temp * A(k,j));
+                end
+            end
+            if j == n-1
+                A(n,n)=sqrt(A(n,n));
+            end
+        else
+            % Calculate the negatives of the lower Gershgorin bounds
+            g=gersh_nested(A,j,n);
+        end
+    end
+    
+    % PHASE 2
+    if ~phase1
+        if j ~= n-1
+            if pivoting
+                % Find the minimum negative Gershgorin bound
+                [tmp,iming] = min(g(j:n)); 
+                iming=iming+j-1;
+                % Pivot to the top the row and column with the
+                % minimum negative Gershgorin bound
+                if iming ~= j
+                    % Swap row j with row of min Gershgorin bound
+                    for ii = 1:j-1
+                        temp = A(j,ii);
+                        A(j,ii) = A(iming,ii);
+                        A(iming,ii) = temp;
+                    end
+
+                    % Swap col j with row iming from j to iming
+                    for ii = j+1:iming-1
+                        temp = A(ii,j);
+                        A(ii,j) = A(iming,ii);
+                        A(iming,ii) = temp;
+                    end
+                    % Swap column j with column of min Gershgorin bound
+                    for ii = iming+1:n
+                        temp = A(ii,j);
+                        A(ii,j) = A(ii,iming);
+                        A(ii,iming) = temp;
+                    end
+                    % Swap diagonal elements
+                    temp = A(j,j);
+                    A(j,j) = A(iming,iming);
+                    A(iming,iming) = temp;
+                    % Swap elements of the permutation vector
+                    itemp = P(j);
+                    P(j) = P(iming);
+                    P(iming) = itemp;
+                    % Swap elements of the negative Gershgorin bounds vector
+                    temp = g(j);
+                    g(j) = g(iming);
+                    g(iming) = temp;
+                end
+            end
+            % Calculate delta and add to the diagonal. delta=max{0,delta_previous,-A(j,j)+normj,-A(j,j)+taugam}
+            % where normj=sum of |A(i,j)| for i=j+1,...,n, delta_previous is the delta computed at the previous iteration and taugam is tau1*gamma.
+                       
+            normj=sum(abs(A(j+1:n,j)));
+            
+            delta = max([0;delta;-A(j,j)+normj;-A(j,j)+taugam]); % get adjustment based on formula on bottom of p. 309 of Eskow/Schnabel (1991)
+
+            E(j) =  delta;
+            A(j,j) = A(j,j) + E(j);
+            % Update the Gershgorin bound estimates (note: g(i) is the negative of the Gershgorin lower bound.)
+            if A(j,j) ~= normj
+                temp = (normj/A(j,j)) - 1;
+                for ii = j+1:n
+                    g(ii) = g(ii) + abs(A(ii,j)) * temp;
+                end
+            end
+            % Do the cholesky update
+            A(j,j) = sqrt(A(j,j));
+            tempjj = A(j,j);
+            for ii = j+1:n
+                A(ii,j) = A(ii,j) / tempjj;
+            end
+            for ii = j+1:n
+                temp = A(ii,j);
+                for k = j+1: ii
+                    A(ii,k) = A(ii,k) - (temp * A(k,j));
+                end
+            end
+        else
+            % Find eigenvalues of final 2 by 2 submatrix
+            % Find delta such that:
+            % 1.  the l2 condition number of the final 2X2 submatrix + delta*I <= tau2
+            % 2. delta >= previous delta, 
+            % 3. min(eigvals) + delta >= tau2 * gamma, where min(eigvals) is the smallest eigenvalue of the final 2X2 submatrix
+            
+            A(n-1,n)=A(n,n-1); %set value above diagonal for computation of eigenvalues
+            eigvals  = eig(A(n-1:n,n-1:n));
+            delta    = max([ 0 ; delta ; -min(eigvals)+tau2*max((max(eigvals)-min(eigvals))/(1-tau1),gammma) ]); %Formula 5.3.2 of Schnabel/Eskow (1990)
+
+            if delta > 0
+                A(n-1,n-1) = A(n-1,n-1) + delta;
+                A(n,n) = A(n,n) + delta;
+                E(n-1) = delta;
+                E(n) = delta;
+            end
+            
+            % Final update
+            A(n-1,n-1) = sqrt(A(n-1,n-1));
+            A(n,n-1) = A(n,n-1)/A(n-1,n-1);
+            A(n,n) = A(n,n) - (A(n,n-1)^2);
+            A(n,n) = sqrt(A(n,n));
+        end
+    end
+end
+
+R=(tril(A))';
+indef=~phase1;
+Pprod=zeros(n,n);
+Pprod(sub2ind([n,n],P,1:n))=1;
+P=Pprod;
+end
+
+function  g=gersh_nested(A,j,n)
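+% Computes the negatives of the lower Gershgorin bound estimates for the
+% trailing submatrix A(j:n,j:n): g(ii) = (sum of off-diagonal |A| in row/column ii) - A(ii,ii),
+% using only the lower triangle of A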
+
+g=zeros(n,1);
+for ii = j:n
+    if ii == 1
+        sum_up_to_i = 0;
+    else
+        sum_up_to_i = sum(abs(A(ii,j:(ii-1))));
+    end
+    if ii == n
+        sum_after_i = 0;
+    else
+        sum_after_i = sum(abs(A((ii+1):n,ii)));
+    end
+    g(ii) = sum_up_to_i + sum_after_i - A(ii,ii);
+end
+end
+
diff --git a/matlab/global_initialization.m b/matlab/global_initialization.m
index b9841745225d19f493e7a9f51c278c828cfb4b9e..70303d3aa1439171bea27b9fca56b083092a5e9b 100644
--- a/matlab/global_initialization.m
+++ b/matlab/global_initialization.m
@@ -416,7 +416,14 @@ options_.recursive_estimation_restart = 0;
 options_.MCMC_jumping_covariance='hessian';
 options_.use_calibration_initialization = 0;
 options_.endo_vars_for_moment_computations_in_estimation=[];
+
+% Tailored Random Block Metropolis-Hastings
 options_.TaRB.use_TaRB = 0;
+options_.TaRB.mode_compute=4;
+options_.TaRB.new_block_probability=0.25; %probability that next parameter belongs to new block
+
+% Run optimizer silently
+options_.silent_optimizer=0;
 
 % Prior restrictions
 options_.prior_restrictions.status = 0;
@@ -487,6 +494,9 @@ options_.homotopy_force_continue = 0;
 %csminwel optimization routine
 csminwel.tolerance.f=1e-7;
 csminwel.maxiter=1000;
+csminwel.verbosity=1;
+csminwel.Save_files=1;
+
 options_.csminwel=csminwel;
 
 %newrat optimization routine
@@ -494,6 +504,9 @@ newrat.hess=1; % dynare numerical hessian
 newrat.tolerance.f=1e-5;
 newrat.tolerance.f_analytic=1e-7;
 newrat.maxiter=1000;
+newrat.verbosity=1;
+newrat.Save_files=1;
+
 options_.newrat=newrat;
 
 % Simplex optimization routine (variation on Nelder Mead algorithm).
diff --git a/matlab/initial_estimation_checks.m b/matlab/initial_estimation_checks.m
index 48a0b47492af30ae1ce09f4526a75edb8a88323d..f3b9ce06ca0607bb0f1c8b5f5e9bb545546c0767 100644
--- a/matlab/initial_estimation_checks.m
+++ b/matlab/initial_estimation_checks.m
@@ -54,6 +54,10 @@ if DynareOptions.student_degrees_of_freedom <= 0
     error('initial_estimation_checks:: the student_degrees_of_freedom takes a positive integer argument');
 end
 
+if DynareOptions.TaRB.use_TaRB && (DynareOptions.TaRB.new_block_probability<0 || DynareOptions.TaRB.new_block_probability>1)
+    error(['initial_estimation_checks:: The tarb_new_block_probability must be between 0 and 1!'])
+end
+
 old_steady_params=Model.params; %save initial parameters for check if steady state changes param values
 
 % % check if steady state solves static model (except if diffuse_filter == 1)
diff --git a/matlab/bfgsi1.m b/matlab/optimization/bfgsi1.m
similarity index 69%
rename from matlab/bfgsi1.m
rename to matlab/optimization/bfgsi1.m
index c3b87ca49f8d550649da7047c39be84e972c65a3..696ee410b1ae82930b51c58ff3221e9c02d3bae0 100644
--- a/matlab/bfgsi1.m
+++ b/matlab/optimization/bfgsi1.m
@@ -1,13 +1,15 @@
-function H = bfgsi1(H0,dg,dx)
-% H = bfgsi1(H0,dg,dx)
+function H = bfgsi1(H0,dg,dx,Verbose,Save_files)
+% H = bfgsi1(H0,dg,dx,Verbose,Save_files)
 % Update Inverse Hessian matrix
 % 
 % Inputs:
 %   H0  [npar by npar]  initial inverse Hessian matrix
 %   dg  [npar by 1]     previous change in gradient
 %   dx  [npar by 1]     previous change in x;
+%   Verbose [scalar]    Indicator for verbose mode (set to 0 for silent mode)
+%   Save_files [scalar] Indicator whether to save intermediate files
 % 
-% 6/8/93 version that updates inverse hessian instead of hessian
+% 6/8/93 version that updates inverse Hessian instead of Hessian
 % itself.
 % 
 % Original file downloaded from:
@@ -42,10 +44,12 @@ dgdx = dg'*dx;
 if (abs(dgdx) >1e-12)
     H = H0 + (1+(dg'*Hdg)/dgdx)*(dx*dx')/dgdx - (dx*Hdg'+Hdg*dx')/dgdx;
 else
-    disp('bfgs update failed.')
-    disp(['|dg| = ' num2str(sqrt(dg'*dg)) '|dx| = ' num2str(sqrt(dx'*dx))]);
-    disp(['dg''*dx = ' num2str(dgdx)])
-    disp(['|H*dg| = ' num2str(Hdg'*Hdg)])
+    disp_verbose('bfgs update failed.',Verbose)
+    disp_verbose(['|dg| = ' num2str(sqrt(dg'*dg)) ' |dx| = ' num2str(sqrt(dx'*dx))],Verbose);
+    disp_verbose(['dg''*dx = ' num2str(dgdx)],Verbose)
+    disp_verbose(['|H*dg| = ' num2str(Hdg'*Hdg)],Verbose)
     H=H0;
 end
-save('H.dat','H')
+if Save_files
+    save('H.dat','H')
+end
diff --git a/matlab/optimization/csminit1.m b/matlab/optimization/csminit1.m
index b6574b005ad8653d03cf5379ed709e1d58e11a02..428c704c60bb7470dc19ede6a4ad308b21fb2e1c 100644
--- a/matlab/optimization/csminit1.m
+++ b/matlab/optimization/csminit1.m
@@ -1,4 +1,4 @@
-function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
+function [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,varargin)
-% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,varargin)
+% [fhat,xhat,fcount,retcode] = csminit1(fcn,x0,f0,g0,badg,H0,Verbose,varargin)
 % 
 % Inputs: 
@@ -101,7 +101,7 @@ else
     %      toc
     dxnorm = norm(dx);
     if dxnorm > 1e12
-        disp('Near-singular H problem.')
+        disp_verbose('Near-singular H problem.',Verbose)
         dx = dx*FCHANGE/dxnorm;
     end
     dfhat = dx'*g0;
@@ -115,10 +115,10 @@ else
             dx = dx - (ANGLE*dxnorm/gnorm+dfhat/(gnorm*gnorm))*g;
             dfhat = dx'*g;
             dxnorm = norm(dx);
-            disp(sprintf('Correct for low angle: %g',a))
+            disp_verbose(sprintf('Correct for low angle: %g',a),Verbose)
         end
     end
-    disp(sprintf('Predicted improvement: %18.9f',-dfhat/2))
+    disp_verbose(sprintf('Predicted improvement: %18.9f',-dfhat/2),Verbose)
     %
     % Have OK dx, now adjust length of step (lambda) until min and
     % max improvement rate criteria are met.
@@ -141,7 +141,7 @@ else
         %ARGLIST
         %f = feval(fcn,dxtest,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13);
         % f = feval(fcn,x0+dx*lambda,P1,P2,P3,P4,P5,P6,P7,P8);
-        disp(sprintf('lambda = %10.5g; f = %20.7f',lambda,f ))
+        disp_verbose(sprintf('lambda = %10.5g; f = %20.7f',lambda,f ),Verbose)
         %debug
         %disp(sprintf('Improvement too great? f0-f: %g, criterion: %g',f0-f,-(1-THETA)*dfhat*lambda))
         if f<fhat
@@ -176,7 +176,11 @@ else
             if abs(lambda) < MINLAMB
                 if (lambda > 0) && (f0 <= fhat)
                     % try going against gradient, which may be inaccurate
-                    lambda = -lambda*factor^6
+                    lambda = -lambda*factor^6;
+                    if Verbose
+                        % display the new lambda, as the formerly unsuppressed assignment did
+                        lambda
+                    end
                 else
                     if lambda < 0
                         retcode = 6;
@@ -222,4 +226,5 @@ else
         end
     end
 end
-disp(sprintf('Norm of dx %10.5g', dxnorm))
+
+disp_verbose(sprintf('Norm of dx %10.5g', dxnorm),Verbose)
diff --git a/matlab/optimization/csminwel1.m b/matlab/optimization/csminwel1.m
index dcc579f50d4076b3a563217c56483b65ab90b552..5c641ba9e3da0d166f09a61c66d5ee915cb9e3d2 100644
--- a/matlab/optimization/csminwel1.m
+++ b/matlab/optimization/csminwel1.m
@@ -1,4 +1,4 @@
-function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,varargin)
+function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,Verbose,Save_files,varargin)
-%[fhat,xhat,ghat,Hhat,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,varargin)
+%[fhat,xhat,ghat,Hhat,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,Verbose,Save_files,varargin)
 % Inputs:
 %   fcn:    [string]        string naming the objective function to be minimized
@@ -65,7 +65,6 @@ fh = [];
 xh = [];
 [nx,no]=size(x0);
 nx=max(nx,no);
-Verbose=1;
 NumGrad= isempty(grad);
 done=0;
 itct=0;
@@ -87,7 +86,7 @@ retcodeh = [];
 [f0,junk1,junk2,cost_flag] = feval(fcn,x0,varargin{:});
 
 if ~cost_flag
-    disp('Bad initial parameter.')
+    disp_verbose('Bad initial parameter.',Verbose)
     return
 end
 
@@ -110,15 +109,13 @@ while ~done
 
     g1=[]; g2=[]; g3=[];
     %addition fj. 7/6/94 for control
-    if Verbose
-        disp('-----------------')
-        disp(sprintf('f at the beginning of new iteration, %20.10f',f))
-    end
+    disp_verbose('-----------------',Verbose)
+    disp_verbose(sprintf('f at the beginning of new iteration, %20.10f',f),Verbose)
     %-----------Comment out this line if the x vector is long----------------
-    %   disp([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]);
+    %   disp_verbose([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)],Verbose);
     %-------------------------
     itct=itct+1;
-    [f1, x1, fc, retcode1] = csminit1(fcn,x,f,g,badg,H,varargin{:});
+    [f1, x1, fc, retcode1] = csminit1(fcn,x,f,g,badg,H,Verbose,varargin{:});
     fcount = fcount+fc;
     % erased on 8/4/94
     % if (retcode == 1) || (abs(f1-f) < crit)
@@ -142,7 +139,9 @@ while ~done
             end
             wall1=badg1;
             % g1
-            save g1.mat g1 x1 f1 varargin;
+            if Save_files
+                save g1.mat g1 x1 f1 varargin;
+            end
         end
         if wall1 % && (~done) by Jinill
                  % Bad gradient or back and forth on step length.  Possibly at
@@ -150,10 +149,8 @@ while ~done
                  %
                  %fcliff=fh;xcliff=xh;
             Hcliff=H+diag(diag(H).*rand(nx,1));
-            if Verbose
-                disp('Cliff.  Perturbing search direction.')
-            end
-            [f2, x2, fc, retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,varargin{:});
+            disp_verbose('Cliff.  Perturbing search direction.',Verbose)
+            [f2, x2, fc, retcode2] = csminit1(fcn,x,f,g,badg,Hcliff,Verbose,varargin{:});
             fcount = fcount+fc; % put by Jinill
             if  f2 < f
                 if retcode2==2 || retcode2==4
@@ -169,11 +166,15 @@ while ~done
                     end
                     wall2=badg2;
                     % g2
-                    badg2
-                    save g2.mat g2 x2 f2 varargin
+                    if Verbose
+                        badg2
+                    end
+                    if Save_files
+                        save g2.mat g2 x2 f2 varargin
+                    end
                 end
                 if wall2
-                    disp('Cliff again.  Try traversing')
+                    disp_verbose('Cliff again.  Try traversing',Verbose)
                     if norm(x2-x1) < 1e-13
                         f3=f; x3=x; badg3=1;retcode3=101;
                     else
@@ -181,7 +182,7 @@ while ~done
                         if(size(x0,2)>1)
                             gcliff=gcliff';
                         end
-                        [f3, x3, fc, retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),varargin{:});
+                        [f3, x3, fc, retcode3] = csminit1(fcn,x,f,gcliff,0,eye(nx),Verbose,varargin{:});
                         fcount = fcount+fc; % put by Jinill
                         if retcode3==2 || retcode3==4
                             wall3=1; 
@@ -197,7 +198,9 @@ while ~done
                             end
                             wall3=badg3;
                             % g3
-                            save g3.mat g3 x3 f3 varargin;
+                            if Save_files
+                                save g3.mat g3 x3 f3 varargin;
+                            end
                         end
                     end
                 else
@@ -225,7 +228,7 @@ while ~done
         fh=f1;xh=x1;gh=g1;badgh=badg1;retcodeh=retcode1;
     else
         [fh,ih] = min([f1,f2,f3]);
-        %disp(sprintf('ih = %d',ih))
+        %disp_verbose(sprintf('ih = %d',ih))
         %eval(['xh=x' num2str(ih) ';'])
         switch ih
           case 1
@@ -259,18 +262,16 @@ while ~done
     %end of picking
     stuck = (abs(fh-f) < crit);
     if (~badg) && (~badgh) && (~stuck)
-        H = bfgsi1(H,gh-g,xh-x);
-    end
-    if Verbose
-        disp('----')
-        disp(sprintf('Improvement on iteration %d = %18.9f',itct,f-fh))
+        H = bfgsi1(H,gh-g,xh-x,Verbose,Save_files);
     end
+    disp_verbose('----',Verbose)
+    disp_verbose(sprintf('Improvement on iteration %d = %18.9f',itct,f-fh),Verbose)
     % if Verbose
     if itct > nit
-        disp('iteration count termination')
+        disp_verbose('iteration count termination',Verbose)
         done = 1;
     elseif stuck
-        disp('improvement < crit termination')
+        disp_verbose('improvement < crit termination',Verbose)
         done = 1;
     end
     rc=retcodeh;
@@ -278,19 +279,19 @@ while ~done
         if rc ==0
             %do nothing, just a normal step
         elseif rc == 1
-            disp('zero gradient')
+            disp_verbose('zero gradient',Verbose)
         elseif rc == 6
-            disp('smallest step still improving too slow, reversed gradient')
+            disp_verbose('smallest step still improving too slow, reversed gradient',Verbose)
         elseif rc == 5
-            disp('largest step still improving too fast')
+            disp_verbose('largest step still improving too fast',Verbose)
         elseif (rc == 4) || (rc==2)
-            disp('back and forth on step length never finished')
+            disp_verbose('back and forth on step length never finished',Verbose)
         elseif rc == 3
-            disp('smallest step still improving too slow')
+            disp_verbose('smallest step still improving too slow',Verbose)
         elseif rc == 7
-            disp('warning: possible inaccuracy in H matrix')
+            disp_verbose('warning: possible inaccuracy in H matrix',Verbose)
         else
-            error('Unaccounted Case, please contact the developers')
+            error('Unaccounted Case, please contact the developers')
         end
     end
      
diff --git a/matlab/optimization/dynare_minimize_objective.m b/matlab/optimization/dynare_minimize_objective.m
index bf2ed53adf4001bf8895de3cb907823665899f9d..f570a1a3e4bca03865c1edc1b66eb2dd12a54f81 100644
--- a/matlab/optimization/dynare_minimize_objective.m
+++ b/matlab/optimization/dynare_minimize_objective.m
@@ -73,6 +73,9 @@ switch minimizer_algorithm
     if ~isempty(options_.optim_opt)
         eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
     end
+    if options_.silent_optimizer
+        optim_options = optimset(optim_options,'display','off');
+    end
     if options_.analytic_derivation,
         optim_options = optimset(optim_options,'GradObj','on','TolX',1e-7);
     end
@@ -110,15 +113,20 @@ switch minimizer_algorithm
             end
         end
     end
+    if options_.silent_optimizer
+        sa_options.verbosity = 0;
+    end
     npar=length(start_par_value);
     [LB, UB]=set_bounds_to_finite_values(bounds, options_.huge_number);
-    fprintf('\nNumber of parameters= %d, initial temperatur= %4.3f \n', npar,sa_options.initial_temperature);
-    fprintf('rt=  %4.3f; TolFun=  %4.3f; ns=  %4.3f;\n',sa_options.rt,sa_options.TolFun,sa_options.ns);
-    fprintf('nt=  %4.3f; neps=  %4.3f; MaxIter=  %d\n',sa_options.nt,sa_options.neps,sa_options.MaxIter);
-    fprintf('Initial step length(vm): %4.3f; step_length_c: %4.3f\n', sa_options.initial_step_length,sa_options.step_length_c);
-    fprintf('%-20s  %-6s    %-6s    %-6s\n','Name:', 'LB;','Start;','UB;');
-    for pariter=1:npar
-        fprintf('%-20s  %6.4f;   %6.4f;  %6.4f;\n',parameter_names{pariter}, LB(pariter),start_par_value(pariter),UB(pariter));
+    if sa_options.verbosity
+        fprintf('\nNumber of parameters= %d, initial temperature= %4.3f \n', npar,sa_options.initial_temperature);
+        fprintf('rt=  %4.3f; TolFun=  %4.3f; ns=  %4.3f;\n',sa_options.rt,sa_options.TolFun,sa_options.ns);
+        fprintf('nt=  %4.3f; neps=  %4.3f; MaxIter=  %d\n',sa_options.nt,sa_options.neps,sa_options.MaxIter);
+        fprintf('Initial step length(vm): %4.3f; step_length_c: %4.3f\n', sa_options.initial_step_length,sa_options.step_length_c);
+        fprintf('%-20s  %-6s    %-6s    %-6s\n','Name:', 'LB;','Start;','UB;');
+        for pariter=1:npar
+            fprintf('%-20s  %6.4f;   %6.4f;  %6.4f;\n',parameter_names{pariter}, LB(pariter),start_par_value(pariter),UB(pariter));
+        end
     end
     sa_options.initial_step_length= sa_options.initial_step_length*ones(npar,1); %bring step length to correct vector size
     sa_options.step_length_c= sa_options.step_length_c*ones(npar,1); %bring step_length_c to correct vector size
@@ -138,6 +146,9 @@ switch minimizer_algorithm
     if options_.analytic_derivation,
         optim_options = optimset(optim_options,'GradObj','on');
     end
+    if options_.silent_optimizer
+        optim_options = optimset(optim_options,'display','off');
+    end
     if ~isoctave
         [opt_par_values,fval,exitflag] = fminunc(objective_function,start_par_value,optim_options,varargin{:});
     else
@@ -152,6 +163,8 @@ switch minimizer_algorithm
     nit = options_.csminwel.maxiter;
     numgrad = options_.gradient_method;
     epsilon = options_.gradient_epsilon;
+    Verbose = options_.csminwel.verbosity;
+    Save_files = options_.csminwel.Save_files;
     % Change some options.
     if ~isempty(options_.optim_opt)
         options_list = read_key_value_string(options_.optim_opt);
@@ -167,11 +180,19 @@ switch minimizer_algorithm
                 numgrad = options_list{i,2};
               case 'NumgradEpsilon'
                 epsilon = options_list{i,2};
+              case 'verbosity'
+                Verbose = options_list{i,2};
+              case 'SaveFiles'
+                Save_files = options_list{i,2};                
               otherwise
                 warning(['csminwel: Unknown option (' options_list{i,1} ')!'])
             end
         end
     end
+    if options_.silent_optimizer
+        Save_files = 0; 
+        Verbose = 0;
+    end    
     % Set flag for analytical gradient.
     if options_.analytic_derivation
         analytic_grad=1;
@@ -180,7 +201,7 @@ switch minimizer_algorithm
     end
     % Call csminwell.
     [fval,opt_par_values,grad,inverse_hessian_mat,itct,fcount,exitflag] = ...
-        csminwel1(objective_function, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, varargin{:});
+        csminwel1(objective_function, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, Verbose, Save_files, varargin{:});
     hessian_mat=inv(inverse_hessian_mat);
   case 5
     if options_.analytic_derivation==-1 %set outside as code for use of analytic derivation
@@ -193,6 +214,8 @@ switch minimizer_algorithm
         newratflag = options_.newrat.hess; %default
     end
     nit=options_.newrat.maxiter;
+    Verbose = options_.newrat.verbosity;
+    Save_files = options_.newrat.Save_files;
     if ~isempty(options_.optim_opt)
         options_list = read_key_value_string(options_.optim_opt);
         for i=1:rows(options_list)
@@ -208,12 +231,20 @@ switch minimizer_algorithm
                 end
               case 'TolFun'
                 crit = options_list{i,2};
+              case 'verbosity'
+                Verbose = options_list{i,2};
+              case 'SaveFiles'
+                Save_files = options_list{i,2};
               otherwise
                 warning(['newrat: Unknown option (' options_list{i,1} ')!'])
             end
         end
     end
-    [opt_par_values,hessian_mat,gg,fval,invhess] = newrat(objective_function,start_par_value,analytic_grad,crit,nit,0,varargin{:});
+    if options_.silent_optimizer
+        Save_files = 0;
+        Verbose = 0;
+    end
+    [opt_par_values,hessian_mat,gg,fval,invhess] = newrat(objective_function,start_par_value,analytic_grad,crit,nit,0,Verbose,Save_files,varargin{:});
     %hessian_mat is the plain outer product gradient Hessian
   case 6
     [opt_par_values, hessian_mat, Scale, fval] = gmhmaxlik(objective_function, start_par_value, ...
@@ -229,6 +260,9 @@ switch minimizer_algorithm
     if ~isempty(options_.optim_opt)
         eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
     end
+    if options_.silent_optimizer
+        optim_options = optimset(optim_options,'display','off');
+    end
     if ~isoctave
         [opt_par_values,fval,exitflag] = fminsearch(objective_function,start_par_value,optim_options,varargin{:});
     else
@@ -255,11 +289,16 @@ switch minimizer_algorithm
                 simplexOptions.maxfcallfactor = options_list{i,2};
               case 'InitialSimplexSize'
                 simplexOptions.delta_factor = options_list{i,2};
+              case 'verbosity'
+                simplexOptions.verbose = options_list{i,2};
               otherwise
                 warning(['simplex: Unknown option (' options_list{i,1} ')!'])
             end
         end
     end
+    if options_.silent_optimizer
+        simplexOptions.verbose = 0;
+    end
     [opt_par_values,fval,exitflag] = simplex_optimization_routine(objective_function,start_par_value,simplexOptions,parameter_names,varargin{:});
   case 9
     % Set defaults
@@ -278,11 +317,29 @@ switch minimizer_algorithm
                 cmaesOptions.TolX = options_list{i,2};
               case 'MaxFunEvals'
                 cmaesOptions.MaxFunEvals = options_list{i,2};
+              case 'verbosity'
+                if options_list{i,2}==0
+                    cmaesOptions.DispFinal  = 'off';   % display messages like initial and final message
+                    cmaesOptions.DispModulo = '0';   % [0:Inf], disp messages after every i-th iteration
+                end
+              case 'SaveFiles'
+                if options_list{i,2}==0
+                  cmaesOptions.SaveVariables='off';
+                  cmaesOptions.LogModulo = '0';    % [0:Inf] if >1 record data less frequently after gen=100
+                  cmaesOptions.LogTime   = '0';    % [0:100] max. percentage of time for recording data
+                end
               otherwise
                 warning(['cmaes: Unknown option (' options_list{i,1}  ')!'])
             end
         end
     end
+    if options_.silent_optimizer
+        cmaesOptions.DispFinal  = 'off';   % display messages like initial and final message
+        cmaesOptions.DispModulo = '0';   % [0:Inf], disp messages after every i-th iteration
+        cmaesOptions.SaveVariables='off';
+        cmaesOptions.LogModulo = '0';    % [0:Inf] if >1 record data less frequently after gen=100
+        cmaesOptions.LogTime   = '0';    % [0:100] max. percentage of time for recording data
+    end
     warning('off','CMAES:NonfinitenessRange');
     warning('off','CMAES:InitialSigma');
     [x, fval, COUNTEVAL, STOPFLAG, OUT, BESTEVER] = cmaes(func2str(objective_function),start_par_value,H0,cmaesOptions,varargin{:});
@@ -309,11 +366,20 @@ switch minimizer_algorithm
                 simpsaOptions.TEMP_END = options_list{i,2};
               case 'MaxFunEvals'
                 simpsaOptions.MAX_FUN_EVALS = options_list{i,2};
+              case 'verbosity'
+                if options_list{i,2} == 0
+                    simpsaOptions.DISPLAY = 'none';
+                else
+                    simpsaOptions.DISPLAY = 'iter';
+                end
               otherwise
                 warning(['simpsa: Unknown option (' options_list{i,1}  ')!'])
             end
         end
     end
+    if options_.silent_optimizer
+        simpsaOptions.DISPLAY = 'none';
+    end
     simpsaOptionsList = options2cell(simpsaOptions);
     simpsaOptions = simpsaset(simpsaOptionsList{:});
     [LB, UB]=set_bounds_to_finite_values(bounds, options_.huge_number);
@@ -344,6 +410,9 @@ switch minimizer_algorithm
             end
         end
     end
+    if options_.silent_optimizer
+        solveoptoptions.verbosity = 0;
+    end
     [opt_par_values,fval]=solvopt(start_par_value,objective_function,[],[],[],solveoptoptions,varargin{:});
   case 102
     if isoctave
@@ -356,6 +425,9 @@ switch minimizer_algorithm
     if ~isempty(options_.optim_opt)
         eval(['optim_options = saoptimset(optim_options,' options_.optim_opt ');']);
     end
+    if options_.silent_optimizer
+        optim_options = saoptimset(optim_options,'display','off');
+    end
     func = @(x)objective_function(x,varargin{:});
     [opt_par_values,fval,exitflag,output] = simulannealbnd(func,start_par_value,bounds(:,1),bounds(:,2),optim_options);
   otherwise
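The same precedence runs through every case above: per-optimizer defaults are read first, user-supplied optim options may overwrite them, and silent_optimizer overrides both. A minimal MATLAB sketch of that pattern, for illustration only (options_, read_key_value_string and rows are the same objects used in the hunks above):

    Verbose = 1;                            % per-optimizer default
    Save_files = 1;
    if ~isempty(options_.optim_opt)         % user options come first
        options_list = read_key_value_string(options_.optim_opt);
        for i = 1:rows(options_list)
            switch options_list{i,1}
              case 'verbosity'
                Verbose = options_list{i,2};
              case 'SaveFiles'
                Save_files = options_list{i,2};
            end
        end
    end
    if options_.silent_optimizer            % global switch wins
        Verbose = 0;
        Save_files = 0;
    end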
diff --git a/matlab/optimization/mr_gstep.m b/matlab/optimization/mr_gstep.m
index 82c8d70f42cd3e2e15a2e4e2d6aefa103fcc8db0..51bfb09c447dcf7c7bd5c8a983fe1ace03be45e0 100644
--- a/matlab/optimization/mr_gstep.m
+++ b/matlab/optimization/mr_gstep.m
@@ -1,4 +1,4 @@
-function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin)
+function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,Verbose,Save_files,varargin)
-% function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,varargin)
+% function [f0, x, ig] = mr_gstep(h1,x,func0,htol0,Verbose,Save_files,varargin)
 %
 % Gibbs type step in optimisation
@@ -69,14 +69,19 @@ while i<n
         gg(i)=(f1(i)'-f_1(i)')./(2.*h1(i));
         hh(i) = 1/max(1.e-9,abs( (f1(i)+f_1(i)-2*f0)./(h1(i)*h1(i)) ));
         if gg(i)*(hh(i)*gg(i))/2 > htol
-            [f0 x fc retcode] = csminit1(func0,x,f0,gg,0,diag(hh),varargin{:});
+            [f0 x fc retcode] = csminit1(func0,x,f0,gg,0,diag(hh),Verbose,varargin{:});
             ig(i)=1;
-            fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i))
+            if Verbose
+                fprintf(['Done for param %s = %8.4f\n'],varargin{6}.name{i},x(i))
+            end
         end
         xh1=x;
     end
+    if Save_files
+        save gstep.mat x h1 f0
+    end
+end
+if Save_files
     save gstep.mat x h1 f0
 end
 
-save gstep.mat x h1 f0
-
diff --git a/matlab/newrat.m b/matlab/optimization/newrat.m
similarity index 65%
rename from matlab/newrat.m
rename to matlab/optimization/newrat.m
index 424db0c1c4f5c410ed1c3551db6ae72a10b1a7bb..52676db9bebff7018464c3386cbc51495748e605 100644
--- a/matlab/newrat.m
+++ b/matlab/optimization/newrat.m
@@ -1,4 +1,4 @@
-function [xparam1, hh, gg, fval, igg] = newrat(func0, x, analytic_derivation, ftol0, nit, flagg, varargin)
+function [xparam1, hh, gg, fval, igg] = newrat(func0, x, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, varargin)
-%  [xparam1, hh, gg, fval, igg] = newrat(func0, x, hh, gg, igg, ftol0, nit, flagg, varargin)
+%  [xparam1, hh, gg, fval, igg] = newrat(func0, x, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, varargin)
 %
 %  Optimiser with outer product gradient and with sequences of univariate steps
@@ -49,6 +49,7 @@ function [xparam1, hh, gg, fval, igg] = newrat(func0, x, analytic_derivation, ft
 
 global objective_function_penalty_base
 
+
 icount=0;
 nx=length(x);
 xparam1=x;
@@ -95,14 +96,19 @@ else
     h1=[];
 end
 H = igg;
-disp(['Gradient norm ',num2str(norm(gg))])
+disp_verbose(['Gradient norm ',num2str(norm(gg))],Verbose)
 ee=eig(hh);
-disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
-disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
+disp_verbose(['Minimum Hessian eigenvalue ',num2str(min(ee))],Verbose)
+disp_verbose(['Maximum Hessian eigenvalue ',num2str(max(ee))],Verbose)
 g=gg;
 check=0;
-if max(eig(hh))<0, disp('Negative definite Hessian! Local maximum!'), pause, end,
-save m1.mat x hh g hhg igg fval0
+if max(eig(hh))<0
+    disp_verbose('Negative definite Hessian! Local maximum!',Verbose)
+    pause(1)
+end
+if Save_files
+    save m1.mat x hh g hhg igg fval0
+end
 
 igrad=1;
 igibbs=1;
@@ -116,13 +122,13 @@ while norm(gg)>gtol && check==0 && jit<nit
     tic
     icount=icount+1;
     objective_function_penalty_base = fval0(icount);
-    disp([' '])
-    disp(['Iteration ',num2str(icount)])
-    [fval,x0,fc,retcode] = csminit1(func0,xparam1,fval0(icount),gg,0,H,varargin{:});
+    disp_verbose([' '],Verbose)
+    disp_verbose(['Iteration ',num2str(icount)],Verbose)
+    [fval,x0,fc,retcode] = csminit1(func0,xparam1,fval0(icount),gg,0,H,Verbose,varargin{:});
     if igrad
-        [fval1,x01,fc,retcode1] = csminit1(func0,x0,fval,gg,0,inx,varargin{:});
+        [fval1,x01,fc,retcode1] = csminit1(func0,x0,fval,gg,0,inx,Verbose,varargin{:});
         if (fval-fval1)>1
-            disp('Gradient step!!')
+            disp_verbose('Gradient step!!',Verbose)
         else
             igrad=0;
         end
@@ -139,27 +145,27 @@ while norm(gg)>gtol && check==0 && jit<nit
         end
         iggx=eye(length(gg));
         iggx(find(ig),find(ig)) = inv( hhx(find(ig),find(ig)) );
-        [fvala,x0,fc,retcode] = csminit1(func0,x0,fval,ggx,0,iggx,varargin{:});
+        [fvala,x0,fc,retcode] = csminit1(func0,x0,fval,ggx,0,iggx,Verbose,varargin{:});
     end
-    [fvala, x0, ig] = mr_gstep(h1,x0,func0,htol,varargin{:});
+    [fvala, x0, ig] = mr_gstep(h1,x0,func0,htol,Verbose,Save_files,varargin{:});
     nig=[nig ig];
-    disp('Sequence of univariate steps!!')
+    disp_verbose('Sequence of univariate steps!!',Verbose)
     fval=fvala;
     if (fval0(icount)-fval)<ftol && flagit==0
-        disp('Try diagonal Hessian')
+        disp_verbose('Try diagonal Hessian',Verbose)
         ihh=diag(1./(diag(hhg)));
-        [fval2,x0,fc,retcode2] = csminit1(func0,x0,fval,gg,0,ihh,varargin{:});
+        [fval2,x0,fc,retcode2] = csminit1(func0,x0,fval,gg,0,ihh,Verbose,varargin{:});
         if (fval-fval2)>=ftol
-            disp('Diagonal Hessian successful')
+            disp_verbose('Diagonal Hessian successful',Verbose)
         end
         fval=fval2;
     end
     if (fval0(icount)-fval)<ftol && flagit==0
-        disp('Try gradient direction')
+        disp_verbose('Try gradient direction',Verbose)
         ihh0=inx.*1.e-4;
-        [fval3,x0,fc,retcode3] = csminit1(func0,x0,fval,gg,0,ihh0,varargin{:});
+        [fval3,x0,fc,retcode3] = csminit1(func0,x0,fval,gg,0,ihh0,Verbose,varargin{:});
         if (fval-fval3)>=ftol
-            disp('Gradient direction successful')
+            disp_verbose('Gradient direction successful',Verbose)
         end
         fval=fval3;
     end
@@ -167,7 +173,7 @@ while norm(gg)>gtol && check==0 && jit<nit
     x(:,icount+1)=xparam1;
     fval0(icount+1)=fval;
     if (fval0(icount)-fval)<ftol
-        disp('No further improvement is possible!')
+        disp_verbose('No further improvement is possible!',Verbose)
         check=1;
         if analytic_derivation,
             [fvalx,gg,hh]=feval(func0,xparam1,varargin{:});
@@ -189,29 +195,33 @@ while norm(gg)>gtol && check==0 && jit<nit
             end
         end
         end
-        disp(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))])
-        disp(['FVAL          ',num2str(fval)])
-        disp(['Improvement   ',num2str(fval0(icount)-fval)])
-        disp(['Ftol          ',num2str(ftol)])
-        disp(['Htol          ',num2str(htol0)])
-        disp(['Gradient norm  ',num2str(norm(gg))])
+        disp_verbose(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))],Verbose)
+        disp_verbose(['FVAL          ',num2str(fval)],Verbose)
+        disp_verbose(['Improvement   ',num2str(fval0(icount)-fval)],Verbose)
+        disp_verbose(['Ftol          ',num2str(ftol)],Verbose)
+        disp_verbose(['Htol          ',num2str(htol0)],Verbose)
+        disp_verbose(['Gradient norm  ',num2str(norm(gg))],Verbose)
         ee=eig(hh);
-        disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
-        disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
+        disp_verbose(['Minimum Hessian eigenvalue ',num2str(min(ee))],Verbose)
+        disp_verbose(['Maximum Hessian eigenvalue ',num2str(max(ee))],Verbose)
         g(:,icount+1)=gg;
     else
         df = fval0(icount)-fval;
-        disp(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))])
-        disp(['FVAL          ',num2str(fval)])
-        disp(['Improvement   ',num2str(df)])
-        disp(['Ftol          ',num2str(ftol)])
-        disp(['Htol          ',num2str(htol0)])
+        disp_verbose(['Actual dxnorm ',num2str(norm(x(:,end)-x(:,end-1)))],Verbose)
+        disp_verbose(['FVAL          ',num2str(fval)],Verbose)
+        disp_verbose(['Improvement   ',num2str(df)],Verbose)
+        disp_verbose(['Ftol          ',num2str(ftol)],Verbose)
+        disp_verbose(['Htol          ',num2str(htol0)],Verbose)
         htol=htol_base;
         if norm(x(:,icount)-xparam1)>1.e-12 && analytic_derivation==0,
             try
-                save m1.mat x fval0 nig -append
+                if Save_files
+                    save m1.mat x fval0 nig -append
+                end
             catch
-                save m1.mat x fval0 nig
+                if Save_files
+                    save m1.mat x fval0 nig
+                end
             end
             [dum, gg, htol0, igg, hhg, h1]=mr_hessian(0,xparam1,func0,flagit,htol,varargin{:});
             if isempty(dum),
@@ -220,12 +230,12 @@ while norm(gg)>gtol && check==0 && jit<nit
             if htol0>htol
                 htol=htol0;
                 skipline()
-                disp('Numerical noise in the likelihood')
-                disp('Tolerance has to be relaxed')
+                disp_verbose('Numerical noise in the likelihood',Verbose)
+                disp_verbose('Tolerance has to be relaxed',Verbose)
                 skipline()
             end
             if ~outer_product_gradient,
-                H = bfgsi1(H,gg-g(:,icount),xparam1-x(:,icount));
+                H = bfgsi1(H,gg-g(:,icount),xparam1-x(:,icount),Verbose,Save_files);
                 hh=inv(H);
                 hhg=hh;
             else
@@ -246,39 +256,44 @@ while norm(gg)>gtol && check==0 && jit<nit
             hhg=hh;
             H = inv(hh);
         end
-        disp(['Gradient norm  ',num2str(norm(gg))])
+        disp_verbose(['Gradient norm  ',num2str(norm(gg))],Verbose)
         ee=eig(hh);
-        disp(['Minimum Hessian eigenvalue ',num2str(min(ee))])
-        disp(['Maximum Hessian eigenvalue ',num2str(max(ee))])
-        if max(eig(hh))<0, disp('Negative definite Hessian! Local maximum!'), pause(1), end,
+        disp_verbose(['Minimum Hessian eigenvalue ',num2str(min(ee))],Verbose)
+        disp_verbose(['Maximum Hessian eigenvalue ',num2str(max(ee))],Verbose)
+        if max(eig(hh))<0
+            disp_verbose('Negative definite Hessian! Local maximum!',Verbose)
+            pause(1)
+        end
         t=toc;
-        disp(['Elapsed time for iteration ',num2str(t),' s.'])
+        disp_verbose(['Elapsed time for iteration ',num2str(t),' s.'],Verbose)
         g(:,icount+1)=gg;
-
-        save m1.mat x hh g hhg igg fval0 nig H
+        if Save_files
+            save m1.mat x hh g hhg igg fval0 nig H
+        end
     end
 end
-
-save m1.mat x hh g hhg igg fval0 nig
+if Save_files
+    save m1.mat x hh g hhg igg fval0 nig
+end
 if ftol>ftol0
     skipline()
-    disp('Numerical noise in the likelihood')
-    disp('Tolerance had to be relaxed')
+    disp_verbose('Numerical noise in the likelihood',Verbose)
+    disp_verbose('Tolerance had to be relaxed',Verbose)
     skipline()
 end
 
 if jit==nit
     skipline()
-    disp('Maximum number of iterations reached')
+    disp_verbose('Maximum number of iterations reached',Verbose)
     skipline()
 end
 
 if norm(gg)<=gtol
-    disp(['Estimation ended:'])
-    disp(['Gradient norm < ', num2str(gtol)])
+    disp_verbose(['Estimation ended:'],Verbose)
+    disp_verbose(['Gradient norm < ', num2str(gtol)],Verbose)
 end
 if check==1,
-    disp(['Estimation successful.'])
+    disp_verbose(['Estimation successful.'],Verbose)
 end
 
-return
\ No newline at end of file
+return
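With the two extra arguments in place, a silent call to newrat takes the following shape (a sketch only: func0, x0, crit, nit and varargin stand for a Dynare-style objective, start values, tolerance, iteration limit and the objective's extra inputs):

    Verbose = 0;       % suppress all disp_verbose output
    Save_files = 0;    % skip the m1.mat/gstep.mat checkpoint files
    [xparam1, hh, gg, fval, igg] = ...
        newrat(func0, x0, 0, crit, nit, 0, Verbose, Save_files, varargin{:});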
diff --git a/matlab/optimization/simplex_optimization_routine.m b/matlab/optimization/simplex_optimization_routine.m
index 33843cf0d589caef3391eef246f5a3fc39cbdc9c..58b3ff1b097a9a633d74dc8bfbc4cf03a01c6e27 100644
--- a/matlab/optimization/simplex_optimization_routine.m
+++ b/matlab/optimization/simplex_optimization_routine.m
@@ -507,12 +507,12 @@ fval = fv(1);
 exitflag = 1;
 
 if func_count>= max_func_calls
-    disp('Maximum number of objective function calls has been exceeded!')
+    disp_verbose('Maximum number of objective function calls has been exceeded!',verbose)
     exitflag = 0;
 end
 
 if iter_count>= max_iterations
-    disp('Maximum number of iterations has been exceeded!')
+    disp_verbose('Maximum number of iterations has been exceeded!',verbose)
     exitflag = 0;
 end
 
diff --git a/matlab/random_walk_metropolis_hastings.m b/matlab/random_walk_metropolis_hastings.m
index 2092922ac65e464a640a327e4e241e8b9e17e30f..c5ff16c03c0c5ddad38792f0a91abe666e44f332 100644
--- a/matlab/random_walk_metropolis_hastings.m
+++ b/matlab/random_walk_metropolis_hastings.m
@@ -87,6 +87,10 @@ load_last_mh_history_file(MetropolisFolder, ModelName);
 % on many cores). The mandatory variables for local/remote parallel
 % computing are stored in the localVars struct.
 
+if options_.TaRB.use_TaRB
+    options_.silent_optimizer=1; %locally set optimizer to silent mode
+end
+
 localVars =   struct('TargetFun', TargetFun, ...
                      'ProposalFun', ProposalFun, ...
                      'xparam1', xparam1, ...
@@ -117,9 +121,12 @@ localVars =   struct('TargetFun', TargetFun, ...
 % single chain compute Random walk Metropolis-Hastings algorithm sequentially.
 
 if isnumeric(options_.parallel) || (nblck-fblck)==0,
-    fout = random_walk_metropolis_hastings_core(localVars, fblck, nblck, 0);
+    if options_.TaRB.use_TaRB
+        fout = TaRB_metropolis_hastings_core(localVars, fblck, nblck, 0);
+    else
+        fout = random_walk_metropolis_hastings_core(localVars, fblck, nblck, 0);
+    end
     record = fout.record;
-
     % Parallel in Local or remote machine.   
 else 
     % Global variables for parallel routines.
@@ -138,7 +145,11 @@ else
     end
     % from where to get back results
     %     NamFileOutput(1,:) = {[M_.dname,'/metropolis/'],'*.*'};
-    [fout, nBlockPerCPU, totCPU] = masterParallel(options_.parallel, fblck, nblck,NamFileInput,'random_walk_metropolis_hastings_core', localVars, globalVars, options_.parallel_info);
+    if options_.TaRB.use_TaRB
+        [fout, nBlockPerCPU, totCPU] = masterParallel(options_.parallel, fblck, nblck,NamFileInput,'TaRB_metropolis_hastings_core', localVars, globalVars, options_.parallel_info);
+    else
+        [fout, nBlockPerCPU, totCPU] = masterParallel(options_.parallel, fblck, nblck,NamFileInput,'random_walk_metropolis_hastings_core', localVars, globalVars, options_.parallel_info);
+    end
     for j=1:totCPU,
         offset = sum(nBlockPerCPU(1:j-1))+fblck-1;
         record.LastLogPost(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.LastLogPost(offset+1:sum(nBlockPerCPU(1:j)));
diff --git a/matlab/utilities/general/disp_verbose.m b/matlab/utilities/general/disp_verbose.m
new file mode 100644
index 0000000000000000000000000000000000000000..09c09e4d7e66ad001945c13aceadd84158f447c0
--- /dev/null
+++ b/matlab/utilities/general/disp_verbose.m
@@ -0,0 +1,32 @@
+function disp_verbose(input_string,Verbose)
+% function disp_verbose(input_string,Verbose)
+% Prints input_string unless Verbose is set to 0
+% 
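+% INPUTS
+%   input_string    [string]    string to be printed to the command window
+%   Verbose         [scalar]    0 to suppress printing; any other value prints
+%
+% OUTPUTS
+%   none
+%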
+% Copyright (C) 2015 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+
+if Verbose
+    disp(input_string)
+end
\ No newline at end of file
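The helper is deliberately minimal; both branches in one illustration:

    disp_verbose('Gradient norm 0.0123', 1)   % printed
    disp_verbose('Gradient norm 0.0123', 0)   % suppressed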
diff --git a/preprocessor/DynareBison.yy b/preprocessor/DynareBison.yy
index 808fa6a76ce9537523078dd758322f8588bac177..58dfb561893589b581649d044d661ca88cd88438 100644
--- a/preprocessor/DynareBison.yy
+++ b/preprocessor/DynareBison.yy
@@ -83,7 +83,7 @@ class ParsingDriver;
 }
 
 %token AIM_SOLVER ANALYTIC_DERIVATION AR AUTOCORR TARB_MODE_COMPUTE
-%token BAYESIAN_IRF BETA_PDF BLOCK USE_CALIBRATION USE_TARB TARB_NEW_BLOCK_PROBABILITY
+%token BAYESIAN_IRF BETA_PDF BLOCK USE_CALIBRATION USE_TARB TARB_NEW_BLOCK_PROBABILITY SILENT_OPTIMIZER
 %token BVAR_DENSITY BVAR_FORECAST NODECOMPOSITION DR_DISPLAY_TOL HUGE_NUMBER
 %token BVAR_PRIOR_DECAY BVAR_PRIOR_FLAT BVAR_PRIOR_LAMBDA TARB_OPTIM
 %token BVAR_PRIOR_MU BVAR_PRIOR_OMEGA BVAR_PRIOR_TAU BVAR_PRIOR_TRAIN
@@ -1722,6 +1722,7 @@ estimation_options : o_datafile
                    | o_tarb_mode_compute
                    | o_tarb_new_block_probability
                    | o_tarb_optim
+                   | o_silent_optimizer
                    | o_proposal_distribution
                    | o_student_degrees_of_freedom
                    ;
@@ -1791,6 +1792,7 @@ osr_options : stoch_simul_primary_options
             | o_opt_algo
             | o_optim
             | o_huge_number
+            | o_silent_optimizer
             ;
 
 osr : OSR ';'
@@ -2917,6 +2919,7 @@ o_equations : EQUATIONS EQUAL vec_int
 o_use_tarb : USE_TARB { driver.option_num("TaRB.use_TaRB", "1"); };
 o_tarb_mode_compute : TARB_MODE_COMPUTE EQUAL INT_NUMBER { driver.option_num("TaRB.mode_compute", $3); };
 o_tarb_new_block_probability : TARB_NEW_BLOCK_PROBABILITY EQUAL non_negative_number {driver.option_num("TaRB.new_block_probability",$3); };
+o_silent_optimizer : SILENT_OPTIMIZER { driver.option_num("silent_optimizer", "1"); };
 o_instruments : INSTRUMENTS EQUAL '(' symbol_list ')' {driver.option_symbol_list("instruments"); };
 
 o_ext_func_name : EXT_FUNC_NAME EQUAL filename { driver.external_function_option("name", $3); };
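For reference, the parser action above makes the generated driver file perform the same assignment that the test file below writes out by hand, i.e. the emitted MATLAB is simply:

    options_.silent_optimizer = 1;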
diff --git a/preprocessor/DynareFlex.ll b/preprocessor/DynareFlex.ll
index 56e8f941437fca28a87884870ddde9767df9a8ad..430c66a79ad3ea00f17bcad76e9af1afbce4d486 100644
--- a/preprocessor/DynareFlex.ll
+++ b/preprocessor/DynareFlex.ll
@@ -576,6 +576,7 @@ DATE -?[0-9]+([YyAa]|[Mm]([1-9]|1[0-2])|[Qq][1-4]|[Ww]([1-9]{1}|[1-4][0-9]|5[0-2
 <DYNARE_STATEMENT>tarb_mode_compute {return token::TARB_MODE_COMPUTE;}
 <DYNARE_STATEMENT>tarb_new_block_probability {return token::TARB_NEW_BLOCK_PROBABILITY;}
 <DYNARE_STATEMENT>tarb_optim {return token::TARB_OPTIM;}
+<DYNARE_STATEMENT>silent_optimizer {return token::SILENT_OPTIMIZER;}
 <DYNARE_STATEMENT>lmmcp {return token::LMMCP;}
 <DYNARE_STATEMENT>occbin {return token::OCCBIN;}
 
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 3cec1bb7b3ccfab1e4f904d73b2169404ce79333..5b07cba6da8ee26e424e38bb3c68c3b8aea3b141 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -4,6 +4,7 @@ MODFILES = \
 	estimation/fs2000_calibrated_covariance.mod \
 	estimation/MH_recover/fs2000_recover.mod \
 	estimation/t_proposal/fs2000_student.mod \
+	estimation/TaRB/fs2000_tarb.mod \
 	gsa/ls2003.mod \
 	gsa/ls2003a.mod \
 	gsa/cod_ML_morris/cod_ML_morris.mod \
diff --git a/tests/estimation/TaRB/fs2000_tarb.mod b/tests/estimation/TaRB/fs2000_tarb.mod
new file mode 100644
index 0000000000000000000000000000000000000000..a3941d5a61d20fb99bc3ee3967dca09b40701307
--- /dev/null
+++ b/tests/estimation/TaRB/fs2000_tarb.mod
@@ -0,0 +1,122 @@
+/*
+ * This file replicates the estimation of the cash in advance model described in
+ * Frank Schorfheide (2000): "Loss function-based evaluation of DSGE models",
+ * Journal of Applied Econometrics, 15(6), 645-670.
+ *
+ * The data are in file "fsdat_simul.m", and have been artificially generated.
+ * They are therefore different from the original dataset used by Schorfheide.
+ *
+ * The equations are taken from J. Nason and T. Cogley (1994): "Testing the
+ * implications of long-run neutrality for monetary business cycle models",
+ * Journal of Applied Econometrics, 9, S37-S70.
+ * Note that there is an initial minus sign missing in equation (A1), p. S63.
+ *
+ * This implementation was written by Michel Juillard. Please note that the
+ * following copyright notice only applies to this Dynare implementation of the
+ * model.
+ */
+
+/*
+ * Copyright (C) 2004-2010 Dynare Team
+ *
+ * This file is part of Dynare.
+ *
+ * Dynare is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Dynare is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+var m P c e W R k d n l gy_obs gp_obs y dA;
+varexo e_a e_m;
+
+parameters alp bet gam mst rho psi del;
+
+alp = 0.33;
+bet = 0.99;
+gam = 0.003;
+mst = 1.011;
+rho = 0.7;
+psi = 0.787;
+del = 0.02;
+
+model;
+dA = exp(gam+e_a);
+log(m) = (1-rho)*log(mst) + rho*log(m(-1))+e_m;
+-P/(c(+1)*P(+1)*m)+bet*P(+1)*(alp*exp(-alp*(gam+log(e(+1))))*k^(alp-1)*n(+1)^(1-alp)+(1-del)*exp(-(gam+log(e(+1)))))/(c(+2)*P(+2)*m(+1))=0;
+W = l/n;
+-(psi/(1-psi))*(c*P/(1-n))+l/n = 0;
+R = P*(1-alp)*exp(-alp*(gam+e_a))*k(-1)^alp*n^(-alp)/W;
+1/(c*P)-bet*P*(1-alp)*exp(-alp*(gam+e_a))*k(-1)^alp*n^(1-alp)/(m*l*c(+1)*P(+1)) = 0;
+c+k = exp(-alp*(gam+e_a))*k(-1)^alp*n^(1-alp)+(1-del)*exp(-(gam+e_a))*k(-1);
+P*c = m;
+m-1+d = l;
+e = exp(e_a);
+y = k(-1)^alp*n^(1-alp)*exp(-alp*(gam+e_a));
+gy_obs = dA*y/y(-1);
+gp_obs = (P/P(-1))*m(-1)/dA;
+end;
+
+shocks;
+var e_a; stderr 0.014;
+var e_m; stderr 0.005;
+end;
+
+steady_state_model;
+  dA = exp(gam);
+  gst = 1/dA;
+  m = mst;
+  khst = ( (1-gst*bet*(1-del)) / (alp*gst^alp*bet) )^(1/(alp-1));
+  xist = ( ((khst*gst)^alp - (1-gst*(1-del))*khst)/mst )^(-1);
+  nust = psi*mst^2/( (1-alp)*(1-psi)*bet*gst^alp*khst^alp );
+  n  = xist/(nust+xist);
+  P  = xist + nust;
+  k  = khst*n;
+
+  l  = psi*mst*n/( (1-psi)*(1-n) );
+  c  = mst/P;
+  d  = l - mst + 1;
+  y  = k^alp*n^(1-alp)*gst^alp;
+  R  = mst/bet;
+  W  = l/n;
+  ist  = y-c;
+  q  = 1 - d;
+
+  e = 1;
+  
+  gp_obs = m/dA;
+  gy_obs = dA;
+end;
+
+steady;
+
+check;
+
+estimated_params;
+alp, beta_pdf, 0.356, 0.02;
+bet, beta_pdf, 0.993, 0.002;
+gam, normal_pdf, 0.0085, 0.003;
+mst, normal_pdf, 1.0002, 0.007;
+rho, beta_pdf, 0.129, 0.223;
+psi, beta_pdf, 0.65, 0.05;
+del, beta_pdf, 0.01, 0.005;
+stderr e_a, inv_gamma_pdf, 0.035449, inf;
+stderr e_m, inv_gamma_pdf, 0.008862, inf;
+end;
+
+varobs gp_obs gy_obs;
+
+options_.silent_optimizer=1;
+estimation(order=1, datafile='../fsdat_simul',nobs=192, loglinear, mh_replic=30, mh_nblocks=2, mh_jscale=0.5,mode_compute=4,
+use_TaRB,
+tarb_mode_compute=4,
+tarb_new_block_probability=0.3
+);
diff --git a/tests/optimizers/optimizer_function_wrapper.m b/tests/optimizers/optimizer_function_wrapper.m
index a7992bab3e980c18478d6554229aa30d2f11198e..262f3d86d527ad8a07623023202df9d111dc0122 100644
--- a/tests/optimizers/optimizer_function_wrapper.m
+++ b/tests/optimizers/optimizer_function_wrapper.m
@@ -9,8 +9,9 @@ crit = 1e-7;
 numgrad = 2; 
 epsilon = 1e-6; 
 analytic_grad=[];
-
+Verbose=1;
+Save_files=1;
 %call optimizer
 [fval,opt_par_values,grad,hessian_mat,itct,fcount,exitflag] = ...
-    csminwel1(objective_function_handle, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, varargin{:});
+    csminwel1(objective_function_handle, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, Verbose,Save_files, varargin{:});
 end
\ No newline at end of file