Commit 3bf13dd5 authored by Johannes Pfeifer

Add functionality for TaRB

parent 0769fd1c
function myoutput = TaRB_metropolis_hastings_core(myinputs,fblck,nblck,whoiam, ThisMatlab)
% function myoutput = TaRB_metropolis_hastings_core(myinputs,fblck,nblck,whoiam, ThisMatlab)
% Contains the most computationally intensive portion of code in
% random_walk_metropolis_hastings (the 'for curr_chain = fblck:nblck' loop), using the TaRB algorithm.
% The iterations of that 'for' loop are completely independent and are therefore
% suitable for parallel execution.
%
% INPUTS
% o myinputs [struct] The mandatory variables for local/remote
% parallel computing obtained from the
% random_walk_metropolis_hastings.m function.
% o fblck and nblck [integer] Indices of the first and last Metropolis-Hastings
% chains handled by this instance.
% o whoiam [integer] Index number of this CPU among all CPUs in the
% cluster (0 when the code is not run in parallel); in concurrent
% programming it allows us to distinguish between the processes
% running in parallel.
% o ThisMatlab [integer] Index number of this slave machine in the cluster;
% allows us to distinguish between the 'main' Matlab and the
% local or remote slave Matlab workers.
% OUTPUTS
% o myoutput [struct]
% If executed without parallel, this is the original output of 'for curr_chain =
% fblck:nblck'. Otherwise, it is the portion of it computed on a specific core or
% remote machine. In this case:
% record;
% irun;
% NewFile;
% OutputFileName
%
% ALGORITHM
% Portion of Tailored Randomized Block Metropolis-Hastings proposed in
% Chib/Ramamurthy (2010): Tailored randomized block MCMC methods with
% application to DSGE models, Journal of Econometrics 155, pp. 19-38
%
% This implementation differs from the originally proposed one in the
% treatment of non-positive definite Hessians. Here we
% - replace the inverse Hessian by a small diagonal matrix if it contains NaN or Inf elements
% - use the Eskow/Schnabel generalized Cholesky factorization (chol_SE) if the inverse Hessian is not positive definite
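%
% In outline, each MH iteration of the code below proceeds as follows:
% 1) Randomly permute the parameters and split them into blocks; each parameter
%    starts a new block with probability options_.TaRB.new_block_probability.
% 2) For each block, maximize the posterior kernel over the block's parameters
%    (holding the others fixed) with options_.TaRB.mode_compute and form the
%    proposal covariance from the inverse Hessian at the block mode, its Cholesky
%    factor scaled by bayestopt_.jscale.
% 3) Draw a proposal from a multivariate normal or Student-t density centered at
%    the block mode and accept or reject it with the Metropolis-Hastings probability,
%    correcting for the asymmetry of the tailored proposal (formula (6) in
%    Chib/Ramamurthy 2010).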
%
% SPECIAL REQUIREMENTS.
% None.
%
% PARALLEL CONTEXT
% See the comments in the random_walk_metropolis_hastings.m function.
% Copyright (C) 2006-2015 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
global objective_function_penalty_base;
if nargin<4,
whoiam=0;
end
% reshape 'myinputs' for local computation.
% In order to avoid confusion in the name space, the instruction struct2local(myinputs) is replaced by:
TargetFun=myinputs.TargetFun;
ProposalFun=myinputs.ProposalFun;
xparam1=myinputs.xparam1;
mh_bounds=myinputs.mh_bounds;
last_draw=myinputs.ix2;
last_posterior=myinputs.ilogpo2;
fline=myinputs.fline;
npar=myinputs.npar;
nruns=myinputs.nruns;
NewFile=myinputs.NewFile;
MAX_nruns=myinputs.MAX_nruns;
d=myinputs.d;
InitSizeArray=myinputs.InitSizeArray;
record=myinputs.record;
dataset_ = myinputs.dataset_;
dataset_info = myinputs.dataset_info;
bayestopt_ = myinputs.bayestopt_;
estim_params_ = myinputs.estim_params_;
options_ = myinputs.options_;
M_ = myinputs.M_;
oo_ = myinputs.oo_;
% Necessary only for remote computing!
if whoiam
% initialize persistent variables in priordens()
priordens(xparam1,bayestopt_.pshape,bayestopt_.p6,bayestopt_.p7, bayestopt_.p3,bayestopt_.p4,1);
end
MetropolisFolder = CheckPath('metropolis',M_.dname);
ModelName = M_.fname;
BaseName = [MetropolisFolder filesep ModelName];
options_.lik_algo = 1;
OpenOldFile = ones(nblck,1);
%
% Now I run the (nblck-fblck+1) Metropolis-Hastings chains
%
chain_iter=0; %counter over the chains handled by this instance
for curr_chain = fblck:nblck,
chain_iter=chain_iter+1;
try
% This will not work if the master uses a random number generator not
% available in the slave (different Matlab version or
% Matlab/Octave cluster). Therefore the trap.
%
% Set the random number generator type (the seed is useless but needed by the function)
set_dynare_seed(options_.DynareRandomStreams.algo, options_.DynareRandomStreams.seed);
% Set the state of the RNG
set_dynare_random_generator_state(record.InitialSeeds(curr_chain).Unifor, record.InitialSeeds(curr_chain).Normal);
catch
% If the state set by master is incompatible with the slave, we only reseed
set_dynare_seed(options_.DynareRandomStreams.seed+curr_chain);
end
if (options_.load_mh_file~=0) && (fline(curr_chain)>1) && OpenOldFile(curr_chain) %load previous draws and likelihood
load([BaseName '_mh' int2str(NewFile(curr_chain)) '_blck' int2str(curr_chain) '.mat'])
x2 = [x2;zeros(InitSizeArray(curr_chain)-fline(curr_chain)+1,npar)];
logpo2 = [logpo2;zeros(InitSizeArray(curr_chain)-fline(curr_chain)+1,1)];
OpenOldFile(curr_chain) = 0;
else
x2 = zeros(InitSizeArray(curr_chain),npar);
logpo2 = zeros(InitSizeArray(curr_chain),1);
end
%Prepare waiting bars
if whoiam
prc0=(curr_chain-fblck)/(nblck-fblck+1)*(isoctave || options_.console_mode);
hh = dyn_waitbar({prc0,whoiam,options_.parallel(ThisMatlab)},['MH (' int2str(curr_chain) '/' int2str(options_.mh_nblck) ')...']);
else
hh = dyn_waitbar(0,['Metropolis-Hastings (' int2str(curr_chain) '/' int2str(options_.mh_nblck) ')...']);
set(hh,'Name','Metropolis-Hastings');
end
accepted_draws_this_chain = 0;
accepted_draws_this_file = 0;
blocked_draws_counter_this_chain=0;
blocked_draws_counter_this_chain_this_file=0;
draw_index_current_file = fline(curr_chain); %get location of first draw in current block
draw_iter = 1;
while draw_iter <= nruns(curr_chain)
%% randomize indices for blocking in this iteration
indices=randperm(npar)';
blocks=[1; (1+cumsum((rand(length(indices)-1,1)>(1-options_.TaRB.new_block_probability))))];
nblocks=blocks(end,1); %get number of blocks this iteration
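%Illustrative example: with npar=5 and new_block_probability=0.25, a given
%iteration might yield indices=[3;1;5;2;4] and blocks=[1;1;2;2;2], i.e. two
%blocks, the first containing parameters 3 and 1 and the second parameters 5, 2, and 4.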
current_draw=last_draw(curr_chain,:)'; %get starting point for current draw for updating
for block_iter=1:nblocks
blocked_draws_counter_this_chain=blocked_draws_counter_this_chain+1;
blocked_draws_counter_this_chain_this_file=blocked_draws_counter_this_chain_this_file+1;
nxopt=length(indices(blocks==block_iter,1)); %get size of current block
par_start_current_block=current_draw(indices(blocks==block_iter,1));
[xopt_current_block, fval, exitflag, hess_mat_optimizer, options_, Scale] = dynare_minimize_objective(@TaRB_optimizer_wrapper,par_start_current_block,options_.TaRB.mode_compute,options_,[mh_bounds.lb(indices(blocks==block_iter,1),1) mh_bounds.ub(indices(blocks==block_iter,1),1)],bayestopt_.name,bayestopt_,[],...
current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_); %inputs for objective
objective_function_penalty_base=Inf; %reset penalty that may have been changed by optimizer
%% covariance for proposal density
hessian_mat = reshape(hessian('TaRB_optimizer_wrapper',xopt_current_block, ...
options_.gstep,...
current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_),nxopt,nxopt);
if any(any(isnan(hessian_mat))) || any(any(isinf(hessian_mat)))
inverse_hessian_mat=eye(nxopt)*1e-4; %use diagonal
else
inverse_hessian_mat=inv(hessian_mat); %get inverse Hessian
if any(any((isnan(inverse_hessian_mat)))) || any(any((isinf(inverse_hessian_mat))))
inverse_hessian_mat=eye(nxopt)*1e-4; %use diagonal
end
end
[proposal_covariance_Cholesky_decomposition_upper,negeigenvalues]=cholcov(inverse_hessian_mat,0);
%if not positive definite, use the generalized Cholesky of
%Eskow/Schnabel (1991)
if negeigenvalues~=0
proposal_covariance_Cholesky_decomposition_upper=chol_SE(inverse_hessian_mat,0);
end
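%with pivoting disabled, chol_SE (see below) returns an upper triangular R and
%diagonal increments E such that inverse_hessian_mat+diag(E)=R'*R, i.e. a
%Cholesky factor of a slightly shifted, positive definite version of the matrix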
proposal_covariance_Cholesky_decomposition_upper=proposal_covariance_Cholesky_decomposition_upper*diag(bayestopt_.jscale(indices(blocks==block_iter,1),:));
%get proposal draw
if strcmpi(ProposalFun,'rand_multivariate_normal')
n = nxopt;
elseif strcmpi(ProposalFun,'rand_multivariate_student')
n = options_.student_degrees_of_freedom;
end
proposed_par = feval(ProposalFun, xopt_current_block', proposal_covariance_Cholesky_decomposition_upper, n);
% check whether draw is valid and compute posterior
if all( proposed_par(:) > mh_bounds.lb(indices(blocks==block_iter,1),:) ) && all( proposed_par(:) < mh_bounds.ub(indices(blocks==block_iter,1),:) )
try
logpost = - feval('TaRB_optimizer_wrapper', proposed_par(:),...
current_draw,indices(blocks==block_iter,1),TargetFun,...% inputs for wrapper
dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,mh_bounds,oo_);
catch
logpost = -inf;
end
else
logpost = -inf;
end
%get ratio of proposal densities, required because proposal depends
%on current mode via Hessian and is thus not symmetric anymore
if strcmpi(ProposalFun,'rand_multivariate_normal')
proposal_density_proposed_move_forward=multivariate_normal_pdf(proposed_par,xopt_current_block',proposal_covariance_Cholesky_decomposition_upper,n);
proposal_density_proposed_move_backward=multivariate_normal_pdf(par_start_current_block',xopt_current_block',proposal_covariance_Cholesky_decomposition_upper,n);
elseif strcmpi(ProposalFun,'rand_multivariate_student')
proposal_density_proposed_move_forward=multivariate_student_pdf(proposed_par,xopt_current_block',proposal_covariance_Cholesky_decomposition_upper,n);
proposal_density_proposed_move_backward=multivariate_student_pdf(par_start_current_block',xopt_current_block',proposal_covariance_Cholesky_decomposition_upper,n);
end
accprob=logpost-last_posterior(curr_chain)+ log(proposal_density_proposed_move_backward)-log(proposal_density_proposed_move_forward); %Formula (6), Chib/Ramamurthy
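%i.e. the log of [posterior(proposed)*q(current value)]/[posterior(current)*q(proposed value)],
%where both proposal densities q(.) are centered at the tailored block mode; the
%min(1,.) of the acceptance rule is implicit in the comparison with log(rand) below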
if (logpost > -inf) && (log(rand) < accprob)
current_draw(indices(blocks==block_iter,1))=proposed_par;
last_posterior(curr_chain)=logpost;
accepted_draws_this_chain =accepted_draws_this_chain +1;
accepted_draws_this_file = accepted_draws_this_file + 1;
else %no updating
%do nothing, keep old value
end
end
%save draws and update stored last values
x2(draw_index_current_file,:) = current_draw;
last_draw(curr_chain,:) = current_draw;
%save posterior after full run through all blocks
logpo2(draw_index_current_file) = last_posterior(curr_chain);
prtfrc = draw_iter/nruns(curr_chain);
if (mod(draw_iter, 3)==0 && ~whoiam) || (mod(draw_iter,50)==0 && whoiam)
dyn_waitbar(prtfrc,hh,[ 'MH (' int2str(curr_chain) '/' int2str(options_.mh_nblck) ') ' sprintf('Current acceptance ratio %4.3f', accepted_draws_this_chain/blocked_draws_counter_this_chain)]);
end
if (draw_index_current_file == InitSizeArray(curr_chain)) || (draw_iter == nruns(curr_chain)) % Now I save the simulations, either because the current file is full or the chain is done
save([BaseName '_mh' int2str(NewFile(curr_chain)) '_blck' int2str(curr_chain) '.mat'],'x2','logpo2');
fidlog = fopen([MetropolisFolder '/metropolis.log'],'a');
fprintf(fidlog,['\n']);
fprintf(fidlog,['%% Mh' int2str(NewFile(curr_chain)) 'Blck' int2str(curr_chain) ' (' datestr(now,0) ')\n']);
fprintf(fidlog,' \n');
fprintf(fidlog,[' Number of simulations.: ' int2str(length(logpo2)) '\n']);
fprintf(fidlog,[' Acceptance ratio......: ' num2str(accepted_draws_this_file /blocked_draws_counter_this_chain_this_file) '\n']);
fprintf(fidlog,[' Posterior mean........:\n']);
for i=1:length(x2(1,:))
fprintf(fidlog,[' params:' int2str(i) ': ' num2str(mean(x2(:,i))) '\n']);
end
fprintf(fidlog,[' log2po:' num2str(mean(logpo2)) '\n']);
fprintf(fidlog,[' Minimum value.........:\n']);
for i=1:length(x2(1,:))
fprintf(fidlog,[' params:' int2str(i) ': ' num2str(min(x2(:,i))) '\n']);
end
fprintf(fidlog,[' log2po:' num2str(min(logpo2)) '\n']);
fprintf(fidlog,[' Maximum value.........:\n']);
for i=1:length(x2(1,:))
fprintf(fidlog,[' params:' int2str(i) ': ' num2str(max(x2(:,i))) '\n']);
end
fprintf(fidlog,[' log2po:' num2str(max(logpo2)) '\n']);
fprintf(fidlog,' \n');
fclose(fidlog);
%reset counters;
accepted_draws_this_file = 0;
blocked_draws_counter_this_chain_this_file=0;
if draw_iter == nruns(curr_chain) % I record the last draw...
record.LastParameters(curr_chain,:) = x2(end,:);
record.LastLogPost(curr_chain) = logpo2(end);
end
% size of next file in chain curr_chain
InitSizeArray(curr_chain) = min(nruns(curr_chain)-draw_iter,MAX_nruns);
% initialization of next file if necessary
if InitSizeArray(curr_chain)
x2 = zeros(InitSizeArray(curr_chain),npar);
logpo2 = zeros(InitSizeArray(curr_chain),1);
NewFile(curr_chain) = NewFile(curr_chain) + 1;
draw_index_current_file = 0;
end
end
draw_iter=draw_iter+1;
draw_index_current_file = draw_index_current_file + 1;
end% End of the simulations for one mh-block.
record.AcceptanceRatio(curr_chain) = accepted_draws_this_chain/blocked_draws_counter_this_chain;
dyn_waitbar_close(hh);
[record.LastSeeds(curr_chain).Unifor, record.LastSeeds(curr_chain).Normal] = get_dynare_random_generator_state();
OutputFileName(chain_iter,:) = {[MetropolisFolder,filesep], [ModelName '_mh*_blck' int2str(curr_chain) '.mat']};
end% End of the loop over the mh-blocks.
myoutput.record = record;
myoutput.irun = draw_index_current_file;
myoutput.NewFile = NewFile;
myoutput.OutputFileName = OutputFileName;
function [fval,DLIK,Hess,exit_flag] = TaRB_optimizer_wrapper(optpar,par_vector,parameterindices,TargetFun,varargin)
% function [fval,DLIK,Hess,exit_flag] = TaRB_optimizer_wrapper(optpar,par_vector,parameterindices,TargetFun,varargin)
% Wrapper function for target function used in TaRB algorithm; reassembles
% full parameter vector before calling target function
%
% INPUTS
% o optpar [double] (p_opt*1) vector of subset of parameters to be considered
% o par_vector [double] (p*1) full vector of parameters
% o parameterindices [double] (p_opt*1) index of optpar entries in
% par_vector
% o TargetFun [char] string specifying the name of the objective
% function (posterior kernel).
% o varargin [structure] other inputs of target function
%
% OUTPUTS
% o fval [scalar] value of minus the objective function (posterior kernel).
% o DLIK [double] (p*1) score vector of the likelihood.
% o Hess [double] (p*p) asymptotic Hessian matrix.
% o exit_flag [scalar] equal to zero if the routine returns with a penalty (one otherwise).
%
% Copyright (C) 2015 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
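% Illustrative example: if par_vector has 5 elements and parameterindices=[2;4],
% then optpar holds candidate values for parameters 2 and 4 only; they are written
% back into par_vector below before TargetFun is evaluated on the full vector.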
par_vector(parameterindices,:)=optpar; %reassemble parameter
[fval,DLIK,Hess,exit_flag] = feval(TargetFun,par_vector,varargin{:}); %call target function
function [R,indef, E, P]=chol_SE(A,pivoting)
% [R,indef, E, P]=chol_SE(A,pivoting)
% Performs a (modified) Cholesky factorization of the form
%
% P'*A*P + E = R'*R
%
% As detailed in Schnabel/Eskow (1990), the factorization has 2 phases:
% Phase 1: While A can still be positive definite, pivot on the maximum diagonal element.
% Check that the standard Cholesky update would result in a positive diagonal
% at the current iteration. If so, continue with the normal Cholesky update.
% Otherwise switch to phase 2.
% If A is safely positive definite, stage 1 is never left, resulting in
% the standard Cholesky decomposition.
%
% Phase 2: Pivot on the minimum of the negatives of the lower Gershgorin bound
% estimates. To prevent negative diagonals, compute the amount to add to the
% pivot element and add this. Then, do the Cholesky update and update the estimates of the
% Gershgorin bounds.
%
% Notes:
% - During factorization, L=R' is stored in the lower triangle of the original matrix A,
% minimizing the memory requirements
% - Conforming with the original Schnabel/Eskow (1990) algorithm:
% - at each iteration the updated Gershgorin bounds are estimated instead of recomputed,
% reducing the computational requirements from O(n^3) to O(n^2)
% - For the last 2 by 2 submatrix, an eigenvalue-based decomposition is used
% - While pivoting is not necessary, it reduces the size of E, the add-on to the diagonal. But this comes at
% the cost of introducing a permutation.
%
%
% Inputs
% A [n*n] Matrix to be factorized
% pivoting [scalar] indicator whether pivoting is used (1) or not (0)
%
% Outputs
% R [n*n] upper triangular Cholesky factor; during the computation it is stored (as its
% transpose) in the lower triangular portion of A, including the main diagonal
%
% E [n*1] Elements added to the diagonal of A
% P [n*n] permutation matrix recording how the rows and columns of the matrix were permuted while
% performing the decomposition
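%
% Usage sketch (illustrative): for a symmetric matrix H that may not be positive definite,
%   [R,indef,E,P] = chol_SE(H,1);
% returns factors satisfying the relation documented above, P'*H*P + diag(E) = R'*R,
% so R can be used like an ordinary Cholesky factor of a shifted, permuted H.
% With pivoting=0, P is the identity and the relation reduces to H + diag(E) = R'*R.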
%
% REFERENCES:
% This implementation is based on
%
% Robert B. Schnabel and Elizabeth Eskow. 1990. "A New Modified Cholesky
% Factorization," SIAM Journal on Scientific and Statistical Computing,
% 11(6): 1136-1158.
%
% Elizabeth Eskow and Robert B. Schnabel. 1991. "Algorithm 695 - Software for a New Modified Cholesky
% Factorization," ACM Transactions on Mathematical Software, 17(3): 306-312.
%
%
% Author: Johannes Pfeifer based on Eskow/Schnabel (1991)
%
% Copyright (C) 2015 Johannes Pfeifer
% Copyright (C) 2015 Dynare Team
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare. If not, see <http://www.gnu.org/licenses/>.
if sum(sum(abs(A-A'))) > 0
error('A is not symmetric')
end
if nargin==1
pivoting=0;
end
n=size(A,1);
tau1=eps^(1/3); %tolerance parameter for determining when to switch to phase 2
tau2=eps^(1/3); %tolerance used for determining the maximum condition number of the final 2X2 submatrix.
phase1 = 1;
delta = 0;
P=1:n;
g=zeros(n,1);
E=zeros(n,1);
% Find the maximum magnitude of the diagonal elements. If any diagonal element is negative, then phase1 is false.
gammma=max(diag(A));
if any(diag(A) < 0)
phase1 = 0;
end
taugam = tau1*gammma;
% If not in phase1, then calculate the initial Gershgorin bounds needed for the start of phase2.
if ~phase1
g=gersh_nested(A,1,n);
end
% check for n=1
if n==1
delta = tau2*abs(A(1,1)) - A(1,1);
if delta > 0
E(1) = delta;
end
if A(1,1) == 0
E(1) = tau2;
end
A(1,1)=sqrt(A(1,1)+E(1));
end
for j = 1:n-1
% PHASE 1
if phase1
if pivoting==1
% Find index of maximum diagonal element A(i,i) where i>=j
[tmp,imaxd] = max(diag(A(j:n,j:n)));
imaxd=imaxd+j-1;
% Pivot to the top the row and column with the max diag
if (imaxd ~= j)
% Swap row j with row of max diag
for ii = 1:j-1
temp = A(j,ii);
A(j,ii) = A(imaxd,ii);
A(imaxd,ii) = temp;
end
% Swap colj and row maxdiag between j and maxdiag
for ii = j+1:imaxd-1
temp = A(ii,j);
A(ii,j) = A(imaxd,ii);
A(imaxd,ii) = temp;
end
% Swap column j with column of max diag
for ii = imaxd+1:n
temp = A(ii,j);
A(ii,j) = A(ii,imaxd);
A(ii,imaxd) = temp;
end
% Swap diag elements
temp = A(j,j);
A(j,j) = A(imaxd,imaxd);
A(imaxd,imaxd) = temp;
% Swap elements of the permutation vector
itemp = P(j);
P(j) = P(imaxd);
P(imaxd) = itemp;
end
end
% check to see whether the normal Cholesky update for this
% iteration would result in a positive diagonal,
% and if not then switch to phase 2.
jp1 = j+1;
if A(j,j)>0
if min((diag(A(j+1:n,j+1:n)) - A(j+1:n,j).^2/A(j,j))) < taugam %test whether phase 2 is required
phase1=0;
end
else
phase1 = 0;
end
if phase1
% Do the normal cholesky update if still in phase 1
A(j,j) = sqrt(A(j,j));
tempjj = A(j,j);
for ii = jp1: n
A(ii,j) = A(ii,j)/tempjj;
end
for ii=jp1:n
temp=A(ii,j);
for k = jp1:ii
A(ii,k) = A(ii,k)-(temp * A(k,j));
end
end
if j == n-1
A(n,n)=sqrt(A(n,n));
end
else
% Calculate the negatives of the lower Gershgorin bounds
g=gersh_nested(A,j,n);
end
end
% PHASE 2
if ~phase1
if j ~= n-1
if pivoting
% Find the minimum negative Gershgorin bound
[tmp,iming] = min(g(j:n));
iming=iming+j-1;
% Pivot to the top the row and column with the
% minimum negative Gershgorin bound
if iming ~= j
% Swap row j with row of min Gershgorin bound
for ii = 1:j-1
temp = A(j,ii);
A(j,ii) = A(iming,ii);
A(iming,ii) = temp;
end
% Swap col j with row iming from j to iming
for ii = j+1:iming-1
temp = A(ii,j);
A(ii,j) = A(iming,ii);
A(iming,ii) = temp;
end
% Swap column j with column of min Gershgorin bound
for ii = iming+1:n
temp = A(ii,j);
A(ii,j) = A(ii,iming);
A(ii,iming) = temp;
end
% Swap diagonal elements
temp = A(j,j);
A(j,j) = A(iming,iming);
A(iming,iming) = temp;
% Swap elements of the permutation vector
itemp = P(j);
P(j) = P(iming);
P(iming) = itemp;
% Swap elements of the negative Gershgorin bounds vector
temp = g(j);
g(j) = g(iming);
g(iming) = temp;
end
end
% Calculate delta and add to the diagonal. delta=max{0,-A(j,j) + max{normj,taugam},delta_previous}
% where normj=sum of |A(i,j)|,for i=1,n, delta_previous is the delta computed at the previous iter and taugam is tau1*gamma.
normj=sum(abs(A(j+1:n,j)));
delta = max([0;delta;-A(j,j)+normj;-A(j,j)+taugam]); % get adjustment based on formula on bottom of p. 309 of Eskow/Schnabel (1991)
E(j) = delta;
A(j,j) = A(j,j) + E(j);
% Update the Gershgorin bound estimates (note: g(i) is the negative of the Gershgorin lower bound.)
if A(j,j) ~= normj
temp = (normj/A(j,j)) - 1;
for ii = j+1:n
g(ii) = g(ii) + abs(A(ii,j)) * temp;
end
end
% Do the cholesky update
A(j,j) = sqrt(A(j,j));
tempjj = A(j,j);
for ii = j+1:n
A(ii,j) = A(ii,j) / tempjj;
end
for ii = j+1:n
temp = A(ii,j);
for k = j+1: ii
A(ii,k) = A(ii,k) - (temp * A(k,j));
end
end
else
% Find eigenvalues of final 2 by 2 submatrix
% Find delta such that:
% 1. the l2 condition number of the final 2X2 submatrix + delta*I <= tau2
% 2. delta >= previous delta,
% 3. min(eigvals) + delta >= tau2 * gamma, where min(eigvals) is the smallest eigenvalue of the final 2X2 submatrix
A(n-1,n)=A(n,n-1); %set value above diagonal for computation of eigenvalues
eigvals = eig(A(n-1:n,n-1:n));
delta = max([ 0 ; delta ; -min(eigvals)+tau2*max((max(eigvals)-min(eigvals))/(1-tau1),gammma) ]); %Formula 5.3.2 of Schnabel/Eskow (1990)
if delta > 0
A(n-1,n-1) = A(n-1,n-1) + delta;
A(n,n) = A(n,n) + delta;
E(n-1) = delta;
E(n) = delta;
end
% Final update
A(n-1,n-1) = sqrt(A(n-1,n-1));
A(n,n-1) = A(n,n-1)/A(n-1,n-1);
A(n,n) = A(n,n) - (A(n,n-1)^2);
A(n,n) = sqrt(A(n,n));
end
end
end
R=(tril(A))';
indef=~phase1;
Pprod=zeros(n,n);
Pprod(sub2ind([n,n],P,1:n))=1;
P=Pprod;
end
function g=gersh_nested(A,j,n)
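% Returns the negatives of the lower Gershgorin bound estimates for rows j..n of the
% partially factorized matrix: g(ii) = sum of the absolute off-diagonal elements in
% row/column ii of the not-yet-factorized part, minus the diagonal element A(ii,ii).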
g=zeros(n,1);
for ii = j:n
if ii == 1
sum_up_to_i = 0;
else
sum_up_to_i = sum(abs(A(ii,j:(ii-1))));
end
if ii == n
sum_after_i = 0;
else
sum_after_i = sum(abs(A((ii+1):n,ii)));
end
g(ii) = sum_up_to_i + sum_after_i - A(ii,ii);
end
end
@@ -416,7 +416,14 @@ options_.recursive_estimation_restart = 0;
options_.MCMC_jumping_covariance='hessian';
options_.use_calibration_initialization = 0;
options_.endo_vars_for_moment_computations_in_estimation=[];
% Tailored Random Block Metropolis-Hastings
options_.TaRB.use_TaRB = 0;
options_.TaRB.mode_compute=4;
options_.TaRB.new_block_probability=0.25; %probability that next parameter belongs to new block
% Run optimizer silently
options_.silent_optimizer=0;
% Prior restrictions
options_.prior_restrictions.status = 0;
...
@@ -73,6 +73,9 @@ switch minimizer_algorithm
if ~isempty(options_.optim_opt)
eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
end
if options_.silent_optimizer
optim_options = optimset(optim_options,'display','off');
end
if options_.analytic_derivation,
optim_options = optimset(optim_options,'GradObj','on','TolX',1e-7);
end
@@ -110,6 +113,9 @@ switch minimizer_algorithm
end
end
end
if options_.silent_optimizer
sa_options.verbosity = 0;
end
npar=length(start_par_value);
[LB, UB]=set_bounds_to_finite_values(bounds, options_.huge_number);
if sa_options.verbosity
@@ -140,6 +146,9 @@ switch minimizer_algorithm
if options_.analytic_derivation,
optim_options = optimset(optim_options,'GradObj','on');
end
if options_.silent_optimizer
optim_options = optimset(optim_options,'display','off');
end
if ~isoctave
[opt_par_values,fval,exitflag] = fminunc(objective_function,start_par_value,optim_options,varargin{:});
else
@@ -180,6 +189,10 @@ switch minimizer_algorithm
end
end
end
if options_.silent_optimizer
Save_files = 0;
Verbose = 0;
end
% Set flag for analytical gradient.
if options_.analytic_derivation
analytic_grad=1;
@@ -227,6 +240,10 @@ switch minimizer_algorithm
end
end
end
if options_.silent_optimizer
Save_files = 0;
Verbose = 0;
end
[opt_par_values,hessian_mat,gg,fval,invhess] = newrat(objective_function,start_par_value,analytic_grad,crit,nit,0,Verbose, Save_files,varargin{:});
%hessian_mat is the plain outer product gradient Hessian
case 6
@@ -243,6 +260,9 @@ switch minimizer_algorithm
if ~isempty(options_.optim_opt)
eval(['optim_options = optimset(optim_options,' options_.optim_opt ');']);
end
if options_.silent_optimizer
optim_options = optimset(optim_options,'display','off');
end
if ~isoctave
[opt_par_values,fval,exitflag] = fminsearch(objective_function,start_par_value,optim_options,varargin{:});
else
@@ -276,6 +296,9 @@ switch minimizer_algorithm
end
end
end
if options_.silent_optimizer
simplexOptions.verbose = 0;
end
[opt_par_values,fval,exitflag] = simplex_optimization_routine(objective_function,start_par_value,simplexOptions,parameter_names,varargin{:});
case 9
% Set defaults
@@ -310,6 +333,13 @@ switch minimizer_algorithm
end
end
end
if options_.silent_optimizer
cmaesOptions.DispFinal = 'off'; % display messages like initial and final message';
cmaesOptions.DispModulo = '0'; % [0:Inf], disp messages after every i-th iteration';
cmaesOptions.SaveVariables='off';
cmaesOptions.LogModulo = '0'; % [0:Inf] if >1 record data less frequently after gen=100';
cmaesOptions.LogTime = '0'; % [0:100] max. percentage of time for recording data';
end
warning('off','CMAES:NonfinitenessRange');
warning('off','CMAES:InitialSigma');
[x, fval, COUNTEVAL, STOPFLAG, OUT, BESTEVER] = cmaes(func2str(objective_function),start_par_value,H0,cmaesOptions,varargin{:});
@@ -347,6 +377,9 @@ switch minimizer_algorithm
end
end
end
if options_.silent_optimizer
simpsaOptions.DISPLAY = 'none';
end
simpsaOptionsList = options2cell(simpsaOptions);
simpsaOptions = simpsaset(simpsaOptionsList{:});
[LB, UB]=set_bounds_to_finite_values(bounds, options_.huge_number);
@@ -377,6 +410,9 @@ switch minimizer_algorithm
end
end
end
if options_.silent_optimizer
solveoptoptions.verbosity = 0;
end
[opt_par_values,fval]=solvopt(start_par_value,objective_function,[],[],[],solveoptoptions,varargin{:});
case 102
if isoctave
@@ -389,6 +425,9 @@ switch minimizer_algorithm
if ~isempty(options_.optim_opt)
eval(['optim_options = saoptimset(optim_options,' options_.optim_opt ');']);
end
if options_.silent_optimizer
optim_options = optimset(optim_options,'display','off');
end
func = @(x)objective_function(x,varargin{:});
[opt_par_values,fval,exitflag,output] = simulannealbnd(func,start_par_value,bounds(:,1),bounds(:,2),optim_options);
otherwise
...
@@ -87,6 +87,10 @@ load_last_mh_history_file(MetropolisFolder, ModelName);
% on many cores). The mandatory variables for local/remote parallel
% computing are stored in the localVars struct.
if options_.TaRB.use_TaRB
options_.silent_optimizer=1; %locally set optimizer to silent mode
end
localVars = struct('TargetFun', TargetFun, ...
'ProposalFun', ProposalFun, ...
'xparam1', xparam1, ...
@@ -117,9 +121,12 @@ localVars = struct('TargetFun', TargetFun, ...
% single chain compute Random walk Metropolis-Hastings algorithm sequentially.
if isnumeric(options_.parallel) || (nblck-fblck)==0,
if options_.TaRB.use_TaRB
fout = TaRB_metropolis_hastings_core(localVars, fblck, nblck, 0);
else
fout = random_walk_metropolis_hastings_core(localVars, fblck, nblck, 0);
end
record = fout.record;
% Parallel in Local or remote machine.
else
% Global variables for parallel routines.
@@ -138,7 +145,11 @@ else
end
% from where to get back results
% NamFileOutput(1,:) = {[M_.dname,'/metropolis/'],'*.*'};
if options_.TaRB.use_TaRB
[fout, nBlockPerCPU, totCPU] = masterParallel(options_.parallel, fblck, nblck,NamFileInput,'TaRB_metropolis_hastings_core', localVars, globalVars, options_.parallel_info);
else
[fout, nBlockPerCPU, totCPU] = masterParallel(options_.parallel, fblck, nblck,NamFileInput,'random_walk_metropolis_hastings_core', localVars, globalVars, options_.parallel_info);
end
for j=1:totCPU,
offset = sum(nBlockPerCPU(1:j-1))+fblck-1;
record.LastLogPost(offset+1:sum(nBlockPerCPU(1:j)))=fout(j).record.LastLogPost(offset+1:sum(nBlockPerCPU(1:j)));
...