function [alphahat,etahat,epsilonhat,ahat,SteadyState,trend_coeff,aK,T,R,P,PK,decomp,trend_addition,state_uncertainty,M_,oo_,options_,bayestopt_] = DsgeSmoother(xparam1,gend,Y,data_index,missing_value,M_,oo_,options_,bayestopt_,estim_params_)
% Estimation of the smoothed variables and innovations.
%
% INPUTS
%   o xparam1       [double]    (p*1) vector of (estimated) parameters.
%   o gend          [integer]   scalar specifying the number of observations ==> varargin{1}.
%   o Y             [double]    (n*T) matrix of data.
%   o data_index    [cell]      1*smpl cell of column vectors of indices.
%   o missing_value [boolean]   1 if missing values, 0 otherwise
%   o M_            [structure] describing the model
%   o oo_           [structure] storing the results
%   o options_      [structure] describing the options
%   o bayestopt_    [structure] describing the priors
%   o estim_params_ [structure] characterizing parameters to be estimated
%
% OUTPUTS
%   o alphahat      [double]  (m*T) matrix, smoothed endogenous variables (a_{t|T}) (decision-rule order)
%   o etahat        [double]  (r*T) matrix, smoothed structural shocks (r>=n is the number of shocks).
%   o epsilonhat    [double]  (n*T) matrix, smoothed measurement errors.
%   o ahat          [double]  (m*T) matrix, updated (endogenous) variables (a_{t|t}) (decision-rule order)
%   o SteadyState   [double]  (m*1) vector specifying the steady state level of each endogenous variable (declaration order)
%   o trend_coeff   [double]  (n*1) vector, parameters specifying the slope of the trend associated with each observed variable.
%   o aK            [double]  (K,n,T+K) array, k (k=1,...,K) steps ahead
%                                 filtered (endogenous) variables (decision-rule order)
%   o T and R       [double]  matrices defining the state equation (T is the (m*m) transition matrix).
%   o P             [double]  (m*m*(T+1)) 3D array of one-step ahead forecast error variance
%                                 matrices (decision-rule order)
%   o PK            [double]  (K*m*m*(T+K)) 4D array of k-step ahead forecast error variance
%                                 matrices (meaningless for periods 1:d) (decision-rule order)
%   o decomp        [double]  (K*m*r*(T+K)) 4D array of shock decompositions of the k-step ahead
%                                 filtered variables (decision-rule order)
%   o trend_addition [double] (n*T) matrix, pure trend component; stored in options_.varobs order
%   o state_uncertainty [double] (K,K,T) array, storing the uncertainty
%                                 about the smoothed state (decision-rule order)
%   o M_            [structure] describing the model
%   o oo_           [structure] storing the results
%   o options_      [structure] describing the options
%   o bayestopt_    [structure] describing the priors
%
% Notes:
%   m:  number of endogenous variables (M_.endo_nbr)
%   T:  number of time periods (options_.nobs)
%   r:  number of structural shocks (M_.exo_nbr)
%   n:  number of observables (length(options_.varobs))
%   K:  maximum forecast horizon (max(options_.nk))
%
%   To get variables that are stored in decision rule order in order of declaration
%   as in M_.endo_names, one needs code along the lines of:
%   variables_declaration_order(dr.order_var,:) = alphahat
%
%   Sets bayestopt_.mf to the positions of the observed variables and requested
%   smoothed variables in the decision rules (decision-rule order), i.e.
%   bayestopt_.smoother_var_list(bayestopt_.smoother_mf), and passes it back
%   via the returned bayestopt_ structure
%
% ALGORITHM
%   Diffuse Kalman filter (Durbin and Koopman)
%
% SPECIAL REQUIREMENTS
%   None

% Copyright (C) 2006-2017 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.

alphahat      = [];
etahat        = [];
epsilonhat    = [];
ahat          = [];
SteadyState   = [];
trend_coeff   = [];
aK            = [];
T             = [];
R             = [];
P             = [];
PK            = [];
decomp        = [];
vobs          = length(options_.varobs);
smpl          = size(Y,2);

if ~isempty(xparam1) %not calibrated model
    M_ = set_all_parameters(xparam1,estim_params_,M_);
end

%------------------------------------------------------------------------------
% 2. call model setup & reduction program
%------------------------------------------------------------------------------
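% Temporarily restrict the decision rule to the smoother variable list before calling
% dynare_resolve; the original restrict_var_list/restrict_columns settings are restored below.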
oldoo.restrict_var_list = oo_.dr.restrict_var_list;
oldoo.restrict_columns = oo_.dr.restrict_columns;
oo_.dr.restrict_var_list = bayestopt_.smoother_var_list;
oo_.dr.restrict_columns = bayestopt_.smoother_restrict_columns;

[T,R,SteadyState,info,M_,options_,oo_] = dynare_resolve(M_,options_,oo_);

if info~=0
    print_info(info,options_.noprint, options_);
    return
end
oo_.dr.restrict_var_list = oldoo.restrict_var_list;
oo_.dr.restrict_columns = oldoo.restrict_columns;

%get location of observed variables and requested smoothed variables in
%decision rules
bayestopt_.mf = bayestopt_.smoother_var_list(bayestopt_.smoother_mf);
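% constant of the measurement equation: steady state of the observables
% (in logs if options_.loglinear is set), unless options_.noconstant is requested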
if options_.noconstant
    constant = zeros(vobs,1);
else
    if options_.loglinear
        constant = log(SteadyState(bayestopt_.mfys));
    else
        constant = SteadyState(bayestopt_.mfys);
    end
end
trend_coeff = zeros(vobs,1);
if bayestopt_.with_trend == 1
    [trend_addition, trend_coeff] =compute_trend_coefficients(M_,options_,vobs,gend);
    trend = constant*ones(1,gend)+trend_addition;
else
    trend_addition=zeros(size(constant,1),gend);
    trend = constant*ones(1,gend);
end
start = options_.presample+1;
np    = size(T,1);
mf    = bayestopt_.mf;
% ------------------------------------------------------------------------------
%  3. Initial condition of the Kalman filter
% ------------------------------------------------------------------------------
%
%  Here, Pinf and Pstar are determined. If the model is stationary, determine
%  Pstar as the solution of the Lyapunov equation and set Pinf=[] (Notation follows
%  Koopman/Durbin (2003), Journal of Time Series Analysis 24(1))
%
Q = M_.Sigma_e;
H = M_.H;

if isequal(H,0)
    H = zeros(vobs,vobs);
end

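% selection matrix Z of the measurement equation: picks the observed variables
% (positions bayestopt_.mf) out of the state vector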
Z = zeros(vobs,size(T,2));
for i=1:vobs
    Z(i,mf(i)) = 1;
end

expanded_state_vector_for_univariate_filter=0;
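% kalman_algo: 1/3 select the multivariate (standard/diffuse) smoother, 2/4 the
% corresponding univariate versions; options_.lik_init below governs how the initial
% state covariance (Pstar, plus Pinf in the diffuse case) is set up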
kalman_algo = options_.kalman_algo;
if options_.lik_init == 1               % Kalman filter
    if kalman_algo ~= 2
        kalman_algo = 1;
    end
    Pstar=lyapunov_solver(T,R,Q,options_);
    Pinf        = [];
elseif options_.lik_init == 2           % Old Diffuse Kalman filter
    if kalman_algo ~= 2
        kalman_algo = 1;
    end
    Pstar = options_.Harvey_scale_factor*eye(np);
    Pinf        = [];
elseif options_.lik_init == 3           % Diffuse Kalman filter
    if kalman_algo ~= 4
        kalman_algo = 3;
    else
        if ~all(all(abs(H-diag(diag(H)))<1e-14)) % i.e., the measurement error covariance matrix is not diagonal...
                                                 % Augment state vector (follows Section 6.4.3 of DK (2012))
            expanded_state_vector_for_univariate_filter=1;
            T  = blkdiag(T,zeros(vobs));
            np    = size(T,1);
            Q   = blkdiag(Q,H);
            R  = blkdiag(R,eye(vobs));
            H   = zeros(vobs,vobs);
            Z   = [Z, eye(vobs)];
        end
    end
    [Pstar,Pinf] = compute_Pinf_Pstar(mf,T,R,Q,options_.qz_criterium,oo_.dr.restrict_var_list);
elseif options_.lik_init == 4           % Start from the solution of the Riccati equation.
    [err, Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(mf,np,vobs)),H);
    mexErrCheck('kalman_steady_state',err);
    Pinf  = [];
    if kalman_algo~=2
        kalman_algo = 1;
    end
elseif options_.lik_init == 5            % Old diffuse Kalman filter only for the non-stationary variables
    [eigenvect, eigenv] = eig(T);
    eigenv = diag(eigenv);
    nstable = length(find(abs(abs(eigenv)-1) > 1e-7));
    unstable = find(abs(abs(eigenv)-1) < 1e-7);
    V = eigenvect(:,unstable);
    indx_unstable = find(sum(abs(V),2)>1e-5);
    stable = find(sum(abs(V),2)<1e-5);
    nunit = length(eigenv) - nstable;
    Pstar = options_.Harvey_scale_factor*eye(np);
    if kalman_algo ~= 2
        kalman_algo = 1;
    end
    R_tmp = R(stable, :);
    T_tmp = T(stable,stable);
    Pstar_tmp=lyapunov_solver(T_tmp,R_tmp,Q,options_);
    Pstar(stable, stable) = Pstar_tmp;
    Pinf  = [];
end
kalman_tol = options_.kalman_tol;
diffuse_kalman_tol = options_.diffuse_kalman_tol;
riccati_tol = options_.riccati_tol;
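% remove the deterministic component (constant and, if present, linear trend) from the data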
data1 = Y-trend;
% -----------------------------------------------------------------------------
%  4. Kalman smoother
% -----------------------------------------------------------------------------

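% without missing observations, every observable is available in each period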
if ~missing_value
    for i=1:smpl
        data_index{i}=(1:vobs)';
    end
end

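% work on copies of the state-space matrices; they may be augmented below if the
% univariate smoother has to handle correlated measurement errors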
ST = T;
R1 = R;

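% try the multivariate smoother first; if it fails (signalled by the smoother
% returning alphahat=Inf), fall back to the corresponding univariate smoother below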
if kalman_algo == 1 || kalman_algo == 3
    [alphahat,epsilonhat,etahat,ahat,P,aK,PK,decomp,state_uncertainty] = missing_DiffuseKalmanSmootherH1_Z(ST, ...
                                                      Z,R1,Q,H,Pinf,Pstar, ...
                                                      data1,vobs,np,smpl,data_index, ...
                                                      options_.nk,kalman_tol,diffuse_kalman_tol,options_.filter_decomposition,options_.smoothed_state_uncertainty);
    if isinf(alphahat)
        if kalman_algo == 1
            kalman_algo = 2;
        elseif kalman_algo == 3
            kalman_algo = 4;
        else
            error('This case shouldn''t happen')
        end
    end
end

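% univariate (Durbin/Koopman) smoother: a non-diagonal measurement error covariance
% matrix requires augmenting the state vector with the measurement errors first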
if kalman_algo == 2 || kalman_algo == 4
    if ~all(all(abs(H-diag(diag(H)))<1e-14)) % i.e., the measurement error covariance matrix is not diagonal...
        if ~expanded_state_vector_for_univariate_filter
            %Augment state vector (follows Section 6.4.3 of DK (2012))
            expanded_state_vector_for_univariate_filter=1;
            Z   = [Z, eye(vobs)];
            ST  = blkdiag(ST,zeros(vobs));
            np  = size(ST,1);
            Q   = blkdiag(Q,H);
            R1  = blkdiag(R,eye(vobs));
            if kalman_algo == 4
                %recompute Schur state space transformation with
                %expanded state space
                [Pstar,Pinf] = compute_Pinf_Pstar(mf,ST,R1,Q,options_.qz_criterium);
            else
                Pstar = blkdiag(Pstar,H);
                if ~isempty(Pinf)
                    Pinf  = blkdiag(Pinf,zeros(vobs));
                end
            end
            %now reset H to 0
            H   = zeros(vobs,vobs);
        else
            %do nothing, state vector was already expanded
        end
    end

    [alphahat,epsilonhat,etahat,ahat,P,aK,PK,decomp,state_uncertainty] = missing_DiffuseKalmanSmootherH3_Z(ST, ...
                                                      Z,R1,Q,diag(H), ...
                                                      Pinf,Pstar,data1,vobs,np,smpl,data_index, ...
                                                      options_.nk,kalman_tol,diffuse_kalman_tol, ...
                                                      options_.filter_decomposition,options_.smoothed_state_uncertainty);
end


if expanded_state_vector_for_univariate_filter && (kalman_algo == 2 || kalman_algo == 4)
    % extracting measurement errors
    % removing observed variables from the state vector
    k = (1:np-vobs);
    alphahat = alphahat(k,:);
    ahat = ahat(k,:);
    aK = aK(:,k,:,:);
    epsilonhat=etahat(end-vobs+1:end,:);
    etahat=etahat(1:end-vobs,:);
    if ~isempty(PK)
        PK = PK(:,k,k,:);
    end
    if ~isempty(decomp)
        decomp = decomp(:,k,:,:);
    end
    if ~isempty(P)
        P = P(k,k,:);
    end
    if ~isempty(state_uncertainty)
        state_uncertainty = state_uncertainty(k,k,:);
    end
end