% DsgeLikelihood.m
function [fval,exit_flag,ys,trend_coeff,info,Model,DynareOptions,BayesInfo,DynareResults,DLIK,AHess] = DsgeLikelihood(xparam1,DynareDataset,DynareOptions,Model,EstimatedParameters,BayesInfo,DynareResults,derivatives_info)
% Evaluates the posterior kernel of a dsge model.

%@info:
%! @deftypefn {Function File} {[@var{fval},@var{exit_flag},@var{ys},@var{trend_coeff},@var{info},@var{Model},@var{DynareOptions},@var{BayesInfo},@var{DynareResults},@var{DLIK},@var{AHess}] =} DsgeLikelihood (@var{xparam1},@var{DynareDataset},@var{DynareOptions},@var{Model},@var{EstimatedParameters},@var{BayesInfo},@var{DynareResults},@var{derivatives_info})
%! @anchor{DsgeLikelihood}
%! @sp 1
%! Evaluates the posterior kernel of a dsge model.
%! @sp 2
%! @strong{Inputs}
%! @sp 1
%! @table @ @var
%! @item xparam1
%! Vector of doubles, current values for the estimated parameters.
%! @item DynareDataset
%! Matlab's structure describing the dataset (initialized by dynare, see @ref{dataset_}).
%! @item DynareOptions
%! Matlab's structure describing the options (initialized by dynare, see @ref{options_}).
%! @item Model
%! Matlab's structure describing the Model (initialized by dynare, see @ref{M_}).
%! @item EstimatedParameters
%! Matlab's structure describing the estimated_parameters (initialized by dynare, see @ref{estim_params_}).
%! @item BayesInfo
%! Matlab's structure describing the priors (initialized by dynare, see @ref{bayesopt_}).
%! @item DynareResults
%! Matlab's structure gathering the results (initialized by dynare, see @ref{oo_}).
%! @item derivatives_info
%! Structure with precomputed analytical derivatives of the reduced form (optional).
%! @end table
%! @sp 2
%! @strong{Outputs}
%! @sp 1
%! @table @ @var
%! @item fval
%! Double scalar, value of (minus) the likelihood.
%! @item exit_flag
%! Integer scalar, equal to zero if the routine returns with a penalty (one otherwise).
%! @item ys
%! Vector of doubles, steady state level for the endogenous variables.
%! @item trend_coeff
%! Matrix of doubles, coefficients of the deterministic trend in the measurement equation.
%! @item info
%! Integer scalar, error code.
%! @table @ @code
%! @item info==0
%! No error.
%! @item info==1
%! The model doesn't determine the current variables uniquely.
%! @item info==2
%! MJDGGES returned an error code.
%! @item info==3
%! Blanchard & Kahn conditions are not satisfied: no stable equilibrium.
%! @item info==4
%! Blanchard & Kahn conditions are not satisfied: indeterminacy.
%! @item info==5
%! Blanchard & Kahn conditions are not satisfied: indeterminacy due to rank failure.
%! @item info==6
%! The jacobian evaluated at the deterministic steady state is complex.
%! @item info==19
%! The steadystate routine threw an exception (inconsistent deep parameters).
%! @item info==20
%! Cannot find the steady state, info(2) contains the sum of square residuals (of the static equations).
%! @item info==21
%! The steady state is complex, info(2) contains the sum of square of imaginary parts of the steady state.
%! @item info==22
%! The steady state has NaNs.
%! @item info==23
%! M_.params has been updated in the steadystate routine and has complex valued scalars.
%! @item info==24
%! M_.params has been updated in the steadystate routine and has some NaNs.
%! @item info==30
%! Ergodic variance can't be computed.
%! @item info==41
%! At least one parameter is violating a lower bound condition.
%! @item info==42
%! At least one parameter is violating an upper bound condition.
%! @item info==43
%! The covariance matrix of the structural innovations is not positive definite.
%! @item info==44
%! The covariance matrix of the measurement errors is not positive definite.
%! @item info==45
%! Likelihood is not a number (NaN).
%! @item info==46
%! Likelihood is a complex valued number.
%! @end table
%! @item Model
%! Matlab's structure describing the model (initialized by dynare, see @ref{M_}).
%! @item DynareOptions
%! Matlab's structure describing the options (initialized by dynare, see @ref{options_}).
%! @item BayesInfo
%! Matlab's structure describing the priors (initialized by dynare, see @ref{bayesopt_}).
%! @item DynareResults
%! Matlab's structure gathering the results (initialized by dynare, see @ref{oo_}).
%! @item DLIK
%! Vector of doubles, score of the likelihood.
%! @item AHess
%! Matrix of doubles, asymptotic hessian matrix.
%! @end table
%! @sp 2
%! @strong{This function is called by:}
%! @sp 1
%! @ref{dynare_estimation_1}, @ref{mode_check}
%! @sp 2
%! @strong{This function calls:}
%! @sp 1
%! @ref{dynare_resolve}, @ref{lyapunov_symm}, @ref{schur_statespace_transformation}, @ref{kalman_filter_d}, @ref{missing_observations_kalman_filter_d}, @ref{univariate_kalman_filter_d}, @ref{kalman_steady_state}, @ref{getH}, @ref{kalman_filter}, @ref{score}, @ref{AHessian}, @ref{missing_observations_kalman_filter}, @ref{univariate_kalman_filter}, @ref{priordens}
%! @end deftypefn
%@eod:

% Copyright (C) 2004-2011 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.

% AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT FR

% Declaration of the penalty as a persistent variable.
persistent penalty

% Initialization of the persistent variable.
if ~nargin || isempty(penalty)
    penalty = 1e8;
    if ~nargin, return, end
end
% Calling the routine with a single argument resets the penalty level.
if nargin==1
    penalty = xparam1;
    return
end

% Initialization of the returned variables and others...
fval        = [];
ys          = [];
trend_coeff = [];
exit_flag   = 1;
info        = 0;
singularity_flag = 0;

% Set flag related to analytical derivatives (score and asymptotic hessian
% are only computed when the caller asks for the DLIK/AHess outputs).
if nargout > 9
    analytic_derivation=1;
else
    analytic_derivation=0;
end

%------------------------------------------------------------------------------
% 1. Get the structural parameters & define penalties
%------------------------------------------------------------------------------

% Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BayesInfo.lb)
    k = find(xparam1<BayesInfo.lb);
    fval = penalty+sum((BayesInfo.lb(k)-xparam1(k)).^2);
    exit_flag = 0;
    info = 41;
    return
end

% Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BayesInfo.ub)
    k = find(xparam1>BayesInfo.ub);
    fval = penalty+sum((xparam1(k)-BayesInfo.ub(k)).^2);
    exit_flag = 0;
    info = 42;
    return
end

% Get the diagonal elements of the covariance matrices for the structural
% innovations (Q) and the measurement error (H). The estimated parameters
% are standard deviations, hence the squaring.
Q = Model.Sigma_e;
H = Model.H;
for i=1:EstimatedParameters.nvx
    k = EstimatedParameters.var_exo(i,1);
    Q(k,k) = xparam1(i)*xparam1(i);
end
offset = EstimatedParameters.nvx;
if EstimatedParameters.nvn
    for i=1:EstimatedParameters.nvn
        k = EstimatedParameters.var_endo(i,1);
        H(k,k) = xparam1(i+offset)*xparam1(i+offset);
    end
    offset = offset+EstimatedParameters.nvn;
else
    % No estimated measurement error: H is a zero matrix of size nvobs.
    H = zeros(DynareDataset.info.nvobs);
end

% Get the off-diagonal elements of the covariance matrix for the structural
% innovations (estimated parameters are correlations). Test if Q is positive definite.
if EstimatedParameters.ncx
    for i=1:EstimatedParameters.ncx
        k1 = EstimatedParameters.corrx(i,1);
        k2 = EstimatedParameters.corrx(i,2);
        Q(k1,k2) = xparam1(i+offset)*sqrt(Q(k1,k1)*Q(k2,k2));
        Q(k2,k1) = Q(k1,k2);
    end
    % Try to compute the cholesky decomposition of Q (possible iff Q is positive definite)
    [CholQ,testQ] = chol(Q);
    if testQ
        % The variance-covariance matrix of the structural innovations is not
        % positive definite. Build an endogenous penalty from the negative eigenvalues.
        a = diag(eig(Q));
        k = find(a < 0);
        if k > 0
            fval = BayesInfo.penalty+sum(-a(k));
            exit_flag = 0;
            info = 43;
            return
        end
    end
    offset = offset+EstimatedParameters.ncx;
end

% Get the off-diagonal elements of the covariance matrix for the measurement
% errors. Test if H is positive definite.
if EstimatedParameters.ncn
    for i=1:EstimatedParameters.ncn
        k1 = DynareOptions.lgyidx2varobs(EstimatedParameters.corrn(i,1));
        k2 = DynareOptions.lgyidx2varobs(EstimatedParameters.corrn(i,2));
        H(k1,k2) = xparam1(i+offset)*sqrt(H(k1,k1)*H(k2,k2));
        H(k2,k1) = H(k1,k2);
    end
    % Try to compute the cholesky decomposition of H (possible iff H is positive definite)
    [CholH,testH] = chol(H);
    if testH
        % The variance-covariance matrix of the measurement errors is not
        % positive definite. Build an endogenous penalty from the negative eigenvalues.
        a = diag(eig(H));
        k = find(a < 0);
        if k > 0
            fval = BayesInfo.penalty+sum(-a(k));
            exit_flag = 0;
            info = 44;
            return
        end
    end
    offset = offset+EstimatedParameters.ncn;
end

% Update estimated structural parameters in Model.params.
if EstimatedParameters.np > 0
    Model.params(EstimatedParameters.param_vals(:,1)) = xparam1(offset+1:end);
end

% Update Model.Sigma_e and Model.H.
Model.Sigma_e = Q;
Model.H = H;

%------------------------------------------------------------------------------
% 2. call model setup & reduction program
%------------------------------------------------------------------------------

% Linearize the model around the deterministic steady state and extract the
% matrices of the state equation (T and R).
[T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');

% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 22 || info(1) == 24
    fval = penalty+1;
    info = info(1);
    exit_flag = 0;
    return
elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 19 || info(1) == 20 || info(1) == 21 || info(1) == 23
    % For these error codes info(2) carries a magnitude (e.g. sum of squared
    % residuals) that grades the penalty.
    fval = penalty+info(2);
    info = info(1);
    exit_flag = 0;
    return
end

% Define a vector of indices for the observed variables. Is this really usefull?...
BayesInfo.mf = BayesInfo.mf1;

% Define the constant vector of the measurement equation.
if DynareOptions.noconstant
    constant = zeros(DynareDataset.info.nvobs,1);
else
    if DynareOptions.loglinear
        constant = log(SteadyState(BayesInfo.mfys));
    else
        constant = SteadyState(BayesInfo.mfys);
    end
end

% Define the deterministic linear trend of the measurement equation.
if BayesInfo.with_trend
    trend_coeff = zeros(DynareDataset.info.nvobs,1);
    t = DynareOptions.trend_coeffs;
    for i=1:length(t)
        if ~isempty(t{i})
            % Trend coefficients are expressions evaluated in the base workspace.
            trend_coeff(i) = evalin('base',t{i});
        end
    end
    trend = repmat(constant,1,DynareDataset.info.ntobs)+trend_coeff*[1:DynareDataset.info.ntobs];
else
    trend = repmat(constant,1,DynareDataset.info.ntobs);
end

% Get needed informations for kalman filter routines.
start = DynareOptions.presample+1;
Z = BayesInfo.mf; % old mf
no_missing_data_flag = ~DynareDataset.missing.state;
mm = length(T); % old np
pp = DynareDataset.info.nvobs;
rr = length(Q);
kalman_tol = DynareOptions.kalman_tol;
riccati_tol = DynareOptions.riccati_tol;
Y   = DynareDataset.data-trend;

%------------------------------------------------------------------------------
% 3. Initial condition of the Kalman filter
%------------------------------------------------------------------------------
kalman_algo = DynareOptions.kalman_algo;

% Resetting measurement errors covariance matrix for univariate filters,
% which expect H as a pp*1 vector of variances. When H is not diagonal, the
% measurement errors are appended to the state vector instead (Pstar/Pinf
% are computed afterwards from the augmented T, R and Q, so they do not
% need to be augmented here).
if (kalman_algo == 2) || (kalman_algo == 4)
    if isequal(H,0)
        H = zeros(pp,1);
        mmm = mm;
    else
        if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
            H = diag(H);
            mmm = mm;
        else
            Z = [Z, eye(pp)];
            T = blkdiag(T,zeros(pp));
            Q = blkdiag(Q,H);
            R = blkdiag(R,eye(pp));
            H = zeros(pp,1);
            mmm   = mm+pp;
        end
    end
end

diffuse_periods = 0;
switch DynareOptions.lik_init
  case 1% Standard initialization with the steady state of the state equation.
    if kalman_algo~=2
        % Use standard kalman filter except if the univariate filter is explicitely choosen.
        kalman_algo = 1;
    end
    Pstar = lyapunov_symm(T,R*Q*R',DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold);
    Pinf  = [];
    a     = zeros(mm,1);
    Zflag = 0;
  case 2% Initialization with large numbers on the diagonal of the covariance matrix of the states (for non stationary models).
    if kalman_algo ~= 2
        % Use standard kalman filter except if the univariate filter is explicitely choosen.
        kalman_algo = 1;
    end
    Pstar = DynareOptions.Harvey_scale_factor*eye(mm);
    Pinf  = [];
    a     = zeros(mm,1);
    Zflag = 0;
  case 3% Diffuse Kalman filter (Durbin and Koopman)
    if kalman_algo ~= 4
        % Use standard kalman filter except if the univariate filter is explicitely choosen.
        kalman_algo = 3;
    end
    [Z,T,R,QT,Pstar,Pinf] = schur_statespace_transformation(Z,T,R,Q,DynareOptions.qz_criterium);
    Zflag = 1;
    % Run diffuse kalman filter on first periods.
    if (kalman_algo==3)
        % Multivariate Diffuse Kalman Filter
        if no_missing_data_flag
            [dLIK,tmp,a,Pstar] = kalman_filter_d(Y, 1, size(Y,2), ...
                                                       zeros(mm,1), Pinf, Pstar, ...
                                                       kalman_tol, riccati_tol, DynareOptions.presample, ...
                                                       T,R,Q,H,Z,mm,pp,rr);
        else
            [dLIK,tmp,a,Pstar] = missing_observations_kalman_filter_d(DynareDataset.missing.aindex,DynareDataset.missing.number_of_observations,DynareDataset.missing.no_more_missing_observations, ...
                                                              Y, 1, size(Y,2), ...
                                                              zeros(mm,1), Pinf, Pstar, ...
                                                              kalman_tol, riccati_tol, DynareOptions.presample, ...
                                                              T,R,Q,H,Z,mm,pp,rr);
        end
        diffuse_periods = length(tmp);
        if isinf(dLIK)
            % Go to univariate diffuse filter if singularity problem.
            kalman_algo = 4;
            singularity_flag = 1;
        end
    end
    if (kalman_algo==4)
        % Univariate Diffuse Kalman Filter
        if singularity_flag
            % The univariate filter needs H as a vector of variances; a non
            % diagonal H is absorbed into an augmented state vector.
            if isequal(H,0)
                H = zeros(pp,1);
                mmm = mm;
            else
                if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
                    H = diag(H);
                    mmm = mm;
                else
                    Z = [Z, eye(pp)];
                    T = blkdiag(T,zeros(pp));
                    Q = blkdiag(Q,H);
                    R = blkdiag(R,eye(pp));
                    Pstar = blkdiag(Pstar,H);
                    Pinf  = blkdiag(Pinf,zeros(pp));
                    H = zeros(pp,1);
                    mmm   = mm+pp;
                end
            end
            % no need to test again for correlation elements
            singularity_flag = 0;
        end
        [dLIK,tmp,a,Pstar] = univariate_kalman_filter_d(DynareDataset.missing.aindex,DynareDataset.missing.number_of_observations,DynareDataset.missing.no_more_missing_observations, ...
                                                              Y, 1, size(Y,2), ...
                                                              zeros(mmm,1), Pinf, Pstar, ...
                                                              kalman_tol, riccati_tol, DynareOptions.presample, ...
                                                              T,R,Q,H,Z,mmm,pp,rr);
        diffuse_periods = length(tmp);
    end
  case 4% Start from the solution of the Riccati equation.
    if kalman_algo ~= 2
        kalman_algo = 1;
    end
    if isequal(H,0)
        [err,Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(Z,mm,length(Z))));
    else
        [err,Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(Z,mm,length(Z))),H);
    end
    if err
        % Fall back on the unconditional variance if the Riccati solver fails.
        disp(['DsgeLikelihood:: I am not able to solve the Riccati equation, so I switch to lik_init=1!']);
        DynareOptions.lik_init = 1;
        Pstar = lyapunov_symm(T,R*Q*R',DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold);
    end
    Pinf  = [];
  otherwise
    error('DsgeLikelihood:: Unknown initialization approach for the Kalman filter!')
end

if analytic_derivation
    no_DLIK = 0;
    full_Hess = 0;
    DLIK = [];
    AHess = [];
    if nargin<8 || isempty(derivatives_info)
        % Derivatives of the reduced form are not provided by the caller:
        % compute them from the unrestricted solution with getH.
        [A,B,nou,nou,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults);
        if ~isempty(EstimatedParameters.var_exo)
            indexo=EstimatedParameters.var_exo(:,1);
        else
            indexo=[];
        end
        if ~isempty(EstimatedParameters.param_vals)
            indparam=EstimatedParameters.param_vals(:,1);
        else
            indparam=[];
        end
        if full_Hess,
            [dum, DT, DOm, DYss, dum2, D2T, D2Om, D2Yss] = getH(A, B, Model,DynareResults,0,indparam,indexo);
        else
            [dum, DT, DOm, DYss] = getH(A, B, Model,DynareResults,0,indparam,indexo);
        end
    else
        DT = derivatives_info.DT;
        DOm = derivatives_info.DOm;
        DYss = derivatives_info.DYss;
        if isfield(derivatives_info,'full_Hess'),
            full_Hess = derivatives_info.full_Hess;
        end
        if full_Hess,
            D2T = derivatives_info.D2T;
            D2Om = derivatives_info.D2Om;
            D2Yss = derivatives_info.D2Yss;
        end
        if isfield(derivatives_info,'no_DLIK'),
            no_DLIK = derivatives_info.no_DLIK;
        end
        clear('derivatives_info');
    end
    % Restrict the derivatives to the state variables used by the filter and
    % pad DYss with zero columns for the shock standard deviations.
    iv = DynareResults.dr.restrict_var_list;
    DYss = [zeros(size(DYss,1),offset) DYss];
    DT = DT(iv,iv,:);
    DOm = DOm(iv,iv,:);
    DYss = DYss(iv,:);
    DH=zeros([size(H),length(xparam1)]);
    DQ=zeros([size(Q),length(xparam1)]);
    DP=zeros([size(T),length(xparam1)]);
    if full_Hess,
        for j=1:size(D2Yss,1),
            tmp(j,:,:) = blkdiag(zeros(offset,offset), squeeze(D2Yss(j,:,:)));
        end
        D2Yss = tmp;
        D2T = D2T(iv,iv,:,:);
        D2Om = D2Om(iv,iv,:,:);
        D2Yss = D2Yss(iv,:,:);
        D2H=zeros([size(H),length(xparam1),length(xparam1)]);
        D2P=zeros([size(T),length(xparam1),length(xparam1)]);
    end
    % Derivatives of Q and of the initial covariance Pstar with respect to
    % the standard deviations of the structural shocks.
    for i=1:EstimatedParameters.nvx
        k = EstimatedParameters.var_exo(i,1);
        DQ(k,k,i) = 2*sqrt(Q(k,k));
        dum = lyapunov_symm(T,DOm(:,:,i),DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold);
        % Zero-out numerical noise below 1e-12.
        kk = find(abs(dum) < 1e-12);
        dum(kk) = 0;
        DP(:,:,i)=dum;
        if full_Hess
            for j=1:i,
                dum = lyapunov_symm(T,D2Om(:,:,i,j),DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold);
                kk = (abs(dum) < 1e-12);
                dum(kk) = 0;
                D2P(:,:,i,j)=dum;
                D2P(:,:,j,i)=dum;
            end
        end
    end
    % Derivatives of H with respect to the measurement error standard deviations.
    offset = EstimatedParameters.nvx;
    for i=1:EstimatedParameters.nvn
        k = EstimatedParameters.var_endo(i,1);
        DH(k,k,i+offset) = 2*sqrt(H(k,k));
        if full_Hess
            D2H(k,k,i+offset,i+offset) = 2;
        end
    end
    % Derivatives of Pstar with respect to the deep parameters (Lyapunov
    % equation differentiated with respect to each parameter).
    offset = offset + EstimatedParameters.nvn;
    for j=1:EstimatedParameters.np
        dum = lyapunov_symm(T,DT(:,:,j+offset)*Pstar*T'+T*Pstar*DT(:,:,j+offset)'+DOm(:,:,j+offset),DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold);
        kk = find(abs(dum) < 1e-12);
        dum(kk) = 0;
        DP(:,:,j+offset)=dum;
        if full_Hess
            DTj = DT(:,:,j+offset);
            DPj = dum;
            for i=1:j,
                DTi = DT(:,:,i+offset);
                DPi = DP(:,:,i+offset);
                D2Tij = D2T(:,:,i,j);
                D2Omij = D2Om(:,:,i,j);
                tmp = D2Tij*Pstar*T' + T*Pstar*D2Tij' + DTi*DPj*T' + DTj*DPi*T' + T*DPj*DTi' + T*DPi*DTj' + DTi*Pstar*DTj' + DTj*Pstar*DTi' + D2Omij;
                dum = lyapunov_symm(T,tmp,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold);
                dum(abs(dum)<1.e-12) = 0;
                D2P(:,:,i+offset,j+offset) = dum;
                D2P(:,:,j+offset,i+offset) = dum;
            end
        end
    end
end

%------------------------------------------------------------------------------
% 4. Likelihood evaluation
%------------------------------------------------------------------------------

if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter
    if no_missing_data_flag
        if DynareOptions.block == 1
            [err, LIK] = block_kalman_filter(T,R,Q,H,Pstar,Y,start,Z,kalman_tol,riccati_tol, Model.nz_state_var, Model.n_diag, Model.nobs_non_statevar);
            mexErrCheck('block_kalman_filter', err);
        else
            LIK = kalman_filter(Y,diffuse_periods+1,size(Y,2), ...
                                a,Pstar, ...
                                kalman_tol, riccati_tol, ...
                                DynareOptions.presample, ...
                                T,Q,R,H,Z,mm,pp,rr,Zflag,diffuse_periods);
        end
        if analytic_derivation
            if no_DLIK==0
                [DLIK] = score(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,start,Z,kalman_tol,riccati_tol);
            end
            if nargout==11
                [AHess] = AHessian(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,start,Z,kalman_tol,riccati_tol);
                if full_Hess,
                    Hess = get_Hessian(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,D2T,D2Yss,D2Om,D2H,D2P,start,Z,kalman_tol,riccati_tol);
                    Hess0 = getHessian(Y,T,DT,D2T, R*Q*transpose(R),DOm,D2Om,Z,DYss,D2Yss);
                end
            end
        end
    else
        LIK = missing_observations_kalman_filter(DynareDataset.missing.aindex,DynareDataset.missing.number_of_observations,DynareDataset.missing.no_more_missing_observations,Y,diffuse_periods+1,size(Y,2), ...
                                               a, Pstar, ...
                                               kalman_tol, DynareOptions.riccati_tol, ...
                                               DynareOptions.presample, ...
                                               T,Q,R,H,Z,mm,pp,rr,Zflag,diffuse_periods);
    end
    if isinf(LIK)
        % Singularity in the multivariate filter: fall back on the
        % corresponding univariate filter below.
        if kalman_algo == 1
            kalman_algo = 2;
        else
            kalman_algo = 4;
        end
        singularity_flag = 1;
    else
        if DynareOptions.lik_init==3
            % Add the contribution of the diffuse (first) periods.
            LIK = LIK + dLIK;
        end
    end
end

if ( singularity_flag || (kalman_algo==2) || (kalman_algo==4) )
    % Univariate Kalman Filter
    % resetting measurement error covariance matrix when necessary
    if singularity_flag
        if isequal(H,0)
            H = zeros(pp,1);
            mmm = mm;
        else
            if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
                H = diag(H);
                mmm = mm;
            else
                Z = [Z, eye(pp)];
                T = blkdiag(T,zeros(pp));
                Q = blkdiag(Q,H);
                R = blkdiag(R,eye(pp));
                Pstar = blkdiag(Pstar,H);
                Pinf  = blkdiag(Pinf,zeros(pp));
                H = zeros(pp,1);
                mmm   = mm+pp;
            end
        end
    end

    LIK = univariate_kalman_filter(DynareDataset.missing.aindex,DynareDataset.missing.number_of_observations,DynareDataset.missing.no_more_missing_observations,Y,diffuse_periods+1,size(Y,2), ...
                                       a,Pstar, ...
                                       DynareOptions.kalman_tol, ...
                                       DynareOptions.riccati_tol, ...
                                       DynareOptions.presample, ...
                                       T,Q,R,H,Z,mmm,pp,rr,diffuse_periods);
    if DynareOptions.lik_init==3
        LIK = LIK+dLIK;
    end
end

if isnan(LIK)
    info = 45;
    exit_flag = 0;
    return
end
if imag(LIK)~=0
    % A complex likelihood is replaced by the penalty level.
    likelihood = penalty;
else
    likelihood = LIK;
end

% ------------------------------------------------------------------------------
% 5. Adds prior if necessary
% ------------------------------------------------------------------------------
if analytic_derivation
    if full_Hess,
        [lnprior, dlnprior, d2lnprior] = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
        AHess = Hess + d2lnprior;
    else
        [lnprior, dlnprior] = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
    end
    if no_DLIK==0
        DLIK = DLIK - dlnprior';
    end
else
    lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
end

% Minus the log posterior kernel.
fval    = (likelihood-lnprior);

% Update DynareOptions.kalman_algo.
DynareOptions.kalman_algo = kalman_algo;

% Update the penalty.
penalty = fval;