function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood_hh(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
% function [fval,llik,cost_flag,ys,trend_coeff,info] = DsgeLikelihood_hh(xparam1,gend,data,data_index,number_of_observations,no_more_missing_observations)
% Evaluates the posterior kernel of a DSGE model.
% 
% INPUTS 
%   xparam1                        [double]   vector of model parameters.
%   gend                           [integer]  scalar specifying the number of observations.
%   data                           [double]   matrix of data
%   data_index                     [cell]     cell of column vectors
%   number_of_observations         [integer]  scalar, effective number of (non-missing) observations.
%   no_more_missing_observations   [integer]  scalar, index of the last period with missing observations.
% OUTPUTS 
%   fval        :     value of the posterior kernel at xparam1.
%   llik        :     vector stacking minus the log prior density and the
%                     elementwise likelihood contributions.
%   cost_flag   :     zero if the function returns a penalty, one otherwise.
%   ys          :     steady state of original endogenous variables
%   trend_coeff :     vector of trend coefficients for the observed variables.
%   info        :     vector of information about the penalty:
%                     41: one (or more) parameter(s) do(es) not satisfy the lower bound
%                     42: one (or more) parameter(s) do(es) not satisfy the upper bound
%               
% SPECIAL REQUIREMENTS
%
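%
% ILLUSTRATIVE USAGE (a minimal sketch, assuming the input arguments have
% already been prepared by Dynare's estimation set-up; not part of the
% original interface documentation):
%
%   objective = @(x) DsgeLikelihood_hh(x,gend,data,data_index, ...
%                        number_of_observations,no_more_missing_observations);
%   [fval,llik] = objective(xparam1);
%
% The returned fval is the objective minimised during posterior mode
% computation (minus the log posterior kernel, up to a constant).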

% Copyright (C) 2004-2011 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.

global bayestopt_ estim_params_ options_ trend_coeff_ M_ oo_
fval          = [];
ys            = [];
trend_coeff   = [];
cost_flag     = 1;
nobs          = size(options_.varobs,1);
llik=NaN;
%------------------------------------------------------------------------------
% 1. Get the structural parameters & define penalties
%------------------------------------------------------------------------------
if ~isequal(options_.mode_compute,1) && any(xparam1 < bayestopt_.lb)
    k = find(xparam1 < bayestopt_.lb);
    fval = bayestopt_.penalty+sum((bayestopt_.lb(k)-xparam1(k)).^2);
    cost_flag = 0;
    info = 41;
    return;
end
if ~isequal(options_.mode_compute,1) && any(xparam1 > bayestopt_.ub)
    k = find(xparam1 > bayestopt_.ub);
    fval = bayestopt_.penalty+sum((xparam1(k)-bayestopt_.ub(k)).^2);
    cost_flag = 0;
    info = 42;
    return;
end
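% When a bound is violated the function returns early with a smooth quadratic
% penalty added to bayestopt_.penalty (a parameter exceeding its bound by 0.1
% adds 0.01), so that an optimiser is pushed back towards the admissible
% region instead of being stopped by a hard error.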
Q = M_.Sigma_e;
H = M_.H;
for i=1:estim_params_.nvx
    k = estim_params_.var_exo(i,1);
    Q(k,k) = xparam1(i)*xparam1(i);
end
offset = estim_params_.nvx;
if estim_params_.nvn
    for i=1:estim_params_.nvn
        k = estim_params_.var_endo(i,1);
        H(k,k) = xparam1(i+offset)*xparam1(i+offset);
    end
    offset = offset+estim_params_.nvn;
end
if estim_params_.ncx
    for i=1:estim_params_.ncx
        k1 = estim_params_.corrx(i,1);
        k2 = estim_params_.corrx(i,2);
        Q(k1,k2) = xparam1(i+offset)*sqrt(Q(k1,k1)*Q(k2,k2));
        Q(k2,k1) = Q(k1,k2);
    end
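    % chol is used as a cheap positive definiteness test: its second output
    % testQ is nonzero whenever Q is not positive definite.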
    [CholQ,testQ] = chol(Q);
    if testQ    %% The variance-covariance matrix of the structural innovations is not positive definite.
        %% We compute its eigenvalues in order to build the penalty.
        a = eig(Q);
        k = find(a < 0);
        if ~isempty(k)
            fval = bayestopt_.penalty+sum(-a(k));
            cost_flag = 0;
            info = 43;
            return
        end
    end
    offset = offset+estim_params_.ncx;
end
if estim_params_.ncn 
    for i=1:estim_params_.ncn
        k1 = options_.lgyidx2varobs(estim_params_.corrn(i,1));
        k2 = options_.lgyidx2varobs(estim_params_.corrn(i,2));
        H(k1,k2) = xparam1(i+offset)*sqrt(H(k1,k1)*H(k2,k2));
        H(k2,k1) = H(k1,k2);
    end
    [CholH,testH] = chol(H);
    if testH
        % The measurement error covariance matrix is not positive definite;
        % we compute its eigenvalues in order to build the penalty.
        a = eig(H);
        k = find(a < 0);
        if ~isempty(k)
            fval = bayestopt_.penalty+sum(-a(k));
            cost_flag = 0;
            info = 44;
            return
        end
    end
    offset = offset+estim_params_.ncn;
end
if estim_params_.np > 0
    M_.params(estim_params_.param_vals(:,1)) = xparam1(offset+1:end);
end
M_.Sigma_e = Q;
M_.H = H;
%------------------------------------------------------------------------------
% 2. call model setup & reduction program
%------------------------------------------------------------------------------
[T,R,SteadyState,info,M_,options_,oo_] = dynare_resolve(M_,options_,oo_);
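% dynare_resolve returns the reduced form of the model as a linear state
% space system
%    alpha_t = T*alpha_{t-1} + R*eta_t,   eta_t ~ N(0,Q),
% the observed variables being selected from alpha_t through bayestopt_.mf
% (with measurement error of covariance H when it is estimated).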
if info(1) == 1 || info(1) == 2 || info(1) == 5
    fval = bayestopt_.penalty+1;
    cost_flag = 0;
    return
elseif info(1) == 3 || info(1) == 4 || info(1)==6 ||info(1) == 19 || info(1) == 20 || info(1) == 21
    fval = bayestopt_.penalty+info(2);
    cost_flag = 0;
    return
end
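% A nonzero info(1) means the model could not be solved at these parameter
% values (e.g. Blanchard-Kahn conditions violated or no steady state found);
% a penalty is returned instead of a likelihood value.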
bayestopt_.mf = bayestopt_.mf1;
if options_.noconstant
    constant = zeros(nobs,1);  
else    
    if options_.loglinear
        constant = log(SteadyState(bayestopt_.mfys));
    else
        constant = SteadyState(bayestopt_.mfys);
    end
end
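% The observables are measured around a deterministic component made of the
% constant above (the steady state of the observed variables, possibly in
% logs) and, when specified, a linear trend; this component is subtracted
% from the data before filtering (Y = data-trend below).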
if bayestopt_.with_trend
    trend_coeff = zeros(nobs,1);
    t = options_.trend_coeffs;
    for i=1:length(t)
        if ~isempty(t{i})
            trend_coeff(i) = evalin('base',t{i});
        end
    end
    trend = repmat(constant,1,gend)+trend_coeff*[1:gend];
else
    trend = repmat(constant,1,gend);
end
start = options_.presample+1;
np    = size(T,1);
mf    = bayestopt_.mf;
no_missing_data_flag = (number_of_observations==gend*nobs);
%------------------------------------------------------------------------------
% 3. Initial condition of the Kalman filter
%------------------------------------------------------------------------------
kalman_algo = options_.kalman_algo;
if options_.lik_init == 1             % Kalman filter
    if kalman_algo ~= 2
        kalman_algo = 1;
    end
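    % Stationary initialisation: Pstar is the unconditional covariance of the
    % state vector, i.e. the solution of the discrete Lyapunov equation
    %    Pstar = T*Pstar*T' + R*Q*R'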
    Pstar = lyapunov_symm(T,R*Q*R',options_.qz_criterium,options_.lyapunov_complex_threshold);
    Pinf      = [];
elseif options_.lik_init == 2 % Old Diffuse Kalman filter
    if kalman_algo ~= 2
        kalman_algo = 1;
    end
    Pstar = options_.Harvey_scale_factor*eye(np);
    Pinf = [];
elseif options_.lik_init == 3 % Diffuse Kalman filter
    if kalman_algo ~= 4
        kalman_algo = 3;
    end
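    % Diffuse initialisation: the state space is rotated (Schur decomposition)
    % so that the nonstationary directions are isolated; Pinf carries the
    % diffuse (infinite variance) part and Pstar the stationary part.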
    [Z,ST,R1,QT,Pstar,Pinf] = schur_statespace_transformation(mf,T,R,Q,options_.qz_criterium);
end
kalman_tol = options_.kalman_tol;
riccati_tol = options_.riccati_tol;
mf = bayestopt_.mf1;
Y   = data-trend;
%------------------------------------------------------------------------------
% 4. Likelihood evaluation
%------------------------------------------------------------------------------
if (kalman_algo==1)% Multivariate Kalman Filter
    if no_missing_data_flag
        [LIK, lik] = kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol); 
    else
        [LIK, lik] = ...
            missing_observations_kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol, ...
                                               data_index,number_of_observations,no_more_missing_observations);
    end
    if isinf(LIK)
        kalman_algo = 2;
    end
end
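% If the multivariate filter failed (infinite likelihood, typically because of
% a singular forecast error variance matrix), fall back to the univariate
% filter below.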
if (kalman_algo==2)% Univariate Kalman Filter
    no_correlation_flag = 1;
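    % The univariate filter expects the measurement error variances as a
    % vector; if H has off-diagonal terms, a dedicated routine for correlated
    % measurement errors is called instead.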
    if isequal(H,0)
        H = zeros(nobs,1);
    else
        if all(all(abs(H-diag(diag(H)))<1e-14)) % i.e., the covariance matrix is diagonal...
            H = diag(H);
        else
            no_correlation_flag = 0;
        end
    end
    if no_correlation_flag
        [LIK, lik] = univariate_kalman_filter(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol,data_index,number_of_observations,no_more_missing_observations);
    else
        [LIK, lik] = univariate_kalman_filter_corr(T,R,Q,H,Pstar,Y,start,mf,kalman_tol,riccati_tol,data_index,number_of_observations,no_more_missing_observations);
    end
end
if (kalman_algo==3)% Multivariate Diffuse Kalman Filter
    if no_missing_data_flag
        [LIK, lik] = diffuse_kalman_filter(ST,R1,Q,H,Pinf,Pstar,Y,start,Z,kalman_tol, ...
                                           riccati_tol);
    else
        [LIK, lik] = missing_observations_diffuse_kalman_filter(ST,R1,Q,H,Pinf, ...
                                                          Pstar,Y,start,Z,kalman_tol,riccati_tol,...
                                                          data_index,number_of_observations,...
                                                          no_more_missing_observations);
    end
    if isinf(LIK)
        kalman_algo = 4;
    end
end
if (kalman_algo==4)% Univariate Diffuse Kalman Filter
    no_correlation_flag = 1;
    if isequal(H,0)
        H = zeros(nobs,1);
    else
        if all(all(abs(H-diag(diag(H)))<1e-14)) % i.e., the covariance matrix is diagonal...
            H = diag(H);
        else
            no_correlation_flag = 0;
        end
    end
    if no_correlation_flag
        [LIK, lik] = univariate_diffuse_kalman_filter(ST,R1,Q,H,Pinf,Pstar,Y, ...
                                                      start,Z,kalman_tol,riccati_tol,data_index,...
                                                      number_of_observations,no_more_missing_observations);
    else
        [LIK, lik] = univariate_diffuse_kalman_filter_corr(ST,R1,Q,H,Pinf,Pstar, ...
                                                          Y,start,Z,kalman_tol,riccati_tol,...
                                                          data_index,number_of_observations,...
                                                          no_more_missing_observations);
    end
end
if imag(LIK) ~= 0
    likelihood = bayestopt_.penalty;
else
    likelihood = LIK;
end
% ------------------------------------------------------------------------------
% Adds prior if necessary
% ------------------------------------------------------------------------------
lnprior = priordens(xparam1,bayestopt_.pshape,bayestopt_.p6,bayestopt_.p7,bayestopt_.p3,bayestopt_.p4);
fval    = (likelihood-lnprior);
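% The filters return LIK as minus the log-likelihood, so fval is minus the
% log posterior kernel and is the quantity minimised by the mode finders.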
options_.kalman_algo = kalman_algo;
lik=lik(start:end,:);
llik=[-lnprior; lik(:)];
% llik=[-lnprior; lik(start:end)];
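% llik stacks minus the log prior density and the elementwise likelihood
% contributions (presample periods discarded). This per-observation breakdown
% is what distinguishes this routine from plain DsgeLikelihood; it is
% presumably meant for routines that need observation-level contributions
% (e.g. outer-product-of-gradients Hessian estimates).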