function [LIK,lik] = nonlinear_kalman_filter(ReducedForm, Y, start, ParticleOptions, ThreadsOptions)
% Evaluates the likelihood of a nonlinear model, approximating the
% predictive (prior) and filtered (posterior) densities of the state variables
% with a Kalman filter recursion.
% The Gaussian approximation of these densities relies on:
% - a spherical-radial cubature rule (ref: Arasaratnam & Haykin, 2009);
% - a scaled unscented transform (ref: Julier & Uhlmann, 1995);
% - Monte-Carlo draws from a multivariate Gaussian distribution.
% The first and second moments of the prior and posterior state densities are computed
% from the resulting nodes/particles and are used to generate the approximating
% distributions at the following observation.
% Pros: Using deterministic nodes is much faster than Monte-Carlo Gaussian particle and standard
%       particle filters, since far fewer points have to be propagated.
% Cons: 1. Linear projection formulae are applied in a nonlinear context.
%       2. Parameter estimates may be biased if the model is truly non-Gaussian, since the
%          predictive and filtered densities are unimodal.
%
% INPUTS
%    ReducedForm      [structure] Matlab's structure describing the reduced form model (decision
%                                 rule coefficients, covariance matrices, steady state, ...).
%    Y                [double]    matrix of observations (one column per period).
%    start            [integer]   scalar, first period used in the evaluation of the likelihood (default is 1).
%    ParticleOptions  [structure] Matlab's structure describing the options for particle filtering.
%    ThreadsOptions   [structure] Matlab's structure with the number of threads used by the
%                                 compiled routines (local_state_space_iteration_2).
%
% OUTPUTS
%    LIK        [double]    scalar, minus the log-likelihood (observations from 'start' to the end of the sample).
%    lik        [double]    vector, log-density of the observations in each period.
%
% REFERENCES
%
% NOTES
%   The vector "lik" is used to evaluate the jacobian of the likelihood.
% Copyright (C) 2009-2017 Dynare Team
%
% This file is part of Dynare.
%
% Dynare is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Dynare is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.

persistent init_flag mf0 mf1 nodes weights weights_c
persistent sample_size number_of_state_variables number_of_observed_variables number_of_structural_innovations

% Set default
if isempty(start)
    start = 1;
end

% Set local state space model (first-order approximation).
ghx  = ReducedForm.ghx;
ghu  = ReducedForm.ghu;
% Set local state space model (second-order approximation).
ghxx = ReducedForm.ghxx;
ghuu = ReducedForm.ghuu;
ghxu = ReducedForm.ghxu;

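% Basic sanity check: echo the decision rule matrices if any coefficient is NaN, Inf, or implausibly large.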
if any(any(isnan(ghx))) || any(any(isnan(ghu))) || any(any(isnan(ghxx))) || any(any(isnan(ghuu))) || any(any(isnan(ghxu))) || ...
        any(any(isinf(ghx))) || any(any(isinf(ghu))) || any(any(isinf(ghxx))) || any(any(isinf(ghuu))) || any(any(isinf(ghxu))) || ...
        any(any(abs(ghx)>1e4)) || any(any(abs(ghu)>1e4)) || any(any(abs(ghxx)>1e4)) || any(any(abs(ghuu)>1e4)) || any(any(abs(ghxu)>1e4))
    ghx
    ghu
    ghxx
    ghuu
    ghxu
end

constant = ReducedForm.constant;
state_variables_steady_state = ReducedForm.state_variables_steady_state;

% Set persistent variables.
if isempty(init_flag)
    mf0 = ReducedForm.mf0;
    mf1 = ReducedForm.mf1;
    sample_size = size(Y,2);
    number_of_state_variables = length(mf0);
    number_of_observed_variables = length(mf1);
    number_of_structural_innovations = length(ReducedForm.Q);
    init_flag = 1;
end

% Compute Gaussian quadrature nodes and weights over the states and structural shocks.

if ParticleOptions.proposal_approximation.montecarlo
    nodes = randn(ParticleOptions.number_of_particles,number_of_state_variables+number_of_structural_innovations) ;
    weights = 1/ParticleOptions.number_of_particles ;
    weights_c = weights ;
elseif ParticleOptions.proposal_approximation.cubature
    [nodes,weights] = spherical_radial_sigma_points(number_of_state_variables+number_of_structural_innovations) ;
    weights_c = weights ;
elseif ParticleOptions.proposal_approximation.unscented
    [nodes,weights,weights_c] = unscented_sigma_points(number_of_state_variables+number_of_structural_innovations,ParticleOptions);
else
    error('Estimation: This approximation for the proposal is not implemented or unknown!')
end
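% nodes holds one quadrature/sigma point per row (dimension: states + structural innovations);
% weights and weights_c are the associated weights used for the first and second moments.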

if ParticleOptions.distribution_approximation.montecarlo
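    % Reset the seed so that Monte-Carlo draws are identical across likelihood evaluations.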
    set_dynare_seed('default');
end

% Get covariance matrices
H = ReducedForm.H;
H_lower_triangular_cholesky = chol(H)' ;
Q_lower_triangular_cholesky = chol(ReducedForm.Q)';

% Get initial condition for the state vector.
StateVectorMean = ReducedForm.StateVectorMean;
StateVectorVarianceSquareRoot = chol(ReducedForm.StateVectorVariance)';

% Initialization of the likelihood.
lik  = NaN(sample_size,1);
LIK  = NaN;

for t=1:sample_size

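    % Stack the current state and the structural innovations into an augmented vector with a
    % block-diagonal Cholesky factor, and map the quadrature/sigma nodes through it.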
    xbar = [StateVectorMean ; zeros(number_of_structural_innovations,1) ] ;
    sqr_Px = [ [ StateVectorVarianceSquareRoot zeros(number_of_state_variables,number_of_structural_innovations) ] ;
               [ zeros(number_of_structural_innovations,number_of_state_variables) Q_lower_triangular_cholesky ] ];
    sigma_points = bsxfun(@plus,xbar,sqr_Px*(nodes'));
    StateVectors = sigma_points(1:number_of_state_variables,:);
    epsilon = sigma_points(number_of_state_variables+1:number_of_state_variables+number_of_structural_innovations,:);
    yhat = bsxfun(@minus,StateVectors,state_variables_steady_state);
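    % Propagate each sigma point through the second-order approximation of the reduced form
    % (tmp contains the implied values of all endogenous variables, indexed below by mf0 and mf1).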
    tmp = local_state_space_iteration_2(yhat,epsilon,ghx,ghu,constant,ghxx,ghuu,ghxu,ThreadsOptions.local_state_space_iteration_2);
    PredictedStateMean = tmp(mf0,:)*weights ;
    PredictedObservedMean = tmp(mf1,:)*weights;

    if ParticleOptions.proposal_approximation.cubature || ParticleOptions.proposal_approximation.montecarlo
        PredictedStateMean = sum(PredictedStateMean,2);
        PredictedObservedMean = sum(PredictedObservedMean,2);
        dState = bsxfun(@minus,tmp(mf0,:),PredictedStateMean)'.*sqrt(weights);
        dObserved = bsxfun(@minus,tmp(mf1,:),PredictedObservedMean)'.*sqrt(weights);
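        % Square-root update: stack the weighted deviations with the Cholesky factor of the
        % measurement error covariance and take a QR decomposition; the blocks of the triangular
        % factor deliver the square roots of the innovation covariance, the state/observation
        % covariance, and the updated state covariance, without forming full covariance matrices.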
        big_mat = [dObserved  dState ; [H_lower_triangular_cholesky zeros(number_of_observed_variables,number_of_state_variables)] ];
        [mat1,mat] = qr2(big_mat,0);
        mat = mat';
        clear('mat1');
        PredictedObservedVarianceSquareRoot = mat(1:number_of_observed_variables,1:number_of_observed_variables);
        CovarianceObservedStateSquareRoot = mat(number_of_observed_variables+(1:number_of_state_variables),1:number_of_observed_variables);
        StateVectorVarianceSquareRoot = mat(number_of_observed_variables+(1:number_of_state_variables),number_of_observed_variables+(1:number_of_state_variables));
        PredictionError = Y(:,t) - PredictedObservedMean;
        StateVectorMean = PredictedStateMean + (CovarianceObservedStateSquareRoot/PredictedObservedVarianceSquareRoot)*PredictionError;
    else
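        % Covariance form of the update: weighted sample covariances, standard Kalman gain, and an
        % explicit update of the state covariance (weights_c may contain negative weights under the
        % scaled unscented transform).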
        dState = bsxfun(@minus,tmp(mf0,:),PredictedStateMean);
        dObserved = bsxfun(@minus,tmp(mf1,:),PredictedObservedMean);
        PredictedStateVariance = dState*diag(weights_c)*dState';
        PredictedObservedVariance = dObserved*diag(weights_c)*dObserved' + H;
        PredictedStateAndObservedCovariance = dState*diag(weights_c)*dObserved';
        PredictionError = Y(:,t) - PredictedObservedMean;
        KalmanFilterGain = PredictedStateAndObservedCovariance/PredictedObservedVariance;
        StateVectorMean = PredictedStateMean + KalmanFilterGain*PredictionError;
        StateVectorVariance = PredictedStateVariance - KalmanFilterGain*PredictedObservedVariance*KalmanFilterGain';
        [StateVectorVarianceSquareRoot, p]= chol(StateVectorVariance,'lower');
        if p
            LIK=-Inf;
            lik(t)=-Inf;
            return
        end
        [PredictedObservedVarianceSquareRoot, p]= chol(PredictedObservedVariance,'lower');
        if p
            LIK=-Inf;
            lik(t)=-Inf;
            return
        end
    end
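    % Period-t contribution: log of the Gaussian density of the prediction error, evaluated from
    % the lower Cholesky factor of the innovation covariance.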
    lik(t) = log( probability2(0,PredictedObservedVarianceSquareRoot,PredictionError) ) ;
end

LIK = -sum(lik(start:end));