From 222d8d182910d8194333e6886ee5909e9acf4e54 Mon Sep 17 00:00:00 2001
From: Michel Juillard <michel.juillard@mjui.fr>
Date: Mon, 5 Oct 2015 20:52:00 +0200
Subject: [PATCH] More fixes related to objective_function_penalty_base

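The TaRB wrapper and the objective functions now return their outputs in the order
[fval, info, exit_flag, DLIK, Hess, SteadyState, trend_coeff]. The bodies of
dsge_likelihood.m, dsge_var_likelihood.m and osr_obj.m are moved to dsge_likelihood_1.m,
dsge_var_likelihood_1.m and osr_obj_1.m; the old entry points remain as wrappers that
forward to the new functions.

For example, the call to the target function in TaRB_optimizer_wrapper.m changes from

    [fval,DLIK,Hess,exit_flag] = feval(TargetFun,par_vector,varargin{:});

to

    [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff] = feval(TargetFun,par_vector,varargin{:});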
---
 matlab/TaRB_optimizer_wrapper.m               |   4 +-
 matlab/dsge_likelihood.m                      | 735 +--------------
 matlab/dsge_likelihood_1.m                    | 858 ++++++++++++++++++
 matlab/dsge_var_likelihood.m                  | 283 +-----
 matlab/dsge_var_likelihood_1.m                | 312 +++++++
 matlab/dynare_estimation_1.m                  |   6 +-
 matlab/identification_analysis.m              |   2 +-
 matlab/mode_check.m                           |   2 +-
 matlab/optimization/mr_hessian.m              |  12 +-
 matlab/optimization/newrat.m                  |   6 +-
 .../optimization/penalty_objective_function.m |   6 +-
 matlab/osr1.m                                 |   2 +-
 matlab/osr_obj.m                              |  60 +-
 matlab/osr_obj_1.m                            |  90 ++
 tests/estimation/TaRB/fs2000_tarb.mod         |   1 +
 15 files changed, 1295 insertions(+), 1084 deletions(-)
 create mode 100644 matlab/dsge_likelihood_1.m
 create mode 100644 matlab/dsge_var_likelihood_1.m
 create mode 100644 matlab/osr_obj_1.m

diff --git a/matlab/TaRB_optimizer_wrapper.m b/matlab/TaRB_optimizer_wrapper.m
index 9c7ed4cccb..ec91451b5d 100644
--- a/matlab/TaRB_optimizer_wrapper.m
+++ b/matlab/TaRB_optimizer_wrapper.m
@@ -1,4 +1,4 @@
-function [fval,DLIK,Hess,exit_flag] = TaRB_optimizer_wrapper(optpar,par_vector,parameterindices,TargetFun,varargin)
+function [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff] = TaRB_optimizer_wrapper(optpar,par_vector,parameterindices,TargetFun,varargin)
 % function [fval,DLIK,Hess,exit_flag] = TaRB_optimizer_wrapper(optpar,par_vector,parameterindices,TargetFun,varargin)
 % Wrapper function for target function used in TaRB algorithm; reassembles
 % full parameter vector before calling target function
@@ -36,5 +36,5 @@ function [fval,DLIK,Hess,exit_flag] = TaRB_optimizer_wrapper(optpar,par_vector,p
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
 par_vector(parameterindices,:)=optpar; %reassemble parameter
-[fval,DLIK,Hess,exit_flag] = feval(TargetFun,par_vector,varargin{:}); %call target function
+[fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff] = feval(TargetFun,par_vector,varargin{:}); %call target function
 
diff --git a/matlab/dsge_likelihood.m b/matlab/dsge_likelihood.m
index 826c60496d..0552ae6767 100644
--- a/matlab/dsge_likelihood.m
+++ b/matlab/dsge_likelihood.m
@@ -1,7 +1,5 @@
-function [fval,DLIK,Hess,exit_flag,SteadyState,trend_coeff,info,Model,DynareOptions,BayesInfo,DynareResults] = dsge_likelihood(xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults,derivatives_info)
-% Evaluates the posterior kernel of a dsge model using the specified
-% kalman_algo; the resulting posterior includes the 2*pi constant of the
-% likelihood function
+function [fval,DLIK,Hess,exit_flag,SteadyState,trend_coeff,info,Model,DynareOptions,BayesInfo,DynareResults] = dsge_likelihood(xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults,derivatives_info)
+% Evaluates the posterior kernel of a dsge model. Deprecated interface; the computation is done in dsge_likelihood_1.m.
 
 %@info:
 %! @deftypefn {Function File} {[@var{fval},@var{exit_flag},@var{ys},@var{trend_coeff},@var{info},@var{Model},@var{DynareOptions},@var{BayesInfo},@var{DynareResults},@var{DLIK},@var{AHess}] =} dsge_likelihood (@var{xparam1},@var{DynareDataset},@var{DynareOptions},@var{Model},@var{EstimatedParameters},@var{BayesInfo},@var{DynareResults},@var{derivatives_flag})
@@ -39,7 +37,7 @@ function [fval,DLIK,Hess,exit_flag,SteadyState,trend_coeff,info,Model,DynareOpti
 %! Integer scalar, equal to zero if the routine return with a penalty (one otherwise).
 %! @item ys
 %! Vector of doubles, steady state level for the endogenous variables.
-%! @item trend_coeff
+%! @item trend_coeffs
 %! Matrix of doubles, coefficients of the deterministic trend in the measurement equation.
 %! @item info
 %! Integer scalar, error code.
@@ -70,8 +68,6 @@ function [fval,DLIK,Hess,exit_flag,SteadyState,trend_coeff,info,Model,DynareOpti
 %! M_.params has been updated in the steadystate routine and has complex valued scalars.
 %! @item info==24
 %! M_.params has been updated in the steadystate routine and has some NaNs.
-%! @item info==26
-%! M_.params has been updated in the steadystate routine and has negative/0 values in loglinear model.
 %! @item info==30
 %! Ergodic variance can't be computed.
 %! @item info==41
@@ -134,725 +130,6 @@ function [fval,DLIK,Hess,exit_flag,SteadyState,trend_coeff,info,Model,DynareOpti
 
 % AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT FR
 
-% Initialization of the returned variables and others...
-fval        = [];
-SteadyState = [];
-trend_coeff = [];
-exit_flag   = 1;
-info        = 0;
-DLIK        = [];
-Hess       = [];
-
-if DynareOptions.estimation_dll
-    [fval,exit_flag,SteadyState,trend_coeff,info,params,H,Q] ...
-        = logposterior(xparam1,DynareDataset, DynareOptions,Model, ...
-                          EstimatedParameters,BayesInfo,DynareResults);
-    mexErrCheck('logposterior', exit_flag);
-    Model.params = params;
-    if ~isequal(Model.H,0)
-        Model.H = H;
-    end
-    Model.Sigma_e = Q;
-    DynareResults.dr.ys = SteadyState;
-    return
-end
-
-% Set flag related to analytical derivatives.
-analytic_derivation = DynareOptions.analytic_derivation;
-
-if analytic_derivation && DynareOptions.loglinear
-    error('The analytic_derivation and loglinear options are not compatible')
-end
-
-if nargout==1,
-    analytic_derivation=0;
-end
-
-if analytic_derivation,
-    kron_flag=DynareOptions.analytic_derivation_mode;
-end
-
-%------------------------------------------------------------------------------
-% 1. Get the structural parameters & define penalties
-%------------------------------------------------------------------------------
-
-% Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
-if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BoundsInfo.lb)
-    k = find(xparam1<BoundsInfo.lb);
-    fval = Inf;
-    exit_flag = 0;
-    info(1) = 41;
-    info(2) = sum((BoundsInfo.lb(k)-xparam1(k)).^2);
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
-    end
-    return
-end
-
-% Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
-if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BoundsInfo.ub)
-    k = find(xparam1>BoundsInfo.ub);
-    fval = Inf;
-    exit_flag = 0;
-    info(1) = 42;
-    info(2) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
-    end
-    return
-end
-
-% Get the diagonal elements of the covariance matrices for the structural innovations (Q) and the measurement error (H).
-Model = set_all_parameters(xparam1,EstimatedParameters,Model);
-
-Q = Model.Sigma_e;
-H = Model.H;
-
-% Test if Q is positive definite.
-if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances')
-    [Q_is_positive_definite, penalty] = ispd(Q);
-    if ~Q_is_positive_definite
-        fval = Inf;
-        exit_flag = 0;
-        info(1) = 43;
-        info(2) = penalty;
-        return
-    end
-    if isfield(EstimatedParameters,'calibrated_covariances')
-        correct_flag=check_consistency_covariances(Q);
-        if ~correct_flag
-            fval = Inf;
-            exit_flag = 0;
-            info(1) = 71;
-            info(2) = sum(Q(EstimatedParameters.calibrated_covariances.position).^2);
-            return
-        end
-    end
-end
-
-% Test if H is positive definite.
-if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME')
-    [H_is_positive_definite, penalty] = ispd(H);
-    if ~H_is_positive_definite
-        fval = Inf;
-        exit_flag = 0;
-        info(1) = 44;
-        info(2) = penalty;
-        return
-    end
-    if isfield(EstimatedParameters,'calibrated_covariances_ME')
-        correct_flag=check_consistency_covariances(H);
-        if ~correct_flag
-            fval = Inf;
-            exit_flag = 0;
-            info(1) = 72;
-            info(2) = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2);
-            return
-        end
-    end
-end
-
-
-%------------------------------------------------------------------------------
-% 2. call model setup & reduction program
-%------------------------------------------------------------------------------
-
-% Linearize the model around the deterministic sdteadystate and extract the matrices of the state equation (T and R).
-[T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
-
-% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || ...
-            info(1) == 22 || info(1) == 24 || info(1) == 19 || info(1) == 25 || info(1) == 10
-    fval = Inf;
-    info(2) = 0.1;
-    exit_flag = 0;
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
-    end
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21  || info(1) == 23 || info(1)==26
-    fval = Inf;
-    exit_flag = 0;
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
-    end
-    return
-end
-
-% check endogenous prior restrictions
-info=endogenous_prior_restrictions(T,R,Model,DynareOptions,DynareResults);
-if info(1),
-    fval = Inf;
-    exit_flag = 0;
-    if analytic_derivation,
-        DLIK=ones(length(xparam1),1);
-    end
-    return
-end
-%
-
-% Define a vector of indices for the observed variables. Is this really usefull?...
-BayesInfo.mf = BayesInfo.mf1;
-
-% Define the constant vector of the measurement equation.
-if DynareOptions.noconstant
-    constant = zeros(DynareDataset.vobs,1);
-else
-    if DynareOptions.loglinear
-        constant = log(SteadyState(BayesInfo.mfys));
-    else
-        constant = SteadyState(BayesInfo.mfys);
-    end
-end
-
-% Define the deterministic linear trend of the measurement equation.
-if BayesInfo.with_trend
-    trend_coeff = zeros(DynareDataset.vobs,1);
-    t = DynareOptions.trend_coeffs;
-    for i=1:length(t)
-        if ~isempty(t{i})
-            trend_coeff(i) = evalin('base',t{i});
-        end
-    end
-    trend = repmat(constant,1,DynareDataset.nobs)+trend_coeff*[1:DynareDataset.nobs];
-else
-   trend_coeff = zeros(DynareDataset.vobs,1);
-   trend = repmat(constant,1,DynareDataset.nobs);
-end
-
-% Get needed informations for kalman filter routines.
-start = DynareOptions.presample+1;
-Z = BayesInfo.mf;
-no_missing_data_flag = ~DatasetInfo.missing.state;
-mm = length(T);
-pp = DynareDataset.vobs;
-rr = length(Q);
-kalman_tol = DynareOptions.kalman_tol;
-diffuse_kalman_tol = DynareOptions.diffuse_kalman_tol;
-riccati_tol = DynareOptions.riccati_tol;
-Y = transpose(DynareDataset.data)-trend;
-
-%------------------------------------------------------------------------------
-% 3. Initial condition of the Kalman filter
-%------------------------------------------------------------------------------
-kalman_algo = DynareOptions.kalman_algo;
-
-% resetting measurement errors covariance matrix for univariate filters
-if (kalman_algo == 2) || (kalman_algo == 4)
-    if isequal(H,0)
-        H = zeros(pp,1);
-        mmm = mm;
-    else
-        if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
-            H = diag(H);
-            mmm = mm;
-        else
-            Z = [Z, eye(pp)];
-            T = blkdiag(T,zeros(pp));
-            Q = blkdiag(Q,H);
-            R = blkdiag(R,eye(pp));
-            Pstar = blkdiag(Pstar,H);
-            Pinf  = blkdiag(Pinf,zeros(pp));
-            H = zeros(pp,1);
-            mmm   = mm+pp;
-        end
-    end
-end
-
-
-diffuse_periods = 0;
-correlated_errors_have_been_checked = 0;
-singular_diffuse_filter = 0;
-switch DynareOptions.lik_init
-  case 1% Standard initialization with the steady state of the state equation.
-    if kalman_algo~=2
-        % Use standard kalman filter except if the univariate filter is explicitely choosen.
-        kalman_algo = 1;
-    end
-    if DynareOptions.lyapunov_fp == 1
-        Pstar = lyapunov_symm(T,R*Q'*R',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, 3, [], DynareOptions.debug);
-    elseif DynareOptions.lyapunov_db == 1
-        Pstar = disclyap_fast(T,R*Q*R',DynareOptions.lyapunov_doubling_tol);
-    elseif DynareOptions.lyapunov_srs == 1
-        Pstar = lyapunov_symm(T,Q,DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, 4, R, DynareOptions.debug);
-    else
-        Pstar = lyapunov_symm(T,R*Q*R',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, [], [], DynareOptions.debug);
-    end;
-    Pinf  = [];
-    a     = zeros(mm,1);
-    Zflag = 0;
-  case 2% Initialization with large numbers on the diagonal of the covariance matrix if the states (for non stationary models).
-    if kalman_algo ~= 2
-        % Use standard kalman filter except if the univariate filter is explicitely choosen.
-        kalman_algo = 1;
-    end
-    Pstar = DynareOptions.Harvey_scale_factor*eye(mm);
-    Pinf  = [];
-    a     = zeros(mm,1);
-    Zflag = 0;
-  case 3% Diffuse Kalman filter (Durbin and Koopman)
-        % Use standard kalman filter except if the univariate filter is explicitely choosen.
-    if kalman_algo == 0
-        kalman_algo = 3;
-    elseif ~((kalman_algo == 3) || (kalman_algo == 4))
-            error(['The model requires Diffuse filter, but you specified a different Kalman filter. You must set options_.kalman_algo ' ...
-                   'to 0 (default), 3 or 4'])
-    end
-    [Z,T,R,QT,Pstar,Pinf] = schur_statespace_transformation(Z,T,R,Q,DynareOptions.qz_criterium);
-    Zflag = 1;
-    % Run diffuse kalman filter on first periods.
-    if (kalman_algo==3)
-        % Multivariate Diffuse Kalman Filter
-        if no_missing_data_flag
-            [dLIK,dlik,a,Pstar] = kalman_filter_d(Y, 1, size(Y,2), ...
-                                                       zeros(mm,1), Pinf, Pstar, ...
-                                                       kalman_tol, diffuse_kalman_tol, riccati_tol, DynareOptions.presample, ...
-                                                       T,R,Q,H,Z,mm,pp,rr);
-        else
-            [dLIK,dlik,a,Pstar] = missing_observations_kalman_filter_d(DatasetInfo.missing.aindex,DatasetInfo.missing.number_of_observations,DatasetInfo.missing.no_more_missing_observations, ...
-                                                              Y, 1, size(Y,2), ...
-                                                              zeros(mm,1), Pinf, Pstar, ...
-                                                              kalman_tol, diffuse_kalman_tol, riccati_tol, DynareOptions.presample, ...
-                                                              T,R,Q,H,Z,mm,pp,rr);
-        end
-        diffuse_periods = length(dlik);
-        if isinf(dLIK)
-            % Go to univariate diffuse filter if singularity problem.
-            singular_diffuse_filter = 1;
-        end
-    end
-    if singular_diffuse_filter || (kalman_algo==4)
-        % Univariate Diffuse Kalman Filter
-        if isequal(H,0)
-            H1 = zeros(pp,1);
-            mmm = mm;
-        else
-            if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
-                H1 = diag(H);
-                mmm = mm;
-            else
-                Z = [Z, eye(pp)];
-                T = blkdiag(T,zeros(pp));
-                Q = blkdiag(Q,H);
-                R = blkdiag(R,eye(pp));
-                Pstar = blkdiag(Pstar,H);
-                Pinf  = blkdiag(Pinf,zeros(pp));
-                H1 = zeros(pp,1);
-                mmm   = mm+pp;
-            end
-        end
-        % no need to test again for correlation elements
-        correlated_errors_have_been_checked = 1;
-
-        [dLIK,dlik,a,Pstar] = univariate_kalman_filter_d(DatasetInfo.missing.aindex,...
-                                                        DatasetInfo.missing.number_of_observations,...
-                                                        DatasetInfo.missing.no_more_missing_observations, ...
-                                                        Y, 1, size(Y,2), ...
-                                                        zeros(mmm,1), Pinf, Pstar, ...
-                                                        kalman_tol, diffuse_kalman_tol, riccati_tol, DynareOptions.presample, ...
-                                                        T,R,Q,H1,Z,mmm,pp,rr);
-        diffuse_periods = size(dlik,1);
-    end
-    if isnan(dLIK),
-        fval = Inf;
-        info(1) = 45;
-        info(2) = 0.1;
-        exit_flag = 0;
-        return
-    end
-    
-  case 4% Start from the solution of the Riccati equation.
-    if kalman_algo ~= 2
-        kalman_algo = 1;
-    end
-    if isequal(H,0)
-        [err,Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(Z,mm,length(Z))));
-    else
-        [err,Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(Z,mm,length(Z))),H);
-    end
-    if err
-        disp(['dsge_likelihood:: I am not able to solve the Riccati equation, so I switch to lik_init=1!']);
-        DynareOptions.lik_init = 1;
-        Pstar = lyapunov_symm(T,R*Q*R',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, [], [], DynareOptions.debug);
-    end
-    Pinf  = [];
-    a = zeros(mm,1);
-    Zflag = 0;
-  case 5            % Old diffuse Kalman filter only for the non stationary variables
-    [eigenvect, eigenv] = eig(T);
-    eigenv = diag(eigenv);
-    nstable = length(find(abs(abs(eigenv)-1) > 1e-7));
-    unstable = find(abs(abs(eigenv)-1) < 1e-7);
-    V = eigenvect(:,unstable);
-    indx_unstable = find(sum(abs(V),2)>1e-5);
-    stable = find(sum(abs(V),2)<1e-5);
-    nunit = length(eigenv) - nstable;
-    Pstar = options_.Harvey_scale_factor*eye(np);
-    if kalman_algo ~= 2
-        kalman_algo = 1;
-    end
-    R_tmp = R(stable, :);
-    T_tmp = T(stable,stable);
-    if DynareOptions.lyapunov_fp == 1
-        Pstar_tmp = lyapunov_symm(T_tmp,R_tmp*Q*R_tmp',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, 3, [], DynareOptions.debug);
-    elseif DynareOptions.lyapunov_db == 1
-        Pstar_tmp = disclyap_fast(T_tmp,R_tmp*Q*R_tmp',DynareOptions.lyapunov_doubling_tol);
-    elseif DynareOptions.lyapunov_srs == 1
-        Pstar_tmp = lyapunov_symm(T_tmp,Q,DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, 4, R_tmp, DynareOptions.debug);
-    else
-        Pstar_tmp = lyapunov_symm(T_tmp,R_tmp*Q*R_tmp',DynareOptions.qz_criterium,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, [], [], DynareOptions.debug);
-    end    
-    Pstar(stable, stable) = Pstar_tmp;
-    Pinf  = [];
-  otherwise
-    error('dsge_likelihood:: Unknown initialization approach for the Kalman filter!')
-end
-
-if analytic_derivation,
-    offset = EstimatedParameters.nvx;
-    offset = offset+EstimatedParameters.nvn;
-    offset = offset+EstimatedParameters.ncx;
-    offset = offset+EstimatedParameters.ncn;
-
-    no_DLIK = 0;
-    full_Hess = analytic_derivation==2;
-    asy_Hess = analytic_derivation==-2;
-    outer_product_gradient = analytic_derivation==-1;
-    if asy_Hess,
-        analytic_derivation=1;
-    end
-    if outer_product_gradient,
-        analytic_derivation=1;
-    end
-    DLIK = [];
-    AHess = [];
-    iv = DynareResults.dr.restrict_var_list;
-    if nargin<10 || isempty(derivatives_info)
-        [A,B,nou,nou,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults);
-        if ~isempty(EstimatedParameters.var_exo)
-            indexo=EstimatedParameters.var_exo(:,1);
-        else
-            indexo=[];
-        end
-        if ~isempty(EstimatedParameters.param_vals)
-            indparam=EstimatedParameters.param_vals(:,1);
-        else
-            indparam=[];
-        end
-
-        if full_Hess,
-            [dum, DT, DOm, DYss, dum2, D2T, D2Om, D2Yss] = getH(A, B, Model,DynareResults,DynareOptions,kron_flag,indparam,indexo,iv);
-            clear dum dum2;
-        else
-            [dum, DT, DOm, DYss] = getH(A, B, Model,DynareResults,DynareOptions,kron_flag,indparam,indexo,iv);
-        end
-    else
-        DT = derivatives_info.DT(iv,iv,:);
-        DOm = derivatives_info.DOm(iv,iv,:);
-        DYss = derivatives_info.DYss(iv,:);
-        if isfield(derivatives_info,'full_Hess'),
-            full_Hess = derivatives_info.full_Hess;
-        end
-        if full_Hess,
-        D2T = derivatives_info.D2T;
-        D2Om = derivatives_info.D2Om;
-        D2Yss = derivatives_info.D2Yss;
-        end
-        if isfield(derivatives_info,'no_DLIK'),
-            no_DLIK = derivatives_info.no_DLIK;
-        end
-        clear('derivatives_info');
-    end
-    DYss = [zeros(size(DYss,1),offset) DYss];
-    DH=zeros([length(H),length(H),length(xparam1)]);
-    DQ=zeros([size(Q),length(xparam1)]);
-    DP=zeros([size(T),length(xparam1)]);
-    if full_Hess,
-        for j=1:size(D2Yss,1),
-        tmp(j,:,:) = blkdiag(zeros(offset,offset), squeeze(D2Yss(j,:,:)));
-        end
-        D2Yss = tmp;
-        D2H=sparse(size(D2Om,1),size(D2Om,2)); %zeros([size(H),length(xparam1),length(xparam1)]);
-        D2P=sparse(size(D2Om,1),size(D2Om,2)); %zeros([size(T),length(xparam1),length(xparam1)]);
-        jcount=0;
-    end
-    if DynareOptions.lik_init==1,
-    for i=1:EstimatedParameters.nvx
-        k =EstimatedParameters.var_exo(i,1);
-        DQ(k,k,i) = 2*sqrt(Q(k,k));
-        dum =  lyapunov_symm(T,DOm(:,:,i),DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold,[],[],DynareOptions.debug);
-%         kk = find(abs(dum) < 1e-12);
-%         dum(kk) = 0;
-        DP(:,:,i)=dum;
-        if full_Hess
-        for j=1:i,
-            jcount=jcount+1;
-            dum =  lyapunov_symm(T,dyn_unvech(D2Om(:,jcount)),DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold,[],[],DynareOptions.debug);
-%             kk = (abs(dum) < 1e-12);
-%             dum(kk) = 0;
-            D2P(:,jcount)=dyn_vech(dum);
-%             D2P(:,:,j,i)=dum;
-        end
-        end
-    end
-    end
-    offset = EstimatedParameters.nvx;
-    for i=1:EstimatedParameters.nvn
-        k = EstimatedParameters.var_endo(i,1);
-        DH(k,k,i+offset) = 2*sqrt(H(k,k));
-        if full_Hess
-        D2H(k,k,i+offset,i+offset) = 2;
-        end
-    end
-    offset = offset + EstimatedParameters.nvn;
-    if DynareOptions.lik_init==1,
-    for j=1:EstimatedParameters.np
-        dum =  lyapunov_symm(T,DT(:,:,j+offset)*Pstar*T'+T*Pstar*DT(:,:,j+offset)'+DOm(:,:,j+offset),DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold,[],[],DynareOptions.debug);
-%         kk = find(abs(dum) < 1e-12);
-%         dum(kk) = 0;
-        DP(:,:,j+offset)=dum;
-        if full_Hess
-        DTj = DT(:,:,j+offset);
-        DPj = dum;
-        for i=1:j+offset,
-            jcount=jcount+1;
-            DTi = DT(:,:,i);
-            DPi = DP(:,:,i);
-            D2Tij = reshape(D2T(:,jcount),size(T));
-            D2Omij = dyn_unvech(D2Om(:,jcount));
-            tmp = D2Tij*Pstar*T' + T*Pstar*D2Tij' + DTi*DPj*T' + DTj*DPi*T' + T*DPj*DTi' + T*DPi*DTj' + DTi*Pstar*DTj' + DTj*Pstar*DTi' + D2Omij;
-            dum = lyapunov_symm(T,tmp,DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold,[],[],DynareOptions.debug);
-%             dum(abs(dum)<1.e-12) = 0;
-            D2P(:,jcount) = dyn_vech(dum);
-%             D2P(:,:,j+offset,i) = dum;
-        end
-        end
-    end
-    end
-    if analytic_derivation==1,
-        analytic_deriv_info={analytic_derivation,DT,DYss,DOm,DH,DP,asy_Hess};
-    else
-        analytic_deriv_info={analytic_derivation,DT,DYss,DOm,DH,DP,D2T,D2Yss,D2Om,D2H,D2P};
-        clear DT DYss DOm DP D2T D2Yss D2Om D2H D2P,
-    end
-else
-    analytic_deriv_info={0};
-end
-
-%------------------------------------------------------------------------------
-% 4. Likelihood evaluation
-%------------------------------------------------------------------------------
-
-if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter
-    if no_missing_data_flag
-        if DynareOptions.block
-            [err, LIK] = block_kalman_filter(T,R,Q,H,Pstar,Y,start,Z,kalman_tol,riccati_tol, Model.nz_state_var, Model.n_diag, Model.nobs_non_statevar);
-            mexErrCheck('block_kalman_filter', err);
-        else
-            [LIK,lik] = kalman_filter(Y,diffuse_periods+1,size(Y,2), ...
-                                a,Pstar, ...
-                                kalman_tol, riccati_tol, ...
-                                DynareOptions.presample, ...
-                                T,Q,R,H,Z,mm,pp,rr,Zflag,diffuse_periods, ...
-                                analytic_deriv_info{:});
-
-        end
-    else
-        if 0 %DynareOptions.block
-            [err, LIK,lik] = block_kalman_filter(DatasetInfo.missing.aindex,DatasetInfo.missing.number_of_observations,DatasetInfo.missing.no_more_missing_observations,...
-                                                                 T,R,Q,H,Pstar,Y,start,Z,kalman_tol,riccati_tol, Model.nz_state_var, Model.n_diag, Model.nobs_non_statevar);
-        else
-            [LIK,lik] = missing_observations_kalman_filter(DatasetInfo.missing.aindex,DatasetInfo.missing.number_of_observations,DatasetInfo.missing.no_more_missing_observations,Y,diffuse_periods+1,size(Y,2), ...
-                                               a, Pstar, ...
-                                               kalman_tol, DynareOptions.riccati_tol, ...
-                                               DynareOptions.presample, ...
-                                               T,Q,R,H,Z,mm,pp,rr,Zflag,diffuse_periods);
-        end
-    end
-    if analytic_derivation,
-        LIK1=LIK;
-        LIK=LIK1{1};
-        lik1=lik;
-        lik=lik1{1};
-    end
-    if isinf(LIK)
-        if DynareOptions.use_univariate_filters_if_singularity_is_detected
-            if kalman_algo == 1
-                kalman_algo = 2;
-            else
-                kalman_algo = 4;
-            end
-        else
-            if isinf(LIK)
-                fval = Inf;
-                info(1) = 66;
-                info(2) = 0.1;
-                exit_flag = 0;
-                return
-            end
-        end
-    else
-        if DynareOptions.lik_init==3
-            LIK = LIK + dLIK;
-            if analytic_derivation==0 && nargout==2,
-                lik = [dlik; lik];
-            end
-        end
-    end
-end
-
-if (kalman_algo==2) || (kalman_algo==4)
-    % Univariate Kalman Filter
-    % resetting measurement error covariance matrix when necessary                                                           %
-    if ~correlated_errors_have_been_checked
-        if isequal(H,0)
-            H1 = zeros(pp,1);
-            mmm = mm;
-            if analytic_derivation,
-                DH = zeros(pp,length(xparam1));
-            end
-        else
-            if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
-                H1 = diag(H);
-                mmm = mm;
-                clear tmp
-                if analytic_derivation,
-                    for j=1:pp,
-                        tmp(j,:)=DH(j,j,:);
-                    end
-                    DH=tmp;
-                end
-            else
-                Z = [Z, eye(pp)];
-                T = blkdiag(T,zeros(pp));
-                Q = blkdiag(Q,H);
-                R = blkdiag(R,eye(pp));
-                Pstar = blkdiag(Pstar,H);
-                Pinf  = blkdiag(Pinf,zeros(pp));
-                H1 = zeros(pp,1);
-                mmm   = mm+pp;
-            end
-        end
-        if analytic_derivation,
-            analytic_deriv_info{5}=DH;
-        end
-    end
-
-    [LIK, lik] = univariate_kalman_filter(DatasetInfo.missing.aindex,DatasetInfo.missing.number_of_observations,DatasetInfo.missing.no_more_missing_observations,Y,diffuse_periods+1,size(Y,2), ...
-                                       a,Pstar, ...
-                                       DynareOptions.kalman_tol, ...
-                                       DynareOptions.riccati_tol, ...
-                                       DynareOptions.presample, ...
-                                       T,Q,R,H1,Z,mmm,pp,rr,Zflag,diffuse_periods,analytic_deriv_info{:});
-    if analytic_derivation,
-        LIK1=LIK;
-        LIK=LIK1{1};
-        lik1=lik;
-        lik=lik1{1};
-    end
-    if DynareOptions.lik_init==3
-        LIK = LIK+dLIK;
-        if analytic_derivation==0 && nargout==2,
-            lik = [dlik; lik];
-        end
-    end
-end
-
-if analytic_derivation
-    if no_DLIK==0
-        DLIK = LIK1{2};
-        %                 [DLIK] = score(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,start,Z,kalman_tol,riccati_tol);
-    end
-    if full_Hess ,
-        Hess = -LIK1{3};
-        %                     [Hess, DLL] = get_Hessian(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,D2T,D2Yss,D2Om,D2H,D2P,start,Z,kalman_tol,riccati_tol);
-        %                     Hess0 = getHessian(Y,T,DT,D2T, R*Q*transpose(R),DOm,D2Om,Z,DYss,D2Yss);
-    end
-    if asy_Hess,
-%         if ~((kalman_algo==2) || (kalman_algo==4)),
-%             [Hess] = AHessian(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,start,Z,kalman_tol,riccati_tol);
-%         else
-        Hess = LIK1{3};
-%         end
-    end
-end
-
-if isnan(LIK)
-    fval = Inf;
-    info(1) = 45;
-    info(2) = 0.1;
-    exit_flag = 0;
-    return
-end
-
-if imag(LIK)~=0
-    fval = Inf;
-    info(1) = 46;
-    info(2) = 0.1;
-    exit_flag = 0;
-    return
-end
-
-likelihood = LIK;
-
-% ------------------------------------------------------------------------------
-% 5. Adds prior if necessary
-% ------------------------------------------------------------------------------
-if analytic_derivation
-    if full_Hess,
-        [lnprior, dlnprior, d2lnprior] = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
-        Hess = Hess - d2lnprior;
-    else
-        [lnprior, dlnprior] = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
-    end
-    if no_DLIK==0
-        DLIK = DLIK - dlnprior';
-    end
-    if outer_product_gradient,
-        dlik = lik1{2};
-        dlik=[- dlnprior; dlik(start:end,:)];
-        Hess = dlik'*dlik;
-    end
-else
-    lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
-end
-
-if DynareOptions.endogenous_prior==1
-  if DynareOptions.lik_init==2 || DynareOptions.lik_init==3
-    error('Endogenous prior not supported with non-stationary models')
-  else
-    [lnpriormom]  = endogenous_prior(Y,Pstar,BayesInfo,H);
-    fval    = (likelihood-lnprior-lnpriormom);
-  end
-else
-  fval    = (likelihood-lnprior);
-end
-
-if DynareOptions.prior_restrictions.status
-    tmp = feval(DynareOptions.prior_restrictions.routine, Model, DynareResults, DynareOptions, DynareDataset, DatasetInfo);
-    fval = fval - tmp;
-end
-
-if isnan(fval)
-    fval = Inf;
-    info(1) = 47;
-    info(2) = 0.1;
-    exit_flag = 0;
-    return
-end
-
-if imag(fval)~=0
-    fval = Inf;
-    info(1) = 48;
-    info(2) = 0.1;
-    exit_flag = 0;
-    return
-end
-
-% Update DynareOptions.kalman_algo.
-DynareOptions.kalman_algo = kalman_algo;
-
-if analytic_derivation==0 && nargout > 1
-    lik=lik(start:end,:);
-    DLIK=[-lnprior; lik(:)];
-end
\ No newline at end of file
+[fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff,Model,DynareOptions,BayesInfo,DynareResults] = ...
+dsge_likelihood_1(xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,...
+                  BoundsInfo,DynareResults,derivatives_info);
\ No newline at end of file
diff --git a/matlab/dsge_likelihood_1.m b/matlab/dsge_likelihood_1.m
new file mode 100644
index 0000000000..cfa54c0e87
--- /dev/null
+++ b/matlab/dsge_likelihood_1.m
@@ -0,0 +1,858 @@
+function [fval,info,exit_flag,DLIK,Hess,SteadyState,trend_coeff,Model,DynareOptions,BayesInfo,DynareResults] = dsge_likelihood_1(xparam1,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults,derivatives_info)
+% Evaluates the posterior kernel of a dsge model using the specified
+% kalman_algo; the resulting posterior includes the 2*pi constant of the
+% likelihood function.
+
+%@info:
+%! @deftypefn {Function File} {[@var{fval},@var{info},@var{exit_flag},@var{DLIK},@var{Hess},@var{SteadyState},@var{trend_coeff},@var{Model},@var{DynareOptions},@var{BayesInfo},@var{DynareResults}] =} dsge_likelihood_1 (@var{xparam1},@var{DynareDataset},@var{DatasetInfo},@var{DynareOptions},@var{Model},@var{EstimatedParameters},@var{BayesInfo},@var{BoundsInfo},@var{DynareResults},@var{derivatives_info})
+%! @anchor{dsge_likelihood}
+%! @sp 1
+%! Evaluates the posterior kernel of a dsge model.
+%! @sp 2
+%! @strong{Inputs}
+%! @sp 1
+%! @table @ @var
+%! @item xparam1
+%! Vector of doubles, current values for the estimated parameters.
+%! @item DynareDataset
+%! Matlab's structure describing the dataset (initialized by dynare, see @ref{dataset_}).
+%! @item DynareOptions
+%! Matlab's structure describing the options (initialized by dynare, see @ref{options_}).
+%! @item Model
+%! Matlab's structure describing the Model (initialized by dynare, see @ref{M_}).
+%! @item EstimatedParameters
+%! Matlab's structure describing the estimated_parameters (initialized by dynare, see @ref{estim_params_}).
+%! @item BayesInfo
+%! Matlab's structure describing the priors (initialized by dynare, see @ref{bayesopt_}).
+%! @item DynareResults
+%! Matlab's structure gathering the results (initialized by dynare, see @ref{oo_}).
+%! @item derivatives_info
+%! Matlab's structure storing pre-computed derivatives of the reduced form matrices (optional, used with analytic derivatives).
+%! @end table
+%! @sp 2
+%! @strong{Outputs}
+%! @sp 1
+%! @table @ @var
+%! @item fval
+%! Double scalar, value of (minus) the likelihood.
+%! @item info
+%! Integer scalar, error code.
+%! @table @ @code
+%! @item info==0
+%! No error.
+%! @item info==1
+%! The model doesn't determine the current variables uniquely.
+%! @item info==2
+%! MJDGGES returned an error code.
+%! @item info==3
+%! Blanchard & Kahn conditions are not satisfied: no stable equilibrium.
+%! @item info==4
+%! Blanchard & Kahn conditions are not satisfied: indeterminacy.
+%! @item info==5
+%! Blanchard & Kahn conditions are not satisfied: indeterminacy due to rank failure.
+%! @item info==6
+%! The jacobian evaluated at the deterministic steady state is complex.
+%! @item info==19
+%! The steadystate routine threw an exception (inconsistent deep parameters).
+%! @item info==20
+%! Cannot find the steady state, info(2) contains the sum of square residuals (of the static equations).
+%! @item info==21
+%! The steady state is complex, info(2) contains the sum of square of imaginary parts of the steady state.
+%! @item info==22
+%! The steady state has NaNs.
+%! @item info==23
+%! M_.params has been updated in the steadystate routine and has complex valued scalars.
+%! @item info==24
+%! M_.params has been updated in the steadystate routine and has some NaNs.
+%! @item info==26
+%! M_.params has been updated in the steadystate routine and has negative/0 values in loglinear model.
+%! @item info==30
+%! Ergodic variance can't be computed.
+%! @item info==41
+%! At least one parameter is violating a lower bound condition.
+%! @item info==42
+%! At least one parameter is violating an upper bound condition.
+%! @item info==43
+%! The covariance matrix of the structural innovations is not positive definite.
+%! @item info==44
+%! The covariance matrix of the measurement errors is not positive definite.
+%! @item info==45
+%! Likelihood is not a number (NaN).
+%! @item info==46
+%! Likelihood is a complex valued number.
+%! @item info==47
+%! Posterior kernel is not a number (logged prior density is NaN)
+%! @item info==48
+%! Posterior kernel is a complex valued number (logged prior density is complex).
+%! @end table
+%! @item exit_flag
+%! Integer scalar, equal to zero if the routine returns with a penalty (one otherwise).
+%! @item SteadyState
+%! Vector of doubles, steady state level for the endogenous variables.
+%! @item trend_coeff
+%! Matrix of doubles, coefficients of the deterministic trend in the measurement equation.
+%! @item Model
+%! Matlab's structure describing the model (initialized by dynare, see @ref{M_}).
+%! @item DynareOptions
+%! Matlab's structure describing the options (initialized by dynare, see @ref{options_}).
+%! @item BayesInfo
+%! Matlab's structure describing the priors (initialized by dynare, see @ref{bayesopt_}).
+%! @item DynareResults
+%! Matlab's structure gathering the results (initialized by dynare, see @ref{oo_}).
+%! @item DLIK
+%! Vector of doubles, score of the likelihood.
+%! @item Hess
+%! Matrix of doubles, Hessian matrix (analytic, asymptotic, or outer-product based, depending on the analytic_derivation option).
+%! @end table
+%! @sp 2
+%! @strong{This function is called by:}
+%! @sp 1
+%! @ref{dynare_estimation_1}, @ref{mode_check}
+%! @sp 2
+%! @strong{This function calls:}
+%! @sp 1
+%! @ref{dynare_resolve}, @ref{lyapunov_symm}, @ref{schur_statespace_transformation}, @ref{kalman_filter_d}, @ref{missing_observations_kalman_filter_d}, @ref{univariate_kalman_filter_d}, @ref{kalman_steady_state}, @ref{getH}, @ref{kalman_filter}, @ref{score}, @ref{AHessian}, @ref{missing_observations_kalman_filter}, @ref{univariate_kalman_filter}, @ref{priordens}
+%! @end deftypefn
+%@eod:
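+%
+% Illustrative call (argument names are the ones documented above; derivatives_info is
+% optional and only needed when pre-computed derivatives are supplied):
+%
+%   [fval, info, exit_flag, DLIK, Hess, SteadyState, trend_coeff] = ...
+%       dsge_likelihood_1(xparam1, DynareDataset, DatasetInfo, DynareOptions, Model, ...
+%                         EstimatedParameters, BayesInfo, BoundsInfo, DynareResults);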
+
+% Copyright (C) 2004-2013 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+% AUTHOR(S) stephane DOT adjemian AT univ DASH lemans DOT FR
+
+% Initialization of the returned variables and others...
+fval        = [];
+SteadyState = [];
+trend_coeff = [];
+exit_flag   = 1;
+info        = 0;
+DLIK        = [];
+Hess       = [];
+
+if DynareOptions.estimation_dll
+    [fval,exit_flag,SteadyState,trend_coeff,info,params,H,Q] ...
+        = logposterior(xparam1,DynareDataset, DynareOptions,Model, ...
+                          EstimatedParameters,BayesInfo,DynareResults);
+    mexErrCheck('logposterior', exit_flag);
+    Model.params = params;
+    if ~isequal(Model.H,0)
+        Model.H = H;
+    end
+    Model.Sigma_e = Q;
+    DynareResults.dr.ys = SteadyState;
+    return
+end
+
+% Set flag related to analytical derivatives.
+analytic_derivation = DynareOptions.analytic_derivation;
+
+if analytic_derivation && DynareOptions.loglinear
+    error('The analytic_derivation and loglinear options are not compatible')
+end
+
+if nargout==1,
+    analytic_derivation=0;
+end
+
+if analytic_derivation,
+    kron_flag=DynareOptions.analytic_derivation_mode;
+end
+
+%------------------------------------------------------------------------------
+% 1. Get the structural parameters & define penalties
+%------------------------------------------------------------------------------
+
+% Return, with endogenous penalty, if some parameters are smaller than the lower bound of the prior domain.
+if ~isequal(DynareOptions.mode_compute,1) && any(xparam1<BoundsInfo.lb)
+    k = find(xparam1<BoundsInfo.lb);
+    fval = Inf;
+    exit_flag = 0;
+    info(1) = 41;
+    info(2) = sum((BoundsInfo.lb(k)-xparam1(k)).^2);
+    if analytic_derivation,
+        DLIK=ones(length(xparam1),1);
+    end
+    return
+end
+
+% Return, with endogenous penalty, if some parameters are greater than the upper bound of the prior domain.
+if ~isequal(DynareOptions.mode_compute,1) && any(xparam1>BoundsInfo.ub)
+    k = find(xparam1>BoundsInfo.ub);
+    fval = Inf;
+    exit_flag = 0;
+    info(1) = 42;
+    info(2) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
+    if analytic_derivation,
+        DLIK=ones(length(xparam1),1);
+    end
+    return
+end
+
+% Get the diagonal elements of the covariance matrices for the structural innovations (Q) and the measurement error (H).
+Model = set_all_parameters(xparam1,EstimatedParameters,Model);
+
+Q = Model.Sigma_e;
+H = Model.H;
+
+% Test if Q is positive definite.
+if ~issquare(Q) || EstimatedParameters.ncx || isfield(EstimatedParameters,'calibrated_covariances')
+    [Q_is_positive_definite, penalty] = ispd(Q);
+    if ~Q_is_positive_definite
+        fval = Inf;
+        exit_flag = 0;
+        info(1) = 43;
+        info(2) = penalty;
+        return
+    end
+    if isfield(EstimatedParameters,'calibrated_covariances')
+        correct_flag=check_consistency_covariances(Q);
+        if ~correct_flag
+            fval = Inf;
+            exit_flag = 0;
+            info(1) = 71;
+            info(2) = sum(Q(EstimatedParameters.calibrated_covariances.position).^2);
+            return
+        end
+    end
+end
+
+% Test if H is positive definite.
+if ~issquare(H) || EstimatedParameters.ncn || isfield(EstimatedParameters,'calibrated_covariances_ME')
+    [H_is_positive_definite, penalty] = ispd(H);
+    if ~H_is_positive_definite
+        fval = Inf;
+        exit_flag = 0;
+        info(1) = 44;
+        info(2) = penalty;
+        return
+    end
+    if isfield(EstimatedParameters,'calibrated_covariances_ME')
+        correct_flag=check_consistency_covariances(H);
+        if ~correct_flag
+            fval = Inf;
+            exit_flag = 0;
+            info(1) = 72;
+            info(2) = sum(H(EstimatedParameters.calibrated_covariances_ME.position).^2);
+            return
+        end
+    end
+end
+
+
+%------------------------------------------------------------------------------
+% 2. call model setup & reduction program
+%------------------------------------------------------------------------------
+
+% Linearize the model around the deterministic steady state and extract the matrices of the state equation (T and R).
+[T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
+
+% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
+if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || ...
+            info(1) == 22 || info(1) == 24 || info(1) == 19 || info(1) == 25 || info(1) == 10
+    fval = Inf;
+    info(2) = 0.1;
+    exit_flag = 0;
+    if analytic_derivation,
+        DLIK=ones(length(xparam1),1);
+    end
+    return
+elseif info(1) == 3 || info(1) == 4 || info(1)==6 || info(1) == 20 || info(1) == 21  || info(1) == 23 || info(1)==26
+    fval = Inf;
+    exit_flag = 0;
+    if analytic_derivation,
+        DLIK=ones(length(xparam1),1);
+    end
+    return
+end
+
+% check endogenous prior restrictions
+info=endogenous_prior_restrictions(T,R,Model,DynareOptions,DynareResults);
+if info(1),
+    fval = Inf;
+    exit_flag = 0;
+    if analytic_derivation,
+        DLIK=ones(length(xparam1),1);
+    end
+    return
+end
+%
+
+% Define a vector of indices for the observed variables. Is this really useful?...
+BayesInfo.mf = BayesInfo.mf1;
+
+% Define the constant vector of the measurement equation.
+if DynareOptions.noconstant
+    constant = zeros(DynareDataset.vobs,1);
+else
+    if DynareOptions.loglinear
+        constant = log(SteadyState(BayesInfo.mfys));
+    else
+        constant = SteadyState(BayesInfo.mfys);
+    end
+end
+
+% Define the deterministic linear trend of the measurement equation.
+if BayesInfo.with_trend
+    trend_coeff = zeros(DynareDataset.vobs,1);
+    t = DynareOptions.trend_coeffs;
+    for i=1:length(t)
+        if ~isempty(t{i})
+            trend_coeff(i) = evalin('base',t{i});
+        end
+    end
+    trend = repmat(constant,1,DynareDataset.nobs)+trend_coeff*[1:DynareDataset.nobs];
+else
+   trend_coeff = zeros(DynareDataset.vobs,1);
+   trend = repmat(constant,1,DynareDataset.nobs);
+end
+
+% Get the information needed by the Kalman filter routines.
+start = DynareOptions.presample+1;
+Z = BayesInfo.mf;
+no_missing_data_flag = ~DatasetInfo.missing.state;
+mm = length(T);
+pp = DynareDataset.vobs;
+rr = length(Q);
+kalman_tol = DynareOptions.kalman_tol;
+diffuse_kalman_tol = DynareOptions.diffuse_kalman_tol;
+riccati_tol = DynareOptions.riccati_tol;
+Y = transpose(DynareDataset.data)-trend;
+
+%------------------------------------------------------------------------------
+% 3. Initial condition of the Kalman filter
+%------------------------------------------------------------------------------
+kalman_algo = DynareOptions.kalman_algo;
+
+% Reset the measurement error covariance matrix for the univariate filters.
+if (kalman_algo == 2) || (kalman_algo == 4)
+    if isequal(H,0)
+        H = zeros(pp,1);
+        mmm = mm;
+    else
+        if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
+            H = diag(H);
+            mmm = mm;
+        else
+            Z = [Z, eye(pp)];
+            T = blkdiag(T,zeros(pp));
+            Q = blkdiag(Q,H);
+            R = blkdiag(R,eye(pp));
+            Pstar = blkdiag(Pstar,H);
+            Pinf  = blkdiag(Pinf,zeros(pp));
+            H = zeros(pp,1);
+            mmm   = mm+pp;
+        end
+    end
+end
+
+
+diffuse_periods = 0;
+correlated_errors_have_been_checked = 0;
+singular_diffuse_filter = 0;
+switch DynareOptions.lik_init
+  case 1% Standard initialization with the steady state of the state equation.
+    if kalman_algo~=2
+        % Use the standard Kalman filter unless the univariate filter is explicitly chosen.
+        kalman_algo = 1;
+    end
+    if DynareOptions.lyapunov_fp == 1
+        Pstar = lyapunov_symm(T,R*Q'*R',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, 3, [], DynareOptions.debug);
+    elseif DynareOptions.lyapunov_db == 1
+        Pstar = disclyap_fast(T,R*Q*R',DynareOptions.lyapunov_doubling_tol);
+    elseif DynareOptions.lyapunov_srs == 1
+        Pstar = lyapunov_symm(T,Q,DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, 4, R, DynareOptions.debug);
+    else
+        Pstar = lyapunov_symm(T,R*Q*R',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, [], [], DynareOptions.debug);
+    end;
+    Pinf  = [];
+    a     = zeros(mm,1);
+    Zflag = 0;
+  case 2% Initialization with large numbers on the diagonal of the covariance matrix of the states (for non-stationary models).
+    if kalman_algo ~= 2
+        % Use the standard Kalman filter unless the univariate filter is explicitly chosen.
+        kalman_algo = 1;
+    end
+    Pstar = DynareOptions.Harvey_scale_factor*eye(mm);
+    Pinf  = [];
+    a     = zeros(mm,1);
+    Zflag = 0;
+  case 3% Diffuse Kalman filter (Durbin and Koopman)
+        % Use the standard Kalman filter unless the univariate filter is explicitly chosen.
+    if kalman_algo == 0
+        kalman_algo = 3;
+    elseif ~((kalman_algo == 3) || (kalman_algo == 4))
+            error(['The model requires Diffuse filter, but you specified a different Kalman filter. You must set options_.kalman_algo ' ...
+                   'to 0 (default), 3 or 4'])
+    end
+    [Z,T,R,QT,Pstar,Pinf] = schur_statespace_transformation(Z,T,R,Q,DynareOptions.qz_criterium);
+    Zflag = 1;
+    % Run the diffuse Kalman filter over the first periods.
+    if (kalman_algo==3)
+        % Multivariate Diffuse Kalman Filter
+        if no_missing_data_flag
+            [dLIK,dlik,a,Pstar] = kalman_filter_d(Y, 1, size(Y,2), ...
+                                                       zeros(mm,1), Pinf, Pstar, ...
+                                                       kalman_tol, diffuse_kalman_tol, riccati_tol, DynareOptions.presample, ...
+                                                       T,R,Q,H,Z,mm,pp,rr);
+        else
+            [dLIK,dlik,a,Pstar] = missing_observations_kalman_filter_d(DatasetInfo.missing.aindex,DatasetInfo.missing.number_of_observations,DatasetInfo.missing.no_more_missing_observations, ...
+                                                              Y, 1, size(Y,2), ...
+                                                              zeros(mm,1), Pinf, Pstar, ...
+                                                              kalman_tol, diffuse_kalman_tol, riccati_tol, DynareOptions.presample, ...
+                                                              T,R,Q,H,Z,mm,pp,rr);
+        end
+        diffuse_periods = length(dlik);
+        if isinf(dLIK)
+            % Switch to the univariate diffuse filter if a singularity problem occurs.
+            singular_diffuse_filter = 1;
+        end
+    end
+    if singular_diffuse_filter || (kalman_algo==4)
+        % Univariate Diffuse Kalman Filter
+        if isequal(H,0)
+            H1 = zeros(pp,1);
+            mmm = mm;
+        else
+            if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
+                H1 = diag(H);
+                mmm = mm;
+            else
+                Z = [Z, eye(pp)];
+                T = blkdiag(T,zeros(pp));
+                Q = blkdiag(Q,H);
+                R = blkdiag(R,eye(pp));
+                Pstar = blkdiag(Pstar,H);
+                Pinf  = blkdiag(Pinf,zeros(pp));
+                H1 = zeros(pp,1);
+                mmm   = mm+pp;
+            end
+        end
+        % no need to test again for correlation elements
+        correlated_errors_have_been_checked = 1;
+
+        [dLIK,dlik,a,Pstar] = univariate_kalman_filter_d(DatasetInfo.missing.aindex,...
+                                                        DatasetInfo.missing.number_of_observations,...
+                                                        DatasetInfo.missing.no_more_missing_observations, ...
+                                                        Y, 1, size(Y,2), ...
+                                                        zeros(mmm,1), Pinf, Pstar, ...
+                                                        kalman_tol, diffuse_kalman_tol, riccati_tol, DynareOptions.presample, ...
+                                                        T,R,Q,H1,Z,mmm,pp,rr);
+        diffuse_periods = size(dlik,1);
+    end
+    if isnan(dLIK),
+        fval = Inf;
+        info(1) = 45;
+        info(2) = 0.1;
+        exit_flag = 0;
+        return
+    end
+    
+  case 4% Start from the solution of the Riccati equation.
+    if kalman_algo ~= 2
+        kalman_algo = 1;
+    end
+    if isequal(H,0)
+        [err,Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(Z,mm,length(Z))));
+    else
+        [err,Pstar] = kalman_steady_state(transpose(T),R*Q*transpose(R),transpose(build_selection_matrix(Z,mm,length(Z))),H);
+    end
+    if err
+        disp(['dsge_likelihood:: I am not able to solve the Riccati equation, so I switch to lik_init=1!']);
+        DynareOptions.lik_init = 1;
+        Pstar = lyapunov_symm(T,R*Q*R',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, [], [], DynareOptions.debug);
+    end
+    Pinf  = [];
+    a = zeros(mm,1);
+    Zflag = 0;
+  case 5            % Old diffuse Kalman filter only for the non stationary variables
+    [eigenvect, eigenv] = eig(T);
+    eigenv = diag(eigenv);
+    nstable = length(find(abs(abs(eigenv)-1) > 1e-7));
+    unstable = find(abs(abs(eigenv)-1) < 1e-7);
+    V = eigenvect(:,unstable);
+    indx_unstable = find(sum(abs(V),2)>1e-5);
+    stable = find(sum(abs(V),2)<1e-5);
+    nunit = length(eigenv) - nstable;
+    Pstar = DynareOptions.Harvey_scale_factor*eye(mm);
+    if kalman_algo ~= 2
+        kalman_algo = 1;
+    end
+    R_tmp = R(stable, :);
+    T_tmp = T(stable,stable);
+    if DynareOptions.lyapunov_fp == 1
+        Pstar_tmp = lyapunov_symm(T_tmp,R_tmp*Q*R_tmp',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, 3, [], DynareOptions.debug);
+    elseif DynareOptions.lyapunov_db == 1
+        Pstar_tmp = disclyap_fast(T_tmp,R_tmp*Q*R_tmp',DynareOptions.lyapunov_doubling_tol);
+    elseif DynareOptions.lyapunov_srs == 1
+        Pstar_tmp = lyapunov_symm(T_tmp,Q,DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, 4, R_tmp, DynareOptions.debug);
+    else
+        Pstar_tmp = lyapunov_symm(T_tmp,R_tmp*Q*R_tmp',DynareOptions.qz_criterium,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, [], [], DynareOptions.debug);
+    end    
+    Pstar(stable, stable) = Pstar_tmp;
+    Pinf  = [];
+  otherwise
+    error('dsge_likelihood:: Unknown initialization approach for the Kalman filter!')
+end
+
+if analytic_derivation,
+    offset = EstimatedParameters.nvx;
+    offset = offset+EstimatedParameters.nvn;
+    offset = offset+EstimatedParameters.ncx;
+    offset = offset+EstimatedParameters.ncn;
+
+    no_DLIK = 0;
+    full_Hess = analytic_derivation==2;
+    asy_Hess = analytic_derivation==-2;
+    outer_product_gradient = analytic_derivation==-1;
+    if asy_Hess,
+        analytic_derivation=1;
+    end
+    if outer_product_gradient,
+        analytic_derivation=1;
+    end
+    DLIK = [];
+    AHess = [];
+    iv = DynareResults.dr.restrict_var_list;
+    if nargin<10 || isempty(derivatives_info)
+        [A,B,nou,nou,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults);
+        if ~isempty(EstimatedParameters.var_exo)
+            indexo=EstimatedParameters.var_exo(:,1);
+        else
+            indexo=[];
+        end
+        if ~isempty(EstimatedParameters.param_vals)
+            indparam=EstimatedParameters.param_vals(:,1);
+        else
+            indparam=[];
+        end
+
+        if full_Hess,
+            [dum, DT, DOm, DYss, dum2, D2T, D2Om, D2Yss] = getH(A, B, Model,DynareResults,DynareOptions,kron_flag,indparam,indexo,iv);
+            clear dum dum2;
+        else
+            [dum, DT, DOm, DYss] = getH(A, B, Model,DynareResults,DynareOptions,kron_flag,indparam,indexo,iv);
+        end
+    else
+        DT = derivatives_info.DT(iv,iv,:);
+        DOm = derivatives_info.DOm(iv,iv,:);
+        DYss = derivatives_info.DYss(iv,:);
+        if isfield(derivatives_info,'full_Hess'),
+            full_Hess = derivatives_info.full_Hess;
+        end
+        if full_Hess,
+        D2T = derivatives_info.D2T;
+        D2Om = derivatives_info.D2Om;
+        D2Yss = derivatives_info.D2Yss;
+        end
+        if isfield(derivatives_info,'no_DLIK'),
+            no_DLIK = derivatives_info.no_DLIK;
+        end
+        clear('derivatives_info');
+    end
+    DYss = [zeros(size(DYss,1),offset) DYss];
+    DH=zeros([length(H),length(H),length(xparam1)]);
+    DQ=zeros([size(Q),length(xparam1)]);
+    DP=zeros([size(T),length(xparam1)]);
+    if full_Hess,
+        for j=1:size(D2Yss,1),
+        tmp(j,:,:) = blkdiag(zeros(offset,offset), squeeze(D2Yss(j,:,:)));
+        end
+        D2Yss = tmp;
+        D2H=sparse(size(D2Om,1),size(D2Om,2)); %zeros([size(H),length(xparam1),length(xparam1)]);
+        D2P=sparse(size(D2Om,1),size(D2Om,2)); %zeros([size(T),length(xparam1),length(xparam1)]);
+        jcount=0;
+    end
+    if DynareOptions.lik_init==1,
+    for i=1:EstimatedParameters.nvx
+        k =EstimatedParameters.var_exo(i,1);
+        DQ(k,k,i) = 2*sqrt(Q(k,k));
+        dum =  lyapunov_symm(T,DOm(:,:,i),DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold,[],[],DynareOptions.debug);
+%         kk = find(abs(dum) < 1e-12);
+%         dum(kk) = 0;
+        DP(:,:,i)=dum;
+        if full_Hess
+        for j=1:i,
+            jcount=jcount+1;
+            dum =  lyapunov_symm(T,dyn_unvech(D2Om(:,jcount)),DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold,[],[],DynareOptions.debug);
+%             kk = (abs(dum) < 1e-12);
+%             dum(kk) = 0;
+            D2P(:,jcount)=dyn_vech(dum);
+%             D2P(:,:,j,i)=dum;
+        end
+        end
+    end
+    end
+    offset = EstimatedParameters.nvx;
+    for i=1:EstimatedParameters.nvn
+        k = EstimatedParameters.var_endo(i,1);
+        DH(k,k,i+offset) = 2*sqrt(H(k,k));
+        if full_Hess
+        D2H(k,k,i+offset,i+offset) = 2;
+        end
+    end
+    offset = offset + EstimatedParameters.nvn;
+    if DynareOptions.lik_init==1,
+    for j=1:EstimatedParameters.np
+        dum =  lyapunov_symm(T,DT(:,:,j+offset)*Pstar*T'+T*Pstar*DT(:,:,j+offset)'+DOm(:,:,j+offset),DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold,[],[],DynareOptions.debug);
+%         kk = find(abs(dum) < 1e-12);
+%         dum(kk) = 0;
+        DP(:,:,j+offset)=dum;
+        if full_Hess
+        DTj = DT(:,:,j+offset);
+        DPj = dum;
+        for i=1:j+offset,
+            jcount=jcount+1;
+            DTi = DT(:,:,i);
+            DPi = DP(:,:,i);
+            D2Tij = reshape(D2T(:,jcount),size(T));
+            D2Omij = dyn_unvech(D2Om(:,jcount));
+            tmp = D2Tij*Pstar*T' + T*Pstar*D2Tij' + DTi*DPj*T' + DTj*DPi*T' + T*DPj*DTi' + T*DPi*DTj' + DTi*Pstar*DTj' + DTj*Pstar*DTi' + D2Omij;
+            dum = lyapunov_symm(T,tmp,DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold,[],[],DynareOptions.debug);
+%             dum(abs(dum)<1.e-12) = 0;
+            D2P(:,jcount) = dyn_vech(dum);
+%             D2P(:,:,j+offset,i) = dum;
+        end
+        end
+    end
+    end
+    if analytic_derivation==1,
+        analytic_deriv_info={analytic_derivation,DT,DYss,DOm,DH,DP,asy_Hess};
+    else
+        analytic_deriv_info={analytic_derivation,DT,DYss,DOm,DH,DP,D2T,D2Yss,D2Om,D2H,D2P};
+        clear DT DYss DOm DP D2T D2Yss D2Om D2H D2P,
+    end
+else
+    analytic_deriv_info={0};
+end
+
+%------------------------------------------------------------------------------
+% 4. Likelihood evaluation
+%------------------------------------------------------------------------------
+
+if ((kalman_algo==1) || (kalman_algo==3))% Multivariate Kalman Filter
+    if no_missing_data_flag
+        if DynareOptions.block
+            [err, LIK] = block_kalman_filter(T,R,Q,H,Pstar,Y,start,Z,kalman_tol,riccati_tol, Model.nz_state_var, Model.n_diag, Model.nobs_non_statevar);
+            mexErrCheck('block_kalman_filter', err);
+        else
+            [LIK,lik] = kalman_filter(Y,diffuse_periods+1,size(Y,2), ...
+                                a,Pstar, ...
+                                kalman_tol, riccati_tol, ...
+                                DynareOptions.presample, ...
+                                T,Q,R,H,Z,mm,pp,rr,Zflag,diffuse_periods, ...
+                                analytic_deriv_info{:});
+
+        end
+    else
+        if 0 %DynareOptions.block
+            [err, LIK,lik] = block_kalman_filter(DatasetInfo.missing.aindex,DatasetInfo.missing.number_of_observations,DatasetInfo.missing.no_more_missing_observations,...
+                                                                 T,R,Q,H,Pstar,Y,start,Z,kalman_tol,riccati_tol, Model.nz_state_var, Model.n_diag, Model.nobs_non_statevar);
+        else
+            [LIK,lik] = missing_observations_kalman_filter(DatasetInfo.missing.aindex,DatasetInfo.missing.number_of_observations,DatasetInfo.missing.no_more_missing_observations,Y,diffuse_periods+1,size(Y,2), ...
+                                               a, Pstar, ...
+                                               kalman_tol, DynareOptions.riccati_tol, ...
+                                               DynareOptions.presample, ...
+                                               T,Q,R,H,Z,mm,pp,rr,Zflag,diffuse_periods);
+        end
+    end
+    if analytic_derivation,
+        LIK1=LIK;
+        LIK=LIK1{1};
+        lik1=lik;
+        lik=lik1{1};
+    end
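+    % A singular forecast error variance matrix yields an infinite likelihood;
+    % in that case, optionally fall back on the corresponding univariate filter
+    % (kalman_algo 2 or 4).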
+    if isinf(LIK)
+        if DynareOptions.use_univariate_filters_if_singularity_is_detected
+            if kalman_algo == 1
+                kalman_algo = 2;
+            else
+                kalman_algo = 4;
+            end
+        else
+            if isinf(LIK)
+                fval = Inf;
+                info(1) = 66;
+                info(2) = 0.1;
+                exit_flag = 0;
+                return
+            end
+        end
+    else
+        if DynareOptions.lik_init==3
+            LIK = LIK + dLIK;
+            if analytic_derivation==0 && nargout==2,
+                lik = [dlik; lik];
+            end
+        end
+    end
+end
+
+if (kalman_algo==2) || (kalman_algo==4)
+    % Univariate Kalman Filter
+    % Reset the measurement error covariance matrix when necessary.
+    if ~correlated_errors_have_been_checked
+        if isequal(H,0)
+            H1 = zeros(pp,1);
+            mmm = mm;
+            if analytic_derivation,
+                DH = zeros(pp,length(xparam1));
+            end
+        else
+            if all(all(abs(H-diag(diag(H)))<1e-14))% ie, the covariance matrix is diagonal...
+                H1 = diag(H);
+                mmm = mm;
+                clear tmp
+                if analytic_derivation,
+                    for j=1:pp,
+                        tmp(j,:)=DH(j,j,:);
+                    end
+                    DH=tmp;
+                end
+            else
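+                % Correlated measurement errors: append them to the state vector
+                % (augmenting Z, T, Q, R, Pstar and Pinf) so that the univariate
+                % filter can be run with a zero measurement error variance H1.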
+                Z = [Z, eye(pp)];
+                T = blkdiag(T,zeros(pp));
+                Q = blkdiag(Q,H);
+                R = blkdiag(R,eye(pp));
+                Pstar = blkdiag(Pstar,H);
+                Pinf  = blkdiag(Pinf,zeros(pp));
+                H1 = zeros(pp,1);
+                mmm   = mm+pp;
+            end
+        end
+        if analytic_derivation,
+            analytic_deriv_info{5}=DH;
+        end
+    end
+
+    [LIK, lik] = univariate_kalman_filter(DatasetInfo.missing.aindex,DatasetInfo.missing.number_of_observations,DatasetInfo.missing.no_more_missing_observations,Y,diffuse_periods+1,size(Y,2), ...
+                                       a,Pstar, ...
+                                       DynareOptions.kalman_tol, ...
+                                       DynareOptions.riccati_tol, ...
+                                       DynareOptions.presample, ...
+                                       T,Q,R,H1,Z,mmm,pp,rr,Zflag,diffuse_periods,analytic_deriv_info{:});
+    if analytic_derivation,
+        LIK1=LIK;
+        LIK=LIK1{1};
+        lik1=lik;
+        lik=lik1{1};
+    end
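+    % With a diffuse initialization (lik_init==3), add the likelihood
+    % contribution of the diffuse filtering periods computed earlier.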
+    if DynareOptions.lik_init==3
+        LIK = LIK+dLIK;
+        if analytic_derivation==0 && nargout==2,
+            lik = [dlik; lik];
+        end
+    end
+end
+
+if analytic_derivation
+    if no_DLIK==0
+        DLIK = LIK1{2};
+        %                 [DLIK] = score(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,start,Z,kalman_tol,riccati_tol);
+    end
+    if full_Hess ,
+        Hess = -LIK1{3};
+        %                     [Hess, DLL] = get_Hessian(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,D2T,D2Yss,D2Om,D2H,D2P,start,Z,kalman_tol,riccati_tol);
+        %                     Hess0 = getHessian(Y,T,DT,D2T, R*Q*transpose(R),DOm,D2Om,Z,DYss,D2Yss);
+    end
+    if asy_Hess,
+%         if ~((kalman_algo==2) || (kalman_algo==4)),
+%             [Hess] = AHessian(T,R,Q,H,Pstar,Y,DT,DYss,DOm,DH,DP,start,Z,kalman_tol,riccati_tol);
+%         else
+        Hess = LIK1{3};
+%         end
+    end
+end
+
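+% Penalty information codes: 45/46 flag a NaN or complex likelihood; 47/48 below
+% flag a NaN or complex value of the posterior kernel.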
+if isnan(LIK)
+    fval = Inf;
+    info(1) = 45;
+    info(2) = 0.1;
+    exit_flag = 0;
+    return
+end
+
+if imag(LIK)~=0
+    fval = Inf;
+    info(1) = 46;
+    info(2) = 0.1;
+    exit_flag = 0;
+    return
+end
+
+likelihood = LIK;
+
+% ------------------------------------------------------------------------------
+% 5. Adds prior if necessary
+% ------------------------------------------------------------------------------
+if analytic_derivation
+    if full_Hess,
+        [lnprior, dlnprior, d2lnprior] = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
+        Hess = Hess - d2lnprior;
+    else
+        [lnprior, dlnprior] = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
+    end
+    if no_DLIK==0
+        DLIK = DLIK - dlnprior';
+    end
+    if outer_product_gradient,
+        dlik = lik1{2};
+        dlik=[- dlnprior; dlik(start:end,:)];
+        Hess = dlik'*dlik;
+    end
+else
+    lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
+end
+
+if DynareOptions.endogenous_prior==1
+  if DynareOptions.lik_init==2 || DynareOptions.lik_init==3
+    error('Endogenous prior not supported with non-stationary models')
+  else
+    [lnpriormom]  = endogenous_prior(Y,Pstar,BayesInfo,H);
+    fval    = (likelihood-lnprior-lnpriormom);
+  end
+else
+  fval    = (likelihood-lnprior);
+end
+
+if DynareOptions.prior_restrictions.status
+    tmp = feval(DynareOptions.prior_restrictions.routine, Model, DynareResults, DynareOptions, DynareDataset, DatasetInfo);
+    fval = fval - tmp;
+end
+
+if isnan(fval)
+    fval = Inf;
+    info(1) = 47;
+    info(2) = 0.1;
+    exit_flag = 0;
+    return
+end
+
+if imag(fval)~=0
+    fval = Inf;
+    info(1) = 48;
+    info(2) = 0.1;
+    exit_flag = 0;
+    return
+end
+
+% Update DynareOptions.kalman_algo.
+DynareOptions.kalman_algo = kalman_algo;
+
+if analytic_derivation==0 && nargout > 1
+    lik=lik(start:end,:);
+    DLIK=[-lnprior; lik(:)];
+end
\ No newline at end of file
diff --git a/matlab/dsge_var_likelihood.m b/matlab/dsge_var_likelihood.m
index d0a5a8b4d3..044ed72695 100644
--- a/matlab/dsge_var_likelihood.m
+++ b/matlab/dsge_var_likelihood.m
@@ -1,5 +1,5 @@
-function [fval,grad,hess,exit_flag,SteadyState,trend_coeff,info,PHI,SIGMAu,iXX,prior] = dsge_var_likelihood(xparam1,DynareDataset,DynareInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults)
-% Evaluates the posterior kernel of the bvar-dsge model.
+function [fval,grad,hess,exit_flag,info,PHI,SIGMAu,iXX,prior] = dsge_var_likelihood(xparam1,DynareDataset,DynareInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults)
+% Evaluates the posterior kernel of the BVAR-DSGE model. Deprecated interface;
+% the computation is forwarded to dsge_var_likelihood_1.m.
 %
 % INPUTS
 %   o xparam1       [double]     Vector of model's parameters.
@@ -8,10 +8,6 @@ function [fval,grad,hess,exit_flag,SteadyState,trend_coeff,info,PHI,SIGMAu,iXX,p
 % OUTPUTS
 %   o fval          [double]     Value of the posterior kernel at xparam1.
 %   o cost_flag     [integer]    Zero if the function returns a penalty, one otherwise.
-%   o SteadyState   [double]     Steady state vector possibly recomputed
-%                                by call to dynare_results()
-%   o trend_coeff   [double]     place holder for trend coefficients,
-%                                currently not supported by dsge_var
 %   o info          [integer]    Vector of informations about the penalty.
 %   o PHI           [double]     Stacked BVAR-DSGE autoregressive matrices (at the mode associated to xparam1).
 %   o SIGMAu        [double]     Covariance matrix of the BVAR-DSGE (at the mode associated to xparam1).
@@ -38,275 +34,6 @@ function [fval,grad,hess,exit_flag,SteadyState,trend_coeff,info,PHI,SIGMAu,iXX,p
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-persistent dsge_prior_weight_idx
-
-grad=[];
-hess=[];
-exit_flag = [];
-info = 0;
-PHI = [];
-SIGMAu = [];
-iXX = [];
-prior = [];
-SteadyState = [];
-trend_coeff = [];
-
-% Initialization of of the index for parameter dsge_prior_weight in Model.params.
-if isempty(dsge_prior_weight_idx)
-    dsge_prior_weight_idx = strmatch('dsge_prior_weight',Model.param_names);
-end
-
-% Get the number of estimated (dsge) parameters.
-nx = EstimatedParameters.nvx + EstimatedParameters.np;
-
-% Get the number of observed variables in the VAR model.
-NumberOfObservedVariables = DynareDataset.vobs;
-
-% Get the number of observations.
-NumberOfObservations = DynareDataset.nobs;
-
-
-% Get the number of lags in the VAR model.
-NumberOfLags = DynareOptions.dsge_varlag;
-
-% Get the number of parameters in the VAR model.
-NumberOfParameters = NumberOfObservedVariables*NumberOfLags ;
-if ~DynareOptions.noconstant
-    NumberOfParameters = NumberOfParameters + 1;
-end
-
-% Get empirical second order moments for the observed variables.
-mYY = evalin('base', 'mYY');
-mYX = evalin('base', 'mYX');
-mXY = evalin('base', 'mXY');
-mXX = evalin('base', 'mXX');
-
-% Initialize some of the output arguments.
-fval = [];
-exit_flag = 1;
-
-% Return, with endogenous penalty, if some dsge-parameters are smaller than the lower bound of the prior domain.
-if DynareOptions.mode_compute ~= 1 && any(xparam1 < BoundsInfo.lb)
-    fval = Inf;
-    exit_flag = 0;
-    info(1) = 41;
-    k = find(xparam1 < BoundsInfo.lb);
-    info(2) = sum((BoundsInfo.lb(k)-xparam1(k)).^2);
-    return;
-end
-
-% Return, with endogenous penalty, if some dsge-parameters are greater than the upper bound of the prior domain.
-if DynareOptions.mode_compute ~= 1 && any(xparam1 > BoundsInfo.ub)
-    fval = Inf;
-    exit_flag = 0;
-    info(1) = 42;
-    k = find(xparam1 > BoundsInfo.ub);
-    info(2) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
-    return;
-end
-
-% Get the variance of each structural innovation.
-Q = Model.Sigma_e;
-for i=1:EstimatedParameters.nvx
-    k = EstimatedParameters.var_exo(i,1);
-    Q(k,k) = xparam1(i)*xparam1(i);
-end
-offset = EstimatedParameters.nvx;
-
-% Update Model.params and Model.Sigma_e.
-Model.params(EstimatedParameters.param_vals(:,1)) = xparam1(offset+1:end);
-Model.Sigma_e = Q;
-
-% Get the weight of the dsge prior.
-dsge_prior_weight = Model.params(dsge_prior_weight_idx);
-
-% Is the dsge prior proper?
-if dsge_prior_weight<(NumberOfParameters+NumberOfObservedVariables)/ ...
-        NumberOfObservations;
-    fval = Inf;
-    exit_flag = 0;
-    info(1) = 51;
-    info(2) = abs(NumberOfObservations*dsge_prior_weight-(NumberOfParameters+NumberOfObservedVariables));
-    %    info(2)=dsge_prior_weight;
-    %    info(3)=(NumberOfParameters+NumberOfObservedVariables)/DynareDataset.nobs;
-    return
-end
-
-%------------------------------------------------------------------------------
-% 2. call model setup & reduction program
-%------------------------------------------------------------------------------
-
-% Solve the Dsge model and get the matrices of the reduced form solution. T and R are the matrices of the
-% state equation
-[T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
-
-% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
-if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || ...
-            info(1) == 22 || info(1) == 24 || info(1) == 25 || info(1) == 10
-    fval = Inf;
-    info(2) = 0.1;
-    exit_flag = 0;
-    return
-elseif info(1) == 3 || info(1) == 4 || info(1) == 19 || info(1) == 20 || info(1) == 21
-    fval = Inf;
-    info(2) = 0.1;
-    exit_flag = 0;
-    return
-end
-
-% Define the mean/steady state vector.
-if ~DynareOptions.noconstant
-    if DynareOptions.loglinear
-        constant = transpose(log(SteadyState(BayesInfo.mfys)));
-    else
-        constant = transpose(SteadyState(BayesInfo.mfys));
-    end
-else
-    constant = zeros(1,NumberOfObservedVariables);
-end
-
-
-%------------------------------------------------------------------------------
-% 3. theoretical moments (second order)
-%------------------------------------------------------------------------------
-
-% Compute the theoretical second order moments
-tmp0 = lyapunov_symm(T,R*Q*R',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, [], [], DynareOptions.debug);
-mf  = BayesInfo.mf1;
-
-% Get the non centered second order moments
-TheoreticalAutoCovarianceOfTheObservedVariables = zeros(NumberOfObservedVariables,NumberOfObservedVariables,NumberOfLags+1);
-TheoreticalAutoCovarianceOfTheObservedVariables(:,:,1) = tmp0(mf,mf)+constant'*constant;
-for lag = 1:NumberOfLags
-    tmp0 = T*tmp0;
-    TheoreticalAutoCovarianceOfTheObservedVariables(:,:,lag+1) = tmp0(mf,mf) + constant'*constant;
-end
-
-% Build the theoretical "covariance" between Y and X
-GYX = zeros(NumberOfObservedVariables,NumberOfParameters);
-for i=1:NumberOfLags
-    GYX(:,(i-1)*NumberOfObservedVariables+1:i*NumberOfObservedVariables) = TheoreticalAutoCovarianceOfTheObservedVariables(:,:,i+1);
-end
-if ~DynareOptions.noconstant
-    GYX(:,end) = constant';
-end
-
-% Build the theoretical "covariance" between X and X
-GXX = kron(eye(NumberOfLags), TheoreticalAutoCovarianceOfTheObservedVariables(:,:,1));
-for i = 1:NumberOfLags-1
-    tmp1 = diag(ones(NumberOfLags-i,1),i);
-    tmp2 = diag(ones(NumberOfLags-i,1),-i);
-    GXX = GXX + kron(tmp1,TheoreticalAutoCovarianceOfTheObservedVariables(:,:,i+1));
-    GXX = GXX + kron(tmp2,TheoreticalAutoCovarianceOfTheObservedVariables(:,:,i+1)');
-end
-
-if ~DynareOptions.noconstant
-    % Add one row and one column to GXX
-    GXX = [GXX , kron(ones(NumberOfLags,1),constant') ; [  kron(ones(1,NumberOfLags),constant) , 1] ];
-end
-
-GYY = TheoreticalAutoCovarianceOfTheObservedVariables(:,:,1);
-
-assignin('base','GYY',GYY);
-assignin('base','GXX',GXX);
-assignin('base','GYX',GYX);
-
-if ~isinf(dsge_prior_weight)% Evaluation of the likelihood of the dsge-var model when the dsge prior weight is finite.
-    tmp0 = dsge_prior_weight*NumberOfObservations*TheoreticalAutoCovarianceOfTheObservedVariables(:,:,1) + mYY ;
-    tmp1 = dsge_prior_weight*NumberOfObservations*GYX + mYX;
-    tmp2 = inv(dsge_prior_weight*NumberOfObservations*GXX+mXX);
-    SIGMAu = tmp0 - tmp1*tmp2*tmp1'; clear('tmp0');
-    [SIGMAu_is_positive_definite, penalty] = ispd(SIGMAu);
-    if ~SIGMAu_is_positive_definite
-        fval = Inf;
-        info(1) = 52;
-        info(2) = penalty;
-        exit_flag = 0;
-        return;
-    end
-    SIGMAu = SIGMAu / (NumberOfObservations*(1+dsge_prior_weight));
-    PHI = tmp2*tmp1'; clear('tmp1');
-    prodlng1 = sum(gammaln(.5*((1+dsge_prior_weight)*NumberOfObservations- ...
-                               NumberOfObservedVariables*NumberOfLags ...
-                               +1-(1:NumberOfObservedVariables)')));
-    prodlng2 = sum(gammaln(.5*(dsge_prior_weight*NumberOfObservations- ...
-                               NumberOfObservedVariables*NumberOfLags ...
-                               +1-(1:NumberOfObservedVariables)')));
-    lik = .5*NumberOfObservedVariables*log(det(dsge_prior_weight*NumberOfObservations*GXX+mXX)) ...
-          + .5*((dsge_prior_weight+1)*NumberOfObservations-NumberOfParameters)*log(det((dsge_prior_weight+1)*NumberOfObservations*SIGMAu)) ...
-          - .5*NumberOfObservedVariables*log(det(dsge_prior_weight*NumberOfObservations*GXX)) ...
-          - .5*(dsge_prior_weight*NumberOfObservations-NumberOfParameters)*log(det(dsge_prior_weight*NumberOfObservations*(GYY-GYX*inv(GXX)*GYX'))) ...
-          + .5*NumberOfObservedVariables*NumberOfObservations*log(2*pi)  ...
-          - .5*log(2)*NumberOfObservedVariables*((dsge_prior_weight+1)*NumberOfObservations-NumberOfParameters) ...
-          + .5*log(2)*NumberOfObservedVariables*(dsge_prior_weight*NumberOfObservations-NumberOfParameters) ...
-          - prodlng1 + prodlng2;
-else% Evaluation of the likelihood of the dsge-var model when the dsge prior weight is infinite.
-    iGXX = inv(GXX);
-    SIGMAu = GYY - GYX*iGXX*transpose(GYX);
-    PHI = iGXX*transpose(GYX);
-    lik = NumberOfObservations * ( log(det(SIGMAu)) + NumberOfObservedVariables*log(2*pi) +  ...
-                   trace(inv(SIGMAu)*(mYY - transpose(mYX*PHI) - mYX*PHI + transpose(PHI)*mXX*PHI)/NumberOfObservations));
-    lik = .5*lik;% Minus likelihood
-end
-
-if isnan(lik)
-    info(1) = 45;
-    info(2) = 0.1;
-    fval = Inf;
-    exit_flag = 0;
-    return
-end
-
-if imag(lik)~=0
-    info(1) = 46;
-    info(2) = 0.1;
-    fval = Inf;
-    exit_flag = 0;
-    return
-end
-
-% Add the (logged) prior density for the dsge-parameters.
-lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
-fval = (lik-lnprior);
-
-if isnan(fval)
-    info(1) = 47;
-    info(2) = 0.1;
-    fval = Inf;
-    exit_flag = 0;
-    return
-end
-
-if imag(fval)~=0
-    info(1) = 48;
-    info(2) = 0.1;
-    fval = Inf;
-    exit_flag = 0;
-    return
-end
-
-if (nargout == 10)
-    if isinf(dsge_prior_weight)
-        iXX = iGXX;
-    else
-        iXX = tmp2;
-    end
-end
-
-if (nargout==11)
-    if isinf(dsge_prior_weight)
-        iXX = iGXX;
-    else
-        iXX = tmp2;
-    end
-    iGXX = inv(GXX);
-    prior.SIGMAstar = GYY - GYX*iGXX*GYX';
-    prior.PHIstar = iGXX*transpose(GYX);
-    prior.ArtificialSampleSize = fix(dsge_prior_weight*NumberOfObservations);
-    prior.DF = prior.ArtificialSampleSize - NumberOfParameters - NumberOfObservedVariables;
-    prior.iGXX = iGXX;
-end
-
-if fval == Inf
-    pause
-end
\ No newline at end of file
+[fval,info,exit_flag,grad,hess,SteadyState,trend_coeff,PHI,SIGMAu,iXX,prior] = ...
+    dsge_var_likelihood_1(xparam1,DynareDataset,DynareInfo,DynareOptions,Model,...
+                          EstimatedParameters,BayesInfo,BoundsInfo,DynareResults);
\ No newline at end of file
diff --git a/matlab/dsge_var_likelihood_1.m b/matlab/dsge_var_likelihood_1.m
new file mode 100644
index 0000000000..e01764d269
--- /dev/null
+++ b/matlab/dsge_var_likelihood_1.m
@@ -0,0 +1,312 @@
+function [fval,info,exit_flag,grad,hess,SteadyState,trend_coeff,PHI,SIGMAu,iXX,prior] = dsge_var_likelihood_1(xparam1,DynareDataset,DynareInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults)
+% Evaluates the posterior kernel of the BVAR-DSGE model.
+%
+% INPUTS
+%   o xparam1       [double]     Vector of model's parameters.
+%   o DynareDataset, DynareInfo, DynareOptions, Model, EstimatedParameters, BayesInfo, BoundsInfo, DynareResults: the usual Dynare estimation structures.
+%
+% OUTPUTS
+%   o fval          [double]     Value of the posterior kernel at xparam1.
+%   o exit_flag     [integer]    Zero if the function returns a penalty, one otherwise.
+%   o SteadyState   [double]     Steady state vector possibly recomputed
+%                                by the call to dynare_resolve().
+%   o trend_coeff   [double]     place holder for trend coefficients,
+%                                currently not supported by dsge_var
+%   o info          [integer]    Vector of information about the penalty.
+%   o PHI           [double]     Stacked BVAR-DSGE autoregressive matrices (at the mode associated to xparam1).
+%   o SIGMAu        [double]     Covariance matrix of the BVAR-DSGE (at the mode associated to xparam1).
+%   o iXX           [double]     inv(X'X).
+%   o prior         [double]     a matlab structure describing the dsge-var prior.
+%
+% SPECIAL REQUIREMENTS
+%   None.
+
+% Copyright (C) 2006-2012 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+persistent dsge_prior_weight_idx
+
+grad=[];
+hess=[];
+exit_flag = [];
+info = 0;
+PHI = [];
+SIGMAu = [];
+iXX = [];
+prior = [];
+SteadyState = [];
+trend_coeff = [];
+
+% Initialization of the index for parameter dsge_prior_weight in Model.params.
+if isempty(dsge_prior_weight_idx)
+    dsge_prior_weight_idx = strmatch('dsge_prior_weight',Model.param_names);
+end
+
+% Get the number of estimated (dsge) parameters.
+nx = EstimatedParameters.nvx + EstimatedParameters.np;
+
+% Get the number of observed variables in the VAR model.
+NumberOfObservedVariables = DynareDataset.vobs;
+
+% Get the number of observations.
+NumberOfObservations = DynareDataset.nobs;
+
+
+% Get the number of lags in the VAR model.
+NumberOfLags = DynareOptions.dsge_varlag;
+
+% Get the number of parameters in the VAR model.
+NumberOfParameters = NumberOfObservedVariables*NumberOfLags ;
+if ~DynareOptions.noconstant
+    NumberOfParameters = NumberOfParameters + 1;
+end
+
+% Get empirical second order moments for the observed variables.
+mYY = evalin('base', 'mYY');
+mYX = evalin('base', 'mYX');
+mXY = evalin('base', 'mXY');
+mXX = evalin('base', 'mXX');
+
+% Initialize some of the output arguments.
+fval = [];
+exit_flag = 1;
+
+% Return, with endogenous penalty, if some dsge-parameters are smaller than the lower bound of the prior domain.
+if DynareOptions.mode_compute ~= 1 && any(xparam1 < BoundsInfo.lb)
+    fval = Inf;
+    exit_flag = 0;
+    info(1) = 41;
+    k = find(xparam1 < BoundsInfo.lb);
+    info(2) = sum((BoundsInfo.lb(k)-xparam1(k)).^2);
+    return;
+end
+
+% Return, with endogenous penalty, if some dsge-parameters are greater than the upper bound of the prior domain.
+if DynareOptions.mode_compute ~= 1 && any(xparam1 > BoundsInfo.ub)
+    fval = Inf;
+    exit_flag = 0;
+    info(1) = 42;
+    k = find(xparam1 > BoundsInfo.ub);
+    info(2) = sum((xparam1(k)-BoundsInfo.ub(k)).^2);
+    return;
+end
+
+% Get the variance of each structural innovation.
+Q = Model.Sigma_e;
+for i=1:EstimatedParameters.nvx
+    k = EstimatedParameters.var_exo(i,1);
+    Q(k,k) = xparam1(i)*xparam1(i);
+end
+offset = EstimatedParameters.nvx;
+
+% Update Model.params and Model.Sigma_e.
+Model.params(EstimatedParameters.param_vals(:,1)) = xparam1(offset+1:end);
+Model.Sigma_e = Q;
+
+% Get the weight of the dsge prior.
+dsge_prior_weight = Model.params(dsge_prior_weight_idx);
+
+% Is the dsge prior proper?
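+% The prior is proper only if dsge_prior_weight*NumberOfObservations is at least
+% as large as NumberOfParameters + NumberOfObservedVariables.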
+if dsge_prior_weight<(NumberOfParameters+NumberOfObservedVariables)/ ...
+        NumberOfObservations;
+    fval = Inf;
+    exit_flag = 0;
+    info(1) = 51;
+    info(2) = abs(NumberOfObservations*dsge_prior_weight-(NumberOfParameters+NumberOfObservedVariables));
+    %    info(2)=dsge_prior_weight;
+    %    info(3)=(NumberOfParameters+NumberOfObservedVariables)/DynareDataset.nobs;
+    return
+end
+
+%------------------------------------------------------------------------------
+% 2. call model setup & reduction program
+%------------------------------------------------------------------------------
+
+% Solve the Dsge model and get the matrices of the reduced form solution. T and R are the matrices of the
+% state equation
+[T,R,SteadyState,info,Model,DynareOptions,DynareResults] = dynare_resolve(Model,DynareOptions,DynareResults,'restrict');
+
+% Return, with endogenous penalty when possible, if dynare_resolve issues an error code (defined in resol).
+if info(1) == 1 || info(1) == 2 || info(1) == 5 || info(1) == 7 || info(1) == 8 || ...
+            info(1) == 22 || info(1) == 24 || info(1) == 25 || info(1) == 10
+    fval = Inf;
+    info(2) = 0.1;
+    exit_flag = 0;
+    return
+elseif info(1) == 3 || info(1) == 4 || info(1) == 19 || info(1) == 20 || info(1) == 21
+    fval = Inf;
+    info(2) = 0.1;
+    exit_flag = 0;
+    return
+end
+
+% Define the mean/steady state vector.
+if ~DynareOptions.noconstant
+    if DynareOptions.loglinear
+        constant = transpose(log(SteadyState(BayesInfo.mfys)));
+    else
+        constant = transpose(SteadyState(BayesInfo.mfys));
+    end
+else
+    constant = zeros(1,NumberOfObservedVariables);
+end
+
+
+%------------------------------------------------------------------------------
+% 3. theoretical moments (second order)
+%------------------------------------------------------------------------------
+
+% Compute the theoretical second order moments
+tmp0 = lyapunov_symm(T,R*Q*R',DynareOptions.lyapunov_fixed_point_tol,DynareOptions.qz_criterium,DynareOptions.lyapunov_complex_threshold, [], [], DynareOptions.debug);
+mf  = BayesInfo.mf1;
+
+% Get the non centered second order moments
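+% The autocovariance of the state at lag h is T^h*Gamma(0); the observed block
+% (mf) is extracted and constant'*constant added to obtain non-centered moments.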
+TheoreticalAutoCovarianceOfTheObservedVariables = zeros(NumberOfObservedVariables,NumberOfObservedVariables,NumberOfLags+1);
+TheoreticalAutoCovarianceOfTheObservedVariables(:,:,1) = tmp0(mf,mf)+constant'*constant;
+for lag = 1:NumberOfLags
+    tmp0 = T*tmp0;
+    TheoreticalAutoCovarianceOfTheObservedVariables(:,:,lag+1) = tmp0(mf,mf) + constant'*constant;
+end
+
+% Build the theoretical "covariance" between Y and X
+GYX = zeros(NumberOfObservedVariables,NumberOfParameters);
+for i=1:NumberOfLags
+    GYX(:,(i-1)*NumberOfObservedVariables+1:i*NumberOfObservedVariables) = TheoreticalAutoCovarianceOfTheObservedVariables(:,:,i+1);
+end
+if ~DynareOptions.noconstant
+    GYX(:,end) = constant';
+end
+
+% Build the theoretical "covariance" between X and X
+GXX = kron(eye(NumberOfLags), TheoreticalAutoCovarianceOfTheObservedVariables(:,:,1));
+for i = 1:NumberOfLags-1
+    tmp1 = diag(ones(NumberOfLags-i,1),i);
+    tmp2 = diag(ones(NumberOfLags-i,1),-i);
+    GXX = GXX + kron(tmp1,TheoreticalAutoCovarianceOfTheObservedVariables(:,:,i+1));
+    GXX = GXX + kron(tmp2,TheoreticalAutoCovarianceOfTheObservedVariables(:,:,i+1)');
+end
+
+if ~DynareOptions.noconstant
+    % Add one row and one column to GXX
+    GXX = [GXX , kron(ones(NumberOfLags,1),constant') ; [  kron(ones(1,NumberOfLags),constant) , 1] ];
+end
+
+GYY = TheoreticalAutoCovarianceOfTheObservedVariables(:,:,1);
+
+assignin('base','GYY',GYY);
+assignin('base','GXX',GXX);
+assignin('base','GYX',GYX);
+
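+% The (minus log) likelihood of the DSGE-VAR is available in closed form given the
+% theoretical moments; PHI and SIGMAu are the implied VAR autoregressive matrices
+% and innovation covariance evaluated at xparam1.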
+if ~isinf(dsge_prior_weight)% Evaluation of the likelihood of the dsge-var model when the dsge prior weight is finite.
+    tmp0 = dsge_prior_weight*NumberOfObservations*TheoreticalAutoCovarianceOfTheObservedVariables(:,:,1) + mYY ;
+    tmp1 = dsge_prior_weight*NumberOfObservations*GYX + mYX;
+    tmp2 = inv(dsge_prior_weight*NumberOfObservations*GXX+mXX);
+    SIGMAu = tmp0 - tmp1*tmp2*tmp1'; clear('tmp0');
+    [SIGMAu_is_positive_definite, penalty] = ispd(SIGMAu);
+    if ~SIGMAu_is_positive_definite
+        fval = Inf;
+        info(1) = 52;
+        info(2) = penalty;
+        exit_flag = 0;
+        return;
+    end
+    SIGMAu = SIGMAu / (NumberOfObservations*(1+dsge_prior_weight));
+    PHI = tmp2*tmp1'; clear('tmp1');
+    prodlng1 = sum(gammaln(.5*((1+dsge_prior_weight)*NumberOfObservations- ...
+                               NumberOfObservedVariables*NumberOfLags ...
+                               +1-(1:NumberOfObservedVariables)')));
+    prodlng2 = sum(gammaln(.5*(dsge_prior_weight*NumberOfObservations- ...
+                               NumberOfObservedVariables*NumberOfLags ...
+                               +1-(1:NumberOfObservedVariables)')));
+    lik = .5*NumberOfObservedVariables*log(det(dsge_prior_weight*NumberOfObservations*GXX+mXX)) ...
+          + .5*((dsge_prior_weight+1)*NumberOfObservations-NumberOfParameters)*log(det((dsge_prior_weight+1)*NumberOfObservations*SIGMAu)) ...
+          - .5*NumberOfObservedVariables*log(det(dsge_prior_weight*NumberOfObservations*GXX)) ...
+          - .5*(dsge_prior_weight*NumberOfObservations-NumberOfParameters)*log(det(dsge_prior_weight*NumberOfObservations*(GYY-GYX*inv(GXX)*GYX'))) ...
+          + .5*NumberOfObservedVariables*NumberOfObservations*log(2*pi)  ...
+          - .5*log(2)*NumberOfObservedVariables*((dsge_prior_weight+1)*NumberOfObservations-NumberOfParameters) ...
+          + .5*log(2)*NumberOfObservedVariables*(dsge_prior_weight*NumberOfObservations-NumberOfParameters) ...
+          - prodlng1 + prodlng2;
+else% Evaluation of the likelihood of the dsge-var model when the dsge prior weight is infinite.
+    iGXX = inv(GXX);
+    SIGMAu = GYY - GYX*iGXX*transpose(GYX);
+    PHI = iGXX*transpose(GYX);
+    lik = NumberOfObservations * ( log(det(SIGMAu)) + NumberOfObservedVariables*log(2*pi) +  ...
+                   trace(inv(SIGMAu)*(mYY - transpose(mYX*PHI) - mYX*PHI + transpose(PHI)*mXX*PHI)/NumberOfObservations));
+    lik = .5*lik;% Minus likelihood
+end
+
+if isnan(lik)
+    info(1) = 45;
+    info(2) = 0.1;
+    fval = Inf;
+    exit_flag = 0;
+    return
+end
+
+if imag(lik)~=0
+    info(1) = 46;
+    info(2) = 0.1;
+    fval = Inf;
+    exit_flag = 0;
+    return
+end
+
+% Add the (logged) prior density for the dsge-parameters.
+lnprior = priordens(xparam1,BayesInfo.pshape,BayesInfo.p6,BayesInfo.p7,BayesInfo.p3,BayesInfo.p4);
+fval = (lik-lnprior);
+
+if isnan(fval)
+    info(1) = 47;
+    info(2) = 0.1;
+    fval = Inf;
+    exit_flag = 0;
+    return
+end
+
+if imag(fval)~=0
+    info(1) = 48;
+    info(2) = 0.1;
+    fval = Inf;
+    exit_flag = 0;
+    return
+end
+
+if (nargout == 10)
+    if isinf(dsge_prior_weight)
+        iXX = iGXX;
+    else
+        iXX = tmp2;
+    end
+end
+
+if (nargout==11)
+    if isinf(dsge_prior_weight)
+        iXX = iGXX;
+    else
+        iXX = tmp2;
+    end
+    iGXX = inv(GXX);
+    prior.SIGMAstar = GYY - GYX*iGXX*GYX';
+    prior.PHIstar = iGXX*transpose(GYX);
+    prior.ArtificialSampleSize = fix(dsge_prior_weight*NumberOfObservations);
+    prior.DF = prior.ArtificialSampleSize - NumberOfParameters - NumberOfObservedVariables;
+    prior.iGXX = iGXX;
+end
+
+if fval == Inf
+    pause
+end
\ No newline at end of file
diff --git a/matlab/dynare_estimation_1.m b/matlab/dynare_estimation_1.m
index fe57db9c42..9e3b891144 100644
--- a/matlab/dynare_estimation_1.m
+++ b/matlab/dynare_estimation_1.m
@@ -85,10 +85,10 @@ if ~options_.dsge_var
             error(['Estimation: Unknown filter ' options_.particle.filter_algorithm])
         end
     else
-        objective_function = str2func('dsge_likelihood');
+        objective_function = str2func('dsge_likelihood_1');
     end
 else
-    objective_function = str2func('dsge_var_likelihood');
+    objective_function = str2func('dsge_var_likelihood_1');
 end
 
 [dataset_, dataset_info, xparam1, hh, M_, options_, oo_, estim_params_, bayestopt_, bounds] = ...
@@ -253,7 +253,7 @@ if ~isequal(options_.mode_compute,0) && ~options_.mh_posterior_mode_estimation
         if options_.analytic_derivation && strcmp(func2str(objective_function),'dsge_likelihood')
             options = options_.analytic_derivation;
             options.analytic_derivation = 2;
-            [junk1, junk2, hh] = feval(objective_function,xparam1, ...
+            [junk1, junk2, junk3, junk4, hh] = feval(objective_function,xparam1, ...
                                        dataset_,dataset_info,options_,M_, ...
                                        estim_params_,bayestopt_,bounds,oo_);
         elseif isequal(options_.mode_compute,4) || ...
diff --git a/matlab/identification_analysis.m b/matlab/identification_analysis.m
index 7eac2d1999..fe26bd35cd 100644
--- a/matlab/identification_analysis.m
+++ b/matlab/identification_analysis.m
@@ -148,7 +148,7 @@ if info(1)==0,
             dataset_ = dseries(oo_.endo_simul(options_.varobs_id,100+1:end)',dates('1Q1'), options_.varobs);            
             derivatives_info.no_DLIK=1;
             bounds = prior_bounds(bayestopt_,options_);
-            [fval,DLIK,AHess,cost_flag,ys,trend_coeff,info,M_,options_,bayestopt_,oo_] = dsge_likelihood(params',dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_,derivatives_info);
+            [fval,info,cost_flag,DLIK,AHess,ys,trend_coeff,M_,options_,bayestopt_,oo_] = dsge_likelihood_1(params',dataset_,dataset_info,options_,M_,estim_params_,bayestopt_,bounds,oo_,derivatives_info);
 %                 fval = DsgeLikelihood(xparam1,data_info,options_,M_,estim_params_,bayestopt_,oo_);
             options_.analytic_derivation = analytic_derivation;
             AHess=-AHess;
diff --git a/matlab/mode_check.m b/matlab/mode_check.m
index 4e37d41be0..55a1414385 100644
--- a/matlab/mode_check.m
+++ b/matlab/mode_check.m
@@ -140,7 +140,7 @@ for plt = 1:nbplt,
         end
         for i=1:length(z)
             xx(kk) = z(i);
-            [fval, junk1, junk2, exit_flag] = feval(fun,xx,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults);
+            [fval, info, exit_flag] = feval(fun,xx,DynareDataset,DatasetInfo,DynareOptions,Model,EstimatedParameters,BayesInfo,BoundsInfo,DynareResults);
             if exit_flag
                 y(i,1) = fval;
             else
diff --git a/matlab/optimization/mr_hessian.m b/matlab/optimization/mr_hessian.m
index 0fab774add..c88e94cd78 100644
--- a/matlab/optimization/mr_hessian.m
+++ b/matlab/optimization/mr_hessian.m
@@ -61,7 +61,7 @@ if init
     return
 end
 
-[f0, ff0]=penalty_objective_function(x,func,penalty,varargin{:});
+[f0, exit_flag, ff0]=penalty_objective_function(x,func,penalty,varargin{:});
 h2=varargin{7}.ub-varargin{7}.lb;
 hmax=varargin{7}.ub-x;
 hmax=min(hmax,x-varargin{7}.lb);
@@ -93,7 +93,7 @@ while i<n
     hcheck=0;
     xh1(i)=x(i)+h1(i);
     try
-        [fx, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+        [fx, exit_flag, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
     catch
         fx=1.e8;
     end
@@ -114,7 +114,7 @@ while i<n
             h1(i) = max(h1(i),1.e-10);
             xh1(i)=x(i)+h1(i);
             try
-                [fx, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+                [fx, exit_flag, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
             catch
                 fx=1.e8;
             end
@@ -123,14 +123,14 @@ while i<n
             h1(i)= htol/abs(dx(it))*h1(i);
             xh1(i)=x(i)+h1(i);
             try
-                [fx, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+                [fx, exit_flag, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
             catch
                 fx=1.e8;
             end
             while (fx-f0)==0
                 h1(i)= h1(i)*2;
                 xh1(i)=x(i)+h1(i);
-                [fx, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+                [fx, exit_flag, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
                 ic=1;
             end
         end
@@ -151,7 +151,7 @@ while i<n
         end
     end
     xh1(i)=x(i)-h1(i);
-    [fx, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
+    [fx, exit_flag, ffx]=penalty_objective_function(xh1,func,penalty,varargin{:});
     f_1(:,i)=fx;
     if outer_product_gradient,
         if any(isnan(ffx)) || isempty(ffx),
diff --git a/matlab/optimization/newrat.m b/matlab/optimization/newrat.m
index 94309aa0f8..2fa65ed497 100644
--- a/matlab/optimization/newrat.m
+++ b/matlab/optimization/newrat.m
@@ -68,7 +68,7 @@ end
 
 % func0 = str2func([func2str(func0),'_hh']);
 % func0 = func0;
-[fval0,gg,hh]=penalty_objective_function(x,func0,penalty,varargin{:});
+[fval0,exit_flag,gg,hh]=penalty_objective_function(x,func0,penalty,varargin{:});
 fval=fval0;
 
 % initialize mr_gstep and mr_hessian
@@ -180,7 +180,7 @@ while norm(gg)>gtol && check==0 && jit<nit
         disp_verbose('No further improvement is possible!',Verbose)
         check=1;
         if analytic_derivation,
-            [fvalx,gg,hh]=penalty_objective_function(xparam1,func0,varargin{:});
+            [fvalx,exit_flag,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
             hhg=hh;
             H = inv(hh);
         else
@@ -256,7 +256,7 @@ while norm(gg)>gtol && check==0 && jit<nit
                 H = igg;
             end
         elseif analytic_derivation,
-            [fvalx,gg,hh]=penalty_objective_function(xparam1,func0,varargin{:});
+            [fvalx,exit_flag,gg,hh]=penalty_objective_function(xparam1,func0,penalty,varargin{:});
             hhg=hh;
             H = inv(hh);
         end
diff --git a/matlab/optimization/penalty_objective_function.m b/matlab/optimization/penalty_objective_function.m
index ef4d0183ea..89003884ec 100644
--- a/matlab/optimization/penalty_objective_function.m
+++ b/matlab/optimization/penalty_objective_function.m
@@ -1,7 +1,5 @@
-function [fval,DLIK,Hess,exit_flag] = objective_function_penalty(x0,fcn,penalty,varargin)
-    [fval,DLIK,Hess,exit_flag,SteadyState,trend_coeff,info] = fcn(x0,varargin{:});
-
-    
+function [fval,exit_flag,arg1,arg2] = penalty_objective_function(x0,fcn,penalty,varargin)
+    [fval,info,exit_flag,arg1,arg2] = fcn(x0,varargin{:});
     
     if info(1) ~= 0
         fval = penalty + info(2);
diff --git a/matlab/osr1.m b/matlab/osr1.m
index a96c618fb3..97113af6b5 100644
--- a/matlab/osr1.m
+++ b/matlab/osr1.m
@@ -77,7 +77,7 @@ inv_order_var = oo_.dr.inv_order_var;
 %extract unique entries of covariance
 i_var=unique(i_var);
 %% do initial checks
-[loss,vx,info,exit_flag]=osr_obj(t0,i_params,inv_order_var(i_var),weights(i_var,i_var));
+[loss,info,exit_flag,vx]=osr_obj_1(t0,i_params,inv_order_var(i_var),weights(i_var,i_var));
 if info~=0
    print_info(info, options_.noprint, options_);
 else
diff --git a/matlab/osr_obj.m b/matlab/osr_obj.m
index 378d2b06ed..550fd7aad4 100644
--- a/matlab/osr_obj.m
+++ b/matlab/osr_obj.m
@@ -1,5 +1,7 @@
 function [loss,vx,info,exit_flag]=osr_obj(x,i_params,i_var,weights)
-% objective function for optimal simple rules (OSR)
+% Objective function for optimal simple rules (OSR). Deprecated
+% interface; the computation is forwarded to osr_obj_1.m.
+%
 % INPUTS
 %   x                         vector           values of the parameters
 %                                              over which to optimize
@@ -32,58 +34,4 @@ function [loss,vx,info,exit_flag]=osr_obj(x,i_params,i_var,weights)
 % You should have received a copy of the GNU General Public License
 % along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
 
-global M_ oo_ options_ optimal_Q_ it_
-%  global ys_ Sigma_e_ endo_nbr exo_nbr optimal_Q_ it_ ykmin_ options_
-
-junk = [];
-exit_flag = 1;
-vx = [];
-info=0;
-loss=[];
-% set parameters of the policiy rule
-M_.params(i_params) = x;
-
-% don't change below until the part where the loss function is computed
-it_ = M_.maximum_lag+1;
-[dr,info,M_,options_,oo_] = resol(0,M_,options_,oo_);
-
-switch info(1)
-  case 1
-    loss = 1e8;
-    return
-  case 2
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 3
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 4
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 5
-    loss = 1e8;
-    return
-  case 6
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 7
-    loss = 1e8*min(1e3);
-    return
-  case 8
-    loss = 1e8*min(1e3,info(2));
-    return
-  case 9
-    loss = 1e8*min(1e3,info(2));
-    return   
-  case 20
-    loss = 1e8*min(1e3,info(2));
-    return
-  otherwise
-    if info(1)~=0
-      loss = 1e8;
-      return;
-    end  
-end
-
-vx = get_variance_of_endogenous_variables(dr,i_var);
-loss = full(weights(:)'*vx(:));
+[loss,info,exit_flag,vx,junk]=osr_obj_1(x,i_params,i_var,weights);
\ No newline at end of file
diff --git a/matlab/osr_obj_1.m b/matlab/osr_obj_1.m
new file mode 100644
index 0000000000..d889d3ceea
--- /dev/null
+++ b/matlab/osr_obj_1.m
@@ -0,0 +1,90 @@
+function [loss,info,exit_flag,vx,junk]=osr_obj_1(x,i_params,i_var,weights)
+% objective function for optimal simple rules (OSR)
+% INPUTS
+%   x                         vector           values of the parameters
+%                                              over which to optimize
+%   i_params                  vector           index of optimizing parameters in M_.params
+%   i_var                     vector           variables indices
+%   weights                   vector           weights in the OSRs
+%
+% OUTPUTS
+%   loss                      scalar           loss function returned to solver
+%   info                      vector           info vector returned by resol
+%   exit_flag                 scalar           exit flag returned to solver
+%   vx                        vector           variances of the endogenous variables
+%   junk                      empty            place holder for penalty_objective_function
+%
+% SPECIAL REQUIREMENTS
+%   none
+% Copyright (C) 2005-2013 Dynare Team
+%
+% This file is part of Dynare.
+%
+% Dynare is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% Dynare is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with Dynare.  If not, see <http://www.gnu.org/licenses/>.
+
+global M_ oo_ options_ optimal_Q_ it_
+%  global ys_ Sigma_e_ endo_nbr exo_nbr optimal_Q_ it_ ykmin_ options_
+
+junk = [];
+exit_flag = 1;
+vx = [];
+info=0;
+loss=[];
+% set parameters of the policy rule
+M_.params(i_params) = x;
+
+% don't change below until the part where the loss function is computed
+it_ = M_.maximum_lag+1;
+[dr,info,M_,options_,oo_] = resol(0,M_,options_,oo_);
+
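+% Map the error codes returned by resol into large penalty values for the loss,
+% so that the optimizer is pushed away from parameter values for which the model
+% cannot be solved.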
+switch info(1)
+  case 1
+    loss = 1e8;
+    return
+  case 2
+    loss = 1e8*min(1e3,info(2));
+    return
+  case 3
+    loss = 1e8*min(1e3,info(2));
+    return
+  case 4
+    loss = 1e8*min(1e3,info(2));
+    return
+  case 5
+    loss = 1e8;
+    return
+  case 6
+    loss = 1e8*min(1e3,info(2));
+    return
+  case 7
+    loss = 1e8*min(1e3);
+    return
+  case 8
+    loss = 1e8*min(1e3,info(2));
+    return
+  case 9
+    loss = 1e8*min(1e3,info(2));
+    return   
+  case 20
+    loss = 1e8*min(1e3,info(2));
+    return
+  otherwise
+    if info(1)~=0
+      loss = 1e8;
+      return;
+    end  
+end
+
+vx = get_variance_of_endogenous_variables(dr,i_var);
+loss = full(weights(:)'*vx(:));
diff --git a/tests/estimation/TaRB/fs2000_tarb.mod b/tests/estimation/TaRB/fs2000_tarb.mod
index 6586df1950..2dc573d96b 100644
--- a/tests/estimation/TaRB/fs2000_tarb.mod
+++ b/tests/estimation/TaRB/fs2000_tarb.mod
@@ -120,3 +120,4 @@ tarb_mode_compute=4,
 tarb_new_block_probability=0.3,
 silent_optimizer
 );
+
-- 
GitLab