Skip to content
Snippets Groups Projects
Select Git revision
  • slice
  • master default
  • 6.x.aoa_sdfix
  • occbin_ppf_jae
  • occbin_ppf_featured
  • 6.x.occbin_fixes
  • occbin_fixes
  • 6.x.jae.test
  • 6.x
  • 6.x.jrc
  • plot_initval_decomp
  • bugfixes
  • occbin_enhance
  • occbin_utilities
  • enhance_newrat
  • fixes_6.x
  • master_old
  • master_4.8
  • 5.x
  • occbin_init_smo
  • init_smo
  • 4.5.6
  • 4.5.5
  • 4.5.4
  • 4.5.3
  • 4.5.2
  • 4.5.1
  • 4.5.0
  • 4.4.3
  • 4.4.2
  • 4.4.1
  • 4.4.0
  • 4.4-beta1
  • 4.3.3
  • 4.3.2
  • 4.3.1
  • 4.3.0
  • 4.2.5
  • 4.2.4
  • 4.2.3
  • 4.2.2
41 results

normal_conjugate.cweb

Blame
  • Forked from Dynare / dynare
    8123 commits behind, 22 commits ahead of the upstream repository.
    user avatar
    sebastien authored
    git-svn-id: https://www.dynare.org/svn/dynare/trunk@2905 ac1d8469-bf42-47a9-8791-bf33cf982152
    b92b48c5
    History
    normal_conjugate.cweb 2.78 KiB
    @q $Id$ @>
    @q Copyright 2007, Ondra Kamenik @>
    
    @ Start of {\tt normal\_conjugate.cpp} file.
    
    @c
    
    #include "normal_conjugate.h"
    #include "kord_exception.h"
    
    @<|NormalConj| diffuse prior constructor@>;
    @<|NormalConj| data update constructor@>;
    @<|NormalConj| copy constructor@>;
    @<|NormalConj::update| one observation code@>;
    @<|NormalConj::update| multiple observations code@>;
    @<|NormalConj::update| with |NormalConj| code@>;
    @<|NormalConj::getVariance| code@>;
    
    @ 
    @<|NormalConj| diffuse prior constructor@>=
    NormalConj::NormalConj(int d)
    	: mu(d), kappa(0), nu(-1), lambda(d,d)
    {
    	// Diffuse (noninformative) prior of dimension |d|: zero mean, zero
    	// scatter, no observations (kappa=0) and nu=-1 degrees of freedom,
    	// so that after n updates nu equals n-1.
    	lambda.zeros();
    	mu.zeros();
    }
    
    @ 
    @<|NormalConj| data update constructor@>=
    NormalConj::NormalConj(const ConstTwoDMatrix& ydata)
    	: mu(ydata.numRows()), kappa(ydata.numCols()), nu(ydata.numCols()-1),
    	  lambda(ydata.numRows(), ydata.numRows())
    {
    	// Construct the posterior implied by the data alone (diffuse prior):
    	// |mu| is the sample mean of the columns of |ydata|, |lambda| the
    	// scatter matrix around that mean, kappa the number of observations,
    	// and nu one less than that.
    	const int n = ydata.numCols();
    
    	// Sample mean: average of all columns.
    	mu.zeros();
    	for (int j = 0; j < n; j++)
    		mu.add(1.0/n, ConstVector(ydata, j));
    
    	// Scatter: sum over columns of the outer product of the deviation
    	// from the sample mean.
    	lambda.zeros();
    	for (int j = 0; j < n; j++) {
    		Vector dev(ConstVector(ydata, j));
    		dev.add(-1, mu);
    		lambda.addOuter(dev);
    	}
    }
    
    @ 
    @<|NormalConj| copy constructor@>=
    NormalConj::NormalConj(const NormalConj& nc)
    	: mu(nc.mu),
    	  kappa(nc.kappa),
    	  nu(nc.nu),
    	  lambda(nc.lambda)
    {
    	// Memberwise copy of all posterior parameters; no further work needed.
    }
    
    @ The method performs the following:
    $$\eqalign{
      \mu_1 = &\; {\kappa_0\over \kappa_0+1}\mu_0 + {1\over \kappa_0+1}y\cr
      \kappa_1 = &\; \kappa_0 + 1\cr
      \nu_1 = &\; \nu_0 + 1\cr
      \Lambda_1 = &\; \Lambda_0 + {\kappa_0\over\kappa_0+1}(y-\mu_0)(y-\mu_0)^T,
    }$$
    
    @<|NormalConj::update| one observation code@>=
    void NormalConj::update(const ConstVector& y)
    {
    	KORD_RAISE_IF(y.length() != mu.length(),
    				  "Wrong length of a vector in NormalConj::update");
    
    	// The recursion documented above adds
    	// kappa_0/(kappa_0+1)*(y-mu_0)(y-mu_0)^T to Lambda, where mu_0 is
    	// the OLD mean. The deviation must therefore be taken before |mu|
    	// is updated: the previous code updated |mu| first and so formed
    	// (y-mu_1) = kappa_0/(kappa_0+1)*(y-mu_0), under-weighting the
    	// outer product by (kappa_0/(kappa_0+1))^2 and disagreeing with
    	// the |ConstTwoDMatrix| constructor on the same data.
    	Vector diff(y);
    	diff.add(-1, mu);
    	lambda.addOuter(diff, kappa/(1.0+kappa));
    
    	// Posterior mean: convex combination of old mean and observation.
    	mu.mult(kappa/(1.0+kappa));
    	mu.add(1.0/(1.0+kappa), y);
    
    	kappa++;
    	nu++;
    }
    
    @ The method evaluates the formula in the header file.
    
    @<|NormalConj::update| multiple observations code@>=
    void NormalConj::update(const ConstTwoDMatrix& ydata)
    {
    	// Summarize the data set as its own |NormalConj| (sample mean,
    	// scatter, counts) and merge it in via update(const NormalConj&).
    	update(NormalConj(ydata));
    }
    
    
    @ 
    @<|NormalConj::update| with |NormalConj| code@>=
    void NormalConj::update(const NormalConj& nc)
    {
    	double wold = ((double)kappa)/(kappa+nc.kappa);
    	double wnew = 1-wold;
    
    	// Pooled scatter of two samples (Chan/Golub/LeVeque pairwise
    	// update):
    	//   Lambda = Lambda_1 + Lambda_2
    	//            + kappa_1*kappa_2/(kappa_1+kappa_2)*(mu_2-mu_1)(mu_2-mu_1)^T.
    	// The deviation must use the OLD mean |mu| (taken before |mu| is
    	// updated), with weight wold*nc.kappa = kappa*nc.kappa/(kappa+nc.kappa).
    	// The previous code formed nc.mu - mu_new = wold*(mu_2-mu_1) and
    	// added its outer product with weight 1, i.e. wold^2 instead of
    	// the correct factor; for nc.kappa==1 the corrected weight reduces
    	// to kappa/(kappa+1), matching the one-observation update formula.
    	Vector diff(nc.mu);
    	diff.add(-1, mu);
    	lambda.add(1.0, nc.lambda);
    	lambda.addOuter(diff, wold*nc.kappa);
    
    	// Posterior mean: kappa-weighted average of the two means.
    	mu.mult(wold);
    	mu.add(wnew, nc.mu);
    
    	kappa = kappa + nc.kappa;
    	// Under the diffuse-start convention nu = kappa - 1, so merging
    	// nc.kappa further observations adds nc.kappa degrees of freedom.
    	nu = nu + nc.kappa;
    }
    
    
    @ This returns ${1\over \nu-d-1}\Lambda$, which is the mean of the
    variance in the posterior distribution. If the number of degrees of
    freedom satisfies $\nu\le d+1$, this mean does not exist and NaNs are
    returned.
    
    @<|NormalConj::getVariance| code@>=
    void NormalConj::getVariance(TwoDMatrix& v) const
    {
    	// Posterior mean of the variance is Lambda/(nu-d-1), which exists
    	// only when nu > d+1; otherwise fill |v| with NaNs.
    	if (nu <= getDim()+1) {
    		v.nans();
    		return;
    	}
    	v = (const TwoDMatrix&)lambda;
    	v.mult(1.0/(nu-getDim()-1));
    }
    
    
    @ End of {\tt normal\_conjugate.cpp} file.